problem_id (stringlengths 18-22) | source (stringclasses, 1 value) | task_type (stringclasses, 1 value) | in_source_id (stringlengths 13-58) | prompt (stringlengths 1.71k-18.9k) | golden_diff (stringlengths 145-5.13k) | verification_info (stringlengths 465-23.6k) | num_tokens_prompt (int64, 556-4.1k) | num_tokens_diff (int64, 47-1.02k)
---|---|---|---|---|---|---|---|---|
gh_patches_debug_19645 | rasdani/github-patches | git_diff | Nitrate__Nitrate-607 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Abstract jQuery.ajax calls
Lots of jQuery.ajax calls in JavaScript code, that repeat same structure and part of the same request arguments.
</issue>
<code>
[start of src/tcms/comments/views.py]
1 # -*- coding: utf-8 -*-
2
3 import logging
4
5 from django.conf import settings
6 from django.contrib.auth.mixins import PermissionRequiredMixin
7 from django.http import JsonResponse
8 from django.shortcuts import render
9 from django.views import generic
10 from django.views.decorators.http import require_POST
11
12 import django_comments.signals
13 from django_comments.views.moderation import perform_delete
14
15 from tcms.comments.models import post_comment
16 from tcms.comments.exceptions import InvalidCommentPostRequest
17 from tcms.core.responses import JsonResponseBadRequest
18
19 log = logging.getLogger(__name__)
20
21
22 @require_POST
23 def post(request, template_name='comments/comments.html'):
24 """Post a comment"""
25 data = request.POST.copy()
26 try:
27 target, _ = post_comment(
28 data, request.user, request.META.get('REMOTE_ADDR'))
29 except InvalidCommentPostRequest as e:
30 target = e.target
31 return render(request, template_name, context={'object': target})
32
33
34 class DeleteCommentView(PermissionRequiredMixin, generic.View):
35 """Delete comment from given objects"""
36
37 permission_required = 'django_comments.can_moderate'
38
39 def post(self, request):
40 comments = django_comments.get_model().objects.filter(
41 pk__in=request.POST.getlist('comment_id'),
42 site__pk=settings.SITE_ID,
43 is_removed=False,
44 user_id=request.user.id
45 )
46
47 if not comments:
48 return JsonResponseBadRequest({
49 'message': 'No incoming comment id exists.'
50 })
51
52 # Flag the comment as deleted instead of actually deleting it.
53 for comment in comments:
54 perform_delete(request, comment)
55
56 return JsonResponse({})
57
[end of src/tcms/comments/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/tcms/comments/views.py b/src/tcms/comments/views.py
--- a/src/tcms/comments/views.py
+++ b/src/tcms/comments/views.py
@@ -5,7 +5,6 @@
from django.conf import settings
from django.contrib.auth.mixins import PermissionRequiredMixin
from django.http import JsonResponse
-from django.shortcuts import render
from django.views import generic
from django.views.decorators.http import require_POST
@@ -27,8 +26,10 @@
target, _ = post_comment(
data, request.user, request.META.get('REMOTE_ADDR'))
except InvalidCommentPostRequest as e:
- target = e.target
- return render(request, template_name, context={'object': target})
+ msg = f'Fail to add comment to object {e.target}'
+ log.exception(msg)
+ return JsonResponseBadRequest({'message': msg})
+ return JsonResponse({})
class DeleteCommentView(PermissionRequiredMixin, generic.View):
|
{"golden_diff": "diff --git a/src/tcms/comments/views.py b/src/tcms/comments/views.py\n--- a/src/tcms/comments/views.py\n+++ b/src/tcms/comments/views.py\n@@ -5,7 +5,6 @@\n from django.conf import settings\n from django.contrib.auth.mixins import PermissionRequiredMixin\n from django.http import JsonResponse\n-from django.shortcuts import render\n from django.views import generic\n from django.views.decorators.http import require_POST\n \n@@ -27,8 +26,10 @@\n target, _ = post_comment(\n data, request.user, request.META.get('REMOTE_ADDR'))\n except InvalidCommentPostRequest as e:\n- target = e.target\n- return render(request, template_name, context={'object': target})\n+ msg = f'Fail to add comment to object {e.target}'\n+ log.exception(msg)\n+ return JsonResponseBadRequest({'message': msg})\n+ return JsonResponse({})\n \n \n class DeleteCommentView(PermissionRequiredMixin, generic.View):\n", "issue": "Abstract jQuery.ajax calls\nLots of jQuery.ajax calls in JavaScript code, that repeat same structure and part of the same request arguments.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport logging\n\nfrom django.conf import settings\nfrom django.contrib.auth.mixins import PermissionRequiredMixin\nfrom django.http import JsonResponse\nfrom django.shortcuts import render\nfrom django.views import generic\nfrom django.views.decorators.http import require_POST\n\nimport django_comments.signals\nfrom django_comments.views.moderation import perform_delete\n\nfrom tcms.comments.models import post_comment\nfrom tcms.comments.exceptions import InvalidCommentPostRequest\nfrom tcms.core.responses import JsonResponseBadRequest\n\nlog = logging.getLogger(__name__)\n\n\n@require_POST\ndef post(request, template_name='comments/comments.html'):\n \"\"\"Post a comment\"\"\"\n data = request.POST.copy()\n try:\n target, _ = post_comment(\n data, request.user, request.META.get('REMOTE_ADDR'))\n except InvalidCommentPostRequest as e:\n target = e.target\n return render(request, template_name, context={'object': target})\n\n\nclass DeleteCommentView(PermissionRequiredMixin, generic.View):\n \"\"\"Delete comment from given objects\"\"\"\n\n permission_required = 'django_comments.can_moderate'\n\n def post(self, request):\n comments = django_comments.get_model().objects.filter(\n pk__in=request.POST.getlist('comment_id'),\n site__pk=settings.SITE_ID,\n is_removed=False,\n user_id=request.user.id\n )\n\n if not comments:\n return JsonResponseBadRequest({\n 'message': 'No incoming comment id exists.'\n })\n\n # Flag the comment as deleted instead of actually deleting it.\n for comment in comments:\n perform_delete(request, comment)\n\n return JsonResponse({})\n", "path": "src/tcms/comments/views.py"}]}
| 1,007 | 204 |
gh_patches_debug_30601 | rasdani/github-patches | git_diff | wagtail__wagtail-1888 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Errors from embed chooser are not reported back to the user
It seems that embed functionality of wagtail hello.js editor is not working. I tried to embed a link, modal comes up, everything is fine but when I press "Insert" I get nothing and browser displays this error:
(Firefox) TypeError: a is undefined rangy-core.js:33:479
(Chrome) rangy-core.js:33 Uncaught TypeError: Cannot read property 'nodeType' of undefined
But network data says everything is OK:
POST .../admin/embeds/chooser/upload/ 200 OK
After closing the modal window, embed link is not present.
Errors from embed chooser are not reported back to the user
It seems that embed functionality of wagtail hello.js editor is not working. I tried to embed a link, modal comes up, everything is fine but when I press "Insert" I get nothing and browser displays this error:
(Firefox) TypeError: a is undefined rangy-core.js:33:479
(Chrome) rangy-core.js:33 Uncaught TypeError: Cannot read property 'nodeType' of undefined
But network data says everything is OK:
POST .../admin/embeds/chooser/upload/ 200 OK
After closing the modal window, embed link is not present.
</issue>
<code>
[start of wagtail/wagtailembeds/views/chooser.py]
1 from django.forms.utils import ErrorList
2 from django.utils.translation import ugettext as _
3
4 from wagtail.wagtailadmin.modal_workflow import render_modal_workflow
5 from wagtail.wagtailembeds.forms import EmbedForm
6 from wagtail.wagtailembeds.format import embed_to_editor_html
7
8 from wagtail.wagtailembeds.embeds import EmbedNotFoundException, EmbedlyException, AccessDeniedEmbedlyException
9
10
11
12 def chooser(request):
13 form = EmbedForm()
14
15 return render_modal_workflow(request, 'wagtailembeds/chooser/chooser.html', 'wagtailembeds/chooser/chooser.js', {
16 'form': form,
17 })
18
19
20 def chooser_upload(request):
21 if request.POST:
22 form = EmbedForm(request.POST, request.FILES)
23
24 if form.is_valid():
25 error = None
26 try:
27 embed_html = embed_to_editor_html(form.cleaned_data['url'])
28 return render_modal_workflow(
29 request, None, 'wagtailembeds/chooser/embed_chosen.js',
30 {'embed_html': embed_html}
31 )
32 except AccessDeniedEmbedlyException:
33 error = _("There seems to be a problem with your embedly API key. Please check your settings.")
34 except EmbedNotFoundException:
35 error = _("Cannot find an embed for this URL.")
36 except EmbedlyException:
37 error = _("There seems to be an error with Embedly while trying to embed this URL. Please try again later.")
38
39 if error:
40 errors = form._errors.setdefault('url', ErrorList())
41 errors.append(error)
42 return render_modal_workflow(request, 'wagtailembeds/chooser/chooser.html', 'wagtailembeds/chooser/chooser.js', {
43 'form': form,
44 })
45 else:
46 form = EmbedForm()
47
48 return render_modal_workflow(request, 'wagtailembeds/chooser/chooser.html', 'wagtailembeds/chooser/chooser.js', {
49 'form': form,
50 })
51
[end of wagtail/wagtailembeds/views/chooser.py]
[start of wagtail/wagtailembeds/rich_text.py]
1 from wagtail.wagtailembeds import format
2
3
4 class MediaEmbedHandler(object):
5 """
6 MediaEmbedHandler will be invoked whenever we encounter an element in HTML content
7 with an attribute of data-embedtype="media". The resulting element in the database
8 representation will be:
9 <embed embedtype="media" url="http://vimeo.com/XXXXX">
10 """
11 @staticmethod
12 def get_db_attributes(tag):
13 """
14 Given a tag that we've identified as a media embed (because it has a
15 data-embedtype="media" attribute), return a dict of the attributes we should
16 have on the resulting <embed> element.
17 """
18 return {
19 'url': tag['data-url'],
20 }
21
22 @staticmethod
23 def expand_db_attributes(attrs, for_editor):
24 """
25 Given a dict of attributes from the <embed> tag, return the real HTML
26 representation.
27 """
28 if for_editor:
29 return format.embed_to_editor_html(attrs['url'])
30 else:
31 return format.embed_to_frontend_html(attrs['url'])
32
[end of wagtail/wagtailembeds/rich_text.py]
[start of wagtail/wagtailembeds/format.py]
1 from __future__ import division # Use true division
2
3 from django.template.loader import render_to_string
4
5 from wagtail.wagtailembeds import embeds
6
7
8 def embed_to_frontend_html(url):
9 try:
10 embed = embeds.get_embed(url)
11
12 # Work out ratio
13 if embed.width and embed.height:
14 ratio = str(embed.height / embed.width * 100) + "%"
15 else:
16 ratio = "0"
17
18 # Render template
19 return render_to_string('wagtailembeds/embed_frontend.html', {
20 'embed': embed,
21 'ratio': ratio,
22 })
23 except embeds.EmbedException:
24 return ''
25
26
27 def embed_to_editor_html(url):
28 try:
29 embed = embeds.get_embed(url)
30
31 # Render template
32 return render_to_string('wagtailembeds/embed_editor.html', {
33 'embed': embed,
34 })
35 except embeds.EmbedException:
36 # Could be replaced with a nice error message
37 return ''
38
[end of wagtail/wagtailembeds/format.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/wagtail/wagtailembeds/format.py b/wagtail/wagtailembeds/format.py
--- a/wagtail/wagtailembeds/format.py
+++ b/wagtail/wagtailembeds/format.py
@@ -21,17 +21,15 @@
'ratio': ratio,
})
except embeds.EmbedException:
+ # silently ignore failed embeds, rather than letting them crash the page
return ''
def embed_to_editor_html(url):
- try:
- embed = embeds.get_embed(url)
+ embed = embeds.get_embed(url)
+ # catching EmbedException is the responsibility of the caller
- # Render template
- return render_to_string('wagtailembeds/embed_editor.html', {
- 'embed': embed,
- })
- except embeds.EmbedException:
- # Could be replaced with a nice error message
- return ''
+ # Render template
+ return render_to_string('wagtailembeds/embed_editor.html', {
+ 'embed': embed,
+ })
diff --git a/wagtail/wagtailembeds/rich_text.py b/wagtail/wagtailembeds/rich_text.py
--- a/wagtail/wagtailembeds/rich_text.py
+++ b/wagtail/wagtailembeds/rich_text.py
@@ -1,4 +1,4 @@
-from wagtail.wagtailembeds import format
+from wagtail.wagtailembeds import format, embeds
class MediaEmbedHandler(object):
@@ -26,6 +26,10 @@
representation.
"""
if for_editor:
- return format.embed_to_editor_html(attrs['url'])
+ try:
+ return format.embed_to_editor_html(attrs['url'])
+ except embeds.EmbedException:
+ # Could be replaced with a nice error message
+ return ''
else:
return format.embed_to_frontend_html(attrs['url'])
diff --git a/wagtail/wagtailembeds/views/chooser.py b/wagtail/wagtailembeds/views/chooser.py
--- a/wagtail/wagtailembeds/views/chooser.py
+++ b/wagtail/wagtailembeds/views/chooser.py
@@ -8,7 +8,6 @@
from wagtail.wagtailembeds.embeds import EmbedNotFoundException, EmbedlyException, AccessDeniedEmbedlyException
-
def chooser(request):
form = EmbedForm()
|
{"golden_diff": "diff --git a/wagtail/wagtailembeds/format.py b/wagtail/wagtailembeds/format.py\n--- a/wagtail/wagtailembeds/format.py\n+++ b/wagtail/wagtailembeds/format.py\n@@ -21,17 +21,15 @@\n 'ratio': ratio,\n })\n except embeds.EmbedException:\n+ # silently ignore failed embeds, rather than letting them crash the page\n return ''\n \n \n def embed_to_editor_html(url):\n- try:\n- embed = embeds.get_embed(url)\n+ embed = embeds.get_embed(url)\n+ # catching EmbedException is the responsibility of the caller\n \n- # Render template\n- return render_to_string('wagtailembeds/embed_editor.html', {\n- 'embed': embed,\n- })\n- except embeds.EmbedException:\n- # Could be replaced with a nice error message\n- return ''\n+ # Render template\n+ return render_to_string('wagtailembeds/embed_editor.html', {\n+ 'embed': embed,\n+ })\ndiff --git a/wagtail/wagtailembeds/rich_text.py b/wagtail/wagtailembeds/rich_text.py\n--- a/wagtail/wagtailembeds/rich_text.py\n+++ b/wagtail/wagtailembeds/rich_text.py\n@@ -1,4 +1,4 @@\n-from wagtail.wagtailembeds import format\n+from wagtail.wagtailembeds import format, embeds\n \n \n class MediaEmbedHandler(object):\n@@ -26,6 +26,10 @@\n representation.\n \"\"\"\n if for_editor:\n- return format.embed_to_editor_html(attrs['url'])\n+ try:\n+ return format.embed_to_editor_html(attrs['url'])\n+ except embeds.EmbedException:\n+ # Could be replaced with a nice error message\n+ return ''\n else:\n return format.embed_to_frontend_html(attrs['url'])\ndiff --git a/wagtail/wagtailembeds/views/chooser.py b/wagtail/wagtailembeds/views/chooser.py\n--- a/wagtail/wagtailembeds/views/chooser.py\n+++ b/wagtail/wagtailembeds/views/chooser.py\n@@ -8,7 +8,6 @@\n from wagtail.wagtailembeds.embeds import EmbedNotFoundException, EmbedlyException, AccessDeniedEmbedlyException\n \n \n-\n def chooser(request):\n form = EmbedForm()\n", "issue": "Errors from embed chooser are not reported back to the user\nIt seems that embed functionality of wagtail hello.js editor is not working. I tried to embed a link, modal comes up, everything is fine but when I press \"Insert\" I get nothing and browser displays this error:\n\n(Firefox) TypeError: a is undefined rangy-core.js:33:479\n(Chrome) rangy-core.js:33 Uncaught TypeError: Cannot read property 'nodeType' of undefined\n\nBut network data says everything is OK:\nPOST .../admin/embeds/chooser/upload/ 200 OK\nAfter closing the modal window, embed link is not present.\n\nErrors from embed chooser are not reported back to the user\nIt seems that embed functionality of wagtail hello.js editor is not working. 
I tried to embed a link, modal comes up, everything is fine but when I press \"Insert\" I get nothing and browser displays this error:\n\n(Firefox) TypeError: a is undefined rangy-core.js:33:479\n(Chrome) rangy-core.js:33 Uncaught TypeError: Cannot read property 'nodeType' of undefined\n\nBut network data says everything is OK:\nPOST .../admin/embeds/chooser/upload/ 200 OK\nAfter closing the modal window, embed link is not present.\n\n", "before_files": [{"content": "from django.forms.utils import ErrorList\nfrom django.utils.translation import ugettext as _\n\nfrom wagtail.wagtailadmin.modal_workflow import render_modal_workflow\nfrom wagtail.wagtailembeds.forms import EmbedForm\nfrom wagtail.wagtailembeds.format import embed_to_editor_html\n\nfrom wagtail.wagtailembeds.embeds import EmbedNotFoundException, EmbedlyException, AccessDeniedEmbedlyException\n\n\n\ndef chooser(request):\n form = EmbedForm()\n\n return render_modal_workflow(request, 'wagtailembeds/chooser/chooser.html', 'wagtailembeds/chooser/chooser.js', {\n 'form': form,\n })\n\n\ndef chooser_upload(request):\n if request.POST:\n form = EmbedForm(request.POST, request.FILES)\n\n if form.is_valid():\n error = None\n try:\n embed_html = embed_to_editor_html(form.cleaned_data['url'])\n return render_modal_workflow(\n request, None, 'wagtailembeds/chooser/embed_chosen.js',\n {'embed_html': embed_html}\n )\n except AccessDeniedEmbedlyException:\n error = _(\"There seems to be a problem with your embedly API key. Please check your settings.\")\n except EmbedNotFoundException:\n error = _(\"Cannot find an embed for this URL.\")\n except EmbedlyException:\n error = _(\"There seems to be an error with Embedly while trying to embed this URL. Please try again later.\")\n\n if error:\n errors = form._errors.setdefault('url', ErrorList())\n errors.append(error)\n return render_modal_workflow(request, 'wagtailembeds/chooser/chooser.html', 'wagtailembeds/chooser/chooser.js', {\n 'form': form,\n })\n else:\n form = EmbedForm()\n\n return render_modal_workflow(request, 'wagtailembeds/chooser/chooser.html', 'wagtailembeds/chooser/chooser.js', {\n 'form': form,\n })\n", "path": "wagtail/wagtailembeds/views/chooser.py"}, {"content": "from wagtail.wagtailembeds import format\n\n\nclass MediaEmbedHandler(object):\n \"\"\"\n MediaEmbedHandler will be invoked whenever we encounter an element in HTML content\n with an attribute of data-embedtype=\"media\". 
The resulting element in the database\n representation will be:\n <embed embedtype=\"media\" url=\"http://vimeo.com/XXXXX\">\n \"\"\"\n @staticmethod\n def get_db_attributes(tag):\n \"\"\"\n Given a tag that we've identified as a media embed (because it has a\n data-embedtype=\"media\" attribute), return a dict of the attributes we should\n have on the resulting <embed> element.\n \"\"\"\n return {\n 'url': tag['data-url'],\n }\n\n @staticmethod\n def expand_db_attributes(attrs, for_editor):\n \"\"\"\n Given a dict of attributes from the <embed> tag, return the real HTML\n representation.\n \"\"\"\n if for_editor:\n return format.embed_to_editor_html(attrs['url'])\n else:\n return format.embed_to_frontend_html(attrs['url'])\n", "path": "wagtail/wagtailembeds/rich_text.py"}, {"content": "from __future__ import division # Use true division\n\nfrom django.template.loader import render_to_string\n\nfrom wagtail.wagtailembeds import embeds\n\n\ndef embed_to_frontend_html(url):\n try:\n embed = embeds.get_embed(url)\n\n # Work out ratio\n if embed.width and embed.height:\n ratio = str(embed.height / embed.width * 100) + \"%\"\n else:\n ratio = \"0\"\n\n # Render template\n return render_to_string('wagtailembeds/embed_frontend.html', {\n 'embed': embed,\n 'ratio': ratio,\n })\n except embeds.EmbedException:\n return ''\n\n\ndef embed_to_editor_html(url):\n try:\n embed = embeds.get_embed(url)\n\n # Render template\n return render_to_string('wagtailembeds/embed_editor.html', {\n 'embed': embed,\n })\n except embeds.EmbedException:\n # Could be replaced with a nice error message\n return ''\n", "path": "wagtail/wagtailembeds/format.py"}]}
| 1,992 | 568 |
gh_patches_debug_23974 | rasdani/github-patches | git_diff | scipy__scipy-17714 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ENH: Expose DisjointSet._sizes
### Is your feature request related to a problem? Please describe.
Currently, `scipy.cluster.hierarchy.DisjointSet` internally keeps track of the size of each subset in the `_sizes` dict, making it available in O(1) (well, one needs to canonicalize keys first, in O(alpha(n)) ~ O(1)), but does not expose that info; the only way to access it publically is to reconstruct the actual subset (`.subset()`) and compute its size (`len()`), which is much slower (that's proportional to the size of the subset).
An example of algorithm that benefits from this info is "An efficient algorithm for exact evaluation of stochastic watersheds", Malmberg & Luengo 2014 (algorithm 2 relies on the disjoint-set, and equation 3 is the one where the size of the subset is needed).
### Describe the solution you'd like.
Add a `DisjointSet().size(x)` API (perhaps `size_of_subset` if one wants to be super clear), which is `return self._sizes[self[x]]` (where `self[x]` canonicalizes the key).
### Describe alternatives you've considered.
N/A
### Additional context (e.g. screenshots, GIFs)
N/A
</issue>
<code>
[start of scipy/_lib/_disjoint_set.py]
1 """
2 Disjoint set data structure
3 """
4
5
6 class DisjointSet:
7 """ Disjoint set data structure for incremental connectivity queries.
8
9 .. versionadded:: 1.6.0
10
11 Attributes
12 ----------
13 n_subsets : int
14 The number of subsets.
15
16 Methods
17 -------
18 add
19 merge
20 connected
21 subset
22 subsets
23 __getitem__
24
25 Notes
26 -----
27 This class implements the disjoint set [1]_, also known as the *union-find*
28 or *merge-find* data structure. The *find* operation (implemented in
29 `__getitem__`) implements the *path halving* variant. The *merge* method
30 implements the *merge by size* variant.
31
32 References
33 ----------
34 .. [1] https://en.wikipedia.org/wiki/Disjoint-set_data_structure
35
36 Examples
37 --------
38 >>> from scipy.cluster.hierarchy import DisjointSet
39
40 Initialize a disjoint set:
41
42 >>> disjoint_set = DisjointSet([1, 2, 3, 'a', 'b'])
43
44 Merge some subsets:
45
46 >>> disjoint_set.merge(1, 2)
47 True
48 >>> disjoint_set.merge(3, 'a')
49 True
50 >>> disjoint_set.merge('a', 'b')
51 True
52 >>> disjoint_set.merge('b', 'b')
53 False
54
55 Find root elements:
56
57 >>> disjoint_set[2]
58 1
59 >>> disjoint_set['b']
60 3
61
62 Test connectivity:
63
64 >>> disjoint_set.connected(1, 2)
65 True
66 >>> disjoint_set.connected(1, 'b')
67 False
68
69 List elements in disjoint set:
70
71 >>> list(disjoint_set)
72 [1, 2, 3, 'a', 'b']
73
74 Get the subset containing 'a':
75
76 >>> disjoint_set.subset('a')
77 {'a', 3, 'b'}
78
79 Get all subsets in the disjoint set:
80
81 >>> disjoint_set.subsets()
82 [{1, 2}, {'a', 3, 'b'}]
83 """
84 def __init__(self, elements=None):
85 self.n_subsets = 0
86 self._sizes = {}
87 self._parents = {}
88 # _nbrs is a circular linked list which links connected elements.
89 self._nbrs = {}
90 # _indices tracks the element insertion order in `__iter__`.
91 self._indices = {}
92 if elements is not None:
93 for x in elements:
94 self.add(x)
95
96 def __iter__(self):
97 """Returns an iterator of the elements in the disjoint set.
98
99 Elements are ordered by insertion order.
100 """
101 return iter(self._indices)
102
103 def __len__(self):
104 return len(self._indices)
105
106 def __contains__(self, x):
107 return x in self._indices
108
109 def __getitem__(self, x):
110 """Find the root element of `x`.
111
112 Parameters
113 ----------
114 x : hashable object
115 Input element.
116
117 Returns
118 -------
119 root : hashable object
120 Root element of `x`.
121 """
122 if x not in self._indices:
123 raise KeyError(x)
124
125 # find by "path halving"
126 parents = self._parents
127 while self._indices[x] != self._indices[parents[x]]:
128 parents[x] = parents[parents[x]]
129 x = parents[x]
130 return x
131
132 def add(self, x):
133 """Add element `x` to disjoint set
134 """
135 if x in self._indices:
136 return
137
138 self._sizes[x] = 1
139 self._parents[x] = x
140 self._nbrs[x] = x
141 self._indices[x] = len(self._indices)
142 self.n_subsets += 1
143
144 def merge(self, x, y):
145 """Merge the subsets of `x` and `y`.
146
147 The smaller subset (the child) is merged into the larger subset (the
148 parent). If the subsets are of equal size, the root element which was
149 first inserted into the disjoint set is selected as the parent.
150
151 Parameters
152 ----------
153 x, y : hashable object
154 Elements to merge.
155
156 Returns
157 -------
158 merged : bool
159 True if `x` and `y` were in disjoint sets, False otherwise.
160 """
161 xr = self[x]
162 yr = self[y]
163 if self._indices[xr] == self._indices[yr]:
164 return False
165
166 sizes = self._sizes
167 if (sizes[xr], self._indices[yr]) < (sizes[yr], self._indices[xr]):
168 xr, yr = yr, xr
169 self._parents[yr] = xr
170 self._sizes[xr] += self._sizes[yr]
171 self._nbrs[xr], self._nbrs[yr] = self._nbrs[yr], self._nbrs[xr]
172 self.n_subsets -= 1
173 return True
174
175 def connected(self, x, y):
176 """Test whether `x` and `y` are in the same subset.
177
178 Parameters
179 ----------
180 x, y : hashable object
181 Elements to test.
182
183 Returns
184 -------
185 result : bool
186 True if `x` and `y` are in the same set, False otherwise.
187 """
188 return self._indices[self[x]] == self._indices[self[y]]
189
190 def subset(self, x):
191 """Get the subset containing `x`.
192
193 Parameters
194 ----------
195 x : hashable object
196 Input element.
197
198 Returns
199 -------
200 result : set
201 Subset containing `x`.
202 """
203 if x not in self._indices:
204 raise KeyError(x)
205
206 result = [x]
207 nxt = self._nbrs[x]
208 while self._indices[nxt] != self._indices[x]:
209 result.append(nxt)
210 nxt = self._nbrs[nxt]
211 return set(result)
212
213 def subsets(self):
214 """Get all the subsets in the disjoint set.
215
216 Returns
217 -------
218 result : list
219 Subsets in the disjoint set.
220 """
221 result = []
222 visited = set()
223 for x in self:
224 if x not in visited:
225 xset = self.subset(x)
226 visited.update(xset)
227 result.append(xset)
228 return result
229
[end of scipy/_lib/_disjoint_set.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/scipy/_lib/_disjoint_set.py b/scipy/_lib/_disjoint_set.py
--- a/scipy/_lib/_disjoint_set.py
+++ b/scipy/_lib/_disjoint_set.py
@@ -19,6 +19,7 @@
merge
connected
subset
+ subset_size
subsets
__getitem__
@@ -76,6 +77,12 @@
>>> disjoint_set.subset('a')
{'a', 3, 'b'}
+ Get the size of the subset containing 'a' (without actually instantiating
+ the subset):
+
+ >>> disjoint_set.subset_size('a')
+ 3
+
Get all subsets in the disjoint set:
>>> disjoint_set.subsets()
@@ -210,6 +217,25 @@
nxt = self._nbrs[nxt]
return set(result)
+ def subset_size(self, x):
+ """Get the size of the subset containing `x`.
+
+ Note that this method is faster than ``len(self.subset(x))`` because
+ the size is directly read off an internal field, without the need to
+ instantiate the full subset.
+
+ Parameters
+ ----------
+ x : hashable object
+ Input element.
+
+ Returns
+ -------
+ result : int
+ Size of the subset containing `x`.
+ """
+ return self._sizes[self[x]]
+
def subsets(self):
"""Get all the subsets in the disjoint set.
|
{"golden_diff": "diff --git a/scipy/_lib/_disjoint_set.py b/scipy/_lib/_disjoint_set.py\n--- a/scipy/_lib/_disjoint_set.py\n+++ b/scipy/_lib/_disjoint_set.py\n@@ -19,6 +19,7 @@\n merge\n connected\n subset\n+ subset_size\n subsets\n __getitem__\n \n@@ -76,6 +77,12 @@\n >>> disjoint_set.subset('a')\n {'a', 3, 'b'}\n \n+ Get the size of the subset containing 'a' (without actually instantiating\n+ the subset):\n+\n+ >>> disjoint_set.subset_size('a')\n+ 3\n+\n Get all subsets in the disjoint set:\n \n >>> disjoint_set.subsets()\n@@ -210,6 +217,25 @@\n nxt = self._nbrs[nxt]\n return set(result)\n \n+ def subset_size(self, x):\n+ \"\"\"Get the size of the subset containing `x`.\n+\n+ Note that this method is faster than ``len(self.subset(x))`` because\n+ the size is directly read off an internal field, without the need to\n+ instantiate the full subset.\n+\n+ Parameters\n+ ----------\n+ x : hashable object\n+ Input element.\n+\n+ Returns\n+ -------\n+ result : int\n+ Size of the subset containing `x`.\n+ \"\"\"\n+ return self._sizes[self[x]]\n+\n def subsets(self):\n \"\"\"Get all the subsets in the disjoint set.\n", "issue": "ENH: Expose DisjointSet._sizes\n### Is your feature request related to a problem? Please describe.\n\nCurrently, `scipy.cluster.hierarchy.DisjointSet` internally keeps track of the size of each subset in the `_sizes` dict, making it available in O(1) (well, one needs to canonicalize keys first, in O(alpha(n)) ~ O(1)), but does not expose that info; the only way to access it publically is to reconstruct the actual subset (`.subset()`) and compute its size (`len()`), which is much slower (that's proportional to the size of the subset).\r\n\r\nAn example of algorithm that benefits from this info is \"An efficient algorithm for exact evaluation of stochastic watersheds\", Malmberg & Luengo 2014 (algorithm 2 relies on the disjoint-set, and equation 3 is the one where the size of the subset is needed).\n\n### Describe the solution you'd like.\n\nAdd a `DisjointSet().size(x)` API (perhaps `size_of_subset` if one wants to be super clear), which is `return self._sizes[self[x]]` (where `self[x]` canonicalizes the key).\n\n### Describe alternatives you've considered.\n\nN/A\n\n### Additional context (e.g. screenshots, GIFs)\n\nN/A\n", "before_files": [{"content": "\"\"\"\nDisjoint set data structure\n\"\"\"\n\n\nclass DisjointSet:\n \"\"\" Disjoint set data structure for incremental connectivity queries.\n\n .. versionadded:: 1.6.0\n\n Attributes\n ----------\n n_subsets : int\n The number of subsets.\n\n Methods\n -------\n add\n merge\n connected\n subset\n subsets\n __getitem__\n\n Notes\n -----\n This class implements the disjoint set [1]_, also known as the *union-find*\n or *merge-find* data structure. The *find* operation (implemented in\n `__getitem__`) implements the *path halving* variant. The *merge* method\n implements the *merge by size* variant.\n\n References\n ----------\n .. 
[1] https://en.wikipedia.org/wiki/Disjoint-set_data_structure\n\n Examples\n --------\n >>> from scipy.cluster.hierarchy import DisjointSet\n\n Initialize a disjoint set:\n\n >>> disjoint_set = DisjointSet([1, 2, 3, 'a', 'b'])\n\n Merge some subsets:\n\n >>> disjoint_set.merge(1, 2)\n True\n >>> disjoint_set.merge(3, 'a')\n True\n >>> disjoint_set.merge('a', 'b')\n True\n >>> disjoint_set.merge('b', 'b')\n False\n\n Find root elements:\n\n >>> disjoint_set[2]\n 1\n >>> disjoint_set['b']\n 3\n\n Test connectivity:\n\n >>> disjoint_set.connected(1, 2)\n True\n >>> disjoint_set.connected(1, 'b')\n False\n\n List elements in disjoint set:\n\n >>> list(disjoint_set)\n [1, 2, 3, 'a', 'b']\n\n Get the subset containing 'a':\n\n >>> disjoint_set.subset('a')\n {'a', 3, 'b'}\n\n Get all subsets in the disjoint set:\n\n >>> disjoint_set.subsets()\n [{1, 2}, {'a', 3, 'b'}]\n \"\"\"\n def __init__(self, elements=None):\n self.n_subsets = 0\n self._sizes = {}\n self._parents = {}\n # _nbrs is a circular linked list which links connected elements.\n self._nbrs = {}\n # _indices tracks the element insertion order in `__iter__`.\n self._indices = {}\n if elements is not None:\n for x in elements:\n self.add(x)\n\n def __iter__(self):\n \"\"\"Returns an iterator of the elements in the disjoint set.\n\n Elements are ordered by insertion order.\n \"\"\"\n return iter(self._indices)\n\n def __len__(self):\n return len(self._indices)\n\n def __contains__(self, x):\n return x in self._indices\n\n def __getitem__(self, x):\n \"\"\"Find the root element of `x`.\n\n Parameters\n ----------\n x : hashable object\n Input element.\n\n Returns\n -------\n root : hashable object\n Root element of `x`.\n \"\"\"\n if x not in self._indices:\n raise KeyError(x)\n\n # find by \"path halving\"\n parents = self._parents\n while self._indices[x] != self._indices[parents[x]]:\n parents[x] = parents[parents[x]]\n x = parents[x]\n return x\n\n def add(self, x):\n \"\"\"Add element `x` to disjoint set\n \"\"\"\n if x in self._indices:\n return\n\n self._sizes[x] = 1\n self._parents[x] = x\n self._nbrs[x] = x\n self._indices[x] = len(self._indices)\n self.n_subsets += 1\n\n def merge(self, x, y):\n \"\"\"Merge the subsets of `x` and `y`.\n\n The smaller subset (the child) is merged into the larger subset (the\n parent). 
If the subsets are of equal size, the root element which was\n first inserted into the disjoint set is selected as the parent.\n\n Parameters\n ----------\n x, y : hashable object\n Elements to merge.\n\n Returns\n -------\n merged : bool\n True if `x` and `y` were in disjoint sets, False otherwise.\n \"\"\"\n xr = self[x]\n yr = self[y]\n if self._indices[xr] == self._indices[yr]:\n return False\n\n sizes = self._sizes\n if (sizes[xr], self._indices[yr]) < (sizes[yr], self._indices[xr]):\n xr, yr = yr, xr\n self._parents[yr] = xr\n self._sizes[xr] += self._sizes[yr]\n self._nbrs[xr], self._nbrs[yr] = self._nbrs[yr], self._nbrs[xr]\n self.n_subsets -= 1\n return True\n\n def connected(self, x, y):\n \"\"\"Test whether `x` and `y` are in the same subset.\n\n Parameters\n ----------\n x, y : hashable object\n Elements to test.\n\n Returns\n -------\n result : bool\n True if `x` and `y` are in the same set, False otherwise.\n \"\"\"\n return self._indices[self[x]] == self._indices[self[y]]\n\n def subset(self, x):\n \"\"\"Get the subset containing `x`.\n\n Parameters\n ----------\n x : hashable object\n Input element.\n\n Returns\n -------\n result : set\n Subset containing `x`.\n \"\"\"\n if x not in self._indices:\n raise KeyError(x)\n\n result = [x]\n nxt = self._nbrs[x]\n while self._indices[nxt] != self._indices[x]:\n result.append(nxt)\n nxt = self._nbrs[nxt]\n return set(result)\n\n def subsets(self):\n \"\"\"Get all the subsets in the disjoint set.\n\n Returns\n -------\n result : list\n Subsets in the disjoint set.\n \"\"\"\n result = []\n visited = set()\n for x in self:\n if x not in visited:\n xset = self.subset(x)\n visited.update(xset)\n result.append(xset)\n return result\n", "path": "scipy/_lib/_disjoint_set.py"}]}
| 2,779 | 344 |
gh_patches_debug_37918 | rasdani/github-patches | git_diff | rasterio__rasterio-2157 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Missing `RMS` support in build_overviews
https://github.com/mapbox/rasterio/blob/3c630f8a30d31ceb61aa52ad3ab96f4fc25f2ecd/rasterio/_io.pyx#L1794-L1801
> @param pszResampling one of "AVERAGE", "AVERAGE_MAGPHASE", "RMS", "BILINEAR", "CUBIC", "CUBICSPLINE", "GAUSS", "LANCZOS", "MODE", "NEAREST", "NONE"
https://github.com/OSGeo/gdal/blob/3d32de21b79ab7e09024f55a0e6740f00318dd3e/gdal/gcore/gdaldataset.cpp#L1908-L1909
</issue>
<code>
[start of rasterio/enums.py]
1 """Enumerations."""
2
3 from enum import Enum, IntEnum
4
5
6 class ColorInterp(IntEnum):
7 """Raster band color interpretation."""
8 undefined = 0
9 gray = 1
10 grey = 1
11 palette = 2
12 red = 3
13 green = 4
14 blue = 5
15 alpha = 6
16 hue = 7
17 saturation = 8
18 lightness = 9
19 cyan = 10
20 magenta = 11
21 yellow = 12
22 black = 13
23 Y = 14
24 Cb = 15
25 Cr = 16
26
27
28 class Resampling(IntEnum):
29 """Available warp resampling algorithms.
30
31 The first 8, 'nearest', 'bilinear', 'cubic', 'cubic_spline',
32 'lanczos', 'average', 'mode', and 'gauss', are available for making
33 dataset overviews.
34
35 'max', 'min', 'med', 'q1', 'q3' are only supported in GDAL >= 2.0.0.
36
37 'nearest', 'bilinear', 'cubic', 'cubic_spline', 'lanczos',
38 'average', 'mode' are always available (GDAL >= 1.10).
39
40 'sum' is only supported in GDAL >= 3.1.
41
42 'rms' is only supported in GDAL >= 3.3.
43
44 Note: 'gauss' is not available to the functions in rio.warp.
45 """
46 nearest = 0
47 bilinear = 1
48 cubic = 2
49 cubic_spline = 3
50 lanczos = 4
51 average = 5
52 mode = 6
53 gauss = 7
54 max = 8
55 min = 9
56 med = 10
57 q1 = 11
58 q3 = 12
59 sum = 13
60 rms = 14
61
62
63 class Compression(Enum):
64 """Available compression algorithms."""
65 jpeg = 'JPEG'
66 lzw = 'LZW'
67 packbits = 'PACKBITS'
68 deflate = 'DEFLATE'
69 ccittrle = 'CCITTRLE'
70 ccittfax3 = 'CCITTFAX3'
71 ccittfax4 = 'CCITTFAX4'
72 lzma = 'LZMA'
73 none = 'NONE'
74 zstd = 'ZSTD'
75 lerc = 'LERC'
76 webp = 'WEBP'
77 jpeg2000 = 'JPEG2000'
78
79
80 class Interleaving(Enum):
81 pixel = 'PIXEL'
82 line = 'LINE'
83 band = 'BAND'
84
85
86 class MaskFlags(IntEnum):
87 all_valid = 1
88 per_dataset = 2
89 alpha = 4
90 nodata = 8
91
92
93 class PhotometricInterp(Enum):
94 black = 'MINISBLACK'
95 white = 'MINISWHITE'
96 rgb = 'RGB'
97 cmyk = 'CMYK'
98 ycbcr = 'YCbCr'
99 cielab = 'CIELAB'
100 icclab = 'ICCLAB'
101 itulab = 'ITULAB'
102
103 class MergeAlg(Enum):
104 """Available rasterization algorithms"""
105 replace = 'REPLACE'
106 add = 'ADD'
107
[end of rasterio/enums.py]
[start of rasterio/rio/overview.py]
1 # coding: utf-8
2 """Manage overviews of a dataset."""
3
4 from functools import reduce
5 import logging
6 import operator
7
8 import click
9
10 from . import options
11 import rasterio
12 from rasterio.enums import Resampling
13
14
15 def build_handler(ctx, param, value):
16 if value:
17 try:
18 if '^' in value:
19 base, exp_range = value.split('^')
20 exp_min, exp_max = (int(v) for v in exp_range.split('..'))
21 value = [pow(int(base), k) for k in range(exp_min, exp_max + 1)]
22 elif ',' in value:
23 value = [int(v) for v in value.split(',')]
24 elif value == "auto":
25 pass
26 else:
27 raise Exception
28 except Exception:
29 raise click.BadParameter(u"must match 'n,n,n,…', 'n^n..n', or 'auto'.")
30 return value
31
32
33 def get_maximum_overview_level(width, height, minsize=256):
34 """
35 Calculate the maximum overview level of a dataset at which
36 the smallest overview is smaller than `minsize`.
37
38 Attributes
39 ----------
40 width : int
41 Width of the dataset.
42 height : int
43 Height of the dataset.
44 minsize : int (default: 256)
45 Minimum overview size.
46
47 Returns
48 -------
49 overview_level: int
50 overview level.
51
52 """
53 overview_level = 0
54 overview_factor = 1
55 while min(width // overview_factor, height // overview_factor) > minsize:
56 overview_factor *= 2
57 overview_level += 1
58
59 return overview_level
60
61
62 @click.command('overview', short_help="Construct overviews in an existing dataset.")
63 @options.file_in_arg
64 @click.option('--build', callback=build_handler, metavar=u"f1,f2,…|b^min..max|auto",
65 help="A sequence of decimation factors specified as "
66 "comma-separated list of numbers or a base and range of "
67 "exponents, or 'auto' to automatically determine the maximum factor.")
68 @click.option('--ls', help="Print the overviews for each band.",
69 is_flag=True, default=False)
70 @click.option('--rebuild', help="Reconstruct existing overviews.",
71 is_flag=True, default=False)
72 @click.option('--resampling', help="Resampling algorithm.",
73 type=click.Choice(
74 [it.name for it in Resampling if it.value in [0, 1, 2, 3, 4, 5, 6, 7]]),
75 default='nearest', show_default=True)
76 @click.pass_context
77 def overview(ctx, input, build, ls, rebuild, resampling):
78 """Construct overviews in an existing dataset.
79
80 A pyramid of overviews computed once and stored in the dataset can
81 improve performance in some applications.
82
83 The decimation levels at which to build overviews can be specified as
84 a comma separated list
85
86 rio overview --build 2,4,8,16
87
88 or a base and range of exponents
89
90 rio overview --build 2^1..4
91
92 or 'auto' to automatically determine the maximum decimation level at
93 which the smallest overview is smaller than 256 pixels in size.
94
95 rio overview --build auto
96
97 Note that overviews can not currently be removed and are not
98 automatically updated when the dataset's primary bands are
99 modified.
100
101 Information about existing overviews can be printed using the --ls
102 option.
103
104 rio overview --ls
105
106 """
107 with ctx.obj['env']:
108 if ls:
109 with rasterio.open(input, 'r') as dst:
110 resampling_method = dst.tags(
111 ns='rio_overview').get('resampling') or 'unknown'
112
113 click.echo("Overview factors:")
114 for idx in dst.indexes:
115 click.echo(" Band %d: %s (method: '%s')" % (
116 idx, dst.overviews(idx) or 'None', resampling_method))
117 elif rebuild:
118 with rasterio.open(input, 'r+') as dst:
119 # Build the same overviews for all bands.
120 factors = reduce(
121 operator.or_,
122 [set(dst.overviews(i)) for i in dst.indexes])
123
124 # Attempt to recover the resampling method from dataset tags.
125 resampling_method = dst.tags(
126 ns='rio_overview').get('resampling') or resampling
127
128 dst.build_overviews(
129 list(factors), Resampling[resampling_method])
130
131 elif build:
132 with rasterio.open(input, 'r+') as dst:
133 if build == "auto":
134 overview_level = get_maximum_overview_level(dst.width, dst.height)
135 build = [2 ** j for j in range(1, overview_level + 1)]
136 dst.build_overviews(build, Resampling[resampling])
137
138 # Save the resampling method to a tag.
139 dst.update_tags(ns='rio_overview', resampling=resampling)
140
141 else:
142 raise click.UsageError(
143 "Please specify --ls, --rebuild, or --build ...")
144
[end of rasterio/rio/overview.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/rasterio/enums.py b/rasterio/enums.py
--- a/rasterio/enums.py
+++ b/rasterio/enums.py
@@ -60,6 +60,30 @@
rms = 14
+class OverviewResampling(IntEnum):
+ """Available Overview resampling algorithms.
+
+ The first 8, 'nearest', 'bilinear', 'cubic', 'cubic_spline',
+ 'lanczos', 'average', 'mode', and 'gauss', are available for making
+ dataset overviews.
+
+ 'nearest', 'bilinear', 'cubic', 'cubic_spline', 'lanczos',
+ 'average', 'mode' are always available (GDAL >= 1.10).
+
+ 'rms' is only supported in GDAL >= 3.3.
+
+ """
+ nearest = 0
+ bilinear = 1
+ cubic = 2
+ cubic_spline = 3
+ lanczos = 4
+ average = 5
+ mode = 6
+ gauss = 7
+ rms = 14
+
+
class Compression(Enum):
"""Available compression algorithms."""
jpeg = 'JPEG'
diff --git a/rasterio/rio/overview.py b/rasterio/rio/overview.py
--- a/rasterio/rio/overview.py
+++ b/rasterio/rio/overview.py
@@ -9,7 +9,7 @@
from . import options
import rasterio
-from rasterio.enums import Resampling
+from rasterio.enums import OverviewResampling
def build_handler(ctx, param, value):
@@ -70,8 +70,7 @@
@click.option('--rebuild', help="Reconstruct existing overviews.",
is_flag=True, default=False)
@click.option('--resampling', help="Resampling algorithm.",
- type=click.Choice(
- [it.name for it in Resampling if it.value in [0, 1, 2, 3, 4, 5, 6, 7]]),
+ type=click.Choice([it.name for it in OverviewResampling]),
default='nearest', show_default=True)
@click.pass_context
def overview(ctx, input, build, ls, rebuild, resampling):
@@ -126,14 +125,14 @@
ns='rio_overview').get('resampling') or resampling
dst.build_overviews(
- list(factors), Resampling[resampling_method])
+ list(factors), OverviewResampling[resampling_method])
elif build:
with rasterio.open(input, 'r+') as dst:
if build == "auto":
overview_level = get_maximum_overview_level(dst.width, dst.height)
build = [2 ** j for j in range(1, overview_level + 1)]
- dst.build_overviews(build, Resampling[resampling])
+ dst.build_overviews(build, OverviewResampling[resampling])
# Save the resampling method to a tag.
dst.update_tags(ns='rio_overview', resampling=resampling)
|
{"golden_diff": "diff --git a/rasterio/enums.py b/rasterio/enums.py\n--- a/rasterio/enums.py\n+++ b/rasterio/enums.py\n@@ -60,6 +60,30 @@\n rms = 14\n \n \n+class OverviewResampling(IntEnum):\n+ \"\"\"Available Overview resampling algorithms.\n+\n+ The first 8, 'nearest', 'bilinear', 'cubic', 'cubic_spline',\n+ 'lanczos', 'average', 'mode', and 'gauss', are available for making\n+ dataset overviews.\n+\n+ 'nearest', 'bilinear', 'cubic', 'cubic_spline', 'lanczos',\n+ 'average', 'mode' are always available (GDAL >= 1.10).\n+\n+ 'rms' is only supported in GDAL >= 3.3.\n+\n+ \"\"\"\n+ nearest = 0\n+ bilinear = 1\n+ cubic = 2\n+ cubic_spline = 3\n+ lanczos = 4\n+ average = 5\n+ mode = 6\n+ gauss = 7\n+ rms = 14\n+\n+\n class Compression(Enum):\n \"\"\"Available compression algorithms.\"\"\"\n jpeg = 'JPEG'\ndiff --git a/rasterio/rio/overview.py b/rasterio/rio/overview.py\n--- a/rasterio/rio/overview.py\n+++ b/rasterio/rio/overview.py\n@@ -9,7 +9,7 @@\n \n from . import options\n import rasterio\n-from rasterio.enums import Resampling\n+from rasterio.enums import OverviewResampling\n \n \n def build_handler(ctx, param, value):\n@@ -70,8 +70,7 @@\n @click.option('--rebuild', help=\"Reconstruct existing overviews.\",\n is_flag=True, default=False)\n @click.option('--resampling', help=\"Resampling algorithm.\",\n- type=click.Choice(\n- [it.name for it in Resampling if it.value in [0, 1, 2, 3, 4, 5, 6, 7]]),\n+ type=click.Choice([it.name for it in OverviewResampling]),\n default='nearest', show_default=True)\n @click.pass_context\n def overview(ctx, input, build, ls, rebuild, resampling):\n@@ -126,14 +125,14 @@\n ns='rio_overview').get('resampling') or resampling\n \n dst.build_overviews(\n- list(factors), Resampling[resampling_method])\n+ list(factors), OverviewResampling[resampling_method])\n \n elif build:\n with rasterio.open(input, 'r+') as dst:\n if build == \"auto\":\n overview_level = get_maximum_overview_level(dst.width, dst.height)\n build = [2 ** j for j in range(1, overview_level + 1)]\n- dst.build_overviews(build, Resampling[resampling])\n+ dst.build_overviews(build, OverviewResampling[resampling])\n \n # Save the resampling method to a tag.\n dst.update_tags(ns='rio_overview', resampling=resampling)\n", "issue": "Missing `RMS` support in build_overviews\nhttps://github.com/mapbox/rasterio/blob/3c630f8a30d31ceb61aa52ad3ab96f4fc25f2ecd/rasterio/_io.pyx#L1794-L1801\r\n\r\n> @param pszResampling one of \"AVERAGE\", \"AVERAGE_MAGPHASE\", \"RMS\", \"BILINEAR\", \"CUBIC\", \"CUBICSPLINE\", \"GAUSS\", \"LANCZOS\", \"MODE\", \"NEAREST\", \"NONE\"\r\n \r\nhttps://github.com/OSGeo/gdal/blob/3d32de21b79ab7e09024f55a0e6740f00318dd3e/gdal/gcore/gdaldataset.cpp#L1908-L1909\n", "before_files": [{"content": "\"\"\"Enumerations.\"\"\"\n\nfrom enum import Enum, IntEnum\n\n\nclass ColorInterp(IntEnum):\n \"\"\"Raster band color interpretation.\"\"\"\n undefined = 0\n gray = 1\n grey = 1\n palette = 2\n red = 3\n green = 4\n blue = 5\n alpha = 6\n hue = 7\n saturation = 8\n lightness = 9\n cyan = 10\n magenta = 11\n yellow = 12\n black = 13\n Y = 14\n Cb = 15\n Cr = 16\n\n\nclass Resampling(IntEnum):\n \"\"\"Available warp resampling algorithms.\n\n The first 8, 'nearest', 'bilinear', 'cubic', 'cubic_spline',\n 'lanczos', 'average', 'mode', and 'gauss', are available for making\n dataset overviews.\n\n 'max', 'min', 'med', 'q1', 'q3' are only supported in GDAL >= 2.0.0.\n\n 'nearest', 'bilinear', 'cubic', 'cubic_spline', 'lanczos',\n 'average', 'mode' are always available (GDAL >= 1.10).\n\n 'sum' is only 
supported in GDAL >= 3.1.\n\n 'rms' is only supported in GDAL >= 3.3.\n\n Note: 'gauss' is not available to the functions in rio.warp.\n \"\"\"\n nearest = 0\n bilinear = 1\n cubic = 2\n cubic_spline = 3\n lanczos = 4\n average = 5\n mode = 6\n gauss = 7\n max = 8\n min = 9\n med = 10\n q1 = 11\n q3 = 12\n sum = 13\n rms = 14\n\n\nclass Compression(Enum):\n \"\"\"Available compression algorithms.\"\"\"\n jpeg = 'JPEG'\n lzw = 'LZW'\n packbits = 'PACKBITS'\n deflate = 'DEFLATE'\n ccittrle = 'CCITTRLE'\n ccittfax3 = 'CCITTFAX3'\n ccittfax4 = 'CCITTFAX4'\n lzma = 'LZMA'\n none = 'NONE'\n zstd = 'ZSTD'\n lerc = 'LERC'\n webp = 'WEBP'\n jpeg2000 = 'JPEG2000'\n\n\nclass Interleaving(Enum):\n pixel = 'PIXEL'\n line = 'LINE'\n band = 'BAND'\n\n\nclass MaskFlags(IntEnum):\n all_valid = 1\n per_dataset = 2\n alpha = 4\n nodata = 8\n\n\nclass PhotometricInterp(Enum):\n black = 'MINISBLACK'\n white = 'MINISWHITE'\n rgb = 'RGB'\n cmyk = 'CMYK'\n ycbcr = 'YCbCr'\n cielab = 'CIELAB'\n icclab = 'ICCLAB'\n itulab = 'ITULAB'\n\nclass MergeAlg(Enum):\n \"\"\"Available rasterization algorithms\"\"\"\n replace = 'REPLACE'\n add = 'ADD'\n", "path": "rasterio/enums.py"}, {"content": "# coding: utf-8\n\"\"\"Manage overviews of a dataset.\"\"\"\n\nfrom functools import reduce\nimport logging\nimport operator\n\nimport click\n\nfrom . import options\nimport rasterio\nfrom rasterio.enums import Resampling\n\n\ndef build_handler(ctx, param, value):\n if value:\n try:\n if '^' in value:\n base, exp_range = value.split('^')\n exp_min, exp_max = (int(v) for v in exp_range.split('..'))\n value = [pow(int(base), k) for k in range(exp_min, exp_max + 1)]\n elif ',' in value:\n value = [int(v) for v in value.split(',')]\n elif value == \"auto\":\n pass\n else:\n raise Exception\n except Exception:\n raise click.BadParameter(u\"must match 'n,n,n,\u2026', 'n^n..n', or 'auto'.\")\n return value\n\n\ndef get_maximum_overview_level(width, height, minsize=256):\n \"\"\"\n Calculate the maximum overview level of a dataset at which\n the smallest overview is smaller than `minsize`.\n\n Attributes\n ----------\n width : int\n Width of the dataset.\n height : int\n Height of the dataset.\n minsize : int (default: 256)\n Minimum overview size.\n\n Returns\n -------\n overview_level: int\n overview level.\n\n \"\"\"\n overview_level = 0\n overview_factor = 1\n while min(width // overview_factor, height // overview_factor) > minsize:\n overview_factor *= 2\n overview_level += 1\n\n return overview_level\n\n\[email protected]('overview', short_help=\"Construct overviews in an existing dataset.\")\[email protected]_in_arg\[email protected]('--build', callback=build_handler, metavar=u\"f1,f2,\u2026|b^min..max|auto\",\n help=\"A sequence of decimation factors specified as \"\n \"comma-separated list of numbers or a base and range of \"\n \"exponents, or 'auto' to automatically determine the maximum factor.\")\[email protected]('--ls', help=\"Print the overviews for each band.\",\n is_flag=True, default=False)\[email protected]('--rebuild', help=\"Reconstruct existing overviews.\",\n is_flag=True, default=False)\[email protected]('--resampling', help=\"Resampling algorithm.\",\n type=click.Choice(\n [it.name for it in Resampling if it.value in [0, 1, 2, 3, 4, 5, 6, 7]]),\n default='nearest', show_default=True)\[email protected]_context\ndef overview(ctx, input, build, ls, rebuild, resampling):\n \"\"\"Construct overviews in an existing dataset.\n\n A pyramid of overviews computed once and stored in the dataset can\n improve performance in 
some applications.\n\n The decimation levels at which to build overviews can be specified as\n a comma separated list\n\n rio overview --build 2,4,8,16\n\n or a base and range of exponents\n\n rio overview --build 2^1..4\n\n or 'auto' to automatically determine the maximum decimation level at\n which the smallest overview is smaller than 256 pixels in size.\n\n rio overview --build auto\n\n Note that overviews can not currently be removed and are not\n automatically updated when the dataset's primary bands are\n modified.\n\n Information about existing overviews can be printed using the --ls\n option.\n\n rio overview --ls\n\n \"\"\"\n with ctx.obj['env']:\n if ls:\n with rasterio.open(input, 'r') as dst:\n resampling_method = dst.tags(\n ns='rio_overview').get('resampling') or 'unknown'\n\n click.echo(\"Overview factors:\")\n for idx in dst.indexes:\n click.echo(\" Band %d: %s (method: '%s')\" % (\n idx, dst.overviews(idx) or 'None', resampling_method))\n elif rebuild:\n with rasterio.open(input, 'r+') as dst:\n # Build the same overviews for all bands.\n factors = reduce(\n operator.or_,\n [set(dst.overviews(i)) for i in dst.indexes])\n\n # Attempt to recover the resampling method from dataset tags.\n resampling_method = dst.tags(\n ns='rio_overview').get('resampling') or resampling\n\n dst.build_overviews(\n list(factors), Resampling[resampling_method])\n\n elif build:\n with rasterio.open(input, 'r+') as dst:\n if build == \"auto\":\n overview_level = get_maximum_overview_level(dst.width, dst.height)\n build = [2 ** j for j in range(1, overview_level + 1)]\n dst.build_overviews(build, Resampling[resampling])\n\n # Save the resampling method to a tag.\n dst.update_tags(ns='rio_overview', resampling=resampling)\n\n else:\n raise click.UsageError(\n \"Please specify --ls, --rebuild, or --build ...\")\n", "path": "rasterio/rio/overview.py"}]}
| 3,159 | 692 |
gh_patches_debug_16372 | rasdani/github-patches | git_diff | sanic-org__sanic-2595 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Auto reloader consumes full CPU core
**Describe the bug**
Enabling the auto reloader (`auto_reload=True` or `--auto-reload`) will consume a full CPU core, because it is looping over all files in all loaded modules and `stat`-ing every file. 100% usage on a single core is observed on macOS and Linux.
The problematic loop is here:
https://github.com/sanic-org/sanic/blob/f891995b487f01ff1207afcd241ae359725a8e3c/sanic/worker/reloader.py#L46-L64
**Code snippet**
Run the following minimal example with `python3 test.py` or `sanic test:app --auto-reload` and watch CPU usage
```python
from sanic import Sanic, text

app = Sanic("test")


@app.route("/")
def hello_world(request):
    return text("hello, world")


if __name__ == "__main__":
    app.run(auto_reload=True)
```
**Expected behavior**
The reloader should not consume a full CPU core, but should watch the file system for changes more intelligently, using platform-specific methods (inotify, FSEvents, kqueue, etc). See the [watchdog](https://github.com/gorakhargosh/watchdog/) project for inspiration. Maybe `watchdog` could be included as an optional dependency and used if available?
For instance, [another popular framework has implemented two reloader loops](https://github.com/pallets/werkzeug/blob/2.2.2/src/werkzeug/_reloader.py) and will select the `watchdog` loop if the package is available.
Alternatively, simply add a short sleep step to the reloader loop. I think a one second delay in reloading is acceptable.
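For illustration, a rough sketch of what the `watchdog`-based variant could look like — this assumes `watchdog` is installed, and the handler class and `on_change` callback are invented for the example (they are not Sanic API):

```python
from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer


class ChangeHandler(FileSystemEventHandler):
    """Collect modified Python files instead of stat-polling them."""

    def __init__(self, on_change):
        super().__init__()
        self.on_change = on_change  # callback invoked with the changed path

    def on_modified(self, event):
        if not event.is_directory and event.src_path.endswith(".py"):
            self.on_change(event.src_path)


def start_watching(directory, on_change):
    # The observer uses inotify/FSEvents/kqueue under the hood, so the process
    # sleeps until the OS reports a change instead of busy-polling every file.
    observer = Observer()
    observer.schedule(ChangeHandler(on_change), directory, recursive=True)
    observer.start()
    return observer
```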
**Environment (please complete the following information):**
- OS: macOS 12.6, Ubuntu Linux 22.04 (likely also Windows, but not tested)
- Sanic Version: 22.9.0
</issue>
<code>
[start of sanic/worker/reloader.py]
1 from __future__ import annotations
2
3 import os
4 import sys
5
6 from asyncio import new_event_loop
7 from itertools import chain
8 from multiprocessing.connection import Connection
9 from pathlib import Path
10 from signal import SIGINT, SIGTERM
11 from signal import signal as signal_func
12 from typing import Dict, Set
13
14 from sanic.server.events import trigger_events
15 from sanic.worker.loader import AppLoader
16
17
18 class Reloader:
19 def __init__(
20 self,
21 publisher: Connection,
22 interval: float,
23 reload_dirs: Set[Path],
24 app_loader: AppLoader,
25 ):
26 self._publisher = publisher
27 self.interval = interval
28 self.reload_dirs = reload_dirs
29 self.run = True
30 self.app_loader = app_loader
31
32 def __call__(self) -> None:
33 app = self.app_loader.load()
34 signal_func(SIGINT, self.stop)
35 signal_func(SIGTERM, self.stop)
36 mtimes: Dict[str, float] = {}
37
38 reloader_start = app.listeners.get("reload_process_start")
39 reloader_stop = app.listeners.get("reload_process_stop")
40 before_trigger = app.listeners.get("before_reload_trigger")
41 after_trigger = app.listeners.get("after_reload_trigger")
42 loop = new_event_loop()
43 if reloader_start:
44 trigger_events(reloader_start, loop, app)
45
46 while self.run:
47 changed = set()
48 for filename in self.files():
49 try:
50 if self.check_file(filename, mtimes):
51 path = (
52 filename
53 if isinstance(filename, str)
54 else filename.resolve()
55 )
56 changed.add(str(path))
57 except OSError:
58 continue
59 if changed:
60 if before_trigger:
61 trigger_events(before_trigger, loop, app)
62 self.reload(",".join(changed) if changed else "unknown")
63 if after_trigger:
64 trigger_events(after_trigger, loop, app)
65 else:
66 if reloader_stop:
67 trigger_events(reloader_stop, loop, app)
68
69 def stop(self, *_):
70 self.run = False
71
72 def reload(self, reloaded_files):
73 message = f"__ALL_PROCESSES__:{reloaded_files}"
74 self._publisher.send(message)
75
76 def files(self):
77 return chain(
78 self.python_files(),
79 *(d.glob("**/*") for d in self.reload_dirs),
80 )
81
82 def python_files(self): # no cov
83 """This iterates over all relevant Python files.
84
85 It goes through all
86 loaded files from modules, all files in folders of already loaded
87 modules as well as all files reachable through a package.
88 """
89 # The list call is necessary on Python 3 in case the module
90 # dictionary modifies during iteration.
91 for module in list(sys.modules.values()):
92 if module is None:
93 continue
94 filename = getattr(module, "__file__", None)
95 if filename:
96 old = None
97 while not os.path.isfile(filename):
98 old = filename
99 filename = os.path.dirname(filename)
100 if filename == old:
101 break
102 else:
103 if filename[-4:] in (".pyc", ".pyo"):
104 filename = filename[:-1]
105 yield filename
106
107 @staticmethod
108 def check_file(filename, mtimes) -> bool:
109 need_reload = False
110
111 mtime = os.stat(filename).st_mtime
112 old_time = mtimes.get(filename)
113 if old_time is None:
114 mtimes[filename] = mtime
115 elif mtime > old_time:
116 mtimes[filename] = mtime
117 need_reload = True
118
119 return need_reload
120
[end of sanic/worker/reloader.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/sanic/worker/reloader.py b/sanic/worker/reloader.py
--- a/sanic/worker/reloader.py
+++ b/sanic/worker/reloader.py
@@ -9,6 +9,7 @@
from pathlib import Path
from signal import SIGINT, SIGTERM
from signal import signal as signal_func
+from time import sleep
from typing import Dict, Set
from sanic.server.events import trigger_events
@@ -62,6 +63,7 @@
self.reload(",".join(changed) if changed else "unknown")
if after_trigger:
trigger_events(after_trigger, loop, app)
+ sleep(self.interval)
else:
if reloader_stop:
trigger_events(reloader_stop, loop, app)
|
{"golden_diff": "diff --git a/sanic/worker/reloader.py b/sanic/worker/reloader.py\n--- a/sanic/worker/reloader.py\n+++ b/sanic/worker/reloader.py\n@@ -9,6 +9,7 @@\n from pathlib import Path\n from signal import SIGINT, SIGTERM\n from signal import signal as signal_func\n+from time import sleep\n from typing import Dict, Set\n \n from sanic.server.events import trigger_events\n@@ -62,6 +63,7 @@\n self.reload(\",\".join(changed) if changed else \"unknown\")\n if after_trigger:\n trigger_events(after_trigger, loop, app)\n+ sleep(self.interval)\n else:\n if reloader_stop:\n trigger_events(reloader_stop, loop, app)\n", "issue": "Auto reloader consumes full CPU core\n**Describe the bug**\r\n\r\nEnabling the auto reloader (`auto_reload=True` or `--auto-reload`) will consume a full CPU core, because it is looping over all files in all loaded modules and `stat`-ing every file. 100% usage on a single core is observed on macOS and Linux.\r\n\r\nThe problematic loop is here:\r\n\r\nhttps://github.com/sanic-org/sanic/blob/f891995b487f01ff1207afcd241ae359725a8e3c/sanic/worker/reloader.py#L46-L64\r\n\r\n**Code snippet**\r\n\r\nRun the following minimal example with `python3 test.py` or `sanic test:app --auto-reload` and watch CPU usage\r\n\r\n```python\r\nfrom sanic import Sanic, text\r\n\r\napp = Sanic(\"test\")\r\n\r\n\r\[email protected](\"/\")\r\ndef hello_world(request):\r\n return text(\"hello, world\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n app.run(auto_reload=True)\r\n```\r\n\r\n\r\n**Expected behavior**\r\n\r\nThe reloader should not consume a full CPU core, but should watch the file system for changes more intelligently, using platform-specific methods (inotify, FSEvents, kqueue, etc). See the [watchdog](https://github.com/gorakhargosh/watchdog/) project for inspiration. Maybe `watchdog` could be included as an optional dependency and used if available?\r\n\r\nFor instance, [another popular framework has implemented two reloader loops](https://github.com/pallets/werkzeug/blob/2.2.2/src/werkzeug/_reloader.py) and will select the `watchdog` loop if the package is available.\r\n\r\nAlternatively, simply add a short sleep step to the reloader loop. 
I think a one second delay in reloading is acceptable.\r\n\r\n**Environment (please complete the following information):**\r\n - OS: macOS 12.6, Ubuntu Linux 22.04 (likely also Windows, but not tested)\r\n - Sanic Version: 22.9.0\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nimport os\nimport sys\n\nfrom asyncio import new_event_loop\nfrom itertools import chain\nfrom multiprocessing.connection import Connection\nfrom pathlib import Path\nfrom signal import SIGINT, SIGTERM\nfrom signal import signal as signal_func\nfrom typing import Dict, Set\n\nfrom sanic.server.events import trigger_events\nfrom sanic.worker.loader import AppLoader\n\n\nclass Reloader:\n def __init__(\n self,\n publisher: Connection,\n interval: float,\n reload_dirs: Set[Path],\n app_loader: AppLoader,\n ):\n self._publisher = publisher\n self.interval = interval\n self.reload_dirs = reload_dirs\n self.run = True\n self.app_loader = app_loader\n\n def __call__(self) -> None:\n app = self.app_loader.load()\n signal_func(SIGINT, self.stop)\n signal_func(SIGTERM, self.stop)\n mtimes: Dict[str, float] = {}\n\n reloader_start = app.listeners.get(\"reload_process_start\")\n reloader_stop = app.listeners.get(\"reload_process_stop\")\n before_trigger = app.listeners.get(\"before_reload_trigger\")\n after_trigger = app.listeners.get(\"after_reload_trigger\")\n loop = new_event_loop()\n if reloader_start:\n trigger_events(reloader_start, loop, app)\n\n while self.run:\n changed = set()\n for filename in self.files():\n try:\n if self.check_file(filename, mtimes):\n path = (\n filename\n if isinstance(filename, str)\n else filename.resolve()\n )\n changed.add(str(path))\n except OSError:\n continue\n if changed:\n if before_trigger:\n trigger_events(before_trigger, loop, app)\n self.reload(\",\".join(changed) if changed else \"unknown\")\n if after_trigger:\n trigger_events(after_trigger, loop, app)\n else:\n if reloader_stop:\n trigger_events(reloader_stop, loop, app)\n\n def stop(self, *_):\n self.run = False\n\n def reload(self, reloaded_files):\n message = f\"__ALL_PROCESSES__:{reloaded_files}\"\n self._publisher.send(message)\n\n def files(self):\n return chain(\n self.python_files(),\n *(d.glob(\"**/*\") for d in self.reload_dirs),\n )\n\n def python_files(self): # no cov\n \"\"\"This iterates over all relevant Python files.\n\n It goes through all\n loaded files from modules, all files in folders of already loaded\n modules as well as all files reachable through a package.\n \"\"\"\n # The list call is necessary on Python 3 in case the module\n # dictionary modifies during iteration.\n for module in list(sys.modules.values()):\n if module is None:\n continue\n filename = getattr(module, \"__file__\", None)\n if filename:\n old = None\n while not os.path.isfile(filename):\n old = filename\n filename = os.path.dirname(filename)\n if filename == old:\n break\n else:\n if filename[-4:] in (\".pyc\", \".pyo\"):\n filename = filename[:-1]\n yield filename\n\n @staticmethod\n def check_file(filename, mtimes) -> bool:\n need_reload = False\n\n mtime = os.stat(filename).st_mtime\n old_time = mtimes.get(filename)\n if old_time is None:\n mtimes[filename] = mtime\n elif mtime > old_time:\n mtimes[filename] = mtime\n need_reload = True\n\n return need_reload\n", "path": "sanic/worker/reloader.py"}]}
| 2,003 | 162 |
gh_patches_debug_32203
|
rasdani/github-patches
|
git_diff
|
zulip__zulip-13479
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Don't generate useless local_database_password secret in production
The `local_database_password` secret is only used in the Zulip development environment, and it's a bug that `generate_secrets.py` generates it in production. We should ensure it doesn't get generated there for new installations, since it's confusing.
It might also be helpful to make `generate_secrets.py` remove or comment it on upgrade, though I suppose that carries some risk if the user has written their own code to access that setting.
@andersk @mateuszmandera FYI
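For context, a sketch of how a development-only secret can be read through the `development_only` flag on `get_secret` in `zproject/config.py` (shown below); whether the real settings pass that flag for this particular key is an assumption here, but it illustrates why a production value would go unused:

```python
from zproject.config import get_secret

# With development_only=True, get_secret returns the default in production
# without consulting /etc/zulip/zulip-secrets.conf, so a value generated
# there is dead weight. The key name is the one discussed in this issue.
LOCAL_DATABASE_PASSWORD = get_secret("local_database_password", development_only=True)
```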
</issue>
<code>
[start of zproject/config.py]
1 import os
2 from typing import Optional, overload
3 import configparser
4
5 DEPLOY_ROOT = os.path.realpath(os.path.dirname(os.path.dirname(__file__)))
6
7 config_file = configparser.RawConfigParser()
8 config_file.read("/etc/zulip/zulip.conf")
9
10 # Whether this instance of Zulip is running in a production environment.
11 PRODUCTION = config_file.has_option('machine', 'deploy_type')
12 DEVELOPMENT = not PRODUCTION
13
14 secrets_file = configparser.RawConfigParser()
15 if PRODUCTION:
16 secrets_file.read("/etc/zulip/zulip-secrets.conf")
17 else:
18 secrets_file.read(os.path.join(DEPLOY_ROOT, "zproject/dev-secrets.conf"))
19
20 @overload
21 def get_secret(key: str, default_value: str, development_only: bool=False) -> str:
22 ...
23 @overload
24 def get_secret(key: str, default_value: Optional[str]=None,
25 development_only: bool=False) -> Optional[str]:
26 ...
27 def get_secret(key: str, default_value: Optional[str]=None,
28 development_only: bool=False) -> Optional[str]:
29 if development_only and PRODUCTION:
30 return default_value
31 if secrets_file.has_option('secrets', key):
32 return secrets_file.get('secrets', key)
33 return default_value
34
35 @overload
36 def get_config(section: str, key: str, default_value: str) -> str:
37 ...
38 @overload
39 def get_config(section: str, key: str, default_value: Optional[str]=None) -> Optional[str]:
40 ...
41 def get_config(section: str, key: str, default_value: Optional[str]=None) -> Optional[str]:
42 if config_file.has_option(section, key):
43 return config_file.get(section, key)
44 return default_value
45
46 def get_from_file_if_exists(path: str) -> str:
47 if os.path.exists(path):
48 with open(path, "r") as f:
49 return f.read()
50 else:
51 return ''
52
[end of zproject/config.py]
[start of scripts/setup/generate_secrets.py]
1 #!/usr/bin/env python3
2 # This tools generates /etc/zulip/zulip-secrets.conf
3
4 import sys
5 import os
6
7 from typing import Dict, List
8
9 BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
10 sys.path.append(BASE_DIR)
11 import scripts.lib.setup_path_on_import
12
13 os.environ['DJANGO_SETTINGS_MODULE'] = 'zproject.settings'
14
15 from django.utils.crypto import get_random_string
16 import argparse
17 import uuid
18 import configparser
19 from zerver.lib.utils import generate_random_token
20
21 os.chdir(os.path.join(os.path.dirname(__file__), '..', '..'))
22
23 CAMO_CONFIG_FILENAME = '/etc/default/camo'
24
25 # Standard, 64-bit tokens
26 AUTOGENERATED_SETTINGS = [
27 'avatar_salt',
28 'initial_password_salt',
29 'local_database_password',
30 'rabbitmq_password',
31 'shared_secret',
32 'thumbor_key',
33 ]
34
35 # TODO: We can eliminate this function if we refactor the install
36 # script to run generate_secrets before zulip-puppet-apply.
37 def generate_camo_config_file(camo_key):
38 # type: (str) -> None
39 camo_config = """ENABLED=yes
40 PORT=9292
41 CAMO_KEY=%s
42 """ % (camo_key,)
43 with open(CAMO_CONFIG_FILENAME, 'w') as camo_file:
44 camo_file.write(camo_config)
45 print("Generated Camo config file %s" % (CAMO_CONFIG_FILENAME,))
46
47 def generate_django_secretkey():
48 # type: () -> str
49 """Secret key generation taken from Django's startproject.py"""
50 chars = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'
51 return get_random_string(50, chars)
52
53 def get_old_conf(output_filename):
54 # type: (str) -> Dict[str, str]
55 if not os.path.exists(output_filename) or os.path.getsize(output_filename) == 0:
56 return {}
57
58 secrets_file = configparser.RawConfigParser()
59 secrets_file.read(output_filename)
60
61 return dict(secrets_file.items("secrets"))
62
63 def generate_secrets(development=False):
64 # type: (bool) -> None
65 if development:
66 OUTPUT_SETTINGS_FILENAME = "zproject/dev-secrets.conf"
67 else:
68 OUTPUT_SETTINGS_FILENAME = "/etc/zulip/zulip-secrets.conf"
69 current_conf = get_old_conf(OUTPUT_SETTINGS_FILENAME)
70
71 lines = [] # type: List[str]
72 if len(current_conf) == 0:
73 lines = ['[secrets]\n']
74
75 def need_secret(name):
76 # type: (str) -> bool
77 return name not in current_conf
78
79 def add_secret(name, value):
80 # type: (str, str) -> None
81 lines.append("%s = %s\n" % (name, value))
82 current_conf[name] = value
83
84 for name in AUTOGENERATED_SETTINGS:
85 if need_secret(name):
86 add_secret(name, generate_random_token(64))
87
88 if need_secret('secret_key'):
89 add_secret('secret_key', generate_django_secretkey())
90
91 if need_secret('camo_key'):
92 add_secret('camo_key', get_random_string(64))
93
94 # zulip_org_key is generated using os.urandom().
95 # zulip_org_id does not require a secure CPRNG,
96 # it only needs to be unique.
97 if need_secret('zulip_org_key'):
98 add_secret('zulip_org_key', get_random_string(64))
99 if need_secret('zulip_org_id'):
100 add_secret('zulip_org_id', str(uuid.uuid4()))
101
102 if not development:
103 # Write the Camo config file directly
104 generate_camo_config_file(current_conf['camo_key'])
105
106 if len(lines) == 0:
107 print("generate_secrets: No new secrets to generate.")
108 return
109
110 with open(OUTPUT_SETTINGS_FILENAME, 'a') as f:
111 # Write a newline at the start, in case there was no newline at
112 # the end of the file due to human editing.
113 f.write("\n" + "".join(lines))
114
115 print("Generated new secrets in %s." % (OUTPUT_SETTINGS_FILENAME,))
116
117 if __name__ == '__main__':
118
119 parser = argparse.ArgumentParser()
120 group = parser.add_mutually_exclusive_group(required=True)
121 group.add_argument('--development', action='store_true', dest='development',
122 help='For setting up the developer env for zulip')
123 group.add_argument('--production', action='store_false', dest='development',
124 help='For setting up the production env for zulip')
125 results = parser.parse_args()
126
127 generate_secrets(results.development)
128
[end of scripts/setup/generate_secrets.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/scripts/setup/generate_secrets.py b/scripts/setup/generate_secrets.py
--- a/scripts/setup/generate_secrets.py
+++ b/scripts/setup/generate_secrets.py
@@ -26,7 +26,6 @@
AUTOGENERATED_SETTINGS = [
'avatar_salt',
'initial_password_salt',
- 'local_database_password',
'rabbitmq_password',
'shared_secret',
'thumbor_key',
@@ -85,6 +84,9 @@
if need_secret(name):
add_secret(name, generate_random_token(64))
+ if development and need_secret("local_database_password"):
+ add_secret("local_database_password", generate_random_token(64))
+
if need_secret('secret_key'):
add_secret('secret_key', generate_django_secretkey())
diff --git a/zproject/config.py b/zproject/config.py
--- a/zproject/config.py
+++ b/zproject/config.py
@@ -28,9 +28,7 @@
development_only: bool=False) -> Optional[str]:
if development_only and PRODUCTION:
return default_value
- if secrets_file.has_option('secrets', key):
- return secrets_file.get('secrets', key)
- return default_value
+ return secrets_file.get('secrets', key, fallback=default_value)
@overload
def get_config(section: str, key: str, default_value: str) -> str:
@@ -39,9 +37,7 @@
def get_config(section: str, key: str, default_value: Optional[str]=None) -> Optional[str]:
...
def get_config(section: str, key: str, default_value: Optional[str]=None) -> Optional[str]:
- if config_file.has_option(section, key):
- return config_file.get(section, key)
- return default_value
+ return config_file.get(section, key, fallback=default_value)
def get_from_file_if_exists(path: str) -> str:
if os.path.exists(path):
|
{"golden_diff": "diff --git a/scripts/setup/generate_secrets.py b/scripts/setup/generate_secrets.py\n--- a/scripts/setup/generate_secrets.py\n+++ b/scripts/setup/generate_secrets.py\n@@ -26,7 +26,6 @@\n AUTOGENERATED_SETTINGS = [\n 'avatar_salt',\n 'initial_password_salt',\n- 'local_database_password',\n 'rabbitmq_password',\n 'shared_secret',\n 'thumbor_key',\n@@ -85,6 +84,9 @@\n if need_secret(name):\n add_secret(name, generate_random_token(64))\n \n+ if development and need_secret(\"local_database_password\"):\n+ add_secret(\"local_database_password\", generate_random_token(64))\n+\n if need_secret('secret_key'):\n add_secret('secret_key', generate_django_secretkey())\n \ndiff --git a/zproject/config.py b/zproject/config.py\n--- a/zproject/config.py\n+++ b/zproject/config.py\n@@ -28,9 +28,7 @@\n development_only: bool=False) -> Optional[str]:\n if development_only and PRODUCTION:\n return default_value\n- if secrets_file.has_option('secrets', key):\n- return secrets_file.get('secrets', key)\n- return default_value\n+ return secrets_file.get('secrets', key, fallback=default_value)\n \n @overload\n def get_config(section: str, key: str, default_value: str) -> str:\n@@ -39,9 +37,7 @@\n def get_config(section: str, key: str, default_value: Optional[str]=None) -> Optional[str]:\n ...\n def get_config(section: str, key: str, default_value: Optional[str]=None) -> Optional[str]:\n- if config_file.has_option(section, key):\n- return config_file.get(section, key)\n- return default_value\n+ return config_file.get(section, key, fallback=default_value)\n \n def get_from_file_if_exists(path: str) -> str:\n if os.path.exists(path):\n", "issue": "Don't generate useless local_database_password secret in production\nThe `local_database_password` secret is only used in the Zulip development environment, and it's a bug that `generate_secrets.py` generates it in production. We should ensure it doesn't get generated there for new installations, since it's confusing. \r\n\r\nIt might also be helpful to make `generate_secrets.py` remove or comment it on upgrade, though I suppose that carries some risk if the user has written their own code to access that setting. 
\r\n\r\n@andersk @mateuszmandera FYI\n", "before_files": [{"content": "import os\nfrom typing import Optional, overload\nimport configparser\n\nDEPLOY_ROOT = os.path.realpath(os.path.dirname(os.path.dirname(__file__)))\n\nconfig_file = configparser.RawConfigParser()\nconfig_file.read(\"/etc/zulip/zulip.conf\")\n\n# Whether this instance of Zulip is running in a production environment.\nPRODUCTION = config_file.has_option('machine', 'deploy_type')\nDEVELOPMENT = not PRODUCTION\n\nsecrets_file = configparser.RawConfigParser()\nif PRODUCTION:\n secrets_file.read(\"/etc/zulip/zulip-secrets.conf\")\nelse:\n secrets_file.read(os.path.join(DEPLOY_ROOT, \"zproject/dev-secrets.conf\"))\n\n@overload\ndef get_secret(key: str, default_value: str, development_only: bool=False) -> str:\n ...\n@overload\ndef get_secret(key: str, default_value: Optional[str]=None,\n development_only: bool=False) -> Optional[str]:\n ...\ndef get_secret(key: str, default_value: Optional[str]=None,\n development_only: bool=False) -> Optional[str]:\n if development_only and PRODUCTION:\n return default_value\n if secrets_file.has_option('secrets', key):\n return secrets_file.get('secrets', key)\n return default_value\n\n@overload\ndef get_config(section: str, key: str, default_value: str) -> str:\n ...\n@overload\ndef get_config(section: str, key: str, default_value: Optional[str]=None) -> Optional[str]:\n ...\ndef get_config(section: str, key: str, default_value: Optional[str]=None) -> Optional[str]:\n if config_file.has_option(section, key):\n return config_file.get(section, key)\n return default_value\n\ndef get_from_file_if_exists(path: str) -> str:\n if os.path.exists(path):\n with open(path, \"r\") as f:\n return f.read()\n else:\n return ''\n", "path": "zproject/config.py"}, {"content": "#!/usr/bin/env python3\n# This tools generates /etc/zulip/zulip-secrets.conf\n\nimport sys\nimport os\n\nfrom typing import Dict, List\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nsys.path.append(BASE_DIR)\nimport scripts.lib.setup_path_on_import\n\nos.environ['DJANGO_SETTINGS_MODULE'] = 'zproject.settings'\n\nfrom django.utils.crypto import get_random_string\nimport argparse\nimport uuid\nimport configparser\nfrom zerver.lib.utils import generate_random_token\n\nos.chdir(os.path.join(os.path.dirname(__file__), '..', '..'))\n\nCAMO_CONFIG_FILENAME = '/etc/default/camo'\n\n# Standard, 64-bit tokens\nAUTOGENERATED_SETTINGS = [\n 'avatar_salt',\n 'initial_password_salt',\n 'local_database_password',\n 'rabbitmq_password',\n 'shared_secret',\n 'thumbor_key',\n]\n\n# TODO: We can eliminate this function if we refactor the install\n# script to run generate_secrets before zulip-puppet-apply.\ndef generate_camo_config_file(camo_key):\n # type: (str) -> None\n camo_config = \"\"\"ENABLED=yes\nPORT=9292\nCAMO_KEY=%s\n\"\"\" % (camo_key,)\n with open(CAMO_CONFIG_FILENAME, 'w') as camo_file:\n camo_file.write(camo_config)\n print(\"Generated Camo config file %s\" % (CAMO_CONFIG_FILENAME,))\n\ndef generate_django_secretkey():\n # type: () -> str\n \"\"\"Secret key generation taken from Django's startproject.py\"\"\"\n chars = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'\n return get_random_string(50, chars)\n\ndef get_old_conf(output_filename):\n # type: (str) -> Dict[str, str]\n if not os.path.exists(output_filename) or os.path.getsize(output_filename) == 0:\n return {}\n\n secrets_file = configparser.RawConfigParser()\n secrets_file.read(output_filename)\n\n return 
dict(secrets_file.items(\"secrets\"))\n\ndef generate_secrets(development=False):\n # type: (bool) -> None\n if development:\n OUTPUT_SETTINGS_FILENAME = \"zproject/dev-secrets.conf\"\n else:\n OUTPUT_SETTINGS_FILENAME = \"/etc/zulip/zulip-secrets.conf\"\n current_conf = get_old_conf(OUTPUT_SETTINGS_FILENAME)\n\n lines = [] # type: List[str]\n if len(current_conf) == 0:\n lines = ['[secrets]\\n']\n\n def need_secret(name):\n # type: (str) -> bool\n return name not in current_conf\n\n def add_secret(name, value):\n # type: (str, str) -> None\n lines.append(\"%s = %s\\n\" % (name, value))\n current_conf[name] = value\n\n for name in AUTOGENERATED_SETTINGS:\n if need_secret(name):\n add_secret(name, generate_random_token(64))\n\n if need_secret('secret_key'):\n add_secret('secret_key', generate_django_secretkey())\n\n if need_secret('camo_key'):\n add_secret('camo_key', get_random_string(64))\n\n # zulip_org_key is generated using os.urandom().\n # zulip_org_id does not require a secure CPRNG,\n # it only needs to be unique.\n if need_secret('zulip_org_key'):\n add_secret('zulip_org_key', get_random_string(64))\n if need_secret('zulip_org_id'):\n add_secret('zulip_org_id', str(uuid.uuid4()))\n\n if not development:\n # Write the Camo config file directly\n generate_camo_config_file(current_conf['camo_key'])\n\n if len(lines) == 0:\n print(\"generate_secrets: No new secrets to generate.\")\n return\n\n with open(OUTPUT_SETTINGS_FILENAME, 'a') as f:\n # Write a newline at the start, in case there was no newline at\n # the end of the file due to human editing.\n f.write(\"\\n\" + \"\".join(lines))\n\n print(\"Generated new secrets in %s.\" % (OUTPUT_SETTINGS_FILENAME,))\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n group = parser.add_mutually_exclusive_group(required=True)\n group.add_argument('--development', action='store_true', dest='development',\n help='For setting up the developer env for zulip')\n group.add_argument('--production', action='store_false', dest='development',\n help='For setting up the production env for zulip')\n results = parser.parse_args()\n\n generate_secrets(results.development)\n", "path": "scripts/setup/generate_secrets.py"}]}
| 2,483 | 432 |
gh_patches_debug_35613
|
rasdani/github-patches
|
git_diff
|
deepset-ai__haystack-6497
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Serialisation of the PromptModel is broken if the invocation_layer_class param is set, both from and to yaml
**Describe the bug**
A clear and concise description of what the bug is.
I am using a pipeline that uses a `PromptModel` with an `invocation_layer_class` specified, to deploy Haystack API via docker.
On executing `docker-compose up`, this results in the error described below.
yml file:
name: PromptModel
params:
  api_key: EMPTY
  invocation_layer_class: !!python/name:haystack.nodes.prompt.invocation_layer.open_ai.OpenAIInvocationLayer ''
  max_length: 1024
  model_kwargs:
    api_base: http://localhost:8100/v1
    maximum_context_length: 2048
type: PromptModel
**Error message**
Error that was thrown (if available)
`yaml.constructor.ConstructorError: could not determine a constructor for the tag 'tag:yaml.org,2002:python/name:haystack.nodes.prompt.invocation_layer.open_ai.OpenAIInvocationLayer'`
**Expected behavior**
A clear and concise description of what you expected to happen.
I expected the pipeline to load correctly.
**Additional context**
Add any other context about the problem here, like document types / preprocessing steps / settings of reader etc.
The issue has been replicated by @anakin87
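One direction that would make the YAML round trip work is to accept a plain dotted import path for `invocation_layer_class` and resolve it when the pipeline is loaded — a minimal sketch (the `resolve_invocation_layer` helper is illustrative, not existing Haystack API):

```python
import importlib
from typing import Type

from haystack.nodes.prompt.invocation_layer import PromptModelInvocationLayer


def resolve_invocation_layer(dotted_path: str) -> Type[PromptModelInvocationLayer]:
    """Turn 'package.module.ClassName' back into the class object."""
    module_name, class_name = dotted_path.rsplit(".", maxsplit=1)
    module = importlib.import_module(module_name)
    layer_class = getattr(module, class_name, None)
    if layer_class is None:
        raise ValueError(f"Cannot find class {class_name} in module {module_name}")
    return layer_class


# e.g. resolve_invocation_layer(
#     "haystack.nodes.prompt.invocation_layer.open_ai.OpenAIInvocationLayer")
```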
**To Reproduce**
Steps to reproduce the behavior
1. Serialize a pipeline with a `PromptModel` that has the `invocation_layer_class` param set, to YAML
2. Load the pipeline in the haystack api docker container
**FAQ Check**
- [x] Have you had a look at [our new FAQ page](https://docs.haystack.deepset.ai/docs/faq)?
**System:**
- OS: Windows11 + WSL2
- GPU/CPU: GPU
- Haystack version (commit or version number): 1.22.1
- DocumentStore: WeaviateDocumentStore
- Reader: -
- Retriever: EmbeddingRetriever
</issue>
<code>
[start of haystack/nodes/prompt/prompt_model.py]
1 import inspect
2 import logging
3 from typing import Any, Dict, List, Optional, Tuple, Type, Union, overload
4
5 from haystack.nodes.base import BaseComponent
6 from haystack.nodes.prompt.invocation_layer import PromptModelInvocationLayer
7 from haystack.schema import Document, MultiLabel
8 from haystack.lazy_imports import LazyImport
9
10 with LazyImport(message="Run 'pip install farm-haystack[inference]'") as torch_import:
11 import torch
12
13
14 logger = logging.getLogger(__name__)
15
16
17 class PromptModel(BaseComponent):
18 """
19 The PromptModel class is a component that uses a pre-trained model to perform tasks defined in a prompt. Out of
20 the box, it supports model invocation layers for:
21 - Hugging Face transformers (all text2text-generation and text-generation models)
22 - OpenAI InstructGPT models
23 - Azure OpenAI InstructGPT models
24
25 Although it's possible to use PromptModel to make prompt invocations on the underlying model, use
26 PromptNode to interact with the model. PromptModel instances are a way for multiple
27 PromptNode instances to use a single PromptNode, and thus save computational resources.
28
29 For more details, refer to [PromptModels](https://docs.haystack.deepset.ai/docs/prompt_node#models).
30 """
31
32 outgoing_edges = 1
33
34 def __init__(
35 self,
36 model_name_or_path: str = "google/flan-t5-base",
37 max_length: Optional[int] = 100,
38 api_key: Optional[str] = None,
39 timeout: Optional[float] = None,
40 use_auth_token: Optional[Union[str, bool]] = None,
41 use_gpu: Optional[bool] = None,
42 devices: Optional[List[Union[str, "torch.device"]]] = None,
43 invocation_layer_class: Optional[Type[PromptModelInvocationLayer]] = None,
44 model_kwargs: Optional[Dict] = None,
45 ):
46 """
47 Creates an instance of PromptModel.
48
49 :param model_name_or_path: The name or path of the underlying model.
50 :param max_length: The maximum number of tokens the output text generated by the model can have.
51 :param api_key: The API key to use for the model.
52 :param use_auth_token: The Hugging Face token to use.
53 :param use_gpu: Whether to use GPU or not.
54 :param devices: The devices to use where the model is loaded.
55 :param invocation_layer_class: The custom invocation layer class to use. If None, known invocation layers are used.
56 :param model_kwargs: Additional keyword arguments passed to the underlying model.
57
58 Note that Azure OpenAI InstructGPT models require two additional parameters: azure_base_url (The URL for the
59 Azure OpenAI API endpoint, usually in the form `https://<your-endpoint>.openai.azure.com') and
60 azure_deployment_name (the name of the Azure OpenAI API deployment). You should add these parameters
61 in the `model_kwargs` dictionary.
62 """
63 super().__init__()
64 self.model_name_or_path = model_name_or_path
65 self.max_length = max_length
66 self.api_key = api_key
67 self.timeout = timeout
68 self.use_auth_token = use_auth_token
69 self.use_gpu = use_gpu
70 self.devices = devices
71
72 self.model_kwargs = model_kwargs if model_kwargs else {}
73 self.model_invocation_layer = self.create_invocation_layer(invocation_layer_class=invocation_layer_class)
74
75 def create_invocation_layer(
76 self, invocation_layer_class: Optional[Type[PromptModelInvocationLayer]]
77 ) -> PromptModelInvocationLayer:
78 kwargs = {
79 "api_key": self.api_key,
80 "timeout": self.timeout,
81 "use_auth_token": self.use_auth_token,
82 "use_gpu": self.use_gpu,
83 "devices": self.devices,
84 }
85 all_kwargs = {**self.model_kwargs, **kwargs}
86
87 if invocation_layer_class:
88 return invocation_layer_class(
89 model_name_or_path=self.model_name_or_path, max_length=self.max_length, **all_kwargs
90 )
91
92 for invocation_layer in PromptModelInvocationLayer.invocation_layer_providers:
93 if inspect.isabstract(invocation_layer):
94 continue
95 if invocation_layer.supports(self.model_name_or_path, **all_kwargs):
96 return invocation_layer(
97 model_name_or_path=self.model_name_or_path, max_length=self.max_length, **all_kwargs
98 )
99 raise ValueError(
100 f"Model {self.model_name_or_path} is not supported - no matching invocation layer found."
101 f" Currently supported invocation layers are: {PromptModelInvocationLayer.invocation_layer_providers}"
102 f" You can implement and provide custom invocation layer for {self.model_name_or_path} by subclassing "
103 "PromptModelInvocationLayer."
104 f" Also please ensure you are authorised to load the model {self.model_name_or_path} and you are "
105 "logged-in into the huggingface cli."
106 )
107
108 def invoke(self, prompt: Union[str, List[str], List[Dict[str, str]]], **kwargs) -> List[str]:
109 """
110 Takes in a prompt and returns a list of responses using the underlying invocation layer.
111
112 :param prompt: The prompt to use for the invocation. It can be a single prompt or a list of prompts.
113 :param kwargs: Additional keyword arguments to pass to the invocation layer.
114 :return: A list of model-generated responses for the prompt or prompts.
115 """
116 output = self.model_invocation_layer.invoke(prompt=prompt, **kwargs)
117 return output
118
119 async def ainvoke(self, prompt: Union[str, List[str], List[Dict[str, str]]], **kwargs) -> List[str]:
120 """
121 Drop-in replacement asyncio version of the `invoke` method, see there for documentation.
122 """
123 if hasattr(self.model_invocation_layer, "ainvoke"):
124 return await self.model_invocation_layer.ainvoke(prompt=prompt, **kwargs)
125
126 # The underlying invocation layer doesn't support asyncio
127 return self.model_invocation_layer.invoke(prompt=prompt, **kwargs)
128
129 @overload
130 def _ensure_token_limit(self, prompt: str) -> str:
131 ...
132
133 @overload
134 def _ensure_token_limit(self, prompt: List[Dict[str, str]]) -> List[Dict[str, str]]:
135 ...
136
137 def _ensure_token_limit(self, prompt: Union[str, List[Dict[str, str]]]) -> Union[str, List[Dict[str, str]]]:
138 """Ensure that length of the prompt and answer is within the maximum token length of the PromptModel.
139
140 :param prompt: Prompt text to be sent to the generative model.
141 """
142 return self.model_invocation_layer._ensure_token_limit(prompt=prompt)
143
144 def run(
145 self,
146 query: Optional[str] = None,
147 file_paths: Optional[List[str]] = None,
148 labels: Optional[MultiLabel] = None,
149 documents: Optional[List[Document]] = None,
150 meta: Optional[dict] = None,
151 ) -> Tuple[Dict, str]:
152 raise NotImplementedError("This method should never be implemented in the derived class")
153
154 def run_batch(
155 self,
156 queries: Optional[Union[str, List[str]]] = None,
157 file_paths: Optional[List[str]] = None,
158 labels: Optional[Union[MultiLabel, List[MultiLabel]]] = None,
159 documents: Optional[Union[List[Document], List[List[Document]]]] = None,
160 meta: Optional[Union[Dict[str, Any], List[Dict[str, Any]]]] = None,
161 params: Optional[dict] = None,
162 debug: Optional[bool] = None,
163 ):
164 raise NotImplementedError("This method should never be implemented in the derived class")
165
166 def __repr__(self):
167 return "{}({!r})".format(self.__class__.__name__, self.__dict__)
168
[end of haystack/nodes/prompt/prompt_model.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/haystack/nodes/prompt/prompt_model.py b/haystack/nodes/prompt/prompt_model.py
--- a/haystack/nodes/prompt/prompt_model.py
+++ b/haystack/nodes/prompt/prompt_model.py
@@ -1,5 +1,6 @@
import inspect
import logging
+import importlib
from typing import Any, Dict, List, Optional, Tuple, Type, Union, overload
from haystack.nodes.base import BaseComponent
@@ -40,7 +41,7 @@
use_auth_token: Optional[Union[str, bool]] = None,
use_gpu: Optional[bool] = None,
devices: Optional[List[Union[str, "torch.device"]]] = None,
- invocation_layer_class: Optional[Type[PromptModelInvocationLayer]] = None,
+ invocation_layer_class: Optional[Union[Type[PromptModelInvocationLayer], str]] = None,
model_kwargs: Optional[Dict] = None,
):
"""
@@ -73,7 +74,7 @@
self.model_invocation_layer = self.create_invocation_layer(invocation_layer_class=invocation_layer_class)
def create_invocation_layer(
- self, invocation_layer_class: Optional[Type[PromptModelInvocationLayer]]
+ self, invocation_layer_class: Optional[Union[Type[PromptModelInvocationLayer], str]]
) -> PromptModelInvocationLayer:
kwargs = {
"api_key": self.api_key,
@@ -84,6 +85,18 @@
}
all_kwargs = {**self.model_kwargs, **kwargs}
+ if isinstance(invocation_layer_class, str):
+ module_name, class_name = invocation_layer_class.rsplit(".", maxsplit=1)
+ try:
+ module = importlib.import_module(module_name)
+ except ImportError as e:
+ msg = f"Can't find module {module_name}"
+ raise ValueError(msg) from e
+ invocation_layer_class = getattr(module, class_name)
+ if invocation_layer_class is None:
+ msg = f"Can'f find class {class_name} in module {module_name}"
+ ValueError(msg)
+
if invocation_layer_class:
return invocation_layer_class(
model_name_or_path=self.model_name_or_path, max_length=self.max_length, **all_kwargs
|
{"golden_diff": "diff --git a/haystack/nodes/prompt/prompt_model.py b/haystack/nodes/prompt/prompt_model.py\n--- a/haystack/nodes/prompt/prompt_model.py\n+++ b/haystack/nodes/prompt/prompt_model.py\n@@ -1,5 +1,6 @@\n import inspect\n import logging\n+import importlib\n from typing import Any, Dict, List, Optional, Tuple, Type, Union, overload\n \n from haystack.nodes.base import BaseComponent\n@@ -40,7 +41,7 @@\n use_auth_token: Optional[Union[str, bool]] = None,\n use_gpu: Optional[bool] = None,\n devices: Optional[List[Union[str, \"torch.device\"]]] = None,\n- invocation_layer_class: Optional[Type[PromptModelInvocationLayer]] = None,\n+ invocation_layer_class: Optional[Union[Type[PromptModelInvocationLayer], str]] = None,\n model_kwargs: Optional[Dict] = None,\n ):\n \"\"\"\n@@ -73,7 +74,7 @@\n self.model_invocation_layer = self.create_invocation_layer(invocation_layer_class=invocation_layer_class)\n \n def create_invocation_layer(\n- self, invocation_layer_class: Optional[Type[PromptModelInvocationLayer]]\n+ self, invocation_layer_class: Optional[Union[Type[PromptModelInvocationLayer], str]]\n ) -> PromptModelInvocationLayer:\n kwargs = {\n \"api_key\": self.api_key,\n@@ -84,6 +85,18 @@\n }\n all_kwargs = {**self.model_kwargs, **kwargs}\n \n+ if isinstance(invocation_layer_class, str):\n+ module_name, class_name = invocation_layer_class.rsplit(\".\", maxsplit=1)\n+ try:\n+ module = importlib.import_module(module_name)\n+ except ImportError as e:\n+ msg = f\"Can't find module {module_name}\"\n+ raise ValueError(msg) from e\n+ invocation_layer_class = getattr(module, class_name)\n+ if invocation_layer_class is None:\n+ msg = f\"Can'f find class {class_name} in module {module_name}\"\n+ ValueError(msg)\n+\n if invocation_layer_class:\n return invocation_layer_class(\n model_name_or_path=self.model_name_or_path, max_length=self.max_length, **all_kwargs\n", "issue": "Serialisation of the PromptModel is broken if the invocation_layer_class param is set, both from and to yaml\n**Describe the bug**\r\nA clear and concise description of what the bug is.\r\n\r\nI am using a pipeline that uses a `PromptModel` with an `invocation_layer_class` specified, to deploy Haystack API via docker.\r\nOn executing docker-compose up this will result in the error described below.\r\n\r\nyml file:\r\n\r\nname: PromptModel\r\nparams:\r\n api_key: EMPTY\r\n invocation_layer_class: !!python/name:haystack.nodes.prompt.invocation_layer.open_ai.OpenAIInvocationLayer ''\r\n max_length: 1024\r\n model_kwargs:\r\n api_base: http://localhost:8100/v1\r\n maximum_context_length: 2048\r\ntype: PromptModel\r\n\r\n**Error message**\r\nError that was thrown (if available)\r\n\r\n`yaml.constructor.ConstructorError: could not determine a constructor for the tag 'tag:yaml.org,2002:python/name:haystack.nodes.prompt.invocation_layer.open_ai.OpenAIInvocationLayer'`\r\n\r\n**Expected behavior**\r\nA clear and concise description of what you expected to happen.\r\n\r\nI expected the pipleline to load correctly.\r\n\r\n**Additional context**\r\nAdd any other context about the problem here, like document types / preprocessing steps / settings of reader etc.\r\n\r\nThe issue has been replicated by @anakin87\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior\r\n1. Serialize a pipeline with a `PromptModel ` with `invocation_layer_class` param, to yaml\r\n2. 
Load the pipeline in the haystack api docker container\r\n\r\n\r\n**FAQ Check**\r\n- [x] Have you had a look at [our new FAQ page](https://docs.haystack.deepset.ai/docs/faq)?\r\n\r\n**System:**\r\n - OS: Windows11 + WSL2\r\n - GPU/CPU: GPU\r\n - Haystack version (commit or version number): 1.22.1\r\n - DocumentStore: WeaviateDocumentStore\r\n - Reader: -\r\n - Retriever: EmbeddingRetriever\r\n\n", "before_files": [{"content": "import inspect\nimport logging\nfrom typing import Any, Dict, List, Optional, Tuple, Type, Union, overload\n\nfrom haystack.nodes.base import BaseComponent\nfrom haystack.nodes.prompt.invocation_layer import PromptModelInvocationLayer\nfrom haystack.schema import Document, MultiLabel\nfrom haystack.lazy_imports import LazyImport\n\nwith LazyImport(message=\"Run 'pip install farm-haystack[inference]'\") as torch_import:\n import torch\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass PromptModel(BaseComponent):\n \"\"\"\n The PromptModel class is a component that uses a pre-trained model to perform tasks defined in a prompt. Out of\n the box, it supports model invocation layers for:\n - Hugging Face transformers (all text2text-generation and text-generation models)\n - OpenAI InstructGPT models\n - Azure OpenAI InstructGPT models\n\n Although it's possible to use PromptModel to make prompt invocations on the underlying model, use\n PromptNode to interact with the model. PromptModel instances are a way for multiple\n PromptNode instances to use a single PromptNode, and thus save computational resources.\n\n For more details, refer to [PromptModels](https://docs.haystack.deepset.ai/docs/prompt_node#models).\n \"\"\"\n\n outgoing_edges = 1\n\n def __init__(\n self,\n model_name_or_path: str = \"google/flan-t5-base\",\n max_length: Optional[int] = 100,\n api_key: Optional[str] = None,\n timeout: Optional[float] = None,\n use_auth_token: Optional[Union[str, bool]] = None,\n use_gpu: Optional[bool] = None,\n devices: Optional[List[Union[str, \"torch.device\"]]] = None,\n invocation_layer_class: Optional[Type[PromptModelInvocationLayer]] = None,\n model_kwargs: Optional[Dict] = None,\n ):\n \"\"\"\n Creates an instance of PromptModel.\n\n :param model_name_or_path: The name or path of the underlying model.\n :param max_length: The maximum number of tokens the output text generated by the model can have.\n :param api_key: The API key to use for the model.\n :param use_auth_token: The Hugging Face token to use.\n :param use_gpu: Whether to use GPU or not.\n :param devices: The devices to use where the model is loaded.\n :param invocation_layer_class: The custom invocation layer class to use. If None, known invocation layers are used.\n :param model_kwargs: Additional keyword arguments passed to the underlying model.\n\n Note that Azure OpenAI InstructGPT models require two additional parameters: azure_base_url (The URL for the\n Azure OpenAI API endpoint, usually in the form `https://<your-endpoint>.openai.azure.com') and\n azure_deployment_name (the name of the Azure OpenAI API deployment). 
You should add these parameters\n in the `model_kwargs` dictionary.\n \"\"\"\n super().__init__()\n self.model_name_or_path = model_name_or_path\n self.max_length = max_length\n self.api_key = api_key\n self.timeout = timeout\n self.use_auth_token = use_auth_token\n self.use_gpu = use_gpu\n self.devices = devices\n\n self.model_kwargs = model_kwargs if model_kwargs else {}\n self.model_invocation_layer = self.create_invocation_layer(invocation_layer_class=invocation_layer_class)\n\n def create_invocation_layer(\n self, invocation_layer_class: Optional[Type[PromptModelInvocationLayer]]\n ) -> PromptModelInvocationLayer:\n kwargs = {\n \"api_key\": self.api_key,\n \"timeout\": self.timeout,\n \"use_auth_token\": self.use_auth_token,\n \"use_gpu\": self.use_gpu,\n \"devices\": self.devices,\n }\n all_kwargs = {**self.model_kwargs, **kwargs}\n\n if invocation_layer_class:\n return invocation_layer_class(\n model_name_or_path=self.model_name_or_path, max_length=self.max_length, **all_kwargs\n )\n\n for invocation_layer in PromptModelInvocationLayer.invocation_layer_providers:\n if inspect.isabstract(invocation_layer):\n continue\n if invocation_layer.supports(self.model_name_or_path, **all_kwargs):\n return invocation_layer(\n model_name_or_path=self.model_name_or_path, max_length=self.max_length, **all_kwargs\n )\n raise ValueError(\n f\"Model {self.model_name_or_path} is not supported - no matching invocation layer found.\"\n f\" Currently supported invocation layers are: {PromptModelInvocationLayer.invocation_layer_providers}\"\n f\" You can implement and provide custom invocation layer for {self.model_name_or_path} by subclassing \"\n \"PromptModelInvocationLayer.\"\n f\" Also please ensure you are authorised to load the model {self.model_name_or_path} and you are \"\n \"logged-in into the huggingface cli.\"\n )\n\n def invoke(self, prompt: Union[str, List[str], List[Dict[str, str]]], **kwargs) -> List[str]:\n \"\"\"\n Takes in a prompt and returns a list of responses using the underlying invocation layer.\n\n :param prompt: The prompt to use for the invocation. 
It can be a single prompt or a list of prompts.\n :param kwargs: Additional keyword arguments to pass to the invocation layer.\n :return: A list of model-generated responses for the prompt or prompts.\n \"\"\"\n output = self.model_invocation_layer.invoke(prompt=prompt, **kwargs)\n return output\n\n async def ainvoke(self, prompt: Union[str, List[str], List[Dict[str, str]]], **kwargs) -> List[str]:\n \"\"\"\n Drop-in replacement asyncio version of the `invoke` method, see there for documentation.\n \"\"\"\n if hasattr(self.model_invocation_layer, \"ainvoke\"):\n return await self.model_invocation_layer.ainvoke(prompt=prompt, **kwargs)\n\n # The underlying invocation layer doesn't support asyncio\n return self.model_invocation_layer.invoke(prompt=prompt, **kwargs)\n\n @overload\n def _ensure_token_limit(self, prompt: str) -> str:\n ...\n\n @overload\n def _ensure_token_limit(self, prompt: List[Dict[str, str]]) -> List[Dict[str, str]]:\n ...\n\n def _ensure_token_limit(self, prompt: Union[str, List[Dict[str, str]]]) -> Union[str, List[Dict[str, str]]]:\n \"\"\"Ensure that length of the prompt and answer is within the maximum token length of the PromptModel.\n\n :param prompt: Prompt text to be sent to the generative model.\n \"\"\"\n return self.model_invocation_layer._ensure_token_limit(prompt=prompt)\n\n def run(\n self,\n query: Optional[str] = None,\n file_paths: Optional[List[str]] = None,\n labels: Optional[MultiLabel] = None,\n documents: Optional[List[Document]] = None,\n meta: Optional[dict] = None,\n ) -> Tuple[Dict, str]:\n raise NotImplementedError(\"This method should never be implemented in the derived class\")\n\n def run_batch(\n self,\n queries: Optional[Union[str, List[str]]] = None,\n file_paths: Optional[List[str]] = None,\n labels: Optional[Union[MultiLabel, List[MultiLabel]]] = None,\n documents: Optional[Union[List[Document], List[List[Document]]]] = None,\n meta: Optional[Union[Dict[str, Any], List[Dict[str, Any]]]] = None,\n params: Optional[dict] = None,\n debug: Optional[bool] = None,\n ):\n raise NotImplementedError(\"This method should never be implemented in the derived class\")\n\n def __repr__(self):\n return \"{}({!r})\".format(self.__class__.__name__, self.__dict__)\n", "path": "haystack/nodes/prompt/prompt_model.py"}]}
| 3,055 | 500 |
gh_patches_debug_4084
|
rasdani/github-patches
|
git_diff
|
cookiecutter__cookiecutter-1874
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
getting a parameters original choices
* Cookiecutter version: 2.1.1
* Template project url: github.com/kjaymiller/cookiecutter_relecloud
* Python version: Python 3.11.0
* Operating System: MacOS Ventura - 13.4.1 (22F82)
### Description:
If my cookiecutter.json has:
```json
{
"cc_options": [1, 2, 3, 4]
}
```
Is there a way to get all of the options?
### What I've run:
```jinja
{{cookiecutter.json}}
```
```jinja
{{cookiecutter.cc_options}}
```
### Expected outcome
I would like some attribute that would give me my original options, perhaps by calling the value with an underscore in front.
```jinja
{{cookiecutter._cc_options}} >>> [1,2,3,4]
```
another option could be to have the original values preserved in a variable
```jinja
{{cookiecutter.source_values.cc_options}} >>> [1,2,3,4]
```
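In the meantime, one workaround is to read the raw choices straight from the template's `cookiecutter.json`; a minimal sketch, where the local template path is hypothetical:

```python
import json
from pathlib import Path

# Hypothetical local checkout of the template repository.
template_dir = Path("cookiecutter_relecloud")

with open(template_dir / "cookiecutter.json", encoding="utf-8") as f:
    raw_context = json.load(f)

print(raw_context["cc_options"])  # -> [1, 2, 3, 4]
```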
</issue>
<code>
[start of cookiecutter/main.py]
1 """
2 Main entry point for the `cookiecutter` command.
3
4 The code in this module is also a good example of how to use Cookiecutter as a
5 library rather than a script.
6 """
7 import logging
8 import os
9 import re
10 import sys
11 from copy import copy
12
13 from cookiecutter.config import get_user_config
14 from cookiecutter.exceptions import InvalidModeException
15 from cookiecutter.generate import generate_context, generate_files
16 from cookiecutter.prompt import prompt_for_config
17 from cookiecutter.replay import dump, load
18 from cookiecutter.repository import determine_repo_dir
19 from cookiecutter.utils import rmtree
20
21 logger = logging.getLogger(__name__)
22
23
24 def cookiecutter(
25 template,
26 checkout=None,
27 no_input=False,
28 extra_context=None,
29 replay=None,
30 overwrite_if_exists=False,
31 output_dir='.',
32 config_file=None,
33 default_config=False,
34 password=None,
35 directory=None,
36 skip_if_file_exists=False,
37 accept_hooks=True,
38 keep_project_on_failure=False,
39 ):
40 """
41 Run Cookiecutter just as if using it from the command line.
42
43 :param template: A directory containing a project template directory,
44 or a URL to a git repository.
45 :param checkout: The branch, tag or commit ID to checkout after clone.
46 :param no_input: Do not prompt for user input.
47 Use default values for template parameters taken from `cookiecutter.json`, user
48 config and `extra_dict`. Force a refresh of cached resources.
49 :param extra_context: A dictionary of context that overrides default
50 and user configuration.
51 :param replay: Do not prompt for input, instead read from saved json. If
52 ``True`` read from the ``replay_dir``.
53 if it exists
54 :param output_dir: Where to output the generated project dir into.
55 :param config_file: User configuration file path.
56 :param default_config: Use default values rather than a config file.
57 :param password: The password to use when extracting the repository.
58 :param directory: Relative path to a cookiecutter template in a repository.
59 :param accept_hooks: Accept pre and post hooks if set to `True`.
60 :param keep_project_on_failure: If `True` keep generated project directory even when
61 generation fails
62 """
63 if replay and ((no_input is not False) or (extra_context is not None)):
64 err_msg = (
65 "You can not use both replay and no_input or extra_context "
66 "at the same time."
67 )
68 raise InvalidModeException(err_msg)
69
70 config_dict = get_user_config(
71 config_file=config_file,
72 default_config=default_config,
73 )
74
75 repo_dir, cleanup = determine_repo_dir(
76 template=template,
77 abbreviations=config_dict['abbreviations'],
78 clone_to_dir=config_dict['cookiecutters_dir'],
79 checkout=checkout,
80 no_input=no_input,
81 password=password,
82 directory=directory,
83 )
84 import_patch = _patch_import_path_for_repo(repo_dir)
85
86 template_name = os.path.basename(os.path.abspath(repo_dir))
87
88 if replay:
89 with import_patch:
90 if isinstance(replay, bool):
91 context = load(config_dict['replay_dir'], template_name)
92 else:
93 path, template_name = os.path.split(os.path.splitext(replay)[0])
94 context = load(path, template_name)
95 else:
96 context_file = os.path.join(repo_dir, 'cookiecutter.json')
97 logger.debug('context_file is %s', context_file)
98
99 context = generate_context(
100 context_file=context_file,
101 default_context=config_dict['default_context'],
102 extra_context=extra_context,
103 )
104
105 # prompt the user to manually configure at the command line.
106 # except when 'no-input' flag is set
107 with import_patch:
108 context['cookiecutter'] = prompt_for_config(context, no_input)
109
110 if "template" in context["cookiecutter"]:
111 nested_template = re.search(
112 r'\((.*?)\)', context["cookiecutter"]["template"]
113 ).group(1)
114 return cookiecutter(
115 template=os.path.join(template, nested_template),
116 checkout=checkout,
117 no_input=no_input,
118 extra_context=extra_context,
119 replay=replay,
120 overwrite_if_exists=overwrite_if_exists,
121 output_dir=output_dir,
122 config_file=config_file,
123 default_config=default_config,
124 password=password,
125 directory=directory,
126 skip_if_file_exists=skip_if_file_exists,
127 accept_hooks=accept_hooks,
128 keep_project_on_failure=keep_project_on_failure,
129 )
130
131 # include template dir or url in the context dict
132 context['cookiecutter']['_template'] = template
133
134 # include repo dir or url in the context dict
135 context['cookiecutter']['_repo_dir'] = repo_dir
136
137 # include output+dir in the context dict
138 context['cookiecutter']['_output_dir'] = os.path.abspath(output_dir)
139
140 dump(config_dict['replay_dir'], template_name, context)
141
142 # Create project from local context and project template.
143 with import_patch:
144 result = generate_files(
145 repo_dir=repo_dir,
146 context=context,
147 overwrite_if_exists=overwrite_if_exists,
148 skip_if_file_exists=skip_if_file_exists,
149 output_dir=output_dir,
150 accept_hooks=accept_hooks,
151 keep_project_on_failure=keep_project_on_failure,
152 )
153
154 # Cleanup (if required)
155 if cleanup:
156 rmtree(repo_dir)
157
158 return result
159
160
161 class _patch_import_path_for_repo:
162 def __init__(self, repo_dir):
163 self._repo_dir = repo_dir
164 self._path = None
165
166 def __enter__(self):
167 self._path = copy(sys.path)
168 sys.path.append(self._repo_dir)
169
170 def __exit__(self, type, value, traceback):
171 sys.path = self._path
172
[end of cookiecutter/main.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/cookiecutter/main.py b/cookiecutter/main.py
--- a/cookiecutter/main.py
+++ b/cookiecutter/main.py
@@ -102,6 +102,9 @@
extra_context=extra_context,
)
+ # preserve the original cookiecutter options
+ context['_cookiecutter'] = context['cookiecutter']
+
# prompt the user to manually configure at the command line.
# except when 'no-input' flag is set
with import_patch:
|
{"golden_diff": "diff --git a/cookiecutter/main.py b/cookiecutter/main.py\n--- a/cookiecutter/main.py\n+++ b/cookiecutter/main.py\n@@ -102,6 +102,9 @@\n extra_context=extra_context,\n )\n \n+ # preserve the original cookiecutter options\n+ context['_cookiecutter'] = context['cookiecutter']\n+\n # prompt the user to manually configure at the command line.\n # except when 'no-input' flag is set\n with import_patch:\n", "issue": "getting a parameters original choices\n* Cookiecutter version: 2.1.1\r\n* Template project url: github.com/kjaymiller/cookiecutter_relecloud\r\n* Python version: Python 3.11.0\r\n* Operating System: MacOS Ventura - 13.4.1 (22F82)\r\n\r\n### Description:\r\n\r\nIf my cookiecutter.json has:\r\n\r\n```json\r\n{\r\n \"cc_options\": [1, 2, 3, 4]\r\n}\r\n```\r\nis there a way to get all of the options.\r\n\r\n### What I've run:\r\n\r\n```jinja\r\n{{cookiecutter.json}}\r\n```\r\n\r\n```jinja\r\n{{cookiecutter.cc_options}}\r\n```\r\n\r\n### Expected outcome\r\n\r\nI would like some attibute that would give me my original options. Perhaps if you call the value with an underscore in front.\r\n\r\n```jinja\r\n{{cookiecutter._cc_options}} >>> [1,2,3,4]\r\n```\r\n\r\nanother option could be to have the original values preserved in a variable\r\n\r\n```jinja\r\n{{cookiecutter.source_values.cc_options}} >>> [1,2,3,4]\r\n```\n", "before_files": [{"content": "\"\"\"\nMain entry point for the `cookiecutter` command.\n\nThe code in this module is also a good example of how to use Cookiecutter as a\nlibrary rather than a script.\n\"\"\"\nimport logging\nimport os\nimport re\nimport sys\nfrom copy import copy\n\nfrom cookiecutter.config import get_user_config\nfrom cookiecutter.exceptions import InvalidModeException\nfrom cookiecutter.generate import generate_context, generate_files\nfrom cookiecutter.prompt import prompt_for_config\nfrom cookiecutter.replay import dump, load\nfrom cookiecutter.repository import determine_repo_dir\nfrom cookiecutter.utils import rmtree\n\nlogger = logging.getLogger(__name__)\n\n\ndef cookiecutter(\n template,\n checkout=None,\n no_input=False,\n extra_context=None,\n replay=None,\n overwrite_if_exists=False,\n output_dir='.',\n config_file=None,\n default_config=False,\n password=None,\n directory=None,\n skip_if_file_exists=False,\n accept_hooks=True,\n keep_project_on_failure=False,\n):\n \"\"\"\n Run Cookiecutter just as if using it from the command line.\n\n :param template: A directory containing a project template directory,\n or a URL to a git repository.\n :param checkout: The branch, tag or commit ID to checkout after clone.\n :param no_input: Do not prompt for user input.\n Use default values for template parameters taken from `cookiecutter.json`, user\n config and `extra_dict`. Force a refresh of cached resources.\n :param extra_context: A dictionary of context that overrides default\n and user configuration.\n :param replay: Do not prompt for input, instead read from saved json. 
If\n ``True`` read from the ``replay_dir``.\n if it exists\n :param output_dir: Where to output the generated project dir into.\n :param config_file: User configuration file path.\n :param default_config: Use default values rather than a config file.\n :param password: The password to use when extracting the repository.\n :param directory: Relative path to a cookiecutter template in a repository.\n :param accept_hooks: Accept pre and post hooks if set to `True`.\n :param keep_project_on_failure: If `True` keep generated project directory even when\n generation fails\n \"\"\"\n if replay and ((no_input is not False) or (extra_context is not None)):\n err_msg = (\n \"You can not use both replay and no_input or extra_context \"\n \"at the same time.\"\n )\n raise InvalidModeException(err_msg)\n\n config_dict = get_user_config(\n config_file=config_file,\n default_config=default_config,\n )\n\n repo_dir, cleanup = determine_repo_dir(\n template=template,\n abbreviations=config_dict['abbreviations'],\n clone_to_dir=config_dict['cookiecutters_dir'],\n checkout=checkout,\n no_input=no_input,\n password=password,\n directory=directory,\n )\n import_patch = _patch_import_path_for_repo(repo_dir)\n\n template_name = os.path.basename(os.path.abspath(repo_dir))\n\n if replay:\n with import_patch:\n if isinstance(replay, bool):\n context = load(config_dict['replay_dir'], template_name)\n else:\n path, template_name = os.path.split(os.path.splitext(replay)[0])\n context = load(path, template_name)\n else:\n context_file = os.path.join(repo_dir, 'cookiecutter.json')\n logger.debug('context_file is %s', context_file)\n\n context = generate_context(\n context_file=context_file,\n default_context=config_dict['default_context'],\n extra_context=extra_context,\n )\n\n # prompt the user to manually configure at the command line.\n # except when 'no-input' flag is set\n with import_patch:\n context['cookiecutter'] = prompt_for_config(context, no_input)\n\n if \"template\" in context[\"cookiecutter\"]:\n nested_template = re.search(\n r'\\((.*?)\\)', context[\"cookiecutter\"][\"template\"]\n ).group(1)\n return cookiecutter(\n template=os.path.join(template, nested_template),\n checkout=checkout,\n no_input=no_input,\n extra_context=extra_context,\n replay=replay,\n overwrite_if_exists=overwrite_if_exists,\n output_dir=output_dir,\n config_file=config_file,\n default_config=default_config,\n password=password,\n directory=directory,\n skip_if_file_exists=skip_if_file_exists,\n accept_hooks=accept_hooks,\n keep_project_on_failure=keep_project_on_failure,\n )\n\n # include template dir or url in the context dict\n context['cookiecutter']['_template'] = template\n\n # include repo dir or url in the context dict\n context['cookiecutter']['_repo_dir'] = repo_dir\n\n # include output+dir in the context dict\n context['cookiecutter']['_output_dir'] = os.path.abspath(output_dir)\n\n dump(config_dict['replay_dir'], template_name, context)\n\n # Create project from local context and project template.\n with import_patch:\n result = generate_files(\n repo_dir=repo_dir,\n context=context,\n overwrite_if_exists=overwrite_if_exists,\n skip_if_file_exists=skip_if_file_exists,\n output_dir=output_dir,\n accept_hooks=accept_hooks,\n keep_project_on_failure=keep_project_on_failure,\n )\n\n # Cleanup (if required)\n if cleanup:\n rmtree(repo_dir)\n\n return result\n\n\nclass _patch_import_path_for_repo:\n def __init__(self, repo_dir):\n self._repo_dir = repo_dir\n self._path = None\n\n def __enter__(self):\n self._path = 
copy(sys.path)\n sys.path.append(self._repo_dir)\n\n def __exit__(self, type, value, traceback):\n sys.path = self._path\n", "path": "cookiecutter/main.py"}]}
| 2,445 | 117 |
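Note on the cookiecutter patch above: the added assignment runs before `prompt_for_config`, so the untouched context read from `cookiecutter.json` stays reachable under the `_cookiecutter` key while the `cookiecutter` key goes on to hold the user's answers. A minimal sketch of that ordering, using hypothetical option values and stand-ins rather than cookiecutter's real internals:

```python
# Sketch only: stand-ins for generate_context() and prompt_for_config(),
# illustrating why the snapshot must happen before prompting.
context = {"cookiecutter": {"cc_options": [1, 2, 3, 4]}}   # hypothetical raw template options

context["_cookiecutter"] = context["cookiecutter"]          # the line the patch adds
context["cookiecutter"] = {"cc_options": 3}                 # stand-in for the prompted answers

assert context["_cookiecutter"]["cc_options"] == [1, 2, 3, 4]   # original choices still reachable
```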
gh_patches_debug_8574
|
rasdani/github-patches
|
git_diff
|
fossasia__open-event-server-3611
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Export of Documents leads to 404
When trying to export the event as Documents (JSON files), the page reports that my request was completed successfully. 
However, when clicking on the download link I get a 404. 
In this case, the exact link visited was:
https://eventyay.com/notifications/opev/open_eventhttps://storage.googleapis.com/eventyay.com/exports/69/aVllSlAvdk/event69.zip
</issue>
<code>
[start of app/api/helpers/export_helpers.py]
1 import json
2 import os
3 import shutil
4 from collections import OrderedDict
5 from datetime import datetime
6
7 import requests
8 from flask import current_app as app
9 from flask import request, g, url_for
10 from flask_restplus import marshal
11
12 from app.helpers.data import save_to_db
13 from app.helpers.data_getter import DataGetter
14 from app.helpers.helpers import send_email_after_export, send_notif_after_export
15 from app.models.event import Event as EventModel
16 from app.models.export_jobs import ExportJob
17 from import_helpers import is_downloadable, get_filename_from_cd
18 from .non_apis import CustomFormDAO, CUSTOM_FORM
19 from ..events import DAO as EventDAO, EVENT as EVENT_MODEL
20 from ..microlocations import DAO as MicrolocationDAO, MICROLOCATION
21 from ..sessions import DAO as SessionDAO, SESSION, \
22 TypeDAO as SessionTypeDAO, SESSION_TYPE
23 from ..speakers import DAO as SpeakerDAO, SPEAKER
24 from ..sponsors import DAO as SponsorDAO, SPONSOR
25 from ..tracks import DAO as TrackDAO, TRACK
26 from app.helpers.storage import upload, UPLOAD_PATHS, UploadedFile
27 from app.settings import get_settings
28
29 # DELETE FIELDS
30 # All fields to be deleted go here
31 EVENT = EVENT_MODEL.clone('EventExport')
32
33 EXPORTS = [
34 ('event', EventDAO, EVENT),
35 ('microlocations', MicrolocationDAO, MICROLOCATION),
36 ('sessions', SessionDAO, SESSION),
37 ('speakers', SpeakerDAO, SPEAKER),
38 ('sponsors', SponsorDAO, SPONSOR),
39 ('tracks', TrackDAO, TRACK),
40 ('session_types', SessionTypeDAO, SESSION_TYPE),
41 ('forms', CustomFormDAO, CUSTOM_FORM)
42 ]
43
44 # order of keys in export json
45 FIELD_ORDER = {
46 'event': [
47 'id', 'name', 'latitude', 'longitude', 'location_name', 'start_time', 'end_time',
48 'timezone', 'description', 'background_image', 'logo', 'organizer_name',
49 'organizer_description', 'event_url', 'social_links', 'ticket_url', 'privacy', 'type',
50 'topic', 'sub_topic', 'code_of_conduct', 'copyright'
51 ],
52 'microlocations': ['id', 'name', 'floor'],
53 'sessions': [
54 'id', 'title', 'subtitle', 'short_abstract', 'long_abstract', 'start_time', 'end_time',
55 'session_type', 'track', 'comments', 'language', 'slides', 'audio', 'video'
56 ],
57 'speakers': [
58 'id', 'name', 'email', 'mobile', 'photo', 'organisation', 'position', 'country',
59 'short_biography', 'long_biography', 'website', 'twitter', 'facebook', 'github', 'linkedin'
60 ],
61 'sponsors': ['id', 'name', 'logo', 'level', 'sponsor_type', 'url', 'description'],
62 'tracks': ['id', 'name', 'color', 'font_color'],
63 'session_types': ['id', 'name', 'length'],
64 'forms': []
65 }
66
67 # keep sync with storage.UPLOAD_PATHS
68 DOWNLOAD_FIEDLS = {
69 'sessions': {
70 'video': ['video', '/videos/session_%d'],
71 'audio': ['audio', '/audios/session_%d'],
72 'slides': ['document', '/slides/session_%d']
73 },
74 'speakers': {
75 'photo': ['image', '/images/speakers/%s_%d']
76 },
77 'event': {
78 'logo': ['image', '/images/logo'],
79 'background_image': ['image', '/images/background']
80 },
81 'sponsors': {
82 'logo': ['image', '/images/sponsors/%s_%d']
83 },
84 'tracks': {
85 'track_image_url': ['image', '/images/tracks/image_%d']
86 }
87 }
88
89 # strings to remove in a filename
90 FILENAME_EXCLUDE = '<>:"/\|?*;'
91
92
93 # FUNCTIONS
94
95 def sorted_dict(data):
96 """
97 sorts a json (dict/list->dict) and returns OrderedDict
98 """
99 if type(data) == OrderedDict:
100 data = dict(data)
101 if type(data) == dict:
102 data = OrderedDict(sorted(data.items(), key=lambda t: t[0]))
103 elif type(data) == list:
104 for count in range(len(data)):
105 data[count] = OrderedDict(sorted(data[count].items(), key=lambda t: t[0]))
106 return data
107
108
109 def _order_json(data, srv):
110 """
111 sorts the data a/c FIELD_ORDER and returns.
112 If some keys are not included in FIELD_ORDER, they go at last, sorted alphabetically
113 """
114 new_data = OrderedDict()
115 for field in FIELD_ORDER[srv[0]]:
116 new_data[field] = sorted_dict(data[field])
117 data.pop(field, None)
118 # remaining fields, sort and add
119 # https://docs.python.org/2/library/collections.html#collections.OrderedDict
120 data = OrderedDict(sorted(data.items(), key=lambda t: t[0]))
121 for key in data:
122 new_data[key] = sorted_dict(data[key])
123 return new_data
124
125
126 def _download_media(data, srv, dir_path, settings):
127 """
128 Downloads the media and saves it
129 """
130 if srv not in DOWNLOAD_FIEDLS:
131 return
132 for i in DOWNLOAD_FIEDLS[srv]:
133 if not data[i]:
134 continue
135 if not settings[DOWNLOAD_FIEDLS[srv][i][0]]:
136 continue
137 path = DOWNLOAD_FIEDLS[srv][i][1]
138 if srv == 'speakers':
139 path %= make_filename(data['name']), data['id']
140 elif srv == 'sponsors':
141 path %= make_filename(data['name']), data['id']
142 elif srv != 'event':
143 path = path % (data['id'])
144 if data[i].find('.') > -1: # add extension
145 ext = data[i].rsplit('.', 1)[1]
146 if ext.find('/') == -1:
147 path += '.' + ext
148 full_path = dir_path + path
149 # make dir
150 cdir = full_path.rsplit('/', 1)[0]
151 if not os.path.isdir(cdir):
152 os.makedirs(cdir)
153 # download and set
154 url = data[i]
155 if not is_downloadable(url):
156 continue
157 try:
158 r = requests.get(url, allow_redirects=True)
159 ext = get_filename_from_cd(r.headers.get('content-disposition'))[1]
160 full_path += ext
161 path += ext
162 open(full_path, 'wb').write(r.content)
163 data[i] = path
164 except Exception:
165 pass
166
167
168 def _generate_meta():
169 """
170 Generate Meta information for export
171 """
172 d = {'root_url': request.url_root}
173 return d
174
175
176 def export_event_json(event_id, settings):
177 """
178 Exports the event as a zip on the server and return its path
179 """
180 # make directory
181 exports_dir = app.config['BASE_DIR'] + '/static/uploads/exports/'
182 if not os.path.isdir(exports_dir):
183 os.mkdir(exports_dir)
184 dir_path = exports_dir + 'event%d' % event_id
185 if os.path.isdir(dir_path):
186 shutil.rmtree(dir_path, ignore_errors=True)
187 os.mkdir(dir_path)
188 # save to directory
189 for e in EXPORTS:
190 if e[0] == 'event':
191 data = _order_json(marshal(e[1].get(event_id), e[2]), e)
192 _download_media(data, 'event', dir_path, settings)
193 else:
194 data = marshal(e[1].list(event_id), e[2])
195 for count in range(len(data)):
196 data[count] = _order_json(data[count], e)
197 _download_media(data[count], e[0], dir_path, settings)
198 data_str = json.dumps(data, indent=4, ensure_ascii=False).encode('utf-8')
199 fp = open(dir_path + '/' + e[0], 'w')
200 fp.write(data_str)
201 fp.close()
202 # add meta
203 data_str = json.dumps(
204 _generate_meta(), sort_keys=True,
205 indent=4, ensure_ascii=False
206 ).encode('utf-8')
207 fp = open(dir_path + '/meta', 'w')
208 fp.write(data_str)
209 fp.close()
210 # make zip
211 shutil.make_archive(dir_path, 'zip', dir_path)
212 dir_path = dir_path + ".zip"
213
214 storage_path = UPLOAD_PATHS['exports']['zip'].format(
215 event_id = event_id
216 )
217 uploaded_file = UploadedFile(dir_path, dir_path.rsplit('/', 1)[1])
218 storage_url = upload(uploaded_file, storage_path)
219
220 if get_settings()['storage_place'] != "s3" or get_settings()['storage_place'] != 'gs':
221 storage_url = app.config['BASE_DIR'] + storage_url.replace("/serve_","/")
222 return storage_url
223
224
225 # HELPERS
226
227 def create_export_job(task_id, event_id):
228 """
229 Create export job for an export that is going to start
230 """
231 export_job = ExportJob.query.filter_by(event_id=event_id).first()
232 task_url = url_for('api.extras_celery_task', task_id=task_id)
233 if export_job:
234 export_job.task = task_url
235 export_job.user_email = g.user.email
236 export_job.event = EventModel.query.get(event_id)
237 export_job.start_time = datetime.now()
238 else:
239 export_job = ExportJob(
240 task=task_url, user_email=g.user.email,
241 event=EventModel.query.get(event_id)
242 )
243 save_to_db(export_job, 'ExportJob saved')
244
245
246 def send_export_mail(event_id, result):
247 """
248 send export event mail after the process is complete
249 """
250 job = DataGetter.get_export_jobs(event_id)
251 if not job: # job not stored, happens in case of CELERY_ALWAYS_EAGER
252 return
253 event = EventModel.query.get(event_id)
254 if not event:
255 event_name = '(Undefined)'
256 else:
257 event_name = event.name
258 send_email_after_export(job.user_email, event_name, result)
259 user = DataGetter.get_user_by_email(job.user_email)
260 send_notif_after_export(user, event_name, result)
261
262
263 # FIELD DATA FORMATTERS
264 def make_filename(name):
265 """Make speaker image filename for export"""
266 for _ in FILENAME_EXCLUDE:
267 name = name.replace(_, ' ')
268 return ''.join(s.title() for s in name.split() if s)
269
[end of app/api/helpers/export_helpers.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/app/api/helpers/export_helpers.py b/app/api/helpers/export_helpers.py
--- a/app/api/helpers/export_helpers.py
+++ b/app/api/helpers/export_helpers.py
@@ -217,7 +217,7 @@
uploaded_file = UploadedFile(dir_path, dir_path.rsplit('/', 1)[1])
storage_url = upload(uploaded_file, storage_path)
- if get_settings()['storage_place'] != "s3" or get_settings()['storage_place'] != 'gs':
+ if get_settings()['storage_place'] != "s3" and get_settings()['storage_place'] != 'gs':
storage_url = app.config['BASE_DIR'] + storage_url.replace("/serve_","/")
return storage_url
|
{"golden_diff": "diff --git a/app/api/helpers/export_helpers.py b/app/api/helpers/export_helpers.py\n--- a/app/api/helpers/export_helpers.py\n+++ b/app/api/helpers/export_helpers.py\n@@ -217,7 +217,7 @@\n uploaded_file = UploadedFile(dir_path, dir_path.rsplit('/', 1)[1])\n storage_url = upload(uploaded_file, storage_path)\n \n- if get_settings()['storage_place'] != \"s3\" or get_settings()['storage_place'] != 'gs':\n+ if get_settings()['storage_place'] != \"s3\" and get_settings()['storage_place'] != 'gs':\n storage_url = app.config['BASE_DIR'] + storage_url.replace(\"/serve_\",\"/\")\n return storage_url\n", "issue": "Export of Documents leads to 404\nWhen Trying to export the event as Documents (json files) the page reports that my request was completed successfully. \r\nHowever, when clicking on the download link I get a 404. \r\nIn this case, the exact link visited was: \r\nhttps://eventyay.com/notifications/opev/open_eventhttps://storage.googleapis.com/eventyay.com/exports/69/aVllSlAvdk/event69.zip\r\n\n", "before_files": [{"content": "import json\nimport os\nimport shutil\nfrom collections import OrderedDict\nfrom datetime import datetime\n\nimport requests\nfrom flask import current_app as app\nfrom flask import request, g, url_for\nfrom flask_restplus import marshal\n\nfrom app.helpers.data import save_to_db\nfrom app.helpers.data_getter import DataGetter\nfrom app.helpers.helpers import send_email_after_export, send_notif_after_export\nfrom app.models.event import Event as EventModel\nfrom app.models.export_jobs import ExportJob\nfrom import_helpers import is_downloadable, get_filename_from_cd\nfrom .non_apis import CustomFormDAO, CUSTOM_FORM\nfrom ..events import DAO as EventDAO, EVENT as EVENT_MODEL\nfrom ..microlocations import DAO as MicrolocationDAO, MICROLOCATION\nfrom ..sessions import DAO as SessionDAO, SESSION, \\\n TypeDAO as SessionTypeDAO, SESSION_TYPE\nfrom ..speakers import DAO as SpeakerDAO, SPEAKER\nfrom ..sponsors import DAO as SponsorDAO, SPONSOR\nfrom ..tracks import DAO as TrackDAO, TRACK\nfrom app.helpers.storage import upload, UPLOAD_PATHS, UploadedFile\nfrom app.settings import get_settings\n\n# DELETE FIELDS\n# All fields to be deleted go here\nEVENT = EVENT_MODEL.clone('EventExport')\n\nEXPORTS = [\n ('event', EventDAO, EVENT),\n ('microlocations', MicrolocationDAO, MICROLOCATION),\n ('sessions', SessionDAO, SESSION),\n ('speakers', SpeakerDAO, SPEAKER),\n ('sponsors', SponsorDAO, SPONSOR),\n ('tracks', TrackDAO, TRACK),\n ('session_types', SessionTypeDAO, SESSION_TYPE),\n ('forms', CustomFormDAO, CUSTOM_FORM)\n]\n\n# order of keys in export json\nFIELD_ORDER = {\n 'event': [\n 'id', 'name', 'latitude', 'longitude', 'location_name', 'start_time', 'end_time',\n 'timezone', 'description', 'background_image', 'logo', 'organizer_name',\n 'organizer_description', 'event_url', 'social_links', 'ticket_url', 'privacy', 'type',\n 'topic', 'sub_topic', 'code_of_conduct', 'copyright'\n ],\n 'microlocations': ['id', 'name', 'floor'],\n 'sessions': [\n 'id', 'title', 'subtitle', 'short_abstract', 'long_abstract', 'start_time', 'end_time',\n 'session_type', 'track', 'comments', 'language', 'slides', 'audio', 'video'\n ],\n 'speakers': [\n 'id', 'name', 'email', 'mobile', 'photo', 'organisation', 'position', 'country',\n 'short_biography', 'long_biography', 'website', 'twitter', 'facebook', 'github', 'linkedin'\n ],\n 'sponsors': ['id', 'name', 'logo', 'level', 'sponsor_type', 'url', 'description'],\n 'tracks': ['id', 'name', 'color', 'font_color'],\n 'session_types': 
['id', 'name', 'length'],\n 'forms': []\n}\n\n# keep sync with storage.UPLOAD_PATHS\nDOWNLOAD_FIEDLS = {\n 'sessions': {\n 'video': ['video', '/videos/session_%d'],\n 'audio': ['audio', '/audios/session_%d'],\n 'slides': ['document', '/slides/session_%d']\n },\n 'speakers': {\n 'photo': ['image', '/images/speakers/%s_%d']\n },\n 'event': {\n 'logo': ['image', '/images/logo'],\n 'background_image': ['image', '/images/background']\n },\n 'sponsors': {\n 'logo': ['image', '/images/sponsors/%s_%d']\n },\n 'tracks': {\n 'track_image_url': ['image', '/images/tracks/image_%d']\n }\n}\n\n# strings to remove in a filename\nFILENAME_EXCLUDE = '<>:\"/\\|?*;'\n\n\n# FUNCTIONS\n\ndef sorted_dict(data):\n \"\"\"\n sorts a json (dict/list->dict) and returns OrderedDict\n \"\"\"\n if type(data) == OrderedDict:\n data = dict(data)\n if type(data) == dict:\n data = OrderedDict(sorted(data.items(), key=lambda t: t[0]))\n elif type(data) == list:\n for count in range(len(data)):\n data[count] = OrderedDict(sorted(data[count].items(), key=lambda t: t[0]))\n return data\n\n\ndef _order_json(data, srv):\n \"\"\"\n sorts the data a/c FIELD_ORDER and returns.\n If some keys are not included in FIELD_ORDER, they go at last, sorted alphabetically\n \"\"\"\n new_data = OrderedDict()\n for field in FIELD_ORDER[srv[0]]:\n new_data[field] = sorted_dict(data[field])\n data.pop(field, None)\n # remaining fields, sort and add\n # https://docs.python.org/2/library/collections.html#collections.OrderedDict\n data = OrderedDict(sorted(data.items(), key=lambda t: t[0]))\n for key in data:\n new_data[key] = sorted_dict(data[key])\n return new_data\n\n\ndef _download_media(data, srv, dir_path, settings):\n \"\"\"\n Downloads the media and saves it\n \"\"\"\n if srv not in DOWNLOAD_FIEDLS:\n return\n for i in DOWNLOAD_FIEDLS[srv]:\n if not data[i]:\n continue\n if not settings[DOWNLOAD_FIEDLS[srv][i][0]]:\n continue\n path = DOWNLOAD_FIEDLS[srv][i][1]\n if srv == 'speakers':\n path %= make_filename(data['name']), data['id']\n elif srv == 'sponsors':\n path %= make_filename(data['name']), data['id']\n elif srv != 'event':\n path = path % (data['id'])\n if data[i].find('.') > -1: # add extension\n ext = data[i].rsplit('.', 1)[1]\n if ext.find('/') == -1:\n path += '.' 
+ ext\n full_path = dir_path + path\n # make dir\n cdir = full_path.rsplit('/', 1)[0]\n if not os.path.isdir(cdir):\n os.makedirs(cdir)\n # download and set\n url = data[i]\n if not is_downloadable(url):\n continue\n try:\n r = requests.get(url, allow_redirects=True)\n ext = get_filename_from_cd(r.headers.get('content-disposition'))[1]\n full_path += ext\n path += ext\n open(full_path, 'wb').write(r.content)\n data[i] = path\n except Exception:\n pass\n\n\ndef _generate_meta():\n \"\"\"\n Generate Meta information for export\n \"\"\"\n d = {'root_url': request.url_root}\n return d\n\n\ndef export_event_json(event_id, settings):\n \"\"\"\n Exports the event as a zip on the server and return its path\n \"\"\"\n # make directory\n exports_dir = app.config['BASE_DIR'] + '/static/uploads/exports/'\n if not os.path.isdir(exports_dir):\n os.mkdir(exports_dir)\n dir_path = exports_dir + 'event%d' % event_id\n if os.path.isdir(dir_path):\n shutil.rmtree(dir_path, ignore_errors=True)\n os.mkdir(dir_path)\n # save to directory\n for e in EXPORTS:\n if e[0] == 'event':\n data = _order_json(marshal(e[1].get(event_id), e[2]), e)\n _download_media(data, 'event', dir_path, settings)\n else:\n data = marshal(e[1].list(event_id), e[2])\n for count in range(len(data)):\n data[count] = _order_json(data[count], e)\n _download_media(data[count], e[0], dir_path, settings)\n data_str = json.dumps(data, indent=4, ensure_ascii=False).encode('utf-8')\n fp = open(dir_path + '/' + e[0], 'w')\n fp.write(data_str)\n fp.close()\n # add meta\n data_str = json.dumps(\n _generate_meta(), sort_keys=True,\n indent=4, ensure_ascii=False\n ).encode('utf-8')\n fp = open(dir_path + '/meta', 'w')\n fp.write(data_str)\n fp.close()\n # make zip\n shutil.make_archive(dir_path, 'zip', dir_path)\n dir_path = dir_path + \".zip\"\n\n storage_path = UPLOAD_PATHS['exports']['zip'].format(\n event_id = event_id\n )\n uploaded_file = UploadedFile(dir_path, dir_path.rsplit('/', 1)[1])\n storage_url = upload(uploaded_file, storage_path)\n\n if get_settings()['storage_place'] != \"s3\" or get_settings()['storage_place'] != 'gs':\n storage_url = app.config['BASE_DIR'] + storage_url.replace(\"/serve_\",\"/\")\n return storage_url\n\n\n# HELPERS\n\ndef create_export_job(task_id, event_id):\n \"\"\"\n Create export job for an export that is going to start\n \"\"\"\n export_job = ExportJob.query.filter_by(event_id=event_id).first()\n task_url = url_for('api.extras_celery_task', task_id=task_id)\n if export_job:\n export_job.task = task_url\n export_job.user_email = g.user.email\n export_job.event = EventModel.query.get(event_id)\n export_job.start_time = datetime.now()\n else:\n export_job = ExportJob(\n task=task_url, user_email=g.user.email,\n event=EventModel.query.get(event_id)\n )\n save_to_db(export_job, 'ExportJob saved')\n\n\ndef send_export_mail(event_id, result):\n \"\"\"\n send export event mail after the process is complete\n \"\"\"\n job = DataGetter.get_export_jobs(event_id)\n if not job: # job not stored, happens in case of CELERY_ALWAYS_EAGER\n return\n event = EventModel.query.get(event_id)\n if not event:\n event_name = '(Undefined)'\n else:\n event_name = event.name\n send_email_after_export(job.user_email, event_name, result)\n user = DataGetter.get_user_by_email(job.user_email)\n send_notif_after_export(user, event_name, result)\n\n\n# FIELD DATA FORMATTERS\ndef make_filename(name):\n \"\"\"Make speaker image filename for export\"\"\"\n for _ in FILENAME_EXCLUDE:\n name = name.replace(_, ' ')\n return ''.join(s.title() for s in 
name.split() if s)\n", "path": "app/api/helpers/export_helpers.py"}]}
| 3,612 | 156 |
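Note on the open-event patch above: the original condition `storage_place != "s3" or storage_place != 'gs'` is true for every value (any single string differs from at least one of the two), so the local `BASE_DIR` prefix was prepended even when the export had been uploaded to cloud storage, which matches the concatenated link reported in the issue. Changing `or` to `and` limits the rewrite to genuinely local storage. A small sketch of the boolean behaviour, with illustrative helper names that are not part of the real codebase:

```python
def needs_local_prefix_buggy(storage_place):
    return storage_place != "s3" or storage_place != "gs"    # always True

def needs_local_prefix_fixed(storage_place):
    return storage_place != "s3" and storage_place != "gs"   # True only for non-cloud backends

for place in ("s3", "gs", "local"):
    print(place, needs_local_prefix_buggy(place), needs_local_prefix_fixed(place))
# s3 True False
# gs True False
# local True True
```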
gh_patches_debug_19877
|
rasdani/github-patches
|
git_diff
|
spyder-ide__spyder-7975
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Go to definition in the Editor stopped working after the introspection services migration to use the LSP
## Problem Description
Go to definition (Ctrl-G) in the Editor stopped working after PR #4751 was merged.
A minor comment first: the `lsp_client_python.log` file is taking forever to build, and if something introspection-related is done while the file is being built, everything seems to break (help, go to definition and code style warnings).
I think the reason that go to definition has stopped working is that there is an error in the path of the file that the language server is returning to Spyder in [handle_go_to_definition](https://github.com/spyder-ide/spyder/blob/master/spyder/widgets/sourcecode/codeeditor.py#L956). An extra `/` is added at the beginning and the disk name is not capitalized.
For example, let's say that I Ctrl-G [GoToLineDialog](https://github.com/spyder-ide/spyder/blob/master/spyder/widgets/sourcecode/codeeditor.py#L1682) on line 1682 of the file `codeeditor.py`. This file is located on my computer at `C:\Users\User\spyder\spyder\widgets\sourcecode\codeeditor.py`.
The `position['params']` that is returned to [handle_go_to_definition](https://github.com/spyder-ide/spyder/blob/master/spyder/widgets/sourcecode/codeeditor.py#L956) is:
```python
{'uri': 'file:///c:/Users/User/spyder/spyder/widgets/sourcecode/codeeditor.py',
'range': {'start': {'line': 102, 'character': 6}, 'end': {'line': 102, 'character': 20}},
'file': '/c:/Users/User/spyder/spyder/widgets/sourcecode/codeeditor.py'
}
```
So as you can see, the value stored for the file is `'/c:/Users/User/spyder/spyder/widgets/sourcecode/codeeditor.py'` instead of `C:\Users\User\spyder\spyder\widgets\sourcecode\codeeditor.py`, the value that is stored in `CodeEditor.filename`.
If I replace [handle_go_to_definition](https://github.com/spyder-ide/spyder/blob/master/spyder/widgets/sourcecode/codeeditor.py#L956) by:
```python
@handles(LSPRequestTypes.DOCUMENT_DEFINITION)
def handle_go_to_definition(self, position):
position = position['params']
if position is not None:
def_range = position['range']
start = def_range['start']
position_file = osp.normpath(position['file'][1:])
if osp.normpath(self.filename) == position_file:
self.go_to_line(start['line'] + 1, start['character'],
None, word=None)
else:
self.go_to_definition.emit(position_file, start['line'] + 1,
start['character'])
```
to remove the extra `/` and normalize the paths before comparing them, then the go to definition feature works again as expected.
## Versions
<!--- You can get this information from Help > About Spyder...
or (if Spyder won't launch) the "conda list" command
from the Anaconda Prompt/Terminal/command line. --->
* Spyder version: Spyder 4.0.0.dev0
* Python version: Python 3.6.5 64bits
* Qt version: Qt 5.9.3
* PyQt version: PyQt5 5.9.2
* Operating System name/version: Windows
### Dependencies
<!--- Please go to the menu entry Help > Dependencies,
press the Copy to clipboard button and paste below --->
```
pyflakes >=0.6.0 : 1.6.0 (OK)
pycodestyle >=2.3 : 2.3.1 (OK)
pygments >=2.0 : 2.2.0 (OK)
sphinx >=0.6.6 : 1.7.1 (OK)
rope >=0.9.4 : 0.10.7 (OK)
jedi >=0.11.0 : 0.12.1 (OK)
nbconvert >=4.0 : 5.3.1 (OK)
pandas >=0.13.1 : 0.22.0 (OK)
numpy >=1.7 : 1.13.0 (OK)
sympy >=0.7.3 : 1.1.1 (OK)
cython >=0.21 : 0.27.3 (OK)
qtconsole >=4.2.0 : 4.4.0.dev (OK)
IPython >=4.0 : 6.2.1 (OK)
matplotlib >=2.0.0: 2.2.0 (OK)
pylint >=0.25 : 1.8.3 (OK)
```
</issue>
<code>
[start of spyder/plugins/editor/lsp/providers/document.py]
1 # -*- coding: utf-8 -*-
2
3 # Copyright © Spyder Project Contributors
4 # Licensed under the terms of the MIT License
5 # (see spyder/__init__.py for details)
6
7 """Spyder Language Server Protocol Client document handler routines."""
8
9 import os.path as osp
10
11 from spyder.py3compat import PY2
12 from spyder.config.base import debug_print
13 from spyder.plugins.editor.lsp import (
14 LSPRequestTypes, InsertTextFormat, CompletionItemKind,
15 ClientConstants)
16 from spyder.plugins.editor.lsp.decorators import handles, send_request
17
18 if PY2:
19 import pathlib2 as pathlib
20 from urlparse import urlparse
21 else:
22 import pathlib
23 from urllib.parse import urlparse
24
25
26 def path_as_uri(path):
27 return pathlib.Path(osp.abspath(path)).as_uri()
28
29
30 class DocumentProvider:
31 def register_file(self, filename, signal):
32 filename = path_as_uri(filename)
33 if filename not in self.watched_files:
34 self.watched_files[filename] = []
35 self.watched_files[filename].append(signal)
36
37 @handles(LSPRequestTypes.DOCUMENT_PUBLISH_DIAGNOSTICS)
38 def process_document_diagnostics(self, response, *args):
39 uri = response['uri']
40 diagnostics = response['diagnostics']
41 callbacks = self.watched_files[uri]
42 for callback in callbacks:
43 callback.emit(
44 LSPRequestTypes.DOCUMENT_PUBLISH_DIAGNOSTICS,
45 {'params': diagnostics})
46
47 @send_request(
48 method=LSPRequestTypes.DOCUMENT_DID_CHANGE, requires_response=False)
49 def document_changed(self, params):
50 params = {
51 'textDocument': {
52 'uri': path_as_uri(params['file']),
53 'version': params['version']
54 },
55 'contentChanges': [{
56 'text': params['text']
57 }]
58 }
59 return params
60
61 @send_request(
62 method=LSPRequestTypes.DOCUMENT_DID_OPEN, requires_response=False)
63 def document_open(self, editor_params):
64 uri = path_as_uri(editor_params['file'])
65 if uri not in self.watched_files:
66 self.register_file(editor_params['file'], editor_params['signal'])
67 params = {
68 'textDocument': {
69 'uri': uri,
70 'languageId': editor_params['language'],
71 'version': editor_params['version'],
72 'text': editor_params['text']
73 }
74 }
75
76 return params
77
78 @send_request(method=LSPRequestTypes.DOCUMENT_COMPLETION)
79 def document_completion_request(self, params):
80 params = {
81 'textDocument': {
82 'uri': path_as_uri(params['file'])
83 },
84 'position': {
85 'line': params['line'],
86 'character': params['column']
87 }
88 }
89
90 return params
91
92 @handles(LSPRequestTypes.DOCUMENT_COMPLETION)
93 def process_document_completion(self, response, req_id):
94 if isinstance(response, dict):
95 response = response['items']
96 for item in response:
97 item['kind'] = item.get('kind', CompletionItemKind.TEXT)
98 item['detail'] = item.get('detail', '')
99 item['documentation'] = item.get('documentation', '')
100 item['sortText'] = item.get('sortText', item['label'])
101 item['filterText'] = item.get('filterText', item['label'])
102 item['insertTextFormat'] = item.get(
103 'insertTextFormat', InsertTextFormat.PLAIN_TEXT)
104 item['insertText'] = item.get('insertText', item['label'])
105
106 if req_id in self.req_reply:
107 self.req_reply[req_id].emit(
108 LSPRequestTypes.DOCUMENT_COMPLETION, {'params': response})
109
110 @send_request(method=LSPRequestTypes.DOCUMENT_SIGNATURE)
111 def signature_help_request(self, params):
112 params = {
113 'textDocument': {
114 'uri': path_as_uri(params['file'])
115 },
116 'position': {
117 'line': params['line'],
118 'character': params['column']
119 }
120 }
121
122 return params
123
124 @handles(LSPRequestTypes.DOCUMENT_SIGNATURE)
125 def process_signature_completion(self, response, req_id):
126 if len(response['signatures']) > 0:
127 response['signatures'] = response['signatures'][
128 response['activeSignature']]
129 else:
130 response = None
131 if req_id in self.req_reply:
132 self.req_reply[req_id].emit(
133 LSPRequestTypes.DOCUMENT_SIGNATURE,
134 {'params': response})
135
136 @send_request(method=LSPRequestTypes.DOCUMENT_HOVER)
137 def hover_request(self, params):
138 params = {
139 'textDocument': {
140 'uri': path_as_uri(params['file'])
141 },
142 'position': {
143 'line': params['line'],
144 'character': params['column']
145 }
146 }
147
148 return params
149
150 @handles(LSPRequestTypes.DOCUMENT_HOVER)
151 def process_hover_result(self, result, req_id):
152 contents = result['contents']
153 if isinstance(contents, list):
154 contents = contents[0]
155 if isinstance(contents, dict):
156 contents = contents['value']
157 if req_id in self.req_reply:
158 self.req_reply[req_id].emit(
159 LSPRequestTypes.DOCUMENT_HOVER,
160 {'params': contents})
161
162 @send_request(method=LSPRequestTypes.DOCUMENT_DEFINITION)
163 def go_to_definition_request(self, params):
164 params = {
165 'textDocument': {
166 'uri': path_as_uri(params['file'])
167 },
168 'position': {
169 'line': params['line'],
170 'character': params['column']
171 }
172 }
173
174 return params
175
176 @handles(LSPRequestTypes.DOCUMENT_DEFINITION)
177 def process_go_to_definition(self, result, req_id):
178 if isinstance(result, list):
179 if len(result) > 0:
180 result = result[0]
181 uri = urlparse(result['uri'])
182 result['file'] = osp.join(uri.netloc, uri.path)
183 else:
184 result = None
185 if req_id in self.req_reply:
186 self.req_reply[req_id].emit(
187 LSPRequestTypes.DOCUMENT_DEFINITION,
188 {'params': result})
189
190 @send_request(method=LSPRequestTypes.DOCUMENT_WILL_SAVE,
191 requires_response=False)
192 def document_will_save_notification(self, params):
193 params = {
194 'textDocument': {
195 'uri': path_as_uri(params['file'])
196 },
197 'reason': params['reason']
198 }
199 return params
200
201 @send_request(method=LSPRequestTypes.DOCUMENT_DID_CLOSE,
202 requires_response=False)
203 def document_did_close(self, params):
204 file_signal = params['signal']
205 debug_print('[{0}] File: {1}'.format(
206 LSPRequestTypes.DOCUMENT_DID_CLOSE, params['file']))
207 filename = path_as_uri(params['file'])
208
209 params = {
210 'textDocument': {
211 'uri': filename
212 }
213 }
214 if filename not in self.watched_files:
215 params[ClientConstants.CANCEL] = True
216 else:
217 signals = self.watched_files[filename]
218 idx = -1
219 for i, signal in enumerate(signals):
220 if id(file_signal) == id(signal):
221 idx = i
222 break
223 if idx > 0:
224 signals.pop(idx)
225
226 if len(signals) == 0:
227 self.watched_files.pop(filename)
228 return params
229
[end of spyder/plugins/editor/lsp/providers/document.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/spyder/plugins/editor/lsp/providers/document.py b/spyder/plugins/editor/lsp/providers/document.py
--- a/spyder/plugins/editor/lsp/providers/document.py
+++ b/spyder/plugins/editor/lsp/providers/document.py
@@ -18,9 +18,11 @@
if PY2:
import pathlib2 as pathlib
from urlparse import urlparse
+ from urllib import url2pathname
else:
import pathlib
from urllib.parse import urlparse
+ from urllib.request import url2pathname
def path_as_uri(path):
@@ -179,7 +181,10 @@
if len(result) > 0:
result = result[0]
uri = urlparse(result['uri'])
- result['file'] = osp.join(uri.netloc, uri.path)
+ netloc, path = uri.netloc, uri.path
+ # Prepend UNC share notation if we have a UNC path.
+ netloc = '\\\\' + netloc if netloc else netloc
+ result['file'] = url2pathname(netloc + path)
else:
result = None
if req_id in self.req_reply:
|
{"golden_diff": "diff --git a/spyder/plugins/editor/lsp/providers/document.py b/spyder/plugins/editor/lsp/providers/document.py\n--- a/spyder/plugins/editor/lsp/providers/document.py\n+++ b/spyder/plugins/editor/lsp/providers/document.py\n@@ -18,9 +18,11 @@\n if PY2:\n import pathlib2 as pathlib\n from urlparse import urlparse\n+ from urllib import url2pathname\n else:\n import pathlib\n from urllib.parse import urlparse\n+ from urllib.request import url2pathname\n \n \n def path_as_uri(path):\n@@ -179,7 +181,10 @@\n if len(result) > 0:\n result = result[0]\n uri = urlparse(result['uri'])\n- result['file'] = osp.join(uri.netloc, uri.path)\n+ netloc, path = uri.netloc, uri.path\n+ # Prepend UNC share notation if we have a UNC path.\n+ netloc = '\\\\\\\\' + netloc if netloc else netloc\n+ result['file'] = url2pathname(netloc + path)\n else:\n result = None\n if req_id in self.req_reply:\n", "issue": "Go to definition in the Editor stopped working after the introspection services migration to use the LSP\n## Problem Description\r\n\r\nGo to definition (Ctrl-G) in the Editor stopped working after PR #4751 was merged.\r\n\r\nA minor comment first: the `lsp_client_python.log` file is taking forever to build and if something introspection related is done while the file is being built, everything seems to break (help, go to definition and code style warnings).\r\n\r\nI think the reason that go to definition has stopped working is that there is an error in the path of the file that the language server is returning to Spyder in [handle_go_to_definition](https://github.com/spyder-ide/spyder/blob/master/spyder/widgets/sourcecode/codeeditor.py#L956). An extra `/` is added at the beginning and the disk name is not capitalized.\r\n\r\nFor example, let's say that I Ctrl-G [GoToLineDialog](https://github.com/spyder-ide/spyder/blob/master/spyder/widgets/sourcecode/codeeditor.py#L1682) on line 1682 of the file `codeeditor.py`. 
This file is located on my computer at `C:\\Users\\User\\spyder\\spyder\\widgets\\sourcecode\\codeeditor.py`.\r\n\r\nThe `position['params']` that is returned to [handle_go_to_definition](https://github.com/spyder-ide/spyder/blob/master/spyder/widgets/sourcecode/codeeditor.py#L956) is:\r\n\r\n```python\r\n{'uri': 'file:///c:/Users/User/spyder/spyder/widgets/sourcecode/codeeditor.py', \r\n 'range': {'start': {'line': 102, 'character': 6}, 'end': {'line': 102, 'character': 20}},\r\n 'file': '/c:/Users/User/spyder/spyder/widgets/sourcecode/codeeditor.py'\r\n}\r\n```\r\nSo as you can see, the value stored for the file is `'/c:/Users/User/spyder/spyder/widgets/sourcecode/codeeditor.py'` instead of `C:\\Users\\User\\spyder\\spyder\\widgets\\sourcecode\\codeeditor.py`, the value that is stored in `CodeEditor.filename`.\r\n\r\nIf I replace [handle_go_to_definition](https://github.com/spyder-ide/spyder/blob/master/spyder/widgets/sourcecode/codeeditor.py#L956) by:\r\n\r\n```python\r\n @handles(LSPRequestTypes.DOCUMENT_DEFINITION)\r\n def handle_go_to_definition(self, position):\r\n position = position['params']\r\n if position is not None:\r\n def_range = position['range']\r\n start = def_range['start']\r\n position_file = osp.normpath(position['file'][1:])\r\n if osp.normpath(self.filename) == position_file:\r\n self.go_to_line(start['line'] + 1, start['character'],\r\n None, word=None)\r\n else:\r\n self.go_to_definition.emit(position_file, start['line'] + 1,\r\n start['character'])\r\n```\r\n\r\nto remove the extra `\\` and normalize the paths before comparing them, then the go to definition feature is working again as expected.\r\n\r\n\r\n## Versions\r\n<!--- You can get this information from Help > About Spyder...\r\nor (if Spyder won't launch) the \"conda list\" command\r\nfrom the Anaconda Prompt/Terminal/command line. 
--->\r\n\r\n* Spyder version: Spyder 4.0.0.dev0\r\n* Python version: Python 3.6.5 64bits\r\n* Qt version: Qt 5.9.3\r\n* PyQt version: PyQt5 5.9.2\r\n* Operating System name/version: Windows \r\n\r\n### Dependencies\r\n<!--- Please go to the menu entry Help > Dependencies,\r\npress the Copy to clipboard button and paste below --->\r\n\r\n```\r\npyflakes >=0.6.0 : 1.6.0 (OK)\r\npycodestyle >=2.3 : 2.3.1 (OK)\r\npygments >=2.0 : 2.2.0 (OK)\r\nsphinx >=0.6.6 : 1.7.1 (OK)\r\nrope >=0.9.4 : 0.10.7 (OK)\r\njedi >=0.11.0 : 0.12.1 (OK)\r\nnbconvert >=4.0 : 5.3.1 (OK)\r\npandas >=0.13.1 : 0.22.0 (OK)\r\nnumpy >=1.7 : 1.13.0 (OK)\r\nsympy >=0.7.3 : 1.1.1 (OK)\r\ncython >=0.21 : 0.27.3 (OK)\r\nqtconsole >=4.2.0 : 4.4.0.dev (OK)\r\nIPython >=4.0 : 6.2.1 (OK)\r\nmatplotlib >=2.0.0: 2.2.0 (OK)\r\npylint >=0.25 : 1.8.3 (OK)\r\n```\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 Spyder Project Contributors\n# Licensed under the terms of the MIT License\n# (see spyder/__init__.py for details)\n\n\"\"\"Spyder Language Server Protocol Client document handler routines.\"\"\"\n\nimport os.path as osp\n\nfrom spyder.py3compat import PY2\nfrom spyder.config.base import debug_print\nfrom spyder.plugins.editor.lsp import (\n LSPRequestTypes, InsertTextFormat, CompletionItemKind,\n ClientConstants)\nfrom spyder.plugins.editor.lsp.decorators import handles, send_request\n\nif PY2:\n import pathlib2 as pathlib\n from urlparse import urlparse\nelse:\n import pathlib\n from urllib.parse import urlparse\n\n\ndef path_as_uri(path):\n return pathlib.Path(osp.abspath(path)).as_uri()\n\n\nclass DocumentProvider:\n def register_file(self, filename, signal):\n filename = path_as_uri(filename)\n if filename not in self.watched_files:\n self.watched_files[filename] = []\n self.watched_files[filename].append(signal)\n\n @handles(LSPRequestTypes.DOCUMENT_PUBLISH_DIAGNOSTICS)\n def process_document_diagnostics(self, response, *args):\n uri = response['uri']\n diagnostics = response['diagnostics']\n callbacks = self.watched_files[uri]\n for callback in callbacks:\n callback.emit(\n LSPRequestTypes.DOCUMENT_PUBLISH_DIAGNOSTICS,\n {'params': diagnostics})\n\n @send_request(\n method=LSPRequestTypes.DOCUMENT_DID_CHANGE, requires_response=False)\n def document_changed(self, params):\n params = {\n 'textDocument': {\n 'uri': path_as_uri(params['file']),\n 'version': params['version']\n },\n 'contentChanges': [{\n 'text': params['text']\n }]\n }\n return params\n\n @send_request(\n method=LSPRequestTypes.DOCUMENT_DID_OPEN, requires_response=False)\n def document_open(self, editor_params):\n uri = path_as_uri(editor_params['file'])\n if uri not in self.watched_files:\n self.register_file(editor_params['file'], editor_params['signal'])\n params = {\n 'textDocument': {\n 'uri': uri,\n 'languageId': editor_params['language'],\n 'version': editor_params['version'],\n 'text': editor_params['text']\n }\n }\n\n return params\n\n @send_request(method=LSPRequestTypes.DOCUMENT_COMPLETION)\n def document_completion_request(self, params):\n params = {\n 'textDocument': {\n 'uri': path_as_uri(params['file'])\n },\n 'position': {\n 'line': params['line'],\n 'character': params['column']\n }\n }\n\n return params\n\n @handles(LSPRequestTypes.DOCUMENT_COMPLETION)\n def process_document_completion(self, response, req_id):\n if isinstance(response, dict):\n response = response['items']\n for item in response:\n item['kind'] = item.get('kind', CompletionItemKind.TEXT)\n item['detail'] = item.get('detail', '')\n 
item['documentation'] = item.get('documentation', '')\n item['sortText'] = item.get('sortText', item['label'])\n item['filterText'] = item.get('filterText', item['label'])\n item['insertTextFormat'] = item.get(\n 'insertTextFormat', InsertTextFormat.PLAIN_TEXT)\n item['insertText'] = item.get('insertText', item['label'])\n\n if req_id in self.req_reply:\n self.req_reply[req_id].emit(\n LSPRequestTypes.DOCUMENT_COMPLETION, {'params': response})\n\n @send_request(method=LSPRequestTypes.DOCUMENT_SIGNATURE)\n def signature_help_request(self, params):\n params = {\n 'textDocument': {\n 'uri': path_as_uri(params['file'])\n },\n 'position': {\n 'line': params['line'],\n 'character': params['column']\n }\n }\n\n return params\n\n @handles(LSPRequestTypes.DOCUMENT_SIGNATURE)\n def process_signature_completion(self, response, req_id):\n if len(response['signatures']) > 0:\n response['signatures'] = response['signatures'][\n response['activeSignature']]\n else:\n response = None\n if req_id in self.req_reply:\n self.req_reply[req_id].emit(\n LSPRequestTypes.DOCUMENT_SIGNATURE,\n {'params': response})\n\n @send_request(method=LSPRequestTypes.DOCUMENT_HOVER)\n def hover_request(self, params):\n params = {\n 'textDocument': {\n 'uri': path_as_uri(params['file'])\n },\n 'position': {\n 'line': params['line'],\n 'character': params['column']\n }\n }\n\n return params\n\n @handles(LSPRequestTypes.DOCUMENT_HOVER)\n def process_hover_result(self, result, req_id):\n contents = result['contents']\n if isinstance(contents, list):\n contents = contents[0]\n if isinstance(contents, dict):\n contents = contents['value']\n if req_id in self.req_reply:\n self.req_reply[req_id].emit(\n LSPRequestTypes.DOCUMENT_HOVER,\n {'params': contents})\n\n @send_request(method=LSPRequestTypes.DOCUMENT_DEFINITION)\n def go_to_definition_request(self, params):\n params = {\n 'textDocument': {\n 'uri': path_as_uri(params['file'])\n },\n 'position': {\n 'line': params['line'],\n 'character': params['column']\n }\n }\n\n return params\n\n @handles(LSPRequestTypes.DOCUMENT_DEFINITION)\n def process_go_to_definition(self, result, req_id):\n if isinstance(result, list):\n if len(result) > 0:\n result = result[0]\n uri = urlparse(result['uri'])\n result['file'] = osp.join(uri.netloc, uri.path)\n else:\n result = None\n if req_id in self.req_reply:\n self.req_reply[req_id].emit(\n LSPRequestTypes.DOCUMENT_DEFINITION,\n {'params': result})\n\n @send_request(method=LSPRequestTypes.DOCUMENT_WILL_SAVE,\n requires_response=False)\n def document_will_save_notification(self, params):\n params = {\n 'textDocument': {\n 'uri': path_as_uri(params['file'])\n },\n 'reason': params['reason']\n }\n return params\n\n @send_request(method=LSPRequestTypes.DOCUMENT_DID_CLOSE,\n requires_response=False)\n def document_did_close(self, params):\n file_signal = params['signal']\n debug_print('[{0}] File: {1}'.format(\n LSPRequestTypes.DOCUMENT_DID_CLOSE, params['file']))\n filename = path_as_uri(params['file'])\n\n params = {\n 'textDocument': {\n 'uri': filename\n }\n }\n if filename not in self.watched_files:\n params[ClientConstants.CANCEL] = True\n else:\n signals = self.watched_files[filename]\n idx = -1\n for i, signal in enumerate(signals):\n if id(file_signal) == id(signal):\n idx = i\n break\n if idx > 0:\n signals.pop(idx)\n\n if len(signals) == 0:\n self.watched_files.pop(filename)\n return params\n", "path": "spyder/plugins/editor/lsp/providers/document.py"}]}
| 3,822 | 255 |
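Note on the Spyder patch above: instead of joining `uri.netloc` and `uri.path` (which keeps the leading `/` and the lowercase drive letter described in the issue), the fix feeds the URI path through `url2pathname`, which on Windows turns `/c:/...` into a normal `C:\...` path; the `\\` prefix is only added for UNC shares. A minimal reproduction of that conversion on Python 3, using the path quoted in the issue:

```python
from urllib.parse import urlparse
from urllib.request import url2pathname

uri = urlparse('file:///c:/Users/User/spyder/spyder/widgets/sourcecode/codeeditor.py')
netloc, path = uri.netloc, uri.path              # netloc == '', path == '/c:/Users/...'
netloc = '\\\\' + netloc if netloc else netloc   # UNC shares keep their \\server prefix
print(url2pathname(netloc + path))
# On Windows this prints: C:\Users\User\spyder\spyder\widgets\sourcecode\codeeditor.py
```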
gh_patches_debug_152
|
rasdani/github-patches
|
git_diff
|
blaze__blaze-1136
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
psutil.NUM_CPUS deprecated and removed
``` python
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-11-5c5ee3cb747a> in <module>()
----> 1 import blaze
/home/skipper/.virtualenvs/py3/lib/python3.4/site-packages/blaze/__init__.py in <module>()
16
17 from datashape import dshape, discover
---> 18 from .utils import ignoring
19 from .expr import (Symbol, TableSymbol, symbol, ndim, shape)
20 from .expr import (by, count, count_values, distinct, head, join, label, like,
/home/skipper/.virtualenvs/py3/lib/python3.4/site-packages/blaze/utils.py in <module>()
25 from .dispatch import dispatch
26
---> 27 thread_pool = ThreadPool(psutil.NUM_CPUS)
28
29
AttributeError: 'module' object has no attribute 'NUM_CPUS'
```
```
Python 3.4.0 (default, Apr 11 2014, 13:05:11)
Type "copyright", "credits" or "license" for more information.
IPython 3.1.0 -- An enhanced Interactive Python.
? -> Introduction and overview of IPython's features.
%quickref -> Quick reference.
help -> Python's own help system.
object? -> Details about 'object', use 'object??' for extra details.
[TerminalIPythonApp] WARNING | File not found: '/home/skipper/.pystartup'
import pu
[~/]
[1]: import psutil
[~/]
[2]: psutil.__version__
[2]: '3.0.0'
```
https://github.com/giampaolo/psutil/issues/451
</issue>
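The traceback above comes from psutil 3.x, which removed the module-level `NUM_CPUS` constant (see the linked psutil issue); the patch at the end of this entry swaps it for `psutil.cpu_count()`. A minimal sketch of the replacement call, independent of the rest of `blaze/utils.py`:

```python
from multiprocessing.pool import ThreadPool
import psutil

# psutil.cpu_count() counts logical CPUs by default; cpu_count(logical=False)
# would count physical cores instead.
thread_pool = ThreadPool(psutil.cpu_count())
```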
<code>
[start of blaze/utils.py]
1 from __future__ import absolute_import, division, print_function
2
3 import os
4 import datetime
5 from functools import wraps
6
7 try:
8 from cytoolz import nth
9 except ImportError:
10 from toolz import nth
11
12 from itertools import islice
13 from collections import Iterator
14 from multiprocessing.pool import ThreadPool
15
16 # these are used throughout blaze, don't remove them
17 from odo.utils import tmpfile, filetext, filetexts, raises, keywords, ignoring
18
19 import psutil
20 import numpy as np
21
22 # Imports that replace older utils.
23 from .compatibility import map, zip
24
25 from .dispatch import dispatch
26
27 thread_pool = ThreadPool(psutil.NUM_CPUS)
28
29
30 def nth_list(n, seq):
31 """
32
33 >>> tuple(nth_list([0, 1, 4], 'Hello'))
34 ('H', 'e', 'o')
35 >>> tuple(nth_list([4, 1, 0], 'Hello'))
36 ('o', 'e', 'H')
37 >>> tuple(nth_list([0, 0, 0], 'Hello'))
38 ('H', 'H', 'H')
39 """
40 seq = iter(seq)
41
42 result = []
43 old = 0
44 item = next(seq)
45 for index in sorted(n):
46 for i in range(index - old):
47 item = next(seq)
48 result.append(item)
49 old = index
50
51 order = [x[1] for x in sorted(zip(n, range(len(n))))]
52 return (result[i] for i in order)
53
54
55 def get(ind, coll, lazy=False):
56 """
57
58 >>> get(0, 'Hello')
59 'H'
60
61 >>> get([1, 0], 'Hello')
62 ('e', 'H')
63
64 >>> get(slice(1, 4), 'Hello')
65 ('e', 'l', 'l')
66
67 >>> get(slice(1, 4), 'Hello', lazy=True)
68 <itertools.islice object at ...>
69 """
70 if isinstance(ind, list):
71 result = nth_list(ind, coll)
72 elif isinstance(ind, slice):
73 result = islice(coll, ind.start, ind.stop, ind.step)
74 else:
75 if isinstance(coll, Iterator):
76 result = nth(ind, coll)
77 else:
78 result = coll[ind]
79 if not lazy and isinstance(result, Iterator):
80 result = tuple(result)
81 return result
82
83
84 def ndget(ind, data):
85 """
86 Get from N-Dimensional getable
87
88 Can index with elements, lists, or slices. Mimic's numpy fancy indexing on
89 generic indexibles.
90
91 >>> data = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]
92 >>> ndget(0, data)
93 [[1, 2], [3, 4]]
94 >>> ndget((0, 1), data)
95 [3, 4]
96 >>> ndget((0, 0, 0), data)
97 1
98 >>> ndget((slice(0, 2), [0, 1], 0), data)
99 ((1, 3), (5, 7))
100 """
101 if isinstance(ind, tuple) and len(ind) == 1:
102 ind = ind[0]
103 if not isinstance(ind, tuple):
104 return get(ind, data)
105 result = get(ind[0], data)
106 if isinstance(ind[0], (list, slice)):
107 return type(result)(ndget(ind[1:], row) for row in result)
108 else:
109 return ndget(ind[1:], result)
110
111
112 def normalize_to_date(dt):
113 if isinstance(dt, datetime.datetime) and not dt.time():
114 return dt.date()
115 else:
116 return dt
117
118
119 def assert_allclose(lhs, rhs):
120 for tb in map(zip, lhs, rhs):
121 for left, right in tb:
122 if isinstance(left, (np.floating, float)):
123 # account for nans
124 assert np.all(np.isclose(left, right, equal_nan=True))
125 continue
126 if isinstance(left, datetime.datetime):
127 left = normalize_to_date(left)
128 if isinstance(right, datetime.datetime):
129 right = normalize_to_date(right)
130 assert left == right
131
132
133 def example(filename, datapath=os.path.join('examples', 'data')):
134 import blaze
135 return os.path.join(os.path.dirname(blaze.__file__), datapath, filename)
136
137
138 def available_memory():
139 return psutil.virtual_memory().available
140
141
142 def listpack(x):
143 """
144 >>> listpack(1)
145 [1]
146 >>> listpack((1, 2))
147 [1, 2]
148 >>> listpack([1, 2])
149 [1, 2]
150 """
151 if isinstance(x, tuple):
152 return list(x)
153 elif isinstance(x, list):
154 return x
155 else:
156 return [x]
157
158
159 @dispatch(datetime.datetime)
160 def json_dumps(dt):
161 s = dt.isoformat()
162 if not dt.tzname():
163 s += 'Z'
164 return s
165
[end of blaze/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/blaze/utils.py b/blaze/utils.py
--- a/blaze/utils.py
+++ b/blaze/utils.py
@@ -24,7 +24,7 @@
from .dispatch import dispatch
-thread_pool = ThreadPool(psutil.NUM_CPUS)
+thread_pool = ThreadPool(psutil.cpu_count())
def nth_list(n, seq):
|
{"golden_diff": "diff --git a/blaze/utils.py b/blaze/utils.py\n--- a/blaze/utils.py\n+++ b/blaze/utils.py\n@@ -24,7 +24,7 @@\n \n from .dispatch import dispatch\n \n-thread_pool = ThreadPool(psutil.NUM_CPUS)\n+thread_pool = ThreadPool(psutil.cpu_count())\n \n \n def nth_list(n, seq):\n", "issue": "psutil.NUM_CPUS deprecated and removed\n``` python\n---------------------------------------------------------------------------\nAttributeError Traceback (most recent call last)\n<ipython-input-11-5c5ee3cb747a> in <module>()\n----> 1 import blaze\n\n/home/skipper/.virtualenvs/py3/lib/python3.4/site-packages/blaze/__init__.py in <module>()\n 16 \n 17 from datashape import dshape, discover\n---> 18 from .utils import ignoring\n 19 from .expr import (Symbol, TableSymbol, symbol, ndim, shape)\n 20 from .expr import (by, count, count_values, distinct, head, join, label, like,\n\n/home/skipper/.virtualenvs/py3/lib/python3.4/site-packages/blaze/utils.py in <module>()\n 25 from .dispatch import dispatch\n 26 \n---> 27 thread_pool = ThreadPool(psutil.NUM_CPUS)\n 28 \n 29 \n\nAttributeError: 'module' object has no attribute 'NUM_CPUS'\n```\n\n```\nPython 3.4.0 (default, Apr 11 2014, 13:05:11) \nType \"copyright\", \"credits\" or \"license\" for more information.\n\nIPython 3.1.0 -- An enhanced Interactive Python.\n? -> Introduction and overview of IPython's features.\n%quickref -> Quick reference.\nhelp -> Python's own help system.\nobject? -> Details about 'object', use 'object??' for extra details.\n[TerminalIPythonApp] WARNING | File not found: '/home/skipper/.pystartup'\nimport pu\n[~/]\n[1]: import psutil\n\n[~/]\n[2]: psutil.__version__\n[2]: '3.0.0'\n```\n\nhttps://github.com/giampaolo/psutil/issues/451\n\n", "before_files": [{"content": "from __future__ import absolute_import, division, print_function\n\nimport os\nimport datetime\nfrom functools import wraps\n\ntry:\n from cytoolz import nth\nexcept ImportError:\n from toolz import nth\n\nfrom itertools import islice\nfrom collections import Iterator\nfrom multiprocessing.pool import ThreadPool\n\n# these are used throughout blaze, don't remove them\nfrom odo.utils import tmpfile, filetext, filetexts, raises, keywords, ignoring\n\nimport psutil\nimport numpy as np\n\n# Imports that replace older utils.\nfrom .compatibility import map, zip\n\nfrom .dispatch import dispatch\n\nthread_pool = ThreadPool(psutil.NUM_CPUS)\n\n\ndef nth_list(n, seq):\n \"\"\"\n\n >>> tuple(nth_list([0, 1, 4], 'Hello'))\n ('H', 'e', 'o')\n >>> tuple(nth_list([4, 1, 0], 'Hello'))\n ('o', 'e', 'H')\n >>> tuple(nth_list([0, 0, 0], 'Hello'))\n ('H', 'H', 'H')\n \"\"\"\n seq = iter(seq)\n\n result = []\n old = 0\n item = next(seq)\n for index in sorted(n):\n for i in range(index - old):\n item = next(seq)\n result.append(item)\n old = index\n\n order = [x[1] for x in sorted(zip(n, range(len(n))))]\n return (result[i] for i in order)\n\n\ndef get(ind, coll, lazy=False):\n \"\"\"\n\n >>> get(0, 'Hello')\n 'H'\n\n >>> get([1, 0], 'Hello')\n ('e', 'H')\n\n >>> get(slice(1, 4), 'Hello')\n ('e', 'l', 'l')\n\n >>> get(slice(1, 4), 'Hello', lazy=True)\n <itertools.islice object at ...>\n \"\"\"\n if isinstance(ind, list):\n result = nth_list(ind, coll)\n elif isinstance(ind, slice):\n result = islice(coll, ind.start, ind.stop, ind.step)\n else:\n if isinstance(coll, Iterator):\n result = nth(ind, coll)\n else:\n result = coll[ind]\n if not lazy and isinstance(result, Iterator):\n result = tuple(result)\n return result\n\n\ndef ndget(ind, data):\n \"\"\"\n Get from N-Dimensional 
getable\n\n Can index with elements, lists, or slices. Mimic's numpy fancy indexing on\n generic indexibles.\n\n >>> data = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]\n >>> ndget(0, data)\n [[1, 2], [3, 4]]\n >>> ndget((0, 1), data)\n [3, 4]\n >>> ndget((0, 0, 0), data)\n 1\n >>> ndget((slice(0, 2), [0, 1], 0), data)\n ((1, 3), (5, 7))\n \"\"\"\n if isinstance(ind, tuple) and len(ind) == 1:\n ind = ind[0]\n if not isinstance(ind, tuple):\n return get(ind, data)\n result = get(ind[0], data)\n if isinstance(ind[0], (list, slice)):\n return type(result)(ndget(ind[1:], row) for row in result)\n else:\n return ndget(ind[1:], result)\n\n\ndef normalize_to_date(dt):\n if isinstance(dt, datetime.datetime) and not dt.time():\n return dt.date()\n else:\n return dt\n\n\ndef assert_allclose(lhs, rhs):\n for tb in map(zip, lhs, rhs):\n for left, right in tb:\n if isinstance(left, (np.floating, float)):\n # account for nans\n assert np.all(np.isclose(left, right, equal_nan=True))\n continue\n if isinstance(left, datetime.datetime):\n left = normalize_to_date(left)\n if isinstance(right, datetime.datetime):\n right = normalize_to_date(right)\n assert left == right\n\n\ndef example(filename, datapath=os.path.join('examples', 'data')):\n import blaze\n return os.path.join(os.path.dirname(blaze.__file__), datapath, filename)\n\n\ndef available_memory():\n return psutil.virtual_memory().available\n\n\ndef listpack(x):\n \"\"\"\n >>> listpack(1)\n [1]\n >>> listpack((1, 2))\n [1, 2]\n >>> listpack([1, 2])\n [1, 2]\n \"\"\"\n if isinstance(x, tuple):\n return list(x)\n elif isinstance(x, list):\n return x\n else:\n return [x]\n\n\n@dispatch(datetime.datetime)\ndef json_dumps(dt):\n s = dt.isoformat()\n if not dt.tzname():\n s += 'Z'\n return s\n", "path": "blaze/utils.py"}]}
| 2,436 | 75 |
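The blaze fix above comes down to a single API change: `psutil.NUM_CPUS` was removed in psutil 3.0 in favour of `psutil.cpu_count()`. The sketch below shows one backwards-compatible way to size the thread pool; the `_cpu_count` helper and the fallback to 1 are illustrative choices, not part of blaze or psutil.

```python
from multiprocessing.pool import ThreadPool

import psutil


def _cpu_count():
    # psutil >= 2.0 exposes cpu_count(); very old releases only had NUM_CPUS.
    if hasattr(psutil, "cpu_count"):
        return psutil.cpu_count() or 1
    return getattr(psutil, "NUM_CPUS", 1)


# Size the pool from the detected logical CPU count, as the patch does.
thread_pool = ThreadPool(_cpu_count())
```

Relying on `cpu_count()` alone, as the golden diff does, is enough once the minimum supported psutil version is 2.0 or later.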
gh_patches_debug_32199 | rasdani/github-patches | git_diff | pyg-team__pytorch_geometric-8143 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`test/transforms/test_add_positional_encoding.py::test_eigenvector_permutation_invariance` fails with AMD CPU
### 🐛 Describe the bug
The unit test added in #8087 fails with the error below on AMD CPU-based systems.
```
NVIDIA_TF32_OVERRIDE=0 pytest -s --cov --cov-report=xml test/transforms/test_add_positional_encoding.py::test_eigenvector_permutation_invariance
```
Traceback:
```
======================================================================== FAILURES ========================================================================$
_________________________________________________________ test_eigenvector_permutation_invariance ________________________________________________________$
def test_eigenvector_permutation_invariance():
edge_index = torch.tensor([[0, 1, 0, 4, 1, 4, 2, 3, 3, 5],
[1, 0, 4, 0, 4, 1, 3, 2, 5, 3]])
data = Data(edge_index=edge_index, num_nodes=6)
perm = torch.tensor([5, 4, 3, 2, 1, 0])
transform = AddLaplacianEigenvectorPE(
k=1,
is_undirected=True,
attr_name='x',
v0=torch.arange(data.num_nodes),
)
out1 = transform(data)
transform = AddLaplacianEigenvectorPE(
k=1,
is_undirected=True,
attr_name='x',
v0=perm,
)
out2 = transform(data.subgraph(perm))
print(out1.x[perm].abs())
print(out2.x.abs())
> assert torch.allclose(out1.x[perm].abs(), out2.x.abs(), atol=1e-1)
E assert False
E + where False = <built-in method allclose of type object at 0x7f2f94b318a0>(tensor([[0.1360],\n [0.5556],\n [0.1924],\n [0.1
360],\n [0.5556],\n [0.5556]]), tensor([[0.2091],\n [0.5245],\n [0.2957],\n [0.2091],\n [0.5245],\n [0.524
5]]), atol=0.1)
E + where <built-in method allclose of type object at 0x7f2f94b318a0> = torch.allclose
E + and tensor([[0.1360],\n [0.5556],\n [0.1924],\n [0.1360],\n [0.5556],\n [0.5556]]) = <built-in method ab
s of Tensor object at 0x7f2bc6e1e160>()
E + where <built-in method abs of Tensor object at 0x7f2bc6e1e160> = tensor([[ 0.1360],\n [-0.5556],\n [ 0.1924],\n [ 0.13
60],\n [-0.5556],\n [-0.5556]]).abs
E + and tensor([[0.2091],\n [0.5245],\n [0.2957],\n [0.2091],\n [0.5245],\n [0.5245]]) = <built-in method ab
s of Tensor object at 0x7f2bc89d3470>()
E + where <built-in method abs of Tensor object at 0x7f2bc89d3470> = tensor([[ 0.2091],\n [-0.5245],\n [ 0.2957],\n [ 0.20
91],\n [-0.5245],\n [-0.5245]]).abs
E + where tensor([[ 0.2091],\n [-0.5245],\n [ 0.2957],\n [ 0.2091],\n [-0.5245],\n [-0.5245]]) = Data(edge
_index=[2, 10], num_nodes=6, x=[6, 1]).x
```
### Environment
* PyG version: 2.4.0 (built from source)
* PyTorch version: 2.1.0
* OS: Ubuntu 22.04.3 LTS
* Python version: 3.10.12
* CUDA/cuDNN version: 12.2
* How you installed PyTorch and PyG (`conda`, `pip`, source): source
</issue>
<code>
[start of torch_geometric/transforms/add_positional_encoding.py]
1 from typing import Any, Optional
2
3 import numpy as np
4 import torch
5
6 from torch_geometric.data import Data
7 from torch_geometric.data.datapipes import functional_transform
8 from torch_geometric.transforms import BaseTransform
9 from torch_geometric.utils import (
10 get_laplacian,
11 get_self_loop_attr,
12 scatter,
13 to_edge_index,
14 to_scipy_sparse_matrix,
15 to_torch_csr_tensor,
16 )
17
18
19 def add_node_attr(data: Data, value: Any,
20 attr_name: Optional[str] = None) -> Data:
21 # TODO Move to `BaseTransform`.
22 if attr_name is None:
23 if 'x' in data:
24 x = data.x.view(-1, 1) if data.x.dim() == 1 else data.x
25 data.x = torch.cat([x, value.to(x.device, x.dtype)], dim=-1)
26 else:
27 data.x = value
28 else:
29 data[attr_name] = value
30
31 return data
32
33
34 @functional_transform('add_laplacian_eigenvector_pe')
35 class AddLaplacianEigenvectorPE(BaseTransform):
36 r"""Adds the Laplacian eigenvector positional encoding from the
37 `"Benchmarking Graph Neural Networks" <https://arxiv.org/abs/2003.00982>`_
38 paper to the given graph
39 (functional name: :obj:`add_laplacian_eigenvector_pe`).
40
41 Args:
42 k (int): The number of non-trivial eigenvectors to consider.
43 attr_name (str, optional): The attribute name of the data object to add
44 positional encodings to. If set to :obj:`None`, will be
45 concatenated to :obj:`data.x`.
46 (default: :obj:`"laplacian_eigenvector_pe"`)
47 is_undirected (bool, optional): If set to :obj:`True`, this transform
48 expects undirected graphs as input, and can hence speed up the
49 computation of eigenvectors. (default: :obj:`False`)
50 **kwargs (optional): Additional arguments of
51 :meth:`scipy.sparse.linalg.eigs` (when :attr:`is_undirected` is
52 :obj:`False`) or :meth:`scipy.sparse.linalg.eigsh` (when
53 :attr:`is_undirected` is :obj:`True`).
54 """
55 def __init__(
56 self,
57 k: int,
58 attr_name: Optional[str] = 'laplacian_eigenvector_pe',
59 is_undirected: bool = False,
60 **kwargs,
61 ):
62 self.k = k
63 self.attr_name = attr_name
64 self.is_undirected = is_undirected
65 self.kwargs = kwargs
66
67 def forward(self, data: Data) -> Data:
68 from scipy.sparse.linalg import eigs, eigsh
69 eig_fn = eigs if not self.is_undirected else eigsh
70
71 num_nodes = data.num_nodes
72 edge_index, edge_weight = get_laplacian(
73 data.edge_index,
74 data.edge_weight,
75 normalization='sym',
76 num_nodes=num_nodes,
77 )
78
79 L = to_scipy_sparse_matrix(edge_index, edge_weight, num_nodes)
80 L = L.tocsr()
81
82 eig_vals, eig_vecs = eig_fn(
83 L,
84 k=self.k + 1,
85 which='SR' if not self.is_undirected else 'SA',
86 return_eigenvectors=True,
87 **self.kwargs,
88 )
89
90 eig_vecs = np.real(eig_vecs[:, eig_vals.argsort()])
91 pe = torch.from_numpy(eig_vecs[:, 1:self.k + 1])
92 sign = -1 + 2 * torch.randint(0, 2, (self.k, ))
93 pe *= sign
94
95 data = add_node_attr(data, pe, attr_name=self.attr_name)
96 return data
97
98
99 @functional_transform('add_random_walk_pe')
100 class AddRandomWalkPE(BaseTransform):
101 r"""Adds the random walk positional encoding from the `"Graph Neural
102 Networks with Learnable Structural and Positional Representations"
103 <https://arxiv.org/abs/2110.07875>`_ paper to the given graph
104 (functional name: :obj:`add_random_walk_pe`).
105
106 Args:
107 walk_length (int): The number of random walk steps.
108 attr_name (str, optional): The attribute name of the data object to add
109 positional encodings to. If set to :obj:`None`, will be
110 concatenated to :obj:`data.x`.
111 (default: :obj:`"random_walk_pe"`)
112 """
113 def __init__(
114 self,
115 walk_length: int,
116 attr_name: Optional[str] = 'random_walk_pe',
117 ):
118 self.walk_length = walk_length
119 self.attr_name = attr_name
120
121 def forward(self, data: Data) -> Data:
122 row, col = data.edge_index
123 N = data.num_nodes
124
125 value = data.edge_weight
126 if value is None:
127 value = torch.ones(data.num_edges, device=row.device)
128 value = scatter(value, row, dim_size=N, reduce='sum').clamp(min=1)[row]
129 value = 1.0 / value
130
131 adj = to_torch_csr_tensor(data.edge_index, value, size=data.size())
132
133 out = adj
134 pe_list = [get_self_loop_attr(*to_edge_index(out), num_nodes=N)]
135 for _ in range(self.walk_length - 1):
136 out = out @ adj
137 pe_list.append(get_self_loop_attr(*to_edge_index(out), N))
138 pe = torch.stack(pe_list, dim=-1)
139
140 data = add_node_attr(data, pe, attr_name=self.attr_name)
141 return data
142
[end of torch_geometric/transforms/add_positional_encoding.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/torch_geometric/transforms/add_positional_encoding.py b/torch_geometric/transforms/add_positional_encoding.py
--- a/torch_geometric/transforms/add_positional_encoding.py
+++ b/torch_geometric/transforms/add_positional_encoding.py
@@ -52,6 +52,9 @@
:obj:`False`) or :meth:`scipy.sparse.linalg.eigsh` (when
:attr:`is_undirected` is :obj:`True`).
"""
+ # Number of nodes from which to use sparse eigenvector computation:
+ SPARSE_THRESHOLD: int = 100
+
def __init__(
self,
k: int,
@@ -65,9 +68,6 @@
self.kwargs = kwargs
def forward(self, data: Data) -> Data:
- from scipy.sparse.linalg import eigs, eigsh
- eig_fn = eigs if not self.is_undirected else eigsh
-
num_nodes = data.num_nodes
edge_index, edge_weight = get_laplacian(
data.edge_index,
@@ -77,15 +77,23 @@
)
L = to_scipy_sparse_matrix(edge_index, edge_weight, num_nodes)
- L = L.tocsr()
-
- eig_vals, eig_vecs = eig_fn(
- L,
- k=self.k + 1,
- which='SR' if not self.is_undirected else 'SA',
- return_eigenvectors=True,
- **self.kwargs,
- )
+
+ if num_nodes < self.SPARSE_THRESHOLD:
+ from numpy.linalg import eig, eigh
+ eig_fn = eig if not self.is_undirected else eigh
+
+ eig_vals, eig_vecs = eig_fn(L.todense())
+ else:
+ from scipy.sparse.linalg import eigs, eigsh
+ eig_fn = eigs if not self.is_undirected else eigsh
+
+ eig_vals, eig_vecs = eig_fn(
+ L,
+ k=self.k + 1,
+ which='SR' if not self.is_undirected else 'SA',
+ return_eigenvectors=True,
+ **self.kwargs,
+ )
eig_vecs = np.real(eig_vecs[:, eig_vals.argsort()])
pe = torch.from_numpy(eig_vecs[:, 1:self.k + 1])
|
{"golden_diff": "diff --git a/torch_geometric/transforms/add_positional_encoding.py b/torch_geometric/transforms/add_positional_encoding.py\n--- a/torch_geometric/transforms/add_positional_encoding.py\n+++ b/torch_geometric/transforms/add_positional_encoding.py\n@@ -52,6 +52,9 @@\n :obj:`False`) or :meth:`scipy.sparse.linalg.eigsh` (when\n :attr:`is_undirected` is :obj:`True`).\n \"\"\"\n+ # Number of nodes from which to use sparse eigenvector computation:\n+ SPARSE_THRESHOLD: int = 100\n+\n def __init__(\n self,\n k: int,\n@@ -65,9 +68,6 @@\n self.kwargs = kwargs\n \n def forward(self, data: Data) -> Data:\n- from scipy.sparse.linalg import eigs, eigsh\n- eig_fn = eigs if not self.is_undirected else eigsh\n-\n num_nodes = data.num_nodes\n edge_index, edge_weight = get_laplacian(\n data.edge_index,\n@@ -77,15 +77,23 @@\n )\n \n L = to_scipy_sparse_matrix(edge_index, edge_weight, num_nodes)\n- L = L.tocsr()\n-\n- eig_vals, eig_vecs = eig_fn(\n- L,\n- k=self.k + 1,\n- which='SR' if not self.is_undirected else 'SA',\n- return_eigenvectors=True,\n- **self.kwargs,\n- )\n+\n+ if num_nodes < self.SPARSE_THRESHOLD:\n+ from numpy.linalg import eig, eigh\n+ eig_fn = eig if not self.is_undirected else eigh\n+\n+ eig_vals, eig_vecs = eig_fn(L.todense())\n+ else:\n+ from scipy.sparse.linalg import eigs, eigsh\n+ eig_fn = eigs if not self.is_undirected else eigsh\n+\n+ eig_vals, eig_vecs = eig_fn(\n+ L,\n+ k=self.k + 1,\n+ which='SR' if not self.is_undirected else 'SA',\n+ return_eigenvectors=True,\n+ **self.kwargs,\n+ )\n \n eig_vecs = np.real(eig_vecs[:, eig_vals.argsort()])\n pe = torch.from_numpy(eig_vecs[:, 1:self.k + 1])\n", "issue": "`test/transforms/test_add_positional_encoding.py::test_eigenvector_permutation_invariance` fails with AMD CPU\n### \ud83d\udc1b Describe the bug\r\n\r\nThe unit test added in #8087 fails with the error bellow on AMD CPU based systems.\r\n\r\n\r\n```\r\nNVIDIA_TF32_OVERRIDE=0 pytest -s --cov --cov-report=xml test/transforms/test_add_positional_encoding.py::test_eigenvector_permutation_invariance\r\n```\r\n\r\nTraceback:\r\n```\r\n======================================================================== FAILURES ========================================================================$\r\n_________________________________________________________ test_eigenvector_permutation_invariance ________________________________________________________$\r\n\r\n def test_eigenvector_permutation_invariance():\r\n edge_index = torch.tensor([[0, 1, 0, 4, 1, 4, 2, 3, 3, 5],\r\n [1, 0, 4, 0, 4, 1, 3, 2, 5, 3]])\r\n data = Data(edge_index=edge_index, num_nodes=6)\r\n\r\n perm = torch.tensor([5, 4, 3, 2, 1, 0])\r\n transform = AddLaplacianEigenvectorPE(\r\n k=1,\r\n is_undirected=True,\r\n attr_name='x',\r\n v0=torch.arange(data.num_nodes),\r\n )\r\n out1 = transform(data)\r\n\r\n transform = AddLaplacianEigenvectorPE(\r\n k=1,\r\n is_undirected=True,\r\n attr_name='x',\r\n v0=perm,\r\n )\r\n out2 = transform(data.subgraph(perm))\r\n\r\n print(out1.x[perm].abs())\r\n print(out2.x.abs())\r\n\r\n> assert torch.allclose(out1.x[perm].abs(), out2.x.abs(), atol=1e-1)\r\nE assert False\r\nE + where False = <built-in method allclose of type object at 0x7f2f94b318a0>(tensor([[0.1360],\\n [0.5556],\\n [0.1924],\\n [0.1\r\n360],\\n [0.5556],\\n [0.5556]]), tensor([[0.2091],\\n [0.5245],\\n [0.2957],\\n [0.2091],\\n [0.5245],\\n [0.524\r\n5]]), atol=0.1)\r\nE + where <built-in method allclose of type object at 0x7f2f94b318a0> = torch.allclose\r\nE + and tensor([[0.1360],\\n [0.5556],\\n [0.1924],\\n 
[0.1360],\\n [0.5556],\\n [0.5556]]) = <built-in method ab\r\ns of Tensor object at 0x7f2bc6e1e160>()\r\nE + where <built-in method abs of Tensor object at 0x7f2bc6e1e160> = tensor([[ 0.1360],\\n [-0.5556],\\n [ 0.1924],\\n [ 0.13\r\n60],\\n [-0.5556],\\n [-0.5556]]).abs\r\nE + and tensor([[0.2091],\\n [0.5245],\\n [0.2957],\\n [0.2091],\\n [0.5245],\\n [0.5245]]) = <built-in method ab\r\ns of Tensor object at 0x7f2bc89d3470>()\r\nE + where <built-in method abs of Tensor object at 0x7f2bc89d3470> = tensor([[ 0.2091],\\n [-0.5245],\\n [ 0.2957],\\n [ 0.20\r\n91],\\n [-0.5245],\\n [-0.5245]]).abs\r\nE + where tensor([[ 0.2091],\\n [-0.5245],\\n [ 0.2957],\\n [ 0.2091],\\n [-0.5245],\\n [-0.5245]]) = Data(edge\r\n_index=[2, 10], num_nodes=6, x=[6, 1]).x\r\n```\r\n\r\n### Environment\r\n\r\n* PyG version: 2.4.0 (built from source)\r\n* PyTorch version: 2.1.0\r\n* OS: Ubuntu 22.04.3 LTS\r\n* Python version: 3.10.12\r\n* CUDA/cuDNN version: 12.2\r\n* How you installed PyTorch and PyG (`conda`, `pip`, source): source\n", "before_files": [{"content": "from typing import Any, Optional\n\nimport numpy as np\nimport torch\n\nfrom torch_geometric.data import Data\nfrom torch_geometric.data.datapipes import functional_transform\nfrom torch_geometric.transforms import BaseTransform\nfrom torch_geometric.utils import (\n get_laplacian,\n get_self_loop_attr,\n scatter,\n to_edge_index,\n to_scipy_sparse_matrix,\n to_torch_csr_tensor,\n)\n\n\ndef add_node_attr(data: Data, value: Any,\n attr_name: Optional[str] = None) -> Data:\n # TODO Move to `BaseTransform`.\n if attr_name is None:\n if 'x' in data:\n x = data.x.view(-1, 1) if data.x.dim() == 1 else data.x\n data.x = torch.cat([x, value.to(x.device, x.dtype)], dim=-1)\n else:\n data.x = value\n else:\n data[attr_name] = value\n\n return data\n\n\n@functional_transform('add_laplacian_eigenvector_pe')\nclass AddLaplacianEigenvectorPE(BaseTransform):\n r\"\"\"Adds the Laplacian eigenvector positional encoding from the\n `\"Benchmarking Graph Neural Networks\" <https://arxiv.org/abs/2003.00982>`_\n paper to the given graph\n (functional name: :obj:`add_laplacian_eigenvector_pe`).\n\n Args:\n k (int): The number of non-trivial eigenvectors to consider.\n attr_name (str, optional): The attribute name of the data object to add\n positional encodings to. If set to :obj:`None`, will be\n concatenated to :obj:`data.x`.\n (default: :obj:`\"laplacian_eigenvector_pe\"`)\n is_undirected (bool, optional): If set to :obj:`True`, this transform\n expects undirected graphs as input, and can hence speed up the\n computation of eigenvectors. 
(default: :obj:`False`)\n **kwargs (optional): Additional arguments of\n :meth:`scipy.sparse.linalg.eigs` (when :attr:`is_undirected` is\n :obj:`False`) or :meth:`scipy.sparse.linalg.eigsh` (when\n :attr:`is_undirected` is :obj:`True`).\n \"\"\"\n def __init__(\n self,\n k: int,\n attr_name: Optional[str] = 'laplacian_eigenvector_pe',\n is_undirected: bool = False,\n **kwargs,\n ):\n self.k = k\n self.attr_name = attr_name\n self.is_undirected = is_undirected\n self.kwargs = kwargs\n\n def forward(self, data: Data) -> Data:\n from scipy.sparse.linalg import eigs, eigsh\n eig_fn = eigs if not self.is_undirected else eigsh\n\n num_nodes = data.num_nodes\n edge_index, edge_weight = get_laplacian(\n data.edge_index,\n data.edge_weight,\n normalization='sym',\n num_nodes=num_nodes,\n )\n\n L = to_scipy_sparse_matrix(edge_index, edge_weight, num_nodes)\n L = L.tocsr()\n\n eig_vals, eig_vecs = eig_fn(\n L,\n k=self.k + 1,\n which='SR' if not self.is_undirected else 'SA',\n return_eigenvectors=True,\n **self.kwargs,\n )\n\n eig_vecs = np.real(eig_vecs[:, eig_vals.argsort()])\n pe = torch.from_numpy(eig_vecs[:, 1:self.k + 1])\n sign = -1 + 2 * torch.randint(0, 2, (self.k, ))\n pe *= sign\n\n data = add_node_attr(data, pe, attr_name=self.attr_name)\n return data\n\n\n@functional_transform('add_random_walk_pe')\nclass AddRandomWalkPE(BaseTransform):\n r\"\"\"Adds the random walk positional encoding from the `\"Graph Neural\n Networks with Learnable Structural and Positional Representations\"\n <https://arxiv.org/abs/2110.07875>`_ paper to the given graph\n (functional name: :obj:`add_random_walk_pe`).\n\n Args:\n walk_length (int): The number of random walk steps.\n attr_name (str, optional): The attribute name of the data object to add\n positional encodings to. If set to :obj:`None`, will be\n concatenated to :obj:`data.x`.\n (default: :obj:`\"random_walk_pe\"`)\n \"\"\"\n def __init__(\n self,\n walk_length: int,\n attr_name: Optional[str] = 'random_walk_pe',\n ):\n self.walk_length = walk_length\n self.attr_name = attr_name\n\n def forward(self, data: Data) -> Data:\n row, col = data.edge_index\n N = data.num_nodes\n\n value = data.edge_weight\n if value is None:\n value = torch.ones(data.num_edges, device=row.device)\n value = scatter(value, row, dim_size=N, reduce='sum').clamp(min=1)[row]\n value = 1.0 / value\n\n adj = to_torch_csr_tensor(data.edge_index, value, size=data.size())\n\n out = adj\n pe_list = [get_self_loop_attr(*to_edge_index(out), num_nodes=N)]\n for _ in range(self.walk_length - 1):\n out = out @ adj\n pe_list.append(get_self_loop_attr(*to_edge_index(out), N))\n pe = torch.stack(pe_list, dim=-1)\n\n data = add_node_attr(data, pe, attr_name=self.attr_name)\n return data\n", "path": "torch_geometric/transforms/add_positional_encoding.py"}]}
| 3,323 | 531 |
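The flaky comparison in the test above is a consequence of eigenvectors only being defined up to sign (and up to an arbitrary basis within repeated eigenvalues), so two legitimate `eigsh` runs can return different vectors. Below is a minimal sketch of the dense, deterministic computation the patch falls back to for small graphs, with an explicit sign convention added for stable comparisons; the function name and the largest-entry-positive convention are illustrative choices, not part of PyTorch Geometric.

```python
import numpy as np
from scipy.sparse import csgraph


def laplacian_eigenvector_pe(adj: np.ndarray, k: int) -> np.ndarray:
    # Symmetric normalized Laplacian of a small undirected graph.
    lap = csgraph.laplacian(adj, normed=True)
    eig_vals, eig_vecs = np.linalg.eigh(lap)   # deterministic dense solver
    order = np.argsort(eig_vals)
    pe = eig_vecs[:, order[1:k + 1]]           # drop the trivial eigenvector
    # Fix each column's sign so repeated runs (or permuted inputs) agree.
    cols = np.arange(pe.shape[1])
    signs = np.sign(pe[np.abs(pe).argmax(axis=0), cols])
    signs[signs == 0] = 1.0
    return pe * signs
```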
gh_patches_debug_37247 | rasdani/github-patches | git_diff | python-pillow__Pillow-2265 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
.ico files are saved as 255x255
### What did you do?
Loading an .ico of the size 256x256 and then saving it results in an .ico file of the size 255x255. I tested it on an example .ico file: http://www.axialis.com/tutorials/iw/down.ico
### What did you expect to happen?
The icon file should stay the same after saving. No other adjustments have been made. See the code below.
### What actually happened?
The saved .ico file ended up as a 255x255 file. Because Windows doesn't support the 255x255 size for .ico files, it is downscaled to 128x128 when used as a desktop icon. [Example](http://i.imgur.com/fm7FTfw.png). The Chromium and Chrome icons are the default ones; the Chrome Canary one was saved with Pillow.
### What versions of Pillow and Python are you using?
Pillow 3.4.2, Python 3.5.2
```python
from PIL import Image
im = Image.open('down.ico')
im.save('saved.ico')
im_saved = Image.open('saved.ico')
In [2]: im.size
Out[2]: (256, 256)
In [3]: im_saved.size
Out[3]: (255, 255)
```
`(255, 255)` appears on the list of sizes at https://github.com/python-pillow/Pillow/blob/master/PIL/IcoImagePlugin.py#L45 .
</issue>
<code>
[start of PIL/IcoImagePlugin.py]
1 #
2 # The Python Imaging Library.
3 # $Id$
4 #
5 # Windows Icon support for PIL
6 #
7 # History:
8 # 96-05-27 fl Created
9 #
10 # Copyright (c) Secret Labs AB 1997.
11 # Copyright (c) Fredrik Lundh 1996.
12 #
13 # See the README file for information on usage and redistribution.
14 #
15
16 # This plugin is a refactored version of Win32IconImagePlugin by Bryan Davis
17 # <[email protected]>.
18 # https://code.google.com/archive/p/casadebender/wikis/Win32IconImagePlugin.wiki
19 #
20 # Icon format references:
21 # * https://en.wikipedia.org/wiki/ICO_(file_format)
22 # * https://msdn.microsoft.com/en-us/library/ms997538.aspx
23
24
25 import struct
26 from io import BytesIO
27
28 from PIL import Image, ImageFile, BmpImagePlugin, PngImagePlugin, _binary
29 from math import log, ceil
30
31 __version__ = "0.1"
32
33 #
34 # --------------------------------------------------------------------
35
36 i8 = _binary.i8
37 i16 = _binary.i16le
38 i32 = _binary.i32le
39
40 _MAGIC = b"\0\0\1\0"
41
42
43 def _save(im, fp, filename):
44 fp.write(_MAGIC) # (2+2)
45 sizes = im.encoderinfo.get("sizes",
46 [(16, 16), (24, 24), (32, 32), (48, 48),
47 (64, 64), (128, 128), (255, 255)])
48 width, height = im.size
49 filter(lambda x: False if (x[0] > width or x[1] > height or
50 x[0] > 255 or x[1] > 255) else True, sizes)
51 fp.write(struct.pack("<H", len(sizes))) # idCount(2)
52 offset = fp.tell() + len(sizes)*16
53 for size in sizes:
54 width, height = size
55 fp.write(struct.pack("B", width)) # bWidth(1)
56 fp.write(struct.pack("B", height)) # bHeight(1)
57 fp.write(b"\0") # bColorCount(1)
58 fp.write(b"\0") # bReserved(1)
59 fp.write(b"\0\0") # wPlanes(2)
60 fp.write(struct.pack("<H", 32)) # wBitCount(2)
61
62 image_io = BytesIO()
63 tmp = im.copy()
64 tmp.thumbnail(size, Image.LANCZOS)
65 tmp.save(image_io, "png")
66 image_io.seek(0)
67 image_bytes = image_io.read()
68 bytes_len = len(image_bytes)
69 fp.write(struct.pack("<I", bytes_len)) # dwBytesInRes(4)
70 fp.write(struct.pack("<I", offset)) # dwImageOffset(4)
71 current = fp.tell()
72 fp.seek(offset)
73 fp.write(image_bytes)
74 offset = offset + bytes_len
75 fp.seek(current)
76
77
78 def _accept(prefix):
79 return prefix[:4] == _MAGIC
80
81
82 class IcoFile(object):
83 def __init__(self, buf):
84 """
85 Parse image from file-like object containing ico file data
86 """
87
88 # check magic
89 s = buf.read(6)
90 if not _accept(s):
91 raise SyntaxError("not an ICO file")
92
93 self.buf = buf
94 self.entry = []
95
96 # Number of items in file
97 self.nb_items = i16(s[4:])
98
99 # Get headers for each item
100 for i in range(self.nb_items):
101 s = buf.read(16)
102
103 icon_header = {
104 'width': i8(s[0]),
105 'height': i8(s[1]),
106 'nb_color': i8(s[2]), # No. of colors in image (0 if >=8bpp)
107 'reserved': i8(s[3]),
108 'planes': i16(s[4:]),
109 'bpp': i16(s[6:]),
110 'size': i32(s[8:]),
111 'offset': i32(s[12:])
112 }
113
114 # See Wikipedia
115 for j in ('width', 'height'):
116 if not icon_header[j]:
117 icon_header[j] = 256
118
119 # See Wikipedia notes about color depth.
120 # We need this just to differ images with equal sizes
121 icon_header['color_depth'] = (icon_header['bpp'] or
122 (icon_header['nb_color'] != 0 and
123 ceil(log(icon_header['nb_color'],
124 2))) or 256)
125
126 icon_header['dim'] = (icon_header['width'], icon_header['height'])
127 icon_header['square'] = (icon_header['width'] *
128 icon_header['height'])
129
130 self.entry.append(icon_header)
131
132 self.entry = sorted(self.entry, key=lambda x: x['color_depth'])
133 # ICO images are usually squares
134 # self.entry = sorted(self.entry, key=lambda x: x['width'])
135 self.entry = sorted(self.entry, key=lambda x: x['square'])
136 self.entry.reverse()
137
138 def sizes(self):
139 """
140 Get a list of all available icon sizes and color depths.
141 """
142 return {(h['width'], h['height']) for h in self.entry}
143
144 def getimage(self, size, bpp=False):
145 """
146 Get an image from the icon
147 """
148 for (i, h) in enumerate(self.entry):
149 if size == h['dim'] and (bpp is False or bpp == h['color_depth']):
150 return self.frame(i)
151 return self.frame(0)
152
153 def frame(self, idx):
154 """
155 Get an image from frame idx
156 """
157
158 header = self.entry[idx]
159
160 self.buf.seek(header['offset'])
161 data = self.buf.read(8)
162 self.buf.seek(header['offset'])
163
164 if data[:8] == PngImagePlugin._MAGIC:
165 # png frame
166 im = PngImagePlugin.PngImageFile(self.buf)
167 else:
168 # XOR + AND mask bmp frame
169 im = BmpImagePlugin.DibImageFile(self.buf)
170
171 # change tile dimension to only encompass XOR image
172 im.size = (im.size[0], int(im.size[1] / 2))
173 d, e, o, a = im.tile[0]
174 im.tile[0] = d, (0, 0) + im.size, o, a
175
176 # figure out where AND mask image starts
177 mode = a[0]
178 bpp = 8
179 for k in BmpImagePlugin.BIT2MODE.keys():
180 if mode == BmpImagePlugin.BIT2MODE[k][1]:
181 bpp = k
182 break
183
184 if 32 == bpp:
185 # 32-bit color depth icon image allows semitransparent areas
186 # PIL's DIB format ignores transparency bits, recover them.
187 # The DIB is packed in BGRX byte order where X is the alpha
188 # channel.
189
190 # Back up to start of bmp data
191 self.buf.seek(o)
192 # extract every 4th byte (eg. 3,7,11,15,...)
193 alpha_bytes = self.buf.read(im.size[0] * im.size[1] * 4)[3::4]
194
195 # convert to an 8bpp grayscale image
196 mask = Image.frombuffer(
197 'L', # 8bpp
198 im.size, # (w, h)
199 alpha_bytes, # source chars
200 'raw', # raw decoder
201 ('L', 0, -1) # 8bpp inverted, unpadded, reversed
202 )
203 else:
204 # get AND image from end of bitmap
205 w = im.size[0]
206 if (w % 32) > 0:
207 # bitmap row data is aligned to word boundaries
208 w += 32 - (im.size[0] % 32)
209
210 # the total mask data is
211 # padded row size * height / bits per char
212
213 and_mask_offset = o + int(im.size[0] * im.size[1] *
214 (bpp / 8.0))
215 total_bytes = int((w * im.size[1]) / 8)
216
217 self.buf.seek(and_mask_offset)
218 maskData = self.buf.read(total_bytes)
219
220 # convert raw data to image
221 mask = Image.frombuffer(
222 '1', # 1 bpp
223 im.size, # (w, h)
224 maskData, # source chars
225 'raw', # raw decoder
226 ('1;I', int(w/8), -1) # 1bpp inverted, padded, reversed
227 )
228
229 # now we have two images, im is XOR image and mask is AND image
230
231 # apply mask image as alpha channel
232 im = im.convert('RGBA')
233 im.putalpha(mask)
234
235 return im
236
237
238 ##
239 # Image plugin for Windows Icon files.
240
241 class IcoImageFile(ImageFile.ImageFile):
242 """
243 PIL read-only image support for Microsoft Windows .ico files.
244
245 By default the largest resolution image in the file will be loaded. This
246 can be changed by altering the 'size' attribute before calling 'load'.
247
248 The info dictionary has a key 'sizes' that is a list of the sizes available
249 in the icon file.
250
251 Handles classic, XP and Vista icon formats.
252
253 This plugin is a refactored version of Win32IconImagePlugin by Bryan Davis
254 <[email protected]>.
255 https://code.google.com/archive/p/casadebender/wikis/Win32IconImagePlugin.wiki
256 """
257 format = "ICO"
258 format_description = "Windows Icon"
259
260 def _open(self):
261 self.ico = IcoFile(self.fp)
262 self.info['sizes'] = self.ico.sizes()
263 self.size = self.ico.entry[0]['dim']
264 self.load()
265
266 def load(self):
267 im = self.ico.getimage(self.size)
268 # if tile is PNG, it won't really be loaded yet
269 im.load()
270 self.im = im.im
271 self.mode = im.mode
272 self.size = im.size
273
274 def load_seek(self):
275 # Flag the ImageFile.Parser so that it
276 # just does all the decode at the end.
277 pass
278 #
279 # --------------------------------------------------------------------
280
281 Image.register_open(IcoImageFile.format, IcoImageFile, _accept)
282 Image.register_save(IcoImageFile.format, _save)
283 Image.register_extension(IcoImageFile.format, ".ico")
284
[end of PIL/IcoImagePlugin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/PIL/IcoImagePlugin.py b/PIL/IcoImagePlugin.py
--- a/PIL/IcoImagePlugin.py
+++ b/PIL/IcoImagePlugin.py
@@ -44,16 +44,17 @@
fp.write(_MAGIC) # (2+2)
sizes = im.encoderinfo.get("sizes",
[(16, 16), (24, 24), (32, 32), (48, 48),
- (64, 64), (128, 128), (255, 255)])
+ (64, 64), (128, 128), (256, 256)])
width, height = im.size
filter(lambda x: False if (x[0] > width or x[1] > height or
- x[0] > 255 or x[1] > 255) else True, sizes)
+ x[0] > 256 or x[1] > 256) else True, sizes)
fp.write(struct.pack("<H", len(sizes))) # idCount(2)
offset = fp.tell() + len(sizes)*16
for size in sizes:
width, height = size
- fp.write(struct.pack("B", width)) # bWidth(1)
- fp.write(struct.pack("B", height)) # bHeight(1)
+ # 0 means 256
+ fp.write(struct.pack("B", width if width < 256 else 0)) # bWidth(1)
+ fp.write(struct.pack("B", height if height < 256 else 0)) # bHeight(1)
fp.write(b"\0") # bColorCount(1)
fp.write(b"\0") # bReserved(1)
fp.write(b"\0\0") # wPlanes(2)
@@ -215,13 +216,13 @@
total_bytes = int((w * im.size[1]) / 8)
self.buf.seek(and_mask_offset)
- maskData = self.buf.read(total_bytes)
+ mask_data = self.buf.read(total_bytes)
# convert raw data to image
mask = Image.frombuffer(
'1', # 1 bpp
im.size, # (w, h)
- maskData, # source chars
+ mask_data, # source chars
'raw', # raw decoder
('1;I', int(w/8), -1) # 1bpp inverted, padded, reversed
)
@@ -278,6 +279,7 @@
#
# --------------------------------------------------------------------
+
Image.register_open(IcoImageFile.format, IcoImageFile, _accept)
Image.register_save(IcoImageFile.format, _save)
Image.register_extension(IcoImageFile.format, ".ico")
|
{"golden_diff": "diff --git a/PIL/IcoImagePlugin.py b/PIL/IcoImagePlugin.py\n--- a/PIL/IcoImagePlugin.py\n+++ b/PIL/IcoImagePlugin.py\n@@ -44,16 +44,17 @@\n fp.write(_MAGIC) # (2+2)\n sizes = im.encoderinfo.get(\"sizes\",\n [(16, 16), (24, 24), (32, 32), (48, 48),\n- (64, 64), (128, 128), (255, 255)])\n+ (64, 64), (128, 128), (256, 256)])\n width, height = im.size\n filter(lambda x: False if (x[0] > width or x[1] > height or\n- x[0] > 255 or x[1] > 255) else True, sizes)\n+ x[0] > 256 or x[1] > 256) else True, sizes)\n fp.write(struct.pack(\"<H\", len(sizes))) # idCount(2)\n offset = fp.tell() + len(sizes)*16\n for size in sizes:\n width, height = size\n- fp.write(struct.pack(\"B\", width)) # bWidth(1)\n- fp.write(struct.pack(\"B\", height)) # bHeight(1)\n+ # 0 means 256\n+ fp.write(struct.pack(\"B\", width if width < 256 else 0)) # bWidth(1)\n+ fp.write(struct.pack(\"B\", height if height < 256 else 0)) # bHeight(1)\n fp.write(b\"\\0\") # bColorCount(1)\n fp.write(b\"\\0\") # bReserved(1)\n fp.write(b\"\\0\\0\") # wPlanes(2)\n@@ -215,13 +216,13 @@\n total_bytes = int((w * im.size[1]) / 8)\n \n self.buf.seek(and_mask_offset)\n- maskData = self.buf.read(total_bytes)\n+ mask_data = self.buf.read(total_bytes)\n \n # convert raw data to image\n mask = Image.frombuffer(\n '1', # 1 bpp\n im.size, # (w, h)\n- maskData, # source chars\n+ mask_data, # source chars\n 'raw', # raw decoder\n ('1;I', int(w/8), -1) # 1bpp inverted, padded, reversed\n )\n@@ -278,6 +279,7 @@\n #\n # --------------------------------------------------------------------\n \n+\n Image.register_open(IcoImageFile.format, IcoImageFile, _accept)\n Image.register_save(IcoImageFile.format, _save)\n Image.register_extension(IcoImageFile.format, \".ico\")\n", "issue": ".ico files are saved as 255x255\n### What did you do?\r\n\r\nLoading an .ico of the size 256x256 and then saving it results in an .ico file of the size 255x255. I tested it on an example .ico file: http://www.axialis.com/tutorials/iw/down.ico\r\n\r\n### What did you expect to happen?\r\n\r\nThe icon file should stay the same after saving. No other adjustments have been made. See the code below.\r\n\r\n### What actually happened?\r\n\r\nThe saved .ico file ended up as 255x255 file. Because Windows doesn't support 255x255 size for .ico files, it is downscaled to 128x128 when used as desktop icons. [Example](http://i.imgur.com/fm7FTfw.png). 
The Chromium and Chrome icons are default ones, the Chrome Canary one was saved with Pillow.\r\n\r\n### What versions of Pillow and Python are you using?\r\nPillow 3.4.2, Python 3.5.2\r\n\r\n```python\r\nfrom PIL import Image\r\nim = Image.open('down.ico')\r\nim.save('saved.ico')\r\nim_saved = Image.open('saved.ico')\r\n\r\nIn [2]: im.size\r\nOut[2]: (256, 256)\r\n\r\nIn [3]: im_saved.size\r\nOut[3]: (255, 255)\r\n```\r\n\r\n`(255, 255)` appears on the list of sizes at https://github.com/python-pillow/Pillow/blob/master/PIL/IcoImagePlugin.py#L45 .\n", "before_files": [{"content": "#\n# The Python Imaging Library.\n# $Id$\n#\n# Windows Icon support for PIL\n#\n# History:\n# 96-05-27 fl Created\n#\n# Copyright (c) Secret Labs AB 1997.\n# Copyright (c) Fredrik Lundh 1996.\n#\n# See the README file for information on usage and redistribution.\n#\n\n# This plugin is a refactored version of Win32IconImagePlugin by Bryan Davis\n# <[email protected]>.\n# https://code.google.com/archive/p/casadebender/wikis/Win32IconImagePlugin.wiki\n#\n# Icon format references:\n# * https://en.wikipedia.org/wiki/ICO_(file_format)\n# * https://msdn.microsoft.com/en-us/library/ms997538.aspx\n\n\nimport struct\nfrom io import BytesIO\n\nfrom PIL import Image, ImageFile, BmpImagePlugin, PngImagePlugin, _binary\nfrom math import log, ceil\n\n__version__ = \"0.1\"\n\n#\n# --------------------------------------------------------------------\n\ni8 = _binary.i8\ni16 = _binary.i16le\ni32 = _binary.i32le\n\n_MAGIC = b\"\\0\\0\\1\\0\"\n\n\ndef _save(im, fp, filename):\n fp.write(_MAGIC) # (2+2)\n sizes = im.encoderinfo.get(\"sizes\",\n [(16, 16), (24, 24), (32, 32), (48, 48),\n (64, 64), (128, 128), (255, 255)])\n width, height = im.size\n filter(lambda x: False if (x[0] > width or x[1] > height or\n x[0] > 255 or x[1] > 255) else True, sizes)\n fp.write(struct.pack(\"<H\", len(sizes))) # idCount(2)\n offset = fp.tell() + len(sizes)*16\n for size in sizes:\n width, height = size\n fp.write(struct.pack(\"B\", width)) # bWidth(1)\n fp.write(struct.pack(\"B\", height)) # bHeight(1)\n fp.write(b\"\\0\") # bColorCount(1)\n fp.write(b\"\\0\") # bReserved(1)\n fp.write(b\"\\0\\0\") # wPlanes(2)\n fp.write(struct.pack(\"<H\", 32)) # wBitCount(2)\n\n image_io = BytesIO()\n tmp = im.copy()\n tmp.thumbnail(size, Image.LANCZOS)\n tmp.save(image_io, \"png\")\n image_io.seek(0)\n image_bytes = image_io.read()\n bytes_len = len(image_bytes)\n fp.write(struct.pack(\"<I\", bytes_len)) # dwBytesInRes(4)\n fp.write(struct.pack(\"<I\", offset)) # dwImageOffset(4)\n current = fp.tell()\n fp.seek(offset)\n fp.write(image_bytes)\n offset = offset + bytes_len\n fp.seek(current)\n\n\ndef _accept(prefix):\n return prefix[:4] == _MAGIC\n\n\nclass IcoFile(object):\n def __init__(self, buf):\n \"\"\"\n Parse image from file-like object containing ico file data\n \"\"\"\n\n # check magic\n s = buf.read(6)\n if not _accept(s):\n raise SyntaxError(\"not an ICO file\")\n\n self.buf = buf\n self.entry = []\n\n # Number of items in file\n self.nb_items = i16(s[4:])\n\n # Get headers for each item\n for i in range(self.nb_items):\n s = buf.read(16)\n\n icon_header = {\n 'width': i8(s[0]),\n 'height': i8(s[1]),\n 'nb_color': i8(s[2]), # No. 
of colors in image (0 if >=8bpp)\n 'reserved': i8(s[3]),\n 'planes': i16(s[4:]),\n 'bpp': i16(s[6:]),\n 'size': i32(s[8:]),\n 'offset': i32(s[12:])\n }\n\n # See Wikipedia\n for j in ('width', 'height'):\n if not icon_header[j]:\n icon_header[j] = 256\n\n # See Wikipedia notes about color depth.\n # We need this just to differ images with equal sizes\n icon_header['color_depth'] = (icon_header['bpp'] or\n (icon_header['nb_color'] != 0 and\n ceil(log(icon_header['nb_color'],\n 2))) or 256)\n\n icon_header['dim'] = (icon_header['width'], icon_header['height'])\n icon_header['square'] = (icon_header['width'] *\n icon_header['height'])\n\n self.entry.append(icon_header)\n\n self.entry = sorted(self.entry, key=lambda x: x['color_depth'])\n # ICO images are usually squares\n # self.entry = sorted(self.entry, key=lambda x: x['width'])\n self.entry = sorted(self.entry, key=lambda x: x['square'])\n self.entry.reverse()\n\n def sizes(self):\n \"\"\"\n Get a list of all available icon sizes and color depths.\n \"\"\"\n return {(h['width'], h['height']) for h in self.entry}\n\n def getimage(self, size, bpp=False):\n \"\"\"\n Get an image from the icon\n \"\"\"\n for (i, h) in enumerate(self.entry):\n if size == h['dim'] and (bpp is False or bpp == h['color_depth']):\n return self.frame(i)\n return self.frame(0)\n\n def frame(self, idx):\n \"\"\"\n Get an image from frame idx\n \"\"\"\n\n header = self.entry[idx]\n\n self.buf.seek(header['offset'])\n data = self.buf.read(8)\n self.buf.seek(header['offset'])\n\n if data[:8] == PngImagePlugin._MAGIC:\n # png frame\n im = PngImagePlugin.PngImageFile(self.buf)\n else:\n # XOR + AND mask bmp frame\n im = BmpImagePlugin.DibImageFile(self.buf)\n\n # change tile dimension to only encompass XOR image\n im.size = (im.size[0], int(im.size[1] / 2))\n d, e, o, a = im.tile[0]\n im.tile[0] = d, (0, 0) + im.size, o, a\n\n # figure out where AND mask image starts\n mode = a[0]\n bpp = 8\n for k in BmpImagePlugin.BIT2MODE.keys():\n if mode == BmpImagePlugin.BIT2MODE[k][1]:\n bpp = k\n break\n\n if 32 == bpp:\n # 32-bit color depth icon image allows semitransparent areas\n # PIL's DIB format ignores transparency bits, recover them.\n # The DIB is packed in BGRX byte order where X is the alpha\n # channel.\n\n # Back up to start of bmp data\n self.buf.seek(o)\n # extract every 4th byte (eg. 
3,7,11,15,...)\n alpha_bytes = self.buf.read(im.size[0] * im.size[1] * 4)[3::4]\n\n # convert to an 8bpp grayscale image\n mask = Image.frombuffer(\n 'L', # 8bpp\n im.size, # (w, h)\n alpha_bytes, # source chars\n 'raw', # raw decoder\n ('L', 0, -1) # 8bpp inverted, unpadded, reversed\n )\n else:\n # get AND image from end of bitmap\n w = im.size[0]\n if (w % 32) > 0:\n # bitmap row data is aligned to word boundaries\n w += 32 - (im.size[0] % 32)\n\n # the total mask data is\n # padded row size * height / bits per char\n\n and_mask_offset = o + int(im.size[0] * im.size[1] *\n (bpp / 8.0))\n total_bytes = int((w * im.size[1]) / 8)\n\n self.buf.seek(and_mask_offset)\n maskData = self.buf.read(total_bytes)\n\n # convert raw data to image\n mask = Image.frombuffer(\n '1', # 1 bpp\n im.size, # (w, h)\n maskData, # source chars\n 'raw', # raw decoder\n ('1;I', int(w/8), -1) # 1bpp inverted, padded, reversed\n )\n\n # now we have two images, im is XOR image and mask is AND image\n\n # apply mask image as alpha channel\n im = im.convert('RGBA')\n im.putalpha(mask)\n\n return im\n\n\n##\n# Image plugin for Windows Icon files.\n\nclass IcoImageFile(ImageFile.ImageFile):\n \"\"\"\n PIL read-only image support for Microsoft Windows .ico files.\n\n By default the largest resolution image in the file will be loaded. This\n can be changed by altering the 'size' attribute before calling 'load'.\n\n The info dictionary has a key 'sizes' that is a list of the sizes available\n in the icon file.\n\n Handles classic, XP and Vista icon formats.\n\n This plugin is a refactored version of Win32IconImagePlugin by Bryan Davis\n <[email protected]>.\n https://code.google.com/archive/p/casadebender/wikis/Win32IconImagePlugin.wiki\n \"\"\"\n format = \"ICO\"\n format_description = \"Windows Icon\"\n\n def _open(self):\n self.ico = IcoFile(self.fp)\n self.info['sizes'] = self.ico.sizes()\n self.size = self.ico.entry[0]['dim']\n self.load()\n\n def load(self):\n im = self.ico.getimage(self.size)\n # if tile is PNG, it won't really be loaded yet\n im.load()\n self.im = im.im\n self.mode = im.mode\n self.size = im.size\n\n def load_seek(self):\n # Flag the ImageFile.Parser so that it\n # just does all the decode at the end.\n pass\n#\n# --------------------------------------------------------------------\n\nImage.register_open(IcoImageFile.format, IcoImageFile, _accept)\nImage.register_save(IcoImageFile.format, _save)\nImage.register_extension(IcoImageFile.format, \".ico\")\n", "path": "PIL/IcoImagePlugin.py"}]}
| 4,014 | 669 |
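The Pillow fix above hinges on a quirk of the ICO directory format: width and height are stored as single bytes, and a stored value of 0 conventionally means 256. A small round-trip sketch of that convention is shown below; the helper names are illustrative and not part of Pillow.

```python
import struct


def encode_ico_dimension(value: int) -> bytes:
    # ICO directory entries hold width/height in one byte; 0 stands for 256.
    if not 1 <= value <= 256:
        raise ValueError("ICO dimensions must be between 1 and 256")
    return struct.pack("B", value if value < 256 else 0)


def decode_ico_dimension(raw: bytes) -> int:
    value = struct.unpack("B", raw)[0]
    return value or 256


assert decode_ico_dimension(encode_ico_dimension(256)) == 256
assert decode_ico_dimension(encode_ico_dimension(48)) == 48
```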
gh_patches_debug_825 | rasdani/github-patches | git_diff | bridgecrewio__checkov-1905 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bump boto3 to the latest version
**Describe the bug**
I am trying to install checkov and the latest boto3 version within an environment. However, checkov depends on version 1.17.*
Could you please bump boto3 to the latest version?
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 import logging
3 import os
4 from importlib import util
5 from os import path
6
7 import setuptools
8 from setuptools import setup
9
10 # read the contents of your README file
11 this_directory = path.abspath(path.dirname(__file__))
12 with open(path.join(this_directory, "README.md"), encoding="utf-8") as f:
13 long_description = f.read()
14
15 logger = logging.getLogger(__name__)
16 spec = util.spec_from_file_location(
17 "checkov.version", os.path.join("checkov", "version.py")
18 )
19 # noinspection PyUnresolvedReferences
20 mod = util.module_from_spec(spec)
21 spec.loader.exec_module(mod) # type: ignore
22 version = mod.version # type: ignore
23
24 setup(
25 extras_require={
26 "dev": [
27 "pytest==5.3.1",
28 "coverage==5.5",
29 "coverage-badge",
30 "GitPython==3.1.7",
31 "bandit",
32 "jsonschema",
33 ]
34 },
35 install_requires=[
36 "bc-python-hcl2>=0.3.24",
37 "cloudsplaining>=0.4.1",
38 "deep_merge",
39 "tabulate",
40 "colorama",
41 "termcolor",
42 "junit-xml>=1.9",
43 "dpath>=1.5.0,<2",
44 "pyyaml>=5.4.1",
45 "boto3==1.17.*",
46 "GitPython",
47 "jmespath",
48 "tqdm",
49 "update_checker",
50 "semantic_version",
51 "packaging",
52 "networkx",
53 "dockerfile-parse",
54 "docker",
55 "configargparse",
56 "detect-secrets",
57 "policyuniverse",
58 "typing-extensions",
59 "cachetools",
60 "cyclonedx-python-lib==0.6.2"
61 ],
62 license="Apache License 2.0",
63 name="checkov",
64 version=version,
65 python_requires=">=3.7",
66 description="Infrastructure as code static analysis",
67 author="bridgecrew",
68 author_email="[email protected]",
69 url="https://github.com/bridgecrewio/checkov",
70 packages=setuptools.find_packages(exclude=["tests*", "integration_tests*"]),
71 include_package_data=True,
72 package_dir={
73 "checkov.terraform.checks.graph_checks": "checkov/terraform/checks/graph_checks"
74 },
75 package_data={
76 "checkov.terraform.checks.graph_checks": [
77 "aws/*.yaml",
78 "gcp/*.yaml",
79 "azure/*.yaml",
80 ]
81 },
82 scripts=["bin/checkov", "bin/checkov.cmd"],
83 long_description=long_description,
84 long_description_content_type="text/markdown",
85 classifiers=[
86 "Environment :: Console",
87 "Intended Audience :: Developers",
88 "Intended Audience :: System Administrators",
89 "Programming Language :: Python :: 3.7",
90 "Programming Language :: Python :: 3.8",
91 "Programming Language :: Python :: 3.9",
92 "Topic :: Security",
93 "Topic :: Software Development :: Build Tools",
94 ],
95 )
96
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -42,7 +42,7 @@
"junit-xml>=1.9",
"dpath>=1.5.0,<2",
"pyyaml>=5.4.1",
- "boto3==1.17.*",
+ "boto3>=1.17",
"GitPython",
"jmespath",
"tqdm",
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -42,7 +42,7 @@\n \"junit-xml>=1.9\",\n \"dpath>=1.5.0,<2\",\n \"pyyaml>=5.4.1\",\n- \"boto3==1.17.*\",\n+ \"boto3>=1.17\",\n \"GitPython\",\n \"jmespath\",\n \"tqdm\",\n", "issue": "Bump boto3 to the latest version\n**Describe the bug**\r\nI am trying to installing checkov and the latest boto3 version within an environment. However, checkov depends on version 1.17.* \r\n\r\nCould you please bump boto3 to the latest version?\n", "before_files": [{"content": "#!/usr/bin/env python\nimport logging\nimport os\nfrom importlib import util\nfrom os import path\n\nimport setuptools\nfrom setuptools import setup\n\n# read the contents of your README file\nthis_directory = path.abspath(path.dirname(__file__))\nwith open(path.join(this_directory, \"README.md\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nlogger = logging.getLogger(__name__)\nspec = util.spec_from_file_location(\n \"checkov.version\", os.path.join(\"checkov\", \"version.py\")\n)\n# noinspection PyUnresolvedReferences\nmod = util.module_from_spec(spec)\nspec.loader.exec_module(mod) # type: ignore\nversion = mod.version # type: ignore\n\nsetup(\n extras_require={\n \"dev\": [\n \"pytest==5.3.1\",\n \"coverage==5.5\",\n \"coverage-badge\",\n \"GitPython==3.1.7\",\n \"bandit\",\n \"jsonschema\",\n ]\n },\n install_requires=[\n \"bc-python-hcl2>=0.3.24\",\n \"cloudsplaining>=0.4.1\",\n \"deep_merge\",\n \"tabulate\",\n \"colorama\",\n \"termcolor\",\n \"junit-xml>=1.9\",\n \"dpath>=1.5.0,<2\",\n \"pyyaml>=5.4.1\",\n \"boto3==1.17.*\",\n \"GitPython\",\n \"jmespath\",\n \"tqdm\",\n \"update_checker\",\n \"semantic_version\",\n \"packaging\",\n \"networkx\",\n \"dockerfile-parse\",\n \"docker\",\n \"configargparse\",\n \"detect-secrets\",\n \"policyuniverse\",\n \"typing-extensions\",\n \"cachetools\",\n \"cyclonedx-python-lib==0.6.2\"\n ],\n license=\"Apache License 2.0\",\n name=\"checkov\",\n version=version,\n python_requires=\">=3.7\",\n description=\"Infrastructure as code static analysis\",\n author=\"bridgecrew\",\n author_email=\"[email protected]\",\n url=\"https://github.com/bridgecrewio/checkov\",\n packages=setuptools.find_packages(exclude=[\"tests*\", \"integration_tests*\"]),\n include_package_data=True,\n package_dir={\n \"checkov.terraform.checks.graph_checks\": \"checkov/terraform/checks/graph_checks\"\n },\n package_data={\n \"checkov.terraform.checks.graph_checks\": [\n \"aws/*.yaml\",\n \"gcp/*.yaml\",\n \"azure/*.yaml\",\n ]\n },\n scripts=[\"bin/checkov\", \"bin/checkov.cmd\"],\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n classifiers=[\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: System Administrators\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Topic :: Security\",\n \"Topic :: Software Development :: Build Tools\",\n ],\n)\n", "path": "setup.py"}]}
| 1,442 | 109 |
gh_patches_debug_16881
|
rasdani/github-patches
|
git_diff
|
facebookresearch__CompilerGym-160
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add support for Python 3.9
## 🚀 Feature
Add support for Python 3.9. This shouldn't require any code changes, but the dependencies may need updating.
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python3
2 #
3 # Copyright (c) Facebook, Inc. and its affiliates.
4 #
5 # This source code is licensed under the MIT license found in the
6 # LICENSE file in the root directory of this source tree.
7
8 import distutils.util
9 import io
10
11 import setuptools
12
13 with open("VERSION") as f:
14 version = f.read().strip()
15 with open("README.md") as f:
16 # Force UTF-8 file encoding to support non-ascii characters in the readme.
17 with io.open("README.md", encoding="utf-8") as f:
18 long_description = f.read()
19 with open("compiler_gym/requirements.txt") as f:
20 requirements = [ln.split("#")[0].rstrip() for ln in f.readlines()]
21
22 # When building a bdist_wheel we need to set the appropriate tags: this package
23 # includes compiled binaries, and does not include compiled python extensions.
24 try:
25 from wheel.bdist_wheel import bdist_wheel as _bdist_wheel
26
27 class bdist_wheel(_bdist_wheel):
28 def finalize_options(self):
29 _bdist_wheel.finalize_options(self)
30 self.root_is_pure = False
31
32 def get_tag(self):
33 python, abi, plat = _bdist_wheel.get_tag(self)
34 python, abi = "py3", "none"
35 return python, abi, plat
36
37
38 except ImportError:
39 bdist_wheel = None
40
41 setuptools.setup(
42 name="compiler_gym",
43 version=version,
44 description="Reinforcement learning environments for compiler research",
45 author="Facebook AI Research",
46 long_description=long_description,
47 long_description_content_type="text/markdown",
48 url="https://github.com/facebookresearch/CompilerGym",
49 license="MIT",
50 packages=[
51 "compiler_gym",
52 "compiler_gym.bin",
53 "compiler_gym.datasets",
54 "compiler_gym.envs",
55 "compiler_gym.envs.llvm",
56 "compiler_gym.envs.llvm.service",
57 "compiler_gym.envs.llvm.service.passes",
58 "compiler_gym.service",
59 "compiler_gym.service.proto",
60 "compiler_gym.spaces",
61 "compiler_gym.third_party",
62 "compiler_gym.third_party.autophase",
63 "compiler_gym.third_party.llvm",
64 "compiler_gym.third_party.inst2vec",
65 "compiler_gym.util",
66 "compiler_gym.util.flags",
67 "compiler_gym.views",
68 ],
69 package_dir={
70 "": "bazel-bin/package.runfiles/CompilerGym",
71 },
72 package_data={
73 "compiler_gym": [
74 "envs/llvm/service/passes/*.txt",
75 "envs/llvm/service/compiler_gym-llvm-service",
76 "envs/llvm/service/libLLVMPolly.so",
77 "third_party/inst2vec/*.pickle",
78 "third_party/cBench/benchmarks.txt",
79 "third_party/cBench/cBench-v*/*",
80 "third_party/cBench/runtime_data/**/*",
81 ]
82 },
83 install_requires=requirements,
84 include_package_data=True,
85 python_requires=">=3.6",
86 classifiers=[
87 "Development Status :: 2 - Pre-Alpha",
88 "Environment :: Console",
89 "Intended Audience :: Developers",
90 "Intended Audience :: Science/Research",
91 "License :: OSI Approved :: MIT License",
92 "Topic :: Scientific/Engineering :: Artificial Intelligence",
93 "Topic :: Software Development :: Compilers",
94 ],
95 cmdclass={"bdist_wheel": bdist_wheel},
96 platforms=[distutils.util.get_platform()],
97 zip_safe=False,
98 )
99
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -84,11 +84,16 @@
include_package_data=True,
python_requires=">=3.6",
classifiers=[
- "Development Status :: 2 - Pre-Alpha",
+ "Development Status :: 3 - Alpha",
"Environment :: Console",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
+ "Programming Language :: Python :: 3.6",
+ "Programming Language :: Python :: 3.7",
+ "Programming Language :: Python :: 3.8",
+ "Programming Language :: Python :: 3.9",
+ "Programming Language :: Python :: 3",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Software Development :: Compilers",
],
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -84,11 +84,16 @@\n include_package_data=True,\n python_requires=\">=3.6\",\n classifiers=[\n- \"Development Status :: 2 - Pre-Alpha\",\n+ \"Development Status :: 3 - Alpha\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: MIT License\",\n+ \"Programming Language :: Python :: 3.6\",\n+ \"Programming Language :: Python :: 3.7\",\n+ \"Programming Language :: Python :: 3.8\",\n+ \"Programming Language :: Python :: 3.9\",\n+ \"Programming Language :: Python :: 3\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Topic :: Software Development :: Compilers\",\n ],\n", "issue": "Add support for Python 3.9\n## \ud83d\ude80 Feature\r\n\r\nAdd support for python 3.9. This shouldn't require any code changes, but the dependencies may not updating.\n", "before_files": [{"content": "#!/usr/bin/env python3\n#\n# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport distutils.util\nimport io\n\nimport setuptools\n\nwith open(\"VERSION\") as f:\n version = f.read().strip()\nwith open(\"README.md\") as f:\n # Force UTF-8 file encoding to support non-ascii characters in the readme.\n with io.open(\"README.md\", encoding=\"utf-8\") as f:\n long_description = f.read()\nwith open(\"compiler_gym/requirements.txt\") as f:\n requirements = [ln.split(\"#\")[0].rstrip() for ln in f.readlines()]\n\n# When building a bdist_wheel we need to set the appropriate tags: this package\n# includes compiled binaries, and does not include compiled python extensions.\ntry:\n from wheel.bdist_wheel import bdist_wheel as _bdist_wheel\n\n class bdist_wheel(_bdist_wheel):\n def finalize_options(self):\n _bdist_wheel.finalize_options(self)\n self.root_is_pure = False\n\n def get_tag(self):\n python, abi, plat = _bdist_wheel.get_tag(self)\n python, abi = \"py3\", \"none\"\n return python, abi, plat\n\n\nexcept ImportError:\n bdist_wheel = None\n\nsetuptools.setup(\n name=\"compiler_gym\",\n version=version,\n description=\"Reinforcement learning environments for compiler research\",\n author=\"Facebook AI Research\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/facebookresearch/CompilerGym\",\n license=\"MIT\",\n packages=[\n \"compiler_gym\",\n \"compiler_gym.bin\",\n \"compiler_gym.datasets\",\n \"compiler_gym.envs\",\n \"compiler_gym.envs.llvm\",\n \"compiler_gym.envs.llvm.service\",\n \"compiler_gym.envs.llvm.service.passes\",\n \"compiler_gym.service\",\n \"compiler_gym.service.proto\",\n \"compiler_gym.spaces\",\n \"compiler_gym.third_party\",\n \"compiler_gym.third_party.autophase\",\n \"compiler_gym.third_party.llvm\",\n \"compiler_gym.third_party.inst2vec\",\n \"compiler_gym.util\",\n \"compiler_gym.util.flags\",\n \"compiler_gym.views\",\n ],\n package_dir={\n \"\": \"bazel-bin/package.runfiles/CompilerGym\",\n },\n package_data={\n \"compiler_gym\": [\n \"envs/llvm/service/passes/*.txt\",\n \"envs/llvm/service/compiler_gym-llvm-service\",\n \"envs/llvm/service/libLLVMPolly.so\",\n \"third_party/inst2vec/*.pickle\",\n \"third_party/cBench/benchmarks.txt\",\n \"third_party/cBench/cBench-v*/*\",\n \"third_party/cBench/runtime_data/**/*\",\n ]\n },\n install_requires=requirements,\n include_package_data=True,\n 
python_requires=\">=3.6\",\n classifiers=[\n \"Development Status :: 2 - Pre-Alpha\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: MIT License\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Topic :: Software Development :: Compilers\",\n ],\n cmdclass={\"bdist_wheel\": bdist_wheel},\n platforms=[distutils.util.get_platform()],\n zip_safe=False,\n)\n", "path": "setup.py"}]}
| 1,530 | 196 |
gh_patches_debug_33374
|
rasdani/github-patches
|
git_diff
|
sql-machine-learning__elasticdl-308
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
non-trainable variables update in training
Some keras layers have non-trainable variables, such as BatchNorm and RNN. These variables are not updated by gradients, but by update ops.
We need to run the update ops to update these internal states of the layers.
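For example, a minimal sketch (not taken from this code base; it assumes eager execution and the stock `tf.keras` BatchNormalization layer) showing that the moving statistics change only when the layer is called with `training=True`, without any gradient step:

```python
import numpy as np
import tensorflow as tf

bn = tf.keras.layers.BatchNormalization()
x = tf.constant(np.random.rand(8, 4).astype(np.float32))

_ = bn(x, training=False)      # inference path: moving stats keep their initial values
_ = bn(x, training=True)       # training path: moving_mean / moving_variance are updated
print(bn.moving_mean.numpy())  # non-trainable variable, changed by update ops, not gradients
```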
</issue>
<code>
[start of elasticdl/worker/worker.py]
1 import tensorflow as tf
2 assert tf.executing_eagerly()
3
4 from google.protobuf import empty_pb2
5 from tensorflow.python.ops import math_ops
6 from proto import master_pb2_grpc
7 from proto import master_pb2
8 from common.ndarray import ndarray_to_tensor, tensor_to_ndarray
9 import itertools
10 import recordio
11
12 # the default max number of a minibatch retrain as its gradients are not accepted by master.
13 DEFAULT_MAX_MINIBATCH_RETRAIN_NUM = 64
14
15 class Worker(object):
16 """ElasticDL worker"""
17
18 def __init__(self,
19 model_cls,
20 channel=None,
21 max_retrain_num=DEFAULT_MAX_MINIBATCH_RETRAIN_NUM):
22 """
23 Arguments:
24 model_cls: A class to define the model
25 channel: grpc channel
26 max_retrain_num: max number of a minibatch retrain as its gradients are not accepted by master
27 """
28
29 self._model = model_cls()
30 self._model.build(model_cls.input_shapes())
31
32 self._input_fn = model_cls.input_fn
33 self._opt_fn = model_cls.optimizer
34
35 if channel is None:
36 self._stub = None
37 else:
38 self._stub = master_pb2_grpc.MasterStub(channel)
39 self._max_retrain_num = max_retrain_num
40 self._model_version = -1
41
42 def get_task(self):
43 """
44 get task from master
45 """
46 return self._stub.GetTask(empty_pb2.Empty())
47
48 def get_model(self, min_version):
49 """
50 get model from master, and update model_version
51 """
52 req = master_pb2.GetModelRequest()
53 req.min_version = min_version
54 model = self._stub.GetModel(req)
55
56 for var in self._model.trainable_variables:
57 # Assumes all trainable variables exist in model.param.
58 var.assign(
59 tensor_to_ndarray(model.param[var.name]))
60 self._model_version = model.version
61
62 def report_task_result(self, task_id, err_msg):
63 """
64 report task result to master
65 """
66 report = master_pb2.ReportTaskResultRequest()
67 report.task_id = task_id
68 report.err_message = err_msg
69 return self._stub.ReportTaskResult(report)
70
71 def report_gradient(self, grads):
72 """
73 report gradient to ps, return (accepted, model_version) from rpc call.
74 """
75 req = master_pb2.ReportGradientRequest()
76 for g, v in zip(grads, self._model.trainable_variables):
77 req.gradient[v.name].CopyFrom(
78 ndarray_to_tensor(g.numpy()))
79 req.model_version = self._model_version
80 res = self._stub.ReportGradient(req)
81 return res.accepted, res.model_version
82
83 def distributed_train(self):
84 """
85 Distributed training.
86 """
87 while True:
88 task = self.get_task()
89 if not task.shard_file_name:
90 # No more task
91 break
92 batch_size = task.minibatch_size
93 err_msg = ""
94 try:
95 with recordio.File(task.shard_file_name, "r") as rdio_r:
96 reader = rdio_r.get_reader(task.start, task.end)
97 min_model_version = task.model_version
98 while True:
99 record_buf = list(
100 itertools.islice(reader, 0, batch_size))
101 if not record_buf:
102 break
103
104 for _ in range(self._max_retrain_num):
105 # TODO: optimize the logic to avoid unnecessary get_model call.
106 self.get_model(
107 max(self._model_version, min_model_version))
108
109 batch_input_data = self._input_fn(record_buf)
110
111 with tf.GradientTape() as tape:
112 inputs = []
113 for input_name in self._model.input_names():
114 inputs.append(batch_input_data[input_name])
115 if len(inputs) == 1:
116 inputs = inputs[0]
117 outputs = self._model.call(inputs)
118 loss = self._model.loss(outputs, batch_input_data)
119
120 # TODO: Add regularization loss if any,
121 # which should be divided by the number of contributing workers.
122 grads = tape.gradient(
123 loss, self._model.trainable_variables)
124 print("Loss is ", loss.numpy())
125
126 accepted, min_model_version = self.report_gradient(
127 grads)
128 if accepted:
129 break
130 else:
131 # Worker got stuck, fail the task.
132 # TODO: stop the worker if it fails to make any progress for some time.
133 raise RuntimeError("Worker got stuck")
134
135
136 except Exception as ex:
137 err_msg = str(ex)
138 self.report_task_result(task.task_id, err_msg)
139
140 def local_train(self, file_list, batch_size, epoch=1, kwargs=None):
141 """
142         Local training for local testing. Must be in eager mode.
143         Arguments:
144 batch_size: batch size in training
145 epoch: the number of epoch in training
146 kwargs: contains a dict of parameters used in training
147 """
148 optimizer = self._opt_fn()
149 for _ in range(epoch):
150 for f in file_list:
151 with recordio.File(f, "r") as rdio_r:
152 reader = rdio_r.get_reader(0, rdio_r.count())
153 while True:
154 record_buf = list(
155 itertools.islice(reader, 0, batch_size))
156 if not record_buf:
157 break
158
159 data = self._input_fn(record_buf)
160
161 with tf.GradientTape() as tape:
162 inputs = []
163 for input_name in self._model.input_names():
164 inputs.append(data[input_name])
165 if len(inputs) == 1:
166 inputs = inputs[0]
167 outputs = self._model.call(inputs)
168 loss = self._model.loss(outputs, data)
169
170 # Add regularization loss if any.
171 # Note: for distributed training, the regularization loss should
172 # be divided by the number of contributing workers, which
173 # might be difficult for elasticdl.
174 if self._model.losses:
175 loss += math_ops.add_n(self._model.losses)
176 grads = tape.gradient(
177 loss, self._model.trainable_variables)
178 optimizer.apply_gradients(
179 zip(grads, self._model.trainable_variables))
180 print("Loss is ", loss.numpy())
181
[end of elasticdl/worker/worker.py]
[start of elasticdl/examples/mnist/mnist.py]
1 import tensorflow as tf
2 tf.enable_eager_execution()
3
4 import os
5 import argparse
6 import numpy as np
7 from worker.worker import Worker
8
9
10 class MnistModel(tf.keras.Model):
11 def __init__(self, channel_last=True):
12 super(MnistModel, self).__init__(name='mnist_model')
13 if channel_last:
14 self._reshape = tf.keras.layers.Reshape((28, 28, 1))
15 else:
16 self._reshape = tf.keras.layers.Reshape((1, 28, 28))
17 self._conv1 = tf.keras.layers.Conv2D(
18 32, kernel_size=(3, 3), activation='relu')
19 self._conv2 = tf.keras.layers.Conv2D(
20 64, kernel_size=(3, 3), activation='relu')
21 self._batch_norm = tf.keras.layers.BatchNormalization()
22 self._maxpooling = tf.keras.layers.MaxPooling2D(
23 pool_size=(2, 2))
24 self._dropout = tf.keras.layers.Dropout(0.25)
25 self._flatten = tf.keras.layers.Flatten()
26 self._dense = tf.keras.layers.Dense(10)
27
28 def call(self, inputs):
29 x = self._reshape(inputs)
30 x = self._conv1(x)
31 x = self._conv2(x)
32 x = self._batch_norm(x)
33 x = self._maxpooling(x)
34 x = self._dropout(x)
35 x = self._flatten(x)
36 x = self._dense(x)
37 return x
38
39 @staticmethod
40 def input_shapes():
41 return (1, 28, 28)
42
43 @staticmethod
44 def input_names():
45 return ['image']
46
47 @staticmethod
48 def loss(output, labels):
49 return tf.reduce_mean(
50 tf.nn.sparse_softmax_cross_entropy_with_logits(
51 logits=output, labels=labels['label']))
52
53 @staticmethod
54 def optimizer(lr=0.1):
55 return tf.train.GradientDescentOptimizer(lr)
56
57 @staticmethod
58 def input_fn(records):
59 image_list = []
60 label_list = []
61 # deserialize
62 for r in records:
63 parsed = np.frombuffer(r, dtype="uint8")
64 label = parsed[-1]
65 image = np.resize(parsed[:-1], new_shape=(28, 28))
66 image = image.astype(np.float32)
67 image /= 255
68 label = label.astype(np.int32)
69 image_list.append(image)
70 label_list.append(label)
71
72 # batching
73 batch_size = len(image_list)
74 images = np.concatenate(image_list, axis=0)
75 images = np.reshape(images, (batch_size, 28, 28))
76 labels = np.array(label_list)
77 return {'image': images, 'label': labels}
78
[end of elasticdl/examples/mnist/mnist.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/elasticdl/examples/mnist/mnist.py b/elasticdl/examples/mnist/mnist.py
--- a/elasticdl/examples/mnist/mnist.py
+++ b/elasticdl/examples/mnist/mnist.py
@@ -25,13 +25,14 @@
self._flatten = tf.keras.layers.Flatten()
self._dense = tf.keras.layers.Dense(10)
- def call(self, inputs):
+ def call(self, inputs, training=False):
x = self._reshape(inputs)
x = self._conv1(x)
x = self._conv2(x)
- x = self._batch_norm(x)
+ x = self._batch_norm(x, training=training)
x = self._maxpooling(x)
- x = self._dropout(x)
+ if training:
+ x = self._dropout(x, training=training)
x = self._flatten(x)
x = self._dense(x)
return x
diff --git a/elasticdl/worker/worker.py b/elasticdl/worker/worker.py
--- a/elasticdl/worker/worker.py
+++ b/elasticdl/worker/worker.py
@@ -114,7 +114,7 @@
inputs.append(batch_input_data[input_name])
if len(inputs) == 1:
inputs = inputs[0]
- outputs = self._model.call(inputs)
+ outputs = self._model.call(inputs, training=True)
loss = self._model.loss(outputs, batch_input_data)
# TODO: Add regularization loss if any,
@@ -164,7 +164,7 @@
inputs.append(data[input_name])
if len(inputs) == 1:
inputs = inputs[0]
- outputs = self._model.call(inputs)
+ outputs = self._model.call(inputs, training=True)
loss = self._model.loss(outputs, data)
# Add regularization loss if any.
|
{"golden_diff": "diff --git a/elasticdl/examples/mnist/mnist.py b/elasticdl/examples/mnist/mnist.py\n--- a/elasticdl/examples/mnist/mnist.py\n+++ b/elasticdl/examples/mnist/mnist.py\n@@ -25,13 +25,14 @@\n self._flatten = tf.keras.layers.Flatten()\n self._dense = tf.keras.layers.Dense(10)\n \n- def call(self, inputs):\n+ def call(self, inputs, training=False):\n x = self._reshape(inputs)\n x = self._conv1(x)\n x = self._conv2(x)\n- x = self._batch_norm(x)\n+ x = self._batch_norm(x, training=training)\n x = self._maxpooling(x)\n- x = self._dropout(x)\n+ if training:\n+ x = self._dropout(x, training=training)\n x = self._flatten(x)\n x = self._dense(x)\n return x\ndiff --git a/elasticdl/worker/worker.py b/elasticdl/worker/worker.py\n--- a/elasticdl/worker/worker.py\n+++ b/elasticdl/worker/worker.py\n@@ -114,7 +114,7 @@\n inputs.append(batch_input_data[input_name])\n if len(inputs) == 1:\n inputs = inputs[0]\n- outputs = self._model.call(inputs)\n+ outputs = self._model.call(inputs, training=True)\n loss = self._model.loss(outputs, batch_input_data)\n \n # TODO: Add regularization loss if any,\n@@ -164,7 +164,7 @@\n inputs.append(data[input_name])\n if len(inputs) == 1:\n inputs = inputs[0]\n- outputs = self._model.call(inputs)\n+ outputs = self._model.call(inputs, training=True)\n loss = self._model.loss(outputs, data)\n \n # Add regularization loss if any.\n", "issue": "non-trainable variables update in training\nSome keras layers have non-trainable variables, such as BatchNorm and RNN. These variables are not updated by gradients, but by update ops.\r\n\r\nWe need to run the update ops to update these internal states of the layers.\r\n\r\n\n", "before_files": [{"content": "import tensorflow as tf\nassert tf.executing_eagerly()\n\nfrom google.protobuf import empty_pb2\nfrom tensorflow.python.ops import math_ops\nfrom proto import master_pb2_grpc\nfrom proto import master_pb2\nfrom common.ndarray import ndarray_to_tensor, tensor_to_ndarray\nimport itertools\nimport recordio\n\n# the default max number of a minibatch retrain as its gradients are not accepted by master.\nDEFAULT_MAX_MINIBATCH_RETRAIN_NUM = 64\n\nclass Worker(object):\n \"\"\"ElasticDL worker\"\"\"\n\n def __init__(self,\n model_cls,\n channel=None,\n max_retrain_num=DEFAULT_MAX_MINIBATCH_RETRAIN_NUM):\n \"\"\"\n Arguments:\n model_cls: A class to define the model\n channel: grpc channel\n max_retrain_num: max number of a minibatch retrain as its gradients are not accepted by master\n \"\"\"\n\n self._model = model_cls()\n self._model.build(model_cls.input_shapes())\n\n self._input_fn = model_cls.input_fn \n self._opt_fn = model_cls.optimizer\n\n if channel is None:\n self._stub = None\n else:\n self._stub = master_pb2_grpc.MasterStub(channel)\n self._max_retrain_num = max_retrain_num\n self._model_version = -1\n\n def get_task(self):\n \"\"\"\n get task from master\n \"\"\"\n return self._stub.GetTask(empty_pb2.Empty())\n\n def get_model(self, min_version):\n \"\"\"\n get model from master, and update model_version\n \"\"\"\n req = master_pb2.GetModelRequest()\n req.min_version = min_version\n model = self._stub.GetModel(req)\n\n for var in self._model.trainable_variables:\n # Assumes all trainable variables exist in model.param.\n var.assign(\n tensor_to_ndarray(model.param[var.name]))\n self._model_version = model.version\n\n def report_task_result(self, task_id, err_msg):\n \"\"\"\n report task result to master\n \"\"\"\n report = master_pb2.ReportTaskResultRequest()\n report.task_id = task_id\n report.err_message = 
err_msg\n return self._stub.ReportTaskResult(report)\n\n def report_gradient(self, grads):\n \"\"\"\n report gradient to ps, return (accepted, model_version) from rpc call.\n \"\"\"\n req = master_pb2.ReportGradientRequest()\n for g, v in zip(grads, self._model.trainable_variables):\n req.gradient[v.name].CopyFrom(\n ndarray_to_tensor(g.numpy()))\n req.model_version = self._model_version\n res = self._stub.ReportGradient(req)\n return res.accepted, res.model_version\n\n def distributed_train(self):\n \"\"\"\n Distributed training.\n \"\"\"\n while True:\n task = self.get_task()\n if not task.shard_file_name:\n # No more task\n break\n batch_size = task.minibatch_size\n err_msg = \"\"\n try:\n with recordio.File(task.shard_file_name, \"r\") as rdio_r:\n reader = rdio_r.get_reader(task.start, task.end)\n min_model_version = task.model_version\n while True:\n record_buf = list(\n itertools.islice(reader, 0, batch_size))\n if not record_buf:\n break\n\n for _ in range(self._max_retrain_num):\n # TODO: optimize the logic to avoid unnecessary get_model call.\n self.get_model(\n max(self._model_version, min_model_version))\n\n batch_input_data = self._input_fn(record_buf)\n\n with tf.GradientTape() as tape:\n inputs = []\n for input_name in self._model.input_names():\n inputs.append(batch_input_data[input_name])\n if len(inputs) == 1:\n inputs = inputs[0]\n outputs = self._model.call(inputs)\n loss = self._model.loss(outputs, batch_input_data)\n\n # TODO: Add regularization loss if any,\n # which should be divided by the number of contributing workers.\n grads = tape.gradient(\n loss, self._model.trainable_variables)\n print(\"Loss is \", loss.numpy())\n\n accepted, min_model_version = self.report_gradient(\n grads)\n if accepted:\n break\n else:\n # Worker got stuck, fail the task.\n # TODO: stop the worker if it fails to make any progress for some time.\n raise RuntimeError(\"Worker got stuck\")\n\n\n except Exception as ex:\n err_msg = str(ex)\n self.report_task_result(task.task_id, err_msg)\n\n def local_train(self, file_list, batch_size, epoch=1, kwargs=None):\n \"\"\"\n Local training for local testing. 
Must in eager mode.\n Argments:\n batch_size: batch size in training\n epoch: the number of epoch in training\n kwargs: contains a dict of parameters used in training\n \"\"\"\n optimizer = self._opt_fn()\n for _ in range(epoch):\n for f in file_list:\n with recordio.File(f, \"r\") as rdio_r:\n reader = rdio_r.get_reader(0, rdio_r.count())\n while True:\n record_buf = list(\n itertools.islice(reader, 0, batch_size))\n if not record_buf:\n break\n\n data = self._input_fn(record_buf)\n\n with tf.GradientTape() as tape:\n inputs = []\n for input_name in self._model.input_names():\n inputs.append(data[input_name])\n if len(inputs) == 1:\n inputs = inputs[0]\n outputs = self._model.call(inputs)\n loss = self._model.loss(outputs, data)\n\n # Add regularization loss if any.\n # Note: for distributed training, the regularization loss should\n # be divided by the number of contributing workers, which\n # might be difficult for elasticdl.\n if self._model.losses:\n loss += math_ops.add_n(self._model.losses)\n grads = tape.gradient(\n loss, self._model.trainable_variables)\n optimizer.apply_gradients(\n zip(grads, self._model.trainable_variables))\n print(\"Loss is \", loss.numpy())\n", "path": "elasticdl/worker/worker.py"}, {"content": "import tensorflow as tf\ntf.enable_eager_execution()\n\nimport os\nimport argparse\nimport numpy as np\nfrom worker.worker import Worker\n\n\nclass MnistModel(tf.keras.Model):\n def __init__(self, channel_last=True):\n super(MnistModel, self).__init__(name='mnist_model')\n if channel_last:\n self._reshape = tf.keras.layers.Reshape((28, 28, 1))\n else:\n self._reshape = tf.keras.layers.Reshape((1, 28, 28))\n self._conv1 = tf.keras.layers.Conv2D(\n 32, kernel_size=(3, 3), activation='relu')\n self._conv2 = tf.keras.layers.Conv2D(\n 64, kernel_size=(3, 3), activation='relu')\n self._batch_norm = tf.keras.layers.BatchNormalization()\n self._maxpooling = tf.keras.layers.MaxPooling2D(\n pool_size=(2, 2))\n self._dropout = tf.keras.layers.Dropout(0.25)\n self._flatten = tf.keras.layers.Flatten()\n self._dense = tf.keras.layers.Dense(10)\n\n def call(self, inputs):\n x = self._reshape(inputs)\n x = self._conv1(x)\n x = self._conv2(x)\n x = self._batch_norm(x)\n x = self._maxpooling(x)\n x = self._dropout(x)\n x = self._flatten(x)\n x = self._dense(x)\n return x\n\n @staticmethod\n def input_shapes():\n return (1, 28, 28)\n\n @staticmethod\n def input_names():\n return ['image']\n\n @staticmethod\n def loss(output, labels):\n return tf.reduce_mean(\n tf.nn.sparse_softmax_cross_entropy_with_logits(\n logits=output, labels=labels['label']))\n\n @staticmethod\n def optimizer(lr=0.1):\n return tf.train.GradientDescentOptimizer(lr)\n\n @staticmethod\n def input_fn(records):\n image_list = []\n label_list = []\n # deserialize\n for r in records:\n parsed = np.frombuffer(r, dtype=\"uint8\")\n label = parsed[-1]\n image = np.resize(parsed[:-1], new_shape=(28, 28))\n image = image.astype(np.float32)\n image /= 255\n label = label.astype(np.int32)\n image_list.append(image)\n label_list.append(label)\n\n # batching\n batch_size = len(image_list)\n images = np.concatenate(image_list, axis=0)\n images = np.reshape(images, (batch_size, 28, 28))\n labels = np.array(label_list)\n return {'image': images, 'label': labels}\n", "path": "elasticdl/examples/mnist/mnist.py"}]}
| 3,132 | 424 |
gh_patches_debug_29117
|
rasdani/github-patches
|
git_diff
|
sublimelsp__LSP-1242
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Server is being shut down on server sending empty stderr line
When the server triggers stderr output that is an empty string (or becomes an empty string after `rstrip`), LSP closes the transports and thus the server.
Issue found when analyzing https://github.com/sublimelsp/LSP-angular/issues/1
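For reference, a minimal sketch (my own illustration, using `io.BytesIO` as a stand-in for the server's stderr pipe) of why stripping the line conflates a blank stderr line with end-of-stream: `readline()` returns empty bytes only at EOF, while a blank line still carries its newline.

```python
import io

# Stand-in for process.stderr: one real message, one blank line, another message.
stderr = io.BytesIO(b"warning: something\n\nanother message\n")

print(stderr.readline())  # b'warning: something\n'
print(stderr.readline())  # b'\n'  -> blank line; .rstrip() yields '', but this is NOT EOF
print(stderr.readline())  # b'another message\n'
print(stderr.readline())  # b''    -> only here has the stream actually ended
```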
</issue>
<code>
[start of plugin/core/transports.py]
1 from .logging import exception_log, debug
2 from .types import ClientConfig
3 from .typing import Dict, Any, Optional, IO, Protocol
4 from abc import ABCMeta, abstractmethod
5 from contextlib import closing
6 from queue import Queue
7 import json
8 import os
9 import shutil
10 import socket
11 import sublime
12 import subprocess
13 import threading
14 import time
15 import weakref
16
17
18 TCP_CONNECT_TIMEOUT = 5
19
20
21 class Transport(metaclass=ABCMeta):
22
23 @abstractmethod
24 def send(self, payload: Dict[str, Any]) -> None:
25 pass
26
27 @abstractmethod
28 def close(self) -> None:
29 pass
30
31
32 class TransportCallbacks(Protocol):
33
34 def on_transport_close(self, exit_code: int, exception: Optional[Exception]) -> None:
35 ...
36
37 def on_payload(self, payload: Dict[str, Any]) -> None:
38 ...
39
40 def on_stderr_message(self, message: str) -> None:
41 ...
42
43
44 class JsonRpcTransport(Transport):
45
46 def __init__(self, name: str, process: subprocess.Popen, socket: Optional[socket.socket], reader: IO[bytes],
47 writer: IO[bytes], stderr: Optional[IO[bytes]], callback_object: TransportCallbacks) -> None:
48 self._process = process
49 self._socket = socket
50 self._reader = reader
51 self._writer = writer
52 self._stderr = stderr
53 self._reader_thread = threading.Thread(target=self._read_loop, name='{}-reader'.format(name))
54 self._writer_thread = threading.Thread(target=self._write_loop, name='{}-writer'.format(name))
55 self._stderr_thread = threading.Thread(target=self._stderr_loop, name='{}-stderr'.format(name))
56 self._callback_object = weakref.ref(callback_object)
57 self._send_queue = Queue(0) # type: Queue[Optional[Dict[str, Any]]]
58 self._reader_thread.start()
59 self._writer_thread.start()
60 self._stderr_thread.start()
61 self._closed = False
62
63 def send(self, payload: Dict[str, Any]) -> None:
64 self._send_queue.put_nowait(payload)
65
66 def close(self) -> None:
67 if not self._closed:
68 self._send_queue.put_nowait(None)
69 if self._socket:
70 self._socket.close()
71 self._closed = True
72
73 def _join_thread(self, t: threading.Thread) -> None:
74 if t.ident == threading.current_thread().ident:
75 return
76 try:
77 t.join(2)
78 except TimeoutError as ex:
79 exception_log("failed to join {} thread".format(t.name), ex)
80
81 def __del__(self) -> None:
82 self.close()
83 self._join_thread(self._writer_thread)
84 self._join_thread(self._reader_thread)
85 self._join_thread(self._stderr_thread)
86
87 def _read_loop(self) -> None:
88 try:
89 while self._reader:
90 line = self._reader.readline()
91 if not line:
92 break
93 try:
94 num_bytes = _content_length(line)
95 except ValueError:
96 continue
97 if num_bytes is None:
98 continue
99 while line and line.strip():
100 line = self._reader.readline()
101 if not line:
102 continue
103 body = self._reader.read(num_bytes)
104 callback_object = self._callback_object()
105 if callback_object:
106 try:
107 callback_object.on_payload(_decode(body))
108 except Exception as ex:
109 exception_log("Error handling payload", ex)
110 else:
111 break
112 except (AttributeError, BrokenPipeError):
113 pass
114 except Exception as ex:
115 exception_log("Unexpected exception", ex)
116 self._send_queue.put_nowait(None)
117
118 def _end(self, exception: Optional[Exception]) -> None:
119 exit_code = 0
120 if not exception:
121 try:
122 # Allow the process to stop itself.
123 exit_code = self._process.wait(1)
124 except (AttributeError, ProcessLookupError, subprocess.TimeoutExpired):
125 pass
126 if self._process:
127 try:
128 # The process didn't stop itself. Terminate!
129 self._process.kill()
130 # still wait for the process to die, or zombie processes might be the result
131 # Ignore the exit code in this case, it's going to be something non-zero because we sent SIGKILL.
132 self._process.wait()
133 except (AttributeError, ProcessLookupError):
134 pass
135 except Exception as ex:
136 exception = ex # TODO: Old captured exception is overwritten
137 callback_object = self._callback_object()
138 if callback_object:
139 callback_object.on_transport_close(exit_code, exception)
140
141 def _write_loop(self) -> None:
142 exception = None # type: Optional[Exception]
143 try:
144 while self._writer:
145 d = self._send_queue.get()
146 if d is None:
147 break
148 body = _encode(d)
149 self._writer.writelines(("Content-Length: {}\r\n\r\n".format(len(body)).encode('ascii'), body))
150 self._writer.flush()
151 except (BrokenPipeError, AttributeError):
152 pass
153 except Exception as ex:
154 exception = ex
155 self._end(exception)
156
157 def _stderr_loop(self) -> None:
158 try:
159 while self._stderr:
160 message = self._stderr.readline().decode('utf-8', 'replace').rstrip()
161 if not message:
162 break
163 callback_object = self._callback_object()
164 if callback_object:
165 callback_object.on_stderr_message(message)
166 else:
167 break
168 except (BrokenPipeError, AttributeError):
169 pass
170 except Exception as ex:
171 exception_log('unexpected exception type in stderr loop', ex)
172 self._send_queue.put_nowait(None)
173
174
175 def create_transport(config: ClientConfig, cwd: Optional[str], window: sublime.Window,
176 callback_object: TransportCallbacks, variables: Dict[str, str]) -> JsonRpcTransport:
177 tcp_port = None # type: Optional[int]
178 if config.tcp_port is not None:
179 tcp_port = _find_free_port() if config.tcp_port == 0 else config.tcp_port
180 if tcp_port is not None:
181 variables["port"] = str(tcp_port)
182 args = sublime.expand_variables(config.binary_args, variables)
183 args = [os.path.expanduser(arg) for arg in args]
184 if tcp_port is not None:
185 # DEPRECATED -- replace {port} with $port or ${port} in your client config
186 args = [a.replace('{port}', str(tcp_port)) for a in args]
187 env = os.environ.copy()
188 for var, value in config.env.items():
189 env[var] = sublime.expand_variables(value, variables)
190 if tcp_port is not None:
191 stdout = subprocess.DEVNULL
192 stdin = subprocess.DEVNULL
193 else:
194 stdout = subprocess.PIPE
195 stdin = subprocess.PIPE
196 if sublime.platform() == "windows":
197 startupinfo = subprocess.STARTUPINFO() # type: ignore
198 startupinfo.dwFlags |= subprocess.SW_HIDE | subprocess.STARTF_USESHOWWINDOW # type: ignore
199 executable_arg = args[0]
200 fname, ext = os.path.splitext(executable_arg)
201 if len(ext) < 1:
202 path_to_executable = shutil.which(executable_arg)
203 # what extensions should we append so CreateProcess can find it?
204 # node has .cmd
205 # dart has .bat
206 # python has .exe wrappers - not needed
207 for extension in ['.cmd', '.bat']:
208 if path_to_executable and path_to_executable.lower().endswith(extension):
209 args[0] = executable_arg + extension
210 break
211 else:
212 startupinfo = None
213 debug("starting {} in {}".format(args, cwd if cwd else os.getcwd()))
214 process = subprocess.Popen(
215 args=args,
216 stdin=stdin,
217 stdout=stdout,
218 stderr=subprocess.PIPE,
219 startupinfo=startupinfo,
220 env=env,
221 cwd=cwd)
222 _subprocesses.add(process)
223 sock = None # type: Optional[socket.socket]
224 if tcp_port:
225 sock = _connect_tcp(tcp_port)
226 if sock is None:
227 raise RuntimeError("Failed to connect on port {}".format(config.tcp_port))
228 reader = sock.makefile('rwb') # type: IO[bytes]
229 writer = reader
230 else:
231 reader = process.stdout # type: ignore
232 writer = process.stdin # type: ignore
233 return JsonRpcTransport(config.name, process, sock, reader, writer, process.stderr, callback_object)
234
235
236 _subprocesses = weakref.WeakSet() # type: weakref.WeakSet[subprocess.Popen]
237
238
239 def kill_all_subprocesses() -> None:
240 global _subprocesses
241 subprocesses = list(_subprocesses)
242 for p in subprocesses:
243 try:
244 p.kill()
245 except Exception:
246 pass
247 for p in subprocesses:
248 try:
249 p.wait()
250 except Exception:
251 pass
252
253
254 def _connect_tcp(port: int) -> Optional[socket.socket]:
255 start_time = time.time()
256 while time.time() - start_time < TCP_CONNECT_TIMEOUT:
257 try:
258 return socket.create_connection(('localhost', port))
259 except ConnectionRefusedError:
260 pass
261 return None
262
263
264 def _find_free_port() -> int:
265 with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
266 s.bind(('', 0))
267 s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
268 return s.getsockname()[1]
269
270
271 def _encode(d: Dict[str, Any]) -> bytes:
272 return json.dumps(d, sort_keys=False, check_circular=False, separators=(',', ':')).encode('utf-8')
273
274
275 def _decode(message: bytes) -> Dict[str, Any]:
276 return json.loads(message.decode('utf-8'))
277
278
279 def _content_length(line: bytes) -> Optional[int]:
280 if line.startswith(b'Content-Length: '):
281 _, value = line.split(b'Content-Length: ')
282 value = value.strip()
283 try:
284 return int(value)
285 except ValueError as ex:
286 raise ValueError("Invalid Content-Length header: {}".format(value.decode('ascii'))) from ex
287 return None
288
[end of plugin/core/transports.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/plugin/core/transports.py b/plugin/core/transports.py
--- a/plugin/core/transports.py
+++ b/plugin/core/transports.py
@@ -45,6 +45,7 @@
def __init__(self, name: str, process: subprocess.Popen, socket: Optional[socket.socket], reader: IO[bytes],
writer: IO[bytes], stderr: Optional[IO[bytes]], callback_object: TransportCallbacks) -> None:
+ self._closed = False
self._process = process
self._socket = socket
self._reader = reader
@@ -58,7 +59,6 @@
self._reader_thread.start()
self._writer_thread.start()
self._stderr_thread.start()
- self._closed = False
def send(self, payload: Dict[str, Any]) -> None:
self._send_queue.put_nowait(payload)
@@ -157,9 +157,9 @@
def _stderr_loop(self) -> None:
try:
while self._stderr:
- message = self._stderr.readline().decode('utf-8', 'replace').rstrip()
- if not message:
+ if self._closed:
break
+ message = self._stderr.readline().decode('utf-8', 'replace').rstrip()
callback_object = self._callback_object()
if callback_object:
callback_object.on_stderr_message(message)
|
{"golden_diff": "diff --git a/plugin/core/transports.py b/plugin/core/transports.py\n--- a/plugin/core/transports.py\n+++ b/plugin/core/transports.py\n@@ -45,6 +45,7 @@\n \n def __init__(self, name: str, process: subprocess.Popen, socket: Optional[socket.socket], reader: IO[bytes],\n writer: IO[bytes], stderr: Optional[IO[bytes]], callback_object: TransportCallbacks) -> None:\n+ self._closed = False\n self._process = process\n self._socket = socket\n self._reader = reader\n@@ -58,7 +59,6 @@\n self._reader_thread.start()\n self._writer_thread.start()\n self._stderr_thread.start()\n- self._closed = False\n \n def send(self, payload: Dict[str, Any]) -> None:\n self._send_queue.put_nowait(payload)\n@@ -157,9 +157,9 @@\n def _stderr_loop(self) -> None:\n try:\n while self._stderr:\n- message = self._stderr.readline().decode('utf-8', 'replace').rstrip()\n- if not message:\n+ if self._closed:\n break\n+ message = self._stderr.readline().decode('utf-8', 'replace').rstrip()\n callback_object = self._callback_object()\n if callback_object:\n callback_object.on_stderr_message(message)\n", "issue": "Server is being shutdown on server sending empty stderr line\nWhen server triggers stderr output that is an empty string (or becomes an empty string after `rstrip` then LSP closes the transports and thus the server.\r\n\r\nIssue found when analyzing https://github.com/sublimelsp/LSP-angular/issues/1\n", "before_files": [{"content": "from .logging import exception_log, debug\nfrom .types import ClientConfig\nfrom .typing import Dict, Any, Optional, IO, Protocol\nfrom abc import ABCMeta, abstractmethod\nfrom contextlib import closing\nfrom queue import Queue\nimport json\nimport os\nimport shutil\nimport socket\nimport sublime\nimport subprocess\nimport threading\nimport time\nimport weakref\n\n\nTCP_CONNECT_TIMEOUT = 5\n\n\nclass Transport(metaclass=ABCMeta):\n\n @abstractmethod\n def send(self, payload: Dict[str, Any]) -> None:\n pass\n\n @abstractmethod\n def close(self) -> None:\n pass\n\n\nclass TransportCallbacks(Protocol):\n\n def on_transport_close(self, exit_code: int, exception: Optional[Exception]) -> None:\n ...\n\n def on_payload(self, payload: Dict[str, Any]) -> None:\n ...\n\n def on_stderr_message(self, message: str) -> None:\n ...\n\n\nclass JsonRpcTransport(Transport):\n\n def __init__(self, name: str, process: subprocess.Popen, socket: Optional[socket.socket], reader: IO[bytes],\n writer: IO[bytes], stderr: Optional[IO[bytes]], callback_object: TransportCallbacks) -> None:\n self._process = process\n self._socket = socket\n self._reader = reader\n self._writer = writer\n self._stderr = stderr\n self._reader_thread = threading.Thread(target=self._read_loop, name='{}-reader'.format(name))\n self._writer_thread = threading.Thread(target=self._write_loop, name='{}-writer'.format(name))\n self._stderr_thread = threading.Thread(target=self._stderr_loop, name='{}-stderr'.format(name))\n self._callback_object = weakref.ref(callback_object)\n self._send_queue = Queue(0) # type: Queue[Optional[Dict[str, Any]]]\n self._reader_thread.start()\n self._writer_thread.start()\n self._stderr_thread.start()\n self._closed = False\n\n def send(self, payload: Dict[str, Any]) -> None:\n self._send_queue.put_nowait(payload)\n\n def close(self) -> None:\n if not self._closed:\n self._send_queue.put_nowait(None)\n if self._socket:\n self._socket.close()\n self._closed = True\n\n def _join_thread(self, t: threading.Thread) -> None:\n if t.ident == threading.current_thread().ident:\n return\n try:\n t.join(2)\n 
except TimeoutError as ex:\n exception_log(\"failed to join {} thread\".format(t.name), ex)\n\n def __del__(self) -> None:\n self.close()\n self._join_thread(self._writer_thread)\n self._join_thread(self._reader_thread)\n self._join_thread(self._stderr_thread)\n\n def _read_loop(self) -> None:\n try:\n while self._reader:\n line = self._reader.readline()\n if not line:\n break\n try:\n num_bytes = _content_length(line)\n except ValueError:\n continue\n if num_bytes is None:\n continue\n while line and line.strip():\n line = self._reader.readline()\n if not line:\n continue\n body = self._reader.read(num_bytes)\n callback_object = self._callback_object()\n if callback_object:\n try:\n callback_object.on_payload(_decode(body))\n except Exception as ex:\n exception_log(\"Error handling payload\", ex)\n else:\n break\n except (AttributeError, BrokenPipeError):\n pass\n except Exception as ex:\n exception_log(\"Unexpected exception\", ex)\n self._send_queue.put_nowait(None)\n\n def _end(self, exception: Optional[Exception]) -> None:\n exit_code = 0\n if not exception:\n try:\n # Allow the process to stop itself.\n exit_code = self._process.wait(1)\n except (AttributeError, ProcessLookupError, subprocess.TimeoutExpired):\n pass\n if self._process:\n try:\n # The process didn't stop itself. Terminate!\n self._process.kill()\n # still wait for the process to die, or zombie processes might be the result\n # Ignore the exit code in this case, it's going to be something non-zero because we sent SIGKILL.\n self._process.wait()\n except (AttributeError, ProcessLookupError):\n pass\n except Exception as ex:\n exception = ex # TODO: Old captured exception is overwritten\n callback_object = self._callback_object()\n if callback_object:\n callback_object.on_transport_close(exit_code, exception)\n\n def _write_loop(self) -> None:\n exception = None # type: Optional[Exception]\n try:\n while self._writer:\n d = self._send_queue.get()\n if d is None:\n break\n body = _encode(d)\n self._writer.writelines((\"Content-Length: {}\\r\\n\\r\\n\".format(len(body)).encode('ascii'), body))\n self._writer.flush()\n except (BrokenPipeError, AttributeError):\n pass\n except Exception as ex:\n exception = ex\n self._end(exception)\n\n def _stderr_loop(self) -> None:\n try:\n while self._stderr:\n message = self._stderr.readline().decode('utf-8', 'replace').rstrip()\n if not message:\n break\n callback_object = self._callback_object()\n if callback_object:\n callback_object.on_stderr_message(message)\n else:\n break\n except (BrokenPipeError, AttributeError):\n pass\n except Exception as ex:\n exception_log('unexpected exception type in stderr loop', ex)\n self._send_queue.put_nowait(None)\n\n\ndef create_transport(config: ClientConfig, cwd: Optional[str], window: sublime.Window,\n callback_object: TransportCallbacks, variables: Dict[str, str]) -> JsonRpcTransport:\n tcp_port = None # type: Optional[int]\n if config.tcp_port is not None:\n tcp_port = _find_free_port() if config.tcp_port == 0 else config.tcp_port\n if tcp_port is not None:\n variables[\"port\"] = str(tcp_port)\n args = sublime.expand_variables(config.binary_args, variables)\n args = [os.path.expanduser(arg) for arg in args]\n if tcp_port is not None:\n # DEPRECATED -- replace {port} with $port or ${port} in your client config\n args = [a.replace('{port}', str(tcp_port)) for a in args]\n env = os.environ.copy()\n for var, value in config.env.items():\n env[var] = sublime.expand_variables(value, variables)\n if tcp_port is not None:\n stdout = 
subprocess.DEVNULL\n stdin = subprocess.DEVNULL\n else:\n stdout = subprocess.PIPE\n stdin = subprocess.PIPE\n if sublime.platform() == \"windows\":\n startupinfo = subprocess.STARTUPINFO() # type: ignore\n startupinfo.dwFlags |= subprocess.SW_HIDE | subprocess.STARTF_USESHOWWINDOW # type: ignore\n executable_arg = args[0]\n fname, ext = os.path.splitext(executable_arg)\n if len(ext) < 1:\n path_to_executable = shutil.which(executable_arg)\n # what extensions should we append so CreateProcess can find it?\n # node has .cmd\n # dart has .bat\n # python has .exe wrappers - not needed\n for extension in ['.cmd', '.bat']:\n if path_to_executable and path_to_executable.lower().endswith(extension):\n args[0] = executable_arg + extension\n break\n else:\n startupinfo = None\n debug(\"starting {} in {}\".format(args, cwd if cwd else os.getcwd()))\n process = subprocess.Popen(\n args=args,\n stdin=stdin,\n stdout=stdout,\n stderr=subprocess.PIPE,\n startupinfo=startupinfo,\n env=env,\n cwd=cwd)\n _subprocesses.add(process)\n sock = None # type: Optional[socket.socket]\n if tcp_port:\n sock = _connect_tcp(tcp_port)\n if sock is None:\n raise RuntimeError(\"Failed to connect on port {}\".format(config.tcp_port))\n reader = sock.makefile('rwb') # type: IO[bytes]\n writer = reader\n else:\n reader = process.stdout # type: ignore\n writer = process.stdin # type: ignore\n return JsonRpcTransport(config.name, process, sock, reader, writer, process.stderr, callback_object)\n\n\n_subprocesses = weakref.WeakSet() # type: weakref.WeakSet[subprocess.Popen]\n\n\ndef kill_all_subprocesses() -> None:\n global _subprocesses\n subprocesses = list(_subprocesses)\n for p in subprocesses:\n try:\n p.kill()\n except Exception:\n pass\n for p in subprocesses:\n try:\n p.wait()\n except Exception:\n pass\n\n\ndef _connect_tcp(port: int) -> Optional[socket.socket]:\n start_time = time.time()\n while time.time() - start_time < TCP_CONNECT_TIMEOUT:\n try:\n return socket.create_connection(('localhost', port))\n except ConnectionRefusedError:\n pass\n return None\n\n\ndef _find_free_port() -> int:\n with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:\n s.bind(('', 0))\n s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n return s.getsockname()[1]\n\n\ndef _encode(d: Dict[str, Any]) -> bytes:\n return json.dumps(d, sort_keys=False, check_circular=False, separators=(',', ':')).encode('utf-8')\n\n\ndef _decode(message: bytes) -> Dict[str, Any]:\n return json.loads(message.decode('utf-8'))\n\n\ndef _content_length(line: bytes) -> Optional[int]:\n if line.startswith(b'Content-Length: '):\n _, value = line.split(b'Content-Length: ')\n value = value.strip()\n try:\n return int(value)\n except ValueError as ex:\n raise ValueError(\"Invalid Content-Length header: {}\".format(value.decode('ascii'))) from ex\n return None\n", "path": "plugin/core/transports.py"}]}
| 3,558 | 304 |
gh_patches_debug_9649
|
rasdani/github-patches
|
git_diff
|
OBOFoundry__OBOFoundry.github.io-1980
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
OBO prefix map should include prefixes of obsolete ontologies
I think the OBO prefix map should contain prefixes of obsolete ontologies. Sometimes, like [here](https://github.com/OBOFoundry/OBOFoundry.github.io/pull/1974/files), one ontology is merged into another, with identifiers and all - it would still be useful to be able to look these up.
@jamesaoverton objects to that?
</issue>
<code>
[start of util/make-shacl-prefixes.py]
1 #!/usr/bin/env python3
2
3 import csv
4 import sys
5 from argparse import ArgumentParser
6
7 import yaml
8
9
10 def main(args):
11 """
12 Takes ontologies.yml file and makes a triple file with SHACL prefixes.
13
14 For example, for uberon it will generate:
15
16 [ sh:prefix "UBERON" ; sh:namespace "http://purl.obolibrary.org/obo/UBERON_"]
17
18 We always assume the CURIE prefix is uppercase, unless 'preferred_prefix' is specified
19 (for mixed-case prefixes, e.g. FBbt)
20
21 This can be useful for converting an OBO class PURL to a prefix without assumption-embedding string conversions.
22 It can be used to interconvert PURLs to CURIEs.
23
24 Note that while prefixes can sometimes be seen in RDF files, this is part of the syntax and not part of the data,
25 the prefixes are expanded at parse time. The obo_prefixes.ttl file makes these explicit.
26
27 We use the SHACL vocabulary since it provides convenient predicates for putting prefixes in the domain of discourse;
28 however, it does not entail any use of SHACL
29
30 """
31 parser = ArgumentParser(
32 description="""
33 Takes ontologies.yml file and makes a triple file with shacl prefixes"""
34 )
35 parser.add_argument("input")
36 args = parser.parse_args()
37 stream = open(args.input, "r")
38 data = yaml.load(stream, Loader=yaml.SafeLoader)
39
40 print("@prefix sh: <http://www.w3.org/ns/shacl#> .")
41 print("@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .")
42 print("[")
43 print(" sh:declare")
44 sep = ""
45 for ont in data["ontologies"]:
46 if ont.get("is_obsolete", False):
47 continue
48 prefix = ont.get("preferredPrefix", ont["id"].upper())
49 print(
50 f'{sep}[ sh:prefix "{prefix}" ; sh:namespace "http://purl.obolibrary.org/obo/{prefix}_"]'
51 )
52 sep = ","
53 print("] .")
54
55
56 if __name__ == "__main__":
57 main(sys.argv)
58
[end of util/make-shacl-prefixes.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/util/make-shacl-prefixes.py b/util/make-shacl-prefixes.py
--- a/util/make-shacl-prefixes.py
+++ b/util/make-shacl-prefixes.py
@@ -43,8 +43,9 @@
print(" sh:declare")
sep = ""
for ont in data["ontologies"]:
- if ont.get("is_obsolete", False):
- continue
+ # if ont.get("is_obsolete", False):
+ # continue
+ # See https://github.com/OBOFoundry/OBOFoundry.github.io/issues/1976
prefix = ont.get("preferredPrefix", ont["id"].upper())
print(
f'{sep}[ sh:prefix "{prefix}" ; sh:namespace "http://purl.obolibrary.org/obo/{prefix}_"]'
|
{"golden_diff": "diff --git a/util/make-shacl-prefixes.py b/util/make-shacl-prefixes.py\n--- a/util/make-shacl-prefixes.py\n+++ b/util/make-shacl-prefixes.py\n@@ -43,8 +43,9 @@\n print(\" sh:declare\")\n sep = \"\"\n for ont in data[\"ontologies\"]:\n- if ont.get(\"is_obsolete\", False):\n- continue\n+ # if ont.get(\"is_obsolete\", False):\n+ # continue\n+ # See https://github.com/OBOFoundry/OBOFoundry.github.io/issues/1976\n prefix = ont.get(\"preferredPrefix\", ont[\"id\"].upper())\n print(\n f'{sep}[ sh:prefix \"{prefix}\" ; sh:namespace \"http://purl.obolibrary.org/obo/{prefix}_\"]'\n", "issue": "OBO prefix map should include prefixes of obsolete ontologies\nI think OBO prefix map should contain prefixes of obsolete ontologies. Sometimes, like [here](https://github.com/OBOFoundry/OBOFoundry.github.io/pull/1974/files) one ontology is merged into another, with identifiers and all - it would still be useful to be able to loop these up.\r\n\r\n@jamesaoverton objects to that?\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nimport csv\nimport sys\nfrom argparse import ArgumentParser\n\nimport yaml\n\n\ndef main(args):\n \"\"\"\n Takes ontologies.yml file and makes a triple file with SHACL prefixes.\n\n For example, for uberon it will generate:\n\n [ sh:prefix \"UBERON\" ; sh:namespace \"http://purl.obolibrary.org/obo/UBERON_\"]\n\n We always assume the CURIE prefix is uppercase, unless 'preferred_prefix' is specified\n (for mixed-case prefixes, e.g. FBbt)\n\n This can be useful for converting an OBO class PURL to a prefix without assumption-embedding string conversions.\n It can be used to interconvert PURLs to CURIEs.\n\n Note that while prefixes can sometimes be seen in RDF files, this is part of the syntax and not part of the data,\n the prefixes are expanded at parse time. The obo_prefixes.ttl file makes these explicit.\n\n We use the SHACL vocabulary since it provides convenient predicates for putting prefixes in the domain of discourse;\n however, it does not entail any use of SHACL\n\n \"\"\"\n parser = ArgumentParser(\n description=\"\"\"\n Takes ontologies.yml file and makes a triple file with shacl prefixes\"\"\"\n )\n parser.add_argument(\"input\")\n args = parser.parse_args()\n stream = open(args.input, \"r\")\n data = yaml.load(stream, Loader=yaml.SafeLoader)\n\n print(\"@prefix sh:\t<http://www.w3.org/ns/shacl#> .\")\n print(\"@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .\")\n print(\"[\")\n print(\" sh:declare\")\n sep = \"\"\n for ont in data[\"ontologies\"]:\n if ont.get(\"is_obsolete\", False):\n continue\n prefix = ont.get(\"preferredPrefix\", ont[\"id\"].upper())\n print(\n f'{sep}[ sh:prefix \"{prefix}\" ; sh:namespace \"http://purl.obolibrary.org/obo/{prefix}_\"]'\n )\n sep = \",\"\n print(\"] .\")\n\n\nif __name__ == \"__main__\":\n main(sys.argv)\n", "path": "util/make-shacl-prefixes.py"}]}
| 1,216 | 185 |
gh_patches_debug_27264
|
rasdani/github-patches
|
git_diff
|
spack__spack-4345
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
perl "remembers" spack's compiler, which does the wrong thing
Additional Perl packages (e.g. from CPAN) should (need to) be built with the same compiler that built `perl` itself.
Perl handles this detail by remembering the compiler with which it was built. It "remembers" it by stashing a copy of it in the `Config` hash defined in `Config.pm`:
```perl
tie %Config, 'Config', {
archlibexp => '/home/hartzelg/tmp/spack-perl-cc/opt/spack/linux-centos7-x86_64/gcc-4.8.5/perl-5.24.1-35ejv4426dmzreum4ekdibu3ddmhquvi/lib/5.24.1/x86_64-linux',
archname => 'x86_64-linux',
cc => 'cc',
# [...]
```
On my system this drags `/usr/bin/cc` into the fray, resulting in sadness.
As an example, using the `cpanm` that Spack built
```
cpanm -l /path/to/library/dir XML::Simple
```
will eventually fail while trying to build `Net::HTTP` because it tries to use `cc` as its compiler.
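
A quick way to confirm what the installed perl recorded (a minimal diagnostic sketch, not part of Spack itself, assuming the Spack-built `perl` is first on `PATH`):

```python
# Diagnostic sketch: print the compiler/linker that perl stashed in %Config.
# Assumes the Spack-built perl is the first `perl` on PATH.
import subprocess


def recorded_tool(var):
    # `perl -MConfig -e ...` prints the value Config stored at build time.
    out = subprocess.run(
        ["perl", "-MConfig", "-e", "print $Config{%s}" % var],
        capture_output=True, text=True, check=True,
    )
    return out.stdout.strip()


if __name__ == "__main__":
    for var in ("cc", "ld"):
        # On an affected install this prints the wrapper name, e.g. cc => cc
        print(var, "=>", recorded_tool(var))
```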
As a quick hack/test, telling configure to use `gcc` as the C compiler results in `$Config{cc}` containing `gcc`, and happily the first gcc on my path is the one I used to build Perl.
```diff
diff --git a/var/spack/repos/builtin/packages/perl/package.py b/var/spack/repos/builtin/packages/perl/package.py
index 10895f60..ed30bac5 100644
--- a/var/spack/repos/builtin/packages/perl/package.py
+++ b/var/spack/repos/builtin/packages/perl/package.py
@@ -86,7 +86,8 @@ class Perl(Package): # Perl doesn't use Autotools, it should subclass Package
'-des',
'-Dprefix={0}'.format(prefix),
'-Dlocincpth=' + self.spec['gdbm'].prefix.include,
- '-Dloclibpth=' + self.spec['gdbm'].prefix.lib
+ '-Dloclibpth=' + self.spec['gdbm'].prefix.lib,
+ '-Dcc=gcc'
]
# Discussion of -fPIC for Intel at:
```
This change lets `Net::HTTP` build and install.
Spack's compiler wrappers and *etc* seem magical to me. What's a good way to fix this?
</issue>
<code>
[start of var/spack/repos/builtin/packages/perl/package.py]
1 ##############################################################################
2 # Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
3 # Produced at the Lawrence Livermore National Laboratory.
4 #
5 # This file is part of Spack.
6 # Created by Todd Gamblin, [email protected], All rights reserved.
7 # LLNL-CODE-647188
8 #
9 # For details, see https://github.com/llnl/spack
10 # Please also see the LICENSE file for our notice and the LGPL.
11 #
12 # This program is free software; you can redistribute it and/or modify
13 # it under the terms of the GNU Lesser General Public License (as
14 # published by the Free Software Foundation) version 2.1, February 1999.
15 #
16 # This program is distributed in the hope that it will be useful, but
17 # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
18 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
19 # conditions of the GNU Lesser General Public License for more details.
20 #
21 # You should have received a copy of the GNU Lesser General Public
22 # License along with this program; if not, write to the Free Software
23 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 ##############################################################################
25 #
26 # Author: Milton Woods <[email protected]>
27 # Date: March 22, 2017
28 # Author: George Hartzell <[email protected]>
29 # Date: July 21, 2016
30 # Author: Justin Too <[email protected]>
31 # Date: September 6, 2015
32 #
33 from spack import *
34
35
36 class Perl(Package): # Perl doesn't use Autotools, it should subclass Package
37 """Perl 5 is a highly capable, feature-rich programming language with over
38 27 years of development."""
39
40 homepage = "http://www.perl.org"
41 # URL must remain http:// so Spack can bootstrap curl
42 url = "http://www.cpan.org/src/5.0/perl-5.24.1.tar.gz"
43
44 # Development releases
45 version('5.25.11', '37a398682c36cd85992b34b5c1c25dc1')
46
47 # Maintenance releases (recommended)
48 version('5.24.1', '765ef511b5b87a164e2531403ee16b3c', preferred=True)
49 version('5.22.3', 'aa4f236dc2fc6f88b871436b8d0fda95')
50
51 # Misc releases that people need
52 version('5.22.2', '5767e2a10dd62a46d7b57f74a90d952b')
53 version('5.22.1', '19295bbb775a3c36123161b9bf4892f1')
54
55 # End of life releases
56 version('5.20.3', 'd647d0ea5a7a8194c34759ab9f2610cd')
57 version('5.18.4', '1f9334ff730adc05acd3dd7130d295db')
58 version('5.16.3', 'eb5c40f2575df6c155bc99e3fe0a9d82')
59
60 extendable = True
61
62 depends_on('gdbm')
63
64 # Installing cpanm alongside the core makes it safe and simple for
65 # people/projects to install their own sets of perl modules. Not
66 # having it in core increases the "energy of activation" for doing
67 # things cleanly.
68 variant('cpanm', default=True,
69 description='Optionally install cpanm with the core packages.')
70
71 resource(
72 name="cpanm",
73 url="http://search.cpan.org/CPAN/authors/id/M/MI/MIYAGAWA/App-cpanminus-1.7042.tar.gz",
74 md5="e87f55fbcb3c13a4754500c18e89219f",
75 destination="cpanm",
76 placement="cpanm"
77 )
78
79 phases = ['configure', 'build', 'install']
80
81 def configure_args(self):
82 spec = self.spec
83 prefix = self.prefix
84
85 config_args = [
86 '-des',
87 '-Dprefix={0}'.format(prefix),
88 '-Dlocincpth=' + self.spec['gdbm'].prefix.include,
89 '-Dloclibpth=' + self.spec['gdbm'].prefix.lib
90 ]
91
92 # Discussion of -fPIC for Intel at:
93 # https://github.com/LLNL/spack/pull/3081
94 if spec.satisfies('%intel'):
95 config_args.append('-Accflags={0}'.format(self.compiler.pic_flag))
96
97 return config_args
98
99 def configure(self, spec, prefix):
100 configure = Executable('./Configure')
101 configure(*self.configure_args())
102
103 def build(self, spec, prefix):
104 make()
105
106 @run_after('build')
107 @on_package_attributes(run_tests=True)
108 def test(self):
109 make('test')
110
111 def install(self, spec, prefix):
112 make('install')
113
114 @run_after('install')
115 def install_cpanm(self):
116 spec = self.spec
117
118 if '+cpanm' in spec:
119 with working_dir(join_path('cpanm', 'cpanm')):
120 perl = spec['perl'].command
121 perl('Makefile.PL')
122 make()
123 make('install')
124
125 def setup_environment(self, spack_env, run_env):
126 """Set PERL5LIB to support activation of Perl packages"""
127 run_env.set('PERL5LIB', join_path(self.prefix, 'lib', 'perl5'))
128
129 def setup_dependent_environment(self, spack_env, run_env, dependent_spec):
130 """Set PATH and PERL5LIB to include the extension and
131 any other perl extensions it depends on,
132 assuming they were installed with INSTALL_BASE defined."""
133 perl_lib_dirs = []
134 perl_bin_dirs = []
135 for d in dependent_spec.traverse(
136 deptype=('build', 'run'), deptype_query='run'):
137 if d.package.extends(self.spec):
138 perl_lib_dirs.append(join_path(d.prefix, 'lib', 'perl5'))
139 perl_bin_dirs.append(join_path(d.prefix, 'bin'))
140 perl_bin_path = ':'.join(perl_bin_dirs)
141 perl_lib_path = ':'.join(perl_lib_dirs)
142 spack_env.prepend_path('PATH', perl_bin_path)
143 spack_env.prepend_path('PERL5LIB', perl_lib_path)
144 run_env.prepend_path('PATH', perl_bin_path)
145 run_env.prepend_path('PERL5LIB', perl_lib_path)
146
147 def setup_dependent_package(self, module, dependent_spec):
148 """Called before perl modules' install() methods.
149 In most cases, extensions will only need to have one line:
150 perl('Makefile.PL','INSTALL_BASE=%s' % self.prefix)
151 """
152
153 # perl extension builds can have a global perl executable function
154 module.perl = Executable(join_path(self.spec.prefix.bin, 'perl'))
155
156 # Add variables for library directory
157 module.perl_lib_dir = join_path(dependent_spec.prefix, 'lib', 'perl5')
158
159 # Make the site packages directory for extensions,
160 # if it does not exist already.
161 if dependent_spec.package.is_extension:
162 mkdirp(module.perl_lib_dir)
163
[end of var/spack/repos/builtin/packages/perl/package.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/var/spack/repos/builtin/packages/perl/package.py b/var/spack/repos/builtin/packages/perl/package.py
--- a/var/spack/repos/builtin/packages/perl/package.py
+++ b/var/spack/repos/builtin/packages/perl/package.py
@@ -31,6 +31,7 @@
# Date: September 6, 2015
#
from spack import *
+import os
class Perl(Package): # Perl doesn't use Autotools, it should subclass Package
@@ -160,3 +161,33 @@
# if it does not exist already.
if dependent_spec.package.is_extension:
mkdirp(module.perl_lib_dir)
+
+ @run_after('install')
+ def filter_config_dot_pm(self):
+ """Run after install so that Config.pm records the compiler that Spack
+ built the package with. If this isn't done, $Config{cc} will
+ be set to Spack's cc wrapper script.
+ """
+
+ kwargs = {'ignore_absent': True, 'backup': False, 'string': False}
+
+ # Find the actual path to the installed Config.pm file.
+ perl = Executable(join_path(prefix.bin, 'perl'))
+ config_dot_pm = perl('-MModule::Loaded', '-MConfig', '-e',
+ 'print is_loaded(Config)', output=str)
+
+ match = 'cc *=>.*'
+ substitute = "cc => '{cc}',".format(cc=self.compiler.cc)
+ filter_file(match, substitute, config_dot_pm, **kwargs)
+
+ # And the path Config_heavy.pl
+ d = os.path.dirname(config_dot_pm)
+ config_heavy = join_path(d, 'Config_heavy.pl')
+
+ match = '^cc=.*'
+ substitute = "cc='{cc}'".format(cc=self.compiler.cc)
+ filter_file(match, substitute, config_heavy, **kwargs)
+
+ match = '^ld=.*'
+ substitute = "ld='{ld}'".format(ld=self.compiler.cc)
+ filter_file(match, substitute, config_heavy, **kwargs)
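
The diff above uses Spack's `filter_file` helper; as a rough standalone illustration of what that post-install step does (not the package's actual code — the paths and compiler name below are placeholders), the same rewrite can be expressed with plain `re`:

```python
# Illustration only: rewrite the compiler that perl recorded at build time.
# `config_heavy` and `real_cc` are placeholder inputs, not values from Spack.
import re
from pathlib import Path


def patch_config_heavy(config_heavy: Path, real_cc: str) -> None:
    text = config_heavy.read_text()
    # Config_heavy.pl stores lines like cc='cc' and ld='cc' verbatim.
    text = re.sub(r"^cc=.*$", "cc='%s'" % real_cc, text, flags=re.MULTILINE)
    text = re.sub(r"^ld=.*$", "ld='%s'" % real_cc, text, flags=re.MULTILINE)
    config_heavy.write_text(text)


# Hypothetical usage:
# patch_config_heavy(Path(".../lib/5.24.1/x86_64-linux/Config_heavy.pl"), "gcc")
```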
|
{"golden_diff": "diff --git a/var/spack/repos/builtin/packages/perl/package.py b/var/spack/repos/builtin/packages/perl/package.py\n--- a/var/spack/repos/builtin/packages/perl/package.py\n+++ b/var/spack/repos/builtin/packages/perl/package.py\n@@ -31,6 +31,7 @@\n # Date: September 6, 2015\n #\n from spack import *\n+import os\n \n \n class Perl(Package): # Perl doesn't use Autotools, it should subclass Package\n@@ -160,3 +161,33 @@\n # if it does not exist already.\n if dependent_spec.package.is_extension:\n mkdirp(module.perl_lib_dir)\n+\n+ @run_after('install')\n+ def filter_config_dot_pm(self):\n+ \"\"\"Run after install so that Config.pm records the compiler that Spack\n+ built the package with. If this isn't done, $Config{cc} will\n+ be set to Spack's cc wrapper script.\n+ \"\"\"\n+\n+ kwargs = {'ignore_absent': True, 'backup': False, 'string': False}\n+\n+ # Find the actual path to the installed Config.pm file.\n+ perl = Executable(join_path(prefix.bin, 'perl'))\n+ config_dot_pm = perl('-MModule::Loaded', '-MConfig', '-e',\n+ 'print is_loaded(Config)', output=str)\n+\n+ match = 'cc *=>.*'\n+ substitute = \"cc => '{cc}',\".format(cc=self.compiler.cc)\n+ filter_file(match, substitute, config_dot_pm, **kwargs)\n+\n+ # And the path Config_heavy.pl\n+ d = os.path.dirname(config_dot_pm)\n+ config_heavy = join_path(d, 'Config_heavy.pl')\n+\n+ match = '^cc=.*'\n+ substitute = \"cc='{cc}'\".format(cc=self.compiler.cc)\n+ filter_file(match, substitute, config_heavy, **kwargs)\n+\n+ match = '^ld=.*'\n+ substitute = \"ld='{ld}'\".format(ld=self.compiler.cc)\n+ filter_file(match, substitute, config_heavy, **kwargs)\n", "issue": "perl \"remembers\" spack's compiler, which does the wrong thing\nAdditional Perl packages (e.g. from CPAN) should (need to) be built with the same compiler that built `perl` itself.\r\n\r\nPerl handles this detail by remembering the compiler with which it was built. It \"remembers\" it by stashing a copy of it in the `Config` hash defined in `Config.pm`:\r\n\r\n```perl\r\ntie %Config, 'Config', {\r\n archlibexp => '/home/hartzelg/tmp/spack-perl-cc/opt/spack/linux-centos7-x86_64/gcc-4.8.5/perl-5.24.1-35ejv4426dmzreum4ekdibu3ddmhquvi/lib/5.24.1/x86_64-linux',\r\n archname => 'x86_64-linux',\r\n cc => 'cc',\r\n # [...]\r\n```\r\n\r\nOn my system this drags `/usr/bin/cc` into the fray, resulting in sadness.\r\n\r\nAs an example, using the `cpanm` that Spack built\r\n\r\n```\r\ncpanm -l /path/to/library/dir XML::Simple\r\n```\r\n\r\nwill eventually fail while trying to build `Net::HTTP` because it tries to use `cc` as its compiler.\r\n\r\nAs a quick hack/test, telling configure to use `gcc` as the C compiler results `$Config{cc}` containing `gcc` and happily the first gcc on my path is the one I used to build Perl.\r\n\r\n```diff\r\ndiff --git a/var/spack/repos/builtin/packages/perl/package.py b/var/spack/repos/builtin/packages/perl/package.py\r\nindex 10895f60..ed30bac5 100644\r\n--- a/var/spack/repos/builtin/packages/perl/package.py\r\n+++ b/var/spack/repos/builtin/packages/perl/package.py\r\n@@ -86,7 +86,8 @@ class Perl(Package): # Perl doesn't use Autotools, it should subclass Package\r\n '-des',\r\n '-Dprefix={0}'.format(prefix),\r\n '-Dlocincpth=' + self.spec['gdbm'].prefix.include,\r\n- '-Dloclibpth=' + self.spec['gdbm'].prefix.lib\r\n+ '-Dloclibpth=' + self.spec['gdbm'].prefix.lib,\r\n+ '-Dcc=gcc'\r\n ]\r\n\r\n # Discussion of -fPIC for Intel at:\r\n```\r\n\r\nThis changes lets `Net::HTTP` build and install.\r\n\r\nSpack's compiler wrappers and *etc* seem magical to me. 
What's a good way to fix this?\r\n\r\n\r\n\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "##############################################################################\n# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.\n# Produced at the Lawrence Livermore National Laboratory.\n#\n# This file is part of Spack.\n# Created by Todd Gamblin, [email protected], All rights reserved.\n# LLNL-CODE-647188\n#\n# For details, see https://github.com/llnl/spack\n# Please also see the LICENSE file for our notice and the LGPL.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License (as\n# published by the Free Software Foundation) version 2.1, February 1999.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and\n# conditions of the GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n##############################################################################\n#\n# Author: Milton Woods <[email protected]>\n# Date: March 22, 2017\n# Author: George Hartzell <[email protected]>\n# Date: July 21, 2016\n# Author: Justin Too <[email protected]>\n# Date: September 6, 2015\n#\nfrom spack import *\n\n\nclass Perl(Package): # Perl doesn't use Autotools, it should subclass Package\n \"\"\"Perl 5 is a highly capable, feature-rich programming language with over\n 27 years of development.\"\"\"\n\n homepage = \"http://www.perl.org\"\n # URL must remain http:// so Spack can bootstrap curl\n url = \"http://www.cpan.org/src/5.0/perl-5.24.1.tar.gz\"\n\n # Development releases\n version('5.25.11', '37a398682c36cd85992b34b5c1c25dc1')\n\n # Maintenance releases (recommended)\n version('5.24.1', '765ef511b5b87a164e2531403ee16b3c', preferred=True)\n version('5.22.3', 'aa4f236dc2fc6f88b871436b8d0fda95')\n\n # Misc releases that people need\n version('5.22.2', '5767e2a10dd62a46d7b57f74a90d952b')\n version('5.22.1', '19295bbb775a3c36123161b9bf4892f1')\n\n # End of life releases\n version('5.20.3', 'd647d0ea5a7a8194c34759ab9f2610cd')\n version('5.18.4', '1f9334ff730adc05acd3dd7130d295db')\n version('5.16.3', 'eb5c40f2575df6c155bc99e3fe0a9d82')\n\n extendable = True\n\n depends_on('gdbm')\n\n # Installing cpanm alongside the core makes it safe and simple for\n # people/projects to install their own sets of perl modules. 
Not\n # having it in core increases the \"energy of activation\" for doing\n # things cleanly.\n variant('cpanm', default=True,\n description='Optionally install cpanm with the core packages.')\n\n resource(\n name=\"cpanm\",\n url=\"http://search.cpan.org/CPAN/authors/id/M/MI/MIYAGAWA/App-cpanminus-1.7042.tar.gz\",\n md5=\"e87f55fbcb3c13a4754500c18e89219f\",\n destination=\"cpanm\",\n placement=\"cpanm\"\n )\n\n phases = ['configure', 'build', 'install']\n\n def configure_args(self):\n spec = self.spec\n prefix = self.prefix\n\n config_args = [\n '-des',\n '-Dprefix={0}'.format(prefix),\n '-Dlocincpth=' + self.spec['gdbm'].prefix.include,\n '-Dloclibpth=' + self.spec['gdbm'].prefix.lib\n ]\n\n # Discussion of -fPIC for Intel at:\n # https://github.com/LLNL/spack/pull/3081\n if spec.satisfies('%intel'):\n config_args.append('-Accflags={0}'.format(self.compiler.pic_flag))\n\n return config_args\n\n def configure(self, spec, prefix):\n configure = Executable('./Configure')\n configure(*self.configure_args())\n\n def build(self, spec, prefix):\n make()\n\n @run_after('build')\n @on_package_attributes(run_tests=True)\n def test(self):\n make('test')\n\n def install(self, spec, prefix):\n make('install')\n\n @run_after('install')\n def install_cpanm(self):\n spec = self.spec\n\n if '+cpanm' in spec:\n with working_dir(join_path('cpanm', 'cpanm')):\n perl = spec['perl'].command\n perl('Makefile.PL')\n make()\n make('install')\n\n def setup_environment(self, spack_env, run_env):\n \"\"\"Set PERL5LIB to support activation of Perl packages\"\"\"\n run_env.set('PERL5LIB', join_path(self.prefix, 'lib', 'perl5'))\n\n def setup_dependent_environment(self, spack_env, run_env, dependent_spec):\n \"\"\"Set PATH and PERL5LIB to include the extension and\n any other perl extensions it depends on,\n assuming they were installed with INSTALL_BASE defined.\"\"\"\n perl_lib_dirs = []\n perl_bin_dirs = []\n for d in dependent_spec.traverse(\n deptype=('build', 'run'), deptype_query='run'):\n if d.package.extends(self.spec):\n perl_lib_dirs.append(join_path(d.prefix, 'lib', 'perl5'))\n perl_bin_dirs.append(join_path(d.prefix, 'bin'))\n perl_bin_path = ':'.join(perl_bin_dirs)\n perl_lib_path = ':'.join(perl_lib_dirs)\n spack_env.prepend_path('PATH', perl_bin_path)\n spack_env.prepend_path('PERL5LIB', perl_lib_path)\n run_env.prepend_path('PATH', perl_bin_path)\n run_env.prepend_path('PERL5LIB', perl_lib_path)\n\n def setup_dependent_package(self, module, dependent_spec):\n \"\"\"Called before perl modules' install() methods.\n In most cases, extensions will only need to have one line:\n perl('Makefile.PL','INSTALL_BASE=%s' % self.prefix)\n \"\"\"\n\n # perl extension builds can have a global perl executable function\n module.perl = Executable(join_path(self.spec.prefix.bin, 'perl'))\n\n # Add variables for library directory\n module.perl_lib_dir = join_path(dependent_spec.prefix, 'lib', 'perl5')\n\n # Make the site packages directory for extensions,\n # if it does not exist already.\n if dependent_spec.package.is_extension:\n mkdirp(module.perl_lib_dir)\n", "path": "var/spack/repos/builtin/packages/perl/package.py"}]}
| 3,253 | 466 |
gh_patches_debug_28642
|
rasdani/github-patches
|
git_diff
|
akvo__akvo-rsr-3000
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Reuse the __unicode__ on ProjectLocation for other types of locations
Also, display the country for a location, since it is inferred and not showing it in the UI makes it bad UX.
</issue>
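A plain-Python sketch of the requested behaviour — one shared string representation that also reports the (inferred) country — detached from the Django models below; field names mirror BaseLocation and the sample values are invented:

```python
# Sketch of the shared location string, detached from Django.
# Field names mirror BaseLocation; the sample values are invented.
def location_repr(latitude=None, longitude=None, country=None, name=""):
    parts = [
        "Latitude: %s" % (latitude if latitude is not None else "No latitude specified"),
        "Longitude: %s" % (longitude if longitude is not None else "No longitude specified"),
        "Country: %s" % (country if country else "No country specified"),
    ]
    suffix = " (%s)" % name if name else ""
    return ", ".join(parts) + suffix


print(location_repr(52.37, 4.89, "Netherlands", "Amsterdam office"))
# Latitude: 52.37, Longitude: 4.89, Country: Netherlands (Amsterdam office)
```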
<code>
[start of akvo/rsr/models/location.py]
1 # -*- coding: utf-8 -*-
2
3 # Akvo RSR is covered by the GNU Affero General Public License.
4 # See more details in the license.txt file located at the root folder of the Akvo RSR module.
5 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
6
7
8 from django.db import models
9 from django.utils.translation import ugettext_lazy as _
10
11 from ..fields import LatitudeField, LongitudeField, ValidXMLCharField
12 from akvo.codelists.models import (Country, GeographicExactness, GeographicLocationClass,
13 GeographicLocationReach, GeographicVocabulary, LocationType)
14 from akvo.codelists.store.codelists_v202 import (
15 COUNTRY, GEOGRAPHIC_EXACTNESS, GEOGRAPHIC_LOCATION_CLASS, GEOGRAPHIC_LOCATION_REACH,
16 GEOGRAPHIC_VOCABULARY, LOCATION_TYPE
17 )
18 from akvo.utils import codelist_choices, codelist_value, get_country
19
20
21 class BaseLocation(models.Model):
22 latitude = LatitudeField(
23 _(u'latitude'), null=True, blank=True, db_index=True, default=None,
24 help_text=_(u'Use a period to denote decimals.')
25 )
26 longitude = LongitudeField(
27 _(u'longitude'), null=True, blank=True, db_index=True, default=None,
28 help_text=_(u'Use a period to denote decimals.')
29 )
30 city = ValidXMLCharField(_(u'city'), blank=True, max_length=255)
31 state = ValidXMLCharField(_(u'state'), blank=True, max_length=255)
32 address_1 = ValidXMLCharField(_(u'address 1'), max_length=255, blank=True)
33 address_2 = ValidXMLCharField(_(u'address 2'), max_length=255, blank=True)
34 postcode = ValidXMLCharField(_(u'postal code'), max_length=10, blank=True)
35 country = models.ForeignKey('Country', null=True, blank=True, verbose_name=_(u'country'))
36
37 def delete(self, *args, **kwargs):
38 super(BaseLocation, self).delete(*args, **kwargs)
39
40 # If location_target has more locations, set the first as primary location
41 location_target = self.location_target
42 other_locations = location_target.locations.all()
43
44 if other_locations.count() > 0:
45 location_target.primary_location = other_locations.first()
46 else:
47 location_target.primary_location = None
48
49 location_target.save()
50
51 def save(self, *args, **kwargs):
52 get_country = False
53 if not self.pk:
54 get_country = True
55
56 else:
57 original = self._meta.model.objects.get(id=self.pk)
58 if original.latitude != self.latitude or original.longitude != self.longitude:
59 get_country = True
60
61 # Set a country based on the latitude and longitude if possible
62 if get_country and self.latitude is not None and self.longitude is not None:
63 self.country = self.get_country_from_lat_lon()
64 if 'update_fields' in kwargs and 'country' not in kwargs['update_fields']:
65 kwargs['update_fields'].append('country')
66
67 super(BaseLocation, self).save(*args, **kwargs)
68
69 # Set location as primary location if it is the first location
70 location_target = self.location_target
71 if location_target.primary_location is None or location_target.primary_location.pk > self.pk:
72 location_target.primary_location = self
73 location_target.save()
74
75 def get_country_from_lat_lon(self):
76 """Get the country based on the location's latitude and longitude."""
77
78 if self.latitude is None or self.longitude is None:
79 return None
80
81 try:
82 country, iso_code = get_country(float(self.latitude), float(self.longitude))
83 except ValueError:
84 iso_code = None
85
86 if iso_code is not None:
87 # FIXME: We have one too many country models!
88 Country = models.get_model('rsr', 'Country')
89 return Country.objects.filter(iso_code=iso_code).first()
90
91 class Meta:
92 app_label = 'rsr'
93 abstract = True
94 ordering = ['id', ]
95
96
97 class OrganisationLocation(BaseLocation):
98 location_target = models.ForeignKey('Organisation', related_name='locations')
99 iati_country = ValidXMLCharField(
100 _(u'country'), blank=True, max_length=2, choices=codelist_choices(COUNTRY, show_code=False),
101 help_text=_(u'The country in which the organisation is located.')
102 )
103
104 def iati_country_value(self):
105 return codelist_value(Country, self, 'iati_country')
106
107 def iati_country_value_unicode(self):
108 return unicode(self.iati_country_value())
109
110
111 class ProjectLocation(BaseLocation):
112 location_target = models.ForeignKey('Project', related_name='locations')
113
114 # Additional IATI fields
115 reference = ValidXMLCharField(
116 _(u'reference'), blank=True, max_length=50,
117 help_text=_(u'An internal reference that describes the location in the reporting '
118 u'organisation\'s own system. For reference see: '
119 u'<a href="http://iatistandard.org/202/activity-standard/iati-activities/'
120 u'iati-activity/location/#attributes" target="_blank">'
121 u'http://iatistandard.org/202/activity-standard/iati-activities/iati-activity/'
122 u'location/#attributes</a>.')
123 )
124 location_code = ValidXMLCharField(
125 _(u'code'), blank=True, max_length=25,
126 help_text=_(u'Enter a code to identify the region. Codes are based on DAC region codes. '
127 u'Where an activity is considered global, the code 998 can be used. For '
128 u'reference: <a href="http://www.oecd.org/dac/stats/dacandcrscodelists.htm" '
129 u'target="_blank">http://www.oecd.org/dac/stats/dacandcrscodelists.htm</a>.')
130 )
131 vocabulary = ValidXMLCharField(_(u'vocabulary'), blank=True, max_length=2,
132 choices=codelist_choices(GEOGRAPHIC_VOCABULARY))
133 name = ValidXMLCharField(
134 _(u'name'), blank=True, max_length=100,
135 help_text=_(u'The human-readable name for the location.')
136 )
137 description = ValidXMLCharField(
138 _(u'location description'), blank=True, max_length=2000,
139 help_text=_(u'This provides free text space for providing an additional description, if '
140 u'needed, of the actual target of the activity. A description that qualifies '
141 u'the location, not the activity.')
142 )
143 activity_description = ValidXMLCharField(
144 _(u'activity description'), blank=True, max_length=2000,
145 help_text=_(u'A description that qualifies the activity taking place at the location. '
146 u'This should not duplicate information provided in the main activity '
147 u'description, and should typically be used to distinguish between activities '
148 u'at multiple locations within a single iati-activity record.')
149 )
150 exactness = ValidXMLCharField(
151 _(u'location precision'), blank=True, max_length=1,
152 choices=codelist_choices(GEOGRAPHIC_EXACTNESS),
153 help_text=_(u'Defines whether the location represents the most distinct point reasonably '
154 u'possible for this type of activity or is an approximation due to lack of '
155 u'more detailed information.')
156 )
157 location_reach = ValidXMLCharField(
158 _(u'reach'), blank=True, max_length=1, choices=codelist_choices(GEOGRAPHIC_LOCATION_REACH),
159 help_text=_(u'Does this location describe where the activity takes place or where the '
160 u'intended beneficiaries reside?')
161 )
162 location_class = ValidXMLCharField(
163 _(u'class'), blank=True, max_length=1, choices=codelist_choices(GEOGRAPHIC_LOCATION_CLASS),
164 help_text=_(u'Does the location refer to a physical structure such as a building, a '
165 u'populated place (e.g. city or village), an administrative division, or '
166 u'another topological feature (e.g. river, nature reserve)? For reference: '
167 u'<a href="http://iatistandard.org/202/codelists/GeographicLocationClass/" '
168 u'target="_blank">http://iatistandard.org/202/codelists/'
169 u'GeographicLocationClass/</a>.')
170 )
171 feature_designation = ValidXMLCharField(
172 _(u'feature designation'), blank=True, max_length=5,
173 choices=codelist_choices(LOCATION_TYPE),
174 help_text=_(u'A more refined coded classification of the type of feature referred to by '
175 u'this location. For reference: <a href="http://iatistandard.org/202/codelists/'
176 u'LocationType/" target="_blank">http://iatistandard.org/202/codelists/'
177 u'LocationType/</a>.')
178 )
179
180 def __unicode__(self):
181 return u'{0}, {1}{2}'.format(
182 u'{0}: {1}'.format(
183 _(u'Latitude'),
184 unicode(self.latitude) if self.latitude else _(u'No latitude specified')),
185 u'{0}: {1}'.format(
186 _(u'Longitude'),
187 unicode(self.longitude) if self.longitude else _(u'No longitude specified')),
188 u' ({0})'.format(self.name) if self.name else u''
189 )
190
191 def iati_country(self):
192 return codelist_value(Country, self, 'country')
193
194 def iati_country_unicode(self):
195 return unicode(self.iati_country())
196
197 def iati_vocabulary(self):
198 return codelist_value(GeographicVocabulary, self, 'vocabulary')
199
200 def iati_vocabulary_unicode(self):
201 return unicode(self.iati_vocabulary())
202
203 def iati_exactness(self):
204 return codelist_value(GeographicExactness, self, 'exactness')
205
206 def iati_exactness_unicode(self):
207 return unicode(self.iati_exactness())
208
209 def iati_reach(self):
210 return codelist_value(GeographicLocationReach, self, 'location_reach')
211
212 def iati_reach_unicode(self):
213 return unicode(self.iati_reach())
214
215 def iati_class(self):
216 return codelist_value(GeographicLocationClass, self, 'location_class')
217
218 def iati_class_unicode(self):
219 return unicode(self.iati_class())
220
221 def iati_designation(self):
222 return codelist_value(LocationType, self, 'feature_designation')
223
224 def iati_designation_unicode(self):
225 return unicode(self.iati_designation())
226
227
228 # Over-riding fields doesn't work in Django < 1.10, and hence this hack.
229 ProjectLocation._meta.get_field('country').help_text = _(
230 u'The country or countries that benefit(s) from the activity.'
231 )
232
233
234 class AdministrativeLocation(models.Model):
235 location = models.ForeignKey(
236 'ProjectLocation', verbose_name=_(u'location'), related_name='administratives'
237 )
238 code = ValidXMLCharField(
239 _(u'administrative code'), blank=True, max_length=25,
240 help_text=_(u'Coded identification of national and sub-national divisions according to '
241 u'recognised administrative boundary repositories. Multiple levels may be '
242 u'reported.')
243 )
244 vocabulary = ValidXMLCharField(
245 _(u'administrative vocabulary'), blank=True, max_length=2,
246 choices=codelist_choices(GEOGRAPHIC_VOCABULARY),
247 help_text=_(u'For reference: <a href="http://iatistandard.org/202/codelists/'
248 u'GeographicVocabulary/" target="_blank">http://iatistandard.org/202/codelists/'
249 u'GeographicVocabulary/</a>.')
250 )
251
252 level = models.PositiveSmallIntegerField(_(u'administrative level'), blank=True, null=True)
253
254 def __unicode__(self):
255 return unicode(self.code) if self.code else u'%s' % _(u'No code specified')
256
257 def iati_vocabulary(self):
258 return codelist_value(GeographicVocabulary, self, 'vocabulary')
259
260 def iati_vocabulary_unicode(self):
261 return unicode(self.iati_vocabulary())
262
263 class Meta:
264 app_label = 'rsr'
265 verbose_name = _(u'location administrative')
266 verbose_name_plural = _(u'location administratives')
267
268
269 class ProjectUpdateLocation(BaseLocation):
270 location_target = models.ForeignKey('ProjectUpdate', related_name='locations')
271
[end of akvo/rsr/models/location.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/akvo/rsr/models/location.py b/akvo/rsr/models/location.py
--- a/akvo/rsr/models/location.py
+++ b/akvo/rsr/models/location.py
@@ -34,6 +34,20 @@
postcode = ValidXMLCharField(_(u'postal code'), max_length=10, blank=True)
country = models.ForeignKey('Country', null=True, blank=True, verbose_name=_(u'country'))
+ def __unicode__(self):
+ return u'{0}, {1}, {2}{3}'.format(
+ u'{0}: {1}'.format(
+ _(u'Latitude'),
+ unicode(self.latitude) if self.latitude else _(u'No latitude specified')),
+ u'{0}: {1}'.format(
+ _(u'Longitude'),
+ unicode(self.longitude) if self.longitude else _(u'No longitude specified')),
+ u'{0}: {1}'.format(
+ _(u'Country'),
+ unicode(self.country.name) if self.country else _(u'No country specified')),
+ u' ({0})'.format(self.name) if getattr(self, 'name', None) else u''
+ )
+
def delete(self, *args, **kwargs):
super(BaseLocation, self).delete(*args, **kwargs)
@@ -177,17 +191,6 @@
u'LocationType/</a>.')
)
- def __unicode__(self):
- return u'{0}, {1}{2}'.format(
- u'{0}: {1}'.format(
- _(u'Latitude'),
- unicode(self.latitude) if self.latitude else _(u'No latitude specified')),
- u'{0}: {1}'.format(
- _(u'Longitude'),
- unicode(self.longitude) if self.longitude else _(u'No longitude specified')),
- u' ({0})'.format(self.name) if self.name else u''
- )
-
def iati_country(self):
return codelist_value(Country, self, 'country')
|
{"golden_diff": "diff --git a/akvo/rsr/models/location.py b/akvo/rsr/models/location.py\n--- a/akvo/rsr/models/location.py\n+++ b/akvo/rsr/models/location.py\n@@ -34,6 +34,20 @@\n postcode = ValidXMLCharField(_(u'postal code'), max_length=10, blank=True)\n country = models.ForeignKey('Country', null=True, blank=True, verbose_name=_(u'country'))\n \n+ def __unicode__(self):\n+ return u'{0}, {1}, {2}{3}'.format(\n+ u'{0}: {1}'.format(\n+ _(u'Latitude'),\n+ unicode(self.latitude) if self.latitude else _(u'No latitude specified')),\n+ u'{0}: {1}'.format(\n+ _(u'Longitude'),\n+ unicode(self.longitude) if self.longitude else _(u'No longitude specified')),\n+ u'{0}: {1}'.format(\n+ _(u'Country'),\n+ unicode(self.country.name) if self.country else _(u'No country specified')),\n+ u' ({0})'.format(self.name) if getattr(self, 'name', None) else u''\n+ )\n+\n def delete(self, *args, **kwargs):\n super(BaseLocation, self).delete(*args, **kwargs)\n \n@@ -177,17 +191,6 @@\n u'LocationType/</a>.')\n )\n \n- def __unicode__(self):\n- return u'{0}, {1}{2}'.format(\n- u'{0}: {1}'.format(\n- _(u'Latitude'),\n- unicode(self.latitude) if self.latitude else _(u'No latitude specified')),\n- u'{0}: {1}'.format(\n- _(u'Longitude'),\n- unicode(self.longitude) if self.longitude else _(u'No longitude specified')),\n- u' ({0})'.format(self.name) if self.name else u''\n- )\n-\n def iati_country(self):\n return codelist_value(Country, self, 'country')\n", "issue": "Reuse the __unicode__ on ProjectLocation other types of locations\nAlso, display the country for a location, since it is inferred and not showing it in the UI makes it bad UX. \n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\n\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom ..fields import LatitudeField, LongitudeField, ValidXMLCharField\nfrom akvo.codelists.models import (Country, GeographicExactness, GeographicLocationClass,\n GeographicLocationReach, GeographicVocabulary, LocationType)\nfrom akvo.codelists.store.codelists_v202 import (\n COUNTRY, GEOGRAPHIC_EXACTNESS, GEOGRAPHIC_LOCATION_CLASS, GEOGRAPHIC_LOCATION_REACH,\n GEOGRAPHIC_VOCABULARY, LOCATION_TYPE\n)\nfrom akvo.utils import codelist_choices, codelist_value, get_country\n\n\nclass BaseLocation(models.Model):\n latitude = LatitudeField(\n _(u'latitude'), null=True, blank=True, db_index=True, default=None,\n help_text=_(u'Use a period to denote decimals.')\n )\n longitude = LongitudeField(\n _(u'longitude'), null=True, blank=True, db_index=True, default=None,\n help_text=_(u'Use a period to denote decimals.')\n )\n city = ValidXMLCharField(_(u'city'), blank=True, max_length=255)\n state = ValidXMLCharField(_(u'state'), blank=True, max_length=255)\n address_1 = ValidXMLCharField(_(u'address 1'), max_length=255, blank=True)\n address_2 = ValidXMLCharField(_(u'address 2'), max_length=255, blank=True)\n postcode = ValidXMLCharField(_(u'postal code'), max_length=10, blank=True)\n country = models.ForeignKey('Country', null=True, blank=True, verbose_name=_(u'country'))\n\n def delete(self, *args, **kwargs):\n super(BaseLocation, self).delete(*args, **kwargs)\n\n # If location_target has more locations, set the first as primary location\n location_target = self.location_target\n 
other_locations = location_target.locations.all()\n\n if other_locations.count() > 0:\n location_target.primary_location = other_locations.first()\n else:\n location_target.primary_location = None\n\n location_target.save()\n\n def save(self, *args, **kwargs):\n get_country = False\n if not self.pk:\n get_country = True\n\n else:\n original = self._meta.model.objects.get(id=self.pk)\n if original.latitude != self.latitude or original.longitude != self.longitude:\n get_country = True\n\n # Set a country based on the latitude and longitude if possible\n if get_country and self.latitude is not None and self.longitude is not None:\n self.country = self.get_country_from_lat_lon()\n if 'update_fields' in kwargs and 'country' not in kwargs['update_fields']:\n kwargs['update_fields'].append('country')\n\n super(BaseLocation, self).save(*args, **kwargs)\n\n # Set location as primary location if it is the first location\n location_target = self.location_target\n if location_target.primary_location is None or location_target.primary_location.pk > self.pk:\n location_target.primary_location = self\n location_target.save()\n\n def get_country_from_lat_lon(self):\n \"\"\"Get the country based on the location's latitude and longitude.\"\"\"\n\n if self.latitude is None or self.longitude is None:\n return None\n\n try:\n country, iso_code = get_country(float(self.latitude), float(self.longitude))\n except ValueError:\n iso_code = None\n\n if iso_code is not None:\n # FIXME: We have one too many country models!\n Country = models.get_model('rsr', 'Country')\n return Country.objects.filter(iso_code=iso_code).first()\n\n class Meta:\n app_label = 'rsr'\n abstract = True\n ordering = ['id', ]\n\n\nclass OrganisationLocation(BaseLocation):\n location_target = models.ForeignKey('Organisation', related_name='locations')\n iati_country = ValidXMLCharField(\n _(u'country'), blank=True, max_length=2, choices=codelist_choices(COUNTRY, show_code=False),\n help_text=_(u'The country in which the organisation is located.')\n )\n\n def iati_country_value(self):\n return codelist_value(Country, self, 'iati_country')\n\n def iati_country_value_unicode(self):\n return unicode(self.iati_country_value())\n\n\nclass ProjectLocation(BaseLocation):\n location_target = models.ForeignKey('Project', related_name='locations')\n\n # Additional IATI fields\n reference = ValidXMLCharField(\n _(u'reference'), blank=True, max_length=50,\n help_text=_(u'An internal reference that describes the location in the reporting '\n u'organisation\\'s own system. For reference see: '\n u'<a href=\"http://iatistandard.org/202/activity-standard/iati-activities/'\n u'iati-activity/location/#attributes\" target=\"_blank\">'\n u'http://iatistandard.org/202/activity-standard/iati-activities/iati-activity/'\n u'location/#attributes</a>.')\n )\n location_code = ValidXMLCharField(\n _(u'code'), blank=True, max_length=25,\n help_text=_(u'Enter a code to identify the region. Codes are based on DAC region codes. '\n u'Where an activity is considered global, the code 998 can be used. 
For '\n u'reference: <a href=\"http://www.oecd.org/dac/stats/dacandcrscodelists.htm\" '\n u'target=\"_blank\">http://www.oecd.org/dac/stats/dacandcrscodelists.htm</a>.')\n )\n vocabulary = ValidXMLCharField(_(u'vocabulary'), blank=True, max_length=2,\n choices=codelist_choices(GEOGRAPHIC_VOCABULARY))\n name = ValidXMLCharField(\n _(u'name'), blank=True, max_length=100,\n help_text=_(u'The human-readable name for the location.')\n )\n description = ValidXMLCharField(\n _(u'location description'), blank=True, max_length=2000,\n help_text=_(u'This provides free text space for providing an additional description, if '\n u'needed, of the actual target of the activity. A description that qualifies '\n u'the location, not the activity.')\n )\n activity_description = ValidXMLCharField(\n _(u'activity description'), blank=True, max_length=2000,\n help_text=_(u'A description that qualifies the activity taking place at the location. '\n u'This should not duplicate information provided in the main activity '\n u'description, and should typically be used to distinguish between activities '\n u'at multiple locations within a single iati-activity record.')\n )\n exactness = ValidXMLCharField(\n _(u'location precision'), blank=True, max_length=1,\n choices=codelist_choices(GEOGRAPHIC_EXACTNESS),\n help_text=_(u'Defines whether the location represents the most distinct point reasonably '\n u'possible for this type of activity or is an approximation due to lack of '\n u'more detailed information.')\n )\n location_reach = ValidXMLCharField(\n _(u'reach'), blank=True, max_length=1, choices=codelist_choices(GEOGRAPHIC_LOCATION_REACH),\n help_text=_(u'Does this location describe where the activity takes place or where the '\n u'intended beneficiaries reside?')\n )\n location_class = ValidXMLCharField(\n _(u'class'), blank=True, max_length=1, choices=codelist_choices(GEOGRAPHIC_LOCATION_CLASS),\n help_text=_(u'Does the location refer to a physical structure such as a building, a '\n u'populated place (e.g. city or village), an administrative division, or '\n u'another topological feature (e.g. river, nature reserve)? For reference: '\n u'<a href=\"http://iatistandard.org/202/codelists/GeographicLocationClass/\" '\n u'target=\"_blank\">http://iatistandard.org/202/codelists/'\n u'GeographicLocationClass/</a>.')\n )\n feature_designation = ValidXMLCharField(\n _(u'feature designation'), blank=True, max_length=5,\n choices=codelist_choices(LOCATION_TYPE),\n help_text=_(u'A more refined coded classification of the type of feature referred to by '\n u'this location. 
For reference: <a href=\"http://iatistandard.org/202/codelists/'\n u'LocationType/\" target=\"_blank\">http://iatistandard.org/202/codelists/'\n u'LocationType/</a>.')\n )\n\n def __unicode__(self):\n return u'{0}, {1}{2}'.format(\n u'{0}: {1}'.format(\n _(u'Latitude'),\n unicode(self.latitude) if self.latitude else _(u'No latitude specified')),\n u'{0}: {1}'.format(\n _(u'Longitude'),\n unicode(self.longitude) if self.longitude else _(u'No longitude specified')),\n u' ({0})'.format(self.name) if self.name else u''\n )\n\n def iati_country(self):\n return codelist_value(Country, self, 'country')\n\n def iati_country_unicode(self):\n return unicode(self.iati_country())\n\n def iati_vocabulary(self):\n return codelist_value(GeographicVocabulary, self, 'vocabulary')\n\n def iati_vocabulary_unicode(self):\n return unicode(self.iati_vocabulary())\n\n def iati_exactness(self):\n return codelist_value(GeographicExactness, self, 'exactness')\n\n def iati_exactness_unicode(self):\n return unicode(self.iati_exactness())\n\n def iati_reach(self):\n return codelist_value(GeographicLocationReach, self, 'location_reach')\n\n def iati_reach_unicode(self):\n return unicode(self.iati_reach())\n\n def iati_class(self):\n return codelist_value(GeographicLocationClass, self, 'location_class')\n\n def iati_class_unicode(self):\n return unicode(self.iati_class())\n\n def iati_designation(self):\n return codelist_value(LocationType, self, 'feature_designation')\n\n def iati_designation_unicode(self):\n return unicode(self.iati_designation())\n\n\n# Over-riding fields doesn't work in Django < 1.10, and hence this hack.\nProjectLocation._meta.get_field('country').help_text = _(\n u'The country or countries that benefit(s) from the activity.'\n)\n\n\nclass AdministrativeLocation(models.Model):\n location = models.ForeignKey(\n 'ProjectLocation', verbose_name=_(u'location'), related_name='administratives'\n )\n code = ValidXMLCharField(\n _(u'administrative code'), blank=True, max_length=25,\n help_text=_(u'Coded identification of national and sub-national divisions according to '\n u'recognised administrative boundary repositories. Multiple levels may be '\n u'reported.')\n )\n vocabulary = ValidXMLCharField(\n _(u'administrative vocabulary'), blank=True, max_length=2,\n choices=codelist_choices(GEOGRAPHIC_VOCABULARY),\n help_text=_(u'For reference: <a href=\"http://iatistandard.org/202/codelists/'\n u'GeographicVocabulary/\" target=\"_blank\">http://iatistandard.org/202/codelists/'\n u'GeographicVocabulary/</a>.')\n )\n\n level = models.PositiveSmallIntegerField(_(u'administrative level'), blank=True, null=True)\n\n def __unicode__(self):\n return unicode(self.code) if self.code else u'%s' % _(u'No code specified')\n\n def iati_vocabulary(self):\n return codelist_value(GeographicVocabulary, self, 'vocabulary')\n\n def iati_vocabulary_unicode(self):\n return unicode(self.iati_vocabulary())\n\n class Meta:\n app_label = 'rsr'\n verbose_name = _(u'location administrative')\n verbose_name_plural = _(u'location administratives')\n\n\nclass ProjectUpdateLocation(BaseLocation):\n location_target = models.ForeignKey('ProjectUpdate', related_name='locations')\n", "path": "akvo/rsr/models/location.py"}]}
| 4,024 | 450 |
gh_patches_debug_27023
|
rasdani/github-patches
|
git_diff
|
pypa__cibuildwheel-455
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Windows: urlopen error [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed
Hello,
Can you, please, help me to debug https://travis-ci.org/github/ets-labs/python-dependency-injector/jobs/739050288?
It occurs only on Windows. Error says: ``urllib.error.URLError: <urlopen error [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1108)>``. I'm unclear whose certificate has expired.
Best,
Roman
</issue>
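The "unable to get local issuer certificate" part of that message usually points at the CA bundle used for verification rather than at an expired certificate; a minimal check is to hand `urlopen` an explicit bundle (sketch below, with `certifi` as an assumed extra dependency and an example URL):

```python
# Minimal check: fetch a URL with an explicit CA bundle instead of the OS store.
# certifi is an assumed extra dependency here; the URL is only an example.
import ssl
import urllib.request

import certifi

url = "https://pypi.org/simple/"
context = ssl.create_default_context(cafile=certifi.where())
with urllib.request.urlopen(url, context=context) as response:
    print(response.status, "-", len(response.read()), "bytes")
```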
<code>
[start of setup.py]
1 # -*- coding: utf-8 -*-
2 from pathlib import Path
3
4 try:
5 from setuptools import setup
6 except ImportError:
7 from distutils.core import setup
8
9 this_directory = Path(__file__).parent
10 long_description = (this_directory / 'README.md').read_text(encoding='utf-8')
11
12 setup(
13 name='cibuildwheel',
14 version='1.6.3',
15 install_requires=['bashlex!=0.13', 'toml'],
16 description="Build Python wheels on CI with minimal configuration.",
17 long_description=long_description,
18 long_description_content_type='text/markdown',
19 author="Joe Rickerby",
20 author_email='[email protected]',
21 url='https://github.com/joerick/cibuildwheel',
22 project_urls={
23 'Changelog': 'https://github.com/joerick/cibuildwheel#changelog',
24 'Documentation': 'https://cibuildwheel.readthedocs.io/',
25 },
26 packages=['cibuildwheel', ],
27 license="BSD",
28 zip_safe=False,
29 package_data={
30 'cibuildwheel': ['resources/*'],
31 },
32 # Supported python versions
33 python_requires='>=3.6',
34 keywords='ci wheel packaging pypi travis appveyor macos linux windows',
35 classifiers=[
36 'Intended Audience :: Developers',
37 'Natural Language :: English',
38 'Programming Language :: Python :: 3',
39 'Development Status :: 5 - Production/Stable',
40 'License :: OSI Approved :: BSD License',
41 'Programming Language :: Python :: Implementation :: CPython',
42 'Topic :: Software Development :: Build Tools',
43 ],
44 entry_points={
45 'console_scripts': [
46 'cibuildwheel = cibuildwheel.__main__:main',
47 ],
48 },
49 )
50
[end of setup.py]
[start of cibuildwheel/util.py]
1 import os
2 import textwrap
3 import urllib.request
4 from fnmatch import fnmatch
5 from pathlib import Path
6 from time import sleep
7
8 from typing import Dict, List, NamedTuple, Optional, Union
9
10 from .environment import ParsedEnvironment
11
12
13 def prepare_command(command: str, **kwargs: Union[str, os.PathLike]) -> str:
14 '''
15 Preprocesses a command by expanding variables like {python}.
16
17 For example, used in the test_command option to specify the path to the
18 project's root.
19 '''
20 return command.format(python='python', pip='pip', **kwargs)
21
22
23 def get_build_verbosity_extra_flags(level: int) -> List[str]:
24 if level > 0:
25 return ['-' + level * 'v']
26 elif level < 0:
27 return ['-' + -level * 'q']
28 else:
29 return []
30
31
32 class BuildSelector:
33 def __init__(self, build_config: str, skip_config: str):
34 self.build_patterns = build_config.split()
35 self.skip_patterns = skip_config.split()
36
37 def __call__(self, build_id: str) -> bool:
38 def match_any(patterns: List[str]) -> bool:
39 return any(fnmatch(build_id, pattern) for pattern in patterns)
40 return match_any(self.build_patterns) and not match_any(self.skip_patterns)
41
42 def __repr__(self) -> str:
43 return f'BuildSelector({" ".join(self.build_patterns)!r} - {" ".join(self.skip_patterns)!r})'
44
45
46 # Taken from https://stackoverflow.com/a/107717
47 class Unbuffered:
48 def __init__(self, stream): # type: ignore
49 self.stream = stream
50
51 def write(self, data): # type: ignore
52 self.stream.write(data)
53 self.stream.flush()
54
55 def writelines(self, datas): # type: ignore
56 self.stream.writelines(datas)
57 self.stream.flush()
58
59 def __getattr__(self, attr): # type: ignore
60 return getattr(self.stream, attr)
61
62
63 def download(url: str, dest: Path) -> None:
64 print(f'+ Download {url} to {dest}')
65 dest_dir = dest.parent
66 if not dest_dir.exists():
67 dest_dir.mkdir(parents=True)
68
69 repeat_num = 3
70 for i in range(repeat_num):
71 try:
72 response = urllib.request.urlopen(url)
73 except Exception:
74 if i == repeat_num - 1:
75 raise
76 sleep(3)
77 continue
78 break
79
80 try:
81 dest.write_bytes(response.read())
82 finally:
83 response.close()
84
85
86 class DependencyConstraints:
87 def __init__(self, base_file_path: Path):
88 assert base_file_path.exists()
89 self.base_file_path = base_file_path.resolve()
90
91 @staticmethod
92 def with_defaults() -> 'DependencyConstraints':
93 return DependencyConstraints(
94 base_file_path=resources_dir / 'constraints.txt'
95 )
96
97 def get_for_python_version(self, version: str) -> Path:
98 version_parts = version.split('.')
99
100 # try to find a version-specific dependency file e.g. if
101 # ./constraints.txt is the base, look for ./constraints-python27.txt
102 specific_stem = self.base_file_path.stem + f'-python{version_parts[0]}{version_parts[1]}'
103 specific_name = specific_stem + self.base_file_path.suffix
104 specific_file_path = self.base_file_path.with_name(specific_name)
105 if specific_file_path.exists():
106 return specific_file_path
107 else:
108 return self.base_file_path
109
110 def __str__(self):
111 return f"File '{self.base_file_path}'"
112
113
114 class BuildOptions(NamedTuple):
115 package_dir: Path
116 output_dir: Path
117 build_selector: BuildSelector
118 environment: ParsedEnvironment
119 before_all: str
120 before_build: Optional[str]
121 repair_command: str
122 manylinux_images: Optional[Dict[str, str]]
123 dependency_constraints: Optional[DependencyConstraints]
124 test_command: Optional[str]
125 before_test: Optional[str]
126 test_requires: List[str]
127 test_extras: str
128 build_verbosity: int
129
130
131 resources_dir = Path(__file__).resolve().parent / 'resources'
132 get_pip_script = resources_dir / 'get-pip.py'
133 install_certifi_script = resources_dir / "install_certifi.py"
134
135
136 class NonPlatformWheelError(Exception):
137 def __init__(self) -> None:
138 message = textwrap.dedent('''
139 cibuildwheel: Build failed because a pure Python wheel was generated.
140
141 If you intend to build a pure-Python wheel, you don't need cibuildwheel - use
142 `pip wheel -w DEST_DIR .` instead.
143
144 If you expected a platform wheel, check your project configuration, or run
145 cibuildwheel with CIBW_BUILD_VERBOSITY=1 to view build logs.
146 ''')
147
148 super().__init__(message)
149
[end of cibuildwheel/util.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/cibuildwheel/util.py b/cibuildwheel/util.py
--- a/cibuildwheel/util.py
+++ b/cibuildwheel/util.py
@@ -1,6 +1,8 @@
import os
import textwrap
+import certifi
import urllib.request
+import ssl
from fnmatch import fnmatch
from pathlib import Path
from time import sleep
@@ -66,10 +68,14 @@
if not dest_dir.exists():
dest_dir.mkdir(parents=True)
+ # we've had issues when relying on the host OS' CA certificates on Windows,
+ # so we use certifi (this sounds odd but requests also does this by default)
+ cafile = os.environ.get('SSL_CERT_FILE', certifi.where())
+ context = ssl.create_default_context(cafile=cafile)
repeat_num = 3
for i in range(repeat_num):
try:
- response = urllib.request.urlopen(url)
+ response = urllib.request.urlopen(url, context=context)
except Exception:
if i == repeat_num - 1:
raise
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -12,7 +12,7 @@
setup(
name='cibuildwheel',
version='1.6.3',
- install_requires=['bashlex!=0.13', 'toml'],
+ install_requires=['bashlex!=0.13', 'toml', 'certifi'],
description="Build Python wheels on CI with minimal configuration.",
long_description=long_description,
long_description_content_type='text/markdown',
|
{"golden_diff": "diff --git a/cibuildwheel/util.py b/cibuildwheel/util.py\n--- a/cibuildwheel/util.py\n+++ b/cibuildwheel/util.py\n@@ -1,6 +1,8 @@\n import os\n import textwrap\n+import certifi\n import urllib.request\n+import ssl\n from fnmatch import fnmatch\n from pathlib import Path\n from time import sleep\n@@ -66,10 +68,14 @@\n if not dest_dir.exists():\n dest_dir.mkdir(parents=True)\n \n+ # we've had issues when relying on the host OS' CA certificates on Windows,\n+ # so we use certifi (this sounds odd but requests also does this by default)\n+ cafile = os.environ.get('SSL_CERT_FILE', certifi.where())\n+ context = ssl.create_default_context(cafile=cafile)\n repeat_num = 3\n for i in range(repeat_num):\n try:\n- response = urllib.request.urlopen(url)\n+ response = urllib.request.urlopen(url, context=context)\n except Exception:\n if i == repeat_num - 1:\n raise\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -12,7 +12,7 @@\n setup(\n name='cibuildwheel',\n version='1.6.3',\n- install_requires=['bashlex!=0.13', 'toml'],\n+ install_requires=['bashlex!=0.13', 'toml', 'certifi'],\n description=\"Build Python wheels on CI with minimal configuration.\",\n long_description=long_description,\n long_description_content_type='text/markdown',\n", "issue": "Windows: urlopen error [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed\nHello,\r\n\r\nCan you, please, help me to debug https://travis-ci.org/github/ets-labs/python-dependency-injector/jobs/739050288?\r\n\r\nIt occurs only on Windows. Error says: ``urllib.error.URLError: <urlopen error [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1108)>``. I'm unclear whos certificate has expired.\r\n\r\nBest,\r\nRoman\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom pathlib import Path\n\ntry:\n from setuptools import setup\nexcept ImportError:\n from distutils.core import setup\n\nthis_directory = Path(__file__).parent\nlong_description = (this_directory / 'README.md').read_text(encoding='utf-8')\n\nsetup(\n name='cibuildwheel',\n version='1.6.3',\n install_requires=['bashlex!=0.13', 'toml'],\n description=\"Build Python wheels on CI with minimal configuration.\",\n long_description=long_description,\n long_description_content_type='text/markdown',\n author=\"Joe Rickerby\",\n author_email='[email protected]',\n url='https://github.com/joerick/cibuildwheel',\n project_urls={\n 'Changelog': 'https://github.com/joerick/cibuildwheel#changelog',\n 'Documentation': 'https://cibuildwheel.readthedocs.io/',\n },\n packages=['cibuildwheel', ],\n license=\"BSD\",\n zip_safe=False,\n package_data={\n 'cibuildwheel': ['resources/*'],\n },\n # Supported python versions\n python_requires='>=3.6',\n keywords='ci wheel packaging pypi travis appveyor macos linux windows',\n classifiers=[\n 'Intended Audience :: Developers',\n 'Natural Language :: English',\n 'Programming Language :: Python :: 3',\n 'Development Status :: 5 - Production/Stable',\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Topic :: Software Development :: Build Tools',\n ],\n entry_points={\n 'console_scripts': [\n 'cibuildwheel = cibuildwheel.__main__:main',\n ],\n },\n)\n", "path": "setup.py"}, {"content": "import os\nimport textwrap\nimport urllib.request\nfrom fnmatch import fnmatch\nfrom pathlib import Path\nfrom time import sleep\n\nfrom typing import Dict, List, NamedTuple, Optional, Union\n\nfrom .environment import 
ParsedEnvironment\n\n\ndef prepare_command(command: str, **kwargs: Union[str, os.PathLike]) -> str:\n '''\n Preprocesses a command by expanding variables like {python}.\n\n For example, used in the test_command option to specify the path to the\n project's root.\n '''\n return command.format(python='python', pip='pip', **kwargs)\n\n\ndef get_build_verbosity_extra_flags(level: int) -> List[str]:\n if level > 0:\n return ['-' + level * 'v']\n elif level < 0:\n return ['-' + -level * 'q']\n else:\n return []\n\n\nclass BuildSelector:\n def __init__(self, build_config: str, skip_config: str):\n self.build_patterns = build_config.split()\n self.skip_patterns = skip_config.split()\n\n def __call__(self, build_id: str) -> bool:\n def match_any(patterns: List[str]) -> bool:\n return any(fnmatch(build_id, pattern) for pattern in patterns)\n return match_any(self.build_patterns) and not match_any(self.skip_patterns)\n\n def __repr__(self) -> str:\n return f'BuildSelector({\" \".join(self.build_patterns)!r} - {\" \".join(self.skip_patterns)!r})'\n\n\n# Taken from https://stackoverflow.com/a/107717\nclass Unbuffered:\n def __init__(self, stream): # type: ignore\n self.stream = stream\n\n def write(self, data): # type: ignore\n self.stream.write(data)\n self.stream.flush()\n\n def writelines(self, datas): # type: ignore\n self.stream.writelines(datas)\n self.stream.flush()\n\n def __getattr__(self, attr): # type: ignore\n return getattr(self.stream, attr)\n\n\ndef download(url: str, dest: Path) -> None:\n print(f'+ Download {url} to {dest}')\n dest_dir = dest.parent\n if not dest_dir.exists():\n dest_dir.mkdir(parents=True)\n\n repeat_num = 3\n for i in range(repeat_num):\n try:\n response = urllib.request.urlopen(url)\n except Exception:\n if i == repeat_num - 1:\n raise\n sleep(3)\n continue\n break\n\n try:\n dest.write_bytes(response.read())\n finally:\n response.close()\n\n\nclass DependencyConstraints:\n def __init__(self, base_file_path: Path):\n assert base_file_path.exists()\n self.base_file_path = base_file_path.resolve()\n\n @staticmethod\n def with_defaults() -> 'DependencyConstraints':\n return DependencyConstraints(\n base_file_path=resources_dir / 'constraints.txt'\n )\n\n def get_for_python_version(self, version: str) -> Path:\n version_parts = version.split('.')\n\n # try to find a version-specific dependency file e.g. 
if\n # ./constraints.txt is the base, look for ./constraints-python27.txt\n specific_stem = self.base_file_path.stem + f'-python{version_parts[0]}{version_parts[1]}'\n specific_name = specific_stem + self.base_file_path.suffix\n specific_file_path = self.base_file_path.with_name(specific_name)\n if specific_file_path.exists():\n return specific_file_path\n else:\n return self.base_file_path\n\n def __str__(self):\n return f\"File '{self.base_file_path}'\"\n\n\nclass BuildOptions(NamedTuple):\n package_dir: Path\n output_dir: Path\n build_selector: BuildSelector\n environment: ParsedEnvironment\n before_all: str\n before_build: Optional[str]\n repair_command: str\n manylinux_images: Optional[Dict[str, str]]\n dependency_constraints: Optional[DependencyConstraints]\n test_command: Optional[str]\n before_test: Optional[str]\n test_requires: List[str]\n test_extras: str\n build_verbosity: int\n\n\nresources_dir = Path(__file__).resolve().parent / 'resources'\nget_pip_script = resources_dir / 'get-pip.py'\ninstall_certifi_script = resources_dir / \"install_certifi.py\"\n\n\nclass NonPlatformWheelError(Exception):\n def __init__(self) -> None:\n message = textwrap.dedent('''\n cibuildwheel: Build failed because a pure Python wheel was generated.\n\n If you intend to build a pure-Python wheel, you don't need cibuildwheel - use\n `pip wheel -w DEST_DIR .` instead.\n\n If you expected a platform wheel, check your project configuration, or run\n cibuildwheel with CIBW_BUILD_VERBOSITY=1 to view build logs.\n ''')\n\n super().__init__(message)\n", "path": "cibuildwheel/util.py"}]}
| 2,560 | 358 |
gh_patches_debug_21297
|
rasdani/github-patches
|
git_diff
|
rucio__rucio-980
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
setup_clients.py classifiers needs to be a list, not tuples
Motivation
----------
Classifiers were changed to tuple, which does not work, needs to be a list.
</issue>
<code>
[start of setup_rucio_client.py]
1 # Copyright 2014-2018 CERN for the benefit of the ATLAS collaboration.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 #
15 # Authors:
16 # - Vincent Garonne <[email protected]>, 2014-2018
17 # - Martin Barisits <[email protected]>, 2017
18
19 import os
20 import re
21 import shutil
22 import subprocess
23 import sys
24
25 from distutils.command.sdist import sdist as _sdist # pylint:disable=no-name-in-module,import-error
26 from setuptools import setup
27
28 sys.path.insert(0, os.path.abspath('lib/'))
29
30 from rucio import version # noqa
31
32 if sys.version_info < (2, 5):
33 print('ERROR: Rucio requires at least Python 2.6 to run.')
34 sys.exit(1)
35 sys.path.insert(0, os.path.abspath('lib/'))
36
37
38 # Arguments to the setup script to build Basic/Lite distributions
39 COPY_ARGS = sys.argv[1:]
40 NAME = 'rucio-clients'
41 IS_RELEASE = False
42 PACKAGES = ['rucio', 'rucio.client', 'rucio.common',
43 'rucio.rse.protocols', 'rucio.rse', 'rucio.tests']
44 REQUIREMENTS_FILES = ['tools/pip-requires-client']
45 DESCRIPTION = "Rucio Client Lite Package"
46 DATA_FILES = [('etc/', ['etc/rse-accounts.cfg.template', 'etc/rucio.cfg.template', 'etc/rucio.cfg.atlas.client.template']),
47 ('tools/', ['tools/pip-requires-client', ]), ]
48
49 SCRIPTS = ['bin/rucio', 'bin/rucio-admin']
50 if os.path.exists('build/'):
51 shutil.rmtree('build/')
52 if os.path.exists('lib/rucio_clients.egg-info/'):
53 shutil.rmtree('lib/rucio_clients.egg-info/')
54 if os.path.exists('lib/rucio.egg-info/'):
55 shutil.rmtree('lib/rucio.egg-info/')
56
57 SSH_EXTRAS = ['paramiko==1.18.4']
58 KERBEROS_EXTRAS = ['kerberos>=1.2.5', 'pykerberos>=1.1.14', 'requests-kerberos>=0.11.0']
59 SWIFT_EXTRAS = ['python-swiftclient>=3.5.0', ]
60 EXTRAS_REQUIRES = dict(ssh=SSH_EXTRAS,
61 kerberos=KERBEROS_EXTRAS,
62 swift=SWIFT_EXTRAS)
63
64 if '--release' in COPY_ARGS:
65 IS_RELEASE = True
66 COPY_ARGS.remove('--release')
67
68
69 # If Sphinx is installed on the box running setup.py,
70 # enable setup.py to build the documentation, otherwise,
71 # just ignore it
72 cmdclass = {}
73
74 try:
75 from sphinx.setup_command import BuildDoc
76
77 class local_BuildDoc(BuildDoc):
78 '''
79 local_BuildDoc
80 '''
81 def run(self):
82 '''
83 run
84 '''
85 for builder in ['html']: # 'man','latex'
86 self.builder = builder
87 self.finalize_options()
88 BuildDoc.run(self)
89 cmdclass['build_sphinx'] = local_BuildDoc
90 except Exception:
91 pass
92
93
94 def get_reqs_from_file(requirements_file):
95 '''
96 get_reqs_from_file
97 '''
98 if os.path.exists(requirements_file):
99 return open(requirements_file, 'r').read().split('\n')
100 return []
101
102
103 def parse_requirements(requirements_files):
104 '''
105 parse_requirements
106 '''
107 requirements = []
108 for requirements_file in requirements_files:
109 for line in get_reqs_from_file(requirements_file):
110 if re.match(r'\s*-e\s+', line):
111 requirements.append(re.sub(r'\s*-e\s+.*#egg=(.*)$', r'\1', line))
112 elif re.match(r'\s*-f\s+', line):
113 pass
114 else:
115 requirements.append(line)
116 return requirements
117
118
119 def parse_dependency_links(requirements_files):
120 '''
121 parse_dependency_links
122 '''
123 dependency_links = []
124 for requirements_file in requirements_files:
125 for line in get_reqs_from_file(requirements_file):
126 if re.match(r'(\s*#)|(\s*$)', line):
127 continue
128 if re.match(r'\s*-[ef]\s+', line):
129 dependency_links.append(re.sub(r'\s*-[ef]\s+', '', line))
130 return dependency_links
131
132
133 def write_requirements():
134 '''
135 write_requirements
136 '''
137 venv = os.environ.get('VIRTUAL_ENV', None)
138 if venv is not None:
139 req_file = open("requirements.txt", "w")
140 output = subprocess.Popen(["pip", "freeze", "-l"], stdout=subprocess.PIPE)
141 requirements = output.communicate()[0].strip()
142 req_file.write(requirements)
143 req_file.close()
144
145
146 REQUIRES = parse_requirements(requirements_files=REQUIREMENTS_FILES)
147 DEPEND_LINKS = parse_dependency_links(requirements_files=REQUIREMENTS_FILES)
148
149
150 class CustomSdist(_sdist):
151 '''
152 CustomSdist
153 '''
154 user_options = [
155 ('packaging=', None, "Some option to indicate what should be packaged")
156 ] + _sdist.user_options
157
158 def __init__(self, *args, **kwargs):
159 '''
160 __init__
161 '''
162 _sdist.__init__(self, *args, **kwargs)
163 self.packaging = "default value for this option"
164
165 def get_file_list(self):
166 '''
167 get_file_list
168 '''
169 print("Chosen packaging option: " + NAME)
170 self.distribution.data_files = DATA_FILES
171 _sdist.get_file_list(self)
172
173
174 cmdclass['sdist'] = CustomSdist
175
176 setup(
177 name=NAME,
178 version=version.version_string(),
179 packages=PACKAGES,
180 package_dir={'': 'lib'},
181 data_files=DATA_FILES,
182 script_args=COPY_ARGS,
183 cmdclass=cmdclass,
184 include_package_data=True,
185 scripts=SCRIPTS,
186 # doc=cmdclass,
187 author="Rucio",
188 author_email="[email protected]",
189 description=DESCRIPTION,
190 license="Apache License, Version 2.0",
191 url="http://rucio.cern.ch/",
192 python_requires=">=2.6, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*",
193 classifiers=[
194 'Development Status :: 5 - Production/Stable',
195 'License :: OSI Approved :: Apache Software License',
196 'Intended Audience :: Information Technology',
197 'Intended Audience :: System Administrators',
198 'Operating System :: POSIX :: Linux',
199 'Natural Language :: English',
200 'Programming Language :: Python',
201 'Programming Language :: Python :: 2.6',
202 'Programming Language :: Python :: 2.7',
203 'Programming Language :: Python :: 3',
204 'Programming Language :: Python :: 3.4',
205 'Programming Language :: Python :: 3.5',
206 'Programming Language :: Python :: 3.6',
207 'Programming Language :: Python :: Implementation :: CPython',
208 'Programming Language :: Python :: Implementation :: PyPy',
209 'Environment :: No Input/Output (Daemon)'
210 ],
211 install_requires=REQUIRES,
212 extras_require=EXTRAS_REQUIRES,
213 dependency_links=DEPEND_LINKS,
214 )
215
[end of setup_rucio_client.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/setup_rucio_client.py b/setup_rucio_client.py
--- a/setup_rucio_client.py
+++ b/setup_rucio_client.py
@@ -190,7 +190,7 @@
license="Apache License, Version 2.0",
url="http://rucio.cern.ch/",
python_requires=">=2.6, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*",
- classifiers=(
+ classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: Apache Software License',
'Intended Audience :: Information Technology',
@@ -207,7 +207,7 @@
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Environment :: No Input/Output (Daemon)'
- ),
+ ],
install_requires=REQUIRES,
extras_require=EXTRAS_REQUIRES,
dependency_links=DEPEND_LINKS,
|
{"golden_diff": "diff --git a/setup_rucio_client.py b/setup_rucio_client.py\n--- a/setup_rucio_client.py\n+++ b/setup_rucio_client.py\n@@ -190,7 +190,7 @@\n license=\"Apache License, Version 2.0\",\n url=\"http://rucio.cern.ch/\",\n python_requires=\">=2.6, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*\",\n- classifiers=(\n+ classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'License :: OSI Approved :: Apache Software License',\n 'Intended Audience :: Information Technology',\n@@ -207,7 +207,7 @@\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Environment :: No Input/Output (Daemon)'\n- ),\n+ ],\n install_requires=REQUIRES,\n extras_require=EXTRAS_REQUIRES,\n dependency_links=DEPEND_LINKS,\n", "issue": "setup_clients.py classifiers needs to be a list, not tuples\nMotivation\r\n----------\r\nClassifiers were changed to tuple, which does not work, needs to be a list.\n", "before_files": [{"content": "# Copyright 2014-2018 CERN for the benefit of the ATLAS collaboration.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Authors:\n# - Vincent Garonne <[email protected]>, 2014-2018\n# - Martin Barisits <[email protected]>, 2017\n\nimport os\nimport re\nimport shutil\nimport subprocess\nimport sys\n\nfrom distutils.command.sdist import sdist as _sdist # pylint:disable=no-name-in-module,import-error\nfrom setuptools import setup\n\nsys.path.insert(0, os.path.abspath('lib/'))\n\nfrom rucio import version # noqa\n\nif sys.version_info < (2, 5):\n print('ERROR: Rucio requires at least Python 2.6 to run.')\n sys.exit(1)\nsys.path.insert(0, os.path.abspath('lib/'))\n\n\n# Arguments to the setup script to build Basic/Lite distributions\nCOPY_ARGS = sys.argv[1:]\nNAME = 'rucio-clients'\nIS_RELEASE = False\nPACKAGES = ['rucio', 'rucio.client', 'rucio.common',\n 'rucio.rse.protocols', 'rucio.rse', 'rucio.tests']\nREQUIREMENTS_FILES = ['tools/pip-requires-client']\nDESCRIPTION = \"Rucio Client Lite Package\"\nDATA_FILES = [('etc/', ['etc/rse-accounts.cfg.template', 'etc/rucio.cfg.template', 'etc/rucio.cfg.atlas.client.template']),\n ('tools/', ['tools/pip-requires-client', ]), ]\n\nSCRIPTS = ['bin/rucio', 'bin/rucio-admin']\nif os.path.exists('build/'):\n shutil.rmtree('build/')\nif os.path.exists('lib/rucio_clients.egg-info/'):\n shutil.rmtree('lib/rucio_clients.egg-info/')\nif os.path.exists('lib/rucio.egg-info/'):\n shutil.rmtree('lib/rucio.egg-info/')\n\nSSH_EXTRAS = ['paramiko==1.18.4']\nKERBEROS_EXTRAS = ['kerberos>=1.2.5', 'pykerberos>=1.1.14', 'requests-kerberos>=0.11.0']\nSWIFT_EXTRAS = ['python-swiftclient>=3.5.0', ]\nEXTRAS_REQUIRES = dict(ssh=SSH_EXTRAS,\n kerberos=KERBEROS_EXTRAS,\n swift=SWIFT_EXTRAS)\n\nif '--release' in COPY_ARGS:\n IS_RELEASE = True\n COPY_ARGS.remove('--release')\n\n\n# If Sphinx is installed on the box running setup.py,\n# enable setup.py to build the documentation, otherwise,\n# just ignore it\ncmdclass = {}\n\ntry:\n from sphinx.setup_command import BuildDoc\n\n class 
local_BuildDoc(BuildDoc):\n '''\n local_BuildDoc\n '''\n def run(self):\n '''\n run\n '''\n for builder in ['html']: # 'man','latex'\n self.builder = builder\n self.finalize_options()\n BuildDoc.run(self)\n cmdclass['build_sphinx'] = local_BuildDoc\nexcept Exception:\n pass\n\n\ndef get_reqs_from_file(requirements_file):\n '''\n get_reqs_from_file\n '''\n if os.path.exists(requirements_file):\n return open(requirements_file, 'r').read().split('\\n')\n return []\n\n\ndef parse_requirements(requirements_files):\n '''\n parse_requirements\n '''\n requirements = []\n for requirements_file in requirements_files:\n for line in get_reqs_from_file(requirements_file):\n if re.match(r'\\s*-e\\s+', line):\n requirements.append(re.sub(r'\\s*-e\\s+.*#egg=(.*)$', r'\\1', line))\n elif re.match(r'\\s*-f\\s+', line):\n pass\n else:\n requirements.append(line)\n return requirements\n\n\ndef parse_dependency_links(requirements_files):\n '''\n parse_dependency_links\n '''\n dependency_links = []\n for requirements_file in requirements_files:\n for line in get_reqs_from_file(requirements_file):\n if re.match(r'(\\s*#)|(\\s*$)', line):\n continue\n if re.match(r'\\s*-[ef]\\s+', line):\n dependency_links.append(re.sub(r'\\s*-[ef]\\s+', '', line))\n return dependency_links\n\n\ndef write_requirements():\n '''\n write_requirements\n '''\n venv = os.environ.get('VIRTUAL_ENV', None)\n if venv is not None:\n req_file = open(\"requirements.txt\", \"w\")\n output = subprocess.Popen([\"pip\", \"freeze\", \"-l\"], stdout=subprocess.PIPE)\n requirements = output.communicate()[0].strip()\n req_file.write(requirements)\n req_file.close()\n\n\nREQUIRES = parse_requirements(requirements_files=REQUIREMENTS_FILES)\nDEPEND_LINKS = parse_dependency_links(requirements_files=REQUIREMENTS_FILES)\n\n\nclass CustomSdist(_sdist):\n '''\n CustomSdist\n '''\n user_options = [\n ('packaging=', None, \"Some option to indicate what should be packaged\")\n ] + _sdist.user_options\n\n def __init__(self, *args, **kwargs):\n '''\n __init__\n '''\n _sdist.__init__(self, *args, **kwargs)\n self.packaging = \"default value for this option\"\n\n def get_file_list(self):\n '''\n get_file_list\n '''\n print(\"Chosen packaging option: \" + NAME)\n self.distribution.data_files = DATA_FILES\n _sdist.get_file_list(self)\n\n\ncmdclass['sdist'] = CustomSdist\n\nsetup(\n name=NAME,\n version=version.version_string(),\n packages=PACKAGES,\n package_dir={'': 'lib'},\n data_files=DATA_FILES,\n script_args=COPY_ARGS,\n cmdclass=cmdclass,\n include_package_data=True,\n scripts=SCRIPTS,\n # doc=cmdclass,\n author=\"Rucio\",\n author_email=\"[email protected]\",\n description=DESCRIPTION,\n license=\"Apache License, Version 2.0\",\n url=\"http://rucio.cern.ch/\",\n python_requires=\">=2.6, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*\",\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'License :: OSI Approved :: Apache Software License',\n 'Intended Audience :: Information Technology',\n 'Intended Audience :: System Administrators',\n 'Operating System :: POSIX :: Linux',\n 'Natural Language :: English',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Environment :: No Input/Output 
(Daemon)'\n ],\n install_requires=REQUIRES,\n extras_require=EXTRAS_REQUIRES,\n dependency_links=DEPEND_LINKS,\n)\n", "path": "setup_rucio_client.py"}]}
| 2,804 | 223 |
gh_patches_debug_8856
|
rasdani/github-patches
|
git_diff
|
modin-project__modin-965
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Improve Dask performance by tuning `Client` parameters
### Describe the problem
<!-- Describe the problem clearly here. -->
Dask generally performs well, but it could be improved. I played around with changing `n_workers` as a parameter to `Client` and I found that the default does not take full advantage of the hardware in many cases.
### Source code / logs
To update here:
https://github.com/modin-project/modin/blob/898889c8ac8804b7eed4cb3d733ee0d1e29686a8/modin/pandas/__init__.py#L227
<!-- Include any logs or source code that would be helpful to diagnose the problem. If including tracebacks, please include the full traceback. Large logs and files should be attached. Try to provide a reproducible test case that is the bare minimum necessary to generate the problem. -->
</issue>
<code>
[start of modin/pandas/__init__.py]
1 import pandas
2
3 __pandas_version__ = "0.25.3"
4
5 if pandas.__version__ != __pandas_version__:
6 raise ImportError(
7 "The pandas version installed does not match the required pandas "
8 "version in Modin. Please install pandas {} to use "
9 "Modin.".format(__pandas_version__)
10 )
11
12 from pandas import (
13 eval,
14 unique,
15 value_counts,
16 cut,
17 to_numeric,
18 factorize,
19 test,
20 qcut,
21 date_range,
22 period_range,
23 Index,
24 MultiIndex,
25 CategoricalIndex,
26 bdate_range,
27 DatetimeIndex,
28 Timedelta,
29 Timestamp,
30 to_timedelta,
31 set_eng_float_format,
32 options,
33 set_option,
34 NaT,
35 PeriodIndex,
36 Categorical,
37 Interval,
38 UInt8Dtype,
39 UInt16Dtype,
40 UInt32Dtype,
41 UInt64Dtype,
42 SparseDtype,
43 Int8Dtype,
44 Int16Dtype,
45 Int32Dtype,
46 Int64Dtype,
47 CategoricalDtype,
48 DatetimeTZDtype,
49 IntervalDtype,
50 PeriodDtype,
51 RangeIndex,
52 Int64Index,
53 UInt64Index,
54 Float64Index,
55 TimedeltaIndex,
56 IntervalIndex,
57 IndexSlice,
58 Grouper,
59 array,
60 Period,
61 show_versions,
62 DateOffset,
63 timedelta_range,
64 infer_freq,
65 interval_range,
66 ExcelWriter,
67 SparseArray,
68 SparseSeries,
69 SparseDataFrame,
70 datetime,
71 NamedAgg,
72 )
73 import threading
74 import os
75 import types
76 import sys
77
78 from .. import __version__
79 from .concat import concat
80 from .dataframe import DataFrame
81 from .datetimes import to_datetime
82 from .io import (
83 read_csv,
84 read_parquet,
85 read_json,
86 read_html,
87 read_clipboard,
88 read_excel,
89 read_hdf,
90 read_feather,
91 read_msgpack,
92 read_stata,
93 read_sas,
94 read_pickle,
95 read_sql,
96 read_gbq,
97 read_table,
98 read_fwf,
99 read_sql_table,
100 read_sql_query,
101 read_spss,
102 ExcelFile,
103 to_pickle,
104 HDFStore,
105 )
106 from .reshape import get_dummies, melt, crosstab, lreshape, wide_to_long
107 from .series import Series
108 from .general import (
109 isna,
110 isnull,
111 merge,
112 merge_asof,
113 merge_ordered,
114 pivot_table,
115 notnull,
116 notna,
117 pivot,
118 )
119 from .plotting import Plotting as plotting
120 from .. import __execution_engine__ as execution_engine
121
122 # Set this so that Pandas doesn't try to multithread by itself
123 os.environ["OMP_NUM_THREADS"] = "1"
124 num_cpus = 1
125
126
127 def initialize_ray():
128 import ray
129
130 """Initializes ray based on environment variables and internal defaults."""
131 if threading.current_thread().name == "MainThread":
132 plasma_directory = None
133 cluster = os.environ.get("MODIN_RAY_CLUSTER", None)
134 redis_address = os.environ.get("MODIN_REDIS_ADDRESS", None)
135 if cluster == "True" and redis_address is not None:
136 # We only start ray in a cluster setting for the head node.
137 ray.init(
138 include_webui=False,
139 ignore_reinit_error=True,
140 redis_address=redis_address,
141 logging_level=100,
142 )
143 elif cluster is None:
144 object_store_memory = os.environ.get("MODIN_MEMORY", None)
145 if os.environ.get("MODIN_OUT_OF_CORE", "False").title() == "True":
146 from tempfile import gettempdir
147
148 plasma_directory = gettempdir()
149 # We may have already set the memory from the environment variable, we don't
150 # want to overwrite that value if we have.
151 if object_store_memory is None:
152 # Round down to the nearest Gigabyte.
153 mem_bytes = ray.utils.get_system_memory() // 10 ** 9 * 10 ** 9
154 # Default to 8x memory for out of core
155 object_store_memory = 8 * mem_bytes
156 # In case anything failed above, we can still improve the memory for Modin.
157 if object_store_memory is None:
158 # Round down to the nearest Gigabyte.
159 object_store_memory = int(
160 0.6 * ray.utils.get_system_memory() // 10 ** 9 * 10 ** 9
161 )
162 # If the memory pool is smaller than 2GB, just use the default in ray.
163 if object_store_memory == 0:
164 object_store_memory = None
165 else:
166 object_store_memory = int(object_store_memory)
167 ray.init(
168 include_webui=False,
169 ignore_reinit_error=True,
170 plasma_directory=plasma_directory,
171 object_store_memory=object_store_memory,
172 redis_address=redis_address,
173 logging_level=100,
174 memory=object_store_memory,
175 )
176 # Register custom serializer for method objects to avoid warning message.
177 # We serialize `MethodType` objects when we use AxisPartition operations.
178 ray.register_custom_serializer(types.MethodType, use_pickle=True)
179
180 # Register a fix import function to run on all_workers including the driver.
181 # This is a hack solution to fix #647, #746
182 def move_stdlib_ahead_of_site_packages(*args):
183 site_packages_path = None
184 site_packages_path_index = -1
185 for i, path in enumerate(sys.path):
186 if sys.exec_prefix in path and path.endswith("site-packages"):
187 site_packages_path = path
188 site_packages_path_index = i
189 # break on first found
190 break
191
192 if site_packages_path is not None:
193 # stdlib packages layout as follows:
194 # - python3.x
195 # - typing.py
196 # - site-packages/
197 # - pandas
198 # So extracting the dirname of the site_packages can point us
199 # to the directory containing standard libraries.
200 sys.path.insert(
201 site_packages_path_index, os.path.dirname(site_packages_path)
202 )
203
204 move_stdlib_ahead_of_site_packages()
205 ray.worker.global_worker.run_function_on_all_workers(
206 move_stdlib_ahead_of_site_packages
207 )
208
209
210 if execution_engine == "Ray":
211 import ray
212
213 initialize_ray()
214 num_cpus = ray.cluster_resources()["CPU"]
215 elif execution_engine == "Dask": # pragma: no cover
216 from distributed.client import _get_global_client
217 import warnings
218
219 warnings.warn("The Dask Engine for Modin is experimental.")
220
221 if threading.current_thread().name == "MainThread":
222 # initialize the dask client
223 client = _get_global_client()
224 if client is None:
225 from distributed import Client
226
227 client = Client()
228 num_cpus = sum(client.ncores().values())
229 elif execution_engine != "Python":
230 raise ImportError("Unrecognized execution engine: {}.".format(execution_engine))
231
232 DEFAULT_NPARTITIONS = max(4, int(num_cpus))
233
234 __all__ = [
235 "DataFrame",
236 "Series",
237 "read_csv",
238 "read_parquet",
239 "read_json",
240 "read_html",
241 "read_clipboard",
242 "read_excel",
243 "read_hdf",
244 "read_feather",
245 "read_msgpack",
246 "read_stata",
247 "read_sas",
248 "read_pickle",
249 "read_sql",
250 "read_gbq",
251 "read_table",
252 "read_spss",
253 "concat",
254 "eval",
255 "unique",
256 "value_counts",
257 "cut",
258 "to_numeric",
259 "factorize",
260 "test",
261 "qcut",
262 "to_datetime",
263 "get_dummies",
264 "isna",
265 "isnull",
266 "merge",
267 "pivot_table",
268 "date_range",
269 "Index",
270 "MultiIndex",
271 "Series",
272 "bdate_range",
273 "period_range",
274 "DatetimeIndex",
275 "to_timedelta",
276 "set_eng_float_format",
277 "options",
278 "set_option",
279 "CategoricalIndex",
280 "Timedelta",
281 "Timestamp",
282 "NaT",
283 "PeriodIndex",
284 "Categorical",
285 "__version__",
286 "melt",
287 "crosstab",
288 "plotting",
289 "Interval",
290 "UInt8Dtype",
291 "UInt16Dtype",
292 "UInt32Dtype",
293 "UInt64Dtype",
294 "SparseDtype",
295 "Int8Dtype",
296 "Int16Dtype",
297 "Int32Dtype",
298 "Int64Dtype",
299 "CategoricalDtype",
300 "DatetimeTZDtype",
301 "IntervalDtype",
302 "PeriodDtype",
303 "RangeIndex",
304 "Int64Index",
305 "UInt64Index",
306 "Float64Index",
307 "TimedeltaIndex",
308 "IntervalIndex",
309 "IndexSlice",
310 "Grouper",
311 "array",
312 "Period",
313 "show_versions",
314 "DateOffset",
315 "timedelta_range",
316 "infer_freq",
317 "interval_range",
318 "ExcelWriter",
319 "read_fwf",
320 "read_sql_table",
321 "read_sql_query",
322 "ExcelFile",
323 "to_pickle",
324 "HDFStore",
325 "lreshape",
326 "wide_to_long",
327 "merge_asof",
328 "merge_ordered",
329 "notnull",
330 "notna",
331 "pivot",
332 "SparseArray",
333 "SparseSeries",
334 "SparseDataFrame",
335 "datetime",
336 "NamedAgg",
337 "DEFAULT_NPARTITIONS",
338 ]
339
340 del pandas
341
[end of modin/pandas/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/modin/pandas/__init__.py b/modin/pandas/__init__.py
--- a/modin/pandas/__init__.py
+++ b/modin/pandas/__init__.py
@@ -223,9 +223,10 @@
client = _get_global_client()
if client is None:
from distributed import Client
+ import multiprocessing
- client = Client()
- num_cpus = sum(client.ncores().values())
+ num_cpus = multiprocessing.cpu_count()
+ client = Client(n_workers=num_cpus)
elif execution_engine != "Python":
raise ImportError("Unrecognized execution engine: {}.".format(execution_engine))
|
{"golden_diff": "diff --git a/modin/pandas/__init__.py b/modin/pandas/__init__.py\n--- a/modin/pandas/__init__.py\n+++ b/modin/pandas/__init__.py\n@@ -223,9 +223,10 @@\n client = _get_global_client()\n if client is None:\n from distributed import Client\n+ import multiprocessing\n \n- client = Client()\n- num_cpus = sum(client.ncores().values())\n+ num_cpus = multiprocessing.cpu_count()\n+ client = Client(n_workers=num_cpus)\n elif execution_engine != \"Python\":\n raise ImportError(\"Unrecognized execution engine: {}.\".format(execution_engine))\n", "issue": "Improve Dask performance by tuning `Client` parameters\n\r\n### Describe the problem\r\n<!-- Describe the problem clearly here. -->\r\nDask generally performs well, but it could be improved. I played around with changing `n_workers` as a parameter to `Client` and I found that the default does not take full advantage of the hardware in many cases.\r\n\r\n### Source code / logs\r\n\r\nTo update here: \r\nhttps://github.com/modin-project/modin/blob/898889c8ac8804b7eed4cb3d733ee0d1e29686a8/modin/pandas/__init__.py#L227\r\n<!-- Include any logs or source code that would be helpful to diagnose the problem. If including tracebacks, please include the full traceback. Large logs and files should be attached. Try to provide a reproducible test case that is the bare minimum necessary to generate the problem. -->\r\n\n", "before_files": [{"content": "import pandas\n\n__pandas_version__ = \"0.25.3\"\n\nif pandas.__version__ != __pandas_version__:\n raise ImportError(\n \"The pandas version installed does not match the required pandas \"\n \"version in Modin. Please install pandas {} to use \"\n \"Modin.\".format(__pandas_version__)\n )\n\nfrom pandas import (\n eval,\n unique,\n value_counts,\n cut,\n to_numeric,\n factorize,\n test,\n qcut,\n date_range,\n period_range,\n Index,\n MultiIndex,\n CategoricalIndex,\n bdate_range,\n DatetimeIndex,\n Timedelta,\n Timestamp,\n to_timedelta,\n set_eng_float_format,\n options,\n set_option,\n NaT,\n PeriodIndex,\n Categorical,\n Interval,\n UInt8Dtype,\n UInt16Dtype,\n UInt32Dtype,\n UInt64Dtype,\n SparseDtype,\n Int8Dtype,\n Int16Dtype,\n Int32Dtype,\n Int64Dtype,\n CategoricalDtype,\n DatetimeTZDtype,\n IntervalDtype,\n PeriodDtype,\n RangeIndex,\n Int64Index,\n UInt64Index,\n Float64Index,\n TimedeltaIndex,\n IntervalIndex,\n IndexSlice,\n Grouper,\n array,\n Period,\n show_versions,\n DateOffset,\n timedelta_range,\n infer_freq,\n interval_range,\n ExcelWriter,\n SparseArray,\n SparseSeries,\n SparseDataFrame,\n datetime,\n NamedAgg,\n)\nimport threading\nimport os\nimport types\nimport sys\n\nfrom .. import __version__\nfrom .concat import concat\nfrom .dataframe import DataFrame\nfrom .datetimes import to_datetime\nfrom .io import (\n read_csv,\n read_parquet,\n read_json,\n read_html,\n read_clipboard,\n read_excel,\n read_hdf,\n read_feather,\n read_msgpack,\n read_stata,\n read_sas,\n read_pickle,\n read_sql,\n read_gbq,\n read_table,\n read_fwf,\n read_sql_table,\n read_sql_query,\n read_spss,\n ExcelFile,\n to_pickle,\n HDFStore,\n)\nfrom .reshape import get_dummies, melt, crosstab, lreshape, wide_to_long\nfrom .series import Series\nfrom .general import (\n isna,\n isnull,\n merge,\n merge_asof,\n merge_ordered,\n pivot_table,\n notnull,\n notna,\n pivot,\n)\nfrom .plotting import Plotting as plotting\nfrom .. 
import __execution_engine__ as execution_engine\n\n# Set this so that Pandas doesn't try to multithread by itself\nos.environ[\"OMP_NUM_THREADS\"] = \"1\"\nnum_cpus = 1\n\n\ndef initialize_ray():\n import ray\n\n \"\"\"Initializes ray based on environment variables and internal defaults.\"\"\"\n if threading.current_thread().name == \"MainThread\":\n plasma_directory = None\n cluster = os.environ.get(\"MODIN_RAY_CLUSTER\", None)\n redis_address = os.environ.get(\"MODIN_REDIS_ADDRESS\", None)\n if cluster == \"True\" and redis_address is not None:\n # We only start ray in a cluster setting for the head node.\n ray.init(\n include_webui=False,\n ignore_reinit_error=True,\n redis_address=redis_address,\n logging_level=100,\n )\n elif cluster is None:\n object_store_memory = os.environ.get(\"MODIN_MEMORY\", None)\n if os.environ.get(\"MODIN_OUT_OF_CORE\", \"False\").title() == \"True\":\n from tempfile import gettempdir\n\n plasma_directory = gettempdir()\n # We may have already set the memory from the environment variable, we don't\n # want to overwrite that value if we have.\n if object_store_memory is None:\n # Round down to the nearest Gigabyte.\n mem_bytes = ray.utils.get_system_memory() // 10 ** 9 * 10 ** 9\n # Default to 8x memory for out of core\n object_store_memory = 8 * mem_bytes\n # In case anything failed above, we can still improve the memory for Modin.\n if object_store_memory is None:\n # Round down to the nearest Gigabyte.\n object_store_memory = int(\n 0.6 * ray.utils.get_system_memory() // 10 ** 9 * 10 ** 9\n )\n # If the memory pool is smaller than 2GB, just use the default in ray.\n if object_store_memory == 0:\n object_store_memory = None\n else:\n object_store_memory = int(object_store_memory)\n ray.init(\n include_webui=False,\n ignore_reinit_error=True,\n plasma_directory=plasma_directory,\n object_store_memory=object_store_memory,\n redis_address=redis_address,\n logging_level=100,\n memory=object_store_memory,\n )\n # Register custom serializer for method objects to avoid warning message.\n # We serialize `MethodType` objects when we use AxisPartition operations.\n ray.register_custom_serializer(types.MethodType, use_pickle=True)\n\n # Register a fix import function to run on all_workers including the driver.\n # This is a hack solution to fix #647, #746\n def move_stdlib_ahead_of_site_packages(*args):\n site_packages_path = None\n site_packages_path_index = -1\n for i, path in enumerate(sys.path):\n if sys.exec_prefix in path and path.endswith(\"site-packages\"):\n site_packages_path = path\n site_packages_path_index = i\n # break on first found\n break\n\n if site_packages_path is not None:\n # stdlib packages layout as follows:\n # - python3.x\n # - typing.py\n # - site-packages/\n # - pandas\n # So extracting the dirname of the site_packages can point us\n # to the directory containing standard libraries.\n sys.path.insert(\n site_packages_path_index, os.path.dirname(site_packages_path)\n )\n\n move_stdlib_ahead_of_site_packages()\n ray.worker.global_worker.run_function_on_all_workers(\n move_stdlib_ahead_of_site_packages\n )\n\n\nif execution_engine == \"Ray\":\n import ray\n\n initialize_ray()\n num_cpus = ray.cluster_resources()[\"CPU\"]\nelif execution_engine == \"Dask\": # pragma: no cover\n from distributed.client import _get_global_client\n import warnings\n\n warnings.warn(\"The Dask Engine for Modin is experimental.\")\n\n if threading.current_thread().name == \"MainThread\":\n # initialize the dask client\n client = _get_global_client()\n if client is 
None:\n from distributed import Client\n\n client = Client()\n num_cpus = sum(client.ncores().values())\nelif execution_engine != \"Python\":\n raise ImportError(\"Unrecognized execution engine: {}.\".format(execution_engine))\n\nDEFAULT_NPARTITIONS = max(4, int(num_cpus))\n\n__all__ = [\n \"DataFrame\",\n \"Series\",\n \"read_csv\",\n \"read_parquet\",\n \"read_json\",\n \"read_html\",\n \"read_clipboard\",\n \"read_excel\",\n \"read_hdf\",\n \"read_feather\",\n \"read_msgpack\",\n \"read_stata\",\n \"read_sas\",\n \"read_pickle\",\n \"read_sql\",\n \"read_gbq\",\n \"read_table\",\n \"read_spss\",\n \"concat\",\n \"eval\",\n \"unique\",\n \"value_counts\",\n \"cut\",\n \"to_numeric\",\n \"factorize\",\n \"test\",\n \"qcut\",\n \"to_datetime\",\n \"get_dummies\",\n \"isna\",\n \"isnull\",\n \"merge\",\n \"pivot_table\",\n \"date_range\",\n \"Index\",\n \"MultiIndex\",\n \"Series\",\n \"bdate_range\",\n \"period_range\",\n \"DatetimeIndex\",\n \"to_timedelta\",\n \"set_eng_float_format\",\n \"options\",\n \"set_option\",\n \"CategoricalIndex\",\n \"Timedelta\",\n \"Timestamp\",\n \"NaT\",\n \"PeriodIndex\",\n \"Categorical\",\n \"__version__\",\n \"melt\",\n \"crosstab\",\n \"plotting\",\n \"Interval\",\n \"UInt8Dtype\",\n \"UInt16Dtype\",\n \"UInt32Dtype\",\n \"UInt64Dtype\",\n \"SparseDtype\",\n \"Int8Dtype\",\n \"Int16Dtype\",\n \"Int32Dtype\",\n \"Int64Dtype\",\n \"CategoricalDtype\",\n \"DatetimeTZDtype\",\n \"IntervalDtype\",\n \"PeriodDtype\",\n \"RangeIndex\",\n \"Int64Index\",\n \"UInt64Index\",\n \"Float64Index\",\n \"TimedeltaIndex\",\n \"IntervalIndex\",\n \"IndexSlice\",\n \"Grouper\",\n \"array\",\n \"Period\",\n \"show_versions\",\n \"DateOffset\",\n \"timedelta_range\",\n \"infer_freq\",\n \"interval_range\",\n \"ExcelWriter\",\n \"read_fwf\",\n \"read_sql_table\",\n \"read_sql_query\",\n \"ExcelFile\",\n \"to_pickle\",\n \"HDFStore\",\n \"lreshape\",\n \"wide_to_long\",\n \"merge_asof\",\n \"merge_ordered\",\n \"notnull\",\n \"notna\",\n \"pivot\",\n \"SparseArray\",\n \"SparseSeries\",\n \"SparseDataFrame\",\n \"datetime\",\n \"NamedAgg\",\n \"DEFAULT_NPARTITIONS\",\n]\n\ndel pandas\n", "path": "modin/pandas/__init__.py"}]}
| 3,763 | 144 |
gh_patches_debug_66302
|
rasdani/github-patches
|
git_diff
|
cloud-custodian__cloud-custodian-3465
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Govcloud SNS prefix incorrect
Hello,
It seems that cloud custodian is putting the SNS arn prefix incorrectly for govcloud. This is the error I'm getting:
botocore.errorfactory.InvalidParameterException: An error occurred (InvalidParameter) when calling the Publish operation: Invalid parameter: TopicArn Reason: A us-gov-west-1 ARN must begin with arn:aws-us-gov, not arn:aws:sns:us-gov-west-1:xxxxxxxxxxx:snsname
policies:
- name: custodian-tags
resource: ec2
filters:
- or:
- "tag:CustodianTest": present
- "tag:CustodianTest2": present
actions:
- type: notify
to:
- [email protected]
template: default
transport:
type: sns
region: us-gov-west-1
topic: snsname
</issue>
<code>
[start of c7n/actions/notify.py]
1 # Copyright 2017-2018 Capital One Services, LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15
16 import base64
17 import copy
18 import zlib
19
20 from .core import EventAction
21 from c7n import utils
22 from c7n.exceptions import PolicyValidationError
23 from c7n.resolver import ValuesFrom
24
25
26 class BaseNotify(EventAction):
27
28 batch_size = 250
29
30 def expand_variables(self, message):
31 """expand any variables in the action to_from/cc_from fields.
32 """
33 p = copy.deepcopy(self.data)
34 if 'to_from' in self.data:
35 to_from = self.data['to_from'].copy()
36 to_from['url'] = to_from['url'].format(**message)
37 if 'expr' in to_from:
38 to_from['expr'] = to_from['expr'].format(**message)
39 p.setdefault('to', []).extend(ValuesFrom(to_from, self.manager).get_values())
40 if 'cc_from' in self.data:
41 cc_from = self.data['cc_from'].copy()
42 cc_from['url'] = cc_from['url'].format(**message)
43 if 'expr' in cc_from:
44 cc_from['expr'] = cc_from['expr'].format(**message)
45 p.setdefault('cc', []).extend(ValuesFrom(cc_from, self.manager).get_values())
46 return p
47
48 def pack(self, message):
49 dumped = utils.dumps(message)
50 compressed = zlib.compress(dumped.encode('utf8'))
51 b64encoded = base64.b64encode(compressed)
52 return b64encoded.decode('ascii')
53
54
55 class Notify(BaseNotify):
56 """
57 Flexible notifications require quite a bit of implementation support
58 on pluggable transports, templates, address resolution, variable
59 extraction, batch periods, etc.
60
61 For expedience and flexibility then, we instead send the data to
62 an sqs queue, for processing. ie. actual communications can be enabled
63 with the c7n-mailer tool, found under tools/c7n_mailer.
64
65 Attaching additional string message attributes are supported on the SNS
66 transport, with the exception of the ``mtype`` attribute, which is a
67 reserved attribute used by Cloud Custodian.
68
69 Example::
70
71 policies:
72 - name: ec2-bad-instance-kill
73 resource: ec2
74 filters:
75 - Name: bad-instance
76 actions:
77 - terminate
78 - type: notify
79 to:
80 - event-user
81 - resource-creator
82 - email@address
83 owner_absent_contact:
84 - other_email@address
85 # which template for the email should we use
86 template: policy-template
87 transport:
88 type: sqs
89 region: us-east-1
90 queue: xyz
91
92 - name: ec2-notify-with-attributes
93 resource: ec2
94 filters:
95 - Name: bad-instance
96 actions:
97 - type: notify
98 to:
99 - event-user
100 - resource-creator
101 - email@address
102 owner_absent_contact:
103 - other_email@address
104 # which template for the email should we use
105 template: policy-template
106 transport:
107 type: sns
108 region: us-east-1
109 topic: your-notify-topic
110 attributes:
111 - attribute_key: attribute_value
112 - attribute_key_2: attribute_value_2
113 """
114
115 C7N_DATA_MESSAGE = "maidmsg/1.0"
116
117 schema_alias = True
118 schema = {
119 'type': 'object',
120 'anyOf': [
121 {'required': ['type', 'transport', 'to']},
122 {'required': ['type', 'transport', 'to_from']}],
123 'properties': {
124 'type': {'enum': ['notify']},
125 'to': {'type': 'array', 'items': {'type': 'string'}},
126 'owner_absent_contact': {'type': 'array', 'items': {'type': 'string'}},
127 'to_from': ValuesFrom.schema,
128 'cc': {'type': 'array', 'items': {'type': 'string'}},
129 'cc_from': ValuesFrom.schema,
130 'cc_manager': {'type': 'boolean'},
131 'from': {'type': 'string'},
132 'subject': {'type': 'string'},
133 'template': {'type': 'string'},
134 'transport': {
135 'oneOf': [
136 {'type': 'object',
137 'required': ['type', 'queue'],
138 'properties': {
139 'queue': {'type': 'string'},
140 'type': {'enum': ['sqs']}}},
141 {'type': 'object',
142 'required': ['type', 'topic'],
143 'properties': {
144 'topic': {'type': 'string'},
145 'type': {'enum': ['sns']},
146 'attributes': {'type': 'object'},
147 }}]
148 },
149 'assume_role': {'type': 'boolean'}
150 }
151 }
152
153 def __init__(self, data=None, manager=None, log_dir=None):
154 super(Notify, self).__init__(data, manager, log_dir)
155 self.assume_role = data.get('assume_role', True)
156
157 def validate(self):
158 if self.data.get('transport', {}).get('type') == 'sns' and \
159 self.data.get('transport').get('attributes') and \
160 'mtype' in self.data.get('transport').get('attributes').keys():
161 raise PolicyValidationError(
162 "attribute: mtype is a reserved attribute for sns transport")
163 return self
164
165 def get_permissions(self):
166 if self.data.get('transport', {}).get('type') == 'sns':
167 return ('sns:Publish',)
168 if self.data.get('transport', {'type': 'sqs'}).get('type') == 'sqs':
169 return ('sqs:SendMessage',)
170 return ()
171
172 def process(self, resources, event=None):
173 alias = utils.get_account_alias_from_sts(
174 utils.local_session(self.manager.session_factory))
175 message = {
176 'event': event,
177 'account_id': self.manager.config.account_id,
178 'account': alias,
179 'region': self.manager.config.region,
180 'policy': self.manager.data}
181 message['action'] = self.expand_variables(message)
182
183 for batch in utils.chunks(resources, self.batch_size):
184 message['resources'] = self.prepare_resources(batch)
185 receipt = self.send_data_message(message)
186 self.log.info("sent message:%s policy:%s template:%s count:%s" % (
187 receipt, self.manager.data['name'],
188 self.data.get('template', 'default'), len(batch)))
189
190 def prepare_resources(self, resources):
191 """Resources preparation for transport.
192
193 If we have sensitive or overly large resource metadata we want to
194 remove or additional serialization we need to perform, this
195 provides a mechanism.
196
197 TODO: consider alternative implementations, at min look at adding
198 provider as additional discriminator to resource type. One alternative
199 would be dynamically adjusting buffer size based on underlying
200 transport.
201 """
202 handler = getattr(self, "prepare_%s" % (
203 self.manager.type.replace('-', '_')),
204 None)
205 if handler is None:
206 return resources
207 return handler(resources)
208
209 def prepare_launch_config(self, resources):
210 for r in resources:
211 r.pop('UserData', None)
212 return resources
213
214 def prepare_asg(self, resources):
215 for r in resources:
216 if 'c7n:user-data' in r:
217 r.pop('c7n:user-data', None)
218 return resources
219
220 def prepare_ec2(self, resources):
221 for r in resources:
222 if 'c7n:user-data' in r:
223 r.pop('c7n:user-data')
224 return resources
225
226 def send_data_message(self, message):
227 if self.data['transport']['type'] == 'sqs':
228 return self.send_sqs(message)
229 elif self.data['transport']['type'] == 'sns':
230 return self.send_sns(message)
231
232 def send_sns(self, message):
233 topic = self.data['transport']['topic'].format(**message)
234 user_attributes = self.data['transport'].get('attributes')
235 if topic.startswith('arn:aws:sns'):
236 region = region = topic.split(':', 5)[3]
237 topic_arn = topic
238 else:
239 region = message['region']
240 topic_arn = "arn:aws:sns:%s:%s:%s" % (
241 message['region'], message['account_id'], topic)
242 client = self.manager.session_factory(
243 region=region, assume=self.assume_role).client('sns')
244 attrs = {
245 'mtype': {
246 'DataType': 'String',
247 'StringValue': self.C7N_DATA_MESSAGE,
248 },
249 }
250 if user_attributes:
251 for k, v in user_attributes.items():
252 if k != 'mtype':
253 attrs[k] = {'DataType': 'String', 'StringValue': v}
254 client.publish(
255 TopicArn=topic_arn,
256 Message=self.pack(message),
257 MessageAttributes=attrs
258 )
259
260 def send_sqs(self, message):
261 queue = self.data['transport']['queue'].format(**message)
262 if queue.startswith('https://queue.amazonaws.com'):
263 region = 'us-east-1'
264 queue_url = queue
265 elif 'queue.amazonaws.com' in queue:
266 region = queue[len('https://'):].split('.', 1)[0]
267 queue_url = queue
268 elif queue.startswith('https://sqs.'):
269 region = queue.split('.', 2)[1]
270 queue_url = queue
271 elif queue.startswith('arn:aws:sqs'):
272 queue_arn_split = queue.split(':', 5)
273 region = queue_arn_split[3]
274 owner_id = queue_arn_split[4]
275 queue_name = queue_arn_split[5]
276 queue_url = "https://sqs.%s.amazonaws.com/%s/%s" % (
277 region, owner_id, queue_name)
278 else:
279 region = self.manager.config.region
280 owner_id = self.manager.config.account_id
281 queue_name = queue
282 queue_url = "https://sqs.%s.amazonaws.com/%s/%s" % (
283 region, owner_id, queue_name)
284 client = self.manager.session_factory(
285 region=region, assume=self.assume_role).client('sqs')
286 attrs = {
287 'mtype': {
288 'DataType': 'String',
289 'StringValue': self.C7N_DATA_MESSAGE,
290 },
291 }
292 result = client.send_message(
293 QueueUrl=queue_url,
294 MessageBody=self.pack(message),
295 MessageAttributes=attrs)
296 return result['MessageId']
297
[end of c7n/actions/notify.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/c7n/actions/notify.py b/c7n/actions/notify.py
--- a/c7n/actions/notify.py
+++ b/c7n/actions/notify.py
@@ -232,7 +232,7 @@
def send_sns(self, message):
topic = self.data['transport']['topic'].format(**message)
user_attributes = self.data['transport'].get('attributes')
- if topic.startswith('arn:aws:sns'):
+ if topic.startswith('arn:'):
region = region = topic.split(':', 5)[3]
topic_arn = topic
else:
|
{"golden_diff": "diff --git a/c7n/actions/notify.py b/c7n/actions/notify.py\n--- a/c7n/actions/notify.py\n+++ b/c7n/actions/notify.py\n@@ -232,7 +232,7 @@\n def send_sns(self, message):\n topic = self.data['transport']['topic'].format(**message)\n user_attributes = self.data['transport'].get('attributes')\n- if topic.startswith('arn:aws:sns'):\n+ if topic.startswith('arn:'):\n region = region = topic.split(':', 5)[3]\n topic_arn = topic\n else:\n", "issue": "Govcloud SNS prefix incorrect\nHello,\r\n\r\nIt seems that cloud custodian is putting the SNS arn prefix incorrectly for govcloud. This is the error I'm getting:\r\n\r\nbotocore.errorfactory.InvalidParameterException: An error occurred (InvalidParameter) when calling the Publish operation: Invalid parameter: TopicArn Reason: A us-gov-west-1 ARN must begin with arn:aws-us-gov, not arn:aws:sns:us-gov-west-1:xxxxxxxxxxx:snsname\r\n\r\n\r\npolicies:\r\n - name: custodian-tags\r\n resource: ec2\r\n filters:\r\n - or:\r\n - \"tag:CustodianTest\": present\r\n - \"tag:CustodianTest2\": present\r\n actions:\r\n - type: notify\r\n to:\r\n - [email protected]\r\n template: default\r\n transport:\r\n type: sns\r\n region: us-gov-west-1\r\n topic: snsname\r\n\n", "before_files": [{"content": "# Copyright 2017-2018 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport base64\nimport copy\nimport zlib\n\nfrom .core import EventAction\nfrom c7n import utils\nfrom c7n.exceptions import PolicyValidationError\nfrom c7n.resolver import ValuesFrom\n\n\nclass BaseNotify(EventAction):\n\n batch_size = 250\n\n def expand_variables(self, message):\n \"\"\"expand any variables in the action to_from/cc_from fields.\n \"\"\"\n p = copy.deepcopy(self.data)\n if 'to_from' in self.data:\n to_from = self.data['to_from'].copy()\n to_from['url'] = to_from['url'].format(**message)\n if 'expr' in to_from:\n to_from['expr'] = to_from['expr'].format(**message)\n p.setdefault('to', []).extend(ValuesFrom(to_from, self.manager).get_values())\n if 'cc_from' in self.data:\n cc_from = self.data['cc_from'].copy()\n cc_from['url'] = cc_from['url'].format(**message)\n if 'expr' in cc_from:\n cc_from['expr'] = cc_from['expr'].format(**message)\n p.setdefault('cc', []).extend(ValuesFrom(cc_from, self.manager).get_values())\n return p\n\n def pack(self, message):\n dumped = utils.dumps(message)\n compressed = zlib.compress(dumped.encode('utf8'))\n b64encoded = base64.b64encode(compressed)\n return b64encoded.decode('ascii')\n\n\nclass Notify(BaseNotify):\n \"\"\"\n Flexible notifications require quite a bit of implementation support\n on pluggable transports, templates, address resolution, variable\n extraction, batch periods, etc.\n\n For expedience and flexibility then, we instead send the data to\n an sqs queue, for processing. ie. 
actual communications can be enabled\n with the c7n-mailer tool, found under tools/c7n_mailer.\n\n Attaching additional string message attributes are supported on the SNS\n transport, with the exception of the ``mtype`` attribute, which is a\n reserved attribute used by Cloud Custodian.\n\n Example::\n\n policies:\n - name: ec2-bad-instance-kill\n resource: ec2\n filters:\n - Name: bad-instance\n actions:\n - terminate\n - type: notify\n to:\n - event-user\n - resource-creator\n - email@address\n owner_absent_contact:\n - other_email@address\n # which template for the email should we use\n template: policy-template\n transport:\n type: sqs\n region: us-east-1\n queue: xyz\n\n - name: ec2-notify-with-attributes\n resource: ec2\n filters:\n - Name: bad-instance\n actions:\n - type: notify\n to:\n - event-user\n - resource-creator\n - email@address\n owner_absent_contact:\n - other_email@address\n # which template for the email should we use\n template: policy-template\n transport:\n type: sns\n region: us-east-1\n topic: your-notify-topic\n attributes:\n - attribute_key: attribute_value\n - attribute_key_2: attribute_value_2\n \"\"\"\n\n C7N_DATA_MESSAGE = \"maidmsg/1.0\"\n\n schema_alias = True\n schema = {\n 'type': 'object',\n 'anyOf': [\n {'required': ['type', 'transport', 'to']},\n {'required': ['type', 'transport', 'to_from']}],\n 'properties': {\n 'type': {'enum': ['notify']},\n 'to': {'type': 'array', 'items': {'type': 'string'}},\n 'owner_absent_contact': {'type': 'array', 'items': {'type': 'string'}},\n 'to_from': ValuesFrom.schema,\n 'cc': {'type': 'array', 'items': {'type': 'string'}},\n 'cc_from': ValuesFrom.schema,\n 'cc_manager': {'type': 'boolean'},\n 'from': {'type': 'string'},\n 'subject': {'type': 'string'},\n 'template': {'type': 'string'},\n 'transport': {\n 'oneOf': [\n {'type': 'object',\n 'required': ['type', 'queue'],\n 'properties': {\n 'queue': {'type': 'string'},\n 'type': {'enum': ['sqs']}}},\n {'type': 'object',\n 'required': ['type', 'topic'],\n 'properties': {\n 'topic': {'type': 'string'},\n 'type': {'enum': ['sns']},\n 'attributes': {'type': 'object'},\n }}]\n },\n 'assume_role': {'type': 'boolean'}\n }\n }\n\n def __init__(self, data=None, manager=None, log_dir=None):\n super(Notify, self).__init__(data, manager, log_dir)\n self.assume_role = data.get('assume_role', True)\n\n def validate(self):\n if self.data.get('transport', {}).get('type') == 'sns' and \\\n self.data.get('transport').get('attributes') and \\\n 'mtype' in self.data.get('transport').get('attributes').keys():\n raise PolicyValidationError(\n \"attribute: mtype is a reserved attribute for sns transport\")\n return self\n\n def get_permissions(self):\n if self.data.get('transport', {}).get('type') == 'sns':\n return ('sns:Publish',)\n if self.data.get('transport', {'type': 'sqs'}).get('type') == 'sqs':\n return ('sqs:SendMessage',)\n return ()\n\n def process(self, resources, event=None):\n alias = utils.get_account_alias_from_sts(\n utils.local_session(self.manager.session_factory))\n message = {\n 'event': event,\n 'account_id': self.manager.config.account_id,\n 'account': alias,\n 'region': self.manager.config.region,\n 'policy': self.manager.data}\n message['action'] = self.expand_variables(message)\n\n for batch in utils.chunks(resources, self.batch_size):\n message['resources'] = self.prepare_resources(batch)\n receipt = self.send_data_message(message)\n self.log.info(\"sent message:%s policy:%s template:%s count:%s\" % (\n receipt, self.manager.data['name'],\n self.data.get('template', 
'default'), len(batch)))\n\n def prepare_resources(self, resources):\n \"\"\"Resources preparation for transport.\n\n If we have sensitive or overly large resource metadata we want to\n remove or additional serialization we need to perform, this\n provides a mechanism.\n\n TODO: consider alternative implementations, at min look at adding\n provider as additional discriminator to resource type. One alternative\n would be dynamically adjusting buffer size based on underlying\n transport.\n \"\"\"\n handler = getattr(self, \"prepare_%s\" % (\n self.manager.type.replace('-', '_')),\n None)\n if handler is None:\n return resources\n return handler(resources)\n\n def prepare_launch_config(self, resources):\n for r in resources:\n r.pop('UserData', None)\n return resources\n\n def prepare_asg(self, resources):\n for r in resources:\n if 'c7n:user-data' in r:\n r.pop('c7n:user-data', None)\n return resources\n\n def prepare_ec2(self, resources):\n for r in resources:\n if 'c7n:user-data' in r:\n r.pop('c7n:user-data')\n return resources\n\n def send_data_message(self, message):\n if self.data['transport']['type'] == 'sqs':\n return self.send_sqs(message)\n elif self.data['transport']['type'] == 'sns':\n return self.send_sns(message)\n\n def send_sns(self, message):\n topic = self.data['transport']['topic'].format(**message)\n user_attributes = self.data['transport'].get('attributes')\n if topic.startswith('arn:aws:sns'):\n region = region = topic.split(':', 5)[3]\n topic_arn = topic\n else:\n region = message['region']\n topic_arn = \"arn:aws:sns:%s:%s:%s\" % (\n message['region'], message['account_id'], topic)\n client = self.manager.session_factory(\n region=region, assume=self.assume_role).client('sns')\n attrs = {\n 'mtype': {\n 'DataType': 'String',\n 'StringValue': self.C7N_DATA_MESSAGE,\n },\n }\n if user_attributes:\n for k, v in user_attributes.items():\n if k != 'mtype':\n attrs[k] = {'DataType': 'String', 'StringValue': v}\n client.publish(\n TopicArn=topic_arn,\n Message=self.pack(message),\n MessageAttributes=attrs\n )\n\n def send_sqs(self, message):\n queue = self.data['transport']['queue'].format(**message)\n if queue.startswith('https://queue.amazonaws.com'):\n region = 'us-east-1'\n queue_url = queue\n elif 'queue.amazonaws.com' in queue:\n region = queue[len('https://'):].split('.', 1)[0]\n queue_url = queue\n elif queue.startswith('https://sqs.'):\n region = queue.split('.', 2)[1]\n queue_url = queue\n elif queue.startswith('arn:aws:sqs'):\n queue_arn_split = queue.split(':', 5)\n region = queue_arn_split[3]\n owner_id = queue_arn_split[4]\n queue_name = queue_arn_split[5]\n queue_url = \"https://sqs.%s.amazonaws.com/%s/%s\" % (\n region, owner_id, queue_name)\n else:\n region = self.manager.config.region\n owner_id = self.manager.config.account_id\n queue_name = queue\n queue_url = \"https://sqs.%s.amazonaws.com/%s/%s\" % (\n region, owner_id, queue_name)\n client = self.manager.session_factory(\n region=region, assume=self.assume_role).client('sqs')\n attrs = {\n 'mtype': {\n 'DataType': 'String',\n 'StringValue': self.C7N_DATA_MESSAGE,\n },\n }\n result = client.send_message(\n QueueUrl=queue_url,\n MessageBody=self.pack(message),\n MessageAttributes=attrs)\n return result['MessageId']\n", "path": "c7n/actions/notify.py"}]}
| 3,930 | 134 |
gh_patches_debug_9489
|
rasdani/github-patches
|
git_diff
|
StackStorm__st2-4723
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Infinity values in log messages cause issues when parsing JSON
##### SUMMARY
When writing out structured log data using the `GelfFormatter`, periodically the field `_content_length` is output with a value of: `"_content_length": Infinity`. This is invalid JSON and causes an error when parsing from external logging tools such as FluentD
##### ISSUE TYPE
- Bug Report
##### STACKSTORM VERSION
```shell
st2 3.0.1, on Python 2.7.5
```
##### OS / ENVIRONMENT / INSTALL METHOD
```
OS = Red Hat 7.6
Install = puppet-st2
```
##### STEPS TO REPRODUCE
Configure GELF logging as described here: https://stackstorm.com/2017/08/22/stackstorm-centralized-logging-graylog/
Monitor the `/var/log/st2/st2stream.gelf.log` and periodically lines will be written out with `"_content_length": Infinity"`
```shell
st2stream.gelf.log:{"_path": "/v1/stream", "version": "1.1", "level": 6, "timestamp": 1561477579, "_content_length": Infinity, "_python": {"name": "st2.st2common.middleware.logging", "process": 81491, "module": "logging", "funcName": "__call__", "processName": "MainProcess", "lineno": 118, "filename": "logging.py"}, "_request_id": "2e85dac9-1cb2-46d0-969a-2c93efba10fa", "timestamp_f": 1561477579.465585, "host": "stackstorm.domain.tld", "full_message": "2e85dac9-1cb2-46d0-969a-2c93efba10fa - 200 inf 15.51ms", "_remote_addr": "127.0.0.1", "_runtime": 15.51, "_status": 200, "_method": "GET", "short_message": "2e85dac9-1cb2-46d0-969a-2c93efba10fa - 200 inf 15.51ms"}
st2stream.gelf.log:{"_path": "/v1/stream", "version": "1.1", "level": 6, "timestamp": 1561479377, "_content_length": Infinity, "_python": {"name": "st2.st2common.middleware.logging", "process": 81491, "module": "logging", "funcName": "__call__", "processName": "MainProcess", "lineno": 118, "filename": "logging.py"}, "_request_id": "46199a0b-c917-4221-b826-4380e919fd5c", "timestamp_f": 1561479377.730839, "host": "stackstorm.domain.tld", "full_message": "46199a0b-c917-4221-b826-4380e919fd5c - 200 inf 12.296ms", "_remote_addr": "127.0.0.1", "_runtime": 12.296, "_status": 200, "_method": "GET", "short_message": "46199a0b-c917-4221-b826-4380e919fd5c - 200 inf 12.296ms"}
st2stream.gelf.log:{"_path": "/v1/stream", "version": "1.1", "level": 6, "timestamp": 1561481182, "_content_length": Infinity, "_python": {"name": "st2.st2common.middleware.logging", "process": 81491, "module": "logging", "funcName": "__call__", "processName": "MainProcess", "lineno": 118, "filename": "logging.py"}, "_request_id": "60bdfd46-c49a-481e-a591-dc6a5ad7eeff", "timestamp_f": 1561481182.687065, "host": "stackstorm.domain.tld", "full_message": "60bdfd46-c49a-481e-a591-dc6a5ad7eeff - 200 inf 14.903ms", "_remote_addr": "127.0.0.1", "_runtime": 14.903, "_status": 200, "_method": "GET", "short_message": "60bdfd46-c49a-481e-a591-dc6a5ad7eeff - 200 inf 14.903ms"}
```
##### EXPECTED RESULTS
Valid JSON be written to these files so that it can be parsed from an external tool
##### ANALYSIS
The only place in the code where I can find `content_length` being set to Infinity is here: https://github.com/StackStorm/st2/blob/master/st2common/st2common/middleware/logging.py#L102
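As a quick standalone illustration of why that assignment leaks invalid JSON (plain stdlib `json` only, not st2 code — it assumes the GELF formatter ultimately serializes the record dict with a `json.dumps`-style encoder, which is consistent with the output shown above):
```python
# Standalone sketch: Python's json module emits the non-standard token
# "Infinity" for float('inf') unless allow_nan=False is passed.
import json

record = {"content_length": float("inf"), "status": 200}

print(json.dumps(record))
# {"content_length": Infinity, "status": 200}   <- not valid JSON

try:
    json.dumps(record, allow_nan=False)
except ValueError as exc:
    print("strict encoder refuses it:", exc)

# Python's own parser tolerates the token, which is why the problem only
# surfaces in stricter external tools such as FluentD.
json.loads('{"content_length": Infinity}')   # succeeds here, fails elsewhere
```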
</issue>
<code>
[start of st2common/st2common/middleware/logging.py]
1 # Copyright 2019 Extreme Networks, Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from __future__ import absolute_import
16
17 import time
18 import types
19 import itertools
20
21 from oslo_config import cfg
22
23 from st2common.constants.api import REQUEST_ID_HEADER
24 from st2common.constants.auth import QUERY_PARAM_ATTRIBUTE_NAME
25 from st2common.constants.auth import QUERY_PARAM_API_KEY_ATTRIBUTE_NAME
26 from st2common.constants.secrets import MASKED_ATTRIBUTE_VALUE
27 from st2common.constants.secrets import MASKED_ATTRIBUTES_BLACKLIST
28 from st2common import log as logging
29 from st2common.router import Request, NotFoundException
30
31 LOG = logging.getLogger(__name__)
32
33 SECRET_QUERY_PARAMS = [
34 QUERY_PARAM_ATTRIBUTE_NAME,
35 QUERY_PARAM_API_KEY_ATTRIBUTE_NAME
36 ] + MASKED_ATTRIBUTES_BLACKLIST
37
38 try:
39 clock = time.perf_counter
40 except AttributeError:
41 clock = time.time
42
43
44 class LoggingMiddleware(object):
45 """
46 Logs all incoming requests and outgoing responses
47 """
48
49 def __init__(self, app, router):
50 self.app = app
51 self.router = router
52
53 def __call__(self, environ, start_response):
54 start_time = clock()
55 status_code = []
56 content_length = []
57
58 request = Request(environ)
59
60 query_params = request.GET.dict_of_lists()
61
62 # Mask secret / sensitive query params
63 secret_query_params = SECRET_QUERY_PARAMS + cfg.CONF.log.mask_secrets_blacklist
64 for param_name in secret_query_params:
65 if param_name in query_params:
66 query_params[param_name] = MASKED_ATTRIBUTE_VALUE
67
68 # Log the incoming request
69 values = {
70 'method': request.method,
71 'path': request.path,
72 'remote_addr': request.remote_addr,
73 'query': query_params,
74 'request_id': request.headers.get(REQUEST_ID_HEADER, None)
75 }
76
77 LOG.info('%(request_id)s - %(method)s %(path)s with query=%(query)s' %
78 values, extra=values)
79
80 def custom_start_response(status, headers, exc_info=None):
81 status_code.append(int(status.split(' ')[0]))
82
83 for name, value in headers:
84 if name.lower() == 'content-length':
85 content_length.append(int(value))
86 break
87
88 return start_response(status, headers, exc_info)
89
90 retval = self.app(environ, custom_start_response)
91
92 try:
93 endpoint, path_vars = self.router.match(request)
94 except NotFoundException:
95 endpoint = {}
96
97 log_result = endpoint.get('x-log-result', True)
98
99 if isinstance(retval, (types.GeneratorType, itertools.chain)):
100 # Note: We don't log the result when return value is a generator, because this would
101 # result in calling str() on the generator and as such, exhausting it
102 content_length = [float('inf')]
103 log_result = False
104
105 # Log the response
106 values = {
107 'method': request.method,
108 'path': request.path,
109 'remote_addr': request.remote_addr,
110 'status': status_code[0],
111 'runtime': float("{0:.3f}".format((clock() - start_time) * 10**3)),
112 'content_length': content_length[0] if content_length else len(b''.join(retval)),
113 'request_id': request.headers.get(REQUEST_ID_HEADER, None)
114 }
115
116 log_msg = '%(request_id)s - %(status)s %(content_length)s %(runtime)sms' % (values)
117 LOG.info(log_msg, extra=values)
118
119 if log_result:
120 values['result'] = retval[0]
121 log_msg = ('%(request_id)s - %(status)s %(content_length)s %(runtime)sms\n%(result)s' %
122 (values))
123 LOG.debug(log_msg, extra=values)
124
125 return retval
126
[end of st2common/st2common/middleware/logging.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/st2common/st2common/middleware/logging.py b/st2common/st2common/middleware/logging.py
--- a/st2common/st2common/middleware/logging.py
+++ b/st2common/st2common/middleware/logging.py
@@ -99,7 +99,7 @@
if isinstance(retval, (types.GeneratorType, itertools.chain)):
# Note: We don't log the result when return value is a generator, because this would
# result in calling str() on the generator and as such, exhausting it
- content_length = [float('inf')]
+ content_length = [0]
log_result = False
# Log the response
|
{"golden_diff": "diff --git a/st2common/st2common/middleware/logging.py b/st2common/st2common/middleware/logging.py\n--- a/st2common/st2common/middleware/logging.py\n+++ b/st2common/st2common/middleware/logging.py\n@@ -99,7 +99,7 @@\n if isinstance(retval, (types.GeneratorType, itertools.chain)):\n # Note: We don't log the result when return value is a generator, because this would\n # result in calling str() on the generator and as such, exhausting it\n- content_length = [float('inf')]\n+ content_length = [0]\n log_result = False\n \n # Log the response\n", "issue": "Infinity values in log messages cause issues when parsing JSON\n##### SUMMARY\r\n\r\nWhen writing out structured log data using the `GelfFormatter`, periodically the field `_content_length` is output with a value of: `\"_content_length\": Infinity`. This is invalid JSON and causes an error when parsing from external logging tools such as FluentD\r\n\r\n##### ISSUE TYPE\r\n - Bug Report\r\n\r\n##### STACKSTORM VERSION\r\n```shell\r\nst2 3.0.1, on Python 2.7.5\r\n```\r\n\r\n\r\n##### OS / ENVIRONMENT / INSTALL METHOD\r\n```\r\nOS = Red Hat 7.6\r\nInstall = puppet-st2\r\n```\r\n\r\n##### STEPS TO REPRODUCE\r\nConfigure GELF logging as described here: https://stackstorm.com/2017/08/22/stackstorm-centralized-logging-graylog/\r\n\r\nMonitor the `/var/log/st2/st2stream.gelf.log` and periodically lines will be written out with `\"_content_length\": Infinity\"`\r\n\r\n```shell\r\nst2stream.gelf.log:{\"_path\": \"/v1/stream\", \"version\": \"1.1\", \"level\": 6, \"timestamp\": 1561477579, \"_content_length\": Infinity, \"_python\": {\"name\": \"st2.st2common.middleware.logging\", \"process\": 81491, \"module\": \"logging\", \"funcName\": \"__call__\", \"processName\": \"MainProcess\", \"lineno\": 118, \"filename\": \"logging.py\"}, \"_request_id\": \"2e85dac9-1cb2-46d0-969a-2c93efba10fa\", \"timestamp_f\": 1561477579.465585, \"host\": \"stackstorm.domain.tld\", \"full_message\": \"2e85dac9-1cb2-46d0-969a-2c93efba10fa - 200 inf 15.51ms\", \"_remote_addr\": \"127.0.0.1\", \"_runtime\": 15.51, \"_status\": 200, \"_method\": \"GET\", \"short_message\": \"2e85dac9-1cb2-46d0-969a-2c93efba10fa - 200 inf 15.51ms\"}\r\nst2stream.gelf.log:{\"_path\": \"/v1/stream\", \"version\": \"1.1\", \"level\": 6, \"timestamp\": 1561479377, \"_content_length\": Infinity, \"_python\": {\"name\": \"st2.st2common.middleware.logging\", \"process\": 81491, \"module\": \"logging\", \"funcName\": \"__call__\", \"processName\": \"MainProcess\", \"lineno\": 118, \"filename\": \"logging.py\"}, \"_request_id\": \"46199a0b-c917-4221-b826-4380e919fd5c\", \"timestamp_f\": 1561479377.730839, \"host\": \"stackstorm.domain.tld\", \"full_message\": \"46199a0b-c917-4221-b826-4380e919fd5c - 200 inf 12.296ms\", \"_remote_addr\": \"127.0.0.1\", \"_runtime\": 12.296, \"_status\": 200, \"_method\": \"GET\", \"short_message\": \"46199a0b-c917-4221-b826-4380e919fd5c - 200 inf 12.296ms\"}\r\nst2stream.gelf.log:{\"_path\": \"/v1/stream\", \"version\": \"1.1\", \"level\": 6, \"timestamp\": 1561481182, \"_content_length\": Infinity, \"_python\": {\"name\": \"st2.st2common.middleware.logging\", \"process\": 81491, \"module\": \"logging\", \"funcName\": \"__call__\", \"processName\": \"MainProcess\", \"lineno\": 118, \"filename\": \"logging.py\"}, \"_request_id\": \"60bdfd46-c49a-481e-a591-dc6a5ad7eeff\", \"timestamp_f\": 1561481182.687065, \"host\": \"stackstorm.domain.tld\", \"full_message\": \"60bdfd46-c49a-481e-a591-dc6a5ad7eeff - 200 inf 14.903ms\", \"_remote_addr\": \"127.0.0.1\", 
\"_runtime\": 14.903, \"_status\": 200, \"_method\": \"GET\", \"short_message\": \"60bdfd46-c49a-481e-a591-dc6a5ad7eeff - 200 inf 14.903ms\"}\r\n```\r\n\r\n##### EXPECTED RESULTS\r\n\r\nValid JSON be written to these files so that it can be parsed from an external tool\r\n\r\n##### ANALYSIS\r\n\r\nThe only place in the code where i can find `content_length` being set to Infinity is here: https://github.com/StackStorm/st2/blob/master/st2common/st2common/middleware/logging.py#L102\r\n\n", "before_files": [{"content": "# Copyright 2019 Extreme Networks, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\n\nimport time\nimport types\nimport itertools\n\nfrom oslo_config import cfg\n\nfrom st2common.constants.api import REQUEST_ID_HEADER\nfrom st2common.constants.auth import QUERY_PARAM_ATTRIBUTE_NAME\nfrom st2common.constants.auth import QUERY_PARAM_API_KEY_ATTRIBUTE_NAME\nfrom st2common.constants.secrets import MASKED_ATTRIBUTE_VALUE\nfrom st2common.constants.secrets import MASKED_ATTRIBUTES_BLACKLIST\nfrom st2common import log as logging\nfrom st2common.router import Request, NotFoundException\n\nLOG = logging.getLogger(__name__)\n\nSECRET_QUERY_PARAMS = [\n QUERY_PARAM_ATTRIBUTE_NAME,\n QUERY_PARAM_API_KEY_ATTRIBUTE_NAME\n] + MASKED_ATTRIBUTES_BLACKLIST\n\ntry:\n clock = time.perf_counter\nexcept AttributeError:\n clock = time.time\n\n\nclass LoggingMiddleware(object):\n \"\"\"\n Logs all incoming requests and outgoing responses\n \"\"\"\n\n def __init__(self, app, router):\n self.app = app\n self.router = router\n\n def __call__(self, environ, start_response):\n start_time = clock()\n status_code = []\n content_length = []\n\n request = Request(environ)\n\n query_params = request.GET.dict_of_lists()\n\n # Mask secret / sensitive query params\n secret_query_params = SECRET_QUERY_PARAMS + cfg.CONF.log.mask_secrets_blacklist\n for param_name in secret_query_params:\n if param_name in query_params:\n query_params[param_name] = MASKED_ATTRIBUTE_VALUE\n\n # Log the incoming request\n values = {\n 'method': request.method,\n 'path': request.path,\n 'remote_addr': request.remote_addr,\n 'query': query_params,\n 'request_id': request.headers.get(REQUEST_ID_HEADER, None)\n }\n\n LOG.info('%(request_id)s - %(method)s %(path)s with query=%(query)s' %\n values, extra=values)\n\n def custom_start_response(status, headers, exc_info=None):\n status_code.append(int(status.split(' ')[0]))\n\n for name, value in headers:\n if name.lower() == 'content-length':\n content_length.append(int(value))\n break\n\n return start_response(status, headers, exc_info)\n\n retval = self.app(environ, custom_start_response)\n\n try:\n endpoint, path_vars = self.router.match(request)\n except NotFoundException:\n endpoint = {}\n\n log_result = endpoint.get('x-log-result', True)\n\n if isinstance(retval, (types.GeneratorType, itertools.chain)):\n # Note: We don't log the result when return value is a generator, because this would\n # result in calling str() on the generator and as 
such, exhausting it\n content_length = [float('inf')]\n log_result = False\n\n # Log the response\n values = {\n 'method': request.method,\n 'path': request.path,\n 'remote_addr': request.remote_addr,\n 'status': status_code[0],\n 'runtime': float(\"{0:.3f}\".format((clock() - start_time) * 10**3)),\n 'content_length': content_length[0] if content_length else len(b''.join(retval)),\n 'request_id': request.headers.get(REQUEST_ID_HEADER, None)\n }\n\n log_msg = '%(request_id)s - %(status)s %(content_length)s %(runtime)sms' % (values)\n LOG.info(log_msg, extra=values)\n\n if log_result:\n values['result'] = retval[0]\n log_msg = ('%(request_id)s - %(status)s %(content_length)s %(runtime)sms\\n%(result)s' %\n (values))\n LOG.debug(log_msg, extra=values)\n\n return retval\n", "path": "st2common/st2common/middleware/logging.py"}]}
| 2,998 | 146 |
gh_patches_debug_17509
|
rasdani/github-patches
|
git_diff
|
bokeh__bokeh-3888
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bokeh 0.11: Enables webgl causes inconsistent update of scatter points
This problem might be related to this issue: https://github.com/bokeh/bokeh/issues/3795
Lets consider this minimal example:
``` python
from bokeh.plotting import figure, show, ColumnDataSource
from bokeh.models import Select
from bokeh.models.widgets import RadioGroup
from bokeh.io import vform
import numpy as np
N = 100
x1 = np.random.normal(0.0, 1.0,N)
x2 = x1 * (1.0 + np.random.normal(0.0, 0.1,N))
y1 = 5.0*x1 + np.random.normal(0, 0.1, N)
y2 = 50.0*x2 + np.random.normal(0, 1, N)
source = ColumnDataSource({'x' : x1, 'y' : y1})
TOOLS = "box_select,box_zoom,lasso_select,help,reset,pan"
p = figure(tools=TOOLS, plot_width=400, plot_height=400, webgl=True)
p.circle(x='x', y='y', source=source)
x_radio_group = RadioGroup(labels=["x1", "x2"], active=0)
y_radio_group = RadioGroup(labels=["y1", "y2"], active=0)
controls = vform(x_radio_group, y_radio_group)
def update(attr, old, new):
source.data["y"] = y1 if y_radio_group.active==0 else y2
source.data["x"] = x1 if x_radio_group.active==0 else x2
# Updating the source in one step does not change this behavior
#data = dict(x=x1 if x_radio_group.active==0 else x2, y=y1 if y_radio_group.active==0 else y2)
#source.data = data
source.on_change('selected', update)
def select_x(value):
update('x-change', None, None)
def select_y(value):
update('y-change', None, None)
x_radio_group.on_click(select_x)
y_radio_group.on_click(select_y)
```
You can reproduce this problem in the following way (using the bokeh server):
- Fire up the bokeh server (e.g. bokeh server file --show)
- Select a subset of the data points (using box select or lasso)
- Switch between y1 and y2 radio buttons
- Notice that even though the y-axis scaled as expected, the scatter points did not!
Now if you repeat this exercise with webgl=False, you should see that both the y-axis and the scatter points are updated as expected.
maps_cities.py data disappears after pan/zoom
ping @almarklein
Other WebGL examples work fine.
Could consider a _very_ short self-contained fix for `0.11`
```
examples/glyphs/maps_cities.py:
data disappears after pan/zoom
```
</issue>
<code>
[start of examples/plotting/file/line_compare.py]
1 """ Compare WebGL with canvas line.
2
3 """
4
5 import numpy as np
6
7 from bokeh.models import Slider, Dropdown, HBox, VBox, CustomJS
8 from bokeh.plotting import Figure, show, output_file
9
10 p1 = Figure(title="Canvas", webgl=False)
11
12 p2 = Figure(title="WebGL", webgl=True)
13
14 ys = 10 # yscale, to increase anisotropy
15
16 for p in (p1, p2):
17
18 t = np.linspace(0, 2 * np.pi, 50)
19 x = np.sin(t) * 10
20 y = np.cos(t) * 10
21 l1 = p.line(x, y * ys, color="#2222aa",
22 line_width=6, line_cap='butt',
23 line_join='round', line_dash=(10, 6, 3, 6, 3, 6))
24
25 t = np.arange(10)
26 t = np.linspace(0, 4 * np.pi, 150)
27 x = t - 5
28 y = (t + 1) * ((t % 2) * 2 - 1)
29 y = np.sin(t) + 5
30 l2 = p.line(x, y * ys, color="#22aa22",
31 line_width=6, line_cap='butt', line_join='round')
32
33 t = np.arange(10)
34 x = t - 5
35 y = 0.3 * (t + 1) * ((t % 2) * 2 - 1) - 6
36 l3 = p.line(x, y * ys, color="#aa2222",
37 line_width=6, line_cap='butt',
38 line_join='round', line_dash=(10, 10))
39 l4 = p.line(y, x * ys, color="#aa2222",
40 line_width=6, line_cap='butt',
41 line_join='round', line_dash=(10, 10))
42
43 def add_callback(widget, prop):
44 lines = [l1, l2, l3, l4]
45 widget.callback = CustomJS(args=dict(widget=widget), code="""
46 for ( var i = 0; i < %s; i++ ) {
47 var g = eval( 'line' + i ).get( 'glyph' );
48 g.set( '%s', widget.get( 'value' ) );
49 window.g = g;
50 }
51 """ % (len(lines), prop))
52 for i, line in enumerate(lines):
53 widget.callback.args['line%i' % i] = line
54
55 def make_slider(prop, start, end, value):
56 slider = Slider(title=prop, start=start, end=end, value=value)
57 add_callback(slider, prop)
58 return slider
59
60 def make_dropdown(prop, menu):
61 dropdown = Dropdown(label=prop, menu=menu)
62 add_callback(dropdown, prop)
63 return dropdown
64
65 sliders = [
66 make_slider('line_width', start=0.2, end=16, value=5),
67 make_slider('line_dash_offset', start=0, end=100, value=1),
68 make_dropdown('line_cap', [("butt", "butt"), ("round", "round"), ("square", "square")]),
69 make_dropdown('line_join', [("miter", "miter"), ("round", "round"), ("bevel", "bevel")]),
70 ]
71
72 sliders = VBox(*sliders)
73
74 output_file("line_compare.html", title="line_compare.py example")
75
76 show(HBox(sliders, p1, p2))
77
[end of examples/plotting/file/line_compare.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/examples/plotting/file/line_compare.py b/examples/plotting/file/line_compare.py
--- a/examples/plotting/file/line_compare.py
+++ b/examples/plotting/file/line_compare.py
@@ -13,6 +13,7 @@
ys = 10 # yscale, to increase anisotropy
+lines = []
for p in (p1, p2):
t = np.linspace(0, 2 * np.pi, 50)
@@ -39,9 +40,10 @@
l4 = p.line(y, x * ys, color="#aa2222",
line_width=6, line_cap='butt',
line_join='round', line_dash=(10, 10))
+
+ lines.extend([l1, l2, l3, l4])
def add_callback(widget, prop):
- lines = [l1, l2, l3, l4]
widget.callback = CustomJS(args=dict(widget=widget), code="""
for ( var i = 0; i < %s; i++ ) {
var g = eval( 'line' + i ).get( 'glyph' );
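A hedged reading of the change above (an editorial guess at the intent, not an official explanation): `l1`–`l4` are rebound on each pass of the `for p in (p1, p2)` loop, so a list built from them inside `add_callback` only ever refers to the last figure's glyphs; collecting into a shared `lines` list inside the loop captures both figures, so the widgets drive both plots. A tiny pure-Python sketch of that rebinding behaviour (illustrative names only, no Bokeh involved):
```python
# Pure-Python sketch of the rebinding pitfall; strings stand in for glyph
# renderers, and all names here are illustrative only.
handles = []

for fig in ("canvas", "webgl"):
    h1 = f"{fig}-line-1"          # rebound on every iteration
    h2 = f"{fig}-line-2"
    handles.extend([h1, h2])      # collected per iteration, as in the patch

print(h1, h2)      # webgl-line-1 webgl-line-2  -> only the last figure
print(handles)     # all four handles, covering both figures
```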
|
{"golden_diff": "diff --git a/examples/plotting/file/line_compare.py b/examples/plotting/file/line_compare.py\n--- a/examples/plotting/file/line_compare.py\n+++ b/examples/plotting/file/line_compare.py\n@@ -13,6 +13,7 @@\n \n ys = 10 # yscale, to increase anisotropy\n \n+lines = []\n for p in (p1, p2):\n \n t = np.linspace(0, 2 * np.pi, 50)\n@@ -39,9 +40,10 @@\n l4 = p.line(y, x * ys, color=\"#aa2222\",\n line_width=6, line_cap='butt',\n line_join='round', line_dash=(10, 10))\n+ \n+ lines.extend([l1, l2, l3, l4])\n \n def add_callback(widget, prop):\n- lines = [l1, l2, l3, l4]\n widget.callback = CustomJS(args=dict(widget=widget), code=\"\"\"\n for ( var i = 0; i < %s; i++ ) {\n var g = eval( 'line' + i ).get( 'glyph' );\n", "issue": "Bokeh 0.11: Enables webgl causes inconsistent update of scatter points\nThis problem might be related to this issue: https://github.com/bokeh/bokeh/issues/3795\n\nLets consider this minimal example:\n\n``` python\nfrom bokeh.plotting import figure, show, ColumnDataSource\nfrom bokeh.models import Select\nfrom bokeh.models.widgets import RadioGroup\nfrom bokeh.io import vform\nimport numpy as np\n\nN = 100\nx1 = np.random.normal(0.0, 1.0,N)\nx2 = x1 * (1.0 + np.random.normal(0.0, 0.1,N))\ny1 = 5.0*x1 + np.random.normal(0, 0.1, N)\ny2 = 50.0*x2 + np.random.normal(0, 1, N)\n\nsource = ColumnDataSource({'x' : x1, 'y' : y1})\n\nTOOLS = \"box_select,box_zoom,lasso_select,help,reset,pan\"\np = figure(tools=TOOLS, plot_width=400, plot_height=400, webgl=True)\np.circle(x='x', y='y', source=source)\n\nx_radio_group = RadioGroup(labels=[\"x1\", \"x2\"], active=0)\ny_radio_group = RadioGroup(labels=[\"y1\", \"y2\"], active=0)\ncontrols = vform(x_radio_group, y_radio_group)\n\ndef update(attr, old, new):\n source.data[\"y\"] = y1 if y_radio_group.active==0 else y2\n source.data[\"x\"] = x1 if x_radio_group.active==0 else x2\n\n # Updating the source in one step does not change this behavior\n #data = dict(x=x1 if x_radio_group.active==0 else x2, y=y1 if y_radio_group.active==0 else y2)\n #source.data = data\n\nsource.on_change('selected', update)\n\ndef select_x(value):\n update('x-change', None, None)\n\ndef select_y(value):\n update('y-change', None, None)\n\nx_radio_group.on_click(select_x)\ny_radio_group.on_click(select_y)\n```\n\nYou can reproduce this problem in the following way (using the bokeh server):\n- Fire up the bokeh server (e.g. bokeh server file --show)\n- Select a subset of the data points (using box select or lasso)\n- Switch between y1 and y2 radio buttons\n- Notice that even though the y-axis scaled as expected, the scatter points did not!\n\nNow if you repeat this exercise with webgl=False, you should see that both the y-axis and the scatter points are updated as expected.\n\nmaps_cities.py data disappears after pan/zoom\nping @almarklein \n\nOther WebGL examples work fine. 
\n\nCould consider a _very_ short self-contained fix for `0.11`\n\n```\nexamples/glyphs/maps_cities.py:\n data disappears after pan/zoom\n```\n\n", "before_files": [{"content": "\"\"\" Compare WebGL with canvas line.\n\n\"\"\"\n\nimport numpy as np\n\nfrom bokeh.models import Slider, Dropdown, HBox, VBox, CustomJS\nfrom bokeh.plotting import Figure, show, output_file\n\np1 = Figure(title=\"Canvas\", webgl=False)\n\np2 = Figure(title=\"WebGL\", webgl=True)\n\nys = 10 # yscale, to increase anisotropy\n\nfor p in (p1, p2):\n\n t = np.linspace(0, 2 * np.pi, 50)\n x = np.sin(t) * 10\n y = np.cos(t) * 10\n l1 = p.line(x, y * ys, color=\"#2222aa\",\n line_width=6, line_cap='butt',\n line_join='round', line_dash=(10, 6, 3, 6, 3, 6))\n\n t = np.arange(10)\n t = np.linspace(0, 4 * np.pi, 150)\n x = t - 5\n y = (t + 1) * ((t % 2) * 2 - 1)\n y = np.sin(t) + 5\n l2 = p.line(x, y * ys, color=\"#22aa22\",\n line_width=6, line_cap='butt', line_join='round')\n\n t = np.arange(10)\n x = t - 5\n y = 0.3 * (t + 1) * ((t % 2) * 2 - 1) - 6\n l3 = p.line(x, y * ys, color=\"#aa2222\",\n line_width=6, line_cap='butt',\n line_join='round', line_dash=(10, 10))\n l4 = p.line(y, x * ys, color=\"#aa2222\",\n line_width=6, line_cap='butt',\n line_join='round', line_dash=(10, 10))\n\ndef add_callback(widget, prop):\n lines = [l1, l2, l3, l4]\n widget.callback = CustomJS(args=dict(widget=widget), code=\"\"\"\n for ( var i = 0; i < %s; i++ ) {\n var g = eval( 'line' + i ).get( 'glyph' );\n g.set( '%s', widget.get( 'value' ) );\n window.g = g;\n }\n \"\"\" % (len(lines), prop))\n for i, line in enumerate(lines):\n widget.callback.args['line%i' % i] = line\n\ndef make_slider(prop, start, end, value):\n slider = Slider(title=prop, start=start, end=end, value=value)\n add_callback(slider, prop)\n return slider\n\ndef make_dropdown(prop, menu):\n dropdown = Dropdown(label=prop, menu=menu)\n add_callback(dropdown, prop)\n return dropdown\n\nsliders = [\n make_slider('line_width', start=0.2, end=16, value=5),\n make_slider('line_dash_offset', start=0, end=100, value=1),\n make_dropdown('line_cap', [(\"butt\", \"butt\"), (\"round\", \"round\"), (\"square\", \"square\")]),\n make_dropdown('line_join', [(\"miter\", \"miter\"), (\"round\", \"round\"), (\"bevel\", \"bevel\")]),\n]\n\nsliders = VBox(*sliders)\n\noutput_file(\"line_compare.html\", title=\"line_compare.py example\")\n\nshow(HBox(sliders, p1, p2))\n", "path": "examples/plotting/file/line_compare.py"}]}
| 2,128 | 266 |
gh_patches_debug_29833
|
rasdani/github-patches
|
git_diff
|
Project-MONAI__MONAI-5067
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ThreadDataLoader data loading order is related to num_workers, which makes it difficult to reproduce
**Describe the bug**
During our experiments, we found that the ThreadDataLoader loading order changes with the num_workers setting when shuffling is enabled.
**To Reproduce**
```python
import torch
from monai.utils import set_determinism
from monai.data import ThreadDataLoader
import monai
monai.config.print_debug_info()
set_determinism(0)
loader = ThreadDataLoader(torch.arange(10),batch_size=2,buffer_size=10,shuffle=True,num_workers=0)
for epoch in range(0,2):
print("epoch:", epoch)
for data in loader:
print(data)
print("------")
loader = ThreadDataLoader(torch.arange(10),batch_size=2,buffer_size=10,shuffle=True,num_workers=10)
for epoch in range(0,2):
print("epoch:", epoch)
for data in loader:
print(data)
print("------")
```
**Expected behavior**
produces the same output with different num_workers settings
**Screenshots**

**Environment**
```python
MONAI version: 0.8.1
Numpy version: 1.21.4
Pytorch version: 1.8.1+cu111
MONAI flags: HAS_EXT = False, USE_COMPILED = False
MONAI rev id: 71ff399a3ea07aef667b23653620a290364095b1
Optional dependencies:
Pytorch Ignite version: 0.4.8
Nibabel version: 3.2.1
scikit-image version: 0.18.3
Pillow version: 8.4.0
Tensorboard version: 2.8.0
gdown version: NOT INSTALLED or UNKNOWN VERSION.
TorchVision version: 0.9.1+cu111
tqdm version: 4.62.3
lmdb version: 1.3.0
psutil version: 5.9.0
pandas version: 1.3.4
einops version: 0.4.0
transformers version: NOT INSTALLED or UNKNOWN VERSION.
mlflow version: NOT INSTALLED or UNKNOWN VERSION.
```
Ensuring you use the relevant python executable, please paste the output of:
</issue>
<code>
[start of monai/data/dataloader.py]
1 # Copyright (c) MONAI Consortium
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at
5 # http://www.apache.org/licenses/LICENSE-2.0
6 # Unless required by applicable law or agreed to in writing, software
7 # distributed under the License is distributed on an "AS IS" BASIS,
8 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9 # See the License for the specific language governing permissions and
10 # limitations under the License.
11
12 import torch
13 from torch.utils.data import DataLoader as _TorchDataLoader
14 from torch.utils.data import Dataset
15
16 from monai.data.utils import list_data_collate, set_rnd, worker_init_fn
17
18 __all__ = ["DataLoader"]
19
20
21 class DataLoader(_TorchDataLoader):
22 """
23 Provides an iterable over the given `dataset`. It inherits the PyTorch
24 DataLoader and adds enhanced `collate_fn` and `worker_fn` by default.
25
26 Although this class could be configured to be the same as
27 `torch.utils.data.DataLoader`, its default configuration is
28 recommended, mainly for the following extra features:
29
30 - It handles MONAI randomizable objects with appropriate random state
31 managements for deterministic behaviour.
32 - It is aware of the patch-based transform (such as
33 :py:class:`monai.transforms.RandSpatialCropSamplesDict`) samples for
34 preprocessing with enhanced data collating behaviour.
35 See: :py:class:`monai.transforms.Compose`.
36
37 For more details about :py:class:`torch.utils.data.DataLoader`, please see:
38 https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader.
39
40 For example, to construct a randomized dataset and iterate with the data loader:
41
42 .. code-block:: python
43
44 import torch
45
46 from monai.data import DataLoader
47 from monai.transforms import Randomizable
48
49
50 class RandomDataset(torch.utils.data.Dataset, Randomizable):
51 def __getitem__(self, index):
52 return self.R.randint(0, 1000, (1,))
53
54 def __len__(self):
55 return 16
56
57
58 dataset = RandomDataset()
59 dataloader = DataLoader(dataset, batch_size=2, num_workers=4)
60 for epoch in range(2):
61 for i, batch in enumerate(dataloader):
62 print(epoch, i, batch.data.numpy().flatten().tolist())
63
64 Args:
65 dataset: dataset from which to load the data.
66 num_workers: how many subprocesses to use for data
67 loading. ``0`` means that the data will be loaded in the main process.
68 (default: ``0``)
69 kwargs: other parameters for PyTorch DataLoader.
70 """
71
72 def __init__(self, dataset: Dataset, num_workers: int = 0, **kwargs) -> None:
73 if num_workers == 0:
74 # when num_workers > 0, random states are determined by worker_init_fn
75 # this is to make the behavior consistent when num_workers == 0
76 # torch.int64 doesn't work well on some versions of windows
77 _seed = torch.empty((), dtype=torch.int32).random_(generator=None).item()
78 set_rnd(dataset, int(_seed))
79 if "collate_fn" not in kwargs:
80 kwargs.update({"collate_fn": list_data_collate})
81 if "worker_init_fn" not in kwargs:
82 kwargs.update({"worker_init_fn": worker_init_fn})
83
84 super().__init__(dataset=dataset, num_workers=num_workers, **kwargs)
85
[end of monai/data/dataloader.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/monai/data/dataloader.py b/monai/data/dataloader.py
--- a/monai/data/dataloader.py
+++ b/monai/data/dataloader.py
@@ -66,6 +66,8 @@
num_workers: how many subprocesses to use for data
loading. ``0`` means that the data will be loaded in the main process.
(default: ``0``)
+ collate_fn: default to :py:func:`monai.data.utils.list_data_collate`.
+ worker_init_fn: default to :py:func:`monai.data.utils.worker_init_fn`.
kwargs: other parameters for PyTorch DataLoader.
"""
@@ -74,11 +76,14 @@
# when num_workers > 0, random states are determined by worker_init_fn
# this is to make the behavior consistent when num_workers == 0
# torch.int64 doesn't work well on some versions of windows
- _seed = torch.empty((), dtype=torch.int32).random_(generator=None).item()
+ _g = torch.random.default_generator if kwargs.get("generator", None) is None else kwargs["generator"]
+ init_seed = _g.initial_seed()
+ _seed = torch.empty((), dtype=torch.int64).random_(generator=_g).item()
set_rnd(dataset, int(_seed))
+ _g.manual_seed(init_seed)
if "collate_fn" not in kwargs:
- kwargs.update({"collate_fn": list_data_collate})
+ kwargs["collate_fn"] = list_data_collate
if "worker_init_fn" not in kwargs:
- kwargs.update({"worker_init_fn": worker_init_fn})
+ kwargs["worker_init_fn"] = worker_init_fn
super().__init__(dataset=dataset, num_workers=num_workers, **kwargs)
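A hedged note on what the change above appears to do, with a standalone PyTorch-only sketch of the pattern (no MONAI imports; it assumes nothing beyond standard `torch.Generator` semantics): the dataset seed is drawn from the DataLoader's generator, and that generator is then rewound to its initial seed, so the amount of RNG state consumed no longer depends on whether the `num_workers == 0` branch ran — which is what decouples shuffle order from the worker count.
```python
# Standalone sketch of the draw-then-rewind seeding pattern from the patch.
import torch

torch.manual_seed(0)                      # e.g. what set_determinism(0) does

g = torch.random.default_generator
init_seed = g.initial_seed()              # 0 in this example

# Draw one seed for the dataset (this advances the generator)...
dataset_seed = torch.empty((), dtype=torch.int64).random_(generator=g).item()
# ...then rewind so downstream consumers see the same state either way.
g.manual_seed(init_seed)

print(dataset_seed)
print(torch.randperm(5))   # matches randperm right after torch.manual_seed(0)
```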
|
{"golden_diff": "diff --git a/monai/data/dataloader.py b/monai/data/dataloader.py\n--- a/monai/data/dataloader.py\n+++ b/monai/data/dataloader.py\n@@ -66,6 +66,8 @@\n num_workers: how many subprocesses to use for data\n loading. ``0`` means that the data will be loaded in the main process.\n (default: ``0``)\n+ collate_fn: default to :py:func:`monai.data.utils.list_data_collate`.\n+ worker_init_fn: default to :py:func:`monai.data.utils.worker_init_fn`.\n kwargs: other parameters for PyTorch DataLoader.\n \"\"\"\n \n@@ -74,11 +76,14 @@\n # when num_workers > 0, random states are determined by worker_init_fn\n # this is to make the behavior consistent when num_workers == 0\n # torch.int64 doesn't work well on some versions of windows\n- _seed = torch.empty((), dtype=torch.int32).random_(generator=None).item()\n+ _g = torch.random.default_generator if kwargs.get(\"generator\", None) is None else kwargs[\"generator\"]\n+ init_seed = _g.initial_seed()\n+ _seed = torch.empty((), dtype=torch.int64).random_(generator=_g).item()\n set_rnd(dataset, int(_seed))\n+ _g.manual_seed(init_seed)\n if \"collate_fn\" not in kwargs:\n- kwargs.update({\"collate_fn\": list_data_collate})\n+ kwargs[\"collate_fn\"] = list_data_collate\n if \"worker_init_fn\" not in kwargs:\n- kwargs.update({\"worker_init_fn\": worker_init_fn})\n+ kwargs[\"worker_init_fn\"] = worker_init_fn\n \n super().__init__(dataset=dataset, num_workers=num_workers, **kwargs)\n", "issue": "ThreadDataLoader dataloading order is related to numworker, which makes it difficult to reproduce\n**Describe the bug**\r\nDuring our experiments, we found that the ThreadDataLoader loading order made a difference when using the shuffling strategy related to numworkers\r\n\r\n**To Reproduce**\r\n```python\r\nimport torch\r\nfrom monai.utils import set_determinism\r\nfrom monai.data import ThreadDataLoader\r\nimport monai\r\nmonai.config.print_debug_info()\r\nset_determinism(0)\r\nloader = ThreadDataLoader(torch.arange(10),batch_size=2,buffer_size=10,shuffle=True,num_workers=0)\r\nfor epoch in range(0,2):\r\n print(\"epoch:\", epoch)\r\n for data in loader:\r\n print(data)\r\n print(\"------\")\r\nloader = ThreadDataLoader(torch.arange(10),batch_size=2,buffer_size=10,shuffle=True,num_workers=10)\r\nfor epoch in range(0,2):\r\n print(\"epoch:\", epoch)\r\n for data in loader:\r\n print(data)\r\n print(\"------\")\r\n```\r\n\r\n**Expected behavior**\r\nproduces the same output with different num_woker settings\r\n\r\n**Screenshots**\r\n\r\n\r\n\r\n**Environment**\r\n```python\r\nMONAI version: 0.8.1\r\nNumpy version: 1.21.4\r\nPytorch version: 1.8.1+cu111\r\nMONAI flags: HAS_EXT = False, USE_COMPILED = False\r\nMONAI rev id: 71ff399a3ea07aef667b23653620a290364095b1\r\n\r\nOptional dependencies:\r\nPytorch Ignite version: 0.4.8\r\nNibabel version: 3.2.1\r\nscikit-image version: 0.18.3\r\nPillow version: 8.4.0\r\nTensorboard version: 2.8.0\r\ngdown version: NOT INSTALLED or UNKNOWN VERSION.\r\nTorchVision version: 0.9.1+cu111\r\ntqdm version: 4.62.3\r\nlmdb version: 1.3.0\r\npsutil version: 5.9.0\r\npandas version: 1.3.4\r\neinops version: 0.4.0\r\ntransformers version: NOT INSTALLED or UNKNOWN VERSION.\r\nmlflow version: NOT INSTALLED or UNKNOWN VERSION.\r\n```\r\nEnsuring you use the relevant python executable, please paste the output of:\r\n\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "# Copyright (c) MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# 
You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport torch\nfrom torch.utils.data import DataLoader as _TorchDataLoader\nfrom torch.utils.data import Dataset\n\nfrom monai.data.utils import list_data_collate, set_rnd, worker_init_fn\n\n__all__ = [\"DataLoader\"]\n\n\nclass DataLoader(_TorchDataLoader):\n \"\"\"\n Provides an iterable over the given `dataset`. It inherits the PyTorch\n DataLoader and adds enhanced `collate_fn` and `worker_fn` by default.\n\n Although this class could be configured to be the same as\n `torch.utils.data.DataLoader`, its default configuration is\n recommended, mainly for the following extra features:\n\n - It handles MONAI randomizable objects with appropriate random state\n managements for deterministic behaviour.\n - It is aware of the patch-based transform (such as\n :py:class:`monai.transforms.RandSpatialCropSamplesDict`) samples for\n preprocessing with enhanced data collating behaviour.\n See: :py:class:`monai.transforms.Compose`.\n\n For more details about :py:class:`torch.utils.data.DataLoader`, please see:\n https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader.\n\n For example, to construct a randomized dataset and iterate with the data loader:\n\n .. code-block:: python\n\n import torch\n\n from monai.data import DataLoader\n from monai.transforms import Randomizable\n\n\n class RandomDataset(torch.utils.data.Dataset, Randomizable):\n def __getitem__(self, index):\n return self.R.randint(0, 1000, (1,))\n\n def __len__(self):\n return 16\n\n\n dataset = RandomDataset()\n dataloader = DataLoader(dataset, batch_size=2, num_workers=4)\n for epoch in range(2):\n for i, batch in enumerate(dataloader):\n print(epoch, i, batch.data.numpy().flatten().tolist())\n\n Args:\n dataset: dataset from which to load the data.\n num_workers: how many subprocesses to use for data\n loading. ``0`` means that the data will be loaded in the main process.\n (default: ``0``)\n kwargs: other parameters for PyTorch DataLoader.\n \"\"\"\n\n def __init__(self, dataset: Dataset, num_workers: int = 0, **kwargs) -> None:\n if num_workers == 0:\n # when num_workers > 0, random states are determined by worker_init_fn\n # this is to make the behavior consistent when num_workers == 0\n # torch.int64 doesn't work well on some versions of windows\n _seed = torch.empty((), dtype=torch.int32).random_(generator=None).item()\n set_rnd(dataset, int(_seed))\n if \"collate_fn\" not in kwargs:\n kwargs.update({\"collate_fn\": list_data_collate})\n if \"worker_init_fn\" not in kwargs:\n kwargs.update({\"worker_init_fn\": worker_init_fn})\n\n super().__init__(dataset=dataset, num_workers=num_workers, **kwargs)\n", "path": "monai/data/dataloader.py"}]}
| 2,101 | 403 |
gh_patches_debug_21980
|
rasdani/github-patches
|
git_diff
|
opensearch-project__opensearch-build-2919
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Bug]: Cron created by manifest workflow doesn't have build_platform parameter
### Describe the bug
Our manifest workflow helps us create an input manifest and add the build job to our cron once there is a new version from OS or OSD core, e.g. #2894.
However, since we started building distributions for different platforms, the cron entry needs a build_platform parameter to trigger the build workflow, which our current manifest workflow does not add. We currently have to add the parameter to the cron manually in order to trigger it. #2911
### To reproduce
When we have a new OS or OSD version, run `./manifest.sh update`
### Expected behavior
When the workflow raises the PR, it should also add this parameter to the cron. For the different job types (OS or OSD) it should add the corresponding platform parameter, e.g. for OS it should be `BUILD_PLATFORM=linux macos windows`, but for OSD it's `BUILD_PLATFORM=linux windows`.
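One possible shape for that mapping, shown only as an illustrative sketch (the helper name and the `2.4.0` version are example values, not taken from the repository; the golden diff further down in this record ends up taking essentially this form):
```python
# Illustrative sketch: map each job prefix to the platforms its cron entry
# should request, falling back to linux for anything else.
BUILD_PLATFORM = {
    "opensearch": "linux macos windows",
    "opensearch-dashboards": "linux windows",
}

def cron_entry(prefix: str, version: str) -> str:
    platforms = BUILD_PLATFORM.get(prefix, "linux")
    return (
        f"H 1 * * * %INPUT_MANIFEST={version}/{prefix}-{version}.yml;"
        f"TARGET_JOB_NAME=distribution-build-{prefix};"
        f"BUILD_PLATFORM={platforms}\n"
    )

print(cron_entry("opensearch", "2.4.0"), end="")
print(cron_entry("opensearch-dashboards", "2.4.0"), end="")
```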
### Screenshots
If applicable, add screenshots to help explain your problem.
### Host / Environment
_No response_
### Additional context
_No response_
### Relevant log output
_No response_
</issue>
<code>
[start of src/manifests_workflow/input_manifests.py]
1 # Copyright OpenSearch Contributors
2 # SPDX-License-Identifier: Apache-2.0
3 #
4 # The OpenSearch Contributors require contributions made to
5 # this file be licensed under the Apache-2.0 license or a
6 # compatible open source license.
7
8 import glob
9 import logging
10 import os
11 import re
12 from abc import abstractmethod
13 from typing import Dict, List, Type, Union
14
15 import ruamel.yaml
16
17 from manifests.component_manifest import ComponentFromSource
18 from manifests.input_manifest import InputComponents, InputManifest
19 from manifests.manifests import Manifests
20 from manifests_workflow.component_opensearch import ComponentOpenSearch
21 from manifests_workflow.component_opensearch_dashboards_min import ComponentOpenSearchDashboardsMin
22 from manifests_workflow.component_opensearch_min import ComponentOpenSearchMin
23 from system.temporary_directory import TemporaryDirectory
24
25
26 class InputManifests(Manifests):
27 def __init__(self, name: str) -> None:
28 self.name = name
29 self.prefix = name.lower().replace(" ", "-")
30 super().__init__(InputManifest, InputManifests.files(self.prefix))
31
32 @classmethod
33 def manifests_path(self) -> str:
34 return os.path.realpath(os.path.join(os.path.dirname(__file__), "..", "..", "manifests"))
35
36 @classmethod
37 def workflows_path(self) -> str:
38 return os.path.realpath(os.path.join(os.path.dirname(__file__), "..", "..", ".github", "workflows"))
39
40 @classmethod
41 def legacy_manifests_path(self) -> str:
42 return os.path.realpath(os.path.join(os.path.dirname(__file__), "..", "..", "legacy-manifests"))
43
44 @classmethod
45 def jenkins_path(self) -> str:
46 return os.path.realpath(os.path.join(os.path.dirname(__file__), "..", "..", "jenkins"))
47
48 @classmethod
49 def cron_jenkinsfile(self) -> str:
50 return os.path.join(self.jenkins_path(), "check-for-build.jenkinsfile")
51
52 @classmethod
53 def versionincrement_workflow(self) -> str:
54 return os.path.join(self.workflows_path(), "increment-plugin-versions.yml")
55
56 @classmethod
57 def files(self, name: str) -> List:
58 results = []
59 for path in [self.manifests_path(), self.legacy_manifests_path()]:
60 for filename in glob.glob(os.path.join(path, f"**/{name}-*.yml")):
61 # avoids the -maven manifest
62 match = re.search(rf"^{name}-([0-9.]*).yml$", os.path.basename(filename))
63 if match:
64 results.append(filename)
65 return results
66
67 @abstractmethod
68 def update(
69 self,
70 min_klass: Union[Type[ComponentOpenSearchMin], Type[ComponentOpenSearchDashboardsMin]],
71 component_klass: Type[ComponentOpenSearch],
72 keep: bool = False,
73 ) -> None:
74 known_versions = self.versions
75 logging.info(f"Known versions: {known_versions}")
76 main_versions: Dict = {}
77 with TemporaryDirectory(keep=keep, chdir=True) as work_dir:
78 logging.info(f"Checking out components into {work_dir.name}")
79
80 # check out and build #main, 1.x, etc.
81 branches = min_klass.branches()
82
83 logging.info(f"Checking {self.name} {branches} branches")
84 for branch in branches:
85 c = min_klass.checkout(
86 path=os.path.join(work_dir.name, self.name.replace(" ", ""), branch),
87 branch=branch,
88 )
89
90 version = c.version
91 logging.info(f"{self.name}#{branch} is version {version}")
92 if version not in main_versions.keys():
93 main_versions[version] = [c]
94
95 if component_klass is not None:
96 # components can increment their own version first without incrementing min
97 manifest = self.latest
98 logging.info(f"Examining components in the latest manifest of {manifest.build.name} ({manifest.build.version})")
99 for component in manifest.components.values():
100 if component.name == self.name:
101 continue
102
103 if type(component) is ComponentFromSource:
104 logging.info(f"Checking out {component.name}#main")
105 component = component_klass.checkout(
106 name=component.name,
107 path=os.path.join(work_dir.name, component.name),
108 opensearch_version=manifest.build.version,
109 repo_url=component.repository,
110 branch="main",
111 )
112
113 component_version = component.version
114 if component_version:
115 release_version = ".".join(component_version.split(".")[:3])
116 if release_version not in main_versions.keys():
117 main_versions[release_version] = []
118 main_versions[release_version].append(component)
119 logging.info(f"{component.name}#main is version {release_version} (from {component_version})")
120
121 # summarize
122 logging.info("Found versions on main:")
123 for main_version in main_versions.keys():
124 for component in main_versions[main_version]:
125 logging.info(f" {component.name}={main_version}")
126
127 # generate new manifests
128 for release_version in sorted(main_versions.keys() - known_versions):
129 self.write_manifest(release_version, main_versions[release_version])
130 self.add_to_cron(release_version)
131 self.add_to_versionincrement_workflow(release_version)
132
133 def create_manifest(self, version: str, components: List = []) -> InputManifest:
134 templates_base_path = os.path.join(self.manifests_path(), "templates")
135 template_version_folder = version.split(".")[0] + ".x"
136 template_full_path = os.path.join(templates_base_path, self.prefix, template_version_folder, "manifest.yml")
137 if not os.path.exists(template_full_path):
138 template_full_path = os.path.join(templates_base_path, self.prefix, "default", "manifest.yml")
139
140 manifest = InputManifest.from_file(open(template_full_path))
141
142 manifest.build.version = version
143 manifests_components = []
144
145 for component in components:
146 logging.info(f" Adding {component.name}")
147 manifests_components.append(component.to_dict())
148
149 manifest.components = InputComponents(manifests_components) # type: ignore
150 return manifest
151
152 def write_manifest(self, version: str, components: List = []) -> None:
153 logging.info(f"Creating new version: {version}")
154 manifest = self.create_manifest(version, components)
155 manifest_dir = os.path.join(self.manifests_path(), version)
156 os.makedirs(manifest_dir, exist_ok=True)
157 manifest_path = os.path.join(manifest_dir, f"{self.prefix}-{version}.yml")
158 manifest.to_file(manifest_path)
159 logging.info(f"Wrote {manifest_path}")
160
161 def add_to_cron(self, version: str) -> None:
162 logging.info(f"Adding new version to cron: {version}")
163 jenkinsfile = self.cron_jenkinsfile()
164 with open(jenkinsfile, "r") as f:
165 data = f.read()
166
167 cron_entry = f"H 1 * * * %INPUT_MANIFEST={version}/{self.prefix}-{version}.yml;TARGET_JOB_NAME=distribution-build-{self.prefix}\n"
168
169 if cron_entry in data:
170 raise ValueError(f"{jenkinsfile} already contains an entry for {self.prefix} {version}")
171
172 data = data.replace("parameterizedCron '''\n", f"parameterizedCron '''\n{' ' * 12}{cron_entry}")
173
174 with open(jenkinsfile, "w") as f:
175 f.write(data)
176
177 logging.info(f"Wrote {jenkinsfile}")
178
179 def add_to_versionincrement_workflow(self, version: str) -> None:
180 versionincrement_workflow_file = self.versionincrement_workflow()
181 yaml = ruamel.yaml.YAML()
182 yaml.explicit_start = True # type: ignore
183 yaml.preserve_quotes = True # type: ignore
184
185 with open(versionincrement_workflow_file) as f:
186 data = yaml.load(f)
187
188 version_entry = []
189 major_version_entry = version.split(".")[0] + ".x"
190 minor_version_entry = version.rsplit(".", 1)[0]
191 if minor_version_entry not in data["jobs"]["plugin-version-increment-sync"]["strategy"]["matrix"]["branch"]:
192 print(f"Adding {minor_version_entry} to {versionincrement_workflow_file}")
193 version_entry.append(minor_version_entry)
194 if major_version_entry not in data["jobs"]["plugin-version-increment-sync"]["strategy"]["matrix"]["branch"]:
195 print(f"Adding {major_version_entry} to {versionincrement_workflow_file}")
196 version_entry.append(major_version_entry)
197
198 if version_entry:
199 branch_list = list(data["jobs"]["plugin-version-increment-sync"]["strategy"]["matrix"]["branch"])
200 branch_list.extend(version_entry)
201 data["jobs"]["plugin-version-increment-sync"]["strategy"]["matrix"]["branch"] = branch_list
202 yaml.indent(mapping=2, sequence=4, offset=2)
203 with open(versionincrement_workflow_file, 'w') as f:
204 yaml.dump(data, f)
205 logging.info("Added new version to the version increment workflow")
206
[end of src/manifests_workflow/input_manifests.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/manifests_workflow/input_manifests.py b/src/manifests_workflow/input_manifests.py
--- a/src/manifests_workflow/input_manifests.py
+++ b/src/manifests_workflow/input_manifests.py
@@ -24,6 +24,11 @@
class InputManifests(Manifests):
+ BUILD_PLATFORM = {
+ "opensearch": "linux macos windows",
+ "opensearch-dashboards": "linux windows"
+ }
+
def __init__(self, name: str) -> None:
self.name = name
self.prefix = name.lower().replace(" ", "-")
@@ -164,7 +169,11 @@
with open(jenkinsfile, "r") as f:
data = f.read()
- cron_entry = f"H 1 * * * %INPUT_MANIFEST={version}/{self.prefix}-{version}.yml;TARGET_JOB_NAME=distribution-build-{self.prefix}\n"
+ build_platform = self.BUILD_PLATFORM.get(self.prefix, "linux")
+
+ cron_entry = f"H 1 * * * %INPUT_MANIFEST={version}/{self.prefix}-{version}.yml;" \
+ f"TARGET_JOB_NAME=distribution-build-{self.prefix};" \
+ f"BUILD_PLATFORM={build_platform}\n"
if cron_entry in data:
raise ValueError(f"{jenkinsfile} already contains an entry for {self.prefix} {version}")
|
{"golden_diff": "diff --git a/src/manifests_workflow/input_manifests.py b/src/manifests_workflow/input_manifests.py\n--- a/src/manifests_workflow/input_manifests.py\n+++ b/src/manifests_workflow/input_manifests.py\n@@ -24,6 +24,11 @@\n \n \n class InputManifests(Manifests):\n+ BUILD_PLATFORM = {\n+ \"opensearch\": \"linux macos windows\",\n+ \"opensearch-dashboards\": \"linux windows\"\n+ }\n+\n def __init__(self, name: str) -> None:\n self.name = name\n self.prefix = name.lower().replace(\" \", \"-\")\n@@ -164,7 +169,11 @@\n with open(jenkinsfile, \"r\") as f:\n data = f.read()\n \n- cron_entry = f\"H 1 * * * %INPUT_MANIFEST={version}/{self.prefix}-{version}.yml;TARGET_JOB_NAME=distribution-build-{self.prefix}\\n\"\n+ build_platform = self.BUILD_PLATFORM.get(self.prefix, \"linux\")\n+\n+ cron_entry = f\"H 1 * * * %INPUT_MANIFEST={version}/{self.prefix}-{version}.yml;\" \\\n+ f\"TARGET_JOB_NAME=distribution-build-{self.prefix};\" \\\n+ f\"BUILD_PLATFORM={build_platform}\\n\"\n \n if cron_entry in data:\n raise ValueError(f\"{jenkinsfile} already contains an entry for {self.prefix} {version}\")\n", "issue": "[Bug]: Cron created by manifest workflow doesn't have build_platform parameter\n### Describe the bug\n\nOur manifest workflow will help us create input manifest and add the build job to our cron once we have a new version from OS or OSD core. e.g. #2894 \r\n\r\nHowever since we start to build for different platform distribution, we would need to add build_platform parameters to trigger the build workflow which our current manifest workflow doesn't have. We would need to manually add the param to the cron in order to trigger it. #2911 \n\n### To reproduce\n\nWhen we have a new OS or OSD version, run `./manifest.sh update`\n\n### Expected behavior\n\nWhen the workflow raise the PR, it should also add this param to the cron. For different job type(OS or OSD), it would also add different platform parameter. e.g. 
for OS it should be `BUILD_PLATFORM=linux macos windows` but for OSD it's `BUILD_PLATFORM=linux windows`\n\n### Screenshots\n\nIf applicable, add screenshots to help explain your problem.\n\n### Host / Environment\n\n_No response_\n\n### Additional context\n\n_No response_\n\n### Relevant log output\n\n_No response_\n", "before_files": [{"content": "# Copyright OpenSearch Contributors\n# SPDX-License-Identifier: Apache-2.0\n#\n# The OpenSearch Contributors require contributions made to\n# this file be licensed under the Apache-2.0 license or a\n# compatible open source license.\n\nimport glob\nimport logging\nimport os\nimport re\nfrom abc import abstractmethod\nfrom typing import Dict, List, Type, Union\n\nimport ruamel.yaml\n\nfrom manifests.component_manifest import ComponentFromSource\nfrom manifests.input_manifest import InputComponents, InputManifest\nfrom manifests.manifests import Manifests\nfrom manifests_workflow.component_opensearch import ComponentOpenSearch\nfrom manifests_workflow.component_opensearch_dashboards_min import ComponentOpenSearchDashboardsMin\nfrom manifests_workflow.component_opensearch_min import ComponentOpenSearchMin\nfrom system.temporary_directory import TemporaryDirectory\n\n\nclass InputManifests(Manifests):\n def __init__(self, name: str) -> None:\n self.name = name\n self.prefix = name.lower().replace(\" \", \"-\")\n super().__init__(InputManifest, InputManifests.files(self.prefix))\n\n @classmethod\n def manifests_path(self) -> str:\n return os.path.realpath(os.path.join(os.path.dirname(__file__), \"..\", \"..\", \"manifests\"))\n\n @classmethod\n def workflows_path(self) -> str:\n return os.path.realpath(os.path.join(os.path.dirname(__file__), \"..\", \"..\", \".github\", \"workflows\"))\n\n @classmethod\n def legacy_manifests_path(self) -> str:\n return os.path.realpath(os.path.join(os.path.dirname(__file__), \"..\", \"..\", \"legacy-manifests\"))\n\n @classmethod\n def jenkins_path(self) -> str:\n return os.path.realpath(os.path.join(os.path.dirname(__file__), \"..\", \"..\", \"jenkins\"))\n\n @classmethod\n def cron_jenkinsfile(self) -> str:\n return os.path.join(self.jenkins_path(), \"check-for-build.jenkinsfile\")\n\n @classmethod\n def versionincrement_workflow(self) -> str:\n return os.path.join(self.workflows_path(), \"increment-plugin-versions.yml\")\n\n @classmethod\n def files(self, name: str) -> List:\n results = []\n for path in [self.manifests_path(), self.legacy_manifests_path()]:\n for filename in glob.glob(os.path.join(path, f\"**/{name}-*.yml\")):\n # avoids the -maven manifest\n match = re.search(rf\"^{name}-([0-9.]*).yml$\", os.path.basename(filename))\n if match:\n results.append(filename)\n return results\n\n @abstractmethod\n def update(\n self,\n min_klass: Union[Type[ComponentOpenSearchMin], Type[ComponentOpenSearchDashboardsMin]],\n component_klass: Type[ComponentOpenSearch],\n keep: bool = False,\n ) -> None:\n known_versions = self.versions\n logging.info(f\"Known versions: {known_versions}\")\n main_versions: Dict = {}\n with TemporaryDirectory(keep=keep, chdir=True) as work_dir:\n logging.info(f\"Checking out components into {work_dir.name}\")\n\n # check out and build #main, 1.x, etc.\n branches = min_klass.branches()\n\n logging.info(f\"Checking {self.name} {branches} branches\")\n for branch in branches:\n c = min_klass.checkout(\n path=os.path.join(work_dir.name, self.name.replace(\" \", \"\"), branch),\n branch=branch,\n )\n\n version = c.version\n logging.info(f\"{self.name}#{branch} is version {version}\")\n if 
version not in main_versions.keys():\n main_versions[version] = [c]\n\n if component_klass is not None:\n # components can increment their own version first without incrementing min\n manifest = self.latest\n logging.info(f\"Examining components in the latest manifest of {manifest.build.name} ({manifest.build.version})\")\n for component in manifest.components.values():\n if component.name == self.name:\n continue\n\n if type(component) is ComponentFromSource:\n logging.info(f\"Checking out {component.name}#main\")\n component = component_klass.checkout(\n name=component.name,\n path=os.path.join(work_dir.name, component.name),\n opensearch_version=manifest.build.version,\n repo_url=component.repository,\n branch=\"main\",\n )\n\n component_version = component.version\n if component_version:\n release_version = \".\".join(component_version.split(\".\")[:3])\n if release_version not in main_versions.keys():\n main_versions[release_version] = []\n main_versions[release_version].append(component)\n logging.info(f\"{component.name}#main is version {release_version} (from {component_version})\")\n\n # summarize\n logging.info(\"Found versions on main:\")\n for main_version in main_versions.keys():\n for component in main_versions[main_version]:\n logging.info(f\" {component.name}={main_version}\")\n\n # generate new manifests\n for release_version in sorted(main_versions.keys() - known_versions):\n self.write_manifest(release_version, main_versions[release_version])\n self.add_to_cron(release_version)\n self.add_to_versionincrement_workflow(release_version)\n\n def create_manifest(self, version: str, components: List = []) -> InputManifest:\n templates_base_path = os.path.join(self.manifests_path(), \"templates\")\n template_version_folder = version.split(\".\")[0] + \".x\"\n template_full_path = os.path.join(templates_base_path, self.prefix, template_version_folder, \"manifest.yml\")\n if not os.path.exists(template_full_path):\n template_full_path = os.path.join(templates_base_path, self.prefix, \"default\", \"manifest.yml\")\n\n manifest = InputManifest.from_file(open(template_full_path))\n\n manifest.build.version = version\n manifests_components = []\n\n for component in components:\n logging.info(f\" Adding {component.name}\")\n manifests_components.append(component.to_dict())\n\n manifest.components = InputComponents(manifests_components) # type: ignore\n return manifest\n\n def write_manifest(self, version: str, components: List = []) -> None:\n logging.info(f\"Creating new version: {version}\")\n manifest = self.create_manifest(version, components)\n manifest_dir = os.path.join(self.manifests_path(), version)\n os.makedirs(manifest_dir, exist_ok=True)\n manifest_path = os.path.join(manifest_dir, f\"{self.prefix}-{version}.yml\")\n manifest.to_file(manifest_path)\n logging.info(f\"Wrote {manifest_path}\")\n\n def add_to_cron(self, version: str) -> None:\n logging.info(f\"Adding new version to cron: {version}\")\n jenkinsfile = self.cron_jenkinsfile()\n with open(jenkinsfile, \"r\") as f:\n data = f.read()\n\n cron_entry = f\"H 1 * * * %INPUT_MANIFEST={version}/{self.prefix}-{version}.yml;TARGET_JOB_NAME=distribution-build-{self.prefix}\\n\"\n\n if cron_entry in data:\n raise ValueError(f\"{jenkinsfile} already contains an entry for {self.prefix} {version}\")\n\n data = data.replace(\"parameterizedCron '''\\n\", f\"parameterizedCron '''\\n{' ' * 12}{cron_entry}\")\n\n with open(jenkinsfile, \"w\") as f:\n f.write(data)\n\n logging.info(f\"Wrote {jenkinsfile}\")\n\n def 
add_to_versionincrement_workflow(self, version: str) -> None:\n versionincrement_workflow_file = self.versionincrement_workflow()\n yaml = ruamel.yaml.YAML()\n yaml.explicit_start = True # type: ignore\n yaml.preserve_quotes = True # type: ignore\n\n with open(versionincrement_workflow_file) as f:\n data = yaml.load(f)\n\n version_entry = []\n major_version_entry = version.split(\".\")[0] + \".x\"\n minor_version_entry = version.rsplit(\".\", 1)[0]\n if minor_version_entry not in data[\"jobs\"][\"plugin-version-increment-sync\"][\"strategy\"][\"matrix\"][\"branch\"]:\n print(f\"Adding {minor_version_entry} to {versionincrement_workflow_file}\")\n version_entry.append(minor_version_entry)\n if major_version_entry not in data[\"jobs\"][\"plugin-version-increment-sync\"][\"strategy\"][\"matrix\"][\"branch\"]:\n print(f\"Adding {major_version_entry} to {versionincrement_workflow_file}\")\n version_entry.append(major_version_entry)\n\n if version_entry:\n branch_list = list(data[\"jobs\"][\"plugin-version-increment-sync\"][\"strategy\"][\"matrix\"][\"branch\"])\n branch_list.extend(version_entry)\n data[\"jobs\"][\"plugin-version-increment-sync\"][\"strategy\"][\"matrix\"][\"branch\"] = branch_list\n yaml.indent(mapping=2, sequence=4, offset=2)\n with open(versionincrement_workflow_file, 'w') as f:\n yaml.dump(data, f)\n logging.info(\"Added new version to the version increment workflow\")\n", "path": "src/manifests_workflow/input_manifests.py"}]}
| 3,215 | 315 |
gh_patches_debug_11625
|
rasdani/github-patches
|
git_diff
|
fidals__shopelectro-419
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
/robots.txt url returns 301. stb2
Check this for example:
https://www.shopelectro.ru/robots.txt
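A quick reproduction sketch. The URL comes from the report above; the explanation (Django's `APPEND_SLASH` redirect kicking in because the page pattern requires a trailing slash) is an assumption based on the `custom_pages` regex shown below:

```python
import requests

# Expect 200 once fixed; while the bug is present this prints 301 and a
# Location header pointing at /robots.txt/ (the slash-appended URL).
resp = requests.get("https://www.shopelectro.ru/robots.txt", allow_redirects=False)
print(resp.status_code)
print(resp.headers.get("Location"))
```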
</issue>
<code>
[start of shopelectro/urls.py]
1 from collections import OrderedDict
2
3 from django.conf import settings
4 from django.conf.urls import url, include
5 from django.conf.urls.static import static
6 from django.contrib.sitemaps.views import sitemap
7 from django.views.decorators.cache import cache_page
8
9 from pages.views import RobotsView, SitemapPage
10 from pages.urls import custom_page_url
11
12 from shopelectro import sitemaps, config, views
13 from shopelectro.admin import se_admin
14
15 # Orders sitemaps instances
16 sitemaps = OrderedDict([
17 ('index', sitemaps.IndexSitemap),
18 ('category', sitemaps.CategorySitemap),
19 ('category-with-tags', sitemaps.CategoryWithTagsSitemap),
20 ('products', sitemaps.ProductSitemap),
21 ('site', sitemaps.PagesSitemap)
22 ])
23
24 # disable cache
25 if settings.DEBUG:
26 def cache_page(arg): # Ignore PyFlakesBear
27 if callable(arg):
28 return arg
29 return cache_page
30
31 cached_60d = cache_page(config.cached_time(days=60))
32 cached_2h = cache_page(config.cached_time(hours=2))
33
34 admin_urls = [
35 url(r'^', se_admin.urls),
36 url(r'^autocomplete/$', views.AdminAutocomplete.as_view(), name='admin_autocomplete'),
37 url(r'^get-tree-items/$', views.Tree.as_view()),
38 url(r'^redirect-to-product/$', views.RedirectToProduct.as_view()),
39 url(r'^table-editor-api/$', views.TableEditorAPI.as_view()),
40 url(r'^select2/', include('django_select2.urls')),
41 ]
42
43 catalog_urls = [
44 # "category" group
45 url(r'^categories/(?P<slug>[\w-]+)/$',
46 cached_2h(views.CategoryPage.as_view()), name='category'),
47 url(r'^categories/(?P<slug>[\w-]+)/tags/(?P<tags>[\w-]+)/$',
48 cached_2h(views.CategoryPage.as_view()), name='category'),
49 url(r'^categories/(?P<slug>[\w-]+)/(?P<sorting>[0-9]*)/$',
50 views.CategoryPage.as_view(), name='category'),
51 url(r'^categories/(?P<slug>[\w-]+)/(?P<sorting>[0-9]*)/tags/(?P<tags>[\w-]+)/$',
52 views.CategoryPage.as_view(), name='category'),
53 # "load more" group
54 url(r'categories/(?P<category_slug>[\w-]+)/load-more/'
55 r'(?P<offset>[0-9]+)/(?P<sorting>[0-9]*)/$',
56 views.load_more, name='load_more'),
57 url(r'categories/(?P<category_slug>[\w-]+)/load-more/'
58 r'(?P<offset>[0-9]+)/(?P<sorting>[0-9]*)/tags/(?P<tags>[\w-]+)/$',
59 views.load_more, name='load_more'),
60 # rest of urls
61 url(r'^no-images/$', views.ProductsWithoutImages.as_view(),
62 name='products_without_images'),
63 url(r'^no-text/$', views.ProductsWithoutText.as_view(),
64 name='products_without_text'),
65 url(r'^products/(?P<product_vendor_code>[0-9]+)/$',
66 views.ProductPage.as_view(), name='product'),
67 ]
68
69 service_urls = [
70 url(r'^ya-kassa/aviso/$', views.yandex_aviso, name='yandex_aviso'),
71 url(r'^ya-kassa/check/$', views.yandex_check, name='yandex_check'),
72 url(r'^ya-feedback/redirect/$',
73 views.ya_feedback_with_redirect, name='ya_feedback_with_redirect'),
74 url(r'^ya-feedback/request/$',
75 views.ya_feedback_request, name='ya_feedback_request'),
76 ]
77
78 search_urls = [
79 url(r'^autocomplete/$', views.Autocomplete.as_view(), name='autocomplete'),
80 ]
81
82 ecommerce_urls = [
83 url(r'^cart-add/$', views.AddToCart.as_view(), name='cart_add'),
84 url(r'^cart-change/$', views.ChangeCount.as_view(), name='cart_set_count'),
85 url(r'^cart-flush/$', views.FlushCart.as_view(), name='cart_flush'),
86 url(r'^cart-remove/$', views.RemoveFromCart.as_view(), name='cart_remove'),
87 url(r'^order-call/$', views.order_call),
88 url(r'^one-click-buy/$', views.one_click_buy),
89 url(r'^yandex-order/$', views.YandexOrder.as_view()),
90 url(r'', include('ecommerce.urls')),
91 ]
92
93 custom_pages = [
94 custom_page_url(r'^(?P<page>)$', cached_2h(views.IndexPage.as_view())),
95 custom_page_url(r'^(?P<page>robots\.txt)/$', RobotsView.as_view(in_db=True)),
96 custom_page_url(r'^(?P<page>search)/$', views.Search.as_view()),
97 custom_page_url(r'^(?P<page>catalog)/$', cached_2h(views.CategoryTree.as_view())),
98 custom_page_url(r'^shop/(?P<page>order)/$', views.OrderPage.as_view()),
99 custom_page_url(r'^shop/(?P<page>order-success)/$', views.OrderSuccess.as_view()),
100 custom_page_url(r'^(?P<page>sitemap)/$', SitemapPage.as_view()),
101 ]
102
103 urlpatterns = [
104 url('', include(custom_pages)),
105 url(r'^admin/', include(admin_urls)),
106 url(r'^catalog/', include(catalog_urls)),
107 url(r'^pages/', include('pages.urls')),
108 url(r'^save-feedback/$', views.save_feedback),
109 url(r'^delete-feedback/$', views.delete_feedback),
110 url(r'^set-view-type/$', views.set_view_type, name='set_view_type'),
111 url(r'^shop/', include(ecommerce_urls)),
112 url(r'^search/', include(search_urls)),
113 url(r'^service/', include(service_urls)),
114 url(r'^sitemap\.xml$', cached_60d(sitemap), {'sitemaps': sitemaps}, name='sitemap'),
115 ]
116
117 if settings.DEBUG:
118 import debug_toolbar
119
120 urlpatterns += [
121 url(r'^__debug__/', include(debug_toolbar.urls)),
122 *static(settings.STATIC_URL, document_root=settings.STATIC_ROOT),
123 *static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT),
124 ]
125
[end of shopelectro/urls.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/shopelectro/urls.py b/shopelectro/urls.py
--- a/shopelectro/urls.py
+++ b/shopelectro/urls.py
@@ -92,7 +92,7 @@
custom_pages = [
custom_page_url(r'^(?P<page>)$', cached_2h(views.IndexPage.as_view())),
- custom_page_url(r'^(?P<page>robots\.txt)/$', RobotsView.as_view(in_db=True)),
+ custom_page_url(r'^(?P<page>robots\.txt)$', RobotsView.as_view(in_db=True)),
custom_page_url(r'^(?P<page>search)/$', views.Search.as_view()),
custom_page_url(r'^(?P<page>catalog)/$', cached_2h(views.CategoryTree.as_view())),
custom_page_url(r'^shop/(?P<page>order)/$', views.OrderPage.as_view()),
|
{"golden_diff": "diff --git a/shopelectro/urls.py b/shopelectro/urls.py\n--- a/shopelectro/urls.py\n+++ b/shopelectro/urls.py\n@@ -92,7 +92,7 @@\n \n custom_pages = [\n custom_page_url(r'^(?P<page>)$', cached_2h(views.IndexPage.as_view())),\n- custom_page_url(r'^(?P<page>robots\\.txt)/$', RobotsView.as_view(in_db=True)),\n+ custom_page_url(r'^(?P<page>robots\\.txt)$', RobotsView.as_view(in_db=True)),\n custom_page_url(r'^(?P<page>search)/$', views.Search.as_view()),\n custom_page_url(r'^(?P<page>catalog)/$', cached_2h(views.CategoryTree.as_view())),\n custom_page_url(r'^shop/(?P<page>order)/$', views.OrderPage.as_view()),\n", "issue": "/robots.txt url returns 301. stb2\nCheck this for example:\r\nhttps://www.shopelectro.ru/robots.txt\n", "before_files": [{"content": "from collections import OrderedDict\n\nfrom django.conf import settings\nfrom django.conf.urls import url, include\nfrom django.conf.urls.static import static\nfrom django.contrib.sitemaps.views import sitemap\nfrom django.views.decorators.cache import cache_page\n\nfrom pages.views import RobotsView, SitemapPage\nfrom pages.urls import custom_page_url\n\nfrom shopelectro import sitemaps, config, views\nfrom shopelectro.admin import se_admin\n\n# Orders sitemaps instances\nsitemaps = OrderedDict([\n ('index', sitemaps.IndexSitemap),\n ('category', sitemaps.CategorySitemap),\n ('category-with-tags', sitemaps.CategoryWithTagsSitemap),\n ('products', sitemaps.ProductSitemap),\n ('site', sitemaps.PagesSitemap)\n])\n\n# disable cache\nif settings.DEBUG:\n def cache_page(arg): # Ignore PyFlakesBear\n if callable(arg):\n return arg\n return cache_page\n\ncached_60d = cache_page(config.cached_time(days=60))\ncached_2h = cache_page(config.cached_time(hours=2))\n\nadmin_urls = [\n url(r'^', se_admin.urls),\n url(r'^autocomplete/$', views.AdminAutocomplete.as_view(), name='admin_autocomplete'),\n url(r'^get-tree-items/$', views.Tree.as_view()),\n url(r'^redirect-to-product/$', views.RedirectToProduct.as_view()),\n url(r'^table-editor-api/$', views.TableEditorAPI.as_view()),\n url(r'^select2/', include('django_select2.urls')),\n]\n\ncatalog_urls = [\n # \"category\" group\n url(r'^categories/(?P<slug>[\\w-]+)/$',\n cached_2h(views.CategoryPage.as_view()), name='category'),\n url(r'^categories/(?P<slug>[\\w-]+)/tags/(?P<tags>[\\w-]+)/$',\n cached_2h(views.CategoryPage.as_view()), name='category'),\n url(r'^categories/(?P<slug>[\\w-]+)/(?P<sorting>[0-9]*)/$',\n views.CategoryPage.as_view(), name='category'),\n url(r'^categories/(?P<slug>[\\w-]+)/(?P<sorting>[0-9]*)/tags/(?P<tags>[\\w-]+)/$',\n views.CategoryPage.as_view(), name='category'),\n # \"load more\" group\n url(r'categories/(?P<category_slug>[\\w-]+)/load-more/'\n r'(?P<offset>[0-9]+)/(?P<sorting>[0-9]*)/$',\n views.load_more, name='load_more'),\n url(r'categories/(?P<category_slug>[\\w-]+)/load-more/'\n r'(?P<offset>[0-9]+)/(?P<sorting>[0-9]*)/tags/(?P<tags>[\\w-]+)/$',\n views.load_more, name='load_more'),\n # rest of urls\n url(r'^no-images/$', views.ProductsWithoutImages.as_view(),\n name='products_without_images'),\n url(r'^no-text/$', views.ProductsWithoutText.as_view(),\n name='products_without_text'),\n url(r'^products/(?P<product_vendor_code>[0-9]+)/$',\n views.ProductPage.as_view(), name='product'),\n]\n\nservice_urls = [\n url(r'^ya-kassa/aviso/$', views.yandex_aviso, name='yandex_aviso'),\n url(r'^ya-kassa/check/$', views.yandex_check, name='yandex_check'),\n url(r'^ya-feedback/redirect/$',\n views.ya_feedback_with_redirect, name='ya_feedback_with_redirect'),\n 
url(r'^ya-feedback/request/$',\n views.ya_feedback_request, name='ya_feedback_request'),\n]\n\nsearch_urls = [\n url(r'^autocomplete/$', views.Autocomplete.as_view(), name='autocomplete'),\n]\n\necommerce_urls = [\n url(r'^cart-add/$', views.AddToCart.as_view(), name='cart_add'),\n url(r'^cart-change/$', views.ChangeCount.as_view(), name='cart_set_count'),\n url(r'^cart-flush/$', views.FlushCart.as_view(), name='cart_flush'),\n url(r'^cart-remove/$', views.RemoveFromCart.as_view(), name='cart_remove'),\n url(r'^order-call/$', views.order_call),\n url(r'^one-click-buy/$', views.one_click_buy),\n url(r'^yandex-order/$', views.YandexOrder.as_view()),\n url(r'', include('ecommerce.urls')),\n]\n\ncustom_pages = [\n custom_page_url(r'^(?P<page>)$', cached_2h(views.IndexPage.as_view())),\n custom_page_url(r'^(?P<page>robots\\.txt)/$', RobotsView.as_view(in_db=True)),\n custom_page_url(r'^(?P<page>search)/$', views.Search.as_view()),\n custom_page_url(r'^(?P<page>catalog)/$', cached_2h(views.CategoryTree.as_view())),\n custom_page_url(r'^shop/(?P<page>order)/$', views.OrderPage.as_view()),\n custom_page_url(r'^shop/(?P<page>order-success)/$', views.OrderSuccess.as_view()),\n custom_page_url(r'^(?P<page>sitemap)/$', SitemapPage.as_view()),\n]\n\nurlpatterns = [\n url('', include(custom_pages)),\n url(r'^admin/', include(admin_urls)),\n url(r'^catalog/', include(catalog_urls)),\n url(r'^pages/', include('pages.urls')),\n url(r'^save-feedback/$', views.save_feedback),\n url(r'^delete-feedback/$', views.delete_feedback),\n url(r'^set-view-type/$', views.set_view_type, name='set_view_type'),\n url(r'^shop/', include(ecommerce_urls)),\n url(r'^search/', include(search_urls)),\n url(r'^service/', include(service_urls)),\n url(r'^sitemap\\.xml$', cached_60d(sitemap), {'sitemaps': sitemaps}, name='sitemap'),\n]\n\nif settings.DEBUG:\n import debug_toolbar\n\n urlpatterns += [\n url(r'^__debug__/', include(debug_toolbar.urls)),\n *static(settings.STATIC_URL, document_root=settings.STATIC_ROOT),\n *static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT),\n ]\n", "path": "shopelectro/urls.py"}]}
| 2,160 | 200 |
gh_patches_debug_30877
|
rasdani/github-patches
|
git_diff
|
ray-project__ray-10840
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[tune] How to test if I'm running inside a tune session?
Is there an API to test if I'm running inside a tune session? I'd like to conditionally call `tune.report()` in my code.
There are functions like `get_trial_dir`, `get_trial_name`, and `get_trial_id` that internally call `get_session()`. I guess I could use one of them to see if they return `None` or not. But they also log a warning when they can't find a session, which is not ideal.
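
Roughly the pattern I'd like to be able to write, as a minimal sketch assuming some session-detection helper is exposed (the name `is_session_enabled()` is just a placeholder here, not an existing API):

```python
from ray import tune


def train_fn(config):
    loss = (config["x"] - 3) ** 2
    # Report only when an actual Tune session exists, so the same function can
    # run standalone without triggering the "Session not detected" warning.
    if tune.is_session_enabled():  # placeholder name for the requested helper
        tune.report(loss=loss)
    return loss
```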
</issue>
<code>
[start of python/ray/tune/session.py]
1 from contextlib import contextmanager
2 import os
3 import logging
4
5 logger = logging.getLogger(__name__)
6
7 _session = None
8
9
10 def get_session():
11 global _session
12 if not _session:
13 logger.warning(
14 "Session not detected. You should not be calling this function "
15 "outside `tune.run` or while using the class API. ")
16 return _session
17
18
19 def init(reporter, ignore_reinit_error=True):
20 """Initializes the global trial context for this process."""
21 global _session
22
23 if _session is not None:
24 # TODO(ng): would be nice to stack crawl at creation time to report
25 # where that initial trial was created, and that creation line
26 # info is helpful to keep around anyway.
27 reinit_msg = (
28 "A Tune session already exists in the current process. "
29 "If you are using ray.init(local_mode=True), "
30 "you must set ray.init(..., num_cpus=1, num_gpus=1) to limit "
31 "available concurrency.")
32 if ignore_reinit_error:
33 logger.warning(reinit_msg)
34 return
35 else:
36 raise ValueError(reinit_msg)
37
38 if reporter is None:
39 logger.warning("You are using a Tune session outside of Tune. "
40 "Most session commands will have no effect.")
41
42 _session = reporter
43
44
45 def shutdown():
46 """Cleans up the trial and removes it from the global context."""
47
48 global _session
49 _session = None
50
51
52 def report(**kwargs):
53 """Logs all keyword arguments.
54
55 .. code-block:: python
56
57 import time
58 from ray import tune
59
60 def run_me(config):
61 for iter in range(100):
62 time.sleep(1)
63 tune.report(hello="world", ray="tune")
64
65 analysis = tune.run(run_me)
66
67 Args:
68 **kwargs: Any key value pair to be logged by Tune. Any of these
69 metrics can be used for early stopping or optimization.
70 """
71 _session = get_session()
72 if _session:
73 return _session(**kwargs)
74
75
76 def make_checkpoint_dir(step=None):
77 """Gets the next checkpoint dir.
78
79 .. versionadded:: 0.8.6
80
81 .. deprecated:: 0.8.7
82 Use tune.checkpoint_dir instead.
83 """
84 raise DeprecationWarning(
85 "Deprecated method. Use `tune.checkpoint_dir` instead.")
86
87
88 def save_checkpoint(checkpoint):
89 """Register the given checkpoint.
90
91 .. versionadded:: 0.8.6
92
93 .. deprecated:: 0.8.7
94 Use tune.checkpoint_dir instead.
95 """
96 raise DeprecationWarning(
97 "Deprecated method. Use `tune.checkpoint_dir` instead.")
98
99
100 @contextmanager
101 def checkpoint_dir(step):
102 """Returns a checkpoint dir inside a context.
103
104 Store any files related to restoring state within the
105 provided checkpoint dir.
106
107 Args:
108 step (int): Index for the checkpoint. Expected to be a
109 monotonically increasing quantity.
110
111 .. code-block:: python
112
113 import os
114 import json
115 import time
116 from ray import tune
117
118 def func(config, checkpoint_dir=None):
119 start = 0
120 if checkpoint_dir:
121 with open(os.path.join(checkpoint_dir, "checkpoint")) as f:
122 state = json.loads(f.read())
123 accuracy = state["acc"]
124 start = state["step"] + 1
125
126 for iter in range(start, 10):
127 time.sleep(1)
128
129 with tune.checkpoint_dir(step=iter) as checkpoint_dir:
130 path = os.path.join(checkpoint_dir, "checkpoint")
131 with open(path, "w") as f:
132 f.write(json.dumps({"step": start}))
133
134 tune.report(hello="world", ray="tune")
135
136 Yields:
137 checkpoint_dir (str): Directory for checkpointing.
138
139 .. versionadded:: 0.8.7
140 """
141 _session = get_session()
142
143 if step is None:
144 raise ValueError("checkpoint_dir(step) must be provided - got None.")
145
146 if _session:
147 _checkpoint_dir = _session.make_checkpoint_dir(step=step)
148 else:
149 _checkpoint_dir = os.path.abspath("./")
150
151 yield _checkpoint_dir
152
153 if _session:
154 _session.set_checkpoint(_checkpoint_dir)
155
156
157 def get_trial_dir():
158 """Returns the directory where trial results are saved.
159
160 For function API use only.
161 """
162 _session = get_session()
163 if _session:
164 return _session.logdir
165
166
167 def get_trial_name():
168 """Trial name for the corresponding trial.
169
170 For function API use only.
171 """
172 _session = get_session()
173 if _session:
174 return _session.trial_name
175
176
177 def get_trial_id():
178 """Trial id for the corresponding trial.
179
180 For function API use only.
181 """
182 _session = get_session()
183 if _session:
184 return _session.trial_id
185
186
187 __all__ = ["report", "get_trial_dir", "get_trial_name", "get_trial_id"]
188
[end of python/ray/tune/session.py]
[start of python/ray/tune/__init__.py]
1 from ray.tune.error import TuneError
2 from ray.tune.tune import run_experiments, run
3 from ray.tune.function_runner import with_parameters
4 from ray.tune.syncer import SyncConfig
5 from ray.tune.experiment import Experiment
6 from ray.tune.analysis import ExperimentAnalysis, Analysis
7 from ray.tune.stopper import Stopper, EarlyStopping
8 from ray.tune.registry import register_env, register_trainable
9 from ray.tune.trainable import Trainable
10 from ray.tune.durable_trainable import DurableTrainable
11 from ray.tune.suggest import grid_search
12 from ray.tune.session import (report, get_trial_dir, get_trial_name,
13 get_trial_id, make_checkpoint_dir,
14 save_checkpoint, checkpoint_dir)
15 from ray.tune.progress_reporter import (ProgressReporter, CLIReporter,
16 JupyterNotebookReporter)
17 from ray.tune.sample import (function, sample_from, uniform, quniform, choice,
18 randint, qrandint, randn, qrandn, loguniform,
19 qloguniform)
20 from ray.tune.suggest import create_searcher
21 from ray.tune.schedulers import create_scheduler
22
23 __all__ = [
24 "Trainable", "DurableTrainable", "TuneError", "grid_search",
25 "register_env", "register_trainable", "run", "run_experiments",
26 "with_parameters", "Stopper", "EarlyStopping", "Experiment", "function",
27 "sample_from", "track", "uniform", "quniform", "choice", "randint",
28 "qrandint", "randn", "qrandn", "loguniform", "qloguniform",
29 "ExperimentAnalysis", "Analysis", "CLIReporter", "JupyterNotebookReporter",
30 "ProgressReporter", "report", "get_trial_dir", "get_trial_name",
31 "get_trial_id", "make_checkpoint_dir", "save_checkpoint", "checkpoint_dir",
32 "SyncConfig", "create_searcher", "create_scheduler"
33 ]
34
[end of python/ray/tune/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/python/ray/tune/__init__.py b/python/ray/tune/__init__.py
--- a/python/ray/tune/__init__.py
+++ b/python/ray/tune/__init__.py
@@ -9,9 +9,9 @@
from ray.tune.trainable import Trainable
from ray.tune.durable_trainable import DurableTrainable
from ray.tune.suggest import grid_search
-from ray.tune.session import (report, get_trial_dir, get_trial_name,
- get_trial_id, make_checkpoint_dir,
- save_checkpoint, checkpoint_dir)
+from ray.tune.session import (
+ report, get_trial_dir, get_trial_name, get_trial_id, make_checkpoint_dir,
+ save_checkpoint, checkpoint_dir, is_session_enabled)
from ray.tune.progress_reporter import (ProgressReporter, CLIReporter,
JupyterNotebookReporter)
from ray.tune.sample import (function, sample_from, uniform, quniform, choice,
@@ -28,6 +28,7 @@
"qrandint", "randn", "qrandn", "loguniform", "qloguniform",
"ExperimentAnalysis", "Analysis", "CLIReporter", "JupyterNotebookReporter",
"ProgressReporter", "report", "get_trial_dir", "get_trial_name",
- "get_trial_id", "make_checkpoint_dir", "save_checkpoint", "checkpoint_dir",
- "SyncConfig", "create_searcher", "create_scheduler"
+ "get_trial_id", "make_checkpoint_dir", "save_checkpoint",
+ "is_session_enabled", "checkpoint_dir", "SyncConfig", "create_searcher",
+ "create_scheduler"
]
diff --git a/python/ray/tune/session.py b/python/ray/tune/session.py
--- a/python/ray/tune/session.py
+++ b/python/ray/tune/session.py
@@ -7,6 +7,12 @@
_session = None
+def is_session_enabled() -> bool:
+ """Returns True if running within an Tune process."""
+ global _session
+ return _session is not None
+
+
def get_session():
global _session
if not _session:
|
{"golden_diff": "diff --git a/python/ray/tune/__init__.py b/python/ray/tune/__init__.py\n--- a/python/ray/tune/__init__.py\n+++ b/python/ray/tune/__init__.py\n@@ -9,9 +9,9 @@\n from ray.tune.trainable import Trainable\n from ray.tune.durable_trainable import DurableTrainable\n from ray.tune.suggest import grid_search\n-from ray.tune.session import (report, get_trial_dir, get_trial_name,\n- get_trial_id, make_checkpoint_dir,\n- save_checkpoint, checkpoint_dir)\n+from ray.tune.session import (\n+ report, get_trial_dir, get_trial_name, get_trial_id, make_checkpoint_dir,\n+ save_checkpoint, checkpoint_dir, is_session_enabled)\n from ray.tune.progress_reporter import (ProgressReporter, CLIReporter,\n JupyterNotebookReporter)\n from ray.tune.sample import (function, sample_from, uniform, quniform, choice,\n@@ -28,6 +28,7 @@\n \"qrandint\", \"randn\", \"qrandn\", \"loguniform\", \"qloguniform\",\n \"ExperimentAnalysis\", \"Analysis\", \"CLIReporter\", \"JupyterNotebookReporter\",\n \"ProgressReporter\", \"report\", \"get_trial_dir\", \"get_trial_name\",\n- \"get_trial_id\", \"make_checkpoint_dir\", \"save_checkpoint\", \"checkpoint_dir\",\n- \"SyncConfig\", \"create_searcher\", \"create_scheduler\"\n+ \"get_trial_id\", \"make_checkpoint_dir\", \"save_checkpoint\",\n+ \"is_session_enabled\", \"checkpoint_dir\", \"SyncConfig\", \"create_searcher\",\n+ \"create_scheduler\"\n ]\ndiff --git a/python/ray/tune/session.py b/python/ray/tune/session.py\n--- a/python/ray/tune/session.py\n+++ b/python/ray/tune/session.py\n@@ -7,6 +7,12 @@\n _session = None\n \n \n+def is_session_enabled() -> bool:\n+ \"\"\"Returns True if running within an Tune process.\"\"\"\n+ global _session\n+ return _session is not None\n+\n+\n def get_session():\n global _session\n if not _session:\n", "issue": "[tune] How to test if I'm running inside a tune session?\nIs there an API to test if I'm running inside a tune session? I'd like to conditionally call `tune.report()` in my code.\r\n\r\nThere are functions like `get_trial_dir`, `get_trial_name`, `get_trial_id`, that internally call `get_session()`. I guess I could use one of them see if they return `None` or not. But they also log a warning when they can't find a session which is not ideal.\n", "before_files": [{"content": "from contextlib import contextmanager\nimport os\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n_session = None\n\n\ndef get_session():\n global _session\n if not _session:\n logger.warning(\n \"Session not detected. You should not be calling this function \"\n \"outside `tune.run` or while using the class API. \")\n return _session\n\n\ndef init(reporter, ignore_reinit_error=True):\n \"\"\"Initializes the global trial context for this process.\"\"\"\n global _session\n\n if _session is not None:\n # TODO(ng): would be nice to stack crawl at creation time to report\n # where that initial trial was created, and that creation line\n # info is helpful to keep around anyway.\n reinit_msg = (\n \"A Tune session already exists in the current process. \"\n \"If you are using ray.init(local_mode=True), \"\n \"you must set ray.init(..., num_cpus=1, num_gpus=1) to limit \"\n \"available concurrency.\")\n if ignore_reinit_error:\n logger.warning(reinit_msg)\n return\n else:\n raise ValueError(reinit_msg)\n\n if reporter is None:\n logger.warning(\"You are using a Tune session outside of Tune. 
\"\n \"Most session commands will have no effect.\")\n\n _session = reporter\n\n\ndef shutdown():\n \"\"\"Cleans up the trial and removes it from the global context.\"\"\"\n\n global _session\n _session = None\n\n\ndef report(**kwargs):\n \"\"\"Logs all keyword arguments.\n\n .. code-block:: python\n\n import time\n from ray import tune\n\n def run_me(config):\n for iter in range(100):\n time.sleep(1)\n tune.report(hello=\"world\", ray=\"tune\")\n\n analysis = tune.run(run_me)\n\n Args:\n **kwargs: Any key value pair to be logged by Tune. Any of these\n metrics can be used for early stopping or optimization.\n \"\"\"\n _session = get_session()\n if _session:\n return _session(**kwargs)\n\n\ndef make_checkpoint_dir(step=None):\n \"\"\"Gets the next checkpoint dir.\n\n .. versionadded:: 0.8.6\n\n .. deprecated:: 0.8.7\n Use tune.checkpoint_dir instead.\n \"\"\"\n raise DeprecationWarning(\n \"Deprecated method. Use `tune.checkpoint_dir` instead.\")\n\n\ndef save_checkpoint(checkpoint):\n \"\"\"Register the given checkpoint.\n\n .. versionadded:: 0.8.6\n\n .. deprecated:: 0.8.7\n Use tune.checkpoint_dir instead.\n \"\"\"\n raise DeprecationWarning(\n \"Deprecated method. Use `tune.checkpoint_dir` instead.\")\n\n\n@contextmanager\ndef checkpoint_dir(step):\n \"\"\"Returns a checkpoint dir inside a context.\n\n Store any files related to restoring state within the\n provided checkpoint dir.\n\n Args:\n step (int): Index for the checkpoint. Expected to be a\n monotonically increasing quantity.\n\n .. code-block:: python\n\n import os\n import json\n import time\n from ray import tune\n\n def func(config, checkpoint_dir=None):\n start = 0\n if checkpoint_dir:\n with open(os.path.join(checkpoint_dir, \"checkpoint\")) as f:\n state = json.loads(f.read())\n accuracy = state[\"acc\"]\n start = state[\"step\"] + 1\n\n for iter in range(start, 10):\n time.sleep(1)\n\n with tune.checkpoint_dir(step=iter) as checkpoint_dir:\n path = os.path.join(checkpoint_dir, \"checkpoint\")\n with open(path, \"w\") as f:\n f.write(json.dumps({\"step\": start}))\n\n tune.report(hello=\"world\", ray=\"tune\")\n\n Yields:\n checkpoint_dir (str): Directory for checkpointing.\n\n .. 
versionadded:: 0.8.7\n \"\"\"\n _session = get_session()\n\n if step is None:\n raise ValueError(\"checkpoint_dir(step) must be provided - got None.\")\n\n if _session:\n _checkpoint_dir = _session.make_checkpoint_dir(step=step)\n else:\n _checkpoint_dir = os.path.abspath(\"./\")\n\n yield _checkpoint_dir\n\n if _session:\n _session.set_checkpoint(_checkpoint_dir)\n\n\ndef get_trial_dir():\n \"\"\"Returns the directory where trial results are saved.\n\n For function API use only.\n \"\"\"\n _session = get_session()\n if _session:\n return _session.logdir\n\n\ndef get_trial_name():\n \"\"\"Trial name for the corresponding trial.\n\n For function API use only.\n \"\"\"\n _session = get_session()\n if _session:\n return _session.trial_name\n\n\ndef get_trial_id():\n \"\"\"Trial id for the corresponding trial.\n\n For function API use only.\n \"\"\"\n _session = get_session()\n if _session:\n return _session.trial_id\n\n\n__all__ = [\"report\", \"get_trial_dir\", \"get_trial_name\", \"get_trial_id\"]\n", "path": "python/ray/tune/session.py"}, {"content": "from ray.tune.error import TuneError\nfrom ray.tune.tune import run_experiments, run\nfrom ray.tune.function_runner import with_parameters\nfrom ray.tune.syncer import SyncConfig\nfrom ray.tune.experiment import Experiment\nfrom ray.tune.analysis import ExperimentAnalysis, Analysis\nfrom ray.tune.stopper import Stopper, EarlyStopping\nfrom ray.tune.registry import register_env, register_trainable\nfrom ray.tune.trainable import Trainable\nfrom ray.tune.durable_trainable import DurableTrainable\nfrom ray.tune.suggest import grid_search\nfrom ray.tune.session import (report, get_trial_dir, get_trial_name,\n get_trial_id, make_checkpoint_dir,\n save_checkpoint, checkpoint_dir)\nfrom ray.tune.progress_reporter import (ProgressReporter, CLIReporter,\n JupyterNotebookReporter)\nfrom ray.tune.sample import (function, sample_from, uniform, quniform, choice,\n randint, qrandint, randn, qrandn, loguniform,\n qloguniform)\nfrom ray.tune.suggest import create_searcher\nfrom ray.tune.schedulers import create_scheduler\n\n__all__ = [\n \"Trainable\", \"DurableTrainable\", \"TuneError\", \"grid_search\",\n \"register_env\", \"register_trainable\", \"run\", \"run_experiments\",\n \"with_parameters\", \"Stopper\", \"EarlyStopping\", \"Experiment\", \"function\",\n \"sample_from\", \"track\", \"uniform\", \"quniform\", \"choice\", \"randint\",\n \"qrandint\", \"randn\", \"qrandn\", \"loguniform\", \"qloguniform\",\n \"ExperimentAnalysis\", \"Analysis\", \"CLIReporter\", \"JupyterNotebookReporter\",\n \"ProgressReporter\", \"report\", \"get_trial_dir\", \"get_trial_name\",\n \"get_trial_id\", \"make_checkpoint_dir\", \"save_checkpoint\", \"checkpoint_dir\",\n \"SyncConfig\", \"create_searcher\", \"create_scheduler\"\n]\n", "path": "python/ray/tune/__init__.py"}]}
| 2,719 | 469 |
gh_patches_debug_11833
|
rasdani/github-patches
|
git_diff
|
optuna__optuna-5153
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Clarify `GridSampler` and ask-and-tell interface problem
### What is an issue?
`GridSampler` with the ask-and-tell interface does not work the same way as the `optimize` interface due to a `RuntimeError`, as reported in https://github.com/optuna/optuna/issues/4121 and https://github.com/optuna/optuna/issues/5141. This should be clarified on the `GridSampler` documentation page, possibly in the first note section, which sounds related to this problem.
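
For reference, a minimal ask-and-tell sketch of the situation (the search space and objective are made up; the `try/except` around `study.tell()` is one possible form of the manual handling that is currently needed):

```python
import optuna

search_space = {"x": [-1, 0, 1]}
study = optuna.create_study(sampler=optuna.samplers.GridSampler(search_space))

for _ in range(len(search_space["x"])):
    trial = study.ask()
    value = trial.suggest_float("x", -1, 1) ** 2
    try:
        study.tell(trial, value)
    except RuntimeError:
        # GridSampler calls study.stop() from after_trial() once the grid is
        # exhausted, which raises when no optimize() loop is running.
        break
```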
</issue>
<code>
[start of optuna/samplers/_grid.py]
1 import itertools
2 from numbers import Real
3 from typing import Any
4 from typing import Dict
5 from typing import List
6 from typing import Mapping
7 from typing import Optional
8 from typing import Sequence
9 from typing import Union
10 import warnings
11
12 import numpy as np
13
14 from optuna.distributions import BaseDistribution
15 from optuna.logging import get_logger
16 from optuna.samplers import BaseSampler
17 from optuna.samplers._lazy_random_state import LazyRandomState
18 from optuna.study import Study
19 from optuna.trial import FrozenTrial
20 from optuna.trial import TrialState
21
22
23 GridValueType = Union[str, float, int, bool, None]
24
25
26 _logger = get_logger(__name__)
27
28
29 class GridSampler(BaseSampler):
30 """Sampler using grid search.
31
32 With :class:`~optuna.samplers.GridSampler`, the trials suggest all combinations of parameters
33 in the given search space during the study.
34
35 Example:
36
37 .. testcode::
38
39 import optuna
40
41
42 def objective(trial):
43 x = trial.suggest_float("x", -100, 100)
44 y = trial.suggest_int("y", -100, 100)
45 return x**2 + y**2
46
47
48 search_space = {"x": [-50, 0, 50], "y": [-99, 0, 99]}
49 study = optuna.create_study(sampler=optuna.samplers.GridSampler(search_space))
50 study.optimize(objective)
51
52 Note:
53
54 :class:`~optuna.samplers.GridSampler` automatically stops the optimization if all
55 combinations in the passed ``search_space`` have already been evaluated, internally
56 invoking the :func:`~optuna.study.Study.stop` method.
57
58 Note:
59
60 :class:`~optuna.samplers.GridSampler` does not take care of a parameter's quantization
61 specified by discrete suggest methods but just samples one of values specified in the
62 search space. E.g., in the following code snippet, either of ``-0.5`` or ``0.5`` is
63 sampled as ``x`` instead of an integer point.
64
65 .. testcode::
66
67 import optuna
68
69
70 def objective(trial):
71 # The following suggest method specifies integer points between -5 and 5.
72 x = trial.suggest_float("x", -5, 5, step=1)
73 return x**2
74
75
76 # Non-int points are specified in the grid.
77 search_space = {"x": [-0.5, 0.5]}
78 study = optuna.create_study(sampler=optuna.samplers.GridSampler(search_space))
79 study.optimize(objective, n_trials=2)
80
81 Note:
82 A parameter configuration in the grid is not considered finished until its trial is
83 finished. Therefore, during distributed optimization where trials run concurrently,
84 different workers will occasionally suggest the same parameter configuration.
85 The total number of actual trials may therefore exceed the size of the grid.
86
87 Note:
88 All parameters must be specified when using :class:`~optuna.samplers.GridSampler` with
89 :meth:`~optuna.study.Study.enqueue_trial`.
90
91 Args:
92 search_space:
93 A dictionary whose key and value are a parameter name and the corresponding candidates
94 of values, respectively.
95 seed:
96 A seed to fix the order of trials as the grid is randomly shuffled. Please note that
97 it is not recommended using this option in distributed optimization settings since
98 this option cannot ensure the order of trials and may increase the number of duplicate
99 suggestions during distributed optimization.
100 """
101
102 def __init__(
103 self, search_space: Mapping[str, Sequence[GridValueType]], seed: Optional[int] = None
104 ) -> None:
105 for param_name, param_values in search_space.items():
106 for value in param_values:
107 self._check_value(param_name, value)
108
109 self._search_space = {}
110 for param_name, param_values in sorted(search_space.items()):
111 self._search_space[param_name] = list(param_values)
112
113 self._all_grids = list(itertools.product(*self._search_space.values()))
114 self._param_names = sorted(search_space.keys())
115 self._n_min_trials = len(self._all_grids)
116 self._rng = LazyRandomState(seed)
117 self._rng.rng.shuffle(self._all_grids)
118
119 def reseed_rng(self) -> None:
120 self._rng.rng.seed()
121
122 def before_trial(self, study: Study, trial: FrozenTrial) -> None:
123 # Instead of returning param values, GridSampler puts the target grid id as a system attr,
124 # and the values are returned from `sample_independent`. This is because the distribution
125 # object is hard to get at the beginning of trial, while we need the access to the object
126 # to validate the sampled value.
127
128 # When the trial is created by RetryFailedTrialCallback or enqueue_trial, we should not
129 # assign a new grid_id.
130 if "grid_id" in trial.system_attrs or "fixed_params" in trial.system_attrs:
131 return
132
133 if 0 <= trial.number and trial.number < self._n_min_trials:
134 study._storage.set_trial_system_attr(
135 trial._trial_id, "search_space", self._search_space
136 )
137 study._storage.set_trial_system_attr(trial._trial_id, "grid_id", trial.number)
138 return
139
140 target_grids = self._get_unvisited_grid_ids(study)
141
142 if len(target_grids) == 0:
143 # This case may occur with distributed optimization or trial queue. If there is no
144 # target grid, `GridSampler` evaluates a visited, duplicated point with the current
145 # trial. After that, the optimization stops.
146
147 _logger.warning(
148 "`GridSampler` is re-evaluating a configuration because the grid has been "
149 "exhausted. This may happen due to a timing issue during distributed optimization "
150 "or when re-running optimizations on already finished studies."
151 )
152
153 # One of all grids is randomly picked up in this case.
154 target_grids = list(range(len(self._all_grids)))
155
156 # In distributed optimization, multiple workers may simultaneously pick up the same grid.
157 # To make the conflict less frequent, the grid is chosen randomly.
158 grid_id = int(self._rng.rng.choice(target_grids))
159
160 study._storage.set_trial_system_attr(trial._trial_id, "search_space", self._search_space)
161 study._storage.set_trial_system_attr(trial._trial_id, "grid_id", grid_id)
162
163 def infer_relative_search_space(
164 self, study: Study, trial: FrozenTrial
165 ) -> Dict[str, BaseDistribution]:
166 return {}
167
168 def sample_relative(
169 self, study: Study, trial: FrozenTrial, search_space: Dict[str, BaseDistribution]
170 ) -> Dict[str, Any]:
171 return {}
172
173 def sample_independent(
174 self,
175 study: Study,
176 trial: FrozenTrial,
177 param_name: str,
178 param_distribution: BaseDistribution,
179 ) -> Any:
180 if "grid_id" not in trial.system_attrs:
181 message = "All parameters must be specified when using GridSampler with enqueue_trial."
182 raise ValueError(message)
183
184 if param_name not in self._search_space:
185 message = "The parameter name, {}, is not found in the given grid.".format(param_name)
186 raise ValueError(message)
187
188 # TODO(c-bata): Reduce the number of duplicated evaluations on multiple workers.
189 # Current selection logic may evaluate the same parameters multiple times.
190 # See https://gist.github.com/c-bata/f759f64becb24eea2040f4b2e3afce8f for details.
191 grid_id = trial.system_attrs["grid_id"]
192 param_value = self._all_grids[grid_id][self._param_names.index(param_name)]
193 contains = param_distribution._contains(param_distribution.to_internal_repr(param_value))
194 if not contains:
195 warnings.warn(
196 f"The value `{param_value}` is out of range of the parameter `{param_name}`. "
197 f"The value will be used but the actual distribution is: `{param_distribution}`."
198 )
199
200 return param_value
201
202 def after_trial(
203 self,
204 study: Study,
205 trial: FrozenTrial,
206 state: TrialState,
207 values: Optional[Sequence[float]],
208 ) -> None:
209 target_grids = self._get_unvisited_grid_ids(study)
210
211 if len(target_grids) == 0:
212 study.stop()
213 elif len(target_grids) == 1:
214 grid_id = study._storage.get_trial_system_attrs(trial._trial_id)["grid_id"]
215 if grid_id == target_grids[0]:
216 study.stop()
217
218 @staticmethod
219 def _check_value(param_name: str, param_value: Any) -> None:
220 if param_value is None or isinstance(param_value, (str, int, float, bool)):
221 return
222
223 message = (
224 "{} contains a value with the type of {}, which is not supported by "
225 "`GridSampler`. Please make sure a value is `str`, `int`, `float`, `bool`"
226 " or `None` for persistent storage.".format(param_name, type(param_value))
227 )
228 warnings.warn(message)
229
230 def _get_unvisited_grid_ids(self, study: Study) -> List[int]:
231 # List up unvisited grids based on already finished ones.
232 visited_grids = []
233 running_grids = []
234
235 # We directly query the storage to get trials here instead of `study.get_trials`,
236 # since some pruners such as `HyperbandPruner` use the study transformed
237 # to filter trials. See https://github.com/optuna/optuna/issues/2327 for details.
238 trials = study._storage.get_all_trials(study._study_id, deepcopy=False)
239
240 for t in trials:
241 if "grid_id" in t.system_attrs and self._same_search_space(
242 t.system_attrs["search_space"]
243 ):
244 if t.state.is_finished():
245 visited_grids.append(t.system_attrs["grid_id"])
246 elif t.state == TrialState.RUNNING:
247 running_grids.append(t.system_attrs["grid_id"])
248
249 unvisited_grids = set(range(self._n_min_trials)) - set(visited_grids) - set(running_grids)
250
251 # If evaluations for all grids have been started, return grids that have not yet finished
252 # because all grids should be evaluated before stopping the optimization.
253 if len(unvisited_grids) == 0:
254 unvisited_grids = set(range(self._n_min_trials)) - set(visited_grids)
255
256 return list(unvisited_grids)
257
258 @staticmethod
259 def _grid_value_equal(value1: GridValueType, value2: GridValueType) -> bool:
260 value1_is_nan = isinstance(value1, Real) and np.isnan(float(value1))
261 value2_is_nan = isinstance(value2, Real) and np.isnan(float(value2))
262 return (value1 == value2) or (value1_is_nan and value2_is_nan)
263
264 def _same_search_space(self, search_space: Mapping[str, Sequence[GridValueType]]) -> bool:
265 if set(search_space.keys()) != set(self._search_space.keys()):
266 return False
267
268 for param_name in search_space.keys():
269 if len(search_space[param_name]) != len(self._search_space[param_name]):
270 return False
271
272 for i, param_value in enumerate(search_space[param_name]):
273 if not self._grid_value_equal(param_value, self._search_space[param_name][i]):
274 return False
275
276 return True
277
[end of optuna/samplers/_grid.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/optuna/samplers/_grid.py b/optuna/samplers/_grid.py
--- a/optuna/samplers/_grid.py
+++ b/optuna/samplers/_grid.py
@@ -51,9 +51,12 @@
Note:
- :class:`~optuna.samplers.GridSampler` automatically stops the optimization if all
- combinations in the passed ``search_space`` have already been evaluated, internally
- invoking the :func:`~optuna.study.Study.stop` method.
+ This sampler with :ref:`ask_and_tell` raises :exc:`RuntimeError` just after evaluating
+ the final grid. This is because :class:`~optuna.samplers.GridSampler` automatically
+ stops the optimization if all combinations in the passed ``search_space`` have already
+ been evaluated, internally invoking the :func:`~optuna.study.Study.stop` method.
+ As a workaround, we need to handle the error manually as in
+ https://github.com/optuna/optuna/issues/4121#issuecomment-1305289910.
Note:
|
{"golden_diff": "diff --git a/optuna/samplers/_grid.py b/optuna/samplers/_grid.py\n--- a/optuna/samplers/_grid.py\n+++ b/optuna/samplers/_grid.py\n@@ -51,9 +51,12 @@\n \n Note:\n \n- :class:`~optuna.samplers.GridSampler` automatically stops the optimization if all\n- combinations in the passed ``search_space`` have already been evaluated, internally\n- invoking the :func:`~optuna.study.Study.stop` method.\n+ This sampler with :ref:`ask_and_tell` raises :exc:`RuntimeError` just after evaluating\n+ the final grid. This is because :class:`~optuna.samplers.GridSampler` automatically\n+ stops the optimization if all combinations in the passed ``search_space`` have already\n+ been evaluated, internally invoking the :func:`~optuna.study.Study.stop` method.\n+ As a workaround, we need to handle the error manually as in\n+ https://github.com/optuna/optuna/issues/4121#issuecomment-1305289910.\n \n Note:\n", "issue": "Clarify `GridSampler` and ask-and-tell interface problem\n### What is an issue?\n\n`GridSampler` with ask-and-tell interface does not work the same way as the `optimize` interface due to `RuntimeError` as reported in https://github.com/optuna/optuna/issues/4121 and https://github.com/optuna/optuna/issues/5141. This should be clarified in the GridSampler page, possibly the first note section that sounds related to this problem.\n", "before_files": [{"content": "import itertools\nfrom numbers import Real\nfrom typing import Any\nfrom typing import Dict\nfrom typing import List\nfrom typing import Mapping\nfrom typing import Optional\nfrom typing import Sequence\nfrom typing import Union\nimport warnings\n\nimport numpy as np\n\nfrom optuna.distributions import BaseDistribution\nfrom optuna.logging import get_logger\nfrom optuna.samplers import BaseSampler\nfrom optuna.samplers._lazy_random_state import LazyRandomState\nfrom optuna.study import Study\nfrom optuna.trial import FrozenTrial\nfrom optuna.trial import TrialState\n\n\nGridValueType = Union[str, float, int, bool, None]\n\n\n_logger = get_logger(__name__)\n\n\nclass GridSampler(BaseSampler):\n \"\"\"Sampler using grid search.\n\n With :class:`~optuna.samplers.GridSampler`, the trials suggest all combinations of parameters\n in the given search space during the study.\n\n Example:\n\n .. testcode::\n\n import optuna\n\n\n def objective(trial):\n x = trial.suggest_float(\"x\", -100, 100)\n y = trial.suggest_int(\"y\", -100, 100)\n return x**2 + y**2\n\n\n search_space = {\"x\": [-50, 0, 50], \"y\": [-99, 0, 99]}\n study = optuna.create_study(sampler=optuna.samplers.GridSampler(search_space))\n study.optimize(objective)\n\n Note:\n\n :class:`~optuna.samplers.GridSampler` automatically stops the optimization if all\n combinations in the passed ``search_space`` have already been evaluated, internally\n invoking the :func:`~optuna.study.Study.stop` method.\n\n Note:\n\n :class:`~optuna.samplers.GridSampler` does not take care of a parameter's quantization\n specified by discrete suggest methods but just samples one of values specified in the\n search space. E.g., in the following code snippet, either of ``-0.5`` or ``0.5`` is\n sampled as ``x`` instead of an integer point.\n\n .. 
testcode::\n\n import optuna\n\n\n def objective(trial):\n # The following suggest method specifies integer points between -5 and 5.\n x = trial.suggest_float(\"x\", -5, 5, step=1)\n return x**2\n\n\n # Non-int points are specified in the grid.\n search_space = {\"x\": [-0.5, 0.5]}\n study = optuna.create_study(sampler=optuna.samplers.GridSampler(search_space))\n study.optimize(objective, n_trials=2)\n\n Note:\n A parameter configuration in the grid is not considered finished until its trial is\n finished. Therefore, during distributed optimization where trials run concurrently,\n different workers will occasionally suggest the same parameter configuration.\n The total number of actual trials may therefore exceed the size of the grid.\n\n Note:\n All parameters must be specified when using :class:`~optuna.samplers.GridSampler` with\n :meth:`~optuna.study.Study.enqueue_trial`.\n\n Args:\n search_space:\n A dictionary whose key and value are a parameter name and the corresponding candidates\n of values, respectively.\n seed:\n A seed to fix the order of trials as the grid is randomly shuffled. Please note that\n it is not recommended using this option in distributed optimization settings since\n this option cannot ensure the order of trials and may increase the number of duplicate\n suggestions during distributed optimization.\n \"\"\"\n\n def __init__(\n self, search_space: Mapping[str, Sequence[GridValueType]], seed: Optional[int] = None\n ) -> None:\n for param_name, param_values in search_space.items():\n for value in param_values:\n self._check_value(param_name, value)\n\n self._search_space = {}\n for param_name, param_values in sorted(search_space.items()):\n self._search_space[param_name] = list(param_values)\n\n self._all_grids = list(itertools.product(*self._search_space.values()))\n self._param_names = sorted(search_space.keys())\n self._n_min_trials = len(self._all_grids)\n self._rng = LazyRandomState(seed)\n self._rng.rng.shuffle(self._all_grids)\n\n def reseed_rng(self) -> None:\n self._rng.rng.seed()\n\n def before_trial(self, study: Study, trial: FrozenTrial) -> None:\n # Instead of returning param values, GridSampler puts the target grid id as a system attr,\n # and the values are returned from `sample_independent`. This is because the distribution\n # object is hard to get at the beginning of trial, while we need the access to the object\n # to validate the sampled value.\n\n # When the trial is created by RetryFailedTrialCallback or enqueue_trial, we should not\n # assign a new grid_id.\n if \"grid_id\" in trial.system_attrs or \"fixed_params\" in trial.system_attrs:\n return\n\n if 0 <= trial.number and trial.number < self._n_min_trials:\n study._storage.set_trial_system_attr(\n trial._trial_id, \"search_space\", self._search_space\n )\n study._storage.set_trial_system_attr(trial._trial_id, \"grid_id\", trial.number)\n return\n\n target_grids = self._get_unvisited_grid_ids(study)\n\n if len(target_grids) == 0:\n # This case may occur with distributed optimization or trial queue. If there is no\n # target grid, `GridSampler` evaluates a visited, duplicated point with the current\n # trial. After that, the optimization stops.\n\n _logger.warning(\n \"`GridSampler` is re-evaluating a configuration because the grid has been \"\n \"exhausted. 
This may happen due to a timing issue during distributed optimization \"\n \"or when re-running optimizations on already finished studies.\"\n )\n\n # One of all grids is randomly picked up in this case.\n target_grids = list(range(len(self._all_grids)))\n\n # In distributed optimization, multiple workers may simultaneously pick up the same grid.\n # To make the conflict less frequent, the grid is chosen randomly.\n grid_id = int(self._rng.rng.choice(target_grids))\n\n study._storage.set_trial_system_attr(trial._trial_id, \"search_space\", self._search_space)\n study._storage.set_trial_system_attr(trial._trial_id, \"grid_id\", grid_id)\n\n def infer_relative_search_space(\n self, study: Study, trial: FrozenTrial\n ) -> Dict[str, BaseDistribution]:\n return {}\n\n def sample_relative(\n self, study: Study, trial: FrozenTrial, search_space: Dict[str, BaseDistribution]\n ) -> Dict[str, Any]:\n return {}\n\n def sample_independent(\n self,\n study: Study,\n trial: FrozenTrial,\n param_name: str,\n param_distribution: BaseDistribution,\n ) -> Any:\n if \"grid_id\" not in trial.system_attrs:\n message = \"All parameters must be specified when using GridSampler with enqueue_trial.\"\n raise ValueError(message)\n\n if param_name not in self._search_space:\n message = \"The parameter name, {}, is not found in the given grid.\".format(param_name)\n raise ValueError(message)\n\n # TODO(c-bata): Reduce the number of duplicated evaluations on multiple workers.\n # Current selection logic may evaluate the same parameters multiple times.\n # See https://gist.github.com/c-bata/f759f64becb24eea2040f4b2e3afce8f for details.\n grid_id = trial.system_attrs[\"grid_id\"]\n param_value = self._all_grids[grid_id][self._param_names.index(param_name)]\n contains = param_distribution._contains(param_distribution.to_internal_repr(param_value))\n if not contains:\n warnings.warn(\n f\"The value `{param_value}` is out of range of the parameter `{param_name}`. \"\n f\"The value will be used but the actual distribution is: `{param_distribution}`.\"\n )\n\n return param_value\n\n def after_trial(\n self,\n study: Study,\n trial: FrozenTrial,\n state: TrialState,\n values: Optional[Sequence[float]],\n ) -> None:\n target_grids = self._get_unvisited_grid_ids(study)\n\n if len(target_grids) == 0:\n study.stop()\n elif len(target_grids) == 1:\n grid_id = study._storage.get_trial_system_attrs(trial._trial_id)[\"grid_id\"]\n if grid_id == target_grids[0]:\n study.stop()\n\n @staticmethod\n def _check_value(param_name: str, param_value: Any) -> None:\n if param_value is None or isinstance(param_value, (str, int, float, bool)):\n return\n\n message = (\n \"{} contains a value with the type of {}, which is not supported by \"\n \"`GridSampler`. Please make sure a value is `str`, `int`, `float`, `bool`\"\n \" or `None` for persistent storage.\".format(param_name, type(param_value))\n )\n warnings.warn(message)\n\n def _get_unvisited_grid_ids(self, study: Study) -> List[int]:\n # List up unvisited grids based on already finished ones.\n visited_grids = []\n running_grids = []\n\n # We directly query the storage to get trials here instead of `study.get_trials`,\n # since some pruners such as `HyperbandPruner` use the study transformed\n # to filter trials. 
See https://github.com/optuna/optuna/issues/2327 for details.\n trials = study._storage.get_all_trials(study._study_id, deepcopy=False)\n\n for t in trials:\n if \"grid_id\" in t.system_attrs and self._same_search_space(\n t.system_attrs[\"search_space\"]\n ):\n if t.state.is_finished():\n visited_grids.append(t.system_attrs[\"grid_id\"])\n elif t.state == TrialState.RUNNING:\n running_grids.append(t.system_attrs[\"grid_id\"])\n\n unvisited_grids = set(range(self._n_min_trials)) - set(visited_grids) - set(running_grids)\n\n # If evaluations for all grids have been started, return grids that have not yet finished\n # because all grids should be evaluated before stopping the optimization.\n if len(unvisited_grids) == 0:\n unvisited_grids = set(range(self._n_min_trials)) - set(visited_grids)\n\n return list(unvisited_grids)\n\n @staticmethod\n def _grid_value_equal(value1: GridValueType, value2: GridValueType) -> bool:\n value1_is_nan = isinstance(value1, Real) and np.isnan(float(value1))\n value2_is_nan = isinstance(value2, Real) and np.isnan(float(value2))\n return (value1 == value2) or (value1_is_nan and value2_is_nan)\n\n def _same_search_space(self, search_space: Mapping[str, Sequence[GridValueType]]) -> bool:\n if set(search_space.keys()) != set(self._search_space.keys()):\n return False\n\n for param_name in search_space.keys():\n if len(search_space[param_name]) != len(self._search_space[param_name]):\n return False\n\n for i, param_value in enumerate(search_space[param_name]):\n if not self._grid_value_equal(param_value, self._search_space[param_name][i]):\n return False\n\n return True\n", "path": "optuna/samplers/_grid.py"}]}
| 3,873 | 248 |
gh_patches_debug_33062
|
rasdani/github-patches
|
git_diff
|
bridgecrewio__checkov-3387
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Crash mentioned in issue #2370 still exist for AWS Batch Job with Fargate
**Describe the issue**
Checkov is failing for Fargate AWS Batch Job with error - `[ERROR] Failed to run check: Batch job does not define a privileged container for configuration`. I am using terraform `resource "aws_batch_job_definition"` and as per AWS document, Fargate Batch Jobs do not have Privileged parameters. [https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-privileged](url)
**Examples**
https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/batch_job_definition#fargate-platform-capability
**Exception Trace**
```
2022-08-19 15:22:00,858 [MainThread ] [DEBUG] Scanning file: /tf.json
2022-08-19 15:22:00,858 [MainThread ] [DEBUG] Should run check CKV_AWS_210: True
2022-08-19 15:22:00,858 [MainThread ] [DEBUG] Running check: Batch job does not define a privileged container on file /tf.json
2022-08-19 15:22:00,860 [MainThread ] [ERROR] Failed to run check: Batch job does not define a privileged container for configuration: {'container_properties': ['{"command":["echo","test"],"fargatePlatformConfiguration":{"platformVersion":"LATEST"},"image":"busybox","resourceRequirements":[{"type":"VCPU","value":"0.25"},{"type":"MEMORY","value":"512"}]}'], 'name': ['tf_test_batch_job_definition'], 'parameters': [None], 'platform_capabilities': [['FARGATE']], 'propagate_tags': [False], 'retry_strategy': [[]], 'tags': [None], 'timeout': [[]], 'type': ['container'], '__startline__': [14], '__endline__': [26], 'start_line': [13], 'end_line': [25], '__address__': 'aws_batch_job_definition.test', '__change_actions__': ['create']} at file: /tf.json
Process ForkProcess-4:
Traceback (most recent call last):
File "/usr/local/lib/python3.8/multiprocessing/process.py", line 315, in _bootstrap
self.run()
File "/usr/local/lib/python3.8/multiprocessing/process.py", line 108, in run
self._target(*self._args, **self._kwargs)
File "/usr/local/lib/python3.8/site-packages/checkov/common/parallelizer/parallel_runner.py", line 37, in func_wrapper
result = original_func(item)
2022-08-19 15:22:00,862 [MainThread ] [DEBUG] Environment variable BITBUCKET_REPO_FULL_NAME was not set. Cannot fetch branch restrictions.
File "/usr/local/lib/python3.8/site-packages/checkov/common/runners/runner_registry.py", line 83, in _parallel_run
return runner.run(
File "/usr/local/lib/python3.8/site-packages/checkov/terraform/plan_runner.py", line 74, in run
self.check_tf_definition(report, root_folder, runner_filter)
File "/usr/local/lib/python3.8/site-packages/checkov/terraform/plan_runner.py", line 92, in check_tf_definition
self.run_block(definition[block_type], None, full_file_path, root_folder, report, scanned_file,
File "/usr/local/lib/python3.8/site-packages/checkov/terraform/plan_runner.py", line 112, in run_block
results = registry.scan(scanned_file, entity, [], runner_filter, report_type=CheckType.TERRAFORM_PLAN)
File "/usr/local/lib/python3.8/site-packages/checkov/common/checks/base_check_registry.py", line 126, in scan
result = self.run_check(check, entity_configuration, entity_name, entity_type, scanned_file, skip_info)
File "/usr/local/lib/python3.8/site-packages/checkov/common/checks/base_check_registry.py", line 140, in run_check
result = check.run(
File "/usr/local/lib/python3.8/site-packages/checkov/common/checks/base_check.py", line 70, in run
check_result["result"] = self.scan_entity_conf(entity_configuration, entity_type)
File "/usr/local/lib/python3.8/site-packages/checkov/terraform/checks/resource/base_resource_check.py", line 43, in scan_entity_conf
return self.scan_resource_conf(conf)
File "/usr/local/lib/python3.8/site-packages/checkov/terraform/checks/resource/aws/BatchJobIsNotPrivileged.py", line 26, in scan_resource_conf
if container.get("privileged"):
File "/usr/local/lib/python3.8/site-packages/checkov/common/parsers/node.py", line 34, in __getattr__
raise TemplateAttributeError(f'{name} is invalid')
checkov.common.parsers.node.TemplateAttributeError: get is invalid
```
**Desktop (please complete the following information):**
- OS: [MacOS, Linux]
- Checkov Version [2.1.137]
**Additional context**
The issue was also mentioned in #2370 and it was Fixed by https://github.com/bridgecrewio/checkov/pull/2372 but I don't see that the Fargate type was considered in the test.
</issue>
<code>
[start of checkov/terraform/checks/resource/aws/BatchJobIsNotPrivileged.py]
1 import json
2 import logging
3
4 from checkov.common.models.enums import CheckResult, CheckCategories
5 from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
6
7
8 class BatchJobIsNotPrivileged(BaseResourceCheck):
9 def __init__(self):
10 name = "Batch job does not define a privileged container"
11 id = "CKV_AWS_210"
12 supported_resources = ['aws_batch_job_definition']
13 categories = [CheckCategories.GENERAL_SECURITY]
14 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
15
16 def scan_resource_conf(self, conf):
17 if conf.get("container_properties"):
18 if type(conf.get("container_properties")[0]) is str:
19 try:
20 container = json.loads(conf.get("container_properties")[0])
21 except json.JSONDecodeError as e:
22 logging.error(e)
23 return CheckResult.UNKNOWN
24 else:
25 container = conf.get("container_properties")[0]
26 if container.get("privileged"):
27 return CheckResult.FAILED
28 return CheckResult.PASSED
29 return CheckResult.UNKNOWN
30
31
32 check = BatchJobIsNotPrivileged()
33
[end of checkov/terraform/checks/resource/aws/BatchJobIsNotPrivileged.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/checkov/terraform/checks/resource/aws/BatchJobIsNotPrivileged.py b/checkov/terraform/checks/resource/aws/BatchJobIsNotPrivileged.py
--- a/checkov/terraform/checks/resource/aws/BatchJobIsNotPrivileged.py
+++ b/checkov/terraform/checks/resource/aws/BatchJobIsNotPrivileged.py
@@ -1,28 +1,34 @@
+from __future__ import annotations
+
import json
import logging
+from typing import Any
from checkov.common.models.enums import CheckResult, CheckCategories
from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
class BatchJobIsNotPrivileged(BaseResourceCheck):
- def __init__(self):
+ def __init__(self) -> None:
name = "Batch job does not define a privileged container"
id = "CKV_AWS_210"
- supported_resources = ['aws_batch_job_definition']
- categories = [CheckCategories.GENERAL_SECURITY]
+ supported_resources = ("aws_batch_job_definition",)
+ categories = (CheckCategories.GENERAL_SECURITY,)
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
- def scan_resource_conf(self, conf):
- if conf.get("container_properties"):
- if type(conf.get("container_properties")[0]) is str:
+ def scan_resource_conf(self, conf: dict[str, list[Any]]) -> CheckResult:
+ container_properties = conf.get("container_properties")
+ if container_properties:
+ if isinstance(container_properties[0], str):
try:
- container = json.loads(conf.get("container_properties")[0])
+ container = json.loads(container_properties[0])
except json.JSONDecodeError as e:
logging.error(e)
return CheckResult.UNKNOWN
else:
- container = conf.get("container_properties")[0]
+ container = container_properties[0]
+ if not isinstance(container, dict):
+ return CheckResult.UNKNOWN
if container.get("privileged"):
return CheckResult.FAILED
return CheckResult.PASSED
|
{"golden_diff": "diff --git a/checkov/terraform/checks/resource/aws/BatchJobIsNotPrivileged.py b/checkov/terraform/checks/resource/aws/BatchJobIsNotPrivileged.py\n--- a/checkov/terraform/checks/resource/aws/BatchJobIsNotPrivileged.py\n+++ b/checkov/terraform/checks/resource/aws/BatchJobIsNotPrivileged.py\n@@ -1,28 +1,34 @@\n+from __future__ import annotations\n+\n import json\n import logging\n+from typing import Any\n \n from checkov.common.models.enums import CheckResult, CheckCategories\n from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n \n \n class BatchJobIsNotPrivileged(BaseResourceCheck):\n- def __init__(self):\n+ def __init__(self) -> None:\n name = \"Batch job does not define a privileged container\"\n id = \"CKV_AWS_210\"\n- supported_resources = ['aws_batch_job_definition']\n- categories = [CheckCategories.GENERAL_SECURITY]\n+ supported_resources = (\"aws_batch_job_definition\",)\n+ categories = (CheckCategories.GENERAL_SECURITY,)\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n \n- def scan_resource_conf(self, conf):\n- if conf.get(\"container_properties\"):\n- if type(conf.get(\"container_properties\")[0]) is str:\n+ def scan_resource_conf(self, conf: dict[str, list[Any]]) -> CheckResult:\n+ container_properties = conf.get(\"container_properties\")\n+ if container_properties:\n+ if isinstance(container_properties[0], str):\n try:\n- container = json.loads(conf.get(\"container_properties\")[0])\n+ container = json.loads(container_properties[0])\n except json.JSONDecodeError as e:\n logging.error(e)\n return CheckResult.UNKNOWN\n else:\n- container = conf.get(\"container_properties\")[0]\n+ container = container_properties[0]\n+ if not isinstance(container, dict):\n+ return CheckResult.UNKNOWN\n if container.get(\"privileged\"):\n return CheckResult.FAILED\n return CheckResult.PASSED\n", "issue": "Crash mentioned in issue #2370 still exist for AWS Batch Job with Fargate \n**Describe the issue**\r\nCheckov is failing for Fargate AWS Batch Job with error - `[ERROR] Failed to run check: Batch job does not define a privileged container for configuration`. I am using terraform `resource \"aws_batch_job_definition\"` and as per AWS document, Fargate Batch Jobs do not have Privileged parameters. 
[https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-batch-jobdefinition-containerproperties.html#cfn-batch-jobdefinition-containerproperties-privileged](url)\r\n\r\n**Examples**\r\nhttps://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/batch_job_definition#fargate-platform-capability\r\n\r\n**Exception Trace**\r\n```\r\n2022-08-19 15:22:00,858 [MainThread ] [DEBUG] Scanning file: /tf.json\r\n2022-08-19 15:22:00,858 [MainThread ] [DEBUG] Should run check CKV_AWS_210: True\r\n2022-08-19 15:22:00,858 [MainThread ] [DEBUG] Running check: Batch job does not define a privileged container on file /tf.json\r\n2022-08-19 15:22:00,860 [MainThread ] [ERROR] Failed to run check: Batch job does not define a privileged container for configuration: {'container_properties': ['{\"command\":[\"echo\",\"test\"],\"fargatePlatformConfiguration\":{\"platformVersion\":\"LATEST\"},\"image\":\"busybox\",\"resourceRequirements\":[{\"type\":\"VCPU\",\"value\":\"0.25\"},{\"type\":\"MEMORY\",\"value\":\"512\"}]}'], 'name': ['tf_test_batch_job_definition'], 'parameters': [None], 'platform_capabilities': [['FARGATE']], 'propagate_tags': [False], 'retry_strategy': [[]], 'tags': [None], 'timeout': [[]], 'type': ['container'], '__startline__': [14], '__endline__': [26], 'start_line': [13], 'end_line': [25], '__address__': 'aws_batch_job_definition.test', '__change_actions__': ['create']} at file: /tf.json\r\nProcess ForkProcess-4:\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.8/multiprocessing/process.py\", line 315, in _bootstrap\r\n self.run()\r\n File \"/usr/local/lib/python3.8/multiprocessing/process.py\", line 108, in run\r\n self._target(*self._args, **self._kwargs)\r\n File \"/usr/local/lib/python3.8/site-packages/checkov/common/parallelizer/parallel_runner.py\", line 37, in func_wrapper\r\n result = original_func(item)\r\n2022-08-19 15:22:00,862 [MainThread ] [DEBUG] Environment variable BITBUCKET_REPO_FULL_NAME was not set. 
Cannot fetch branch restrictions.\r\n File \"/usr/local/lib/python3.8/site-packages/checkov/common/runners/runner_registry.py\", line 83, in _parallel_run\r\n return runner.run(\r\n File \"/usr/local/lib/python3.8/site-packages/checkov/terraform/plan_runner.py\", line 74, in run\r\n self.check_tf_definition(report, root_folder, runner_filter)\r\n File \"/usr/local/lib/python3.8/site-packages/checkov/terraform/plan_runner.py\", line 92, in check_tf_definition\r\n self.run_block(definition[block_type], None, full_file_path, root_folder, report, scanned_file,\r\n File \"/usr/local/lib/python3.8/site-packages/checkov/terraform/plan_runner.py\", line 112, in run_block\r\n results = registry.scan(scanned_file, entity, [], runner_filter, report_type=CheckType.TERRAFORM_PLAN)\r\n File \"/usr/local/lib/python3.8/site-packages/checkov/common/checks/base_check_registry.py\", line 126, in scan\r\n result = self.run_check(check, entity_configuration, entity_name, entity_type, scanned_file, skip_info)\r\n File \"/usr/local/lib/python3.8/site-packages/checkov/common/checks/base_check_registry.py\", line 140, in run_check\r\n result = check.run(\r\n File \"/usr/local/lib/python3.8/site-packages/checkov/common/checks/base_check.py\", line 70, in run\r\n check_result[\"result\"] = self.scan_entity_conf(entity_configuration, entity_type)\r\n File \"/usr/local/lib/python3.8/site-packages/checkov/terraform/checks/resource/base_resource_check.py\", line 43, in scan_entity_conf\r\n return self.scan_resource_conf(conf)\r\n File \"/usr/local/lib/python3.8/site-packages/checkov/terraform/checks/resource/aws/BatchJobIsNotPrivileged.py\", line 26, in scan_resource_conf\r\n if container.get(\"privileged\"):\r\n File \"/usr/local/lib/python3.8/site-packages/checkov/common/parsers/node.py\", line 34, in __getattr__\r\n raise TemplateAttributeError(f'{name} is invalid')\r\ncheckov.common.parsers.node.TemplateAttributeError: get is invalid\r\n```\r\n\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: [MacOS, Linux]\r\n - Checkov Version [2.1.137]\r\n\r\n**Additional context**\r\nThe issue was also mentioned in #2370 and it was Fixed by https://github.com/bridgecrewio/checkov/pull/2372 but I don't see that the Fargate type was considered in the test.\r\n\n", "before_files": [{"content": "import json\nimport logging\n\nfrom checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n\n\nclass BatchJobIsNotPrivileged(BaseResourceCheck):\n def __init__(self):\n name = \"Batch job does not define a privileged container\"\n id = \"CKV_AWS_210\"\n supported_resources = ['aws_batch_job_definition']\n categories = [CheckCategories.GENERAL_SECURITY]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf):\n if conf.get(\"container_properties\"):\n if type(conf.get(\"container_properties\")[0]) is str:\n try:\n container = json.loads(conf.get(\"container_properties\")[0])\n except json.JSONDecodeError as e:\n logging.error(e)\n return CheckResult.UNKNOWN\n else:\n container = conf.get(\"container_properties\")[0]\n if container.get(\"privileged\"):\n return CheckResult.FAILED\n return CheckResult.PASSED\n return CheckResult.UNKNOWN\n\n\ncheck = BatchJobIsNotPrivileged()\n", "path": "checkov/terraform/checks/resource/aws/BatchJobIsNotPrivileged.py"}]}
| 2,103 | 457 |
gh_patches_debug_6008
|
rasdani/github-patches
|
git_diff
|
pantsbuild__pants-15602
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
--black-skip breaks fmt goal on isort exception
**Describe the bug**
There are a few related bugs, but the breaking behaviour is the most important I think. I've tested on 2.11, and everything works as expected, so this appears to only be a pre-release/dev bug.
While writing a plugin, I started implementing and testing the `skip` functionality and noticed some strange behaviour on `main`. This behaviour isn't limited to `black`, but it's quick to replicate.
Note below that even after skipping `black` in the `lint` goal, it is in the associated list and marked "succeeded". In reality, it should be failing if it's actually running. In 2.11, black just wouldn't be shown in that list. Personally, I think "black skipped" in yellow text might make more sense, but 🤷🏽
```bash
> ./pants lint --black-skip ::
✓ autoflake succeeded.
✓ bandit succeeded.
✓ black succeeded.
✕ flake8 failed.
✓ isort succeeded.
✓ pylint succeeded.
✓ pyupgrade succeeded.
✓ shellcheck succeeded.
✓ shfmt succeeded.
```
The more concerning behaviour is on format.
```bash
> ./pants fmt --black-skip ::
14:28:06.41 [ERROR] 1 Exception encountered:
Engine traceback:
in select
in pants.core.goals.fmt.fmt
in pants.core.goals.fmt.fmt_language
in pants.backend.python.lint.isort.rules.isort_fmt (isort)
in pants.engine.process.fallible_to_exec_result_or_raise
Traceback (most recent call last):
File "/Users/sj/.cache/pants/setup/bootstrap-Darwin-x86_64/pants.NUYNTS/install/lib/python3.9/site-packages/pants/engine/process.py", line 272, in fallible_to_exec_result_or_raise
raise ProcessExecutionFailure(
pants.engine.process.ProcessExecutionFailure: Process 'Run isort on 7 files.' failed with exit code 1.
```
**Pants version**
2.13.0.dev1
**OS**
MacOS
**Additional info**
Using this repo (https://github.com/sureshjoshi/pants-example-plugin) and I manually set the toml to `2.13.0.dev`.
I also went in and messed with the `main.py` formatting, so I knew `black` would want to run.
</issue>
<code>
[start of src/python/pants/core/goals/fmt.py]
1 # Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 from __future__ import annotations
5
6 import itertools
7 import logging
8 from collections import defaultdict
9 from dataclasses import dataclass
10 from typing import Iterable, TypeVar
11
12 from pants.core.goals.style_request import (
13 StyleRequest,
14 determine_specified_tool_names,
15 only_option_help,
16 style_batch_size_help,
17 )
18 from pants.core.util_rules.source_files import SourceFiles, SourceFilesRequest
19 from pants.engine.console import Console
20 from pants.engine.engine_aware import EngineAwareReturnType
21 from pants.engine.fs import Digest, MergeDigests, Snapshot, SnapshotDiff, Workspace
22 from pants.engine.goal import Goal, GoalSubsystem
23 from pants.engine.internals.native_engine import EMPTY_SNAPSHOT
24 from pants.engine.process import FallibleProcessResult, ProcessResult
25 from pants.engine.rules import Get, MultiGet, collect_rules, goal_rule, rule
26 from pants.engine.target import FieldSet, FilteredTargets, SourcesField, Targets
27 from pants.engine.unions import UnionMembership, union
28 from pants.option.option_types import IntOption, StrListOption
29 from pants.util.collections import partition_sequentially
30 from pants.util.logging import LogLevel
31 from pants.util.meta import frozen_after_init
32 from pants.util.strutil import strip_v2_chroot_path
33
34 _F = TypeVar("_F", bound="FmtResult")
35 _FS = TypeVar("_FS", bound=FieldSet)
36
37 logger = logging.getLogger(__name__)
38
39
40 @dataclass(frozen=True)
41 class FmtResult(EngineAwareReturnType):
42 input: Snapshot
43 output: Snapshot
44 stdout: str
45 stderr: str
46 formatter_name: str
47
48 @classmethod
49 def create(
50 cls,
51 request: FmtRequest,
52 process_result: ProcessResult | FallibleProcessResult,
53 output: Snapshot,
54 *,
55 strip_chroot_path: bool = False,
56 ) -> FmtResult:
57 def prep_output(s: bytes) -> str:
58 return strip_v2_chroot_path(s) if strip_chroot_path else s.decode()
59
60 return cls(
61 input=request.snapshot,
62 output=output,
63 stdout=prep_output(process_result.stdout),
64 stderr=prep_output(process_result.stderr),
65 formatter_name=request.name,
66 )
67
68 def __post_init__(self):
69 # NB: We debug log stdout/stderr because `message` doesn't log it.
70 log = f"Output from {self.formatter_name}"
71 if self.stdout:
72 log += f"\n{self.stdout}"
73 if self.stderr:
74 log += f"\n{self.stderr}"
75 logger.debug(log)
76
77 @classmethod
78 def skip(cls: type[_F], *, formatter_name: str) -> _F:
79 return cls(
80 input=EMPTY_SNAPSHOT,
81 output=EMPTY_SNAPSHOT,
82 stdout="",
83 stderr="",
84 formatter_name=formatter_name,
85 )
86
87 @property
88 def skipped(self) -> bool:
89 return (
90 self.input == EMPTY_SNAPSHOT
91 and self.output == EMPTY_SNAPSHOT
92 and not self.stdout
93 and not self.stderr
94 )
95
96 @property
97 def did_change(self) -> bool:
98 return self.output != self.input
99
100 def level(self) -> LogLevel | None:
101 if self.skipped:
102 return LogLevel.DEBUG
103 return LogLevel.WARN if self.did_change else LogLevel.INFO
104
105 def message(self) -> str | None:
106 if self.skipped:
107 return f"{self.formatter_name} skipped."
108 message = "made changes." if self.did_change else "made no changes."
109
110 # NB: Instead of printing out `stdout` and `stderr`, we just print a list of files which
111 # changed. We do this for two reasons:
112 # 1. This is run as part of both `fmt` and `lint`, and we want consistent output between both
113 # 2. Different formatters have different stdout/stderr. This way is consistent across all
114 # formatters.
115 if self.did_change:
116 output = "".join(
117 f"\n {file}"
118 for file in SnapshotDiff.from_snapshots(self.input, self.output).changed_files
119 )
120 else:
121 output = ""
122
123 return f"{self.formatter_name} {message}{output}"
124
125 def cacheable(self) -> bool:
126 """Is marked uncacheable to ensure that it always renders."""
127 return False
128
129
130 @union
131 @frozen_after_init
132 @dataclass(unsafe_hash=True)
133 class FmtRequest(StyleRequest[_FS]):
134 snapshot: Snapshot
135
136 def __init__(self, field_sets: Iterable[_FS], snapshot: Snapshot) -> None:
137 self.snapshot = snapshot
138 super().__init__(field_sets)
139
140
141 @dataclass(frozen=True)
142 class _LanguageFmtRequest:
143 request_types: tuple[type[FmtRequest], ...]
144 targets: Targets
145
146
147 @dataclass(frozen=True)
148 class _LanguageFmtResults:
149
150 results: tuple[FmtResult, ...]
151 input: Digest
152 output: Digest
153
154 @property
155 def did_change(self) -> bool:
156 return self.input != self.output
157
158
159 class FmtSubsystem(GoalSubsystem):
160 name = "fmt"
161 help = "Autoformat source code."
162
163 @classmethod
164 def activated(cls, union_membership: UnionMembership) -> bool:
165 return FmtRequest in union_membership
166
167 only = StrListOption(
168 "--only",
169 help=only_option_help("fmt", "formatter", "isort", "shfmt"),
170 )
171 batch_size = IntOption(
172 "--batch-size",
173 advanced=True,
174 default=128,
175 help=style_batch_size_help(uppercase="Formatter", lowercase="formatter"),
176 )
177
178
179 class Fmt(Goal):
180 subsystem_cls = FmtSubsystem
181
182
183 @goal_rule
184 async def fmt(
185 console: Console,
186 targets: FilteredTargets,
187 fmt_subsystem: FmtSubsystem,
188 workspace: Workspace,
189 union_membership: UnionMembership,
190 ) -> Fmt:
191 request_types = union_membership[FmtRequest]
192 specified_names = determine_specified_tool_names("fmt", fmt_subsystem.only, request_types)
193
194 # Group targets by the sequence of FmtRequests that apply to them.
195 targets_by_fmt_request_order = defaultdict(list)
196 for target in targets:
197 fmt_requests = []
198 for fmt_request in request_types:
199 valid_name = fmt_request.name in specified_names
200 if valid_name and fmt_request.field_set_type.is_applicable(target): # type: ignore[misc]
201 fmt_requests.append(fmt_request)
202 if fmt_requests:
203 targets_by_fmt_request_order[tuple(fmt_requests)].append(target)
204
205 # Spawn sequential formatting per unique sequence of FmtRequests.
206 per_language_results = await MultiGet(
207 Get(
208 _LanguageFmtResults,
209 _LanguageFmtRequest(fmt_requests, Targets(target_batch)),
210 )
211 for fmt_requests, targets in targets_by_fmt_request_order.items()
212 for target_batch in partition_sequentially(
213 targets,
214 key=lambda t: t.address.spec,
215 size_target=fmt_subsystem.batch_size,
216 size_max=4 * fmt_subsystem.batch_size,
217 )
218 )
219
220 individual_results = list(
221 itertools.chain.from_iterable(
222 language_result.results for language_result in per_language_results
223 )
224 )
225
226 if not individual_results:
227 return Fmt(exit_code=0)
228
229 changed_digests = tuple(
230 language_result.output
231 for language_result in per_language_results
232 if language_result.did_change
233 )
234 if changed_digests:
235 # NB: this will fail if there are any conflicting changes, which we want to happen rather
236 # than silently having one result override the other. In practice, this should never
237 # happen due to us grouping each language's formatters into a single digest.
238 merged_formatted_digest = await Get(Digest, MergeDigests(changed_digests))
239 workspace.write_digest(merged_formatted_digest)
240
241 if individual_results:
242 console.print_stderr("")
243
244 # We group all results for the same formatter so that we can give one final status in the
245 # summary. This is only relevant if there were multiple results because of
246 # `--per-file-caching`.
247 formatter_to_results = defaultdict(set)
248 for result in individual_results:
249 formatter_to_results[result.formatter_name].add(result)
250
251 for formatter, results in sorted(formatter_to_results.items()):
252 if any(result.did_change for result in results):
253 sigil = console.sigil_succeeded_with_edits()
254 status = "made changes"
255 elif all(result.skipped for result in results):
256 continue
257 else:
258 sigil = console.sigil_succeeded()
259 status = "made no changes"
260 console.print_stderr(f"{sigil} {formatter} {status}.")
261
262 # Since the rules to produce FmtResult should use ExecuteRequest, rather than
263 # FallibleProcess, we assume that there were no failures.
264 return Fmt(exit_code=0)
265
266
267 @rule
268 async def fmt_language(language_fmt_request: _LanguageFmtRequest) -> _LanguageFmtResults:
269 original_sources = await Get(
270 SourceFiles,
271 SourceFilesRequest(target[SourcesField] for target in language_fmt_request.targets),
272 )
273 prior_formatter_result = original_sources.snapshot
274
275 results = []
276 for fmt_request_type in language_fmt_request.request_types:
277 request = fmt_request_type(
278 (
279 fmt_request_type.field_set_type.create(target)
280 for target in language_fmt_request.targets
281 if fmt_request_type.field_set_type.is_applicable(target)
282 ),
283 snapshot=prior_formatter_result,
284 )
285 if not request.field_sets:
286 continue
287 result = await Get(FmtResult, FmtRequest, request)
288 results.append(result)
289 prior_formatter_result = result.output
290 return _LanguageFmtResults(
291 tuple(results),
292 input=original_sources.snapshot.digest,
293 output=prior_formatter_result.digest,
294 )
295
296
297 def rules():
298 return collect_rules()
299
[end of src/python/pants/core/goals/fmt.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/python/pants/core/goals/fmt.py b/src/python/pants/core/goals/fmt.py
--- a/src/python/pants/core/goals/fmt.py
+++ b/src/python/pants/core/goals/fmt.py
@@ -286,7 +286,8 @@
continue
result = await Get(FmtResult, FmtRequest, request)
results.append(result)
- prior_formatter_result = result.output
+ if not result.skipped:
+ prior_formatter_result = result.output
return _LanguageFmtResults(
tuple(results),
input=original_sources.snapshot.digest,
|
{"golden_diff": "diff --git a/src/python/pants/core/goals/fmt.py b/src/python/pants/core/goals/fmt.py\n--- a/src/python/pants/core/goals/fmt.py\n+++ b/src/python/pants/core/goals/fmt.py\n@@ -286,7 +286,8 @@\n continue\n result = await Get(FmtResult, FmtRequest, request)\n results.append(result)\n- prior_formatter_result = result.output\n+ if not result.skipped:\n+ prior_formatter_result = result.output\n return _LanguageFmtResults(\n tuple(results),\n input=original_sources.snapshot.digest,\n", "issue": "--black-skip breaks fmt goal on isort exception\n**Describe the bug**\r\nThere are a few related bugs, but the breaking behaviour is the most important I think. I've tested on 2.11, and everything works as expected, so this appears to only be a pre-release/dev bug.\r\n\r\nWhile writing a plugin, I started implementing and testing the `skip` functionality and noticed some strange behaviour on `main`. This behaviour isn't limited to `black`, but it's quick to replicate.\r\n\r\nNote below that even after skipping `black` in the `lint` goal, it is in the associated list and marked \"succeeded\". In reality, it should be failing if it's actually running. In 2.11, black just wouldn't be shown in that list. Personally, I think \"black skipped\" in yellow text might make more sense, but \ud83e\udd37\ud83c\udffd \r\n\r\n```bash\r\n> ./pants lint --black-skip ::\r\n\r\n\u2713 autoflake succeeded.\r\n\u2713 bandit succeeded.\r\n\u2713 black succeeded.\r\n\u2715 flake8 failed.\r\n\u2713 isort succeeded.\r\n\u2713 pylint succeeded.\r\n\u2713 pyupgrade succeeded.\r\n\u2713 shellcheck succeeded.\r\n\u2713 shfmt succeeded.\r\n```\r\n\r\n The more concerning behaviour is on format.\r\n\r\n```bash\r\n> ./pants fmt --black-skip ::\r\n\r\n14:28:06.41 [ERROR] 1 Exception encountered:\r\n\r\nEngine traceback:\r\n in select\r\n in pants.core.goals.fmt.fmt\r\n in pants.core.goals.fmt.fmt_language\r\n in pants.backend.python.lint.isort.rules.isort_fmt (isort)\r\n in pants.engine.process.fallible_to_exec_result_or_raise\r\nTraceback (most recent call last):\r\n File \"/Users/sj/.cache/pants/setup/bootstrap-Darwin-x86_64/pants.NUYNTS/install/lib/python3.9/site-packages/pants/engine/process.py\", line 272, in fallible_to_exec_result_or_raise\r\n raise ProcessExecutionFailure(\r\npants.engine.process.ProcessExecutionFailure: Process 'Run isort on 7 files.' 
failed with exit code 1.\r\n```\r\n\r\n\r\n**Pants version**\r\n2.13.0.dev1\r\n\r\n**OS**\r\nMacOS\r\n\r\n**Additional info**\r\nUsing this repo (https://github.com/sureshjoshi/pants-example-plugin) and I manually set the toml to `2.13.0.dev`.\r\n\r\nI also went in and messed with the `main.py` formatting, so I knew `black` would want to run.\n", "before_files": [{"content": "# Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import annotations\n\nimport itertools\nimport logging\nfrom collections import defaultdict\nfrom dataclasses import dataclass\nfrom typing import Iterable, TypeVar\n\nfrom pants.core.goals.style_request import (\n StyleRequest,\n determine_specified_tool_names,\n only_option_help,\n style_batch_size_help,\n)\nfrom pants.core.util_rules.source_files import SourceFiles, SourceFilesRequest\nfrom pants.engine.console import Console\nfrom pants.engine.engine_aware import EngineAwareReturnType\nfrom pants.engine.fs import Digest, MergeDigests, Snapshot, SnapshotDiff, Workspace\nfrom pants.engine.goal import Goal, GoalSubsystem\nfrom pants.engine.internals.native_engine import EMPTY_SNAPSHOT\nfrom pants.engine.process import FallibleProcessResult, ProcessResult\nfrom pants.engine.rules import Get, MultiGet, collect_rules, goal_rule, rule\nfrom pants.engine.target import FieldSet, FilteredTargets, SourcesField, Targets\nfrom pants.engine.unions import UnionMembership, union\nfrom pants.option.option_types import IntOption, StrListOption\nfrom pants.util.collections import partition_sequentially\nfrom pants.util.logging import LogLevel\nfrom pants.util.meta import frozen_after_init\nfrom pants.util.strutil import strip_v2_chroot_path\n\n_F = TypeVar(\"_F\", bound=\"FmtResult\")\n_FS = TypeVar(\"_FS\", bound=FieldSet)\n\nlogger = logging.getLogger(__name__)\n\n\n@dataclass(frozen=True)\nclass FmtResult(EngineAwareReturnType):\n input: Snapshot\n output: Snapshot\n stdout: str\n stderr: str\n formatter_name: str\n\n @classmethod\n def create(\n cls,\n request: FmtRequest,\n process_result: ProcessResult | FallibleProcessResult,\n output: Snapshot,\n *,\n strip_chroot_path: bool = False,\n ) -> FmtResult:\n def prep_output(s: bytes) -> str:\n return strip_v2_chroot_path(s) if strip_chroot_path else s.decode()\n\n return cls(\n input=request.snapshot,\n output=output,\n stdout=prep_output(process_result.stdout),\n stderr=prep_output(process_result.stderr),\n formatter_name=request.name,\n )\n\n def __post_init__(self):\n # NB: We debug log stdout/stderr because `message` doesn't log it.\n log = f\"Output from {self.formatter_name}\"\n if self.stdout:\n log += f\"\\n{self.stdout}\"\n if self.stderr:\n log += f\"\\n{self.stderr}\"\n logger.debug(log)\n\n @classmethod\n def skip(cls: type[_F], *, formatter_name: str) -> _F:\n return cls(\n input=EMPTY_SNAPSHOT,\n output=EMPTY_SNAPSHOT,\n stdout=\"\",\n stderr=\"\",\n formatter_name=formatter_name,\n )\n\n @property\n def skipped(self) -> bool:\n return (\n self.input == EMPTY_SNAPSHOT\n and self.output == EMPTY_SNAPSHOT\n and not self.stdout\n and not self.stderr\n )\n\n @property\n def did_change(self) -> bool:\n return self.output != self.input\n\n def level(self) -> LogLevel | None:\n if self.skipped:\n return LogLevel.DEBUG\n return LogLevel.WARN if self.did_change else LogLevel.INFO\n\n def message(self) -> str | None:\n if self.skipped:\n return f\"{self.formatter_name} skipped.\"\n message = \"made changes.\" if self.did_change 
else \"made no changes.\"\n\n # NB: Instead of printing out `stdout` and `stderr`, we just print a list of files which\n # changed. We do this for two reasons:\n # 1. This is run as part of both `fmt` and `lint`, and we want consistent output between both\n # 2. Different formatters have different stdout/stderr. This way is consistent across all\n # formatters.\n if self.did_change:\n output = \"\".join(\n f\"\\n {file}\"\n for file in SnapshotDiff.from_snapshots(self.input, self.output).changed_files\n )\n else:\n output = \"\"\n\n return f\"{self.formatter_name} {message}{output}\"\n\n def cacheable(self) -> bool:\n \"\"\"Is marked uncacheable to ensure that it always renders.\"\"\"\n return False\n\n\n@union\n@frozen_after_init\n@dataclass(unsafe_hash=True)\nclass FmtRequest(StyleRequest[_FS]):\n snapshot: Snapshot\n\n def __init__(self, field_sets: Iterable[_FS], snapshot: Snapshot) -> None:\n self.snapshot = snapshot\n super().__init__(field_sets)\n\n\n@dataclass(frozen=True)\nclass _LanguageFmtRequest:\n request_types: tuple[type[FmtRequest], ...]\n targets: Targets\n\n\n@dataclass(frozen=True)\nclass _LanguageFmtResults:\n\n results: tuple[FmtResult, ...]\n input: Digest\n output: Digest\n\n @property\n def did_change(self) -> bool:\n return self.input != self.output\n\n\nclass FmtSubsystem(GoalSubsystem):\n name = \"fmt\"\n help = \"Autoformat source code.\"\n\n @classmethod\n def activated(cls, union_membership: UnionMembership) -> bool:\n return FmtRequest in union_membership\n\n only = StrListOption(\n \"--only\",\n help=only_option_help(\"fmt\", \"formatter\", \"isort\", \"shfmt\"),\n )\n batch_size = IntOption(\n \"--batch-size\",\n advanced=True,\n default=128,\n help=style_batch_size_help(uppercase=\"Formatter\", lowercase=\"formatter\"),\n )\n\n\nclass Fmt(Goal):\n subsystem_cls = FmtSubsystem\n\n\n@goal_rule\nasync def fmt(\n console: Console,\n targets: FilteredTargets,\n fmt_subsystem: FmtSubsystem,\n workspace: Workspace,\n union_membership: UnionMembership,\n) -> Fmt:\n request_types = union_membership[FmtRequest]\n specified_names = determine_specified_tool_names(\"fmt\", fmt_subsystem.only, request_types)\n\n # Group targets by the sequence of FmtRequests that apply to them.\n targets_by_fmt_request_order = defaultdict(list)\n for target in targets:\n fmt_requests = []\n for fmt_request in request_types:\n valid_name = fmt_request.name in specified_names\n if valid_name and fmt_request.field_set_type.is_applicable(target): # type: ignore[misc]\n fmt_requests.append(fmt_request)\n if fmt_requests:\n targets_by_fmt_request_order[tuple(fmt_requests)].append(target)\n\n # Spawn sequential formatting per unique sequence of FmtRequests.\n per_language_results = await MultiGet(\n Get(\n _LanguageFmtResults,\n _LanguageFmtRequest(fmt_requests, Targets(target_batch)),\n )\n for fmt_requests, targets in targets_by_fmt_request_order.items()\n for target_batch in partition_sequentially(\n targets,\n key=lambda t: t.address.spec,\n size_target=fmt_subsystem.batch_size,\n size_max=4 * fmt_subsystem.batch_size,\n )\n )\n\n individual_results = list(\n itertools.chain.from_iterable(\n language_result.results for language_result in per_language_results\n )\n )\n\n if not individual_results:\n return Fmt(exit_code=0)\n\n changed_digests = tuple(\n language_result.output\n for language_result in per_language_results\n if language_result.did_change\n )\n if changed_digests:\n # NB: this will fail if there are any conflicting changes, which we want to happen rather\n # than silently 
having one result override the other. In practice, this should never\n # happen due to us grouping each language's formatters into a single digest.\n merged_formatted_digest = await Get(Digest, MergeDigests(changed_digests))\n workspace.write_digest(merged_formatted_digest)\n\n if individual_results:\n console.print_stderr(\"\")\n\n # We group all results for the same formatter so that we can give one final status in the\n # summary. This is only relevant if there were multiple results because of\n # `--per-file-caching`.\n formatter_to_results = defaultdict(set)\n for result in individual_results:\n formatter_to_results[result.formatter_name].add(result)\n\n for formatter, results in sorted(formatter_to_results.items()):\n if any(result.did_change for result in results):\n sigil = console.sigil_succeeded_with_edits()\n status = \"made changes\"\n elif all(result.skipped for result in results):\n continue\n else:\n sigil = console.sigil_succeeded()\n status = \"made no changes\"\n console.print_stderr(f\"{sigil} {formatter} {status}.\")\n\n # Since the rules to produce FmtResult should use ExecuteRequest, rather than\n # FallibleProcess, we assume that there were no failures.\n return Fmt(exit_code=0)\n\n\n@rule\nasync def fmt_language(language_fmt_request: _LanguageFmtRequest) -> _LanguageFmtResults:\n original_sources = await Get(\n SourceFiles,\n SourceFilesRequest(target[SourcesField] for target in language_fmt_request.targets),\n )\n prior_formatter_result = original_sources.snapshot\n\n results = []\n for fmt_request_type in language_fmt_request.request_types:\n request = fmt_request_type(\n (\n fmt_request_type.field_set_type.create(target)\n for target in language_fmt_request.targets\n if fmt_request_type.field_set_type.is_applicable(target)\n ),\n snapshot=prior_formatter_result,\n )\n if not request.field_sets:\n continue\n result = await Get(FmtResult, FmtRequest, request)\n results.append(result)\n prior_formatter_result = result.output\n return _LanguageFmtResults(\n tuple(results),\n input=original_sources.snapshot.digest,\n output=prior_formatter_result.digest,\n )\n\n\ndef rules():\n return collect_rules()\n", "path": "src/python/pants/core/goals/fmt.py"}]}
| 4,006 | 134 |
gh_patches_debug_23879
|
rasdani/github-patches
|
git_diff
|
yt-dlp__yt-dlp-3573
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[InfoQ] Unable to extract mp3Form form
### Checklist
- [X] I'm reporting a broken site
- [X] I've verified that I'm running yt-dlp version **2022.04.08** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
- [X] I've checked that all provided URLs are alive and playable in a browser
- [X] I've checked that all URLs and arguments with special characters are [properly quoted or escaped](https://github.com/ytdl-org/youtube-dl#video-url-contains-an-ampersand-and-im-getting-some-strange-output-1-2839-or-v-is-not-recognized-as-an-internal-or-external-command)
- [X] I've searched the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues including closed ones. DO NOT post duplicates
- [X] I've read the [guidelines for opening an issue](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue)
- [ ] I've read about [sharing account credentials](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#are-you-willing-to-share-account-details-if-needed) and I'm willing to share it if required
### Region
_No response_
### Description
InfoQ support currently fails, e.g., for <https://www.infoq.com/presentations/rr-reversible-debugging/>:
```
yt-dlp https://www.infoq.com/presentations/rr-reversible-debugging/
[InfoQ] rr-reversible-debugging: Downloading webpage
ERROR: [InfoQ] rr-reversible-debugging: Unable to extract mp3Form form; please report this issue on https://github.com/yt-dlp/yt-dlp/issues?q= , filling out th
e appropriate issue template. Confirm you are on the latest version using yt-dlp -U
```
### Verbose log
```shell
[debug] Command-line config: ['-vU', 'https://www.infoq.com/presentations/rr-reversible-debugging/']
[debug] Encodings: locale cp1252, fs utf-8, out utf-8 (No ANSI), err utf-8 (No ANSI), pref cp1252
[debug] yt-dlp version 2022.04.08 [7884ade] (win_exe)
[debug] Python version 3.8.10 (CPython 64bit) - Windows-2012ServerR2-6.3.9600
[debug] Checking exe version: ffmpeg -bsfs
[debug] Checking exe version: ffprobe -bsfs
[debug] exe versions: ffmpeg N-81036-g2b14204 (needs_adtstoasc), ffprobe 5.0.1-essentials_build-www.gyan.dev
[debug] Optional libraries: brotli, certifi, Cryptodome, mutagen, sqlite, websockets
[debug] Proxy map: {}
Latest version: 2022.04.08, Current version: 2022.04.08
yt-dlp is up to date (2022.04.08)
[debug] [InfoQ] Extracting URL: https://www.infoq.com/presentations/rr-reversible-debugging/
[InfoQ] rr-reversible-debugging: Downloading webpage
ERROR: [InfoQ] rr-reversible-debugging: Unable to extract mp3Form form; please report this issue on https://github.com/yt-dlp/yt-dlp/issues?q= , filling out th
e appropriate issue template. Confirm you are on the latest version using yt-dlp -U
File "yt_dlp\extractor\common.py", line 641, in extract
File "yt_dlp\extractor\infoq.py", line 128, in _real_extract
File "yt_dlp\extractor\infoq.py", line 93, in _extract_http_audio
File "yt_dlp\extractor\common.py", line 1638, in _form_hidden_inputs
File "yt_dlp\extractor\common.py", line 1229, in _search_regex
```
</issue>
<code>
[start of yt_dlp/extractor/infoq.py]
1 from ..compat import (
2 compat_b64decode,
3 compat_urllib_parse_unquote,
4 compat_urlparse,
5 )
6 from ..utils import (
7 determine_ext,
8 update_url_query,
9 )
10 from .bokecc import BokeCCBaseIE
11
12
13 class InfoQIE(BokeCCBaseIE):
14 _VALID_URL = r'https?://(?:www\.)?infoq\.com/(?:[^/]+/)+(?P<id>[^/]+)'
15
16 _TESTS = [{
17 'url': 'http://www.infoq.com/presentations/A-Few-of-My-Favorite-Python-Things',
18 'md5': 'b5ca0e0a8c1fed93b0e65e48e462f9a2',
19 'info_dict': {
20 'id': 'A-Few-of-My-Favorite-Python-Things',
21 'ext': 'mp4',
22 'description': 'Mike Pirnat presents some tips and tricks, standard libraries and third party packages that make programming in Python a richer experience.',
23 'title': 'A Few of My Favorite [Python] Things',
24 },
25 }, {
26 'url': 'http://www.infoq.com/fr/presentations/changez-avis-sur-javascript',
27 'only_matching': True,
28 }, {
29 'url': 'http://www.infoq.com/cn/presentations/openstack-continued-delivery',
30 'md5': '4918d0cca1497f2244572caf626687ef',
31 'info_dict': {
32 'id': 'openstack-continued-delivery',
33 'title': 'OpenStack持续交付之路',
34 'ext': 'flv',
35 'description': 'md5:308d981fb28fa42f49f9568322c683ff',
36 },
37 }, {
38 'url': 'https://www.infoq.com/presentations/Simple-Made-Easy',
39 'md5': '0e34642d4d9ef44bf86f66f6399672db',
40 'info_dict': {
41 'id': 'Simple-Made-Easy',
42 'title': 'Simple Made Easy',
43 'ext': 'mp3',
44 'description': 'md5:3e0e213a8bbd074796ef89ea35ada25b',
45 },
46 'params': {
47 'format': 'bestaudio',
48 },
49 }]
50
51 def _extract_rtmp_video(self, webpage):
52 # The server URL is hardcoded
53 video_url = 'rtmpe://videof.infoq.com/cfx/st/'
54
55 # Extract video URL
56 encoded_id = self._search_regex(
57 r"jsclassref\s*=\s*'([^']*)'", webpage, 'encoded id', default=None)
58
59 real_id = compat_urllib_parse_unquote(compat_b64decode(encoded_id).decode('utf-8'))
60 playpath = 'mp4:' + real_id
61
62 return [{
63 'format_id': 'rtmp_video',
64 'url': video_url,
65 'ext': determine_ext(playpath),
66 'play_path': playpath,
67 }]
68
69 def _extract_cf_auth(self, webpage):
70 policy = self._search_regex(r'InfoQConstants\.scp\s*=\s*\'([^\']+)\'', webpage, 'policy')
71 signature = self._search_regex(r'InfoQConstants\.scs\s*=\s*\'([^\']+)\'', webpage, 'signature')
72 key_pair_id = self._search_regex(r'InfoQConstants\.sck\s*=\s*\'([^\']+)\'', webpage, 'key-pair-id')
73 return {
74 'Policy': policy,
75 'Signature': signature,
76 'Key-Pair-Id': key_pair_id,
77 }
78
79 def _extract_http_video(self, webpage):
80 http_video_url = self._search_regex(r'P\.s\s*=\s*\'([^\']+)\'', webpage, 'video URL')
81 http_video_url = update_url_query(http_video_url, self._extract_cf_auth(webpage))
82 return [{
83 'format_id': 'http_video',
84 'url': http_video_url,
85 'http_headers': {'Referer': 'https://www.infoq.com/'},
86 }]
87
88 def _extract_http_audio(self, webpage, video_id):
89 fields = self._form_hidden_inputs('mp3Form', webpage)
90 http_audio_url = fields.get('filename')
91 if not http_audio_url:
92 return []
93
94 # base URL is found in the Location header in the response returned by
95 # GET https://www.infoq.com/mp3download.action?filename=... when logged in.
96 http_audio_url = compat_urlparse.urljoin('http://ress.infoq.com/downloads/mp3downloads/', http_audio_url)
97 http_audio_url = update_url_query(http_audio_url, self._extract_cf_auth(webpage))
98
99 # audio file seem to be missing some times even if there is a download link
100 # so probe URL to make sure
101 if not self._is_valid_url(http_audio_url, video_id):
102 return []
103
104 return [{
105 'format_id': 'http_audio',
106 'url': http_audio_url,
107 'vcodec': 'none',
108 }]
109
110 def _real_extract(self, url):
111 video_id = self._match_id(url)
112 webpage = self._download_webpage(url, video_id)
113
114 video_title = self._html_extract_title(webpage)
115 video_description = self._html_search_meta('description', webpage, 'description')
116
117 if '/cn/' in url:
118 # for China videos, HTTP video URL exists but always fails with 403
119 formats = self._extract_bokecc_formats(webpage, video_id)
120 else:
121 formats = (
122 self._extract_rtmp_video(webpage)
123 + self._extract_http_video(webpage)
124 + self._extract_http_audio(webpage, video_id))
125
126 self._sort_formats(formats)
127
128 return {
129 'id': video_id,
130 'title': video_title,
131 'description': video_description,
132 'formats': formats,
133 }
134
[end of yt_dlp/extractor/infoq.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/yt_dlp/extractor/infoq.py b/yt_dlp/extractor/infoq.py
--- a/yt_dlp/extractor/infoq.py
+++ b/yt_dlp/extractor/infoq.py
@@ -4,8 +4,10 @@
compat_urlparse,
)
from ..utils import (
+ ExtractorError,
determine_ext,
update_url_query,
+ traverse_obj,
)
from .bokecc import BokeCCBaseIE
@@ -34,6 +36,7 @@
'ext': 'flv',
'description': 'md5:308d981fb28fa42f49f9568322c683ff',
},
+ 'skip': 'Sorry, the page you visited does not exist',
}, {
'url': 'https://www.infoq.com/presentations/Simple-Made-Easy',
'md5': '0e34642d4d9ef44bf86f66f6399672db',
@@ -86,8 +89,10 @@
}]
def _extract_http_audio(self, webpage, video_id):
- fields = self._form_hidden_inputs('mp3Form', webpage)
- http_audio_url = fields.get('filename')
+ try:
+ http_audio_url = traverse_obj(self._form_hidden_inputs('mp3Form', webpage), 'filename')
+ except ExtractorError:
+ http_audio_url = None
if not http_audio_url:
return []
|
{"golden_diff": "diff --git a/yt_dlp/extractor/infoq.py b/yt_dlp/extractor/infoq.py\n--- a/yt_dlp/extractor/infoq.py\n+++ b/yt_dlp/extractor/infoq.py\n@@ -4,8 +4,10 @@\n compat_urlparse,\n )\n from ..utils import (\n+ ExtractorError,\n determine_ext,\n update_url_query,\n+ traverse_obj,\n )\n from .bokecc import BokeCCBaseIE\n \n@@ -34,6 +36,7 @@\n 'ext': 'flv',\n 'description': 'md5:308d981fb28fa42f49f9568322c683ff',\n },\n+ 'skip': 'Sorry, the page you visited does not exist',\n }, {\n 'url': 'https://www.infoq.com/presentations/Simple-Made-Easy',\n 'md5': '0e34642d4d9ef44bf86f66f6399672db',\n@@ -86,8 +89,10 @@\n }]\n \n def _extract_http_audio(self, webpage, video_id):\n- fields = self._form_hidden_inputs('mp3Form', webpage)\n- http_audio_url = fields.get('filename')\n+ try:\n+ http_audio_url = traverse_obj(self._form_hidden_inputs('mp3Form', webpage), 'filename')\n+ except ExtractorError:\n+ http_audio_url = None\n if not http_audio_url:\n return []\n", "issue": "[InfoQ] Unable to extract mp3Form form\n### Checklist\n\n- [X] I'm reporting a broken site\n- [X] I've verified that I'm running yt-dlp version **2022.04.08** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)\n- [X] I've checked that all provided URLs are alive and playable in a browser\n- [X] I've checked that all URLs and arguments with special characters are [properly quoted or escaped](https://github.com/ytdl-org/youtube-dl#video-url-contains-an-ampersand-and-im-getting-some-strange-output-1-2839-or-v-is-not-recognized-as-an-internal-or-external-command)\n- [X] I've searched the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues including closed ones. DO NOT post duplicates\n- [X] I've read the [guidelines for opening an issue](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue)\n- [ ] I've read about [sharing account credentials](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#are-you-willing-to-share-account-details-if-needed) and I'm willing to share it if required\n\n### Region\n\n_No response_\n\n### Description\n\nInfoQ support currently fails, e.g., for <https://www.infoq.com/presentations/rr-reversible-debugging/>:\r\n```\r\nyt-dlp https://www.infoq.com/presentations/rr-reversible-debugging/\r\n\r\n[InfoQ] rr-reversible-debugging: Downloading webpage\r\nERROR: [InfoQ] rr-reversible-debugging: Unable to extract mp3Form form; please report this issue on https://github.com/yt-dlp/yt-dlp/issues?q= , filling out th\r\ne appropriate issue template. 
Confirm you are on the latest version using yt-dlp -U\r\n```\n\n### Verbose log\n\n```shell\n[debug] Command-line config: ['-vU', 'https://www.infoq.com/presentations/rr-reversible-debugging/']\r\n[debug] Encodings: locale cp1252, fs utf-8, out utf-8 (No ANSI), err utf-8 (No ANSI), pref cp1252\r\n[debug] yt-dlp version 2022.04.08 [7884ade] (win_exe)\r\n[debug] Python version 3.8.10 (CPython 64bit) - Windows-2012ServerR2-6.3.9600\r\n[debug] Checking exe version: ffmpeg -bsfs\r\n[debug] Checking exe version: ffprobe -bsfs\r\n[debug] exe versions: ffmpeg N-81036-g2b14204 (needs_adtstoasc), ffprobe 5.0.1-essentials_build-www.gyan.dev\r\n[debug] Optional libraries: brotli, certifi, Cryptodome, mutagen, sqlite, websockets\r\n[debug] Proxy map: {}\r\nLatest version: 2022.04.08, Current version: 2022.04.08\r\nyt-dlp is up to date (2022.04.08)\r\n[debug] [InfoQ] Extracting URL: https://www.infoq.com/presentations/rr-reversible-debugging/\r\n[InfoQ] rr-reversible-debugging: Downloading webpage\r\nERROR: [InfoQ] rr-reversible-debugging: Unable to extract mp3Form form; please report this issue on https://github.com/yt-dlp/yt-dlp/issues?q= , filling out th\r\ne appropriate issue template. Confirm you are on the latest version using yt-dlp -U\r\n File \"yt_dlp\\extractor\\common.py\", line 641, in extract\r\n File \"yt_dlp\\extractor\\infoq.py\", line 128, in _real_extract\r\n File \"yt_dlp\\extractor\\infoq.py\", line 93, in _extract_http_audio\r\n File \"yt_dlp\\extractor\\common.py\", line 1638, in _form_hidden_inputs\r\n File \"yt_dlp\\extractor\\common.py\", line 1229, in _search_regex\n```\n\n", "before_files": [{"content": "from ..compat import (\n compat_b64decode,\n compat_urllib_parse_unquote,\n compat_urlparse,\n)\nfrom ..utils import (\n determine_ext,\n update_url_query,\n)\nfrom .bokecc import BokeCCBaseIE\n\n\nclass InfoQIE(BokeCCBaseIE):\n _VALID_URL = r'https?://(?:www\\.)?infoq\\.com/(?:[^/]+/)+(?P<id>[^/]+)'\n\n _TESTS = [{\n 'url': 'http://www.infoq.com/presentations/A-Few-of-My-Favorite-Python-Things',\n 'md5': 'b5ca0e0a8c1fed93b0e65e48e462f9a2',\n 'info_dict': {\n 'id': 'A-Few-of-My-Favorite-Python-Things',\n 'ext': 'mp4',\n 'description': 'Mike Pirnat presents some tips and tricks, standard libraries and third party packages that make programming in Python a richer experience.',\n 'title': 'A Few of My Favorite [Python] Things',\n },\n }, {\n 'url': 'http://www.infoq.com/fr/presentations/changez-avis-sur-javascript',\n 'only_matching': True,\n }, {\n 'url': 'http://www.infoq.com/cn/presentations/openstack-continued-delivery',\n 'md5': '4918d0cca1497f2244572caf626687ef',\n 'info_dict': {\n 'id': 'openstack-continued-delivery',\n 'title': 'OpenStack\u6301\u7eed\u4ea4\u4ed8\u4e4b\u8def',\n 'ext': 'flv',\n 'description': 'md5:308d981fb28fa42f49f9568322c683ff',\n },\n }, {\n 'url': 'https://www.infoq.com/presentations/Simple-Made-Easy',\n 'md5': '0e34642d4d9ef44bf86f66f6399672db',\n 'info_dict': {\n 'id': 'Simple-Made-Easy',\n 'title': 'Simple Made Easy',\n 'ext': 'mp3',\n 'description': 'md5:3e0e213a8bbd074796ef89ea35ada25b',\n },\n 'params': {\n 'format': 'bestaudio',\n },\n }]\n\n def _extract_rtmp_video(self, webpage):\n # The server URL is hardcoded\n video_url = 'rtmpe://videof.infoq.com/cfx/st/'\n\n # Extract video URL\n encoded_id = self._search_regex(\n r\"jsclassref\\s*=\\s*'([^']*)'\", webpage, 'encoded id', default=None)\n\n real_id = compat_urllib_parse_unquote(compat_b64decode(encoded_id).decode('utf-8'))\n playpath = 'mp4:' + real_id\n\n return [{\n 'format_id': 
'rtmp_video',\n 'url': video_url,\n 'ext': determine_ext(playpath),\n 'play_path': playpath,\n }]\n\n def _extract_cf_auth(self, webpage):\n policy = self._search_regex(r'InfoQConstants\\.scp\\s*=\\s*\\'([^\\']+)\\'', webpage, 'policy')\n signature = self._search_regex(r'InfoQConstants\\.scs\\s*=\\s*\\'([^\\']+)\\'', webpage, 'signature')\n key_pair_id = self._search_regex(r'InfoQConstants\\.sck\\s*=\\s*\\'([^\\']+)\\'', webpage, 'key-pair-id')\n return {\n 'Policy': policy,\n 'Signature': signature,\n 'Key-Pair-Id': key_pair_id,\n }\n\n def _extract_http_video(self, webpage):\n http_video_url = self._search_regex(r'P\\.s\\s*=\\s*\\'([^\\']+)\\'', webpage, 'video URL')\n http_video_url = update_url_query(http_video_url, self._extract_cf_auth(webpage))\n return [{\n 'format_id': 'http_video',\n 'url': http_video_url,\n 'http_headers': {'Referer': 'https://www.infoq.com/'},\n }]\n\n def _extract_http_audio(self, webpage, video_id):\n fields = self._form_hidden_inputs('mp3Form', webpage)\n http_audio_url = fields.get('filename')\n if not http_audio_url:\n return []\n\n # base URL is found in the Location header in the response returned by\n # GET https://www.infoq.com/mp3download.action?filename=... when logged in.\n http_audio_url = compat_urlparse.urljoin('http://ress.infoq.com/downloads/mp3downloads/', http_audio_url)\n http_audio_url = update_url_query(http_audio_url, self._extract_cf_auth(webpage))\n\n # audio file seem to be missing some times even if there is a download link\n # so probe URL to make sure\n if not self._is_valid_url(http_audio_url, video_id):\n return []\n\n return [{\n 'format_id': 'http_audio',\n 'url': http_audio_url,\n 'vcodec': 'none',\n }]\n\n def _real_extract(self, url):\n video_id = self._match_id(url)\n webpage = self._download_webpage(url, video_id)\n\n video_title = self._html_extract_title(webpage)\n video_description = self._html_search_meta('description', webpage, 'description')\n\n if '/cn/' in url:\n # for China videos, HTTP video URL exists but always fails with 403\n formats = self._extract_bokecc_formats(webpage, video_id)\n else:\n formats = (\n self._extract_rtmp_video(webpage)\n + self._extract_http_video(webpage)\n + self._extract_http_audio(webpage, video_id))\n\n self._sort_formats(formats)\n\n return {\n 'id': video_id,\n 'title': video_title,\n 'description': video_description,\n 'formats': formats,\n }\n", "path": "yt_dlp/extractor/infoq.py"}]}
| 3,179 | 351 |
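The InfoQ fix above follows a common extractor pattern: an optional sub-extraction (here the MP3 download form) should degrade to "no formats" instead of aborting the whole run when the fragment is missing. Below is a minimal standalone sketch of that pattern; `ExtractionError`, `parse_hidden_inputs`, and the HTML strings are invented stand-ins for illustration, not yt-dlp APIs.

```python
import re


class ExtractionError(Exception):
    """Raised when an expected fragment is missing from a page."""


def parse_hidden_inputs(form_name, html):
    """Return hidden <input> name/value pairs of a named form, or raise."""
    form = re.search(
        r'<form[^>]+name="%s".*?</form>' % re.escape(form_name), html, re.S)
    if form is None:
        raise ExtractionError('form %r not found' % form_name)
    # Simplistic attribute order (type, name, value) is assumed for the demo.
    return dict(re.findall(
        r'<input[^>]+type="hidden"[^>]+name="([^"]+)"[^>]+value="([^"]*)"',
        form.group(0)))


def extract_audio_formats(html):
    """Optional extraction: swallow the error and return no formats."""
    try:
        filename = parse_hidden_inputs('mp3Form', html).get('filename')
    except ExtractionError:
        filename = None
    if not filename:
        return []  # audio is simply unavailable for this page
    return [{'format_id': 'http_audio', 'url': filename, 'vcodec': 'none'}]


if __name__ == '__main__':
    page_without_audio = '<html><body>no mp3 form here</body></html>'
    print(extract_audio_formats(page_without_audio))  # -> []
```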
gh_patches_debug_38637
|
rasdani/github-patches
|
git_diff
|
buildbot__buildbot-4036
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
optimize number of database queries
buildbot seems to be running database queries in a loop fetching data for individual builders (resulting in large number of queries). This would affect performance in case of large number of builders. I believe it would be more efficient to use a single database query to fetch data for all the builders and then filter it in python.
I noticed following in postgresql logs (which indicate queries being run in a loop for each builder):
2017-06-23 21:19:52.221 GMT 24738 localhost(56312) LOG: statement: SELECT tags.name
FROM tags JOIN builders_tags ON tags.id = builders_tags.tagid
WHERE builders_tags.builderid = 218
2017-06-23 21:19:52.223 GMT 24738 localhost(56312) LOG: statement: SELECT tags.name
FROM tags JOIN builders_tags ON tags.id = builders_tags.tagid
WHERE builders_tags.builderid = 219
2017-06-23 21:19:52.226 GMT 24738 localhost(56312) LOG: statement: SELECT tags.name
FROM tags JOIN builders_tags ON tags.id = builders_tags.tagid
WHERE builders_tags.builderid = 220
2017-06-23 21:19:52.235 GMT 24738 localhost(56312) LOG: statement: SELECT tags.name
FROM tags JOIN builders_tags ON tags.id = builders_tags.tagid
WHERE builders_tags.builderid = 221
</issue>
<code>
[start of master/buildbot/db/builders.py]
1 # This file is part of Buildbot. Buildbot is free software: you can
2 # redistribute it and/or modify it under the terms of the GNU General Public
3 # License as published by the Free Software Foundation, version 2.
4 #
5 # This program is distributed in the hope that it will be useful, but WITHOUT
6 # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
7 # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
8 # details.
9 #
10 # You should have received a copy of the GNU General Public License along with
11 # this program; if not, write to the Free Software Foundation, Inc., 51
12 # Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
13 #
14 # Copyright Buildbot Team Members
15
16 from __future__ import absolute_import
17 from __future__ import print_function
18
19 import sqlalchemy as sa
20
21 from twisted.internet import defer
22
23 from buildbot.db import base
24
25
26 class BuildersConnectorComponent(base.DBConnectorComponent):
27
28 def findBuilderId(self, name, autoCreate=True):
29 tbl = self.db.model.builders
30 name_hash = self.hashColumns(name)
31 return self.findSomethingId(
32 tbl=tbl,
33 whereclause=(tbl.c.name_hash == name_hash),
34 insert_values=dict(
35 name=name,
36 name_hash=name_hash,
37 ), autoCreate=autoCreate)
38
39 @defer.inlineCallbacks
40 def updateBuilderInfo(self, builderid, description, tags):
41 # convert to tag IDs first, as necessary
42 def toTagid(tag):
43 if isinstance(tag, type(1)):
44 return defer.succeed(tag)
45 ssConnector = self.master.db.tags
46 return ssConnector.findTagId(tag)
47
48 tagsids = [r[1] for r in (yield defer.DeferredList(
49 [toTagid(tag) for tag in tags],
50 fireOnOneErrback=True,
51 consumeErrors=True))]
52
53 def thd(conn):
54 builders_tbl = self.db.model.builders
55 builders_tags_tbl = self.db.model.builders_tags
56 transaction = conn.begin()
57
58 q = builders_tbl.update(
59 whereclause=(builders_tbl.c.id == builderid))
60 conn.execute(q, description=description).close()
61 # remove previous builders_tags
62 conn.execute(builders_tags_tbl.delete(
63 whereclause=((builders_tags_tbl.c.builderid == builderid)))).close()
64
65 # add tag ids
66 if tagsids:
67 conn.execute(builders_tags_tbl.insert(),
68 [dict(builderid=builderid, tagid=tagid)
69 for tagid in tagsids]).close()
70
71 transaction.commit()
72
73 defer.returnValue((yield self.db.pool.do(thd)))
74
75 def getBuilder(self, builderid):
76 d = self.getBuilders(_builderid=builderid)
77
78 @d.addCallback
79 def first(bldrs):
80 if bldrs:
81 return bldrs[0]
82 return None
83 return d
84
85 def addBuilderMaster(self, builderid=None, masterid=None):
86 def thd(conn, no_recurse=False):
87 try:
88 tbl = self.db.model.builder_masters
89 q = tbl.insert()
90 conn.execute(q, builderid=builderid, masterid=masterid)
91 except (sa.exc.IntegrityError, sa.exc.ProgrammingError):
92 pass
93 return self.db.pool.do(thd)
94
95 def removeBuilderMaster(self, builderid=None, masterid=None):
96 def thd(conn, no_recurse=False):
97 tbl = self.db.model.builder_masters
98 conn.execute(tbl.delete(
99 whereclause=((tbl.c.builderid == builderid) &
100 (tbl.c.masterid == masterid))))
101 return self.db.pool.do(thd)
102
103 def getBuilders(self, masterid=None, _builderid=None):
104 def thd(conn):
105 bldr_tbl = self.db.model.builders
106 bm_tbl = self.db.model.builder_masters
107 j = bldr_tbl.outerjoin(bm_tbl)
108 # if we want to filter by masterid, we must join to builder_masters
109 # again, so we can still get the full set of masters for each
110 # builder
111 if masterid is not None:
112 limiting_bm_tbl = bm_tbl.alias('limiting_bm')
113 j = j.join(limiting_bm_tbl,
114 onclause=(bldr_tbl.c.id == limiting_bm_tbl.c.builderid))
115 q = sa.select(
116 [bldr_tbl.c.id, bldr_tbl.c.name,
117 bldr_tbl.c.description, bm_tbl.c.masterid],
118 from_obj=[j],
119 order_by=[bldr_tbl.c.id, bm_tbl.c.masterid])
120 if masterid is not None:
121 # filter the masterid from the limiting table
122 q = q.where(limiting_bm_tbl.c.masterid == masterid)
123 if _builderid is not None:
124 q = q.where(bldr_tbl.c.id == _builderid)
125
126 # now group those by builderid, aggregating by masterid
127 rv = []
128 last = None
129 for row in conn.execute(q).fetchall():
130 # pylint: disable=unsubscriptable-object
131 if not last or row['id'] != last['id']:
132 last = self._thd_row2dict(conn, row)
133 rv.append(last)
134 if row['masterid']:
135 last['masterids'].append(row['masterid'])
136 return rv
137 return self.db.pool.do(thd)
138
139 def _thd_row2dict(self, conn, row):
140 # get tags
141 builders_tags = self.db.model.builders_tags
142 tags = self.db.model.tags
143 from_clause = tags
144 from_clause = from_clause.join(builders_tags)
145 q = sa.select([tags.c.name],
146 (builders_tags.c.builderid == row.id)).select_from(from_clause)
147
148 tags = [r.name for r in
149 conn.execute(q).fetchall()]
150
151 return dict(id=row.id, name=row.name, masterids=[],
152 description=row.description,
153 tags=tags)
154
[end of master/buildbot/db/builders.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/master/buildbot/db/builders.py b/master/buildbot/db/builders.py
--- a/master/buildbot/db/builders.py
+++ b/master/buildbot/db/builders.py
@@ -16,6 +16,8 @@
from __future__ import absolute_import
from __future__ import print_function
+from collections import defaultdict
+
import sqlalchemy as sa
from twisted.internet import defer
@@ -104,6 +106,9 @@
def thd(conn):
bldr_tbl = self.db.model.builders
bm_tbl = self.db.model.builder_masters
+ builders_tags_tbl = self.db.model.builders_tags
+ tags_tbl = self.db.model.tags
+
j = bldr_tbl.outerjoin(bm_tbl)
# if we want to filter by masterid, we must join to builder_masters
# again, so we can still get the full set of masters for each
@@ -123,31 +128,24 @@
if _builderid is not None:
q = q.where(bldr_tbl.c.id == _builderid)
+ # build up a intermediate builder id -> tag names map (fixes performance issue #3396)
+ bldr_id_to_tags = defaultdict(list)
+ bldr_q = sa.select([builders_tags_tbl.c.builderid, tags_tbl.c.name])
+ bldr_q = bldr_q.select_from(tags_tbl.join(builders_tags_tbl))
+
+ for bldr_id, tag in conn.execute(bldr_q).fetchall():
+ bldr_id_to_tags[bldr_id].append(tag)
+
# now group those by builderid, aggregating by masterid
rv = []
last = None
for row in conn.execute(q).fetchall():
# pylint: disable=unsubscriptable-object
if not last or row['id'] != last['id']:
- last = self._thd_row2dict(conn, row)
+ last = dict(id=row.id, name=row.name, masterids=[], description=row.description,
+ tags=bldr_id_to_tags[row.id])
rv.append(last)
if row['masterid']:
last['masterids'].append(row['masterid'])
return rv
return self.db.pool.do(thd)
-
- def _thd_row2dict(self, conn, row):
- # get tags
- builders_tags = self.db.model.builders_tags
- tags = self.db.model.tags
- from_clause = tags
- from_clause = from_clause.join(builders_tags)
- q = sa.select([tags.c.name],
- (builders_tags.c.builderid == row.id)).select_from(from_clause)
-
- tags = [r.name for r in
- conn.execute(q).fetchall()]
-
- return dict(id=row.id, name=row.name, masterids=[],
- description=row.description,
- tags=tags)
|
{"golden_diff": "diff --git a/master/buildbot/db/builders.py b/master/buildbot/db/builders.py\n--- a/master/buildbot/db/builders.py\n+++ b/master/buildbot/db/builders.py\n@@ -16,6 +16,8 @@\n from __future__ import absolute_import\n from __future__ import print_function\n \n+from collections import defaultdict\n+\n import sqlalchemy as sa\n \n from twisted.internet import defer\n@@ -104,6 +106,9 @@\n def thd(conn):\n bldr_tbl = self.db.model.builders\n bm_tbl = self.db.model.builder_masters\n+ builders_tags_tbl = self.db.model.builders_tags\n+ tags_tbl = self.db.model.tags\n+\n j = bldr_tbl.outerjoin(bm_tbl)\n # if we want to filter by masterid, we must join to builder_masters\n # again, so we can still get the full set of masters for each\n@@ -123,31 +128,24 @@\n if _builderid is not None:\n q = q.where(bldr_tbl.c.id == _builderid)\n \n+ # build up a intermediate builder id -> tag names map (fixes performance issue #3396)\n+ bldr_id_to_tags = defaultdict(list)\n+ bldr_q = sa.select([builders_tags_tbl.c.builderid, tags_tbl.c.name])\n+ bldr_q = bldr_q.select_from(tags_tbl.join(builders_tags_tbl))\n+\n+ for bldr_id, tag in conn.execute(bldr_q).fetchall():\n+ bldr_id_to_tags[bldr_id].append(tag)\n+\n # now group those by builderid, aggregating by masterid\n rv = []\n last = None\n for row in conn.execute(q).fetchall():\n # pylint: disable=unsubscriptable-object\n if not last or row['id'] != last['id']:\n- last = self._thd_row2dict(conn, row)\n+ last = dict(id=row.id, name=row.name, masterids=[], description=row.description,\n+ tags=bldr_id_to_tags[row.id])\n rv.append(last)\n if row['masterid']:\n last['masterids'].append(row['masterid'])\n return rv\n return self.db.pool.do(thd)\n-\n- def _thd_row2dict(self, conn, row):\n- # get tags\n- builders_tags = self.db.model.builders_tags\n- tags = self.db.model.tags\n- from_clause = tags\n- from_clause = from_clause.join(builders_tags)\n- q = sa.select([tags.c.name],\n- (builders_tags.c.builderid == row.id)).select_from(from_clause)\n-\n- tags = [r.name for r in\n- conn.execute(q).fetchall()]\n-\n- return dict(id=row.id, name=row.name, masterids=[],\n- description=row.description,\n- tags=tags)\n", "issue": "optimize number of database queries\nbuildbot seems to be running database queries in a loop fetching data for individual builders (resulting in large number of queries). This would affect performance in case of large number of builders. I believe it would be more efficient to use a single database query to fetch data for all the builders and then filter it in python.\r\n\r\nI noticed following in postgresql logs (which indicate queries being run in a loop for each builder):\r\n\r\n2017-06-23 21:19:52.221 GMT 24738 localhost(56312) LOG: statement: SELECT tags.name \r\n FROM tags JOIN builders_tags ON tags.id = builders_tags.tagid \r\n WHERE builders_tags.builderid = 218\r\n2017-06-23 21:19:52.223 GMT 24738 localhost(56312) LOG: statement: SELECT tags.name \r\n FROM tags JOIN builders_tags ON tags.id = builders_tags.tagid \r\n WHERE builders_tags.builderid = 219\r\n2017-06-23 21:19:52.226 GMT 24738 localhost(56312) LOG: statement: SELECT tags.name \r\n FROM tags JOIN builders_tags ON tags.id = builders_tags.tagid \r\n WHERE builders_tags.builderid = 220\r\n2017-06-23 21:19:52.235 GMT 24738 localhost(56312) LOG: statement: SELECT tags.name \r\n FROM tags JOIN builders_tags ON tags.id = builders_tags.tagid \r\n WHERE builders_tags.builderid = 221\r\n\n", "before_files": [{"content": "# This file is part of Buildbot. 
Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\n\nimport sqlalchemy as sa\n\nfrom twisted.internet import defer\n\nfrom buildbot.db import base\n\n\nclass BuildersConnectorComponent(base.DBConnectorComponent):\n\n def findBuilderId(self, name, autoCreate=True):\n tbl = self.db.model.builders\n name_hash = self.hashColumns(name)\n return self.findSomethingId(\n tbl=tbl,\n whereclause=(tbl.c.name_hash == name_hash),\n insert_values=dict(\n name=name,\n name_hash=name_hash,\n ), autoCreate=autoCreate)\n\n @defer.inlineCallbacks\n def updateBuilderInfo(self, builderid, description, tags):\n # convert to tag IDs first, as necessary\n def toTagid(tag):\n if isinstance(tag, type(1)):\n return defer.succeed(tag)\n ssConnector = self.master.db.tags\n return ssConnector.findTagId(tag)\n\n tagsids = [r[1] for r in (yield defer.DeferredList(\n [toTagid(tag) for tag in tags],\n fireOnOneErrback=True,\n consumeErrors=True))]\n\n def thd(conn):\n builders_tbl = self.db.model.builders\n builders_tags_tbl = self.db.model.builders_tags\n transaction = conn.begin()\n\n q = builders_tbl.update(\n whereclause=(builders_tbl.c.id == builderid))\n conn.execute(q, description=description).close()\n # remove previous builders_tags\n conn.execute(builders_tags_tbl.delete(\n whereclause=((builders_tags_tbl.c.builderid == builderid)))).close()\n\n # add tag ids\n if tagsids:\n conn.execute(builders_tags_tbl.insert(),\n [dict(builderid=builderid, tagid=tagid)\n for tagid in tagsids]).close()\n\n transaction.commit()\n\n defer.returnValue((yield self.db.pool.do(thd)))\n\n def getBuilder(self, builderid):\n d = self.getBuilders(_builderid=builderid)\n\n @d.addCallback\n def first(bldrs):\n if bldrs:\n return bldrs[0]\n return None\n return d\n\n def addBuilderMaster(self, builderid=None, masterid=None):\n def thd(conn, no_recurse=False):\n try:\n tbl = self.db.model.builder_masters\n q = tbl.insert()\n conn.execute(q, builderid=builderid, masterid=masterid)\n except (sa.exc.IntegrityError, sa.exc.ProgrammingError):\n pass\n return self.db.pool.do(thd)\n\n def removeBuilderMaster(self, builderid=None, masterid=None):\n def thd(conn, no_recurse=False):\n tbl = self.db.model.builder_masters\n conn.execute(tbl.delete(\n whereclause=((tbl.c.builderid == builderid) &\n (tbl.c.masterid == masterid))))\n return self.db.pool.do(thd)\n\n def getBuilders(self, masterid=None, _builderid=None):\n def thd(conn):\n bldr_tbl = self.db.model.builders\n bm_tbl = self.db.model.builder_masters\n j = bldr_tbl.outerjoin(bm_tbl)\n # if we want to filter by masterid, we must join to builder_masters\n # again, so we can still get the full set of masters for each\n # builder\n if masterid is not None:\n limiting_bm_tbl = bm_tbl.alias('limiting_bm')\n j = j.join(limiting_bm_tbl,\n onclause=(bldr_tbl.c.id == limiting_bm_tbl.c.builderid))\n q = 
sa.select(\n [bldr_tbl.c.id, bldr_tbl.c.name,\n bldr_tbl.c.description, bm_tbl.c.masterid],\n from_obj=[j],\n order_by=[bldr_tbl.c.id, bm_tbl.c.masterid])\n if masterid is not None:\n # filter the masterid from the limiting table\n q = q.where(limiting_bm_tbl.c.masterid == masterid)\n if _builderid is not None:\n q = q.where(bldr_tbl.c.id == _builderid)\n\n # now group those by builderid, aggregating by masterid\n rv = []\n last = None\n for row in conn.execute(q).fetchall():\n # pylint: disable=unsubscriptable-object\n if not last or row['id'] != last['id']:\n last = self._thd_row2dict(conn, row)\n rv.append(last)\n if row['masterid']:\n last['masterids'].append(row['masterid'])\n return rv\n return self.db.pool.do(thd)\n\n def _thd_row2dict(self, conn, row):\n # get tags\n builders_tags = self.db.model.builders_tags\n tags = self.db.model.tags\n from_clause = tags\n from_clause = from_clause.join(builders_tags)\n q = sa.select([tags.c.name],\n (builders_tags.c.builderid == row.id)).select_from(from_clause)\n\n tags = [r.name for r in\n conn.execute(q).fetchall()]\n\n return dict(id=row.id, name=row.name, masterids=[],\n description=row.description,\n tags=tags)\n", "path": "master/buildbot/db/builders.py"}]}
| 2,576 | 631 |
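The buildbot patch above removes an N+1 query by fetching all builder/tag pairs with one JOIN and grouping them into an id-to-tags map in Python, so the per-builder loop does a dict lookup instead of a database round trip. The same technique, reduced to a self-contained sqlite3 example (table and column names mirror the patch but the data is invented for the demo):

```python
import sqlite3
from collections import defaultdict

conn = sqlite3.connect(':memory:')
conn.executescript("""
    CREATE TABLE builders (id INTEGER PRIMARY KEY, name TEXT);
    CREATE TABLE tags (id INTEGER PRIMARY KEY, name TEXT);
    CREATE TABLE builders_tags (builderid INTEGER, tagid INTEGER);
    INSERT INTO builders VALUES (1, 'linux'), (2, 'windows');
    INSERT INTO tags VALUES (10, 'nightly'), (11, 'release');
    INSERT INTO builders_tags VALUES (1, 10), (1, 11), (2, 10);
""")

# One query for every builder's tags instead of one query per builder.
builder_tags = defaultdict(list)
for builderid, tag in conn.execute(
        "SELECT bt.builderid, t.name "
        "FROM tags t JOIN builders_tags bt ON t.id = bt.tagid"):
    builder_tags[builderid].append(tag)

# The per-builder loop now joins in memory, not in the database.
builders = [
    {'id': bid, 'name': name, 'tags': builder_tags[bid]}
    for bid, name in conn.execute("SELECT id, name FROM builders ORDER BY id")
]
print(builders)
```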
gh_patches_debug_12388
|
rasdani/github-patches
|
git_diff
|
safe-global__safe-config-service-30
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Cache GET /safe-apps/ endpoint
The endpoint `GET /safe-apps` returns a list of safe-apps that can be cached (eg.: 1h) due to the nature of the endpoint (frequency of updating the app list is low). Updating the list should invalidate the cache.
</issue>
<code>
[start of src/safe_apps/views.py]
1 from rest_framework.generics import ListAPIView
2
3 from .models import SafeApp
4 from .serializers import SafeAppsResponseSerializer
5
6
7 class SafeAppsListView(ListAPIView):
8 serializer_class = SafeAppsResponseSerializer
9
10 def get_queryset(self):
11 queryset = SafeApp.objects.all()
12
13 network_id = self.request.query_params.get("network_id")
14 if network_id is not None and network_id.isdigit():
15 queryset = queryset.filter(networks__contains=[network_id])
16
17 return queryset
18
[end of src/safe_apps/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/safe_apps/views.py b/src/safe_apps/views.py
--- a/src/safe_apps/views.py
+++ b/src/safe_apps/views.py
@@ -1,3 +1,5 @@
+from django.utils.decorators import method_decorator
+from django.views.decorators.cache import cache_page
from rest_framework.generics import ListAPIView
from .models import SafeApp
@@ -7,6 +9,10 @@
class SafeAppsListView(ListAPIView):
serializer_class = SafeAppsResponseSerializer
+ @method_decorator(cache_page(60 * 10)) # Cache 10 minutes
+ def get(self, request, *args, **kwargs):
+ return super().get(self, request, *args, **kwargs)
+
def get_queryset(self):
queryset = SafeApp.objects.all()
|
{"golden_diff": "diff --git a/src/safe_apps/views.py b/src/safe_apps/views.py\n--- a/src/safe_apps/views.py\n+++ b/src/safe_apps/views.py\n@@ -1,3 +1,5 @@\n+from django.utils.decorators import method_decorator\n+from django.views.decorators.cache import cache_page\n from rest_framework.generics import ListAPIView\n \n from .models import SafeApp\n@@ -7,6 +9,10 @@\n class SafeAppsListView(ListAPIView):\n serializer_class = SafeAppsResponseSerializer\n \n+ @method_decorator(cache_page(60 * 10)) # Cache 10 minutes\n+ def get(self, request, *args, **kwargs):\n+ return super().get(self, request, *args, **kwargs)\n+\n def get_queryset(self):\n queryset = SafeApp.objects.all()\n", "issue": "Cache GET /safe-apps/ endpoint\nThe endpoint `GET /safe-apps` returns a list of safe-apps that can be cached (eg.: 1h) due to the nature of the endpoint (frequency of updating the app list is low). Updating the list should invalidate the cache.\n", "before_files": [{"content": "from rest_framework.generics import ListAPIView\n\nfrom .models import SafeApp\nfrom .serializers import SafeAppsResponseSerializer\n\n\nclass SafeAppsListView(ListAPIView):\n serializer_class = SafeAppsResponseSerializer\n\n def get_queryset(self):\n queryset = SafeApp.objects.all()\n\n network_id = self.request.query_params.get(\"network_id\")\n if network_id is not None and network_id.isdigit():\n queryset = queryset.filter(networks__contains=[network_id])\n\n return queryset\n", "path": "src/safe_apps/views.py"}]}
| 729 | 176 |
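The safe-config-service fix above caches the list endpoint for ten minutes with Django's `cache_page` applied through `method_decorator`. Stripped of the framework, the contract is time-based memoization plus explicit invalidation when the underlying list changes; the sketch below is plain Python (no Django) with invented names, shown only to make that contract concrete. How the project wires invalidation to updates is not part of the diff above.

```python
import time

_CACHE = {}          # key -> (expires_at, value)
TTL_SECONDS = 600    # mirrors the 10-minute cache in the patch


def cached(key, compute):
    """Return a cached value for *key*, recomputing it after TTL_SECONDS."""
    now = time.time()
    hit = _CACHE.get(key)
    if hit and hit[0] > now:
        return hit[1]
    value = compute()
    _CACHE[key] = (now + TTL_SECONDS, value)
    return value


def invalidate(key):
    """Drop a cached entry, e.g. right after the safe-app list is updated."""
    _CACHE.pop(key, None)


_SAFE_APPS = [{'name': 'app-1', 'networks': [1]}]


def list_safe_apps():
    # Stand-in for the expensive query behind GET /safe-apps/.
    return list(_SAFE_APPS)


print(cached('safe-apps', list_safe_apps))    # computed and stored
_SAFE_APPS.append({'name': 'app-2', 'networks': [4]})
print(cached('safe-apps', list_safe_apps))    # still the stale cached copy
invalidate('safe-apps')                       # e.g. triggered on admin save
print(cached('safe-apps', list_safe_apps))    # recomputed, now includes app-2
```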
gh_patches_debug_37213
|
rasdani/github-patches
|
git_diff
|
espnet__espnet-708
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
merge_json behavior when no labels.
@kamo-naoyuki, I've found that there is a case when we don't have any label output in some utterances for the switchboard evaluation data (`rt03`). But current `data2json.sh` cannot allow such cases
https://github.com/espnet/espnet/blob/master/utils/merge_scp2json.py#L210-L214
Can you relax this?
This check is good for the training data, but it excludes such utterances in the evaluation set, and makes the evaluation different from other benchmarks.
</issue>
<code>
[start of utils/merge_scp2json.py]
1 #!/usr/bin/env python
2 # encoding: utf-8
3
4 from __future__ import print_function
5 from __future__ import unicode_literals
6
7 import argparse
8 import codecs
9 from io import open
10 import json
11 import logging
12 import sys
13
14 from espnet.utils.cli_utils import get_commandline_args
15
16 PY2 = sys.version_info[0] == 2
17 sys.stdin = codecs.getreader('utf-8')(sys.stdin if PY2 else sys.stdin.buffer)
18 sys.stdout = codecs.getwriter('utf-8')(
19 sys.stdout if PY2 else sys.stdout.buffer)
20
21
22 # Special types:
23 def shape(x):
24 """Change str to List[int]
25
26 >>> shape('3,5')
27 [3, 5]
28 >>> shape(' [3, 5] ')
29 [3, 5]
30
31 """
32
33 # x: ' [3, 5] ' -> '3, 5'
34 x = x.strip()
35 if x[0] == '[':
36 x = x[1:]
37 if x[-1] == ']':
38 x = x[:-1]
39
40 return list(map(int, x.split(',')))
41
42
43 if __name__ == '__main__':
44 parser = argparse.ArgumentParser(
45 description='Given each file paths with such format as '
46 '<key>:<file>:<type>. type> can be omitted and the default '
47 'is "str". e.g. {} '
48 '--input-scps feat:data/feats.scp shape:data/utt2feat_shape:shape '
49 '--input-scps feat:data/feats2.scp shape:data/utt2feat2_shape:shape '
50 '--output-scps text:data/text shape:data/utt2text_shape:shape '
51 '--scps utt2spk:data/utt2spk'.format(sys.argv[0]),
52 formatter_class=argparse.ArgumentDefaultsHelpFormatter)
53 parser.add_argument('--input-scps', type=str, nargs='*', action='append',
54 default=[], help='Json files for the inputs')
55 parser.add_argument('--output-scps', type=str, nargs='*', action='append',
56 default=[], help='Json files for the outputs')
57 parser.add_argument('--scps', type=str, nargs='+', default=[],
58 help='The json files except for the input and outputs')
59 parser.add_argument('--verbose', '-V', default=1, type=int,
60 help='Verbose option')
61 parser.add_argument('--out', '-O', type=str,
62 help='The output filename. '
63 'If omitted, then output to sys.stdout')
64
65 args = parser.parse_args()
66 args.scps = [args.scps]
67
68 # logging info
69 logfmt = "%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s"
70 if args.verbose > 0:
71 logging.basicConfig(level=logging.INFO, format=logfmt)
72 else:
73 logging.basicConfig(level=logging.WARN, format=logfmt)
74 logging.info(get_commandline_args())
75
76 # List[List[Tuple[str, str, Callable[[str], Any], str, str]]]
77 input_infos = []
78 output_infos = []
79 infos = []
80 for lis_list, key_scps_list in [(input_infos, args.input_scps),
81 (output_infos, args.output_scps),
82 (infos, args.scps)]:
83 for key_scps in key_scps_list:
84 lis = []
85 for key_scp in key_scps:
86 sps = key_scp.split(':')
87 if len(sps) == 2:
88 key, scp = sps
89 type_func = None
90 type_func_str = 'none'
91 elif len(sps) == 3:
92 key, scp, type_func_str = sps
93 fail = False
94
95 try:
96 # type_func: Callable[[str], Any]
97 # e.g. type_func_str = "int" -> type_func = int
98 type_func = eval(type_func_str)
99 except Exception:
100 raise RuntimeError(
101 'Unknown type: {}'.format(type_func_str))
102
103 if not callable(type_func):
104 raise RuntimeError(
105 'Unknown type: {}'.format(type_func_str))
106
107 else:
108 raise RuntimeError(
109 'Format <key>:<filepath> '
110 'or <key>:<filepath>:<type> '
111 'e.g. feat:data/feat.scp '
112 'or shape:data/feat.scp:shape: {}'.format(key_scp))
113
114 for item in lis:
115 if key == item[0]:
116 raise RuntimeError('The key "{}" is duplicated: {} {}'
117 .format(key, item[3], key_scp))
118
119 lis.append((key, scp, type_func, key_scp, type_func_str))
120 lis_list.append(lis)
121
122 # Open scp files
123 input_fscps = [[open(i[1], 'r', encoding='utf-8')
124 for i in il] for il in input_infos]
125 output_fscps = [[open(i[1], 'r', encoding='utf-8') for i in il]
126 for il in output_infos]
127 fscps = [[open(i[1], 'r', encoding='utf-8') for i in il] for il in infos]
128
129 # Note(kamo): What is done here?
130 # The final goal is creating a JSON file such as.
131 # {
132 # "utts": {
133 # "sample_id1": {(omitted)},
134 # "sample_id2": {(omitted)},
135 # ....
136 # }
137 # }
138 #
139 # To reduce memory usage, reading the input text files for each lines
140 # and writing JSON elements per samples.
141 if args.out is None:
142 out = sys.stdout
143 else:
144 out = open(args.out, 'w', encoding='utf-8')
145 out.write('{\n "utts": {\n')
146 nutt = 0
147 while True:
148 nutt += 1
149 # List[List[str]]
150 input_lines = [[f.readline() for f in fl] for fl in input_fscps]
151 output_lines = [[f.readline() for f in fl] for fl in output_fscps]
152 lines = [[f.readline() for f in fl] for fl in fscps]
153
154 # Get the first line
155 concat = sum(input_lines + output_lines + lines, [])
156 if len(concat) == 0:
157 break
158 first = concat[0]
159
160 # Sanity check: Must be sorted by the first column and have same keys
161 count = 0
162 for ls_list in (input_lines, output_lines, lines):
163 for ls in ls_list:
164 for line in ls:
165 if line == ''or first == '':
166 if line != first:
167 concat = sum(
168 input_infos + output_infos + infos, [])
169 raise RuntimeError(
170 'The number of lines mismatch '
171 'between: "{}" and "{}"'
172 .format(concat[0][1], concat[count][1]))
173
174 elif line.split()[0] != first.split()[0]:
175 concat = sum(input_infos + output_infos + infos, [])
176 raise RuntimeError(
177 'The keys are mismatch at {}th line '
178 'between "{}" and "{}":\n>>> {}\n>>> {}'
179 .format(nutt, concat[0][1], concat[count][1],
180 first.rstrip(), line.rstrip()))
181 count += 1
182
183 # The end of file
184 if first == '':
185 if nutt != 1:
186 out.write('\n')
187 break
188 if nutt != 1:
189 out.write(',\n')
190
191 entry = {}
192 for inout, _lines, _infos in [('input', input_lines, input_infos),
193 ('output', output_lines, output_infos),
194 ('other', lines, infos)]:
195
196 lis = []
197 for idx, (line_list, info_list) \
198 in enumerate(zip(_lines, _infos), 1):
199 if inout == 'input':
200 d = {'name': 'input{}'.format(idx)}
201 elif inout == 'output':
202 d = {'name': 'target{}'.format(idx)}
203 else:
204 d = {}
205
206 # info_list: List[Tuple[str, str, Callable]]
207 # line_list: List[str]
208 for line, info in zip(line_list, info_list):
209 sps = line.split(None, 1)
210 if len(sps) < 2:
211 raise RuntimeError(
212 'Format error {}th line in {}: '
213 ' Expecting "<key> <value>":\n>>> {}'
214 .format(nutt, info[1], line))
215 uttid, value = sps
216 key = info[0]
217 type_func = info[2]
218 value = value.rstrip()
219
220 if type_func is not None:
221 try:
222 # type_func: Callable[[str], Any]
223 value = type_func(value)
224 except Exception:
225 logging.error('"{}" is an invalid function '
226 'for the {} th line in {}: \n>>> {}'
227 .format(info[4], nutt, info[1], line))
228 raise
229
230 d[key] = value
231 lis.append(d)
232
233 if inout != 'other':
234 entry[inout] = lis
235 else:
236 # If key == 'other'. only has the first item
237 entry.update(lis[0])
238
239 entry = json.dumps(entry, indent=4, ensure_ascii=False,
240 sort_keys=True, separators=(',', ': '))
241 # Add indent
242 indent = ' ' * 2
243 entry = ('\n' + indent).join(entry.split('\n'))
244
245 uttid = first.split()[0]
246 out.write(' "{}": {}'.format(uttid, entry))
247
248 out.write(' }\n}\n')
249
250 logging.info('{} entries in {}'.format(nutt, out.name))
251
[end of utils/merge_scp2json.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/utils/merge_scp2json.py b/utils/merge_scp2json.py
--- a/utils/merge_scp2json.py
+++ b/utils/merge_scp2json.py
@@ -6,6 +6,7 @@
import argparse
import codecs
+from distutils.util import strtobool
from io import open
import json
import logging
@@ -58,6 +59,9 @@
help='The json files except for the input and outputs')
parser.add_argument('--verbose', '-V', default=1, type=int,
help='Verbose option')
+ parser.add_argument('--allow-one-column', type=strtobool, default=False,
+ help='Allow one column in input scp files. '
+ 'In this case, the value will be empty string.')
parser.add_argument('--out', '-O', type=str,
help='The output filename. '
'If omitted, then output to sys.stdout')
@@ -162,7 +166,7 @@
for ls_list in (input_lines, output_lines, lines):
for ls in ls_list:
for line in ls:
- if line == ''or first == '':
+ if line == '' or first == '':
if line != first:
concat = sum(
input_infos + output_infos + infos, [])
@@ -208,11 +212,16 @@
for line, info in zip(line_list, info_list):
sps = line.split(None, 1)
if len(sps) < 2:
- raise RuntimeError(
- 'Format error {}th line in {}: '
- ' Expecting "<key> <value>":\n>>> {}'
- .format(nutt, info[1], line))
- uttid, value = sps
+ if not args.allow_one_column:
+ raise RuntimeError(
+ 'Format error {}th line in {}: '
+ ' Expecting "<key> <value>":\n>>> {}'
+ .format(nutt, info[1], line))
+ uttid = sps[0]
+ value = ''
+ else:
+ uttid, value = sps
+
key = info[0]
type_func = info[2]
value = value.rstrip()
|
{"golden_diff": "diff --git a/utils/merge_scp2json.py b/utils/merge_scp2json.py\n--- a/utils/merge_scp2json.py\n+++ b/utils/merge_scp2json.py\n@@ -6,6 +6,7 @@\n \n import argparse\n import codecs\n+from distutils.util import strtobool\n from io import open\n import json\n import logging\n@@ -58,6 +59,9 @@\n help='The json files except for the input and outputs')\n parser.add_argument('--verbose', '-V', default=1, type=int,\n help='Verbose option')\n+ parser.add_argument('--allow-one-column', type=strtobool, default=False,\n+ help='Allow one column in input scp files. '\n+ 'In this case, the value will be empty string.')\n parser.add_argument('--out', '-O', type=str,\n help='The output filename. '\n 'If omitted, then output to sys.stdout')\n@@ -162,7 +166,7 @@\n for ls_list in (input_lines, output_lines, lines):\n for ls in ls_list:\n for line in ls:\n- if line == ''or first == '':\n+ if line == '' or first == '':\n if line != first:\n concat = sum(\n input_infos + output_infos + infos, [])\n@@ -208,11 +212,16 @@\n for line, info in zip(line_list, info_list):\n sps = line.split(None, 1)\n if len(sps) < 2:\n- raise RuntimeError(\n- 'Format error {}th line in {}: '\n- ' Expecting \"<key> <value>\":\\n>>> {}'\n- .format(nutt, info[1], line))\n- uttid, value = sps\n+ if not args.allow_one_column:\n+ raise RuntimeError(\n+ 'Format error {}th line in {}: '\n+ ' Expecting \"<key> <value>\":\\n>>> {}'\n+ .format(nutt, info[1], line))\n+ uttid = sps[0]\n+ value = ''\n+ else:\n+ uttid, value = sps\n+\n key = info[0]\n type_func = info[2]\n value = value.rstrip()\n", "issue": "merge_json behavior when no labels.\n@kamo-naoyuki, I've found that there is a case when we don't have any label output in some utterances for the switchboard evaluation data (`rt03`). But current `data2json.sh` cannot allow such cases \r\nhttps://github.com/espnet/espnet/blob/master/utils/merge_scp2json.py#L210-L214\r\nCan you relax this?\r\nThis check is good for the training data, but it excludes such utterances in the evaluation set, and makes the evaluation different from other benchmarks.\n", "before_files": [{"content": "#!/usr/bin/env python\n# encoding: utf-8\n\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport argparse\nimport codecs\nfrom io import open\nimport json\nimport logging\nimport sys\n\nfrom espnet.utils.cli_utils import get_commandline_args\n\nPY2 = sys.version_info[0] == 2\nsys.stdin = codecs.getreader('utf-8')(sys.stdin if PY2 else sys.stdin.buffer)\nsys.stdout = codecs.getwriter('utf-8')(\n sys.stdout if PY2 else sys.stdout.buffer)\n\n\n# Special types:\ndef shape(x):\n \"\"\"Change str to List[int]\n\n >>> shape('3,5')\n [3, 5]\n >>> shape(' [3, 5] ')\n [3, 5]\n\n \"\"\"\n\n # x: ' [3, 5] ' -> '3, 5'\n x = x.strip()\n if x[0] == '[':\n x = x[1:]\n if x[-1] == ']':\n x = x[:-1]\n\n return list(map(int, x.split(',')))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description='Given each file paths with such format as '\n '<key>:<file>:<type>. type> can be omitted and the default '\n 'is \"str\". e.g. 
{} '\n '--input-scps feat:data/feats.scp shape:data/utt2feat_shape:shape '\n '--input-scps feat:data/feats2.scp shape:data/utt2feat2_shape:shape '\n '--output-scps text:data/text shape:data/utt2text_shape:shape '\n '--scps utt2spk:data/utt2spk'.format(sys.argv[0]),\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('--input-scps', type=str, nargs='*', action='append',\n default=[], help='Json files for the inputs')\n parser.add_argument('--output-scps', type=str, nargs='*', action='append',\n default=[], help='Json files for the outputs')\n parser.add_argument('--scps', type=str, nargs='+', default=[],\n help='The json files except for the input and outputs')\n parser.add_argument('--verbose', '-V', default=1, type=int,\n help='Verbose option')\n parser.add_argument('--out', '-O', type=str,\n help='The output filename. '\n 'If omitted, then output to sys.stdout')\n\n args = parser.parse_args()\n args.scps = [args.scps]\n\n # logging info\n logfmt = \"%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s\"\n if args.verbose > 0:\n logging.basicConfig(level=logging.INFO, format=logfmt)\n else:\n logging.basicConfig(level=logging.WARN, format=logfmt)\n logging.info(get_commandline_args())\n\n # List[List[Tuple[str, str, Callable[[str], Any], str, str]]]\n input_infos = []\n output_infos = []\n infos = []\n for lis_list, key_scps_list in [(input_infos, args.input_scps),\n (output_infos, args.output_scps),\n (infos, args.scps)]:\n for key_scps in key_scps_list:\n lis = []\n for key_scp in key_scps:\n sps = key_scp.split(':')\n if len(sps) == 2:\n key, scp = sps\n type_func = None\n type_func_str = 'none'\n elif len(sps) == 3:\n key, scp, type_func_str = sps\n fail = False\n\n try:\n # type_func: Callable[[str], Any]\n # e.g. type_func_str = \"int\" -> type_func = int\n type_func = eval(type_func_str)\n except Exception:\n raise RuntimeError(\n 'Unknown type: {}'.format(type_func_str))\n\n if not callable(type_func):\n raise RuntimeError(\n 'Unknown type: {}'.format(type_func_str))\n\n else:\n raise RuntimeError(\n 'Format <key>:<filepath> '\n 'or <key>:<filepath>:<type> '\n 'e.g. 
feat:data/feat.scp '\n 'or shape:data/feat.scp:shape: {}'.format(key_scp))\n\n for item in lis:\n if key == item[0]:\n raise RuntimeError('The key \"{}\" is duplicated: {} {}'\n .format(key, item[3], key_scp))\n\n lis.append((key, scp, type_func, key_scp, type_func_str))\n lis_list.append(lis)\n\n # Open scp files\n input_fscps = [[open(i[1], 'r', encoding='utf-8')\n for i in il] for il in input_infos]\n output_fscps = [[open(i[1], 'r', encoding='utf-8') for i in il]\n for il in output_infos]\n fscps = [[open(i[1], 'r', encoding='utf-8') for i in il] for il in infos]\n\n # Note(kamo): What is done here?\n # The final goal is creating a JSON file such as.\n # {\n # \"utts\": {\n # \"sample_id1\": {(omitted)},\n # \"sample_id2\": {(omitted)},\n # ....\n # }\n # }\n #\n # To reduce memory usage, reading the input text files for each lines\n # and writing JSON elements per samples.\n if args.out is None:\n out = sys.stdout\n else:\n out = open(args.out, 'w', encoding='utf-8')\n out.write('{\\n \"utts\": {\\n')\n nutt = 0\n while True:\n nutt += 1\n # List[List[str]]\n input_lines = [[f.readline() for f in fl] for fl in input_fscps]\n output_lines = [[f.readline() for f in fl] for fl in output_fscps]\n lines = [[f.readline() for f in fl] for fl in fscps]\n\n # Get the first line\n concat = sum(input_lines + output_lines + lines, [])\n if len(concat) == 0:\n break\n first = concat[0]\n\n # Sanity check: Must be sorted by the first column and have same keys\n count = 0\n for ls_list in (input_lines, output_lines, lines):\n for ls in ls_list:\n for line in ls:\n if line == ''or first == '':\n if line != first:\n concat = sum(\n input_infos + output_infos + infos, [])\n raise RuntimeError(\n 'The number of lines mismatch '\n 'between: \"{}\" and \"{}\"'\n .format(concat[0][1], concat[count][1]))\n\n elif line.split()[0] != first.split()[0]:\n concat = sum(input_infos + output_infos + infos, [])\n raise RuntimeError(\n 'The keys are mismatch at {}th line '\n 'between \"{}\" and \"{}\":\\n>>> {}\\n>>> {}'\n .format(nutt, concat[0][1], concat[count][1],\n first.rstrip(), line.rstrip()))\n count += 1\n\n # The end of file\n if first == '':\n if nutt != 1:\n out.write('\\n')\n break\n if nutt != 1:\n out.write(',\\n')\n\n entry = {}\n for inout, _lines, _infos in [('input', input_lines, input_infos),\n ('output', output_lines, output_infos),\n ('other', lines, infos)]:\n\n lis = []\n for idx, (line_list, info_list) \\\n in enumerate(zip(_lines, _infos), 1):\n if inout == 'input':\n d = {'name': 'input{}'.format(idx)}\n elif inout == 'output':\n d = {'name': 'target{}'.format(idx)}\n else:\n d = {}\n\n # info_list: List[Tuple[str, str, Callable]]\n # line_list: List[str]\n for line, info in zip(line_list, info_list):\n sps = line.split(None, 1)\n if len(sps) < 2:\n raise RuntimeError(\n 'Format error {}th line in {}: '\n ' Expecting \"<key> <value>\":\\n>>> {}'\n .format(nutt, info[1], line))\n uttid, value = sps\n key = info[0]\n type_func = info[2]\n value = value.rstrip()\n\n if type_func is not None:\n try:\n # type_func: Callable[[str], Any]\n value = type_func(value)\n except Exception:\n logging.error('\"{}\" is an invalid function '\n 'for the {} th line in {}: \\n>>> {}'\n .format(info[4], nutt, info[1], line))\n raise\n\n d[key] = value\n lis.append(d)\n\n if inout != 'other':\n entry[inout] = lis\n else:\n # If key == 'other'. 
only has the first item\n entry.update(lis[0])\n\n entry = json.dumps(entry, indent=4, ensure_ascii=False,\n sort_keys=True, separators=(',', ': '))\n # Add indent\n indent = ' ' * 2\n entry = ('\\n' + indent).join(entry.split('\\n'))\n\n uttid = first.split()[0]\n out.write(' \"{}\": {}'.format(uttid, entry))\n\n out.write(' }\\n}\\n')\n\n logging.info('{} entries in {}'.format(nutt, out.name))\n", "path": "utils/merge_scp2json.py"}]}
| 3,438 | 502 |
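The espnet change above relaxes the strict `<key> <value>` parsing so evaluation utterances with an empty label survive, gated behind a new `--allow-one-column` flag. A condensed, self-contained version of just that parsing rule is sketched below; the function name and sample utterance ids are illustrative, not espnet code.

```python
def parse_scp_line(line, allow_one_column=False):
    """Split an scp line into (utt_id, value); value may be empty if allowed."""
    parts = line.rstrip('\n').split(None, 1)
    if len(parts) < 2:
        if not allow_one_column:
            raise ValueError('expecting "<key> <value>": %r' % line)
        return parts[0], ''  # keep the utterance, with an empty label
    return parts[0], parts[1]


if __name__ == '__main__':
    print(parse_scp_line('sw02001-A_0000-0010 hello world'))
    print(parse_scp_line('sw04890-B_0042-0050\n', allow_one_column=True))
    try:
        parse_scp_line('sw04890-B_0042-0050')
    except ValueError as exc:
        print('strict mode rejects it:', exc)
```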
gh_patches_debug_7339
|
rasdani/github-patches
|
git_diff
|
OCHA-DAP__hdx-ckan-1684
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Ebola Static Page: static page extension
Create configuration and deploy of static page extention/plugin.
This is a blocker for the other issues.
- populate the list of datasets of the controller
</issue>
<code>
[start of ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py]
1 '''
2 Created on Nov 3, 2014
3
4 @author: alexandru-m-g
5 '''
6
7 import datetime as dt
8
9 import pylons.config as config
10 import logging
11
12 import ckan.lib.base as base
13 import ckan.logic as logic
14 import ckan.model as model
15 import ckan.common as common
16 import ckan.lib.helpers as h
17
18 render = base.render
19 get_action = logic.get_action
20 c = common.c
21 request = common.request
22
23 log = logging.getLogger(__name__)
24
25
26 class CrisisController(base.BaseController):
27
28 def show(self):
29
30 context = {'model': model, 'session': model.Session,
31 'user': c.user or c.author, 'for_view': True,
32 'auth_user_obj': c.userobj}
33
34 datastore_resource_id = self._get_datastore_resource_id(
35 context, config.get('hdx.crisis.ebola_dataset', None), config.get('hdx.crisis.ebola_resource_title', None))
36 if datastore_resource_id:
37 c.top_line_items = self._get_top_line_items(
38 context, datastore_resource_id)
39
40 limit = 25
41 c.q = u'ebola'
42
43 page = int(request.params.get('page', 1))
44 data_dict = {'sort': u'metadata_modified desc',
45 'fq': '+dataset_type:dataset',
46 'rows': limit,
47 'q': c.q,
48 'start': (page - 1) * limit
49 }
50 query = get_action("package_search")(context, data_dict)
51
52 def pager_url(q=None, page=None):
53 return h.url_for('show_crisis', page=page)
54
55 c.page = h.Page(
56 collection=query['results'],
57 page=page,
58 url=pager_url,
59 item_count=query['count'],
60 items_per_page=limit
61 )
62 c.items = query['results']
63 c.item_count = query['count']
64
65 c.other_links = {}
66 c.other_links['show_more'] = h.url_for(
67 "search", **{'q': u'ebola', 'sort': u'metadata_modified desc',
68 'ext_indicator': '0'})
69
70 return render('crisis/crisis.html')
71
72 def _get_top_line_items(self, context, datastore_resource_id):
73 result = get_action('datastore_search')(
74 context, {'resource_id': datastore_resource_id})
75 if 'records' in result:
76 for r in result['records']:
77 d = dt.datetime.strptime(
78 r[u'latest_date'], '%Y-%m-%dT%H:%M:%S')
79 r[u'latest_date'] = dt.datetime.strftime(d, '%d-%b-%Y')
80 return result['records']
81 return []
82
83 def _get_datastore_resource_id(self, context, dataset_id, resource_name):
84 try:
85 dataset = get_action('package_show')(
86 context, {'id': dataset_id})
87
88 if 'resources' in dataset:
89 for r in dataset['resources']:
90 if 'datastore_active' in r and r['datastore_active'] \
91 and r['name'] == resource_name:
92 return r['id']
93 re
94 except:
95 log.warning('No dataset with id ' + dataset_id)
96 return None
97
[end of ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py b/ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py
--- a/ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py
+++ b/ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py
@@ -90,7 +90,7 @@
if 'datastore_active' in r and r['datastore_active'] \
and r['name'] == resource_name:
return r['id']
- re
+ return None
except:
log.warning('No dataset with id ' + dataset_id)
return None
|
{"golden_diff": "diff --git a/ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py b/ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py\n--- a/ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py\n+++ b/ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py\n@@ -90,7 +90,7 @@\n if 'datastore_active' in r and r['datastore_active'] \\\n and r['name'] == resource_name:\n return r['id']\n- re\n+ return None\n except:\n log.warning('No dataset with id ' + dataset_id)\n return None\n", "issue": "Ebola Static Page: static page extension\nCreate configuration and deploy of static page extention/plugin.\nThis is a blocker for the other issues.\n- populate the list of datasets of the controller\n\n", "before_files": [{"content": "'''\nCreated on Nov 3, 2014\n\n@author: alexandru-m-g\n'''\n\nimport datetime as dt\n\nimport pylons.config as config\nimport logging\n\nimport ckan.lib.base as base\nimport ckan.logic as logic\nimport ckan.model as model\nimport ckan.common as common\nimport ckan.lib.helpers as h\n\nrender = base.render\nget_action = logic.get_action\nc = common.c\nrequest = common.request\n\nlog = logging.getLogger(__name__)\n\n\nclass CrisisController(base.BaseController):\n\n def show(self):\n\n context = {'model': model, 'session': model.Session,\n 'user': c.user or c.author, 'for_view': True,\n 'auth_user_obj': c.userobj}\n\n datastore_resource_id = self._get_datastore_resource_id(\n context, config.get('hdx.crisis.ebola_dataset', None), config.get('hdx.crisis.ebola_resource_title', None))\n if datastore_resource_id:\n c.top_line_items = self._get_top_line_items(\n context, datastore_resource_id)\n\n limit = 25\n c.q = u'ebola'\n\n page = int(request.params.get('page', 1))\n data_dict = {'sort': u'metadata_modified desc',\n 'fq': '+dataset_type:dataset',\n 'rows': limit,\n 'q': c.q,\n 'start': (page - 1) * limit\n }\n query = get_action(\"package_search\")(context, data_dict)\n\n def pager_url(q=None, page=None):\n return h.url_for('show_crisis', page=page)\n\n c.page = h.Page(\n collection=query['results'],\n page=page,\n url=pager_url,\n item_count=query['count'],\n items_per_page=limit\n )\n c.items = query['results']\n c.item_count = query['count']\n\n c.other_links = {}\n c.other_links['show_more'] = h.url_for(\n \"search\", **{'q': u'ebola', 'sort': u'metadata_modified desc',\n 'ext_indicator': '0'})\n\n return render('crisis/crisis.html')\n\n def _get_top_line_items(self, context, datastore_resource_id):\n result = get_action('datastore_search')(\n context, {'resource_id': datastore_resource_id})\n if 'records' in result:\n for r in result['records']:\n d = dt.datetime.strptime(\n r[u'latest_date'], '%Y-%m-%dT%H:%M:%S')\n r[u'latest_date'] = dt.datetime.strftime(d, '%d-%b-%Y')\n return result['records']\n return []\n\n def _get_datastore_resource_id(self, context, dataset_id, resource_name):\n try:\n dataset = get_action('package_show')(\n context, {'id': dataset_id})\n\n if 'resources' in dataset:\n for r in dataset['resources']:\n if 'datastore_active' in r and r['datastore_active'] \\\n and r['name'] == resource_name:\n return r['id']\n re\n except:\n log.warning('No dataset with id ' + dataset_id)\n return None\n", "path": "ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py"}]}
| 1,500 | 182 |
gh_patches_debug_5006
|
rasdani/github-patches
|
git_diff
|
Textualize__textual-2621
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Pushing a screen should send Leave message
If you have an action that opens a screen, it leaves the footer stuck in the highlight state.
I think we need to call `_set_mouse_over(None)` on the current screen when pushing another screen.
</issue>
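For orientation, here is a minimal sketch of the approach proposed in the issue above — clearing the outgoing screen's hover state at push time. The `App` class, its `_screen_stack` attribute, and the `_set_mouse_over(None)` call are stand-ins for Textual's internals and should be read as assumptions; the patch shown further down in this entry takes a different route and simply drops the `is_current` guard in `Footer._on_leave`.

```python
# Hypothetical stand-in for textual.app.App, reduced to the hook discussed above.
class App:
    def __init__(self):
        self._screen_stack = []  # assumed name for the real screen stack

    @property
    def screen(self):
        return self._screen_stack[-1] if self._screen_stack else None

    def push_screen(self, screen) -> None:
        outgoing = self.screen
        if outgoing is not None:
            # Forget whichever widget is under the mouse so widgets such as
            # Footer do not stay stuck in their highlight state.
            outgoing._set_mouse_over(None)
        self._screen_stack.append(screen)
```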
<code>
[start of src/textual/widgets/_footer.py]
1 from __future__ import annotations
2
3 from collections import defaultdict
4 from typing import ClassVar, Optional
5
6 import rich.repr
7 from rich.console import RenderableType
8 from rich.text import Text
9
10 from .. import events
11 from ..reactive import reactive
12 from ..widget import Widget
13
14
15 @rich.repr.auto
16 class Footer(Widget):
17 """A simple footer widget which docks itself to the bottom of the parent container."""
18
19 COMPONENT_CLASSES: ClassVar[set[str]] = {
20 "footer--description",
21 "footer--key",
22 "footer--highlight",
23 "footer--highlight-key",
24 }
25 """
26 | Class | Description |
27 | :- | :- |
28 | `footer--description` | Targets the descriptions of the key bindings. |
29 | `footer--highlight` | Targets the highlighted key binding. |
30 | `footer--highlight-key` | Targets the key portion of the highlighted key binding. |
31 | `footer--key` | Targets the key portions of the key bindings. |
32 """
33
34 DEFAULT_CSS = """
35 Footer {
36 background: $accent;
37 color: $text;
38 dock: bottom;
39 height: 1;
40 }
41 Footer > .footer--highlight {
42 background: $accent-darken-1;
43 }
44
45 Footer > .footer--highlight-key {
46 background: $secondary;
47 text-style: bold;
48 }
49
50 Footer > .footer--key {
51 text-style: bold;
52 background: $accent-darken-2;
53 }
54 """
55
56 highlight_key: reactive[str | None] = reactive[Optional[str]](None)
57
58 def __init__(self) -> None:
59 super().__init__()
60 self._key_text: Text | None = None
61 self.auto_links = False
62
63 async def watch_highlight_key(self) -> None:
64 """If highlight key changes we need to regenerate the text."""
65 self._key_text = None
66 self.refresh()
67
68 def _on_mount(self, _: events.Mount) -> None:
69 self.watch(self.screen, "focused", self._bindings_changed)
70 self.watch(self.screen, "stack_updates", self._bindings_changed)
71
72 def _bindings_changed(self, _: Widget | None) -> None:
73 self._key_text = None
74 self.refresh()
75
76 def _on_mouse_move(self, event: events.MouseMove) -> None:
77 """Store any key we are moving over."""
78 self.highlight_key = event.style.meta.get("key")
79
80 def _on_leave(self, _: events.Leave) -> None:
81 """Clear any highlight when the mouse leaves the widget"""
82 if self.screen.is_current:
83 self.highlight_key = None
84
85 def __rich_repr__(self) -> rich.repr.Result:
86 yield from super().__rich_repr__()
87
88 def _make_key_text(self) -> Text:
89 """Create text containing all the keys."""
90 base_style = self.rich_style
91 text = Text(
92 style=self.rich_style,
93 no_wrap=True,
94 overflow="ellipsis",
95 justify="left",
96 end="",
97 )
98 highlight_style = self.get_component_rich_style("footer--highlight")
99 highlight_key_style = self.get_component_rich_style("footer--highlight-key")
100 key_style = self.get_component_rich_style("footer--key")
101 description_style = self.get_component_rich_style("footer--description")
102
103 bindings = [
104 binding
105 for (_, binding) in self.app.namespace_bindings.values()
106 if binding.show
107 ]
108
109 action_to_bindings = defaultdict(list)
110 for binding in bindings:
111 action_to_bindings[binding.action].append(binding)
112
113 for _, bindings in action_to_bindings.items():
114 binding = bindings[0]
115 if binding.key_display is None:
116 key_display = self.app.get_key_display(binding.key)
117 if key_display is None:
118 key_display = binding.key.upper()
119 else:
120 key_display = binding.key_display
121 hovered = self.highlight_key == binding.key
122 key_text = Text.assemble(
123 (f" {key_display} ", highlight_key_style if hovered else key_style),
124 (
125 f" {binding.description} ",
126 highlight_style if hovered else base_style + description_style,
127 ),
128 meta={
129 "@click": f"app.check_bindings('{binding.key}')",
130 "key": binding.key,
131 },
132 )
133 text.append_text(key_text)
134 return text
135
136 def notify_style_update(self) -> None:
137 self._key_text = None
138
139 def post_render(self, renderable):
140 return renderable
141
142 def render(self) -> RenderableType:
143 if self._key_text is None:
144 self._key_text = self._make_key_text()
145 return self._key_text
146
[end of src/textual/widgets/_footer.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/textual/widgets/_footer.py b/src/textual/widgets/_footer.py
--- a/src/textual/widgets/_footer.py
+++ b/src/textual/widgets/_footer.py
@@ -79,8 +79,7 @@
def _on_leave(self, _: events.Leave) -> None:
"""Clear any highlight when the mouse leaves the widget"""
- if self.screen.is_current:
- self.highlight_key = None
+ self.highlight_key = None
def __rich_repr__(self) -> rich.repr.Result:
yield from super().__rich_repr__()
|
{"golden_diff": "diff --git a/src/textual/widgets/_footer.py b/src/textual/widgets/_footer.py\n--- a/src/textual/widgets/_footer.py\n+++ b/src/textual/widgets/_footer.py\n@@ -79,8 +79,7 @@\n \n def _on_leave(self, _: events.Leave) -> None:\n \"\"\"Clear any highlight when the mouse leaves the widget\"\"\"\n- if self.screen.is_current:\n- self.highlight_key = None\n+ self.highlight_key = None\n \n def __rich_repr__(self) -> rich.repr.Result:\n yield from super().__rich_repr__()\n", "issue": "Pushing a screen should send Leave message\nIf you have an action that opens a screen, it leaves the footer stuck in the highlight state.\n\nI think we need to call `_set_mouse_over(None)` on the current screen when pushing another screen.\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom collections import defaultdict\nfrom typing import ClassVar, Optional\n\nimport rich.repr\nfrom rich.console import RenderableType\nfrom rich.text import Text\n\nfrom .. import events\nfrom ..reactive import reactive\nfrom ..widget import Widget\n\n\[email protected]\nclass Footer(Widget):\n \"\"\"A simple footer widget which docks itself to the bottom of the parent container.\"\"\"\n\n COMPONENT_CLASSES: ClassVar[set[str]] = {\n \"footer--description\",\n \"footer--key\",\n \"footer--highlight\",\n \"footer--highlight-key\",\n }\n \"\"\"\n | Class | Description |\n | :- | :- |\n | `footer--description` | Targets the descriptions of the key bindings. |\n | `footer--highlight` | Targets the highlighted key binding. |\n | `footer--highlight-key` | Targets the key portion of the highlighted key binding. |\n | `footer--key` | Targets the key portions of the key bindings. |\n \"\"\"\n\n DEFAULT_CSS = \"\"\"\n Footer {\n background: $accent;\n color: $text;\n dock: bottom;\n height: 1;\n }\n Footer > .footer--highlight {\n background: $accent-darken-1;\n }\n\n Footer > .footer--highlight-key {\n background: $secondary;\n text-style: bold;\n }\n\n Footer > .footer--key {\n text-style: bold;\n background: $accent-darken-2;\n }\n \"\"\"\n\n highlight_key: reactive[str | None] = reactive[Optional[str]](None)\n\n def __init__(self) -> None:\n super().__init__()\n self._key_text: Text | None = None\n self.auto_links = False\n\n async def watch_highlight_key(self) -> None:\n \"\"\"If highlight key changes we need to regenerate the text.\"\"\"\n self._key_text = None\n self.refresh()\n\n def _on_mount(self, _: events.Mount) -> None:\n self.watch(self.screen, \"focused\", self._bindings_changed)\n self.watch(self.screen, \"stack_updates\", self._bindings_changed)\n\n def _bindings_changed(self, _: Widget | None) -> None:\n self._key_text = None\n self.refresh()\n\n def _on_mouse_move(self, event: events.MouseMove) -> None:\n \"\"\"Store any key we are moving over.\"\"\"\n self.highlight_key = event.style.meta.get(\"key\")\n\n def _on_leave(self, _: events.Leave) -> None:\n \"\"\"Clear any highlight when the mouse leaves the widget\"\"\"\n if self.screen.is_current:\n self.highlight_key = None\n\n def __rich_repr__(self) -> rich.repr.Result:\n yield from super().__rich_repr__()\n\n def _make_key_text(self) -> Text:\n \"\"\"Create text containing all the keys.\"\"\"\n base_style = self.rich_style\n text = Text(\n style=self.rich_style,\n no_wrap=True,\n overflow=\"ellipsis\",\n justify=\"left\",\n end=\"\",\n )\n highlight_style = self.get_component_rich_style(\"footer--highlight\")\n highlight_key_style = self.get_component_rich_style(\"footer--highlight-key\")\n key_style = 
self.get_component_rich_style(\"footer--key\")\n description_style = self.get_component_rich_style(\"footer--description\")\n\n bindings = [\n binding\n for (_, binding) in self.app.namespace_bindings.values()\n if binding.show\n ]\n\n action_to_bindings = defaultdict(list)\n for binding in bindings:\n action_to_bindings[binding.action].append(binding)\n\n for _, bindings in action_to_bindings.items():\n binding = bindings[0]\n if binding.key_display is None:\n key_display = self.app.get_key_display(binding.key)\n if key_display is None:\n key_display = binding.key.upper()\n else:\n key_display = binding.key_display\n hovered = self.highlight_key == binding.key\n key_text = Text.assemble(\n (f\" {key_display} \", highlight_key_style if hovered else key_style),\n (\n f\" {binding.description} \",\n highlight_style if hovered else base_style + description_style,\n ),\n meta={\n \"@click\": f\"app.check_bindings('{binding.key}')\",\n \"key\": binding.key,\n },\n )\n text.append_text(key_text)\n return text\n\n def notify_style_update(self) -> None:\n self._key_text = None\n\n def post_render(self, renderable):\n return renderable\n\n def render(self) -> RenderableType:\n if self._key_text is None:\n self._key_text = self._make_key_text()\n return self._key_text\n", "path": "src/textual/widgets/_footer.py"}]}
| 1,925 | 126 |
gh_patches_debug_27148
|
rasdani/github-patches
|
git_diff
|
beeware__toga-2086
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Gtk] WebView uses deprecated run_javascript api
### Describe the bug
The Gtk backend for `toga.WebView` uses the `run_javascript()` API to evaluate JavaScript. This was deprecated in WebKitGTK 2.40 in favour of `evaluate_javascript()`. Since we depend on WebKit v4.0 or later, we should switch to the non-deprecated API. See https://webkitgtk.org/reference/webkit2gtk/stable/method.WebView.run_javascript.html.
### Steps to reproduce
Evaluate some JavaScript using `toga.WebView` on Linux with the Gtk backend. See the following deprecation warning:
```
/home/parallels/maestral/lib/python3.10/site-packages/toga_gtk/widgets/webview.py:106: DeprecationWarning: WebKit2.WebView.run_javascript is deprecated
self.native.run_javascript(javascript, None, gtk_js_finished)
/home/parallels/maestral/lib/python3.10/site-packages/toga_gtk/widgets/webview.py:86: DeprecationWarning: WebKit2.WebView.run_javascript_finish is deprecated
js_result = webview.run_javascript_finish(task)
```
### Expected behavior
Don't use deprecated APIs unless required for backward compatibility.
### Screenshots
_No response_
### Environment
- Operating System: Ubuntu 22.04
- Python version: Python 3.10
- Software versions:
- Toga: 0.3.2.dev804+g609682318
### Logs
_No response_
### Additional context
_No response_
</issue>
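As a rough sketch of the migration, the helper below prefers `evaluate_javascript()` (its argument list mirrors the patch shown further down) and only falls back to the deprecated call when the bindings are too old. The helper name and the `hasattr` guard are assumptions for illustration, not part of Toga's actual code.

```python
# Sketch only: prefer WebKitGTK >= 2.40's evaluate_javascript(), fall back otherwise.
def evaluate_js(webview, script, on_value):
    has_new_api = hasattr(webview, "evaluate_javascript")

    def _finished(view, task, *user_data):
        if has_new_api:
            # evaluate_javascript_finish() returns the JavaScriptCore value directly.
            value = view.evaluate_javascript_finish(task)
        else:
            # The deprecated path wraps the value in a WebKit2.JavascriptResult.
            value = view.run_javascript_finish(task).get_js_value()
        if value.is_boolean():
            on_value(value.to_boolean())
        elif value.is_number():
            on_value(value.to_double())
        else:
            on_value(value.to_string())

    if has_new_api:
        webview.evaluate_javascript(
            script=script,
            length=len(script),
            world_name=None,
            source_uri=None,
            cancellable=None,
            callback=_finished,
        )
    else:
        webview.run_javascript(script, None, _finished)
```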
<code>
[start of gtk/src/toga_gtk/widgets/webview.py]
1 from travertino.size import at_least
2
3 from toga.widgets.webview import JavaScriptResult
4
5 from ..libs import GLib, WebKit2
6 from .base import Widget
7
8
9 class WebView(Widget):
10 """GTK WebView implementation."""
11
12 def create(self):
13 if WebKit2 is None: # pragma: no cover
14 raise RuntimeError(
15 "Unable to import WebKit2. Ensure that the system package "
16 "providing Webkit2 and its GTK bindings have been installed."
17 )
18
19 self.native = WebKit2.WebView()
20
21 settings = self.native.get_settings()
22 settings.set_property("enable-developer-extras", True)
23
24 # The default cache model is WEB_BROWSER, which will
25 # use the backing cache to minimize hits on the web server.
26 # This can result in stale web content being served, even if
27 # the source document (and the web server response) changes.
28 context = self.native.get_context()
29 context.set_cache_model(WebKit2.CacheModel.DOCUMENT_VIEWER)
30
31 self.native.connect("load-changed", self.gtk_on_load_changed)
32
33 self.load_future = None
34
35 def gtk_on_load_changed(self, widget, load_event, *args):
36 if load_event == WebKit2.LoadEvent.FINISHED:
37 self.interface.on_webview_load(None)
38
39 if self.load_future:
40 self.load_future.set_result(None)
41 self.load_future = None
42
43 def get_url(self):
44 url = self.native.get_uri()
45 return None if url == "about:blank" else url
46
47 def _loaded(self, data):
48 # Internal method to fake a load event.
49 self.native.emit("load-changed", WebKit2.LoadEvent.FINISHED)
50 return False
51
52 def set_url(self, value, future=None):
53 if value:
54 self.native.load_uri(value)
55 else:
56 self.native.load_plain_text("")
57 # GTK doesn't emit a load-changed signal when plain text is loaded; so we
58 # fake it. We can't emit the signal directly because it will be handled
59 # immediately. During creation of an empty webview, the URL is set to None,
60 # which means an event can be triggered before the widget instance has
61 # finished construction. So, we defer the call with a 0 timeout.
62 GLib.timeout_add(0, self._loaded, None)
63
64 self.load_future = future
65
66 def get_user_agent(self):
67 return self.native.get_settings().props.user_agent
68
69 def set_user_agent(self, value):
70 # replace user agent of webview (webview has own one)
71 self.native.get_settings().props.user_agent = value
72
73 def set_content(self, root_url, content):
74 self.native.load_html(content, root_url)
75
76 def evaluate_javascript(self, javascript, on_result=None):
77 # Construct a future on the event loop
78 result = JavaScriptResult()
79
80 # Define a callback that will update the future when
81 # the Javascript is complete.
82 def gtk_js_finished(webview, task, *user_data):
83 """If `run_javascript_finish` from GTK returns a result, unmarshal it, and
84 call back with the result."""
85 try:
86 js_result = webview.run_javascript_finish(task)
87 value = js_result.get_js_value()
88 if value.is_boolean():
89 value = value.to_boolean()
90 elif value.is_number():
91 value = value.to_double()
92 else:
93 value = value.to_string()
94
95 result.future.set_result(value)
96 if on_result:
97 on_result(value)
98 except Exception as e:
99 exc = RuntimeError(str(e))
100 result.future.set_exception(exc)
101 if on_result:
102 on_result(None, exception=exc)
103
104 # Invoke the javascript method, with a callback that will set
105 # the future when a result is available.
106 self.native.run_javascript(javascript, None, gtk_js_finished)
107
108 # wait for the future, and return the result
109 return result
110
111 def rehint(self):
112 self.interface.intrinsic.width = at_least(self.interface._MIN_WIDTH)
113 self.interface.intrinsic.height = at_least(self.interface._MIN_HEIGHT)
114
[end of gtk/src/toga_gtk/widgets/webview.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/gtk/src/toga_gtk/widgets/webview.py b/gtk/src/toga_gtk/widgets/webview.py
--- a/gtk/src/toga_gtk/widgets/webview.py
+++ b/gtk/src/toga_gtk/widgets/webview.py
@@ -80,11 +80,10 @@
# Define a callback that will update the future when
# the Javascript is complete.
def gtk_js_finished(webview, task, *user_data):
- """If `run_javascript_finish` from GTK returns a result, unmarshal it, and
+ """If `evaluate_javascript_finish` from GTK returns a result, unmarshal it, and
call back with the result."""
try:
- js_result = webview.run_javascript_finish(task)
- value = js_result.get_js_value()
+ value = webview.evaluate_javascript_finish(task)
if value.is_boolean():
value = value.to_boolean()
elif value.is_number():
@@ -103,7 +102,14 @@
# Invoke the javascript method, with a callback that will set
# the future when a result is available.
- self.native.run_javascript(javascript, None, gtk_js_finished)
+ self.native.evaluate_javascript(
+ script=javascript,
+ length=len(javascript),
+ world_name=None,
+ source_uri=None,
+ cancellable=None,
+ callback=gtk_js_finished,
+ )
# wait for the future, and return the result
return result
|
{"golden_diff": "diff --git a/gtk/src/toga_gtk/widgets/webview.py b/gtk/src/toga_gtk/widgets/webview.py\n--- a/gtk/src/toga_gtk/widgets/webview.py\n+++ b/gtk/src/toga_gtk/widgets/webview.py\n@@ -80,11 +80,10 @@\n # Define a callback that will update the future when\n # the Javascript is complete.\n def gtk_js_finished(webview, task, *user_data):\n- \"\"\"If `run_javascript_finish` from GTK returns a result, unmarshal it, and\n+ \"\"\"If `evaluate_javascript_finish` from GTK returns a result, unmarshal it, and\n call back with the result.\"\"\"\n try:\n- js_result = webview.run_javascript_finish(task)\n- value = js_result.get_js_value()\n+ value = webview.evaluate_javascript_finish(task)\n if value.is_boolean():\n value = value.to_boolean()\n elif value.is_number():\n@@ -103,7 +102,14 @@\n \n # Invoke the javascript method, with a callback that will set\n # the future when a result is available.\n- self.native.run_javascript(javascript, None, gtk_js_finished)\n+ self.native.evaluate_javascript(\n+ script=javascript,\n+ length=len(javascript),\n+ world_name=None,\n+ source_uri=None,\n+ cancellable=None,\n+ callback=gtk_js_finished,\n+ )\n \n # wait for the future, and return the result\n return result\n", "issue": "[Gtk] WebView uses deprecated run_javascript api\n### Describe the bug\r\n\r\nThe Gtk backend for `toga.WebView` uses the `run_javascript()` API to evaluate JavaScript. This was deprecated in WebView v2.40 in favour of `evaluate_javascript()`. Since we depend on WebKit v4.0 or later, we should switch to the non-deprecated API. See https://webkitgtk.org/reference/webkit2gtk/stable/method.WebView.run_javascript.html.\r\n\r\n### Steps to reproduce\r\n\r\nEvaluate some JavaScript using `toga.WebView` on Linux with the Gtk backend. See the followig deprecation warning:\r\n\r\n```\r\n/home/parallels/maestral/lib/python3.10/site-packages/toga_gtk/widgets/webview.py:106: DeprecationWarning: WebKit2.WebView.run_javascript is deprecated\r\n self.native.run_javascript(javascript, None, gtk_js_finished)\r\n/home/parallels/maestral/lib/python3.10/site-packages/toga_gtk/widgets/webview.py:86: DeprecationWarning: WebKit2.WebView.run_javascript_finish is deprecated\r\n js_result = webview.run_javascript_finish(task)\r\n```\r\n\r\n### Expected behavior\r\n\r\nDon't use deprecated APIs unless required for backward compatibility.\r\n\r\n### Screenshots\r\n\r\n_No response_\r\n\r\n### Environment\r\n\r\n- Operating System: Ubuntu 22.04\r\n- Python version: Python 3.10\r\n- Software versions:\r\n - Toga: 0.3.2.dev804+g609682318\r\n\r\n\r\n### Logs\r\n\r\n_No response_\r\n\r\n### Additional context\r\n\r\n_No response_\n", "before_files": [{"content": "from travertino.size import at_least\n\nfrom toga.widgets.webview import JavaScriptResult\n\nfrom ..libs import GLib, WebKit2\nfrom .base import Widget\n\n\nclass WebView(Widget):\n \"\"\"GTK WebView implementation.\"\"\"\n\n def create(self):\n if WebKit2 is None: # pragma: no cover\n raise RuntimeError(\n \"Unable to import WebKit2. 
Ensure that the system package \"\n \"providing Webkit2 and its GTK bindings have been installed.\"\n )\n\n self.native = WebKit2.WebView()\n\n settings = self.native.get_settings()\n settings.set_property(\"enable-developer-extras\", True)\n\n # The default cache model is WEB_BROWSER, which will\n # use the backing cache to minimize hits on the web server.\n # This can result in stale web content being served, even if\n # the source document (and the web server response) changes.\n context = self.native.get_context()\n context.set_cache_model(WebKit2.CacheModel.DOCUMENT_VIEWER)\n\n self.native.connect(\"load-changed\", self.gtk_on_load_changed)\n\n self.load_future = None\n\n def gtk_on_load_changed(self, widget, load_event, *args):\n if load_event == WebKit2.LoadEvent.FINISHED:\n self.interface.on_webview_load(None)\n\n if self.load_future:\n self.load_future.set_result(None)\n self.load_future = None\n\n def get_url(self):\n url = self.native.get_uri()\n return None if url == \"about:blank\" else url\n\n def _loaded(self, data):\n # Internal method to fake a load event.\n self.native.emit(\"load-changed\", WebKit2.LoadEvent.FINISHED)\n return False\n\n def set_url(self, value, future=None):\n if value:\n self.native.load_uri(value)\n else:\n self.native.load_plain_text(\"\")\n # GTK doesn't emit a load-changed signal when plain text is loaded; so we\n # fake it. We can't emit the signal directly because it will be handled\n # immediately. During creation of an empty webview, the URL is set to None,\n # which means an event can be triggered before the widget instance has\n # finished construction. So, we defer the call with a 0 timeout.\n GLib.timeout_add(0, self._loaded, None)\n\n self.load_future = future\n\n def get_user_agent(self):\n return self.native.get_settings().props.user_agent\n\n def set_user_agent(self, value):\n # replace user agent of webview (webview has own one)\n self.native.get_settings().props.user_agent = value\n\n def set_content(self, root_url, content):\n self.native.load_html(content, root_url)\n\n def evaluate_javascript(self, javascript, on_result=None):\n # Construct a future on the event loop\n result = JavaScriptResult()\n\n # Define a callback that will update the future when\n # the Javascript is complete.\n def gtk_js_finished(webview, task, *user_data):\n \"\"\"If `run_javascript_finish` from GTK returns a result, unmarshal it, and\n call back with the result.\"\"\"\n try:\n js_result = webview.run_javascript_finish(task)\n value = js_result.get_js_value()\n if value.is_boolean():\n value = value.to_boolean()\n elif value.is_number():\n value = value.to_double()\n else:\n value = value.to_string()\n\n result.future.set_result(value)\n if on_result:\n on_result(value)\n except Exception as e:\n exc = RuntimeError(str(e))\n result.future.set_exception(exc)\n if on_result:\n on_result(None, exception=exc)\n\n # Invoke the javascript method, with a callback that will set\n # the future when a result is available.\n self.native.run_javascript(javascript, None, gtk_js_finished)\n\n # wait for the future, and return the result\n return result\n\n def rehint(self):\n self.interface.intrinsic.width = at_least(self.interface._MIN_WIDTH)\n self.interface.intrinsic.height = at_least(self.interface._MIN_HEIGHT)\n", "path": "gtk/src/toga_gtk/widgets/webview.py"}]}
| 2,021 | 334 |
gh_patches_debug_29018
|
rasdani/github-patches
|
git_diff
|
sktime__sktime-3972
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] `model_evaluation.evaluate` fails for `statsmodels` forecasters with exogenous features
**Describe the bug**
`model_evaluation.evaluate` creates `X_test` containing both train and test samples. For statsmodels models with exogenous features, the length of `X_test` must equal the forecast horizon, so this currently throws the error:
`ValueError: Provided exogenous values are not of the appropriate shape.`
This bug occurs for UnobservedComponents and SARIMAX.
The root cause of the bug is in the `_split` function:
https://github.com/sktime/sktime/blob/a5f824a5fff1c3fc1babd3f34b94c5055ec040b0/sktime/forecasting/model_evaluation/_functions.py#L60
This could be modified to `X.iloc[test].sort_index()` for statsmodels models with exogenous features, or alternatively `X` could be filtered here:
https://github.com/sktime/sktime/blob/a5f824a5fff1c3fc1babd3f34b94c5055ec040b0/sktime/forecasting/base/adapters/_statsmodels.py#L104
**To Reproduce**
```python
from sktime.datasets import load_longley
from sktime.forecasting.structural import UnobservedComponents
from sktime.forecasting.model_evaluation import evaluate
from sktime.forecasting.model_selection import SlidingWindowSplitter
y, X = load_longley()
forecaster = UnobservedComponents(level='local linear trend')
cv = SlidingWindowSplitter(fh=[1,2,3],
window_length = 4,
step_length = 1)
results = evaluate(
forecaster=forecaster, y=y, X = X,
cv=cv, strategy='refit', return_data=True,
error_score = 'raise'
)
```
**Versions**
System:
python: 3.8.12 (default, Nov 20 2022, 10:50:06) [Clang 14.0.0 (clang-1400.0.29.202)]
machine: macOS-13.0.1-arm64-arm-64bit
Python dependencies:
pip: 21.2
setuptools: 63.1.0
sklearn: 1.1.3
sktime: 0.14.0
statsmodels: 0.13.5
numpy: 1.22.0
scipy: 1.9.3
pandas: 1.4.1
matplotlib: 3.6.2
joblib: 1.2.0
numba: 0.56.4
pmdarima: None
tsfresh: None
</issue>
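The snippet below is a minimal, self-contained sketch of the second option mentioned in the issue (filtering `X` inside the statsmodels adapter rather than changing `_split`). The function and argument names are illustrative assumptions; the patch further down applies the same idea directly inside `_predict`.

```python
import pandas as pd


def trim_exog_for_statsmodels(X: pd.DataFrame,
                              X_train_index: pd.Index,
                              fh_abs: pd.Index) -> pd.DataFrame:
    """Keep only the out-of-sample rows of X, up to the last forecast-horizon step.

    X             -- exogenous frame as built by `evaluate` (train + test rows)
    X_train_index -- index of the rows already passed to `fit`
    fh_abs        -- absolute forecasting horizon as a pandas index
    """
    X_test = X.loc[~X.index.isin(X_train_index)]  # drop rows statsmodels has already seen
    return X_test.loc[: fh_abs[-1]]               # statsmodels expects exactly the horizon rows
```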
<code>
[start of sktime/forecasting/base/adapters/_statsmodels.py]
1 # -*- coding: utf-8 -*-
2 # !/usr/bin/env python3 -u
3 # copyright: sktime developers, BSD-3-Clause License (see LICENSE file)
4 """Implements adapter for statsmodels forecasters to be used in sktime framework."""
5
6 __author__ = ["mloning"]
7 __all__ = ["_StatsModelsAdapter"]
8
9 import inspect
10 from warnings import warn
11
12 import numpy as np
13 import pandas as pd
14
15 from sktime.forecasting.base import BaseForecaster
16
17
18 class _StatsModelsAdapter(BaseForecaster):
19 """Base class for interfacing statsmodels forecasting algorithms."""
20
21 _fitted_param_names = ()
22 _tags = {
23 "ignores-exogeneous-X": True,
24 "requires-fh-in-fit": False,
25 "handles-missing-data": False,
26 "python_dependencies": "statsmodels",
27 }
28
29 def __init__(self, random_state=None):
30 self._forecaster = None
31 self.random_state = random_state
32 self._fitted_forecaster = None
33 super(_StatsModelsAdapter, self).__init__()
34
35 def _fit(self, y, X=None, fh=None):
36 """Fit to training data.
37
38 Parameters
39 ----------
40 y : pd.Series
41 Target time series to which to fit the forecaster.
42 fh : int, list or np.array, optional (default=None)
43 The forecasters horizon with the steps ahead to to predict.
44 X : pd.DataFrame, optional (default=None)
45 Exogenous variables are ignored
46
47 Returns
48 -------
49 self : returns an instance of self.
50 """
51 # statsmodels does not support the pd.Int64Index as required,
52 # so we coerce them here to pd.RangeIndex
53 if isinstance(y, pd.Series) and y.index.is_integer():
54 y, X = _coerce_int_to_range_index(y, X)
55 self._fit_forecaster(y, X)
56 return self
57
58 def _fit_forecaster(self, y_train, X_train=None):
59 """Log used internally in fit."""
60 raise NotImplementedError("abstract method")
61
62 def _update(self, y, X=None, update_params=True):
63 """Update used internally in update."""
64 if update_params or self.is_composite():
65 super()._update(y, X, update_params=update_params)
66 else:
67 if not hasattr(self._fitted_forecaster, "append"):
68 warn(
69 f"NotImplementedWarning: {self.__class__.__name__} "
70 f"can not accept new data when update_params=False. "
71 f"Call with update_params=True to refit with new data."
72 )
73 else:
74 # only append unseen data to fitted forecaster
75 index_diff = y.index.difference(
76 self._fitted_forecaster.fittedvalues.index
77 )
78 if index_diff.isin(y.index).all():
79 y = y.loc[index_diff]
80 self._fitted_forecaster = self._fitted_forecaster.append(y)
81
82 def _predict(self, fh, X=None):
83 """Make forecasts.
84
85 Parameters
86 ----------
87 fh : ForecastingHorizon
88 The forecasters horizon with the steps ahead to to predict.
89 Default is one-step ahead forecast,
90 i.e. np.array([1])
91 X : pd.DataFrame, optional (default=None)
92 Exogenous variables are ignored.
93
94 Returns
95 -------
96 y_pred : pd.Series
97 Returns series of predicted values.
98 """
99 # statsmodels requires zero-based indexing starting at the
100 # beginning of the training series when passing integers
101 start, end = fh.to_absolute_int(self._y.index[0], self.cutoff)[[0, -1]]
102
103 if "exog" in inspect.signature(self._forecaster.__init__).parameters.keys():
104 y_pred = self._fitted_forecaster.predict(start=start, end=end, exog=X)
105 else:
106 y_pred = self._fitted_forecaster.predict(start=start, end=end)
107
108 # statsmodels forecasts all periods from start to end of forecasting
109 # horizon, but only return given time points in forecasting horizon
110 y_pred = y_pred.loc[fh.to_absolute(self.cutoff).to_pandas()]
111 # ensure that name is not added nor removed
112 # otherwise this may upset conversion to pd.DataFrame
113 y_pred.name = self._y.name
114 return y_pred
115
116 def _get_fitted_params(self):
117 """Get fitted parameters.
118
119 Returns
120 -------
121 fitted_params : dict
122 """
123 fitted_params = {}
124 for name in self._get_fitted_param_names():
125 if name in ["aic", "aicc", "bic", "hqic"]:
126 fitted_params[name] = getattr(self._fitted_forecaster, name, None)
127 else:
128 fitted_params[name] = self._fitted_forecaster.params.get(name)
129 return fitted_params
130
131 def _get_fitted_param_names(self):
132 """Get names of fitted parameters."""
133 return self._fitted_param_names
134
135
136 def _coerce_int_to_range_index(y, X=None):
137 new_index = pd.RangeIndex(y.index[0], y.index[-1] + 1)
138 try:
139 np.testing.assert_array_equal(y.index, new_index)
140 except AssertionError:
141 raise ValueError(
142 "Coercion of integer pd.Index to pd.RangeIndex "
143 "failed. Please provide `y_train` with a "
144 "pd.RangeIndex."
145 )
146 y.index = new_index
147 if X is not None:
148 X.index = new_index
149 return y, X
150
[end of sktime/forecasting/base/adapters/_statsmodels.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/sktime/forecasting/base/adapters/_statsmodels.py b/sktime/forecasting/base/adapters/_statsmodels.py
--- a/sktime/forecasting/base/adapters/_statsmodels.py
+++ b/sktime/forecasting/base/adapters/_statsmodels.py
@@ -99,6 +99,16 @@
# statsmodels requires zero-based indexing starting at the
# beginning of the training series when passing integers
start, end = fh.to_absolute_int(self._y.index[0], self.cutoff)[[0, -1]]
+ fh_abs = fh.to_absolute(self.cutoff).to_pandas()
+
+ # bug fix for evaluate function as test_plus_train indices are passed
+ # statsmodels exog must contain test indices only.
+ # For discussion see https://github.com/sktime/sktime/issues/3830
+ if X is not None:
+ ind_drop = self._X.index
+ X = X.loc[~X.index.isin(ind_drop)]
+ # Entire range of the forecast horizon is required
+ X = X[: fh_abs[-1]]
if "exog" in inspect.signature(self._forecaster.__init__).parameters.keys():
y_pred = self._fitted_forecaster.predict(start=start, end=end, exog=X)
@@ -107,7 +117,7 @@
# statsmodels forecasts all periods from start to end of forecasting
# horizon, but only return given time points in forecasting horizon
- y_pred = y_pred.loc[fh.to_absolute(self.cutoff).to_pandas()]
+ y_pred = y_pred.loc[fh_abs]
# ensure that name is not added nor removed
# otherwise this may upset conversion to pd.DataFrame
y_pred.name = self._y.name
|
{"golden_diff": "diff --git a/sktime/forecasting/base/adapters/_statsmodels.py b/sktime/forecasting/base/adapters/_statsmodels.py\n--- a/sktime/forecasting/base/adapters/_statsmodels.py\n+++ b/sktime/forecasting/base/adapters/_statsmodels.py\n@@ -99,6 +99,16 @@\n # statsmodels requires zero-based indexing starting at the\n # beginning of the training series when passing integers\n start, end = fh.to_absolute_int(self._y.index[0], self.cutoff)[[0, -1]]\n+ fh_abs = fh.to_absolute(self.cutoff).to_pandas()\n+\n+ # bug fix for evaluate function as test_plus_train indices are passed\n+ # statsmodels exog must contain test indices only.\n+ # For discussion see https://github.com/sktime/sktime/issues/3830\n+ if X is not None:\n+ ind_drop = self._X.index\n+ X = X.loc[~X.index.isin(ind_drop)]\n+ # Entire range of the forecast horizon is required\n+ X = X[: fh_abs[-1]]\n \n if \"exog\" in inspect.signature(self._forecaster.__init__).parameters.keys():\n y_pred = self._fitted_forecaster.predict(start=start, end=end, exog=X)\n@@ -107,7 +117,7 @@\n \n # statsmodels forecasts all periods from start to end of forecasting\n # horizon, but only return given time points in forecasting horizon\n- y_pred = y_pred.loc[fh.to_absolute(self.cutoff).to_pandas()]\n+ y_pred = y_pred.loc[fh_abs]\n # ensure that name is not added nor removed\n # otherwise this may upset conversion to pd.DataFrame\n y_pred.name = self._y.name\n", "issue": "[BUG] `model_evaluation.evaluate` fails for `statsmodels` forecasters with exogenous features\n**Describe the bug**\r\n\r\nmodel_evaluation.evaluate creates X_test containing both train and test samples. For statsmodels models with exogenous features X_test length must equal the forecast horizon and therefore this currently throws the error:\r\n\r\n`ValueError: Provided exogenous values are not of the appropriate shape.`\r\n\r\nThis bug occurs for UnobservedComponents and SARIMAX.\r\n\r\nThe root cause of the bug is in the `_split` function:\r\nhttps://github.com/sktime/sktime/blob/a5f824a5fff1c3fc1babd3f34b94c5055ec040b0/sktime/forecasting/model_evaluation/_functions.py#L60\r\n\r\nThis could be modified to 'X.iloc[test].sort_index()' for statsmodels models with exogenous features or alternatively by filtering X here:\r\nhttps://github.com/sktime/sktime/blob/a5f824a5fff1c3fc1babd3f34b94c5055ec040b0/sktime/forecasting/base/adapters/_statsmodels.py#L104\r\n\r\n**To Reproduce**\r\n\r\n```python\r\nfrom sktime.datasets import load_longley\r\nfrom sktime.forecasting.structural import UnobservedComponents\r\nfrom sktime.forecasting.model_evaluation import evaluate\r\nfrom sktime.forecasting.model_selection import SlidingWindowSplitter\r\n\r\ny, X = load_longley()\r\n\r\nforecaster = UnobservedComponents(level='local linear trend')\r\n\r\ncv = SlidingWindowSplitter(fh=[1,2,3],\r\n window_length = 4,\r\n step_length = 1)\r\n\r\nresults = evaluate(\r\n forecaster=forecaster, y=y, X = X,\r\n cv=cv, strategy='refit', return_data=True,\r\n error_score = 'raise'\r\n )\r\n```\r\n\r\n**Versions**\r\n\r\nSystem:\r\npython: 3.8.12 (default, Nov 20 2022, 10:50:06) [Clang 14.0.0 (clang-1400.0.29.202)]\r\nmachine: macOS-13.0.1-arm64-arm-64bit\r\n\r\nPython dependencies:\r\n pip: 21.2\r\n setuptools: 63.1.0\r\n sklearn: 1.1.3\r\n sktime: 0.14.0\r\n statsmodels: 0.13.5\r\n numpy: 1.22.0\r\n scipy: 1.9.3\r\n pandas: 1.4.1\r\n matplotlib: 3.6.2\r\n joblib: 1.2.0\r\n numba: 0.56.4\r\n pmdarima: None\r\n tsfresh: None\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# !/usr/bin/env python3 -u\n# 
copyright: sktime developers, BSD-3-Clause License (see LICENSE file)\n\"\"\"Implements adapter for statsmodels forecasters to be used in sktime framework.\"\"\"\n\n__author__ = [\"mloning\"]\n__all__ = [\"_StatsModelsAdapter\"]\n\nimport inspect\nfrom warnings import warn\n\nimport numpy as np\nimport pandas as pd\n\nfrom sktime.forecasting.base import BaseForecaster\n\n\nclass _StatsModelsAdapter(BaseForecaster):\n \"\"\"Base class for interfacing statsmodels forecasting algorithms.\"\"\"\n\n _fitted_param_names = ()\n _tags = {\n \"ignores-exogeneous-X\": True,\n \"requires-fh-in-fit\": False,\n \"handles-missing-data\": False,\n \"python_dependencies\": \"statsmodels\",\n }\n\n def __init__(self, random_state=None):\n self._forecaster = None\n self.random_state = random_state\n self._fitted_forecaster = None\n super(_StatsModelsAdapter, self).__init__()\n\n def _fit(self, y, X=None, fh=None):\n \"\"\"Fit to training data.\n\n Parameters\n ----------\n y : pd.Series\n Target time series to which to fit the forecaster.\n fh : int, list or np.array, optional (default=None)\n The forecasters horizon with the steps ahead to to predict.\n X : pd.DataFrame, optional (default=None)\n Exogenous variables are ignored\n\n Returns\n -------\n self : returns an instance of self.\n \"\"\"\n # statsmodels does not support the pd.Int64Index as required,\n # so we coerce them here to pd.RangeIndex\n if isinstance(y, pd.Series) and y.index.is_integer():\n y, X = _coerce_int_to_range_index(y, X)\n self._fit_forecaster(y, X)\n return self\n\n def _fit_forecaster(self, y_train, X_train=None):\n \"\"\"Log used internally in fit.\"\"\"\n raise NotImplementedError(\"abstract method\")\n\n def _update(self, y, X=None, update_params=True):\n \"\"\"Update used internally in update.\"\"\"\n if update_params or self.is_composite():\n super()._update(y, X, update_params=update_params)\n else:\n if not hasattr(self._fitted_forecaster, \"append\"):\n warn(\n f\"NotImplementedWarning: {self.__class__.__name__} \"\n f\"can not accept new data when update_params=False. \"\n f\"Call with update_params=True to refit with new data.\"\n )\n else:\n # only append unseen data to fitted forecaster\n index_diff = y.index.difference(\n self._fitted_forecaster.fittedvalues.index\n )\n if index_diff.isin(y.index).all():\n y = y.loc[index_diff]\n self._fitted_forecaster = self._fitted_forecaster.append(y)\n\n def _predict(self, fh, X=None):\n \"\"\"Make forecasts.\n\n Parameters\n ----------\n fh : ForecastingHorizon\n The forecasters horizon with the steps ahead to to predict.\n Default is one-step ahead forecast,\n i.e. 
np.array([1])\n X : pd.DataFrame, optional (default=None)\n Exogenous variables are ignored.\n\n Returns\n -------\n y_pred : pd.Series\n Returns series of predicted values.\n \"\"\"\n # statsmodels requires zero-based indexing starting at the\n # beginning of the training series when passing integers\n start, end = fh.to_absolute_int(self._y.index[0], self.cutoff)[[0, -1]]\n\n if \"exog\" in inspect.signature(self._forecaster.__init__).parameters.keys():\n y_pred = self._fitted_forecaster.predict(start=start, end=end, exog=X)\n else:\n y_pred = self._fitted_forecaster.predict(start=start, end=end)\n\n # statsmodels forecasts all periods from start to end of forecasting\n # horizon, but only return given time points in forecasting horizon\n y_pred = y_pred.loc[fh.to_absolute(self.cutoff).to_pandas()]\n # ensure that name is not added nor removed\n # otherwise this may upset conversion to pd.DataFrame\n y_pred.name = self._y.name\n return y_pred\n\n def _get_fitted_params(self):\n \"\"\"Get fitted parameters.\n\n Returns\n -------\n fitted_params : dict\n \"\"\"\n fitted_params = {}\n for name in self._get_fitted_param_names():\n if name in [\"aic\", \"aicc\", \"bic\", \"hqic\"]:\n fitted_params[name] = getattr(self._fitted_forecaster, name, None)\n else:\n fitted_params[name] = self._fitted_forecaster.params.get(name)\n return fitted_params\n\n def _get_fitted_param_names(self):\n \"\"\"Get names of fitted parameters.\"\"\"\n return self._fitted_param_names\n\n\ndef _coerce_int_to_range_index(y, X=None):\n new_index = pd.RangeIndex(y.index[0], y.index[-1] + 1)\n try:\n np.testing.assert_array_equal(y.index, new_index)\n except AssertionError:\n raise ValueError(\n \"Coercion of integer pd.Index to pd.RangeIndex \"\n \"failed. Please provide `y_train` with a \"\n \"pd.RangeIndex.\"\n )\n y.index = new_index\n if X is not None:\n X.index = new_index\n return y, X\n", "path": "sktime/forecasting/base/adapters/_statsmodels.py"}]}
| 2,700 | 399 |
gh_patches_debug_4483
|
rasdani/github-patches
|
git_diff
|
kserve__kserve-2134
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
kfserving-samples bucket on GCS was gone
/kind bug
**What steps did you take and what happened:**
The existing codebase is running some test code with a prebuilt model file located in GCS, specifically `gs://kfserving-samples/models/tensorflow/flowers`, but it now seems the bucket is already gone.
Do we have alternative buckets, or should this bucket stay live forever?
```
gsutil cp -r gs://kfserving-samples/models/tensorflow/flowers flowers
BucketNotFoundException: 404 gs://kfserving-samples bucket does not exist.
```
**What did you expect to happen:**
the model file should be downloaded.
**Anything else you would like to add:**
No
**Environment:**
not related to environment.
</issue>
<code>
[start of docs/samples/pipelines/sample-tf-pipeline.py]
1 #
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at
5 #
6 # http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 import kfp.compiler as compiler
15 import kfp.dsl as dsl
16 from kfp import components
17
18 # kfserving_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/'
19 # 'master/components/kubeflow/kfserving/component.yaml')
20 kserve_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/'
21 'master/components/kserve/component.yaml')
22
23
24 @dsl.pipeline(
25 name='KServe pipeline',
26 description='A pipeline for KServe.'
27 )
28 def kservePipeline(
29 action='apply',
30 model_name='tensorflow-sample',
31 model_uri='gs://kfserving-samples/models/tensorflow/flowers',
32 namespace='anonymous',
33 framework='tensorflow'):
34 kserve_op(action=action,
35 model_name=model_name,
36 model_uri=model_uri,
37 namespace=namespace,
38 framework=framework)
39
40
41 if __name__ == '__main__':
42 compiler.Compiler().compile(kservePipeline, __file__ + '.tar.gz')
43
[end of docs/samples/pipelines/sample-tf-pipeline.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/docs/samples/pipelines/sample-tf-pipeline.py b/docs/samples/pipelines/sample-tf-pipeline.py
--- a/docs/samples/pipelines/sample-tf-pipeline.py
+++ b/docs/samples/pipelines/sample-tf-pipeline.py
@@ -28,7 +28,7 @@
def kservePipeline(
action='apply',
model_name='tensorflow-sample',
- model_uri='gs://kfserving-samples/models/tensorflow/flowers',
+ model_uri='gs://kfserving-examples/models/tensorflow/flowers',
namespace='anonymous',
framework='tensorflow'):
kserve_op(action=action,
|
{"golden_diff": "diff --git a/docs/samples/pipelines/sample-tf-pipeline.py b/docs/samples/pipelines/sample-tf-pipeline.py\n--- a/docs/samples/pipelines/sample-tf-pipeline.py\n+++ b/docs/samples/pipelines/sample-tf-pipeline.py\n@@ -28,7 +28,7 @@\n def kservePipeline(\n action='apply',\n model_name='tensorflow-sample',\n- model_uri='gs://kfserving-samples/models/tensorflow/flowers',\n+ model_uri='gs://kfserving-examples/models/tensorflow/flowers',\n namespace='anonymous',\n framework='tensorflow'):\n kserve_op(action=action,\n", "issue": "kfserving-samples bucket on GCS was gone\n/kind bug\r\n\r\n**What steps did you take and what happened:**\r\nThe existing codebase is running some test code with a prebuild model file located in GCS, especially `gs://kfserving-samples/models/tensorflow/flowers` but now seems the bucket is gone already. \r\nDo we have other alternative buckets? or this bucket should be live forever?\r\n\r\n```\r\ngsutil cp -r gs://kfserving-samples/models/tensorflow/flowers flowers\r\n\r\nBucketNotFoundException: 404 gs://kfserving-samples bucket does not exist.\r\n```\r\n\r\n\r\n\r\n**What did you expect to happen:**\r\nthe model file should be downloaded.\r\n\r\n\r\n**Anything else you would like to add:**\r\nNo\r\n\r\n**Environment:**\r\nnot related to environment.\r\n\n", "before_files": [{"content": "#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport kfp.compiler as compiler\nimport kfp.dsl as dsl\nfrom kfp import components\n\n# kfserving_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/'\n# 'master/components/kubeflow/kfserving/component.yaml')\nkserve_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/'\n 'master/components/kserve/component.yaml')\n\n\[email protected](\n name='KServe pipeline',\n description='A pipeline for KServe.'\n)\ndef kservePipeline(\n action='apply',\n model_name='tensorflow-sample',\n model_uri='gs://kfserving-samples/models/tensorflow/flowers',\n namespace='anonymous',\n framework='tensorflow'):\n kserve_op(action=action,\n model_name=model_name,\n model_uri=model_uri,\n namespace=namespace,\n framework=framework)\n\n\nif __name__ == '__main__':\n compiler.Compiler().compile(kservePipeline, __file__ + '.tar.gz')\n", "path": "docs/samples/pipelines/sample-tf-pipeline.py"}]}
| 1,134 | 141 |
gh_patches_debug_4867
|
rasdani/github-patches
|
git_diff
|
buildbot__buildbot-5768
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Send keepalive messages on websocket to prevent cloudflare disconnect
buildbot behind cloudflare is currently reloading for me every 100 seconds. This is due to a cloudflare limitation that resets websockets after 100 seconds of inactivity. It seems the only way around that is either an upgrade to their enterprise plan or adding keepalive messages to the application.
https://community.cloudflare.com/t/cloudflare-websocket-timeout/5865
I'm on buildbot 1.0.0 due to OpenBSD -stable. I've grepped through the source on master, and it seems there's already a ping command implemented on the server, but it's not automatically used by the client yet. The client should invoke it every 60 seconds to avoid a disconnect due to timeouts.
Thanks!
</issue>
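One way to add such a keepalive without touching the JavaScript client — offered purely as a sketch under the assumption that autobahn's built-in ping/pong options are acceptable, not as the fix that was ultimately merged — is to enable automatic WebSocket pings on the existing server factory:

```python
from autobahn.twisted.websocket import WebSocketServerFactory


class KeepaliveWsFactory(WebSocketServerFactory):
    """Sketch: server-initiated pings so idle connections survive proxy timeouts."""

    def __init__(self, master):
        super().__init__()
        self.master = master
        # 60s keeps traffic under Cloudflare's ~100s idle cutoff; both values
        # are assumptions and should be tuned for the deployment.
        self.setProtocolOptions(webStatus=False,
                                autoPingInterval=60,
                                autoPingTimeout=30)
```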
<code>
[start of master/buildbot/www/ws.py]
1 # This file is part of . Buildbot is free software: you can
2 # redistribute it and/or modify it under the terms of the GNU General Public
3 # License as published by the Free Software Foundation, version 2.
4 #
5 # This program is distributed in the hope that it will be useful, but WITHOUT
6 # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
7 # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
8 # details.
9 #
10 # You should have received a copy of the GNU General Public License along with
11 # this program; if not, write to the Free Software Foundation, Inc., 51
12 # Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
13 #
14 # Copyright Team Members
15
16 import json
17
18 from autobahn.twisted.resource import WebSocketResource
19 from autobahn.twisted.websocket import WebSocketServerFactory
20 from autobahn.twisted.websocket import WebSocketServerProtocol
21 from twisted.internet import defer
22 from twisted.python import log
23
24 from buildbot.util import bytes2unicode
25 from buildbot.util import toJson
26 from buildbot.util import unicode2bytes
27
28
29 class WsProtocol(WebSocketServerProtocol):
30
31 def __init__(self, master):
32 super().__init__()
33 self.master = master
34 self.qrefs = {}
35 self.debug = self.master.config.www.get('debug', False)
36
37 def sendJsonMessage(self, **msg):
38 return self.sendMessage(unicode2bytes(json.dumps(msg, default=toJson,
39 separators=(',', ':'))))
40
41 def onMessage(self, frame, isBinary):
42 if self.debug:
43 log.msg("FRAME {}".format(frame))
44 # parse the incoming request
45
46 frame = json.loads(bytes2unicode(frame))
47 _id = frame.get("_id")
48 if _id is None:
49 return self.sendJsonMessage(error="no '_id' in websocket frame", code=400, _id=None)
50 cmd = frame.pop("cmd", None)
51 if cmd is None:
52 return self.sendJsonMessage(error="no 'cmd' in websocket frame", code=400, _id=None)
53 cmdmeth = "cmd_" + cmd
54 meth = getattr(self, cmdmeth, None)
55 if meth is None:
56 return self.sendJsonMessage(error="no such command '{}'".format(cmd), code=404, _id=_id)
57 try:
58 return meth(**frame)
59 except TypeError as e:
60 return self.sendJsonMessage(error="Invalid method argument '{}'".format(str(e)),
61 code=400, _id=_id)
62 except Exception as e:
63 log.err("while calling command {}".format(cmd))
64 return self.sendJsonMessage(error="Internal Error '{}'".format(str(e)), code=500,
65 _id=_id)
66
67 def ack(self, _id):
68 return self.sendJsonMessage(msg="OK", code=200, _id=_id)
69
70 def parsePath(self, path):
71 path = path.split("/")
72 return tuple([str(p) if p != "*" else None for p in path])
73
74 def isPath(self, path):
75 if not isinstance(path, str):
76 return False
77 return True
78
79 @defer.inlineCallbacks
80 def cmd_startConsuming(self, path, _id):
81 if not self.isPath(path):
82 yield self.sendJsonMessage(error="invalid path format '{}'".format(str(path)), code=400,
83 _id=_id)
84 return
85
86 # if it's already subscribed, don't leak a subscription
87 if self.qrefs is not None and path in self.qrefs:
88 yield self.ack(_id=_id)
89 return
90
91 def callback(key, message):
92 # protocol is deliberately concise in size
93 return self.sendJsonMessage(k="/".join(key), m=message)
94
95 qref = yield self.master.mq.startConsuming(callback, self.parsePath(path))
96
97 # race conditions handling
98 if self.qrefs is None or path in self.qrefs:
99 qref.stopConsuming()
100
101 # only store and ack if we were not disconnected in between
102 if self.qrefs is not None:
103 self.qrefs[path] = qref
104 self.ack(_id=_id)
105
106 @defer.inlineCallbacks
107 def cmd_stopConsuming(self, path, _id):
108 if not self.isPath(path):
109 yield self.sendJsonMessage(error="invalid path format '{}'".format(str(path)), code=400,
110 _id=_id)
111 return
112
113 # only succeed if path has been started
114 if path in self.qrefs:
115 qref = self.qrefs.pop(path)
116 yield qref.stopConsuming()
117 yield self.ack(_id=_id)
118 return
119 yield self.sendJsonMessage(error="path was not consumed '{}'".format(str(path)), code=400,
120 _id=_id)
121
122 def cmd_ping(self, _id):
123 self.sendJsonMessage(msg="pong", code=200, _id=_id)
124
125 def connectionLost(self, reason):
126 if self.debug:
127 log.msg("connection lost", system=self)
128 for qref in self.qrefs.values():
129 qref.stopConsuming()
130 self.qrefs = None # to be sure we don't add any more
131
132
133 class WsProtocolFactory(WebSocketServerFactory):
134
135 def __init__(self, master):
136 super().__init__()
137 self.master = master
138 self.setProtocolOptions(webStatus=False)
139
140 def buildProtocol(self, addr):
141 p = WsProtocol(self.master)
142 p.factory = self
143 return p
144
145
146 class WsResource(WebSocketResource):
147
148 def __init__(self, master):
149 super().__init__(WsProtocolFactory(master))
150
[end of master/buildbot/www/ws.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
golden_diff:
diff --git a/master/buildbot/www/ws.py b/master/buildbot/www/ws.py
--- a/master/buildbot/www/ws.py
+++ b/master/buildbot/www/ws.py
@@ -135,7 +135,8 @@
def __init__(self, master):
super().__init__()
self.master = master
- self.setProtocolOptions(webStatus=False)
+ pingInterval = self.master.config.www.get('ws_ping_interval', 0)
+ self.setProtocolOptions(webStatus=False, autoPingInterval=pingInterval)
def buildProtocol(self, addr):
p = WsProtocol(self.master)
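Editor's note on the diff above: autobahn's `setProtocolOptions` already supports automatic keepalive pings via `autoPingInterval` (with a companion `autoPingTimeout`), so the fix only forwards a configurable interval to the library. The sketch below is a hedged illustration of that pattern outside Buildbot; the `EchoProtocol` class, the factory name, and the default interval values are invented for the example and are not Buildbot code.

```python
# Illustrative sketch only (not part of this dataset row or of Buildbot).
from autobahn.twisted.websocket import (WebSocketServerFactory,
                                        WebSocketServerProtocol)


class EchoProtocol(WebSocketServerProtocol):
    """Trivial protocol that echoes every frame back to the client."""

    def onMessage(self, payload, isBinary):
        self.sendMessage(payload, isBinary)


class KeepaliveFactory(WebSocketServerFactory):
    """Factory that enables autobahn's built-in WebSocket pings."""

    def __init__(self, ping_interval=60, ping_timeout=10):
        super().__init__()
        # A non-zero autoPingInterval makes autobahn send pings itself, which
        # keeps idle connections open through proxies such as Cloudflare.
        self.setProtocolOptions(autoPingInterval=ping_interval,
                                autoPingTimeout=ping_timeout)

    def buildProtocol(self, addr):
        proto = EchoProtocol()
        proto.factory = self
        return proto
```

An interval of 0 disables the automatic pings, which appears to be why the patch defaults `ws_ping_interval` to 0 and so preserves the previous behaviour unless the option is set.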
verification_info:
{"golden_diff": "diff --git a/master/buildbot/www/ws.py b/master/buildbot/www/ws.py\n--- a/master/buildbot/www/ws.py\n+++ b/master/buildbot/www/ws.py\n@@ -135,7 +135,8 @@\n def __init__(self, master):\n super().__init__()\n self.master = master\n- self.setProtocolOptions(webStatus=False)\n+ pingInterval = self.master.config.www.get('ws_ping_interval', 0)\n+ self.setProtocolOptions(webStatus=False, autoPingInterval=pingInterval)\n \n def buildProtocol(self, addr):\n p = WsProtocol(self.master)\n", "issue": "Send keepalive messages on websocket to prevent cloudflare disconnect\nbuildbot behind cloudflare is currently reloading for me every 100 seconds. This is due to a cloudflare limitation that is resetting websockets with 100 seconds of inactivity. It seems the only ways around that is either an upgrade to their enterprise plan or adding keepalive messages to the application.\r\n\r\nhttps://community.cloudflare.com/t/cloudflare-websocket-timeout/5865\r\n\r\nI'm on buildbot 1.0.0 due to openbsd -stable, I've grepped through the source on master and it seems there's already a ping command implemented on the server, but it's not automatically used by the client yet. The client should invoke it every 60 seconds to avoid a disconnect due to timeouts.\r\n\r\nThanks!\n", "before_files": [{"content": "# This file is part of . Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Team Members\n\nimport json\n\nfrom autobahn.twisted.resource import WebSocketResource\nfrom autobahn.twisted.websocket import WebSocketServerFactory\nfrom autobahn.twisted.websocket import WebSocketServerProtocol\nfrom twisted.internet import defer\nfrom twisted.python import log\n\nfrom buildbot.util import bytes2unicode\nfrom buildbot.util import toJson\nfrom buildbot.util import unicode2bytes\n\n\nclass WsProtocol(WebSocketServerProtocol):\n\n def __init__(self, master):\n super().__init__()\n self.master = master\n self.qrefs = {}\n self.debug = self.master.config.www.get('debug', False)\n\n def sendJsonMessage(self, **msg):\n return self.sendMessage(unicode2bytes(json.dumps(msg, default=toJson,\n separators=(',', ':'))))\n\n def onMessage(self, frame, isBinary):\n if self.debug:\n log.msg(\"FRAME {}\".format(frame))\n # parse the incoming request\n\n frame = json.loads(bytes2unicode(frame))\n _id = frame.get(\"_id\")\n if _id is None:\n return self.sendJsonMessage(error=\"no '_id' in websocket frame\", code=400, _id=None)\n cmd = frame.pop(\"cmd\", None)\n if cmd is None:\n return self.sendJsonMessage(error=\"no 'cmd' in websocket frame\", code=400, _id=None)\n cmdmeth = \"cmd_\" + cmd\n meth = getattr(self, cmdmeth, None)\n if meth is None:\n return self.sendJsonMessage(error=\"no such command '{}'\".format(cmd), code=404, _id=_id)\n try:\n return meth(**frame)\n except TypeError as e:\n return self.sendJsonMessage(error=\"Invalid method argument '{}'\".format(str(e)),\n code=400, _id=_id)\n except Exception as e:\n 
log.err(\"while calling command {}\".format(cmd))\n return self.sendJsonMessage(error=\"Internal Error '{}'\".format(str(e)), code=500,\n _id=_id)\n\n def ack(self, _id):\n return self.sendJsonMessage(msg=\"OK\", code=200, _id=_id)\n\n def parsePath(self, path):\n path = path.split(\"/\")\n return tuple([str(p) if p != \"*\" else None for p in path])\n\n def isPath(self, path):\n if not isinstance(path, str):\n return False\n return True\n\n @defer.inlineCallbacks\n def cmd_startConsuming(self, path, _id):\n if not self.isPath(path):\n yield self.sendJsonMessage(error=\"invalid path format '{}'\".format(str(path)), code=400,\n _id=_id)\n return\n\n # if it's already subscribed, don't leak a subscription\n if self.qrefs is not None and path in self.qrefs:\n yield self.ack(_id=_id)\n return\n\n def callback(key, message):\n # protocol is deliberately concise in size\n return self.sendJsonMessage(k=\"/\".join(key), m=message)\n\n qref = yield self.master.mq.startConsuming(callback, self.parsePath(path))\n\n # race conditions handling\n if self.qrefs is None or path in self.qrefs:\n qref.stopConsuming()\n\n # only store and ack if we were not disconnected in between\n if self.qrefs is not None:\n self.qrefs[path] = qref\n self.ack(_id=_id)\n\n @defer.inlineCallbacks\n def cmd_stopConsuming(self, path, _id):\n if not self.isPath(path):\n yield self.sendJsonMessage(error=\"invalid path format '{}'\".format(str(path)), code=400,\n _id=_id)\n return\n\n # only succeed if path has been started\n if path in self.qrefs:\n qref = self.qrefs.pop(path)\n yield qref.stopConsuming()\n yield self.ack(_id=_id)\n return\n yield self.sendJsonMessage(error=\"path was not consumed '{}'\".format(str(path)), code=400,\n _id=_id)\n\n def cmd_ping(self, _id):\n self.sendJsonMessage(msg=\"pong\", code=200, _id=_id)\n\n def connectionLost(self, reason):\n if self.debug:\n log.msg(\"connection lost\", system=self)\n for qref in self.qrefs.values():\n qref.stopConsuming()\n self.qrefs = None # to be sure we don't add any more\n\n\nclass WsProtocolFactory(WebSocketServerFactory):\n\n def __init__(self, master):\n super().__init__()\n self.master = master\n self.setProtocolOptions(webStatus=False)\n\n def buildProtocol(self, addr):\n p = WsProtocol(self.master)\n p.factory = self\n return p\n\n\nclass WsResource(WebSocketResource):\n\n def __init__(self, master):\n super().__init__(WsProtocolFactory(master))\n", "path": "master/buildbot/www/ws.py"}]}
| 2,285 | 133 |
gh_patches_debug_8451
|
rasdani/github-patches
|
git_diff
|
web2py__web2py-1732
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
XML sanitize=True throws error on check for acceptable url in py2
Below code in view throws error
```
{{=XML('<img src="/myapp/static/images/pict.jpg" />', sanitize=True)}}
```
Error:
```
Traceback (most recent call last):
File "\web2py\gluon\restricted.py", line 219, in restricted
exec(ccode, environment)
File "<string>", line 73, in <module>
File "\web2py\gluon\html.py", line 603, in __init__
text = sanitizer.sanitize(text, permitted_tags, allowed_attributes)
File "\web2py\gluon\sanitizer.py", line 219, in sanitize
allowed_attributes=allowed_attributes).strip(text, escape)
File "\web2py\gluon\sanitizer.py", line 170, in strip
self.feed(rawstring)
File "HTMLParser.py", line 117, in feed
File "HTMLParser.py", line 161, in goahead
File "HTMLParser.py", line 325, in parse_starttag
File "HTMLParser.py", line 407, in handle_startendtag
File "\web2py\gluon\sanitizer.py", line 115, in handle_starttag
if self.url_is_acceptable(attrs[attribute]):
File "\web2py\gluon\sanitizer.py", line 148, in url_is_acceptable
parsed = urlparse(url)
TypeError: 'module' object is not callable
```
Environment info:
- platform: win32
- python: 2.7.9 (major=2, minor=7, micro=9, releaselevel='final', serial=0)
- web2py: 2.15.2-stable+timestamp.2017.07.19.12.18.41
Steps to reproduce:
1. Open web2py from console
2. Try execute: `XML('<img src="/myapp/static/images/pict.jpg" />', sanitize=True)`
Possible fix:
In gluon._compat.py change line 14 from:
`import urlparse`
to
`from urlparse import urlparse`
</issue>
<code>
[start of gluon/sanitizer.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 """
4 | From http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/496942
5 | Submitter: Josh Goldfoot (other recipes)
6 | Last Updated: 2006/08/05
7 | Version: 1.0
8
9 Cross-site scripting (XSS) defense
10 -----------------------------------
11 """
12
13 from gluon._compat import HTMLParser, urlparse, entitydefs, basestring
14 from gluon.utils import local_html_escape
15 from formatter import AbstractFormatter
16 from xml.sax.saxutils import quoteattr
17
18 __all__ = ['sanitize']
19
20
21 def xssescape(text):
22 """Gets rid of < and > and & and, for good measure, :"""
23
24 return local_html_escape(text, quote=True).replace(':', ':')
25
26
27 class XssCleaner(HTMLParser):
28
29 def __init__(
30 self,
31 permitted_tags=[
32 'a',
33 'b',
34 'blockquote',
35 'br/',
36 'i',
37 'li',
38 'ol',
39 'ul',
40 'p',
41 'cite',
42 'code',
43 'pre',
44 'img/',
45 ],
46 allowed_attributes={'a': ['href', 'title'], 'img': ['src', 'alt'
47 ], 'blockquote': ['type']},
48 strip_disallowed=False
49 ):
50
51 HTMLParser.__init__(self)
52 self.result = ''
53 self.open_tags = []
54 self.permitted_tags = [i for i in permitted_tags if i[-1] != '/']
55 self.requires_no_close = [i[:-1] for i in permitted_tags
56 if i[-1] == '/']
57 self.permitted_tags += self.requires_no_close
58 self.allowed_attributes = allowed_attributes
59
60 # The only schemes allowed in URLs (for href and src attributes).
61 # Adding "javascript" or "vbscript" to this list would not be smart.
62
63 self.allowed_schemes = ['http', 'https', 'ftp', 'mailto']
64
65 #to strip or escape disallowed tags?
66 self.strip_disallowed = strip_disallowed
67 # there might be data after final closing tag, that is to be ignored
68 self.in_disallowed = [False]
69
70 def handle_data(self, data):
71 if data and not self.in_disallowed[-1]:
72 self.result += xssescape(data)
73
74 def handle_charref(self, ref):
75 if self.in_disallowed[-1]:
76 return
77 elif len(ref) < 7 and (ref.isdigit() or ref == 'x27'): # x27 is a special case for apostrophe
78 self.result += '&#%s;' % ref
79 else:
80 self.result += xssescape('&#%s' % ref)
81
82 def handle_entityref(self, ref):
83 if self.in_disallowed[-1]:
84 return
85 elif ref in entitydefs:
86 self.result += '&%s;' % ref
87 else:
88 self.result += xssescape('&%s' % ref)
89
90 def handle_comment(self, comment):
91 if self.in_disallowed[-1]:
92 return
93 elif comment:
94 self.result += xssescape('<!--%s-->' % comment)
95
96 def handle_starttag(
97 self,
98 tag,
99 attrs
100 ):
101 if tag not in self.permitted_tags:
102 self.in_disallowed.append(True)
103 if (not self.strip_disallowed):
104 self.result += xssescape('<%s>' % tag)
105 else:
106 self.in_disallowed.append(False)
107 bt = '<' + tag
108 if tag in self.allowed_attributes:
109 attrs = dict(attrs)
110 self.allowed_attributes_here = [x for x in
111 self.allowed_attributes[tag] if x in attrs
112 and len(attrs[x]) > 0]
113 for attribute in self.allowed_attributes_here:
114 if attribute in ['href', 'src', 'background']:
115 if self.url_is_acceptable(attrs[attribute]):
116 bt += ' %s="%s"' % (attribute,
117 attrs[attribute])
118 else:
119 bt += ' %s=%s' % (xssescape(attribute),
120 quoteattr(attrs[attribute]))
121 # deal with <a> without href and <img> without src
122 if bt == '<a' or bt == '<img':
123 return
124 if tag in self.requires_no_close:
125 bt += ' /'
126 bt += '>'
127 self.result += bt
128 if tag not in self.requires_no_close: self.open_tags.insert(0, tag)
129
130 def handle_endtag(self, tag):
131 bracketed = '</%s>' % tag
132 self.in_disallowed and self.in_disallowed.pop()
133 if tag not in self.permitted_tags:
134 if (not self.strip_disallowed):
135 self.result += xssescape(bracketed)
136 elif tag in self.open_tags:
137 self.result += bracketed
138 self.open_tags.remove(tag)
139
140 def url_is_acceptable(self, url):
141 """
142 Accepts relative, absolute, and mailto urls
143 """
144
145 if url.startswith('#'):
146 return True
147 else:
148 parsed = urlparse(url)
149 return ((parsed[0] in self.allowed_schemes and '.' in parsed[1]) or
150 (parsed[0] in self.allowed_schemes and '@' in parsed[2]) or
151 (parsed[0] == '' and parsed[2].startswith('/')))
152
153 def strip(self, rawstring, escape=True):
154 """
155 Returns the argument stripped of potentially harmful
156 HTML or Javascript code
157
158 @type escape: boolean
159 @param escape: If True (default) it escapes the potentially harmful
160 content, otherwise remove it
161 """
162
163 if not isinstance(rawstring, str):
164 return str(rawstring)
165 for tag in self.requires_no_close:
166 rawstring = rawstring.replace("<%s/>" % tag, "<%s />" % tag)
167 if not escape:
168 self.strip_disallowed = True
169 self.result = ''
170 self.feed(rawstring)
171 for endtag in self.open_tags:
172 if endtag not in self.requires_no_close:
173 self.result += '</%s>' % endtag
174 return self.result
175
176 def xtags(self):
177 """
178 Returns a printable string informing the user which tags are allowed
179 """
180
181 tg = ''
182 for x in sorted(self.permitted_tags):
183 tg += '<' + x
184 if x in self.allowed_attributes:
185 for y in self.allowed_attributes[x]:
186 tg += ' %s=""' % y
187 tg += '> '
188 return xssescape(tg.strip())
189
190
191 def sanitize(text, permitted_tags=[
192 'a',
193 'b',
194 'blockquote',
195 'br/',
196 'i',
197 'li',
198 'ol',
199 'ul',
200 'p',
201 'cite',
202 'code',
203 'pre',
204 'img/',
205 'h1', 'h2', 'h3', 'h4', 'h5', 'h6',
206 'table', 'tbody', 'thead', 'tfoot', 'tr', 'td', 'div',
207 'strong', 'span',
208 ],
209 allowed_attributes={
210 'a': ['href', 'title'],
211 'img': ['src', 'alt'],
212 'blockquote': ['type'],
213 'td': ['colspan'],
214 },
215 escape=True):
216 if not isinstance(text, basestring):
217 return str(text)
218 return XssCleaner(permitted_tags=permitted_tags,
219 allowed_attributes=allowed_attributes).strip(text, escape)
220
[end of gluon/sanitizer.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
golden_diff:
diff --git a/gluon/sanitizer.py b/gluon/sanitizer.py
--- a/gluon/sanitizer.py
+++ b/gluon/sanitizer.py
@@ -145,7 +145,7 @@
if url.startswith('#'):
return True
else:
- parsed = urlparse(url)
+ parsed = urlparse.urlparse(url)
return ((parsed[0] in self.allowed_schemes and '.' in parsed[1]) or
(parsed[0] in self.allowed_schemes and '@' in parsed[2]) or
(parsed[0] == '' and parsed[2].startswith('/')))
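Editor's note on the diff above: the traceback comes from `urlparse` being bound to the module (via `import urlparse` in the compat layer) rather than to the function, so calling it raises `TypeError`. Below is a hedged, self-contained sketch of the same acceptance check with a Python 2/3-agnostic import; the standalone function and the test strings are illustrative only and are not web2py's actual API.

```python
# Illustrative sketch only (not part of this dataset row or of web2py).
try:
    from urllib.parse import urlparse   # Python 3
except ImportError:
    from urlparse import urlparse       # Python 2

ALLOWED_SCHEMES = ('http', 'https', 'ftp', 'mailto')


def url_is_acceptable(url, allowed_schemes=ALLOWED_SCHEMES):
    """Accept fragment-only, relative, absolute and mailto URLs."""
    if url.startswith('#'):
        return True
    parsed = urlparse(url)
    return ((parsed[0] in allowed_schemes and '.' in parsed[1]) or
            (parsed[0] in allowed_schemes and '@' in parsed[2]) or
            (parsed[0] == '' and parsed[2].startswith('/')))


if __name__ == '__main__':
    print(url_is_acceptable('/myapp/static/images/pict.jpg'))  # True
    print(url_is_acceptable('javascript:alert(1)'))            # False
```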
verification_info:
{"golden_diff": "diff --git a/gluon/sanitizer.py b/gluon/sanitizer.py\n--- a/gluon/sanitizer.py\n+++ b/gluon/sanitizer.py\n@@ -145,7 +145,7 @@\n if url.startswith('#'):\n return True\n else:\n- parsed = urlparse(url)\n+ parsed = urlparse.urlparse(url)\n return ((parsed[0] in self.allowed_schemes and '.' in parsed[1]) or\n (parsed[0] in self.allowed_schemes and '@' in parsed[2]) or\n (parsed[0] == '' and parsed[2].startswith('/')))\n", "issue": "XML sanitize=True throws error on check for acceptable url in py2\nBelow code in view throws error\r\n```\r\n{{=XML('<img src=\"/myapp/static/images/pict.jpg\" />', sanitize=True)}}\r\n```\r\n\r\nError:\r\n```\r\nTraceback (most recent call last):\r\n File \"\\web2py\\gluon\\restricted.py\", line 219, in restricted\r\n exec(ccode, environment)\r\n File \"<string>\", line 73, in <module>\r\n File \"\\web2py\\gluon\\html.py\", line 603, in __init__\r\n text = sanitizer.sanitize(text, permitted_tags, allowed_attributes)\r\n File \"\\web2py\\gluon\\sanitizer.py\", line 219, in sanitize\r\n allowed_attributes=allowed_attributes).strip(text, escape)\r\n File \"\\web2py\\gluon\\sanitizer.py\", line 170, in strip\r\n self.feed(rawstring)\r\n File \"HTMLParser.py\", line 117, in feed\r\n File \"HTMLParser.py\", line 161, in goahead\r\n File \"HTMLParser.py\", line 325, in parse_starttag\r\n File \"HTMLParser.py\", line 407, in handle_startendtag\r\n File \"\\web2py\\gluon\\sanitizer.py\", line 115, in handle_starttag\r\n if self.url_is_acceptable(attrs[attribute]):\r\n File \"\\web2py\\gluon\\sanitizer.py\", line 148, in url_is_acceptable\r\n parsed = urlparse(url)\r\nTypeError: 'module' object is not callable\r\n```\r\n\r\nEnvironment info:\r\n- platform: win32\r\n- python: 2.7.9 (major=2, minor=7, micro=9, releaselevel='final', serial=0)\r\n- web2py: 2.15.2-stable+timestamp.2017.07.19.12.18.41\r\n\r\nSteps to reproduce:\r\n1. Open web2py from console\r\n2. 
Try execute: `XML('<img src=\"/myapp/static/images/pict.jpg\" />', sanitize=True)`\r\n\r\nPossible fix:\r\nIn gluon._compat.py change line 14 from:\r\n`import urlparse`\r\nto \r\n`from urlparse import urlparse`\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n| From http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/496942\n| Submitter: Josh Goldfoot (other recipes)\n| Last Updated: 2006/08/05\n| Version: 1.0\n\nCross-site scripting (XSS) defense\n-----------------------------------\n\"\"\"\n\nfrom gluon._compat import HTMLParser, urlparse, entitydefs, basestring\nfrom gluon.utils import local_html_escape\nfrom formatter import AbstractFormatter\nfrom xml.sax.saxutils import quoteattr\n\n__all__ = ['sanitize']\n\n\ndef xssescape(text):\n \"\"\"Gets rid of < and > and & and, for good measure, :\"\"\"\n\n return local_html_escape(text, quote=True).replace(':', ':')\n\n\nclass XssCleaner(HTMLParser):\n\n def __init__(\n self,\n permitted_tags=[\n 'a',\n 'b',\n 'blockquote',\n 'br/',\n 'i',\n 'li',\n 'ol',\n 'ul',\n 'p',\n 'cite',\n 'code',\n 'pre',\n 'img/',\n ],\n allowed_attributes={'a': ['href', 'title'], 'img': ['src', 'alt'\n ], 'blockquote': ['type']},\n strip_disallowed=False\n ):\n\n HTMLParser.__init__(self)\n self.result = ''\n self.open_tags = []\n self.permitted_tags = [i for i in permitted_tags if i[-1] != '/']\n self.requires_no_close = [i[:-1] for i in permitted_tags\n if i[-1] == '/']\n self.permitted_tags += self.requires_no_close\n self.allowed_attributes = allowed_attributes\n\n # The only schemes allowed in URLs (for href and src attributes).\n # Adding \"javascript\" or \"vbscript\" to this list would not be smart.\n\n self.allowed_schemes = ['http', 'https', 'ftp', 'mailto']\n\n #to strip or escape disallowed tags?\n self.strip_disallowed = strip_disallowed\n # there might be data after final closing tag, that is to be ignored\n self.in_disallowed = [False]\n\n def handle_data(self, data):\n if data and not self.in_disallowed[-1]:\n self.result += xssescape(data)\n\n def handle_charref(self, ref):\n if self.in_disallowed[-1]:\n return\n elif len(ref) < 7 and (ref.isdigit() or ref == 'x27'): # x27 is a special case for apostrophe\n self.result += '&#%s;' % ref\n else:\n self.result += xssescape('&#%s' % ref)\n\n def handle_entityref(self, ref):\n if self.in_disallowed[-1]:\n return\n elif ref in entitydefs:\n self.result += '&%s;' % ref\n else:\n self.result += xssescape('&%s' % ref)\n\n def handle_comment(self, comment):\n if self.in_disallowed[-1]:\n return\n elif comment:\n self.result += xssescape('<!--%s-->' % comment)\n\n def handle_starttag(\n self,\n tag,\n attrs\n ):\n if tag not in self.permitted_tags:\n self.in_disallowed.append(True)\n if (not self.strip_disallowed):\n self.result += xssescape('<%s>' % tag)\n else:\n self.in_disallowed.append(False)\n bt = '<' + tag\n if tag in self.allowed_attributes:\n attrs = dict(attrs)\n self.allowed_attributes_here = [x for x in\n self.allowed_attributes[tag] if x in attrs\n and len(attrs[x]) > 0]\n for attribute in self.allowed_attributes_here:\n if attribute in ['href', 'src', 'background']:\n if self.url_is_acceptable(attrs[attribute]):\n bt += ' %s=\"%s\"' % (attribute,\n attrs[attribute])\n else:\n bt += ' %s=%s' % (xssescape(attribute),\n quoteattr(attrs[attribute]))\n # deal with <a> without href and <img> without src\n if bt == '<a' or bt == '<img':\n return\n if tag in self.requires_no_close:\n bt += ' /'\n bt += '>'\n self.result += bt\n if tag not in 
self.requires_no_close: self.open_tags.insert(0, tag)\n\n def handle_endtag(self, tag):\n bracketed = '</%s>' % tag\n self.in_disallowed and self.in_disallowed.pop()\n if tag not in self.permitted_tags:\n if (not self.strip_disallowed):\n self.result += xssescape(bracketed)\n elif tag in self.open_tags:\n self.result += bracketed\n self.open_tags.remove(tag)\n\n def url_is_acceptable(self, url):\n \"\"\"\n Accepts relative, absolute, and mailto urls\n \"\"\"\n\n if url.startswith('#'):\n return True\n else:\n parsed = urlparse(url)\n return ((parsed[0] in self.allowed_schemes and '.' in parsed[1]) or\n (parsed[0] in self.allowed_schemes and '@' in parsed[2]) or\n (parsed[0] == '' and parsed[2].startswith('/')))\n\n def strip(self, rawstring, escape=True):\n \"\"\"\n Returns the argument stripped of potentially harmful\n HTML or Javascript code\n\n @type escape: boolean\n @param escape: If True (default) it escapes the potentially harmful\n content, otherwise remove it\n \"\"\"\n\n if not isinstance(rawstring, str):\n return str(rawstring)\n for tag in self.requires_no_close:\n rawstring = rawstring.replace(\"<%s/>\" % tag, \"<%s />\" % tag)\n if not escape:\n self.strip_disallowed = True\n self.result = ''\n self.feed(rawstring)\n for endtag in self.open_tags:\n if endtag not in self.requires_no_close:\n self.result += '</%s>' % endtag\n return self.result\n\n def xtags(self):\n \"\"\"\n Returns a printable string informing the user which tags are allowed\n \"\"\"\n\n tg = ''\n for x in sorted(self.permitted_tags):\n tg += '<' + x\n if x in self.allowed_attributes:\n for y in self.allowed_attributes[x]:\n tg += ' %s=\"\"' % y\n tg += '> '\n return xssescape(tg.strip())\n\n\ndef sanitize(text, permitted_tags=[\n 'a',\n 'b',\n 'blockquote',\n 'br/',\n 'i',\n 'li',\n 'ol',\n 'ul',\n 'p',\n 'cite',\n 'code',\n 'pre',\n 'img/',\n 'h1', 'h2', 'h3', 'h4', 'h5', 'h6',\n 'table', 'tbody', 'thead', 'tfoot', 'tr', 'td', 'div',\n 'strong', 'span',\n],\n allowed_attributes={\n 'a': ['href', 'title'],\n 'img': ['src', 'alt'],\n 'blockquote': ['type'],\n 'td': ['colspan'],\n },\n escape=True):\n if not isinstance(text, basestring):\n return str(text)\n return XssCleaner(permitted_tags=permitted_tags,\n allowed_attributes=allowed_attributes).strip(text, escape)\n", "path": "gluon/sanitizer.py"}]}
| 3,221 | 141 |
gh_patches_debug_21347
|
rasdani/github-patches
|
git_diff
|
kivy__kivy-4271
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Doc build fails on Windows
The first error is not shown, if the `build` folder is created manually.
``` cmd
make[1]: Entering directory `C:/dev/kivy/doc'
mkdir build\\html build\\doctrees
mkdir: cannot create directory `build\\html': No such file or directory
mkdir: cannot create directory `build\\doctrees': No such file or directory
make[1]: *** [html] Error 1
make[1]: Leaving directory `C:/dev/kivy/doc'
make: *** [html] Error 2
```
And at the end of the build:
``` cmd
copying static files... done
copying extra files... done
dumping search index... done
dumping object inventory... done
build succeeded.
sh: C:\Program: No such file or directory
make[1]: *** [html] Error 127
make[1]: Leaving directory `C:/dev/kivy/doc'
make: *** [html] Error 2
```
## <bountysource-plugin>
Want to back this issue? **[Post a bounty on it!](https://www.bountysource.com/issues/5410566-doc-build-fails-on-windows?utm_campaign=plugin&utm_content=tracker%2F42681&utm_medium=issues&utm_source=github)** We accept bounties via [Bountysource](https://www.bountysource.com/?utm_campaign=plugin&utm_content=tracker%2F42681&utm_medium=issues&utm_source=github).
</bountysource-plugin>
</issue>
<code>
[start of kivy/lib/mtdev.py]
1 '''
2 Python mtdev
3 ============
4
5 The mtdev module provides Python bindings to the `Kernel multi-touch
6 transformation library <https://launchpad.net/mtdev>`_, also known as mtdev
7 (MIT license).
8
9 The mtdev library transforms all variants of kernel MT events to the
10 slotted type B protocol. The events put into mtdev may be from any MT
11 device, specifically type A without contact tracking, type A with
12 contact tracking, or type B with contact tracking. See the kernel
13 documentation for further details.
14
15 .. warning::
16
17 This is an external library and Kivy does not provide any support for it.
18 It might change in the future and we advise you don't rely on it in your
19 code.
20 '''
21
22 import os
23 from ctypes import cdll, Structure, c_ulong, c_int, c_ushort, \
24 c_void_p, pointer, POINTER, byref
25
26 # load library
27 libmtdev = cdll.LoadLibrary('libmtdev.so.1')
28
29 # from linux/input.h
30 MTDEV_CODE_SLOT = 0x2f # MT slot being modified
31 MTDEV_CODE_TOUCH_MAJOR = 0x30 # Major axis of touching ellipse
32 MTDEV_CODE_TOUCH_MINOR = 0x31 # Minor axis (omit if circular)
33 MTDEV_CODE_WIDTH_MAJOR = 0x32 # Major axis of approaching ellipse
34 MTDEV_CODE_WIDTH_MINOR = 0x33 # Minor axis (omit if circular)
35 MTDEV_CODE_ORIENTATION = 0x34 # Ellipse orientation
36 MTDEV_CODE_POSITION_X = 0x35 # Center X ellipse position
37 MTDEV_CODE_POSITION_Y = 0x36 # Center Y ellipse position
38 MTDEV_CODE_TOOL_TYPE = 0x37 # Type of touching device
39 MTDEV_CODE_BLOB_ID = 0x38 # Group a set of packets as a blob
40 MTDEV_CODE_TRACKING_ID = 0x39 # Unique ID of initiated contact
41 MTDEV_CODE_PRESSURE = 0x3a # Pressure on contact area
42 MTDEV_CODE_ABS_X = 0x00
43 MTDEV_CODE_ABS_Y = 0x01
44 MTDEV_CODE_ABS_Z = 0x02
45 MTDEV_CODE_BTN_DIGI = 0x140
46 MTDEV_CODE_BTN_TOOL_PEN = 0x140
47 MTDEV_CODE_BTN_TOOL_RUBBER = 0x141
48 MTDEV_CODE_BTN_TOOL_BRUSH = 0x142
49 MTDEV_CODE_BTN_TOOL_PENCIL = 0x143
50 MTDEV_CODE_BTN_TOOL_AIRBRUSH = 0x144
51 MTDEV_CODE_BTN_TOOL_FINGER = 0x145
52 MTDEV_CODE_BTN_TOOL_MOUSE = 0x146
53 MTDEV_CODE_BTN_TOOL_LENS = 0x147
54 MTDEV_CODE_BTN_TOUCH = 0x14a
55 MTDEV_CODE_BTN_STYLUS = 0x14b
56 MTDEV_CODE_BTN_STYLUS2 = 0x14c
57 MTDEV_CODE_BTN_TOOL_DOUBLETAP = 0x14d
58 MTDEV_CODE_BTN_TOOL_TRIPLETAP = 0x14e
59 MTDEV_CODE_BTN_TOOL_QUADTAP = 0x14f # Four fingers on trackpad
60
61 MTDEV_TYPE_EV_ABS = 0x03
62 MTDEV_TYPE_EV_SYN = 0x00
63 MTDEV_TYPE_EV_KEY = 0x01
64 MTDEV_TYPE_EV_REL = 0x02
65 MTDEV_TYPE_EV_ABS = 0x03
66 MTDEV_TYPE_EV_MSC = 0x04
67 MTDEV_TYPE_EV_SW = 0x05
68 MTDEV_TYPE_EV_LED = 0x11
69 MTDEV_TYPE_EV_SND = 0x12
70 MTDEV_TYPE_EV_REP = 0x14
71 MTDEV_TYPE_EV_FF = 0x15
72 MTDEV_TYPE_EV_PWR = 0x16
73 MTDEV_TYPE_EV_FF_STATUS = 0x17
74
75 MTDEV_ABS_TRACKING_ID = 9
76 MTDEV_ABS_POSITION_X = 5
77 MTDEV_ABS_POSITION_Y = 6
78 MTDEV_ABS_TOUCH_MAJOR = 0
79 MTDEV_ABS_TOUCH_MINOR = 1
80 MTDEV_ABS_WIDTH_MAJOR = 2
81 MTDEV_ABS_WIDTH_MINOR = 3
82 MTDEV_ABS_ORIENTATION = 4
83 MTDEV_ABS_SIZE = 11
84
85 class timeval(Structure):
86 _fields_ = [
87 ('tv_sec', c_ulong),
88 ('tv_usec', c_ulong)
89 ]
90
91 class input_event(Structure):
92 _fields_ = [
93 ('time', timeval),
94 ('type', c_ushort),
95 ('code', c_ushort),
96 ('value', c_int)
97 ]
98
99 class input_absinfo(Structure):
100 _fields_ = [
101 ('value', c_int),
102 ('minimum', c_int),
103 ('maximum', c_int),
104 ('fuzz', c_int),
105 ('flat', c_int),
106 ('resolution', c_int)
107 ]
108
109 class mtdev_caps(Structure):
110 _fields_ = [
111 ('has_mtdata', c_int),
112 ('has_slot', c_int),
113 ('has_abs', c_int * MTDEV_ABS_SIZE),
114 ('slot', input_absinfo),
115 ('abs', input_absinfo * MTDEV_ABS_SIZE)
116 ]
117
118 class mtdev(Structure):
119 _fields_ = [
120 ('caps', mtdev_caps),
121 ('state', c_void_p)
122 ]
123
124 # binding
125 mtdev_open = libmtdev.mtdev_open
126 mtdev_open.argtypes = [POINTER(mtdev), c_int]
127 mtdev_get = libmtdev.mtdev_get
128 mtdev_get.argtypes = [POINTER(mtdev), c_int, POINTER(input_event), c_int]
129 mtdev_idle = libmtdev.mtdev_idle
130 mtdev_idle.argtypes = [POINTER(mtdev), c_int, c_int]
131 mtdev_close = libmtdev.mtdev_close
132 mtdev_close.argtypes = [POINTER(mtdev)]
133
134
135 class Device:
136 def __init__(self, filename):
137 self._filename = filename
138 self._fd = -1
139 self._device = mtdev()
140
141 self._fd = os.open(filename, os.O_NONBLOCK | os.O_RDONLY)
142 ret = mtdev_open(pointer(self._device), self._fd)
143 if ret != 0:
144 os.close(self._fd)
145 self._fd = -1
146 raise Exception('Unable to open device')
147
148 def close(self):
149 '''Close the mtdev converter
150 '''
151 if self._fd == -1:
152 return
153 mtdev_close(POINTER(self._device))
154 os.close(self._fd)
155 self._fd = -1
156
157 def idle(self, ms):
158 '''Check state of kernel device
159
160 :Parameters:
161 `ms` : int
162 Number of milliseconds to wait for activity
163
164 :Return:
165 Return True if the device is idle, i.e, there are no fetched events
166 in the pipe and there is nothing to fetch from the device.
167 '''
168 if self._fd == -1:
169 raise Exception('Device closed')
170 return bool(mtdev_idle(pointer(self._device), self._fd, ms))
171
172
173 def get(self):
174 if self._fd == -1:
175 raise Exception('Device closed')
176 ev = input_event()
177 if mtdev_get(pointer(self._device), self._fd, byref(ev), 1) <= 0:
178 return None
179 return ev
180
181 def has_mtdata(self):
182 '''Return True if the device has multitouch data.
183 '''
184 if self._fd == -1:
185 raise Exception('Device closed')
186 return bool(self._device.caps.has_mtdata)
187
188 def has_slot(self):
189 '''Return True if the device has slot information.
190 '''
191 if self._fd == -1:
192 raise Exception('Device closed')
193 return bool(self._device.caps.has_slot)
194
195 def has_abs(self, index):
196 '''Return True if the device has abs data.
197
198 :Parameters:
199 `index` : int
200 One of const starting with a name ABS_MT_
201 '''
202 if self._fd == -1:
203 raise Exception('Device closed')
204 if index < 0 or index >= MTDEV_ABS_SIZE:
205 raise IndexError('Invalid index')
206 return bool(self._device.caps.has_abs[index])
207
208 def get_max_abs(self):
209 '''Return the maximum number of abs information available.
210 '''
211 return MTDEV_ABS_SIZE
212
213 def get_slot(self):
214 '''Return the slot data.
215 '''
216 if self._fd == -1:
217 raise Exception('Device closed')
218 if self._device.caps.has_slot == 0:
219 return
220 return self._device.caps.slot
221
222 def get_abs(self, index):
223 '''Return the abs data.
224
225 :Parameters:
226 `index` : int
227 One of const starting with a name ABS_MT_
228 '''
229 if self._fd == -1:
230 raise Exception('Device closed')
231 if index < 0 or index >= MTDEV_ABS_SIZE:
232 raise IndexError('Invalid index')
233 return self._device.caps.abs[index]
234
235
236
[end of kivy/lib/mtdev.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
golden_diff:
diff --git a/kivy/lib/mtdev.py b/kivy/lib/mtdev.py
--- a/kivy/lib/mtdev.py
+++ b/kivy/lib/mtdev.py
@@ -24,7 +24,8 @@
c_void_p, pointer, POINTER, byref
# load library
-libmtdev = cdll.LoadLibrary('libmtdev.so.1')
+if 'KIVY_DOC' not in os.environ:
+ libmtdev = cdll.LoadLibrary('libmtdev.so.1')
# from linux/input.h
MTDEV_CODE_SLOT = 0x2f # MT slot being modified
@@ -122,14 +123,15 @@
]
# binding
-mtdev_open = libmtdev.mtdev_open
-mtdev_open.argtypes = [POINTER(mtdev), c_int]
-mtdev_get = libmtdev.mtdev_get
-mtdev_get.argtypes = [POINTER(mtdev), c_int, POINTER(input_event), c_int]
-mtdev_idle = libmtdev.mtdev_idle
-mtdev_idle.argtypes = [POINTER(mtdev), c_int, c_int]
-mtdev_close = libmtdev.mtdev_close
-mtdev_close.argtypes = [POINTER(mtdev)]
+if 'KIVY_DOC' not in os.environ:
+ mtdev_open = libmtdev.mtdev_open
+ mtdev_open.argtypes = [POINTER(mtdev), c_int]
+ mtdev_get = libmtdev.mtdev_get
+ mtdev_get.argtypes = [POINTER(mtdev), c_int, POINTER(input_event), c_int]
+ mtdev_idle = libmtdev.mtdev_idle
+ mtdev_idle.argtypes = [POINTER(mtdev), c_int, c_int]
+ mtdev_close = libmtdev.mtdev_close
+ mtdev_close.argtypes = [POINTER(mtdev)]
class Device:
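Editor's note on the diff above: the documentation build breaks because importing the module eagerly loads `libmtdev.so.1` through ctypes, which does not exist on a Windows doc builder, so the patch gates both the library load and the symbol bindings behind the `KIVY_DOC` environment flag. The snippet below is a hedged, generic sketch of that guard pattern; the flag name `MYPROJ_DOC`, the library name, and the bound symbol are placeholders invented for illustration.

```python
# Illustrative sketch only (not part of this dataset row or of Kivy).
import os
from ctypes import cdll, c_int, POINTER

libexample = None
example_open = None

# Skip native-library loading when the environment signals a docs-only build,
# so autodoc can import the module on machines without the shared library.
if 'MYPROJ_DOC' not in os.environ:
    libexample = cdll.LoadLibrary('libexample.so.1')  # raises OSError if absent
    example_open = libexample.example_open
    example_open.argtypes = [POINTER(c_int)]
    example_open.restype = c_int
```

Callers then have to tolerate these names being `None` during a documentation build, which is acceptable because nothing executes them there.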
verification_info:
{"golden_diff": "diff --git a/kivy/lib/mtdev.py b/kivy/lib/mtdev.py\n--- a/kivy/lib/mtdev.py\n+++ b/kivy/lib/mtdev.py\n@@ -24,7 +24,8 @@\n c_void_p, pointer, POINTER, byref\n \n # load library\n-libmtdev = cdll.LoadLibrary('libmtdev.so.1')\n+if 'KIVY_DOC' not in os.environ:\n+ libmtdev = cdll.LoadLibrary('libmtdev.so.1')\n \n # from linux/input.h\n MTDEV_CODE_SLOT = 0x2f # MT slot being modified\n@@ -122,14 +123,15 @@\n ]\n \n # binding\n-mtdev_open = libmtdev.mtdev_open\n-mtdev_open.argtypes = [POINTER(mtdev), c_int]\n-mtdev_get = libmtdev.mtdev_get\n-mtdev_get.argtypes = [POINTER(mtdev), c_int, POINTER(input_event), c_int]\n-mtdev_idle = libmtdev.mtdev_idle\n-mtdev_idle.argtypes = [POINTER(mtdev), c_int, c_int]\n-mtdev_close = libmtdev.mtdev_close\n-mtdev_close.argtypes = [POINTER(mtdev)]\n+if 'KIVY_DOC' not in os.environ:\n+ mtdev_open = libmtdev.mtdev_open\n+ mtdev_open.argtypes = [POINTER(mtdev), c_int]\n+ mtdev_get = libmtdev.mtdev_get\n+ mtdev_get.argtypes = [POINTER(mtdev), c_int, POINTER(input_event), c_int]\n+ mtdev_idle = libmtdev.mtdev_idle\n+ mtdev_idle.argtypes = [POINTER(mtdev), c_int, c_int]\n+ mtdev_close = libmtdev.mtdev_close\n+ mtdev_close.argtypes = [POINTER(mtdev)]\n \n \n class Device:\n", "issue": "Doc build fails on Windows\nThe first error is not shown, if the `build` folder is created manually.\n\n``` cmd\nmake[1]: Entering directory `C:/dev/kivy/doc'\nmkdir build\\\\html build\\\\doctrees\nmkdir: cannot create directory `build\\\\html': No such file or directory\nmkdir: cannot create directory `build\\\\doctrees': No such file or directory\nmake[1]: *** [html] Error 1\nmake[1]: Leaving directory `C:/dev/kivy/doc'\nmake: *** [html] Error 2\n```\n\nAnd at the end of the build:\n\n``` cmd\ncopying static files... done\ncopying extra files... done\ndumping search index... done\ndumping object inventory... done\nbuild succeeded.\nsh: C:\\Program: No such file or directory\nmake[1]: *** [html] Error 127\nmake[1]: Leaving directory `C:/dev/kivy/doc'\nmake: *** [html] Error 2\n```\n## <bountysource-plugin>\n\nWant to back this issue? **[Post a bounty on it!](https://www.bountysource.com/issues/5410566-doc-build-fails-on-windows?utm_campaign=plugin&utm_content=tracker%2F42681&utm_medium=issues&utm_source=github)** We accept bounties via [Bountysource](https://www.bountysource.com/?utm_campaign=plugin&utm_content=tracker%2F42681&utm_medium=issues&utm_source=github).\n</bountysource-plugin>\n\n", "before_files": [{"content": "'''\nPython mtdev\n============\n\nThe mtdev module provides Python bindings to the `Kernel multi-touch\ntransformation library <https://launchpad.net/mtdev>`_, also known as mtdev\n(MIT license).\n\nThe mtdev library transforms all variants of kernel MT events to the\nslotted type B protocol. The events put into mtdev may be from any MT\ndevice, specifically type A without contact tracking, type A with\ncontact tracking, or type B with contact tracking. See the kernel\ndocumentation for further details.\n\n.. 
warning::\n\n This is an external library and Kivy does not provide any support for it.\n It might change in the future and we advise you don't rely on it in your\n code.\n'''\n\nimport os\nfrom ctypes import cdll, Structure, c_ulong, c_int, c_ushort, \\\n c_void_p, pointer, POINTER, byref\n\n# load library\nlibmtdev = cdll.LoadLibrary('libmtdev.so.1')\n\n# from linux/input.h\nMTDEV_CODE_SLOT = 0x2f # MT slot being modified\nMTDEV_CODE_TOUCH_MAJOR = 0x30 # Major axis of touching ellipse\nMTDEV_CODE_TOUCH_MINOR = 0x31 # Minor axis (omit if circular)\nMTDEV_CODE_WIDTH_MAJOR = 0x32 # Major axis of approaching ellipse\nMTDEV_CODE_WIDTH_MINOR = 0x33 # Minor axis (omit if circular)\nMTDEV_CODE_ORIENTATION = 0x34 # Ellipse orientation\nMTDEV_CODE_POSITION_X = 0x35 # Center X ellipse position\nMTDEV_CODE_POSITION_Y = 0x36 # Center Y ellipse position\nMTDEV_CODE_TOOL_TYPE = 0x37 # Type of touching device\nMTDEV_CODE_BLOB_ID = 0x38 # Group a set of packets as a blob\nMTDEV_CODE_TRACKING_ID = 0x39 # Unique ID of initiated contact\nMTDEV_CODE_PRESSURE = 0x3a # Pressure on contact area\nMTDEV_CODE_ABS_X\t\t = 0x00\nMTDEV_CODE_ABS_Y\t\t = 0x01\nMTDEV_CODE_ABS_Z\t\t = 0x02\nMTDEV_CODE_BTN_DIGI\t\t = 0x140\nMTDEV_CODE_BTN_TOOL_PEN\t\t = 0x140\nMTDEV_CODE_BTN_TOOL_RUBBER\t\t= 0x141\nMTDEV_CODE_BTN_TOOL_BRUSH\t\t= 0x142\nMTDEV_CODE_BTN_TOOL_PENCIL\t\t= 0x143\nMTDEV_CODE_BTN_TOOL_AIRBRUSH\t= 0x144\nMTDEV_CODE_BTN_TOOL_FINGER\t\t= 0x145\nMTDEV_CODE_BTN_TOOL_MOUSE\t\t= 0x146\nMTDEV_CODE_BTN_TOOL_LENS\t\t= 0x147\nMTDEV_CODE_BTN_TOUCH\t\t = 0x14a\nMTDEV_CODE_BTN_STYLUS\t\t = 0x14b\nMTDEV_CODE_BTN_STYLUS2\t\t = 0x14c\nMTDEV_CODE_BTN_TOOL_DOUBLETAP\t= 0x14d\nMTDEV_CODE_BTN_TOOL_TRIPLETAP\t= 0x14e\nMTDEV_CODE_BTN_TOOL_QUADTAP\t = 0x14f\t# Four fingers on trackpad\n\nMTDEV_TYPE_EV_ABS = 0x03\nMTDEV_TYPE_EV_SYN = 0x00\nMTDEV_TYPE_EV_KEY = 0x01\nMTDEV_TYPE_EV_REL = 0x02\nMTDEV_TYPE_EV_ABS = 0x03\nMTDEV_TYPE_EV_MSC = 0x04\nMTDEV_TYPE_EV_SW = 0x05\nMTDEV_TYPE_EV_LED = 0x11\nMTDEV_TYPE_EV_SND = 0x12\nMTDEV_TYPE_EV_REP = 0x14\nMTDEV_TYPE_EV_FF = 0x15\nMTDEV_TYPE_EV_PWR = 0x16\nMTDEV_TYPE_EV_FF_STATUS = 0x17\n\nMTDEV_ABS_TRACKING_ID\t= 9\nMTDEV_ABS_POSITION_X\t= 5\nMTDEV_ABS_POSITION_Y\t= 6\nMTDEV_ABS_TOUCH_MAJOR\t= 0\nMTDEV_ABS_TOUCH_MINOR\t= 1\nMTDEV_ABS_WIDTH_MAJOR\t= 2\nMTDEV_ABS_WIDTH_MINOR\t= 3\nMTDEV_ABS_ORIENTATION\t= 4\nMTDEV_ABS_SIZE = 11\n\nclass timeval(Structure):\n _fields_ = [\n ('tv_sec', c_ulong),\n ('tv_usec', c_ulong)\n ]\n\nclass input_event(Structure):\n _fields_ = [\n ('time', timeval),\n ('type', c_ushort),\n ('code', c_ushort),\n ('value', c_int)\n ]\n\nclass input_absinfo(Structure):\n _fields_ = [\n ('value', c_int),\n ('minimum', c_int),\n ('maximum', c_int),\n ('fuzz', c_int),\n ('flat', c_int),\n ('resolution', c_int)\n ]\n\nclass mtdev_caps(Structure):\n _fields_ = [\n ('has_mtdata', c_int),\n ('has_slot', c_int),\n ('has_abs', c_int * MTDEV_ABS_SIZE),\n ('slot', input_absinfo),\n ('abs', input_absinfo * MTDEV_ABS_SIZE)\n ]\n\nclass mtdev(Structure):\n _fields_ = [\n ('caps', mtdev_caps),\n ('state', c_void_p)\n ]\n\n# binding\nmtdev_open = libmtdev.mtdev_open\nmtdev_open.argtypes = [POINTER(mtdev), c_int]\nmtdev_get = libmtdev.mtdev_get\nmtdev_get.argtypes = [POINTER(mtdev), c_int, POINTER(input_event), c_int]\nmtdev_idle = libmtdev.mtdev_idle\nmtdev_idle.argtypes = [POINTER(mtdev), c_int, c_int]\nmtdev_close = libmtdev.mtdev_close\nmtdev_close.argtypes = [POINTER(mtdev)]\n\n\nclass Device:\n def __init__(self, filename):\n self._filename = filename\n self._fd = -1\n self._device = 
mtdev()\n\n self._fd = os.open(filename, os.O_NONBLOCK | os.O_RDONLY)\n ret = mtdev_open(pointer(self._device), self._fd)\n if ret != 0:\n os.close(self._fd)\n self._fd = -1\n raise Exception('Unable to open device')\n\n def close(self):\n '''Close the mtdev converter\n '''\n if self._fd == -1:\n return\n mtdev_close(POINTER(self._device))\n os.close(self._fd)\n self._fd = -1\n\n def idle(self, ms):\n '''Check state of kernel device\n \n :Parameters:\n `ms` : int\n Number of milliseconds to wait for activity\n\n :Return:\n Return True if the device is idle, i.e, there are no fetched events\n in the pipe and there is nothing to fetch from the device.\n '''\n if self._fd == -1:\n raise Exception('Device closed')\n return bool(mtdev_idle(pointer(self._device), self._fd, ms))\n\n\n def get(self):\n if self._fd == -1:\n raise Exception('Device closed')\n ev = input_event()\n if mtdev_get(pointer(self._device), self._fd, byref(ev), 1) <= 0:\n return None\n return ev\n\n def has_mtdata(self):\n '''Return True if the device has multitouch data.\n '''\n if self._fd == -1:\n raise Exception('Device closed')\n return bool(self._device.caps.has_mtdata)\n\n def has_slot(self):\n '''Return True if the device has slot information.\n '''\n if self._fd == -1:\n raise Exception('Device closed')\n return bool(self._device.caps.has_slot)\n\n def has_abs(self, index):\n '''Return True if the device has abs data.\n\n :Parameters:\n `index` : int\n One of const starting with a name ABS_MT_\n '''\n if self._fd == -1:\n raise Exception('Device closed')\n if index < 0 or index >= MTDEV_ABS_SIZE:\n raise IndexError('Invalid index')\n return bool(self._device.caps.has_abs[index])\n\n def get_max_abs(self):\n '''Return the maximum number of abs information available.\n '''\n return MTDEV_ABS_SIZE\n\n def get_slot(self):\n '''Return the slot data.\n '''\n if self._fd == -1:\n raise Exception('Device closed')\n if self._device.caps.has_slot == 0:\n return\n return self._device.caps.slot\n\n def get_abs(self, index):\n '''Return the abs data.\n\n :Parameters:\n `index` : int\n One of const starting with a name ABS_MT_\n '''\n if self._fd == -1:\n raise Exception('Device closed')\n if index < 0 or index >= MTDEV_ABS_SIZE:\n raise IndexError('Invalid index')\n return self._device.caps.abs[index]\n\n\n", "path": "kivy/lib/mtdev.py"}]}
| 3,543 | 433 |
gh_patches_debug_22024
|
rasdani/github-patches
|
git_diff
|
RedHatInsights__insights-core-2476
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Dependencies 'is_aws'/'is_azure' doesn't work on non-aws/non-azure system
The `insights.specs.default.DefaultSpecs.aws_instance_type` parser got the following result incorrectly from non-aws system:
```
# cat /tmp/insights-xxx-20200216034411/insights_commands/python_-m_insights.tools.cat_--no-header_aws_instance_type
Missing Dependencies:
At Least One Of:
insights.specs.default.DefaultSpecs.aws_instance_type
insights.specs.insights_archive.InsightsArchiveSpecs.aws_instance_type
```
Then the `insights.parsers.aws_instance_type.AWSInstanceType` parser returns `.type` as "INSIGHTS", for all non-aws systems, which result some rules got fired incorrectly.
In fact, this spec `aws_instance_type` shouldn't be collected at all since it depends on `is_aws`:
https://github.com/RedHatInsights/insights-core/blob/0db5e8900507a6a60b58ac458a11644a8871306a/insights/specs/default.py#L100-L109
And the `is_aws` is skipped correctly:
```
# insights-cat insights.specs.default.DefaultSpecs.is_aws /tmp/insights-xxx-20200216034411
[Empty output]
```
And the dependency check works well:
```
# insights-cat insights.specs.default.DefaultSpecs.aws_instance_type /tmp/insights-xxx-20200216034411
Missing Dependencies:
Requires:
insights.core.context.HostContext
insights.specs.default.DefaultSpecs.is_aws
```
However, the spec is still collected by the client.
```
# cat /tmp/insights-xxx-20200216034411/insights_commands/python_-m_insights.tools.cat_--no-header_aws_instance_type
Missing Dependencies:
At Least One Of:
insights.specs.default.DefaultSpecs.aws_instance_type
insights.specs.insights_archive.InsightsArchiveSpecs.aws_instance_type
```
It shouldn't be collected, right?
BTW, the `azure_*` and the other `aws_*` specs are also affected by this issue.
</issue>
<code>
[start of insights/client/archive.py]
1 """
2 Handle adding files and preparing the archive for upload
3 """
4 from __future__ import absolute_import
5 import time
6 import os
7 import shutil
8 import subprocess
9 import shlex
10 import logging
11 import tempfile
12 import re
13 import atexit
14
15 from .utilities import determine_hostname, _expand_paths, write_data_to_file
16 from .insights_spec import InsightsFile, InsightsCommand
17
18 logger = logging.getLogger(__name__)
19
20
21 class InsightsArchive(object):
22 """
23 This class is an interface for adding command output
24 and files to the insights archive
25 """
26 def __init__(self, config):
27 """
28 Initialize the Insights Archive
29 Create temp dir, archive dir, and command dir
30 """
31 self.config = config
32 self.tmp_dir = tempfile.mkdtemp(prefix='/var/tmp/')
33 if not self.config.obfuscate:
34 self.archive_tmp_dir = tempfile.mkdtemp(prefix='/var/tmp/')
35 name = determine_hostname()
36 self.archive_name = ("insights-%s-%s" %
37 (name,
38 time.strftime("%Y%m%d%H%M%S")))
39 self.archive_dir = self.create_archive_dir()
40 self.cmd_dir = self.create_command_dir()
41 self.compressor = 'gz' or config.compressor
42 self.tar_file = None
43 atexit.register(self.cleanup_tmp)
44
45 def create_archive_dir(self):
46 """
47 Create the archive dir
48 """
49 archive_dir = os.path.join(self.tmp_dir, self.archive_name)
50 os.makedirs(archive_dir, 0o700)
51 return archive_dir
52
53 def create_command_dir(self):
54 """
55 Create the "sos_commands" dir
56 """
57 cmd_dir = os.path.join(self.archive_dir, "insights_commands")
58 os.makedirs(cmd_dir, 0o700)
59 return cmd_dir
60
61 def get_full_archive_path(self, path):
62 """
63 Returns the full archive path
64 """
65 return os.path.join(self.archive_dir, path.lstrip('/'))
66
67 def _copy_file(self, path):
68 """
69 Copy just a single file
70 """
71 full_path = self.get_full_archive_path(path)
72 # Try to make the dir, eat exception if it fails
73 try:
74 os.makedirs(os.path.dirname(full_path))
75 except OSError:
76 pass
77 logger.debug("Copying %s to %s", path, full_path)
78 shutil.copyfile(path, full_path)
79 return path
80
81 def copy_file(self, path):
82 """
83 Copy a single file or regex, creating the necessary directories
84 """
85 if "*" in path:
86 paths = _expand_paths(path)
87 if paths:
88 for path in paths:
89 self._copy_file(path)
90 else:
91 if os.path.isfile(path):
92 return self._copy_file(path)
93 else:
94 logger.debug("File %s does not exist", path)
95 return False
96
97 def copy_dir(self, path):
98 """
99 Recursively copy directory
100 """
101 for directory in path:
102 if os.path.isdir(path):
103 full_path = os.path.join(self.archive_dir, directory.lstrip('/'))
104 logger.debug("Copying %s to %s", directory, full_path)
105 shutil.copytree(directory, full_path)
106 else:
107 logger.debug("Not a directory: %s", directory)
108 return path
109
110 def get_compression_flag(self, compressor):
111 return {
112 "gz": "z",
113 "xz": "J",
114 "bz2": "j",
115 "none": ""
116 }.get(compressor, "z")
117
118 def create_tar_file(self):
119 """
120 Create tar file to be compressed
121 """
122 tar_file_name = os.path.join(self.archive_tmp_dir, self.archive_name)
123 ext = "" if self.compressor == "none" else ".%s" % self.compressor
124 tar_file_name = tar_file_name + ".tar" + ext
125 logger.debug("Tar File: " + tar_file_name)
126 if self.compressor not in ["gz", "xz", "bz2", "none"]:
127 logger.error("The compressor %s is not supported. Using default: gz", self.compressor)
128 return_code = subprocess.call(shlex.split("tar c%sfS %s -C %s ." % (
129 self.get_compression_flag(self.compressor),
130 tar_file_name, self.tmp_dir)),
131 stderr=subprocess.PIPE)
132 if (self.compressor in ["bz2", "xz"] and return_code != 0):
133 logger.error("ERROR: %s compressor is not installed, cannot compress file", self.compressor)
134 return None
135 self.delete_archive_dir()
136 logger.debug("Tar File Size: %s", str(os.path.getsize(tar_file_name)))
137 self.tar_file = tar_file_name
138 return tar_file_name
139
140 def delete_tmp_dir(self):
141 """
142 Delete the entire tmp dir
143 """
144 logger.debug("Deleting: " + self.tmp_dir)
145 shutil.rmtree(self.tmp_dir, True)
146
147 def delete_archive_dir(self):
148 """
149 Delete the entire archive dir
150 """
151 logger.debug("Deleting: " + self.archive_dir)
152 shutil.rmtree(self.archive_dir, True)
153
154 def delete_archive_file(self):
155 """
156 Delete the directory containing the constructed archive
157 """
158 logger.debug("Deleting %s", self.archive_tmp_dir)
159 shutil.rmtree(self.archive_tmp_dir, True)
160
161 def add_to_archive(self, spec):
162 '''
163 Add files and commands to archive
164 Use InsightsSpec.get_output() to get data
165 '''
166 cmd_not_found_regex = "^timeout: failed to run command .+: No such file or directory$"
167 if isinstance(spec, InsightsCommand):
168 archive_path = os.path.join(self.cmd_dir, spec.archive_path.lstrip('/'))
169 if isinstance(spec, InsightsFile):
170 archive_path = self.get_full_archive_path(spec.archive_path.lstrip('/'))
171 output = spec.get_output()
172 if output and not re.search(cmd_not_found_regex, output):
173 write_data_to_file(output, archive_path)
174
175 def add_metadata_to_archive(self, metadata, meta_path):
176 '''
177 Add metadata to archive
178 '''
179 archive_path = self.get_full_archive_path(meta_path.lstrip('/'))
180 write_data_to_file(metadata, archive_path)
181
182 def cleanup_tmp(self):
183 '''
184 Only used during built-in collection.
185 Delete archive and tmp dirs on exit.
186 '''
187 if self.config.keep_archive:
188 if self.config.no_upload:
189 logger.info('Archive saved at %s', self.tar_file)
190 else:
191 logger.info('Insights archive retained in %s', self.tar_file)
192 if self.config.obfuscate:
193 return # return before deleting tmp_dir
194 else:
195 self.delete_archive_file()
196 self.delete_tmp_dir()
197
[end of insights/client/archive.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
golden_diff:
diff --git a/insights/client/archive.py b/insights/client/archive.py
--- a/insights/client/archive.py
+++ b/insights/client/archive.py
@@ -163,13 +163,16 @@
Add files and commands to archive
Use InsightsSpec.get_output() to get data
'''
- cmd_not_found_regex = "^timeout: failed to run command .+: No such file or directory$"
+ ab_regex = [
+ "^timeout: failed to run command .+: No such file or directory$",
+ "^Missing Dependencies:"
+ ]
if isinstance(spec, InsightsCommand):
archive_path = os.path.join(self.cmd_dir, spec.archive_path.lstrip('/'))
if isinstance(spec, InsightsFile):
archive_path = self.get_full_archive_path(spec.archive_path.lstrip('/'))
output = spec.get_output()
- if output and not re.search(cmd_not_found_regex, output):
+ if output and not any(re.search(rg, output) for rg in ab_regex):
write_data_to_file(output, archive_path)
def add_metadata_to_archive(self, metadata, meta_path):
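Editor's note on the diff above: the client was writing the "Missing Dependencies:" placeholder output of skipped specs into the archive, so downstream parsers treated it as real data; the fix screens command output against a short list of known bad patterns before archiving. A hedged standalone sketch of that filter is below; the function name and the sample strings are invented for illustration and are not part of insights-core.

```python
# Illustrative sketch only (not part of this dataset row or of insights-core).
import re

BAD_OUTPUT_PATTERNS = [
    r"^timeout: failed to run command .+: No such file or directory$",
    r"^Missing Dependencies:",
]


def should_archive(output):
    """Archive output only if it is non-empty and matches no bad pattern."""
    return bool(output) and not any(
        re.search(pattern, output) for pattern in BAD_OUTPUT_PATTERNS)


if __name__ == '__main__':
    print(should_archive("Missing Dependencies:\n    At Least One Of: ..."))  # False
    print(should_archive("x86_64\n"))                                         # True
```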
verification_info:
{"golden_diff": "diff --git a/insights/client/archive.py b/insights/client/archive.py\n--- a/insights/client/archive.py\n+++ b/insights/client/archive.py\n@@ -163,13 +163,16 @@\n Add files and commands to archive\n Use InsightsSpec.get_output() to get data\n '''\n- cmd_not_found_regex = \"^timeout: failed to run command .+: No such file or directory$\"\n+ ab_regex = [\n+ \"^timeout: failed to run command .+: No such file or directory$\",\n+ \"^Missing Dependencies:\"\n+ ]\n if isinstance(spec, InsightsCommand):\n archive_path = os.path.join(self.cmd_dir, spec.archive_path.lstrip('/'))\n if isinstance(spec, InsightsFile):\n archive_path = self.get_full_archive_path(spec.archive_path.lstrip('/'))\n output = spec.get_output()\n- if output and not re.search(cmd_not_found_regex, output):\n+ if output and not any(re.search(rg, output) for rg in ab_regex):\n write_data_to_file(output, archive_path)\n \n def add_metadata_to_archive(self, metadata, meta_path):\n", "issue": "Dependencies 'is_aws'/'is_azure' doesn't work on non-aws/non-azure system\nThe `insights.specs.default.DefaultSpecs.aws_instance_type` parser got the following result incorrectly from non-aws system:\r\n\r\n```\r\n# cat /tmp/insights-xxx-20200216034411/insights_commands/python_-m_insights.tools.cat_--no-header_aws_instance_type\r\nMissing Dependencies:\r\n At Least One Of:\r\n insights.specs.default.DefaultSpecs.aws_instance_type\r\n insights.specs.insights_archive.InsightsArchiveSpecs.aws_instance_type\r\n```\r\nThen the `insights.parsers.aws_instance_type.AWSInstanceType` parser returns `.type` as \"INSIGHTS\", for all non-aws systems, which result some rules got fired incorrectly.\r\n\r\nIn fact, this spec `aws_instance_type` shouldn't be collected at all since it depends on `is_aws`:\r\nhttps://github.com/RedHatInsights/insights-core/blob/0db5e8900507a6a60b58ac458a11644a8871306a/insights/specs/default.py#L100-L109\r\n\r\nAnd the `is_aws` is skipped correctly:\r\n```\r\n# insights-cat insights.specs.default.DefaultSpecs.is_aws /tmp/insights-xxx-20200216034411\r\n[Empty output]\r\n```\r\n\r\nAnd the dependency check works well:\r\n```\r\n# insights-cat insights.specs.default.DefaultSpecs.aws_instance_type /tmp/insights-xxx-20200216034411 \r\nMissing Dependencies:\r\n Requires:\r\n insights.core.context.HostContext\r\n insights.specs.default.DefaultSpecs.is_aws\r\n```\r\nHowever, the spec is still collected by the client.\r\n```\r\n# cat /tmp/insights-xxx-20200216034411/insights_commands/python_-m_insights.tools.cat_--no-header_aws_instance_type\r\nMissing Dependencies:\r\n At Least One Of:\r\n insights.specs.default.DefaultSpecs.aws_instance_type\r\n insights.specs.insights_archive.InsightsArchiveSpecs.aws_instance_type\r\n```\r\n\r\nIt shouldn't be collected, right?\r\n\r\nBTW, the `azure_*` and the other `aws_*` specs are also affected by this issue.\r\n\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nHandle adding files and preparing the archive for upload\n\"\"\"\nfrom __future__ import absolute_import\nimport time\nimport os\nimport shutil\nimport subprocess\nimport shlex\nimport logging\nimport tempfile\nimport re\nimport atexit\n\nfrom .utilities import determine_hostname, _expand_paths, write_data_to_file\nfrom .insights_spec import InsightsFile, InsightsCommand\n\nlogger = logging.getLogger(__name__)\n\n\nclass InsightsArchive(object):\n \"\"\"\n This class is an interface for adding command output\n and files to the insights archive\n \"\"\"\n def __init__(self, config):\n \"\"\"\n Initialize the Insights Archive\n 
Create temp dir, archive dir, and command dir\n \"\"\"\n self.config = config\n self.tmp_dir = tempfile.mkdtemp(prefix='/var/tmp/')\n if not self.config.obfuscate:\n self.archive_tmp_dir = tempfile.mkdtemp(prefix='/var/tmp/')\n name = determine_hostname()\n self.archive_name = (\"insights-%s-%s\" %\n (name,\n time.strftime(\"%Y%m%d%H%M%S\")))\n self.archive_dir = self.create_archive_dir()\n self.cmd_dir = self.create_command_dir()\n self.compressor = 'gz' or config.compressor\n self.tar_file = None\n atexit.register(self.cleanup_tmp)\n\n def create_archive_dir(self):\n \"\"\"\n Create the archive dir\n \"\"\"\n archive_dir = os.path.join(self.tmp_dir, self.archive_name)\n os.makedirs(archive_dir, 0o700)\n return archive_dir\n\n def create_command_dir(self):\n \"\"\"\n Create the \"sos_commands\" dir\n \"\"\"\n cmd_dir = os.path.join(self.archive_dir, \"insights_commands\")\n os.makedirs(cmd_dir, 0o700)\n return cmd_dir\n\n def get_full_archive_path(self, path):\n \"\"\"\n Returns the full archive path\n \"\"\"\n return os.path.join(self.archive_dir, path.lstrip('/'))\n\n def _copy_file(self, path):\n \"\"\"\n Copy just a single file\n \"\"\"\n full_path = self.get_full_archive_path(path)\n # Try to make the dir, eat exception if it fails\n try:\n os.makedirs(os.path.dirname(full_path))\n except OSError:\n pass\n logger.debug(\"Copying %s to %s\", path, full_path)\n shutil.copyfile(path, full_path)\n return path\n\n def copy_file(self, path):\n \"\"\"\n Copy a single file or regex, creating the necessary directories\n \"\"\"\n if \"*\" in path:\n paths = _expand_paths(path)\n if paths:\n for path in paths:\n self._copy_file(path)\n else:\n if os.path.isfile(path):\n return self._copy_file(path)\n else:\n logger.debug(\"File %s does not exist\", path)\n return False\n\n def copy_dir(self, path):\n \"\"\"\n Recursively copy directory\n \"\"\"\n for directory in path:\n if os.path.isdir(path):\n full_path = os.path.join(self.archive_dir, directory.lstrip('/'))\n logger.debug(\"Copying %s to %s\", directory, full_path)\n shutil.copytree(directory, full_path)\n else:\n logger.debug(\"Not a directory: %s\", directory)\n return path\n\n def get_compression_flag(self, compressor):\n return {\n \"gz\": \"z\",\n \"xz\": \"J\",\n \"bz2\": \"j\",\n \"none\": \"\"\n }.get(compressor, \"z\")\n\n def create_tar_file(self):\n \"\"\"\n Create tar file to be compressed\n \"\"\"\n tar_file_name = os.path.join(self.archive_tmp_dir, self.archive_name)\n ext = \"\" if self.compressor == \"none\" else \".%s\" % self.compressor\n tar_file_name = tar_file_name + \".tar\" + ext\n logger.debug(\"Tar File: \" + tar_file_name)\n if self.compressor not in [\"gz\", \"xz\", \"bz2\", \"none\"]:\n logger.error(\"The compressor %s is not supported. 
Using default: gz\", self.compressor)\n return_code = subprocess.call(shlex.split(\"tar c%sfS %s -C %s .\" % (\n self.get_compression_flag(self.compressor),\n tar_file_name, self.tmp_dir)),\n stderr=subprocess.PIPE)\n if (self.compressor in [\"bz2\", \"xz\"] and return_code != 0):\n logger.error(\"ERROR: %s compressor is not installed, cannot compress file\", self.compressor)\n return None\n self.delete_archive_dir()\n logger.debug(\"Tar File Size: %s\", str(os.path.getsize(tar_file_name)))\n self.tar_file = tar_file_name\n return tar_file_name\n\n def delete_tmp_dir(self):\n \"\"\"\n Delete the entire tmp dir\n \"\"\"\n logger.debug(\"Deleting: \" + self.tmp_dir)\n shutil.rmtree(self.tmp_dir, True)\n\n def delete_archive_dir(self):\n \"\"\"\n Delete the entire archive dir\n \"\"\"\n logger.debug(\"Deleting: \" + self.archive_dir)\n shutil.rmtree(self.archive_dir, True)\n\n def delete_archive_file(self):\n \"\"\"\n Delete the directory containing the constructed archive\n \"\"\"\n logger.debug(\"Deleting %s\", self.archive_tmp_dir)\n shutil.rmtree(self.archive_tmp_dir, True)\n\n def add_to_archive(self, spec):\n '''\n Add files and commands to archive\n Use InsightsSpec.get_output() to get data\n '''\n cmd_not_found_regex = \"^timeout: failed to run command .+: No such file or directory$\"\n if isinstance(spec, InsightsCommand):\n archive_path = os.path.join(self.cmd_dir, spec.archive_path.lstrip('/'))\n if isinstance(spec, InsightsFile):\n archive_path = self.get_full_archive_path(spec.archive_path.lstrip('/'))\n output = spec.get_output()\n if output and not re.search(cmd_not_found_regex, output):\n write_data_to_file(output, archive_path)\n\n def add_metadata_to_archive(self, metadata, meta_path):\n '''\n Add metadata to archive\n '''\n archive_path = self.get_full_archive_path(meta_path.lstrip('/'))\n write_data_to_file(metadata, archive_path)\n\n def cleanup_tmp(self):\n '''\n Only used during built-in collection.\n Delete archive and tmp dirs on exit.\n '''\n if self.config.keep_archive:\n if self.config.no_upload:\n logger.info('Archive saved at %s', self.tar_file)\n else:\n logger.info('Insights archive retained in %s', self.tar_file)\n if self.config.obfuscate:\n return # return before deleting tmp_dir\n else:\n self.delete_archive_file()\n self.delete_tmp_dir()\n", "path": "insights/client/archive.py"}]}
| 2,976 | 245 |
gh_patches_debug_41867
|
rasdani/github-patches
|
git_diff
|
hpcaitech__ColossalAI-2974
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG]: LoRA does not support the training of reward models
### 🐛 Describe the bug

### Environment
### env
OS:ubuntu 20.04
GPU:4 x A10
python==3.9.0
torch==1.13.1-cu116
colossalai==0.2.5
### command
`python train_reward_model.py --pretrain "bigscience/bloom-560m" --lora_rank 16`
[tensor] fix some unittests
[tensor] fix some unittests
</issue>
<code>
[start of applications/ChatGPT/chatgpt/trainer/rm.py]
1 from abc import ABC
2
3 import loralib as lora
4 import torch
5 from chatgpt.dataset import RewardDataset
6 from chatgpt.nn import PairWiseLoss
7 from torch.optim import Adam, Optimizer
8 from torch.utils.data import DataLoader
9 from tqdm import tqdm
10
11 from .strategies import Strategy
12 from .utils import is_rank_0
13
14
15 class RewardModelTrainer(ABC):
16 """
17 Trainer to use while training reward model.
18
19 Args:
20 model (torch.nn.Module): the model to train
21 strategy (Strategy): the strategy to use for training
22 optim(Optimizer): the optimizer to use for training
23 train_dataset (RewardDataset): the dataset to use for training
24 eval_dataset (RewardDataset): the dataset to use for evaluation
25 batch_size (int, defaults to 1): the batch size while training
26 max_epochs (int, defaults to 2): the number of epochs to train
27 optim_kwargs (dict, defaults to {'lr':1e-4}): the kwargs to use while initializing optimizer
28 """
29
30 def __init__(
31 self,
32 model,
33 strategy: Strategy,
34 optim: Optimizer,
35 train_dataset: RewardDataset,
36 eval_dataset: RewardDataset,
37 batch_size: int = 1,
38 max_epochs: int = 2,
39 ) -> None:
40 super().__init__()
41 self.strategy = strategy
42 self.epochs = max_epochs
43 self.train_dataloader = DataLoader(train_dataset, batch_size=batch_size)
44 self.eval_dataloader = DataLoader(eval_dataset, batch_size=batch_size)
45
46 self.model = strategy.setup_model(model)
47 self.loss_fn = PairWiseLoss()
48 self.optimizer = strategy.setup_optimizer(optim, self.model)
49
50 def fit(self, use_lora):
51 epoch_bar = tqdm(range(self.epochs), desc='Train epoch', disable=not is_rank_0())
52 for epoch in range(self.epochs):
53 step_bar = tqdm(range(self.train_dataloader.__len__()),
54 desc='Train step of epoch %d' % epoch,
55 disable=not is_rank_0())
56 # train
57 if use_lora > 0:
58 print("Using Lora")
59 lora.mark_only_lora_as_trainable(self.model.body)
60
61 else:
62 self.model.train()
63 for chosen_ids, c_mask, reject_ids, r_mask in self.train_dataloader:
64 chosen_ids = chosen_ids.squeeze(1).cuda()
65 c_mask = c_mask.squeeze(1).cuda()
66 reject_ids = reject_ids.squeeze(1).cuda()
67 r_mask = r_mask.squeeze(1).cuda()
68 chosen_reward = self.model(chosen_ids, attention_mask=c_mask)
69 reject_reward = self.model(reject_ids, attention_mask=r_mask)
70 loss = self.loss_fn(chosen_reward, reject_reward)
71 self.strategy.backward(loss, self.model, self.optimizer)
72 self.strategy.optimizer_step(self.optimizer)
73 self.optimizer.zero_grad()
74 step_bar.update()
75 step_bar.set_postfix({'loss': loss.item()})
76
77 # eval
78 self.model.eval()
79 with torch.no_grad():
80 dist = 0
81 loss_sum = 0
82 for chosen_ids, c_mask, reject_ids, r_mask in self.eval_dataloader:
83 chosen_ids = chosen_ids.squeeze(1).cuda()
84 c_mask = c_mask.squeeze(1).cuda()
85 reject_ids = reject_ids.squeeze(1).cuda()
86 r_mask = r_mask.squeeze(1).cuda()
87 chosen_reward = self.model(chosen_ids, attention_mask=c_mask)
88 reject_reward = self.model(reject_ids, attention_mask=r_mask)
89 dist += (chosen_reward - reject_reward).mean().item()
90 loss = self.loss_fn(chosen_reward, reject_reward)
91 loss_sum += loss.item()
92 dist_mean = dist / self.eval_dataloader.__len__()
93 loss_mean = loss_sum / self.eval_dataloader.__len__()
94 epoch_bar.update()
95 step_bar.set_postfix({'loss': loss_mean, 'dist_mean': dist_mean})
96 step_bar.close()
97
[end of applications/ChatGPT/chatgpt/trainer/rm.py]
[start of applications/ChatGPT/chatgpt/nn/reward_model.py]
1 from typing import Optional
2
3 import torch
4 import torch.nn as nn
5
6 from .lora import LoRAModule
7
8
9 class RewardModel(LoRAModule):
10 """
11 Reward model base class.
12
13 Args:
14 model (nn.Module): Reward model.
15 value_head (nn.Module): Value head to get reward score.
16 lora_rank (int): LoRA rank.
17 lora_train_bias (str): LoRA bias training mode.
18 """
19
20 def __init__(self,
21 model: nn.Module,
22 value_head: Optional[nn.Module] = None,
23 lora_rank: int = 0,
24 lora_train_bias: str = 'none') -> None:
25 super().__init__(lora_rank=lora_rank, lora_train_bias=lora_train_bias)
26 self.body = model
27 if value_head is not None:
28 if value_head.out_features != 1:
29 raise ValueError("The value head of reward model's output dim should be 1!")
30 self.value_head = value_head
31
32 else:
33 self.value_head = nn.Linear(model.config.n_embd, 1)
34 self.convert_to_lora()
35
36 def forward(self, sequences: torch.LongTensor, attention_mask: Optional[torch.Tensor] = None) -> torch.Tensor:
37 outputs = self.body(sequences, attention_mask=attention_mask)
38 last_hidden_states = outputs['last_hidden_state']
39 values = self.value_head(last_hidden_states)[:, :-1]
40 value = values.mean(dim=1).squeeze(1) # ensure shape is (B)
41 return value
42
[end of applications/ChatGPT/chatgpt/nn/reward_model.py]
[start of applications/ChatGPT/examples/train_reward_model.py]
1 import argparse
2
3 import loralib as lora
4 import torch
5 from chatgpt.dataset import RewardDataset
6 from chatgpt.nn import BLOOMRM, GPTRM, OPTRM
7 from chatgpt.trainer import RewardModelTrainer
8 from chatgpt.trainer.strategies import ColossalAIStrategy, DDPStrategy, NaiveStrategy
9 from datasets import load_dataset
10 from torch.optim import Adam
11 from transformers import AutoTokenizer, BloomTokenizerFast
12 from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer
13
14 from colossalai.nn.optimizer import HybridAdam
15
16
17 def train(args):
18 # configure strategy
19 if args.strategy == 'naive':
20 strategy = NaiveStrategy()
21 elif args.strategy == 'ddp':
22 strategy = DDPStrategy()
23 elif args.strategy == 'colossalai_gemini':
24 strategy = ColossalAIStrategy(stage=3, placement_policy='cuda')
25 elif args.strategy == 'colossalai_zero2':
26 strategy = ColossalAIStrategy(stage=2, placement_policy='cuda')
27 else:
28 raise ValueError(f'Unsupported strategy "{args.strategy}"')
29
30 # configure model
31 with strategy.model_init_context():
32 if args.model == 'bloom':
33 model = BLOOMRM(pretrained=args.pretrain, lora_rank=args.lora_rank).cuda()
34 elif args.model == 'opt':
35 model = OPTRM(pretrained=args.pretrain, lora_rank=args.lora_rank).cuda()
36 elif args.model == 'gpt2':
37 model = GPTRM(pretrained=args.pretrain, lora_rank=args.lora_rank).cuda()
38 else:
39 raise ValueError(f'Unsupported model "{args.model}"')
40
41 # configure tokenizer
42 if args.model == 'gpt2':
43 tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
44 tokenizer.pad_token = tokenizer.eos_token
45 elif args.model == 'bloom':
46 tokenizer = BloomTokenizerFast.from_pretrained(args.pretrain)
47 tokenizer.pad_token = tokenizer.eos_token
48 elif args.model == 'opt':
49 tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m")
50 else:
51 raise ValueError(f'Unsupported model "{args.model}"')
52 tokenizer.pad_token = tokenizer.eos_token
53
54 max_len = 512
55
56 # configure optimizer
57 if args.strategy.startswith('colossalai'):
58 optim = HybridAdam(model.parameters(), lr=5e-5)
59 else:
60 optim = Adam(model.parameters(), lr=5e-5)
61
62 # prepare for data and dataset
63 data = load_dataset(args.dataset)
64 train_data = data["train"].select(range(100))
65 eval_data = data['test'].select(range(5))
66 train_dataset = RewardDataset(train_data, tokenizer, max_len)
67 eval_dataset = RewardDataset(eval_data, tokenizer, max_len)
68
69 # batch_size here is expected to be C(k,2), k means # response of each prompt
70 # be limited with the format of dataset 'Dahoas/rm-static', we'd better use batch_size as 1
71 trainer = RewardModelTrainer(model=model,
72 strategy=strategy,
73 optim=optim,
74 train_dataset=train_dataset,
75 eval_dataset=eval_dataset,
76 batch_size=args.batch_size,
77 max_epochs=args.max_epochs)
78
79 trainer.fit(use_lora=args.lora_rank)
80
81 # save model checkpoint after fitting on only rank0
82 strategy.save_model(model, 'rm_checkpoint.pt', only_rank0=True)
83 # save optimizer checkpoint on all ranks
84 strategy.save_optimizer(optim, 'rm_optim_checkpoint_%d.pt' % (torch.cuda.current_device()), only_rank0=False)
85
86
87 if __name__ == '__main__':
88 parser = argparse.ArgumentParser()
89 parser.add_argument('--strategy',
90 choices=['naive', 'ddp', 'colossalai_gemini', 'colossalai_zero2'],
91 default='naive')
92 parser.add_argument('--model', choices=['gpt2', 'bloom', 'opt'], default='bloom')
93 parser.add_argument('--pretrain', type=str, default=None)
94 parser.add_argument('--dataset', type=str, default='Dahoas/rm-static')
95 parser.add_argument('--save_path', type=str, default='rm_ckpt.pth')
96 parser.add_argument('--max_epochs', type=int, default=10)
97 parser.add_argument('--batch_size', type=int, default=4)
98 parser.add_argument('--lora_rank', type=int, default=0, help="low-rank adaptation matrices rank")
99 args = parser.parse_args()
100 train(args)
101
[end of applications/ChatGPT/examples/train_reward_model.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/applications/ChatGPT/chatgpt/nn/reward_model.py b/applications/ChatGPT/chatgpt/nn/reward_model.py
--- a/applications/ChatGPT/chatgpt/nn/reward_model.py
+++ b/applications/ChatGPT/chatgpt/nn/reward_model.py
@@ -23,7 +23,7 @@
lora_rank: int = 0,
lora_train_bias: str = 'none') -> None:
super().__init__(lora_rank=lora_rank, lora_train_bias=lora_train_bias)
- self.body = model
+ self.model = model
if value_head is not None:
if value_head.out_features != 1:
raise ValueError("The value head of reward model's output dim should be 1!")
@@ -34,7 +34,7 @@
self.convert_to_lora()
def forward(self, sequences: torch.LongTensor, attention_mask: Optional[torch.Tensor] = None) -> torch.Tensor:
- outputs = self.body(sequences, attention_mask=attention_mask)
+ outputs = self.model(sequences, attention_mask=attention_mask)
last_hidden_states = outputs['last_hidden_state']
values = self.value_head(last_hidden_states)[:, :-1]
value = values.mean(dim=1).squeeze(1) # ensure shape is (B)
diff --git a/applications/ChatGPT/chatgpt/trainer/rm.py b/applications/ChatGPT/chatgpt/trainer/rm.py
--- a/applications/ChatGPT/chatgpt/trainer/rm.py
+++ b/applications/ChatGPT/chatgpt/trainer/rm.py
@@ -44,6 +44,8 @@
self.eval_dataloader = DataLoader(eval_dataset, batch_size=batch_size)
self.model = strategy.setup_model(model)
+ if "DDP" in str(self.strategy):
+ self.model = self.model.module
self.loss_fn = PairWiseLoss()
self.optimizer = strategy.setup_optimizer(optim, self.model)
@@ -56,7 +58,7 @@
# train
if use_lora > 0:
print("Using Lora")
- lora.mark_only_lora_as_trainable(self.model.body)
+ lora.mark_only_lora_as_trainable(self.model.model)
else:
self.model.train()
diff --git a/applications/ChatGPT/examples/train_reward_model.py b/applications/ChatGPT/examples/train_reward_model.py
--- a/applications/ChatGPT/examples/train_reward_model.py
+++ b/applications/ChatGPT/examples/train_reward_model.py
@@ -61,8 +61,8 @@
# prepare for data and dataset
data = load_dataset(args.dataset)
- train_data = data["train"].select(range(100))
- eval_data = data['test'].select(range(5))
+ train_data = data["train"]
+ eval_data = data['test']
train_dataset = RewardDataset(train_data, tokenizer, max_len)
eval_dataset = RewardDataset(eval_data, tokenizer, max_len)
@@ -93,7 +93,7 @@
parser.add_argument('--pretrain', type=str, default=None)
parser.add_argument('--dataset', type=str, default='Dahoas/rm-static')
parser.add_argument('--save_path', type=str, default='rm_ckpt.pth')
- parser.add_argument('--max_epochs', type=int, default=10)
+ parser.add_argument('--max_epochs', type=int, default=1)
parser.add_argument('--batch_size', type=int, default=4)
parser.add_argument('--lora_rank', type=int, default=0, help="low-rank adaptation matrices rank")
args = parser.parse_args()
|
{"golden_diff": "diff --git a/applications/ChatGPT/chatgpt/nn/reward_model.py b/applications/ChatGPT/chatgpt/nn/reward_model.py\n--- a/applications/ChatGPT/chatgpt/nn/reward_model.py\n+++ b/applications/ChatGPT/chatgpt/nn/reward_model.py\n@@ -23,7 +23,7 @@\n lora_rank: int = 0,\n lora_train_bias: str = 'none') -> None:\n super().__init__(lora_rank=lora_rank, lora_train_bias=lora_train_bias)\n- self.body = model\n+ self.model = model\n if value_head is not None:\n if value_head.out_features != 1:\n raise ValueError(\"The value head of reward model's output dim should be 1!\")\n@@ -34,7 +34,7 @@\n self.convert_to_lora()\n \n def forward(self, sequences: torch.LongTensor, attention_mask: Optional[torch.Tensor] = None) -> torch.Tensor:\n- outputs = self.body(sequences, attention_mask=attention_mask)\n+ outputs = self.model(sequences, attention_mask=attention_mask)\n last_hidden_states = outputs['last_hidden_state']\n values = self.value_head(last_hidden_states)[:, :-1]\n value = values.mean(dim=1).squeeze(1) # ensure shape is (B)\ndiff --git a/applications/ChatGPT/chatgpt/trainer/rm.py b/applications/ChatGPT/chatgpt/trainer/rm.py\n--- a/applications/ChatGPT/chatgpt/trainer/rm.py\n+++ b/applications/ChatGPT/chatgpt/trainer/rm.py\n@@ -44,6 +44,8 @@\n self.eval_dataloader = DataLoader(eval_dataset, batch_size=batch_size)\n \n self.model = strategy.setup_model(model)\n+ if \"DDP\" in str(self.strategy):\n+ self.model = self.model.module\n self.loss_fn = PairWiseLoss()\n self.optimizer = strategy.setup_optimizer(optim, self.model)\n \n@@ -56,7 +58,7 @@\n # train\n if use_lora > 0:\n print(\"Using Lora\")\n- lora.mark_only_lora_as_trainable(self.model.body)\n+ lora.mark_only_lora_as_trainable(self.model.model)\n \n else:\n self.model.train()\ndiff --git a/applications/ChatGPT/examples/train_reward_model.py b/applications/ChatGPT/examples/train_reward_model.py\n--- a/applications/ChatGPT/examples/train_reward_model.py\n+++ b/applications/ChatGPT/examples/train_reward_model.py\n@@ -61,8 +61,8 @@\n \n # prepare for data and dataset\n data = load_dataset(args.dataset)\n- train_data = data[\"train\"].select(range(100))\n- eval_data = data['test'].select(range(5))\n+ train_data = data[\"train\"]\n+ eval_data = data['test']\n train_dataset = RewardDataset(train_data, tokenizer, max_len)\n eval_dataset = RewardDataset(eval_data, tokenizer, max_len)\n \n@@ -93,7 +93,7 @@\n parser.add_argument('--pretrain', type=str, default=None)\n parser.add_argument('--dataset', type=str, default='Dahoas/rm-static')\n parser.add_argument('--save_path', type=str, default='rm_ckpt.pth')\n- parser.add_argument('--max_epochs', type=int, default=10)\n+ parser.add_argument('--max_epochs', type=int, default=1)\n parser.add_argument('--batch_size', type=int, default=4)\n parser.add_argument('--lora_rank', type=int, default=0, help=\"low-rank adaptation matrices rank\")\n args = parser.parse_args()\n", "issue": "[BUG]: LoRA does not support the training of reward models\n### \ud83d\udc1b Describe the bug\n\n\r\n\n\n### Environment\n\n### env\r\nOS\uff1aubuntu 20.04\r\nGPU\uff1a4 x A10\r\npython==3.9.0\r\ntorch==1.13.1-cu116\r\ncolossalai==0.2.5\r\n\r\n### command\r\n`python train_reward_model.py --pretrain \"bigscience/bloom-560m\" --lora_rank 16`\r\n\r\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n", "before_files": [{"content": "from abc import ABC\n\nimport loralib as lora\nimport torch\nfrom chatgpt.dataset import RewardDataset\nfrom chatgpt.nn import PairWiseLoss\nfrom torch.optim import Adam, 
Optimizer\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\n\nfrom .strategies import Strategy\nfrom .utils import is_rank_0\n\n\nclass RewardModelTrainer(ABC):\n \"\"\"\n Trainer to use while training reward model.\n\n Args:\n model (torch.nn.Module): the model to train\n strategy (Strategy): the strategy to use for training\n optim(Optimizer): the optimizer to use for training\n train_dataset (RewardDataset): the dataset to use for training\n eval_dataset (RewardDataset): the dataset to use for evaluation\n batch_size (int, defaults to 1): the batch size while training\n max_epochs (int, defaults to 2): the number of epochs to train\n optim_kwargs (dict, defaults to {'lr':1e-4}): the kwargs to use while initializing optimizer\n \"\"\"\n\n def __init__(\n self,\n model,\n strategy: Strategy,\n optim: Optimizer,\n train_dataset: RewardDataset,\n eval_dataset: RewardDataset,\n batch_size: int = 1,\n max_epochs: int = 2,\n ) -> None:\n super().__init__()\n self.strategy = strategy\n self.epochs = max_epochs\n self.train_dataloader = DataLoader(train_dataset, batch_size=batch_size)\n self.eval_dataloader = DataLoader(eval_dataset, batch_size=batch_size)\n\n self.model = strategy.setup_model(model)\n self.loss_fn = PairWiseLoss()\n self.optimizer = strategy.setup_optimizer(optim, self.model)\n\n def fit(self, use_lora):\n epoch_bar = tqdm(range(self.epochs), desc='Train epoch', disable=not is_rank_0())\n for epoch in range(self.epochs):\n step_bar = tqdm(range(self.train_dataloader.__len__()),\n desc='Train step of epoch %d' % epoch,\n disable=not is_rank_0())\n # train\n if use_lora > 0:\n print(\"Using Lora\")\n lora.mark_only_lora_as_trainable(self.model.body)\n\n else:\n self.model.train()\n for chosen_ids, c_mask, reject_ids, r_mask in self.train_dataloader:\n chosen_ids = chosen_ids.squeeze(1).cuda()\n c_mask = c_mask.squeeze(1).cuda()\n reject_ids = reject_ids.squeeze(1).cuda()\n r_mask = r_mask.squeeze(1).cuda()\n chosen_reward = self.model(chosen_ids, attention_mask=c_mask)\n reject_reward = self.model(reject_ids, attention_mask=r_mask)\n loss = self.loss_fn(chosen_reward, reject_reward)\n self.strategy.backward(loss, self.model, self.optimizer)\n self.strategy.optimizer_step(self.optimizer)\n self.optimizer.zero_grad()\n step_bar.update()\n step_bar.set_postfix({'loss': loss.item()})\n\n # eval\n self.model.eval()\n with torch.no_grad():\n dist = 0\n loss_sum = 0\n for chosen_ids, c_mask, reject_ids, r_mask in self.eval_dataloader:\n chosen_ids = chosen_ids.squeeze(1).cuda()\n c_mask = c_mask.squeeze(1).cuda()\n reject_ids = reject_ids.squeeze(1).cuda()\n r_mask = r_mask.squeeze(1).cuda()\n chosen_reward = self.model(chosen_ids, attention_mask=c_mask)\n reject_reward = self.model(reject_ids, attention_mask=r_mask)\n dist += (chosen_reward - reject_reward).mean().item()\n loss = self.loss_fn(chosen_reward, reject_reward)\n loss_sum += loss.item()\n dist_mean = dist / self.eval_dataloader.__len__()\n loss_mean = loss_sum / self.eval_dataloader.__len__()\n epoch_bar.update()\n step_bar.set_postfix({'loss': loss_mean, 'dist_mean': dist_mean})\n step_bar.close()\n", "path": "applications/ChatGPT/chatgpt/trainer/rm.py"}, {"content": "from typing import Optional\n\nimport torch\nimport torch.nn as nn\n\nfrom .lora import LoRAModule\n\n\nclass RewardModel(LoRAModule):\n \"\"\"\n Reward model base class.\n\n Args:\n model (nn.Module): Reward model.\n value_head (nn.Module): Value head to get reward score.\n lora_rank (int): LoRA rank.\n lora_train_bias (str): LoRA bias training 
mode.\n \"\"\"\n\n def __init__(self,\n model: nn.Module,\n value_head: Optional[nn.Module] = None,\n lora_rank: int = 0,\n lora_train_bias: str = 'none') -> None:\n super().__init__(lora_rank=lora_rank, lora_train_bias=lora_train_bias)\n self.body = model\n if value_head is not None:\n if value_head.out_features != 1:\n raise ValueError(\"The value head of reward model's output dim should be 1!\")\n self.value_head = value_head\n\n else:\n self.value_head = nn.Linear(model.config.n_embd, 1)\n self.convert_to_lora()\n\n def forward(self, sequences: torch.LongTensor, attention_mask: Optional[torch.Tensor] = None) -> torch.Tensor:\n outputs = self.body(sequences, attention_mask=attention_mask)\n last_hidden_states = outputs['last_hidden_state']\n values = self.value_head(last_hidden_states)[:, :-1]\n value = values.mean(dim=1).squeeze(1) # ensure shape is (B)\n return value\n", "path": "applications/ChatGPT/chatgpt/nn/reward_model.py"}, {"content": "import argparse\n\nimport loralib as lora\nimport torch\nfrom chatgpt.dataset import RewardDataset\nfrom chatgpt.nn import BLOOMRM, GPTRM, OPTRM\nfrom chatgpt.trainer import RewardModelTrainer\nfrom chatgpt.trainer.strategies import ColossalAIStrategy, DDPStrategy, NaiveStrategy\nfrom datasets import load_dataset\nfrom torch.optim import Adam\nfrom transformers import AutoTokenizer, BloomTokenizerFast\nfrom transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer\n\nfrom colossalai.nn.optimizer import HybridAdam\n\n\ndef train(args):\n # configure strategy\n if args.strategy == 'naive':\n strategy = NaiveStrategy()\n elif args.strategy == 'ddp':\n strategy = DDPStrategy()\n elif args.strategy == 'colossalai_gemini':\n strategy = ColossalAIStrategy(stage=3, placement_policy='cuda')\n elif args.strategy == 'colossalai_zero2':\n strategy = ColossalAIStrategy(stage=2, placement_policy='cuda')\n else:\n raise ValueError(f'Unsupported strategy \"{args.strategy}\"')\n\n # configure model\n with strategy.model_init_context():\n if args.model == 'bloom':\n model = BLOOMRM(pretrained=args.pretrain, lora_rank=args.lora_rank).cuda()\n elif args.model == 'opt':\n model = OPTRM(pretrained=args.pretrain, lora_rank=args.lora_rank).cuda()\n elif args.model == 'gpt2':\n model = GPTRM(pretrained=args.pretrain, lora_rank=args.lora_rank).cuda()\n else:\n raise ValueError(f'Unsupported model \"{args.model}\"')\n\n # configure tokenizer\n if args.model == 'gpt2':\n tokenizer = GPT2Tokenizer.from_pretrained('gpt2')\n tokenizer.pad_token = tokenizer.eos_token\n elif args.model == 'bloom':\n tokenizer = BloomTokenizerFast.from_pretrained(args.pretrain)\n tokenizer.pad_token = tokenizer.eos_token\n elif args.model == 'opt':\n tokenizer = AutoTokenizer.from_pretrained(\"facebook/opt-350m\")\n else:\n raise ValueError(f'Unsupported model \"{args.model}\"')\n tokenizer.pad_token = tokenizer.eos_token\n\n max_len = 512\n\n # configure optimizer\n if args.strategy.startswith('colossalai'):\n optim = HybridAdam(model.parameters(), lr=5e-5)\n else:\n optim = Adam(model.parameters(), lr=5e-5)\n\n # prepare for data and dataset\n data = load_dataset(args.dataset)\n train_data = data[\"train\"].select(range(100))\n eval_data = data['test'].select(range(5))\n train_dataset = RewardDataset(train_data, tokenizer, max_len)\n eval_dataset = RewardDataset(eval_data, tokenizer, max_len)\n\n # batch_size here is expected to be C(k,2), k means # response of each prompt\n # be limited with the format of dataset 'Dahoas/rm-static', we'd better use batch_size as 1\n trainer = 
RewardModelTrainer(model=model,\n strategy=strategy,\n optim=optim,\n train_dataset=train_dataset,\n eval_dataset=eval_dataset,\n batch_size=args.batch_size,\n max_epochs=args.max_epochs)\n\n trainer.fit(use_lora=args.lora_rank)\n\n # save model checkpoint after fitting on only rank0\n strategy.save_model(model, 'rm_checkpoint.pt', only_rank0=True)\n # save optimizer checkpoint on all ranks\n strategy.save_optimizer(optim, 'rm_optim_checkpoint_%d.pt' % (torch.cuda.current_device()), only_rank0=False)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--strategy',\n choices=['naive', 'ddp', 'colossalai_gemini', 'colossalai_zero2'],\n default='naive')\n parser.add_argument('--model', choices=['gpt2', 'bloom', 'opt'], default='bloom')\n parser.add_argument('--pretrain', type=str, default=None)\n parser.add_argument('--dataset', type=str, default='Dahoas/rm-static')\n parser.add_argument('--save_path', type=str, default='rm_ckpt.pth')\n parser.add_argument('--max_epochs', type=int, default=10)\n parser.add_argument('--batch_size', type=int, default=4)\n parser.add_argument('--lora_rank', type=int, default=0, help=\"low-rank adaptation matrices rank\")\n args = parser.parse_args()\n train(args)\n", "path": "applications/ChatGPT/examples/train_reward_model.py"}]}
| 3,445 | 827 |
gh_patches_debug_20747
|
rasdani/github-patches
|
git_diff
|
DataDog__dd-trace-py-477
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[psycopg2] The quote_ident function requires psycopg2.extensions.connection.
This is very related to https://github.com/DataDog/dd-trace-py/issues/383.
To reproduce:
```
from ddtrace import patch_all
patch_all()
import psycopg2
from psycopg2.extensions import quote_ident
conn = psycopg2.connect(dbname="test")
quote_ident('foo', conn) # Fails with TypeError: argument 2 must be a connection or a cursor
```
[psycopg2] The prepare functions require psycopg2.extensions.connection.
Here is an example:
```
from ddtrace import patch_all
patch_all()
import psycopg2
from psycopg2.extensions import adapt
conn = psycopg2.connect(dbname="test")
items = adapt([1, 2, 3])
items.prepare(conn) # Fails here
binary = adapt(b'12345')
binary.prepare(conn) # and here.
```
```
$ python example.py
Traceback (most recent call last):
File "example.py", line 11, in <module>
items.prepare(conn)
TypeError: argument 1 must be psycopg2.extensions.connection, not TracedConnection
```
This could probably be solved in a similar manner to #96 .
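A minimal sketch of what that approach could look like (the wrapper name and placement are assumptions, not dd-trace-py's actual code): patch the extension function so any `wrapt.ObjectProxy` arguments are unwrapped to the real psycopg2 connection before the C-level type check runs.

```python
import wrapt

def _unwrap_proxy_args(func, _, args, kwargs):
    # quote_ident/prepare do a C-level isinstance check, so hand them the
    # underlying psycopg2 connection instead of the TracedConnection proxy.
    args = tuple(
        a.__wrapped__ if isinstance(a, wrapt.ObjectProxy) else a
        for a in args
    )
    return func(*args, **kwargs)
```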
</issue>
<code>
[start of ddtrace/contrib/psycopg/patch.py]
1 # 3p
2 import psycopg2
3 import wrapt
4
5 # project
6 from ddtrace import Pin
7 from ddtrace.contrib import dbapi
8 from ddtrace.ext import sql, net, db
9
10 # Original connect method
11 _connect = psycopg2.connect
12
13
14 def patch():
15 """ Patch monkey patches psycopg's connection function
16 so that the connection's functions are traced.
17 """
18 if getattr(psycopg2, '_datadog_patch', False):
19 return
20 setattr(psycopg2, '_datadog_patch', True)
21
22 wrapt.wrap_function_wrapper(psycopg2, 'connect', patched_connect)
23 _patch_extensions(_psycopg2_extensions) # do this early just in case
24
25
26 def unpatch():
27 if getattr(psycopg2, '_datadog_patch', False):
28 setattr(psycopg2, '_datadog_patch', False)
29 psycopg2.connect = _connect
30
31
32 def patch_conn(conn, traced_conn_cls=dbapi.TracedConnection):
33 """ Wrap will patch the instance so that it's queries are traced."""
34 # ensure we've patched extensions (this is idempotent) in
35 # case we're only tracing some connections.
36 _patch_extensions(_psycopg2_extensions)
37
38 c = traced_conn_cls(conn)
39
40 # fetch tags from the dsn
41 dsn = sql.parse_pg_dsn(conn.dsn)
42 tags = {
43 net.TARGET_HOST: dsn.get("host"),
44 net.TARGET_PORT: dsn.get("port"),
45 db.NAME: dsn.get("dbname"),
46 db.USER: dsn.get("user"),
47 "db.application" : dsn.get("application_name"),
48 }
49
50 Pin(
51 service="postgres",
52 app="postgres",
53 app_type="db",
54 tags=tags).onto(c)
55
56 return c
57
58
59 def _patch_extensions(_extensions):
60 # we must patch extensions all the time (it's pretty harmless) so split
61 # from global patching of connections. must be idempotent.
62 for _, module, func, wrapper in _extensions:
63 if not hasattr(module, func) or isinstance(getattr(module, func), wrapt.ObjectProxy):
64 continue
65 wrapt.wrap_function_wrapper(module, func, wrapper)
66
67
68 def _unpatch_extensions(_extensions):
69 # we must patch extensions all the time (it's pretty harmless) so split
70 # from global patching of connections. must be idempotent.
71 for original, module, func, _ in _extensions:
72 setattr(module, func, original)
73
74
75 #
76 # monkeypatch targets
77 #
78
79 def patched_connect(connect_func, _, args, kwargs):
80 conn = connect_func(*args, **kwargs)
81 return patch_conn(conn)
82
83
84 def _extensions_register_type(func, _, args, kwargs):
85 def _unroll_args(obj, scope=None):
86 return obj, scope
87 obj, scope = _unroll_args(*args, **kwargs)
88
89 # register_type performs a c-level check of the object
90 # type so we must be sure to pass in the actual db connection
91 if scope and isinstance(scope, wrapt.ObjectProxy):
92 scope = scope.__wrapped__
93
94 return func(obj, scope) if scope else func(obj)
95
96
97 def _extensions_adapt(func, _, args, kwargs):
98 adapt = func(*args, **kwargs)
99 if hasattr(adapt, 'prepare'):
100 return AdapterWrapper(adapt)
101 return adapt
102
103
104 class AdapterWrapper(wrapt.ObjectProxy):
105 def prepare(self, *args, **kwargs):
106 func = self.__wrapped__.prepare
107 if not args:
108 return func(*args, **kwargs)
109 conn = args[0]
110
111 # prepare performs a c-level check of the object type so
112 # we must be sure to pass in the actual db connection
113 if isinstance(conn, wrapt.ObjectProxy):
114 conn = conn.__wrapped__
115
116 return func(conn, *args[1:], **kwargs)
117
118
119 # extension hooks
120 _psycopg2_extensions = [
121 (psycopg2.extensions.register_type,
122 psycopg2.extensions, 'register_type',
123 _extensions_register_type),
124 (psycopg2._psycopg.register_type,
125 psycopg2._psycopg, 'register_type',
126 _extensions_register_type),
127 (psycopg2.extensions.adapt,
128 psycopg2.extensions, 'adapt',
129 _extensions_adapt),
130 ]
131
132 # `_json` attribute is only available for psycopg >= 2.5
133 if getattr(psycopg2, '_json', None):
134 _psycopg2_extensions += [
135 (psycopg2._json.register_type,
136 psycopg2._json, 'register_type',
137 _extensions_register_type),
138 ]
139
[end of ddtrace/contrib/psycopg/patch.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/ddtrace/contrib/psycopg/patch.py b/ddtrace/contrib/psycopg/patch.py
--- a/ddtrace/contrib/psycopg/patch.py
+++ b/ddtrace/contrib/psycopg/patch.py
@@ -93,6 +93,17 @@
return func(obj, scope) if scope else func(obj)
+def _extensions_quote_ident(func, _, args, kwargs):
+ def _unroll_args(obj, scope=None):
+ return obj, scope
+ obj, scope = _unroll_args(*args, **kwargs)
+
+ # register_type performs a c-level check of the object
+ # type so we must be sure to pass in the actual db connection
+ if scope and isinstance(scope, wrapt.ObjectProxy):
+ scope = scope.__wrapped__
+
+ return func(obj, scope) if scope else func(obj)
def _extensions_adapt(func, _, args, kwargs):
adapt = func(*args, **kwargs)
@@ -136,3 +147,11 @@
psycopg2._json, 'register_type',
_extensions_register_type),
]
+
+# `quote_ident` attribute is only available for psycopg >= 2.7
+if getattr(psycopg2, 'extensions', None) and getattr(psycopg2.extensions,
+ 'quote_ident', None):
+ _psycopg2_extensions += [(psycopg2.extensions.quote_ident,
+ psycopg2.extensions, 'quote_ident',
+ _extensions_quote_ident),
+ ]
|
{"golden_diff": "diff --git a/ddtrace/contrib/psycopg/patch.py b/ddtrace/contrib/psycopg/patch.py\n--- a/ddtrace/contrib/psycopg/patch.py\n+++ b/ddtrace/contrib/psycopg/patch.py\n@@ -93,6 +93,17 @@\n \n return func(obj, scope) if scope else func(obj)\n \n+def _extensions_quote_ident(func, _, args, kwargs):\n+ def _unroll_args(obj, scope=None):\n+ return obj, scope\n+ obj, scope = _unroll_args(*args, **kwargs)\n+\n+ # register_type performs a c-level check of the object\n+ # type so we must be sure to pass in the actual db connection\n+ if scope and isinstance(scope, wrapt.ObjectProxy):\n+ scope = scope.__wrapped__\n+\n+ return func(obj, scope) if scope else func(obj)\n \n def _extensions_adapt(func, _, args, kwargs):\n adapt = func(*args, **kwargs)\n@@ -136,3 +147,11 @@\n psycopg2._json, 'register_type',\n _extensions_register_type),\n ]\n+\n+# `quote_ident` attribute is only available for psycopg >= 2.7\n+if getattr(psycopg2, 'extensions', None) and getattr(psycopg2.extensions,\n+ 'quote_ident', None):\n+ _psycopg2_extensions += [(psycopg2.extensions.quote_ident,\n+ psycopg2.extensions, 'quote_ident',\n+ _extensions_quote_ident),\n+ ]\n", "issue": "[psycopg2] The quote_ident function requires psycopg2.extensions.connection.\nThis is very related to https://github.com/DataDog/dd-trace-py/issues/383.\r\n\r\nTo reproduce:\r\n```\r\nfrom ddtrace import patch_all\r\npatch_all()\r\n\r\nimport psycopg2\r\nfrom psycopg2.extensions import quote_ident\r\n\r\nconn = psycopg2.connect(dbname=\"test\")\r\nquote_ident('foo', conn) # Fails with TypeError: argument 2 must be a connection or a cursor\r\n```\r\n\r\n\n[psycopg2] The prepare functions require psycopg2.extensions.connection.\nHere is an example:\r\n\r\n```\r\nfrom ddtrace import patch_all\r\n\r\npatch_all()\r\n\r\nimport psycopg2\r\nfrom psycopg2.extensions import adapt\r\n\r\nconn = psycopg2.connect(dbname=\"test\")\r\n\r\nitems = adapt([1, 2, 3])\r\nitems.prepare(conn) # Fails here\r\n\r\nbinary = adapt(b'12345')\r\nbinary.prepare(conn) # and here.\r\n```\r\n\r\n```\r\n$ python example.py\r\nTraceback (most recent call last):\r\n File \"example.py\", line 11, in <module>\r\n items.prepare(conn)\r\nTypeError: argument 1 must be psycopg2.extensions.connection, not TracedConnection\r\n```\r\n\r\nThis could probably be solved in a similar manner to #96 .\n", "before_files": [{"content": "# 3p\nimport psycopg2\nimport wrapt\n\n# project\nfrom ddtrace import Pin\nfrom ddtrace.contrib import dbapi\nfrom ddtrace.ext import sql, net, db\n\n# Original connect method\n_connect = psycopg2.connect\n\n\ndef patch():\n \"\"\" Patch monkey patches psycopg's connection function\n so that the connection's functions are traced.\n \"\"\"\n if getattr(psycopg2, '_datadog_patch', False):\n return\n setattr(psycopg2, '_datadog_patch', True)\n\n wrapt.wrap_function_wrapper(psycopg2, 'connect', patched_connect)\n _patch_extensions(_psycopg2_extensions) # do this early just in case\n\n\ndef unpatch():\n if getattr(psycopg2, '_datadog_patch', False):\n setattr(psycopg2, '_datadog_patch', False)\n psycopg2.connect = _connect\n\n\ndef patch_conn(conn, traced_conn_cls=dbapi.TracedConnection):\n \"\"\" Wrap will patch the instance so that it's queries are traced.\"\"\"\n # ensure we've patched extensions (this is idempotent) in\n # case we're only tracing some connections.\n _patch_extensions(_psycopg2_extensions)\n\n c = traced_conn_cls(conn)\n\n # fetch tags from the dsn\n dsn = sql.parse_pg_dsn(conn.dsn)\n tags = {\n net.TARGET_HOST: dsn.get(\"host\"),\n 
net.TARGET_PORT: dsn.get(\"port\"),\n db.NAME: dsn.get(\"dbname\"),\n db.USER: dsn.get(\"user\"),\n \"db.application\" : dsn.get(\"application_name\"),\n }\n\n Pin(\n service=\"postgres\",\n app=\"postgres\",\n app_type=\"db\",\n tags=tags).onto(c)\n\n return c\n\n\ndef _patch_extensions(_extensions):\n # we must patch extensions all the time (it's pretty harmless) so split\n # from global patching of connections. must be idempotent.\n for _, module, func, wrapper in _extensions:\n if not hasattr(module, func) or isinstance(getattr(module, func), wrapt.ObjectProxy):\n continue\n wrapt.wrap_function_wrapper(module, func, wrapper)\n\n\ndef _unpatch_extensions(_extensions):\n # we must patch extensions all the time (it's pretty harmless) so split\n # from global patching of connections. must be idempotent.\n for original, module, func, _ in _extensions:\n setattr(module, func, original)\n\n\n#\n# monkeypatch targets\n#\n\ndef patched_connect(connect_func, _, args, kwargs):\n conn = connect_func(*args, **kwargs)\n return patch_conn(conn)\n\n\ndef _extensions_register_type(func, _, args, kwargs):\n def _unroll_args(obj, scope=None):\n return obj, scope\n obj, scope = _unroll_args(*args, **kwargs)\n\n # register_type performs a c-level check of the object\n # type so we must be sure to pass in the actual db connection\n if scope and isinstance(scope, wrapt.ObjectProxy):\n scope = scope.__wrapped__\n\n return func(obj, scope) if scope else func(obj)\n\n\ndef _extensions_adapt(func, _, args, kwargs):\n adapt = func(*args, **kwargs)\n if hasattr(adapt, 'prepare'):\n return AdapterWrapper(adapt)\n return adapt\n\n\nclass AdapterWrapper(wrapt.ObjectProxy):\n def prepare(self, *args, **kwargs):\n func = self.__wrapped__.prepare\n if not args:\n return func(*args, **kwargs)\n conn = args[0]\n\n # prepare performs a c-level check of the object type so\n # we must be sure to pass in the actual db connection\n if isinstance(conn, wrapt.ObjectProxy):\n conn = conn.__wrapped__\n\n return func(conn, *args[1:], **kwargs)\n\n\n# extension hooks\n_psycopg2_extensions = [\n (psycopg2.extensions.register_type,\n psycopg2.extensions, 'register_type',\n _extensions_register_type),\n (psycopg2._psycopg.register_type,\n psycopg2._psycopg, 'register_type',\n _extensions_register_type),\n (psycopg2.extensions.adapt,\n psycopg2.extensions, 'adapt',\n _extensions_adapt),\n]\n\n# `_json` attribute is only available for psycopg >= 2.5\nif getattr(psycopg2, '_json', None):\n _psycopg2_extensions += [\n (psycopg2._json.register_type,\n psycopg2._json, 'register_type',\n _extensions_register_type),\n ]\n", "path": "ddtrace/contrib/psycopg/patch.py"}]}
| 2,115 | 333 |
gh_patches_debug_751
|
rasdani/github-patches
|
git_diff
|
pytorch__TensorRT-74
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Create some sort of serialization / deserialization functionality
With INT8 about to land, would be a pain to have to calibrate from scratch every time. There should be some mechanism to save and load modules with the TRT engine included.
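A rough sketch of the workflow this would enable, relying on the compiled module being an ordinary TorchScript module (the `trtorch.compile` call, its spec keys, and `MyModel` are placeholders/assumptions, not the final API):

```python
import torch
import trtorch  # hypothetical usage; exact API may differ

scripted = torch.jit.script(MyModel().eval())  # MyModel is a placeholder module
trt_module = trtorch.compile(scripted, {"input_shapes": [(1, 3, 224, 224)]})
torch.jit.save(trt_module, "model_trt.ts")     # persist the embedded TRT engine + calibration work

reloaded = torch.jit.load("model_trt.ts")      # later runs skip recalibration
```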
</issue>
<code>
[start of py/trtorch/__init__.py]
1 import os
2 import sys
3
4 if sys.version_info < (3,):
5 raise Exception("Python 2 has reached end-of-life and is not supported by TRTorch")
6
7 import ctypes
8 import torch
9
10 from trtorch._version import __version__
11 from trtorch._compiler import *
12 from trtorch._types import *
13 from trtorch import logging
14
[end of py/trtorch/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/py/trtorch/__init__.py b/py/trtorch/__init__.py
--- a/py/trtorch/__init__.py
+++ b/py/trtorch/__init__.py
@@ -11,3 +11,7 @@
from trtorch._compiler import *
from trtorch._types import *
from trtorch import logging
+
+def _register_with_torch():
+ trtorch_dir = os.path.dirname(__file__)
+ torch.ops.load_library(trtorch_dir + '/lib/trtorch.so')
\ No newline at end of file
|
{"golden_diff": "diff --git a/py/trtorch/__init__.py b/py/trtorch/__init__.py\n--- a/py/trtorch/__init__.py\n+++ b/py/trtorch/__init__.py\n@@ -11,3 +11,7 @@\n from trtorch._compiler import *\n from trtorch._types import *\n from trtorch import logging\n+\n+def _register_with_torch():\n+ trtorch_dir = os.path.dirname(__file__)\n+ torch.ops.load_library(trtorch_dir + '/lib/trtorch.so')\n\\ No newline at end of file\n", "issue": "Create some sort of serialization / deserialization functionality\nWith INT8 about to land, would be a pain to have to calibrate from scratch every time. There should be some mechanism to save and load modules with the TRT engine included. \n", "before_files": [{"content": "import os\nimport sys\n\nif sys.version_info < (3,):\n raise Exception(\"Python 2 has reached end-of-life and is not supported by TRTorch\")\n\nimport ctypes\nimport torch\n\nfrom trtorch._version import __version__\nfrom trtorch._compiler import *\nfrom trtorch._types import *\nfrom trtorch import logging\n", "path": "py/trtorch/__init__.py"}]}
| 681 | 117 |
gh_patches_debug_39214
|
rasdani/github-patches
|
git_diff
|
pytorch__TensorRT-2372
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
🐛 [Bug] Bug in `aten.where` converter for Numpy array inputs
## Bug Description
- When applying converter to Numpy arrays or constants, the compilation fails due to use of the `expand` operator, which only applies to Torch tensors
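A tiny standalone repro of the underlying type mismatch, outside the converter:

```python
import numpy as np
import torch

torch.zeros(1, 3).expand(2, 3)              # fine: torch.Tensor has .expand
np.broadcast_to(np.zeros((1, 3)), (2, 3))   # NumPy equivalent of expand
np.zeros((1, 3)).expand(2, 3)               # AttributeError: ndarray has no .expand
```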
</issue>
<code>
[start of py/torch_tensorrt/dynamo/conversion/impl/condition/ops.py]
1 from typing import Optional
2
3 import numpy as np
4 import tensorrt as trt
5 import torch
6 from torch.fx.node import Target
7 from torch_tensorrt.dynamo._SourceIR import SourceIR
8 from torch_tensorrt.dynamo.conversion._ConversionContext import ConversionContext
9 from torch_tensorrt.dynamo.conversion.converter_utils import (
10 broadcastable,
11 get_trt_tensor,
12 )
13 from torch_tensorrt.dynamo.conversion.impl.slice import expand
14 from torch_tensorrt.fx.converters.converter_utils import broadcast, set_layer_name
15 from torch_tensorrt.fx.types import TRTTensor
16
17
18 def where(
19 ctx: ConversionContext,
20 target: Target,
21 source_ir: Optional[SourceIR],
22 name: str,
23 input: TRTTensor,
24 other: TRTTensor,
25 condition: TRTTensor,
26 ) -> TRTTensor:
27 if not (broadcastable(input, other)):
28 assert "The two torch tensors should be broadcastable"
29
30 # get output shape
31 # purpose of this is to bring input and other rank same as
32 # output_shape to input it to the add_expand operation
33 # condition will have dimension of either input or other
34 input, other = broadcast(ctx.net, input, other, f"{name}_x", f"{name}_y")
35 if len(tuple(condition.shape)) != len(tuple(input.shape)):
36 condition, input = broadcast(
37 ctx.net, condition, input, f"{name}_condition", f"{name}_x"
38 )
39
40 x_shape = list(input.shape)
41 y_shape = list(other.shape)
42 condition_shape = list(condition.shape)
43
44 output_shape = list(torch.broadcast_shapes(condition_shape, x_shape, y_shape))
45
46 # expand shape
47 if not isinstance(condition, TRTTensor):
48 assert condition.dtype in (torch.bool, np.bool_), "condition dtype is not bool"
49 if condition_shape != output_shape:
50 condition = (
51 condition.expand(output_shape)
52 if isinstance(condition, torch.Tensor)
53 else np.broadcast_to(condition, output_shape)
54 )
55 condition_val = get_trt_tensor(ctx, condition, f"{name}_condition")
56 else:
57 assert condition.dtype == trt.bool, "mask dtype is not bool!"
58 if condition_shape != output_shape:
59 condition_val = expand(
60 ctx, target, source_ir, f"{name}_expand", condition, output_shape
61 )
62 else:
63 condition_val = condition
64
65 if not isinstance(input, TRTTensor):
66 if x_shape != output_shape:
67 # special case where 1 element in input
68 if len(input.shape) == 0:
69 input = (
70 input.unsqueeze(0)
71 if isinstance(input, torch.Tensor)
72 else np.expand_dims(input, axis=0)
73 )
74 input = input.expand(output_shape)
75 x_val = get_trt_tensor(ctx, input, f"{name}_x")
76 else:
77 x_val = input
78 if x_shape != output_shape:
79 x_val = expand(
80 ctx, target, source_ir, f"{name}_x_expand", input, output_shape
81 )
82
83 if not isinstance(other, TRTTensor):
84 if y_shape != output_shape:
85 # special case where 1 element in other
86 if len(other.shape) == 0:
87 other = (
88 other.unsqueeze(0)
89 if isinstance(other, torch.Tensor)
90 else np.expand_dims(other, axis=0)
91 )
92 other = other.expand(output_shape)
93 y_val = get_trt_tensor(ctx, other, f"{name}_y")
94 else:
95 y_val = other
96 if y_shape != output_shape:
97 y_val = expand(
98 ctx, target, source_ir, f"{name}_y_expand", y_val, output_shape
99 )
100
101 select_layer = ctx.net.add_select(condition_val, x_val, y_val)
102
103 set_layer_name(select_layer, target, f"{name}_select")
104
105 return select_layer.get_output(0)
106
[end of py/torch_tensorrt/dynamo/conversion/impl/condition/ops.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/py/torch_tensorrt/dynamo/conversion/impl/condition/ops.py b/py/torch_tensorrt/dynamo/conversion/impl/condition/ops.py
--- a/py/torch_tensorrt/dynamo/conversion/impl/condition/ops.py
+++ b/py/torch_tensorrt/dynamo/conversion/impl/condition/ops.py
@@ -1,4 +1,4 @@
-from typing import Optional
+from typing import Optional, Union
import numpy as np
import tensorrt as trt
@@ -11,7 +11,7 @@
get_trt_tensor,
)
from torch_tensorrt.dynamo.conversion.impl.slice import expand
-from torch_tensorrt.fx.converters.converter_utils import broadcast, set_layer_name
+from torch_tensorrt.fx.converters.converter_utils import set_layer_name
from torch_tensorrt.fx.types import TRTTensor
@@ -20,23 +20,13 @@
target: Target,
source_ir: Optional[SourceIR],
name: str,
- input: TRTTensor,
- other: TRTTensor,
- condition: TRTTensor,
+ input: Union[TRTTensor, np.ndarray, torch.Tensor],
+ other: Union[TRTTensor, np.ndarray, torch.Tensor],
+ condition: Union[TRTTensor, np.ndarray, torch.Tensor],
) -> TRTTensor:
if not (broadcastable(input, other)):
assert "The two torch tensors should be broadcastable"
- # get output shape
- # purpose of this is to bring input and other rank same as
- # output_shape to input it to the add_expand operation
- # condition will have dimension of either input or other
- input, other = broadcast(ctx.net, input, other, f"{name}_x", f"{name}_y")
- if len(tuple(condition.shape)) != len(tuple(input.shape)):
- condition, input = broadcast(
- ctx.net, condition, input, f"{name}_condition", f"{name}_x"
- )
-
x_shape = list(input.shape)
y_shape = list(other.shape)
condition_shape = list(condition.shape)
@@ -71,7 +61,11 @@
if isinstance(input, torch.Tensor)
else np.expand_dims(input, axis=0)
)
- input = input.expand(output_shape)
+ input = (
+ input.expand(output_shape)
+ if isinstance(input, torch.Tensor)
+ else np.broadcast_to(input, output_shape)
+ )
x_val = get_trt_tensor(ctx, input, f"{name}_x")
else:
x_val = input
@@ -89,7 +83,11 @@
if isinstance(other, torch.Tensor)
else np.expand_dims(other, axis=0)
)
- other = other.expand(output_shape)
+ other = (
+ other.expand(output_shape)
+ if isinstance(other, torch.Tensor)
+ else np.broadcast_to(other, output_shape)
+ )
y_val = get_trt_tensor(ctx, other, f"{name}_y")
else:
y_val = other
|
{"golden_diff": "diff --git a/py/torch_tensorrt/dynamo/conversion/impl/condition/ops.py b/py/torch_tensorrt/dynamo/conversion/impl/condition/ops.py\n--- a/py/torch_tensorrt/dynamo/conversion/impl/condition/ops.py\n+++ b/py/torch_tensorrt/dynamo/conversion/impl/condition/ops.py\n@@ -1,4 +1,4 @@\n-from typing import Optional\n+from typing import Optional, Union\n \n import numpy as np\n import tensorrt as trt\n@@ -11,7 +11,7 @@\n get_trt_tensor,\n )\n from torch_tensorrt.dynamo.conversion.impl.slice import expand\n-from torch_tensorrt.fx.converters.converter_utils import broadcast, set_layer_name\n+from torch_tensorrt.fx.converters.converter_utils import set_layer_name\n from torch_tensorrt.fx.types import TRTTensor\n \n \n@@ -20,23 +20,13 @@\n target: Target,\n source_ir: Optional[SourceIR],\n name: str,\n- input: TRTTensor,\n- other: TRTTensor,\n- condition: TRTTensor,\n+ input: Union[TRTTensor, np.ndarray, torch.Tensor],\n+ other: Union[TRTTensor, np.ndarray, torch.Tensor],\n+ condition: Union[TRTTensor, np.ndarray, torch.Tensor],\n ) -> TRTTensor:\n if not (broadcastable(input, other)):\n assert \"The two torch tensors should be broadcastable\"\n \n- # get output shape\n- # purpose of this is to bring input and other rank same as\n- # output_shape to input it to the add_expand operation\n- # condition will have dimension of either input or other\n- input, other = broadcast(ctx.net, input, other, f\"{name}_x\", f\"{name}_y\")\n- if len(tuple(condition.shape)) != len(tuple(input.shape)):\n- condition, input = broadcast(\n- ctx.net, condition, input, f\"{name}_condition\", f\"{name}_x\"\n- )\n-\n x_shape = list(input.shape)\n y_shape = list(other.shape)\n condition_shape = list(condition.shape)\n@@ -71,7 +61,11 @@\n if isinstance(input, torch.Tensor)\n else np.expand_dims(input, axis=0)\n )\n- input = input.expand(output_shape)\n+ input = (\n+ input.expand(output_shape)\n+ if isinstance(input, torch.Tensor)\n+ else np.broadcast_to(input, output_shape)\n+ )\n x_val = get_trt_tensor(ctx, input, f\"{name}_x\")\n else:\n x_val = input\n@@ -89,7 +83,11 @@\n if isinstance(other, torch.Tensor)\n else np.expand_dims(other, axis=0)\n )\n- other = other.expand(output_shape)\n+ other = (\n+ other.expand(output_shape)\n+ if isinstance(other, torch.Tensor)\n+ else np.broadcast_to(other, output_shape)\n+ )\n y_val = get_trt_tensor(ctx, other, f\"{name}_y\")\n else:\n y_val = other\n", "issue": "\ud83d\udc1b [Bug] Bug in `aten.where` converter for Numpy array inputs\n## Bug Description\r\n- When applying converter to Numpy arrays or constants, the compilation fails due to use of the `expand` operator, which only applies to Torch tensors\r\n\n", "before_files": [{"content": "from typing import Optional\n\nimport numpy as np\nimport tensorrt as trt\nimport torch\nfrom torch.fx.node import Target\nfrom torch_tensorrt.dynamo._SourceIR import SourceIR\nfrom torch_tensorrt.dynamo.conversion._ConversionContext import ConversionContext\nfrom torch_tensorrt.dynamo.conversion.converter_utils import (\n broadcastable,\n get_trt_tensor,\n)\nfrom torch_tensorrt.dynamo.conversion.impl.slice import expand\nfrom torch_tensorrt.fx.converters.converter_utils import broadcast, set_layer_name\nfrom torch_tensorrt.fx.types import TRTTensor\n\n\ndef where(\n ctx: ConversionContext,\n target: Target,\n source_ir: Optional[SourceIR],\n name: str,\n input: TRTTensor,\n other: TRTTensor,\n condition: TRTTensor,\n) -> TRTTensor:\n if not (broadcastable(input, other)):\n assert \"The two torch tensors should be broadcastable\"\n\n # 
get output shape\n # purpose of this is to bring input and other rank same as\n # output_shape to input it to the add_expand operation\n # condition will have dimension of either input or other\n input, other = broadcast(ctx.net, input, other, f\"{name}_x\", f\"{name}_y\")\n if len(tuple(condition.shape)) != len(tuple(input.shape)):\n condition, input = broadcast(\n ctx.net, condition, input, f\"{name}_condition\", f\"{name}_x\"\n )\n\n x_shape = list(input.shape)\n y_shape = list(other.shape)\n condition_shape = list(condition.shape)\n\n output_shape = list(torch.broadcast_shapes(condition_shape, x_shape, y_shape))\n\n # expand shape\n if not isinstance(condition, TRTTensor):\n assert condition.dtype in (torch.bool, np.bool_), \"condition dtype is not bool\"\n if condition_shape != output_shape:\n condition = (\n condition.expand(output_shape)\n if isinstance(condition, torch.Tensor)\n else np.broadcast_to(condition, output_shape)\n )\n condition_val = get_trt_tensor(ctx, condition, f\"{name}_condition\")\n else:\n assert condition.dtype == trt.bool, \"mask dtype is not bool!\"\n if condition_shape != output_shape:\n condition_val = expand(\n ctx, target, source_ir, f\"{name}_expand\", condition, output_shape\n )\n else:\n condition_val = condition\n\n if not isinstance(input, TRTTensor):\n if x_shape != output_shape:\n # special case where 1 element in input\n if len(input.shape) == 0:\n input = (\n input.unsqueeze(0)\n if isinstance(input, torch.Tensor)\n else np.expand_dims(input, axis=0)\n )\n input = input.expand(output_shape)\n x_val = get_trt_tensor(ctx, input, f\"{name}_x\")\n else:\n x_val = input\n if x_shape != output_shape:\n x_val = expand(\n ctx, target, source_ir, f\"{name}_x_expand\", input, output_shape\n )\n\n if not isinstance(other, TRTTensor):\n if y_shape != output_shape:\n # special case where 1 element in other\n if len(other.shape) == 0:\n other = (\n other.unsqueeze(0)\n if isinstance(other, torch.Tensor)\n else np.expand_dims(other, axis=0)\n )\n other = other.expand(output_shape)\n y_val = get_trt_tensor(ctx, other, f\"{name}_y\")\n else:\n y_val = other\n if y_shape != output_shape:\n y_val = expand(\n ctx, target, source_ir, f\"{name}_y_expand\", y_val, output_shape\n )\n\n select_layer = ctx.net.add_select(condition_val, x_val, y_val)\n\n set_layer_name(select_layer, target, f\"{name}_select\")\n\n return select_layer.get_output(0)\n", "path": "py/torch_tensorrt/dynamo/conversion/impl/condition/ops.py"}]}
num_tokens_prompt: 1,659 | num_tokens_diff: 677
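
The golden diff captured in the record above swaps a `torch.Tensor`-only `.expand()` call for a type-aware branch that uses `np.broadcast_to()` when the constant arrives as a NumPy array. Below is a minimal, self-contained sketch of that dispatch pattern; the helper name `broadcast_constant` is illustrative only and is not part of torch_tensorrt's API.

```python
import numpy as np
import torch


def broadcast_constant(value, output_shape):
    """Broadcast a constant to output_shape, dispatching on its type.

    torch.Tensor inputs use .expand(); NumPy inputs use np.broadcast_to().
    0-d inputs get a leading axis first, mirroring the diff above.
    """
    if isinstance(value, torch.Tensor):
        if value.ndim == 0:
            value = value.unsqueeze(0)
        return value.expand(output_shape)
    value = np.asarray(value)
    if value.ndim == 0:
        value = np.expand_dims(value, axis=0)
    return np.broadcast_to(value, output_shape)


# Both constant kinds come out with the same broadcast shape:
print(broadcast_constant(torch.tensor(3.0), (2, 4)).shape)  # torch.Size([2, 4])
print(broadcast_constant(np.float32(3.0), (2, 4)).shape)    # (2, 4)
```

Either path produces the same broadcast shape before the value is handed on to `get_trt_tensor`.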
gh_patches_debug_21695 | rasdani/github-patches | git_diff | bridgecrewio__checkov-3835
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Error validando funciones en los tags (English: "Error validating functions in tags")
**Describe the issue**
We are trying to execute checkov in a template which makes use of a function (IF) in the tags:
- 'Fn::If':
- cPdnEnv
-
Key: "Schedule"
Value: "VISOR-02"
- !Ref "AWS::NoValue"
but it throws the following error message:
`Failed to parse tags for entity {'rEC2Instance': ...`
We updated to the latest version of checkov but the error continues
According to the AWS [documentation](https://docs.aws.amazon.com/es_es/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-conditions.html#intrinsic-function-reference-conditions-if), this IF function can be used on tags.
</issue>
<code>
[start of checkov/cloudformation/cfn_utils.py]
1 from __future__ import annotations
2
3 import logging
4 import os
5 from typing import Optional, List, Tuple, Dict, Any, Union
6
7 import dpath.util
8
9 from checkov.cloudformation.checks.resource.base_registry import Registry
10 from checkov.cloudformation.checks.resource.registry import cfn_registry
11 from checkov.cloudformation.context_parser import ContextParser, ENDLINE, STARTLINE
12 from checkov.cloudformation.parser import parse, TemplateSections
13 from checkov.common.parallelizer.parallel_runner import parallel_runner
14 from checkov.common.parsers.node import DictNode, ListNode, StrNode
15 from checkov.common.runners.base_runner import filter_ignored_paths
16 from checkov.runner_filter import RunnerFilter
17 from checkov.common.models.consts import YAML_COMMENT_MARK
18
19 CF_POSSIBLE_ENDINGS = frozenset([".yml", ".yaml", ".json", ".template"])
20
21
22 def get_resource_tags(entity: Dict[StrNode, DictNode], registry: Registry = cfn_registry) -> Optional[Dict[str, str]]:
23 entity_details = registry.extract_entity_details(entity)
24
25 if not entity_details:
26 return None
27
28 entity_config = entity_details[-1]
29
30 if not isinstance(entity_config, dict):
31 return None
32
33 try:
34 properties = entity_config.get("Properties")
35 if properties:
36 tags = properties.get("Tags")
37 if tags:
38 return parse_entity_tags(tags)
39 except Exception:
40 logging.warning(f"Failed to parse tags for entity {entity}")
41
42 return None
43
44
45 def parse_entity_tags(tags: Union[ListNode, Dict[str, Any]]) -> Optional[Dict[str, str]]:
46 if isinstance(tags, ListNode):
47 tag_dict = {get_entity_value_as_string(tag["Key"]): get_entity_value_as_string(tag["Value"]) for tag in tags}
48 return tag_dict
49 elif isinstance(tags, dict):
50 tag_dict = {
51 get_entity_value_as_string(key): get_entity_value_as_string(value)
52 for key, value in tags.items()
53 if key not in (STARTLINE, ENDLINE)
54 }
55 return tag_dict
56 return None
57
58
59 def get_entity_value_as_string(value: Any) -> str:
60 """
61 Handles different type of entities with possible CFN function substitutions. Returns the simplest possible string value
62 (without performing any function calls).
63
64 Examples:
65 Key: Value # returns simple string
66
67 Key: !Ref ${AWS::AccountId}-data # returns ${AWS::AccountId}-data
68
69 Key:
70 - ${account}-data
71 - account: !Ref ${AWS::AccountId}
72
73 # returns ${account}-data
74
75 :param value:
76 :return:
77 """
78 if isinstance(value, dict):
79 (function, value) = next(iter(value.items()))
80 # If the value is a long-form function, then the first element is the template string (technically str_node)
81 # Otherwise the dict value is the template string
82 if isinstance(value, list):
83 if "Join" in function:
84 # Join looks like !Join [, [V1, V2, V3]]
85 join_str = str(value[0])
86 return join_str.join([str(v) for v in value[1]])
87 else:
88 return str(value[0])
89 else:
90 return str(value)
91 else:
92 return str(value)
93
94
95 def get_folder_definitions(
96 root_folder: str, excluded_paths: list[str] | None, out_parsing_errors: dict[str, str] | None = None
97 ) -> tuple[dict[str, DictNode], dict[str, list[tuple[int, str]]]]:
98 out_parsing_errors = {} if out_parsing_errors is None else out_parsing_errors
99 files_list = []
100 for root, d_names, f_names in os.walk(root_folder):
101 filter_ignored_paths(root, d_names, excluded_paths)
102 filter_ignored_paths(root, f_names, excluded_paths)
103 for file in f_names:
104 file_ending = os.path.splitext(file)[1]
105 if file_ending in CF_POSSIBLE_ENDINGS:
106 files_list.append(os.path.join(root, file))
107
108 definitions, definitions_raw = get_files_definitions(files_list, out_parsing_errors)
109 return definitions, definitions_raw
110
111
112 def build_definitions_context(
113 definitions: Dict[str, DictNode], definitions_raw: Dict[str, List[Tuple[int, str]]]
114 ) -> Dict[str, Dict[str, Any]]:
115 definitions_context: Dict[str, Dict[str, Any]] = {}
116 # iterate on the files
117 for file_path, file_path_definitions in definitions.items():
118 # iterate on the definitions (Parameters, Resources, Outputs...)
119 for file_path_definition, definition in file_path_definitions.items():
120 if (
121 isinstance(file_path_definition, StrNode)
122 and file_path_definition.upper() in TemplateSections.__members__
123 and isinstance(definition, DictNode)
124 ):
125 # iterate on the actual objects of each definition
126 for attribute, attr_value in definition.items():
127 if isinstance(attr_value, DictNode):
128 start_line = attr_value.start_mark.line
129 end_line = attr_value.end_mark.line
130 # fix lines number for yaml and json files
131 first_line_index = 0
132 while not str.strip(definitions_raw[file_path][first_line_index][1]):
133 first_line_index += 1
134 # check if the file is a json file
135 if str.strip(definitions_raw[file_path][first_line_index][1])[0] == "{":
136 start_line += 1
137 end_line += 1
138 else:
139 # add resource comments to definition lines
140 current_line = str.strip(definitions_raw[file_path][start_line - 1][1])
141 while not current_line or current_line[0] == YAML_COMMENT_MARK:
142 start_line -= 1
143 current_line = str.strip(definitions_raw[file_path][start_line - 1][1])
144
145 # remove next resource comments from definition lines
146 current_line = str.strip(definitions_raw[file_path][end_line - 1][1])
147 while not current_line or current_line[0] == YAML_COMMENT_MARK:
148 end_line -= 1
149 current_line = str.strip(definitions_raw[file_path][end_line - 1][1])
150
151 code_lines = definitions_raw[file_path][start_line - 1: end_line]
152 dpath.new(
153 definitions_context,
154 [file_path, str(file_path_definition), str(attribute)],
155 {"start_line": start_line, "end_line": end_line, "code_lines": code_lines},
156 )
157 if file_path_definition.upper() == TemplateSections.RESOURCES.value.upper():
158 skipped_checks = ContextParser.collect_skip_comments(
159 entity_code_lines=code_lines,
160 resource_config=attr_value,
161 )
162 dpath.new(
163 definitions_context,
164 [file_path, str(file_path_definition), str(attribute), "skipped_checks"],
165 skipped_checks,
166 )
167 return definitions_context
168
169
170 def create_definitions(
171 root_folder: str,
172 files: list[str] | None = None,
173 runner_filter: RunnerFilter | None = None,
174 out_parsing_errors: dict[str, str] | None = None
175 ) -> tuple[dict[str, DictNode], dict[str, list[tuple[int, str]]]]:
176 runner_filter = runner_filter or RunnerFilter()
177 out_parsing_errors = {} if out_parsing_errors is None else out_parsing_errors
178 definitions: dict[str, DictNode] = {}
179 definitions_raw: dict[str, list[tuple[int, str]]] = {}
180 if files:
181 files_list = [file for file in files if os.path.splitext(file)[1] in CF_POSSIBLE_ENDINGS]
182 definitions, definitions_raw = get_files_definitions(files_list, out_parsing_errors)
183
184 if root_folder:
185 definitions, definitions_raw = get_folder_definitions(root_folder, runner_filter.excluded_paths,
186 out_parsing_errors)
187
188 return definitions, definitions_raw
189
190
191 def get_files_definitions(files: List[str], out_parsing_errors: Dict[str, str], filepath_fn=None) \
192 -> Tuple[Dict[str, DictNode], Dict[str, List[Tuple[int, str]]]]:
193 def _parse_file(file):
194 parsing_errors = {}
195 result = parse(file, parsing_errors)
196 return (file, result), parsing_errors
197
198 results = parallel_runner.run_function(_parse_file, files)
199
200 definitions = {}
201 definitions_raw = {}
202 for result, parsing_errors in results:
203 out_parsing_errors.update(parsing_errors)
204 (file, parse_result) = result
205 path = filepath_fn(file) if filepath_fn else file
206 try:
207 template, template_lines = parse_result
208 if isinstance(template, DictNode) and isinstance(template.get("Resources"), DictNode):
209 if validate_properties_in_resources_are_dict(template):
210 definitions[path] = template
211 definitions_raw[path] = template_lines
212 else:
213 out_parsing_errors.update({file: 'Resource Properties is not a dictionary'})
214 else:
215 if parsing_errors:
216 logging.debug(f'File {file} had the following parsing errors: {parsing_errors}')
217 logging.debug(f"Parsed file {file} incorrectly {template}")
218 except (TypeError, ValueError):
219 logging.warning(f"CloudFormation skipping {file} as it is not a valid CF template")
220 continue
221
222 return definitions, definitions_raw
223
224
225 def validate_properties_in_resources_are_dict(template: DictNode) -> bool:
226 template_resources = template.get("Resources")
227 for resource_name, resource in template_resources.items():
228 if 'Properties' in resource and not isinstance(resource['Properties'], DictNode) or "." in resource_name:
229 return False
230 return True
231
[end of checkov/cloudformation/cfn_utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
golden_diff:
diff --git a/checkov/cloudformation/cfn_utils.py b/checkov/cloudformation/cfn_utils.py
--- a/checkov/cloudformation/cfn_utils.py
+++ b/checkov/cloudformation/cfn_utils.py
@@ -17,6 +17,7 @@
from checkov.common.models.consts import YAML_COMMENT_MARK
CF_POSSIBLE_ENDINGS = frozenset([".yml", ".yaml", ".json", ".template"])
+TAG_FIELD_NAMES = ("Key", "Value")
def get_resource_tags(entity: Dict[StrNode, DictNode], registry: Registry = cfn_registry) -> Optional[Dict[str, str]]:
@@ -43,8 +44,12 @@
def parse_entity_tags(tags: Union[ListNode, Dict[str, Any]]) -> Optional[Dict[str, str]]:
- if isinstance(tags, ListNode):
- tag_dict = {get_entity_value_as_string(tag["Key"]): get_entity_value_as_string(tag["Value"]) for tag in tags}
+ if isinstance(tags, list):
+ tag_dict = {
+ get_entity_value_as_string(tag["Key"]): get_entity_value_as_string(tag["Value"])
+ for tag in tags
+ if all(field in tag for field in TAG_FIELD_NAMES)
+ }
return tag_dict
elif isinstance(tags, dict):
tag_dict = {
verification_info:
{"golden_diff": "diff --git a/checkov/cloudformation/cfn_utils.py b/checkov/cloudformation/cfn_utils.py\n--- a/checkov/cloudformation/cfn_utils.py\n+++ b/checkov/cloudformation/cfn_utils.py\n@@ -17,6 +17,7 @@\n from checkov.common.models.consts import YAML_COMMENT_MARK\n \n CF_POSSIBLE_ENDINGS = frozenset([\".yml\", \".yaml\", \".json\", \".template\"])\n+TAG_FIELD_NAMES = (\"Key\", \"Value\")\n \n \n def get_resource_tags(entity: Dict[StrNode, DictNode], registry: Registry = cfn_registry) -> Optional[Dict[str, str]]:\n@@ -43,8 +44,12 @@\n \n \n def parse_entity_tags(tags: Union[ListNode, Dict[str, Any]]) -> Optional[Dict[str, str]]:\n- if isinstance(tags, ListNode):\n- tag_dict = {get_entity_value_as_string(tag[\"Key\"]): get_entity_value_as_string(tag[\"Value\"]) for tag in tags}\n+ if isinstance(tags, list):\n+ tag_dict = {\n+ get_entity_value_as_string(tag[\"Key\"]): get_entity_value_as_string(tag[\"Value\"])\n+ for tag in tags\n+ if all(field in tag for field in TAG_FIELD_NAMES)\n+ }\n return tag_dict\n elif isinstance(tags, dict):\n tag_dict = {\n", "issue": "Error validando funciones en los tags\n**Describe the issue**\r\nWe are trying to execute checkov in a template which makes use of a function (IF) in the tags:\r\n\r\n - 'Fn::If':\r\n - cPdnEnv\r\n -\r\n Key: \"Schedule\"\r\n Value: \"VISOR-02\"\r\n - !Ref \"AWS::NoValue\" \r\n\r\nbut it throws the following error message:\r\n\r\n`Failed to parse tags for entity {'rEC2Instance': ...`\r\n\r\nWe updated to the latest version of checkov but the error continues\r\n\r\nAccording to the AWS [documentation](https://docs.aws.amazon.com/es_es/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-conditions.html#intrinsic-function-reference-conditions-if), this IF function can be used on tags.\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nimport logging\nimport os\nfrom typing import Optional, List, Tuple, Dict, Any, Union\n\nimport dpath.util\n\nfrom checkov.cloudformation.checks.resource.base_registry import Registry\nfrom checkov.cloudformation.checks.resource.registry import cfn_registry\nfrom checkov.cloudformation.context_parser import ContextParser, ENDLINE, STARTLINE\nfrom checkov.cloudformation.parser import parse, TemplateSections\nfrom checkov.common.parallelizer.parallel_runner import parallel_runner\nfrom checkov.common.parsers.node import DictNode, ListNode, StrNode\nfrom checkov.common.runners.base_runner import filter_ignored_paths\nfrom checkov.runner_filter import RunnerFilter\nfrom checkov.common.models.consts import YAML_COMMENT_MARK\n\nCF_POSSIBLE_ENDINGS = frozenset([\".yml\", \".yaml\", \".json\", \".template\"])\n\n\ndef get_resource_tags(entity: Dict[StrNode, DictNode], registry: Registry = cfn_registry) -> Optional[Dict[str, str]]:\n entity_details = registry.extract_entity_details(entity)\n\n if not entity_details:\n return None\n\n entity_config = entity_details[-1]\n\n if not isinstance(entity_config, dict):\n return None\n\n try:\n properties = entity_config.get(\"Properties\")\n if properties:\n tags = properties.get(\"Tags\")\n if tags:\n return parse_entity_tags(tags)\n except Exception:\n logging.warning(f\"Failed to parse tags for entity {entity}\")\n\n return None\n\n\ndef parse_entity_tags(tags: Union[ListNode, Dict[str, Any]]) -> Optional[Dict[str, str]]:\n if isinstance(tags, ListNode):\n tag_dict = {get_entity_value_as_string(tag[\"Key\"]): get_entity_value_as_string(tag[\"Value\"]) for tag in tags}\n return tag_dict\n elif isinstance(tags, dict):\n tag_dict = 
{\n get_entity_value_as_string(key): get_entity_value_as_string(value)\n for key, value in tags.items()\n if key not in (STARTLINE, ENDLINE)\n }\n return tag_dict\n return None\n\n\ndef get_entity_value_as_string(value: Any) -> str:\n \"\"\"\n Handles different type of entities with possible CFN function substitutions. Returns the simplest possible string value\n (without performing any function calls).\n\n Examples:\n Key: Value # returns simple string\n\n Key: !Ref ${AWS::AccountId}-data # returns ${AWS::AccountId}-data\n\n Key:\n - ${account}-data\n - account: !Ref ${AWS::AccountId}\n\n # returns ${account}-data\n\n :param value:\n :return:\n \"\"\"\n if isinstance(value, dict):\n (function, value) = next(iter(value.items()))\n # If the value is a long-form function, then the first element is the template string (technically str_node)\n # Otherwise the dict value is the template string\n if isinstance(value, list):\n if \"Join\" in function:\n # Join looks like !Join [, [V1, V2, V3]]\n join_str = str(value[0])\n return join_str.join([str(v) for v in value[1]])\n else:\n return str(value[0])\n else:\n return str(value)\n else:\n return str(value)\n\n\ndef get_folder_definitions(\n root_folder: str, excluded_paths: list[str] | None, out_parsing_errors: dict[str, str] | None = None\n) -> tuple[dict[str, DictNode], dict[str, list[tuple[int, str]]]]:\n out_parsing_errors = {} if out_parsing_errors is None else out_parsing_errors\n files_list = []\n for root, d_names, f_names in os.walk(root_folder):\n filter_ignored_paths(root, d_names, excluded_paths)\n filter_ignored_paths(root, f_names, excluded_paths)\n for file in f_names:\n file_ending = os.path.splitext(file)[1]\n if file_ending in CF_POSSIBLE_ENDINGS:\n files_list.append(os.path.join(root, file))\n\n definitions, definitions_raw = get_files_definitions(files_list, out_parsing_errors)\n return definitions, definitions_raw\n\n\ndef build_definitions_context(\n definitions: Dict[str, DictNode], definitions_raw: Dict[str, List[Tuple[int, str]]]\n) -> Dict[str, Dict[str, Any]]:\n definitions_context: Dict[str, Dict[str, Any]] = {}\n # iterate on the files\n for file_path, file_path_definitions in definitions.items():\n # iterate on the definitions (Parameters, Resources, Outputs...)\n for file_path_definition, definition in file_path_definitions.items():\n if (\n isinstance(file_path_definition, StrNode)\n and file_path_definition.upper() in TemplateSections.__members__\n and isinstance(definition, DictNode)\n ):\n # iterate on the actual objects of each definition\n for attribute, attr_value in definition.items():\n if isinstance(attr_value, DictNode):\n start_line = attr_value.start_mark.line\n end_line = attr_value.end_mark.line\n # fix lines number for yaml and json files\n first_line_index = 0\n while not str.strip(definitions_raw[file_path][first_line_index][1]):\n first_line_index += 1\n # check if the file is a json file\n if str.strip(definitions_raw[file_path][first_line_index][1])[0] == \"{\":\n start_line += 1\n end_line += 1\n else:\n # add resource comments to definition lines\n current_line = str.strip(definitions_raw[file_path][start_line - 1][1])\n while not current_line or current_line[0] == YAML_COMMENT_MARK:\n start_line -= 1\n current_line = str.strip(definitions_raw[file_path][start_line - 1][1])\n\n # remove next resource comments from definition lines\n current_line = str.strip(definitions_raw[file_path][end_line - 1][1])\n while not current_line or current_line[0] == YAML_COMMENT_MARK:\n end_line -= 1\n 
current_line = str.strip(definitions_raw[file_path][end_line - 1][1])\n\n code_lines = definitions_raw[file_path][start_line - 1: end_line]\n dpath.new(\n definitions_context,\n [file_path, str(file_path_definition), str(attribute)],\n {\"start_line\": start_line, \"end_line\": end_line, \"code_lines\": code_lines},\n )\n if file_path_definition.upper() == TemplateSections.RESOURCES.value.upper():\n skipped_checks = ContextParser.collect_skip_comments(\n entity_code_lines=code_lines,\n resource_config=attr_value,\n )\n dpath.new(\n definitions_context,\n [file_path, str(file_path_definition), str(attribute), \"skipped_checks\"],\n skipped_checks,\n )\n return definitions_context\n\n\ndef create_definitions(\n root_folder: str,\n files: list[str] | None = None,\n runner_filter: RunnerFilter | None = None,\n out_parsing_errors: dict[str, str] | None = None\n) -> tuple[dict[str, DictNode], dict[str, list[tuple[int, str]]]]:\n runner_filter = runner_filter or RunnerFilter()\n out_parsing_errors = {} if out_parsing_errors is None else out_parsing_errors\n definitions: dict[str, DictNode] = {}\n definitions_raw: dict[str, list[tuple[int, str]]] = {}\n if files:\n files_list = [file for file in files if os.path.splitext(file)[1] in CF_POSSIBLE_ENDINGS]\n definitions, definitions_raw = get_files_definitions(files_list, out_parsing_errors)\n\n if root_folder:\n definitions, definitions_raw = get_folder_definitions(root_folder, runner_filter.excluded_paths,\n out_parsing_errors)\n\n return definitions, definitions_raw\n\n\ndef get_files_definitions(files: List[str], out_parsing_errors: Dict[str, str], filepath_fn=None) \\\n -> Tuple[Dict[str, DictNode], Dict[str, List[Tuple[int, str]]]]:\n def _parse_file(file):\n parsing_errors = {}\n result = parse(file, parsing_errors)\n return (file, result), parsing_errors\n\n results = parallel_runner.run_function(_parse_file, files)\n\n definitions = {}\n definitions_raw = {}\n for result, parsing_errors in results:\n out_parsing_errors.update(parsing_errors)\n (file, parse_result) = result\n path = filepath_fn(file) if filepath_fn else file\n try:\n template, template_lines = parse_result\n if isinstance(template, DictNode) and isinstance(template.get(\"Resources\"), DictNode):\n if validate_properties_in_resources_are_dict(template):\n definitions[path] = template\n definitions_raw[path] = template_lines\n else:\n out_parsing_errors.update({file: 'Resource Properties is not a dictionary'})\n else:\n if parsing_errors:\n logging.debug(f'File {file} had the following parsing errors: {parsing_errors}')\n logging.debug(f\"Parsed file {file} incorrectly {template}\")\n except (TypeError, ValueError):\n logging.warning(f\"CloudFormation skipping {file} as it is not a valid CF template\")\n continue\n\n return definitions, definitions_raw\n\n\ndef validate_properties_in_resources_are_dict(template: DictNode) -> bool:\n template_resources = template.get(\"Resources\")\n for resource_name, resource in template_resources.items():\n if 'Properties' in resource and not isinstance(resource['Properties'], DictNode) or \".\" in resource_name:\n return False\n return True\n", "path": "checkov/cloudformation/cfn_utils.py"}]}
num_tokens_prompt: 3,339 | num_tokens_diff: 285
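
The fix recorded above only consumes list entries that carry both `Key` and `Value` — which is exactly what an `Fn::If` branch does not look like before conditions are evaluated: it is a single-key dict and previously triggered the "Failed to parse tags" warning. A small stand-alone sketch of that guard follows, using plain dicts and `str()` in place of checkov's node classes and `get_entity_value_as_string`; the sample tag values are made up.

```python
TAG_FIELD_NAMES = ("Key", "Value")


def parse_entity_tags(tags):
    """Key -> Value dict over a CloudFormation-style tag list, silently
    skipping entries (e.g. an unevaluated Fn::If) that lack either field."""
    if isinstance(tags, list):
        return {
            str(tag["Key"]): str(tag["Value"])
            for tag in tags
            if all(field in tag for field in TAG_FIELD_NAMES)
        }
    return None


tags = [
    {"Key": "Name", "Value": "visor-instance"},
    # what the parser sees for the Fn::If entry before conditions are resolved
    {"Fn::If": ["cPdnEnv", {"Key": "Schedule", "Value": "VISOR-02"}, {"Ref": "AWS::NoValue"}]},
]
print(parse_entity_tags(tags))  # {'Name': 'visor-instance'}
```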
gh_patches_debug_17721 | rasdani/github-patches | git_diff | yt-dlp__yt-dlp-2776
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
"Unable to extract OpenGraph" in FC2 Video
### Checklist
- [X] I'm reporting a broken site
- [X] I've verified that I'm running yt-dlp version **2022.02.04**. ([update instructions](https://github.com/yt-dlp/yt-dlp#update))
- [X] I've checked that all provided URLs are alive and playable in a browser
- [X] I've checked that all URLs and arguments with special characters are [properly quoted or escaped](https://github.com/ytdl-org/youtube-dl#video-url-contains-an-ampersand-and-im-getting-some-strange-output-1-2839-or-v-is-not-recognized-as-an-internal-or-external-command)
- [X] I've searched the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues including closed ones. DO NOT post duplicates
- [X] I've read the [guidelines for opening an issue](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue)
- [X] I've read about [sharing account credentials](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#are-you-willing-to-share-account-details-if-needed) and I'm willing to share it if required
### Region
Japan
### Description
I get "Unable to extract OpenGraph" error whenever I try to download video from fc2.
Although it is a 24-minute video, the downloaded video is only 24KB.
I saw #2566 but it isn't solve.
Best regards.
### Verbose log
```shell
[debug] Command-line config: ['-vU', 'https://video.fc2.com/content/20220213MSqJytgT']
[debug] Encodings: locale UTF-8, fs utf-8, out utf-8, err utf-8, pref UTF-8
[debug] yt-dlp version 2022.02.04 [c1653e9ef] (zip)
[debug] Python version 3.8.10 (CPython 64bit) - Linux-5.4.0-94-generic-x86_64-with-glibc2.29
[debug] exe versions: none
[debug] Optional libraries: secretstorage, sqlite
[debug] Proxy map: {}
Latest version: 2022.02.04, Current version: 2022.02.04
yt-dlp is up to date (2022.02.04)
[debug] [fc2] Extracting URL: https://video.fc2.com/content/20220213MSqJytgT
[fc2] 20220213MSqJytgT: Downloading webpage
WARNING: [fc2] unable to extract OpenGraph description; please report this issue on https://github.com/yt-dlp/yt-dlp , filling out the "Broken site" issue template properly. Confirm you are on the latest version using -U
[fc2] 20220213MSqJytgT: Downloading info page
[debug] Default format spec: best/bestvideo+bestaudio
[info] 20220213MSqJytgT: Downloading 1 format(s): 0
[debug] Invoking downloader on "https://video.fc2.com/api/v3/videoplay/FGSFRL7G77YLaRCSvFvsjLRa7uFRYjSSOb2psTf/2?signature=92W9Q$3S_TLED-9427TY8JLOJYNDI$.F2YBGY5ZXL-&t=1644854861"
[download] Destination: やくならマグカップも 二番窯#1 [20220213MSqJytgT].mp4
[download] 100% of 24.32KiB in 00:00
```
</issue>
<code>
[start of yt_dlp/extractor/fc2.py]
1 # coding: utf-8
2 from __future__ import unicode_literals
3
4 from .common import InfoExtractor
5 from ..compat import (
6 compat_parse_qs,
7 )
8 from ..utils import (
9 ExtractorError,
10 sanitized_Request,
11 traverse_obj,
12 urlencode_postdata,
13 urljoin,
14 )
15
16
17 class FC2IE(InfoExtractor):
18 _VALID_URL = r'^(?:https?://video\.fc2\.com/(?:[^/]+/)*content/|fc2:)(?P<id>[^/]+)'
19 IE_NAME = 'fc2'
20 _NETRC_MACHINE = 'fc2'
21 _TESTS = [{
22 'url': 'http://video.fc2.com/en/content/20121103kUan1KHs',
23 'md5': 'a6ebe8ebe0396518689d963774a54eb7',
24 'info_dict': {
25 'id': '20121103kUan1KHs',
26 'ext': 'flv',
27 'title': 'Boxing again with Puff',
28 },
29 }, {
30 'url': 'http://video.fc2.com/en/content/20150125cEva0hDn/',
31 'info_dict': {
32 'id': '20150125cEva0hDn',
33 'ext': 'mp4',
34 },
35 'params': {
36 'username': '[email protected]',
37 'password': '(snip)',
38 },
39 'skip': 'requires actual password',
40 }, {
41 'url': 'http://video.fc2.com/en/a/content/20130926eZpARwsF',
42 'only_matching': True,
43 }]
44
45 def _login(self):
46 username, password = self._get_login_info()
47 if username is None or password is None:
48 return False
49
50 # Log in
51 login_form_strs = {
52 'email': username,
53 'password': password,
54 'done': 'video',
55 'Submit': ' Login ',
56 }
57
58 login_data = urlencode_postdata(login_form_strs)
59 request = sanitized_Request(
60 'https://secure.id.fc2.com/index.php?mode=login&switch_language=en', login_data)
61
62 login_results = self._download_webpage(request, None, note='Logging in', errnote='Unable to log in')
63 if 'mode=redirect&login=done' not in login_results:
64 self.report_warning('unable to log in: bad username or password')
65 return False
66
67 # this is also needed
68 login_redir = sanitized_Request('http://id.fc2.com/?mode=redirect&login=done')
69 self._download_webpage(
70 login_redir, None, note='Login redirect', errnote='Login redirect failed')
71
72 return True
73
74 def _real_extract(self, url):
75 video_id = self._match_id(url)
76 self._login()
77 webpage = None
78 if not url.startswith('fc2:'):
79 webpage = self._download_webpage(url, video_id)
80 self._downloader.cookiejar.clear_session_cookies() # must clear
81 self._login()
82
83 title, thumbnail, description = None, None, None
84 if webpage is not None:
85 title = self._html_search_regex(
86 (r'<h2\s+class="videoCnt_title">([^<]+?)</h2>',
87 r'\s+href="[^"]+"\s*title="([^"]+?)"\s*rel="nofollow">\s*<img',
88 # there's two matches in the webpage
89 r'\s+href="[^"]+"\s*title="([^"]+?)"\s*rel="nofollow">\s*\1'),
90 webpage,
91 'title', fatal=False)
92 thumbnail = self._og_search_thumbnail(webpage)
93 description = self._og_search_description(webpage)
94
95 vidplaylist = self._download_json(
96 'https://video.fc2.com/api/v3/videoplaylist/%s?sh=1&fs=0' % video_id, video_id,
97 note='Downloading info page')
98 vid_url = traverse_obj(vidplaylist, ('playlist', 'nq'))
99 if not vid_url:
100 raise ExtractorError('Unable to extract video URL')
101 vid_url = urljoin('https://video.fc2.com/', vid_url)
102
103 return {
104 'id': video_id,
105 'title': title,
106 'url': vid_url,
107 'ext': 'mp4',
108 'description': description,
109 'thumbnail': thumbnail,
110 }
111
112
113 class FC2EmbedIE(InfoExtractor):
114 _VALID_URL = r'https?://video\.fc2\.com/flv2\.swf\?(?P<query>.+)'
115 IE_NAME = 'fc2:embed'
116
117 _TEST = {
118 'url': 'http://video.fc2.com/flv2.swf?t=201404182936758512407645&i=20130316kwishtfitaknmcgd76kjd864hso93htfjcnaogz629mcgfs6rbfk0hsycma7shkf85937cbchfygd74&i=201403223kCqB3Ez&d=2625&sj=11&lang=ja&rel=1&from=11&cmt=1&tk=TlRBM09EQTNNekU9&tl=プリズン・ブレイク%20S1-01%20マイケル%20【吹替】',
119 'md5': 'b8aae5334cb691bdb1193a88a6ab5d5a',
120 'info_dict': {
121 'id': '201403223kCqB3Ez',
122 'ext': 'flv',
123 'title': 'プリズン・ブレイク S1-01 マイケル 【吹替】',
124 'thumbnail': r're:^https?://.*\.jpg$',
125 },
126 }
127
128 def _real_extract(self, url):
129 mobj = self._match_valid_url(url)
130 query = compat_parse_qs(mobj.group('query'))
131
132 video_id = query['i'][-1]
133 title = query.get('tl', ['FC2 video %s' % video_id])[0]
134
135 sj = query.get('sj', [None])[0]
136 thumbnail = None
137 if sj:
138 # See thumbnailImagePath() in ServerConst.as of flv2.swf
139 thumbnail = 'http://video%s-thumbnail.fc2.com/up/pic/%s.jpg' % (
140 sj, '/'.join((video_id[:6], video_id[6:8], video_id[-2], video_id[-1], video_id)))
141
142 return {
143 '_type': 'url_transparent',
144 'ie_key': FC2IE.ie_key(),
145 'url': 'fc2:%s' % video_id,
146 'title': title,
147 'thumbnail': thumbnail,
148 }
149
[end of yt_dlp/extractor/fc2.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
golden_diff:
diff --git a/yt_dlp/extractor/fc2.py b/yt_dlp/extractor/fc2.py
--- a/yt_dlp/extractor/fc2.py
+++ b/yt_dlp/extractor/fc2.py
@@ -90,7 +90,7 @@
webpage,
'title', fatal=False)
thumbnail = self._og_search_thumbnail(webpage)
- description = self._og_search_description(webpage)
+ description = self._og_search_description(webpage, default=None)
vidplaylist = self._download_json(
'https://video.fc2.com/api/v3/videoplaylist/%s?sh=1&fs=0' % video_id, video_id,
@@ -105,6 +105,7 @@
'title': title,
'url': vid_url,
'ext': 'mp4',
+ 'protocol': 'm3u8_native',
'description': description,
'thumbnail': thumbnail,
}
verification_info:
{"golden_diff": "diff --git a/yt_dlp/extractor/fc2.py b/yt_dlp/extractor/fc2.py\n--- a/yt_dlp/extractor/fc2.py\n+++ b/yt_dlp/extractor/fc2.py\n@@ -90,7 +90,7 @@\n webpage,\n 'title', fatal=False)\n thumbnail = self._og_search_thumbnail(webpage)\n- description = self._og_search_description(webpage)\n+ description = self._og_search_description(webpage, default=None)\n \n vidplaylist = self._download_json(\n 'https://video.fc2.com/api/v3/videoplaylist/%s?sh=1&fs=0' % video_id, video_id,\n@@ -105,6 +105,7 @@\n 'title': title,\n 'url': vid_url,\n 'ext': 'mp4',\n+ 'protocol': 'm3u8_native',\n 'description': description,\n 'thumbnail': thumbnail,\n }\n", "issue": "\"Unable to extract OpenGraph\" in FC2 Video\n### Checklist\n\n- [X] I'm reporting a broken site\n- [X] I've verified that I'm running yt-dlp version **2022.02.04**. ([update instructions](https://github.com/yt-dlp/yt-dlp#update))\n- [X] I've checked that all provided URLs are alive and playable in a browser\n- [X] I've checked that all URLs and arguments with special characters are [properly quoted or escaped](https://github.com/ytdl-org/youtube-dl#video-url-contains-an-ampersand-and-im-getting-some-strange-output-1-2839-or-v-is-not-recognized-as-an-internal-or-external-command)\n- [X] I've searched the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues including closed ones. DO NOT post duplicates\n- [X] I've read the [guidelines for opening an issue](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue)\n- [X] I've read about [sharing account credentials](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#are-you-willing-to-share-account-details-if-needed) and I'm willing to share it if required\n\n### Region\n\nJapan\n\n### Description\n\nI get \"Unable to extract OpenGraph\" error whenever I try to download video from fc2.\r\nAlthough it is a 24-minute video, the downloaded video is only 24KB.\r\nI saw #2566 but it isn't solve.\r\nBest regards.\n\n### Verbose log\n\n```shell\n[debug] Command-line config: ['-vU', 'https://video.fc2.com/content/20220213MSqJytgT']\r\n[debug] Encodings: locale UTF-8, fs utf-8, out utf-8, err utf-8, pref UTF-8\r\n[debug] yt-dlp version 2022.02.04 [c1653e9ef] (zip)\r\n[debug] Python version 3.8.10 (CPython 64bit) - Linux-5.4.0-94-generic-x86_64-with-glibc2.29\r\n[debug] exe versions: none\r\n[debug] Optional libraries: secretstorage, sqlite\r\n[debug] Proxy map: {}\r\nLatest version: 2022.02.04, Current version: 2022.02.04\r\nyt-dlp is up to date (2022.02.04)\r\n[debug] [fc2] Extracting URL: https://video.fc2.com/content/20220213MSqJytgT\r\n[fc2] 20220213MSqJytgT: Downloading webpage\r\nWARNING: [fc2] unable to extract OpenGraph description; please report this issue on https://github.com/yt-dlp/yt-dlp , filling out the \"Broken site\" issue template properly. 
Confirm you are on the latest version using -U\r\n[fc2] 20220213MSqJytgT: Downloading info page\r\n[debug] Default format spec: best/bestvideo+bestaudio\r\n[info] 20220213MSqJytgT: Downloading 1 format(s): 0\r\n[debug] Invoking downloader on \"https://video.fc2.com/api/v3/videoplay/FGSFRL7G77YLaRCSvFvsjLRa7uFRYjSSOb2psTf/2?signature=92W9Q$3S_TLED-9427TY8JLOJYNDI$.F2YBGY5ZXL-&t=1644854861\"\r\n[download] Destination: \u3084\u304f\u306a\u3089\u30de\u30b0\u30ab\u30c3\u30d7\u3082 \u4e8c\u756a\u7aaf#1 [20220213MSqJytgT].mp4\r\n[download] 100% of 24.32KiB in 00:00\n```\n\n", "before_files": [{"content": "# coding: utf-8\nfrom __future__ import unicode_literals\n\nfrom .common import InfoExtractor\nfrom ..compat import (\n compat_parse_qs,\n)\nfrom ..utils import (\n ExtractorError,\n sanitized_Request,\n traverse_obj,\n urlencode_postdata,\n urljoin,\n)\n\n\nclass FC2IE(InfoExtractor):\n _VALID_URL = r'^(?:https?://video\\.fc2\\.com/(?:[^/]+/)*content/|fc2:)(?P<id>[^/]+)'\n IE_NAME = 'fc2'\n _NETRC_MACHINE = 'fc2'\n _TESTS = [{\n 'url': 'http://video.fc2.com/en/content/20121103kUan1KHs',\n 'md5': 'a6ebe8ebe0396518689d963774a54eb7',\n 'info_dict': {\n 'id': '20121103kUan1KHs',\n 'ext': 'flv',\n 'title': 'Boxing again with Puff',\n },\n }, {\n 'url': 'http://video.fc2.com/en/content/20150125cEva0hDn/',\n 'info_dict': {\n 'id': '20150125cEva0hDn',\n 'ext': 'mp4',\n },\n 'params': {\n 'username': '[email protected]',\n 'password': '(snip)',\n },\n 'skip': 'requires actual password',\n }, {\n 'url': 'http://video.fc2.com/en/a/content/20130926eZpARwsF',\n 'only_matching': True,\n }]\n\n def _login(self):\n username, password = self._get_login_info()\n if username is None or password is None:\n return False\n\n # Log in\n login_form_strs = {\n 'email': username,\n 'password': password,\n 'done': 'video',\n 'Submit': ' Login ',\n }\n\n login_data = urlencode_postdata(login_form_strs)\n request = sanitized_Request(\n 'https://secure.id.fc2.com/index.php?mode=login&switch_language=en', login_data)\n\n login_results = self._download_webpage(request, None, note='Logging in', errnote='Unable to log in')\n if 'mode=redirect&login=done' not in login_results:\n self.report_warning('unable to log in: bad username or password')\n return False\n\n # this is also needed\n login_redir = sanitized_Request('http://id.fc2.com/?mode=redirect&login=done')\n self._download_webpage(\n login_redir, None, note='Login redirect', errnote='Login redirect failed')\n\n return True\n\n def _real_extract(self, url):\n video_id = self._match_id(url)\n self._login()\n webpage = None\n if not url.startswith('fc2:'):\n webpage = self._download_webpage(url, video_id)\n self._downloader.cookiejar.clear_session_cookies() # must clear\n self._login()\n\n title, thumbnail, description = None, None, None\n if webpage is not None:\n title = self._html_search_regex(\n (r'<h2\\s+class=\"videoCnt_title\">([^<]+?)</h2>',\n r'\\s+href=\"[^\"]+\"\\s*title=\"([^\"]+?)\"\\s*rel=\"nofollow\">\\s*<img',\n # there's two matches in the webpage\n r'\\s+href=\"[^\"]+\"\\s*title=\"([^\"]+?)\"\\s*rel=\"nofollow\">\\s*\\1'),\n webpage,\n 'title', fatal=False)\n thumbnail = self._og_search_thumbnail(webpage)\n description = self._og_search_description(webpage)\n\n vidplaylist = self._download_json(\n 'https://video.fc2.com/api/v3/videoplaylist/%s?sh=1&fs=0' % video_id, video_id,\n note='Downloading info page')\n vid_url = traverse_obj(vidplaylist, ('playlist', 'nq'))\n if not vid_url:\n raise ExtractorError('Unable to extract video URL')\n vid_url = 
urljoin('https://video.fc2.com/', vid_url)\n\n return {\n 'id': video_id,\n 'title': title,\n 'url': vid_url,\n 'ext': 'mp4',\n 'description': description,\n 'thumbnail': thumbnail,\n }\n\n\nclass FC2EmbedIE(InfoExtractor):\n _VALID_URL = r'https?://video\\.fc2\\.com/flv2\\.swf\\?(?P<query>.+)'\n IE_NAME = 'fc2:embed'\n\n _TEST = {\n 'url': 'http://video.fc2.com/flv2.swf?t=201404182936758512407645&i=20130316kwishtfitaknmcgd76kjd864hso93htfjcnaogz629mcgfs6rbfk0hsycma7shkf85937cbchfygd74&i=201403223kCqB3Ez&d=2625&sj=11&lang=ja&rel=1&from=11&cmt=1&tk=TlRBM09EQTNNekU9&tl=\u30d7\u30ea\u30ba\u30f3\uff65\u30d6\u30ec\u30a4\u30af%20S1-01%20\u30de\u30a4\u30b1\u30eb%20\u3010\u5439\u66ff\u3011',\n 'md5': 'b8aae5334cb691bdb1193a88a6ab5d5a',\n 'info_dict': {\n 'id': '201403223kCqB3Ez',\n 'ext': 'flv',\n 'title': '\u30d7\u30ea\u30ba\u30f3\uff65\u30d6\u30ec\u30a4\u30af S1-01 \u30de\u30a4\u30b1\u30eb \u3010\u5439\u66ff\u3011',\n 'thumbnail': r're:^https?://.*\\.jpg$',\n },\n }\n\n def _real_extract(self, url):\n mobj = self._match_valid_url(url)\n query = compat_parse_qs(mobj.group('query'))\n\n video_id = query['i'][-1]\n title = query.get('tl', ['FC2 video %s' % video_id])[0]\n\n sj = query.get('sj', [None])[0]\n thumbnail = None\n if sj:\n # See thumbnailImagePath() in ServerConst.as of flv2.swf\n thumbnail = 'http://video%s-thumbnail.fc2.com/up/pic/%s.jpg' % (\n sj, '/'.join((video_id[:6], video_id[6:8], video_id[-2], video_id[-1], video_id)))\n\n return {\n '_type': 'url_transparent',\n 'ie_key': FC2IE.ie_key(),\n 'url': 'fc2:%s' % video_id,\n 'title': title,\n 'thumbnail': thumbnail,\n }\n", "path": "yt_dlp/extractor/fc2.py"}]}
num_tokens_prompt: 3,425 | num_tokens_diff: 220
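
The diff in the record above makes two changes: the OpenGraph description lookup gets `default=None`, so a missing `og:description` no longer surfaces as a warning, and the returned URL is marked `m3u8_native`, so the HLS playlist is actually downloaded segment by segment — which is presumably why the log shows only a ~24 KiB output file. The snippet below is a deliberately simplified stand-in for the "return a default instead of failing" lookup, not yt-dlp's real `_og_search_description` helper.

```python
import re


def og_search_description(html, default=None):
    """Tiny stand-in for an OpenGraph lookup: return the og:description
    content if present, otherwise `default` - no warning, no error."""
    m = re.search(
        r'<meta[^>]+property=["\']og:description["\'][^>]+content=["\']([^"\']*)["\']',
        html,
        flags=re.IGNORECASE,
    )
    return m.group(1) if m else default


page_without_og = "<html><head><title>fc2 video</title></head></html>"
print(og_search_description(page_without_og))       # None instead of a failure
print(og_search_description(page_without_og, ""))   # explicit empty-string fallback
```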
gh_patches_debug_12960 | rasdani/github-patches | git_diff | pyinstaller__pyinstaller-5231
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Code signing and notarization fails on macOS .app bundles
When using the pyinstaller to create an .app bundle, the installer places a file called base_library.zip into Contents/MacOS. This file is the reason that notarization will fail. As already done with other non executable files, the base_library.zip must be placed into the Contents/Resources/ folder and a link must be made in Contents/MacOS, as described here: https://github.com/pyinstaller/pyinstaller/blob/develop/PyInstaller/building/osx.py#L193
</issue>
<code>
[start of PyInstaller/building/osx.py]
1 #-----------------------------------------------------------------------------
2 # Copyright (c) 2005-2020, PyInstaller Development Team.
3 #
4 # Distributed under the terms of the GNU General Public License (version 2
5 # or later) with exception for distributing the bootloader.
6 #
7 # The full license is in the file COPYING.txt, distributed with this software.
8 #
9 # SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
10 #-----------------------------------------------------------------------------
11
12 import os
13 import plistlib
14 import shutil
15 from ..compat import is_darwin
16 from .api import EXE, COLLECT
17 from .datastruct import Target, TOC, logger
18 from .utils import _check_path_overlap, _rmtree, add_suffix_to_extensions, checkCache
19
20
21
22 class BUNDLE(Target):
23 def __init__(self, *args, **kws):
24 from ..config import CONF
25
26 # BUNDLE only has a sense under Mac OS X, it's a noop on other platforms
27 if not is_darwin:
28 return
29
30 # get a path to a .icns icon for the app bundle.
31 self.icon = kws.get('icon')
32 if not self.icon:
33 # --icon not specified; use the default in the pyinstaller folder
34 self.icon = os.path.join(os.path.dirname(os.path.dirname(__file__)),
35 'bootloader', 'images', 'icon-windowed.icns')
36 else:
37 # user gave an --icon=path. If it is relative, make it
38 # relative to the spec file location.
39 if not os.path.isabs(self.icon):
40 self.icon = os.path.join(CONF['specpath'], self.icon)
41 # ensure icon path is absolute
42 self.icon = os.path.abspath(self.icon)
43
44 Target.__init__(self)
45
46 # .app bundle is created in DISTPATH.
47 self.name = kws.get('name', None)
48 base_name = os.path.basename(self.name)
49 self.name = os.path.join(CONF['distpath'], base_name)
50
51 self.appname = os.path.splitext(base_name)[0]
52 self.version = kws.get("version", "0.0.0")
53 self.toc = TOC()
54 self.strip = False
55 self.upx = False
56 self.console = True
57
58 # .app bundle identifier for Code Signing
59 self.bundle_identifier = kws.get('bundle_identifier')
60 if not self.bundle_identifier:
61 # Fallback to appname.
62 self.bundle_identifier = self.appname
63
64 self.info_plist = kws.get('info_plist', None)
65
66 for arg in args:
67 if isinstance(arg, EXE):
68 self.toc.append((os.path.basename(arg.name), arg.name, arg.typ))
69 self.toc.extend(arg.dependencies)
70 self.strip = arg.strip
71 self.upx = arg.upx
72 self.upx_exclude = arg.upx_exclude
73 self.console = arg.console
74 elif isinstance(arg, TOC):
75 self.toc.extend(arg)
76 # TOC doesn't have a strip or upx attribute, so there is no way for us to
77 # tell which cache we should draw from.
78 elif isinstance(arg, COLLECT):
79 self.toc.extend(arg.toc)
80 self.strip = arg.strip_binaries
81 self.upx = arg.upx_binaries
82 self.upx_exclude = arg.upx_exclude
83 self.console = arg.console
84 else:
85 logger.info("unsupported entry %s", arg.__class__.__name__)
86 # Now, find values for app filepath (name), app name (appname), and name
87 # of the actual executable (exename) from the first EXECUTABLE item in
88 # toc, which might have come from a COLLECT too (not from an EXE).
89 for inm, name, typ in self.toc:
90 if typ == "EXECUTABLE":
91 self.exename = name
92 if self.name is None:
93 self.appname = "Mac%s" % (os.path.splitext(inm)[0],)
94 self.name = os.path.join(CONF['specpath'], self.appname + ".app")
95 else:
96 self.name = os.path.join(CONF['specpath'], self.name)
97 break
98 self.__postinit__()
99
100 _GUTS = (
101 # BUNDLE always builds, just want the toc to be written out
102 ('toc', None),
103 )
104
105 def _check_guts(self, data, last_build):
106 # BUNDLE always needs to be executed, since it will clean the output
107 # directory anyway to make sure there is no existing cruft accumulating
108 return 1
109
110 def assemble(self):
111 if _check_path_overlap(self.name) and os.path.isdir(self.name):
112 _rmtree(self.name)
113 logger.info("Building BUNDLE %s", self.tocbasename)
114
115 # Create a minimal Mac bundle structure
116 os.makedirs(os.path.join(self.name, "Contents", "MacOS"))
117 os.makedirs(os.path.join(self.name, "Contents", "Resources"))
118 os.makedirs(os.path.join(self.name, "Contents", "Frameworks"))
119
120 # Copy icns icon to Resources directory.
121 if os.path.exists(self.icon):
122 shutil.copy(self.icon, os.path.join(self.name, 'Contents', 'Resources'))
123 else:
124 logger.warning("icon not found %s", self.icon)
125
126 # Key/values for a minimal Info.plist file
127 info_plist_dict = {"CFBundleDisplayName": self.appname,
128 "CFBundleName": self.appname,
129
130 # Required by 'codesign' utility.
131 # The value for CFBundleIdentifier is used as the default unique
132 # name of your program for Code Signing purposes.
133 # It even identifies the APP for access to restricted OS X areas
134 # like Keychain.
135 #
136 # The identifier used for signing must be globally unique. The usal
137 # form for this identifier is a hierarchical name in reverse DNS
138 # notation, starting with the toplevel domain, followed by the
139 # company name, followed by the department within the company, and
140 # ending with the product name. Usually in the form:
141 # com.mycompany.department.appname
142 # Cli option --osx-bundle-identifier sets this value.
143 "CFBundleIdentifier": self.bundle_identifier,
144
145 # Fix for #156 - 'MacOS' must be in the name - not sure why
146 "CFBundleExecutable": 'MacOS/%s' % os.path.basename(self.exename),
147 "CFBundleIconFile": os.path.basename(self.icon),
148 "CFBundleInfoDictionaryVersion": "6.0",
149 "CFBundlePackageType": "APPL",
150 "CFBundleShortVersionString": self.version,
151
152 }
153
154 # Setting EXE console=True implies LSBackgroundOnly=True.
155 # But it still can be overwrite by the user.
156 if self.console:
157 info_plist_dict['LSBackgroundOnly'] = True
158
159 # Merge info_plist settings from spec file
160 if isinstance(self.info_plist, dict) and self.info_plist:
161 info_plist_dict.update(self.info_plist)
162
163 plist_filename = os.path.join(self.name, "Contents", "Info.plist")
164 with open(plist_filename, "wb") as plist_fh:
165 plistlib.dump(info_plist_dict, plist_fh)
166
167 links = []
168 toc = add_suffix_to_extensions(self.toc)
169 for inm, fnm, typ in toc:
170 # Copy files from cache. This ensures that are used files with relative
171 # paths to dynamic library dependencies (@executable_path)
172 base_path = inm.split('/', 1)[0]
173 if typ in ('EXTENSION', 'BINARY'):
174 fnm = checkCache(fnm, strip=self.strip, upx=self.upx,
175 upx_exclude=self.upx_exclude, dist_nm=inm)
176 # Add most data files to a list for symlinking later.
177 if typ == 'DATA' and base_path not in ('base_library.zip', 'PySide2', 'PyQt5'):
178 links.append((inm, fnm))
179 else:
180 tofnm = os.path.join(self.name, "Contents", "MacOS", inm)
181 todir = os.path.dirname(tofnm)
182 if not os.path.exists(todir):
183 os.makedirs(todir)
184 if os.path.isdir(fnm):
185 # beacuse shutil.copy2() is the default copy function
186 # for shutil.copytree, this will also copy file metadata
187 shutil.copytree(fnm, tofnm)
188 else:
189 shutil.copy(fnm, tofnm)
190
191 logger.info('moving BUNDLE data files to Resource directory')
192
193 # Mac OS X Code Signing does not work when .app bundle contains
194 # data files in dir ./Contents/MacOS.
195 #
196 # Put all data files in ./Resources and create symlinks in ./MacOS.
197 bin_dir = os.path.join(self.name, 'Contents', 'MacOS')
198 res_dir = os.path.join(self.name, 'Contents', 'Resources')
199 for inm, fnm in links:
200 tofnm = os.path.join(res_dir, inm)
201 todir = os.path.dirname(tofnm)
202 if not os.path.exists(todir):
203 os.makedirs(todir)
204 if os.path.isdir(fnm):
205 # beacuse shutil.copy2() is the default copy function
206 # for shutil.copytree, this will also copy file metadata
207 shutil.copytree(fnm, tofnm)
208 else:
209 shutil.copy(fnm, tofnm)
210 base_path = os.path.split(inm)[0]
211 if base_path:
212 if not os.path.exists(os.path.join(bin_dir, inm)):
213 path = ''
214 for part in iter(base_path.split(os.path.sep)):
215 # Build path from previous path and the next part of the base path
216 path = os.path.join(path, part)
217 try:
218 relative_source_path = os.path.relpath(os.path.join(res_dir, path),
219 os.path.split(os.path.join(bin_dir, path))[0])
220 dest_path = os.path.join(bin_dir, path)
221 os.symlink(relative_source_path, dest_path)
222 break
223 except FileExistsError:
224 pass
225 if not os.path.exists(os.path.join(bin_dir, inm)):
226 relative_source_path = os.path.relpath(os.path.join(res_dir, inm),
227 os.path.split(os.path.join(bin_dir, inm))[0])
228 dest_path = os.path.join(bin_dir, inm)
229 os.symlink(relative_source_path, dest_path)
230 else: # If path is empty, e.g., a top level file, try to just symlink the file
231 os.symlink(os.path.relpath(os.path.join(res_dir, inm),
232 os.path.split(os.path.join(bin_dir, inm))[0]),
233 os.path.join(bin_dir, inm))
234
[end of PyInstaller/building/osx.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
golden_diff:
diff --git a/PyInstaller/building/osx.py b/PyInstaller/building/osx.py
--- a/PyInstaller/building/osx.py
+++ b/PyInstaller/building/osx.py
@@ -174,7 +174,7 @@
fnm = checkCache(fnm, strip=self.strip, upx=self.upx,
upx_exclude=self.upx_exclude, dist_nm=inm)
# Add most data files to a list for symlinking later.
- if typ == 'DATA' and base_path not in ('base_library.zip', 'PySide2', 'PyQt5'):
+ if typ == 'DATA' and base_path not in ('PySide2', 'PyQt5'):
links.append((inm, fnm))
else:
tofnm = os.path.join(self.name, "Contents", "MacOS", inm)
verification_info:
{"golden_diff": "diff --git a/PyInstaller/building/osx.py b/PyInstaller/building/osx.py\n--- a/PyInstaller/building/osx.py\n+++ b/PyInstaller/building/osx.py\n@@ -174,7 +174,7 @@\n fnm = checkCache(fnm, strip=self.strip, upx=self.upx,\n upx_exclude=self.upx_exclude, dist_nm=inm)\n # Add most data files to a list for symlinking later.\n- if typ == 'DATA' and base_path not in ('base_library.zip', 'PySide2', 'PyQt5'):\n+ if typ == 'DATA' and base_path not in ('PySide2', 'PyQt5'):\n links.append((inm, fnm))\n else:\n tofnm = os.path.join(self.name, \"Contents\", \"MacOS\", inm)\n", "issue": "Code signing and notarization fails on macOS .app bundles\nWhen using the pyinstaller to create an .app bundle, the installer places a file called base_library.zip into Contents/MacOS. This file is the reason that notarization will fail. As already done with other non executable files, the base_library.zip must be placed into the Contents/Resources/ folder and a link must be made in Contents/MacOS, as described here: https://github.com/pyinstaller/pyinstaller/blob/develop/PyInstaller/building/osx.py#L193\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2005-2020, PyInstaller Development Team.\n#\n# Distributed under the terms of the GNU General Public License (version 2\n# or later) with exception for distributing the bootloader.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#\n# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)\n#-----------------------------------------------------------------------------\n\nimport os\nimport plistlib\nimport shutil\nfrom ..compat import is_darwin\nfrom .api import EXE, COLLECT\nfrom .datastruct import Target, TOC, logger\nfrom .utils import _check_path_overlap, _rmtree, add_suffix_to_extensions, checkCache\n\n\n\nclass BUNDLE(Target):\n def __init__(self, *args, **kws):\n from ..config import CONF\n\n # BUNDLE only has a sense under Mac OS X, it's a noop on other platforms\n if not is_darwin:\n return\n\n # get a path to a .icns icon for the app bundle.\n self.icon = kws.get('icon')\n if not self.icon:\n # --icon not specified; use the default in the pyinstaller folder\n self.icon = os.path.join(os.path.dirname(os.path.dirname(__file__)),\n 'bootloader', 'images', 'icon-windowed.icns')\n else:\n # user gave an --icon=path. 
If it is relative, make it\n # relative to the spec file location.\n if not os.path.isabs(self.icon):\n self.icon = os.path.join(CONF['specpath'], self.icon)\n # ensure icon path is absolute\n self.icon = os.path.abspath(self.icon)\n\n Target.__init__(self)\n\n # .app bundle is created in DISTPATH.\n self.name = kws.get('name', None)\n base_name = os.path.basename(self.name)\n self.name = os.path.join(CONF['distpath'], base_name)\n\n self.appname = os.path.splitext(base_name)[0]\n self.version = kws.get(\"version\", \"0.0.0\")\n self.toc = TOC()\n self.strip = False\n self.upx = False\n self.console = True\n\n # .app bundle identifier for Code Signing\n self.bundle_identifier = kws.get('bundle_identifier')\n if not self.bundle_identifier:\n # Fallback to appname.\n self.bundle_identifier = self.appname\n\n self.info_plist = kws.get('info_plist', None)\n\n for arg in args:\n if isinstance(arg, EXE):\n self.toc.append((os.path.basename(arg.name), arg.name, arg.typ))\n self.toc.extend(arg.dependencies)\n self.strip = arg.strip\n self.upx = arg.upx\n self.upx_exclude = arg.upx_exclude\n self.console = arg.console\n elif isinstance(arg, TOC):\n self.toc.extend(arg)\n # TOC doesn't have a strip or upx attribute, so there is no way for us to\n # tell which cache we should draw from.\n elif isinstance(arg, COLLECT):\n self.toc.extend(arg.toc)\n self.strip = arg.strip_binaries\n self.upx = arg.upx_binaries\n self.upx_exclude = arg.upx_exclude\n self.console = arg.console\n else:\n logger.info(\"unsupported entry %s\", arg.__class__.__name__)\n # Now, find values for app filepath (name), app name (appname), and name\n # of the actual executable (exename) from the first EXECUTABLE item in\n # toc, which might have come from a COLLECT too (not from an EXE).\n for inm, name, typ in self.toc:\n if typ == \"EXECUTABLE\":\n self.exename = name\n if self.name is None:\n self.appname = \"Mac%s\" % (os.path.splitext(inm)[0],)\n self.name = os.path.join(CONF['specpath'], self.appname + \".app\")\n else:\n self.name = os.path.join(CONF['specpath'], self.name)\n break\n self.__postinit__()\n\n _GUTS = (\n # BUNDLE always builds, just want the toc to be written out\n ('toc', None),\n )\n\n def _check_guts(self, data, last_build):\n # BUNDLE always needs to be executed, since it will clean the output\n # directory anyway to make sure there is no existing cruft accumulating\n return 1\n\n def assemble(self):\n if _check_path_overlap(self.name) and os.path.isdir(self.name):\n _rmtree(self.name)\n logger.info(\"Building BUNDLE %s\", self.tocbasename)\n\n # Create a minimal Mac bundle structure\n os.makedirs(os.path.join(self.name, \"Contents\", \"MacOS\"))\n os.makedirs(os.path.join(self.name, \"Contents\", \"Resources\"))\n os.makedirs(os.path.join(self.name, \"Contents\", \"Frameworks\"))\n\n # Copy icns icon to Resources directory.\n if os.path.exists(self.icon):\n shutil.copy(self.icon, os.path.join(self.name, 'Contents', 'Resources'))\n else:\n logger.warning(\"icon not found %s\", self.icon)\n\n # Key/values for a minimal Info.plist file\n info_plist_dict = {\"CFBundleDisplayName\": self.appname,\n \"CFBundleName\": self.appname,\n\n # Required by 'codesign' utility.\n # The value for CFBundleIdentifier is used as the default unique\n # name of your program for Code Signing purposes.\n # It even identifies the APP for access to restricted OS X areas\n # like Keychain.\n #\n # The identifier used for signing must be globally unique. 
The usal\n # form for this identifier is a hierarchical name in reverse DNS\n # notation, starting with the toplevel domain, followed by the\n # company name, followed by the department within the company, and\n # ending with the product name. Usually in the form:\n # com.mycompany.department.appname\n # Cli option --osx-bundle-identifier sets this value.\n \"CFBundleIdentifier\": self.bundle_identifier,\n\n # Fix for #156 - 'MacOS' must be in the name - not sure why\n \"CFBundleExecutable\": 'MacOS/%s' % os.path.basename(self.exename),\n \"CFBundleIconFile\": os.path.basename(self.icon),\n \"CFBundleInfoDictionaryVersion\": \"6.0\",\n \"CFBundlePackageType\": \"APPL\",\n \"CFBundleShortVersionString\": self.version,\n\n }\n\n # Setting EXE console=True implies LSBackgroundOnly=True.\n # But it still can be overwrite by the user.\n if self.console:\n info_plist_dict['LSBackgroundOnly'] = True\n\n # Merge info_plist settings from spec file\n if isinstance(self.info_plist, dict) and self.info_plist:\n info_plist_dict.update(self.info_plist)\n\n plist_filename = os.path.join(self.name, \"Contents\", \"Info.plist\")\n with open(plist_filename, \"wb\") as plist_fh:\n plistlib.dump(info_plist_dict, plist_fh)\n\n links = []\n toc = add_suffix_to_extensions(self.toc)\n for inm, fnm, typ in toc:\n # Copy files from cache. This ensures that are used files with relative\n # paths to dynamic library dependencies (@executable_path)\n base_path = inm.split('/', 1)[0]\n if typ in ('EXTENSION', 'BINARY'):\n fnm = checkCache(fnm, strip=self.strip, upx=self.upx,\n upx_exclude=self.upx_exclude, dist_nm=inm)\n # Add most data files to a list for symlinking later.\n if typ == 'DATA' and base_path not in ('base_library.zip', 'PySide2', 'PyQt5'):\n links.append((inm, fnm))\n else:\n tofnm = os.path.join(self.name, \"Contents\", \"MacOS\", inm)\n todir = os.path.dirname(tofnm)\n if not os.path.exists(todir):\n os.makedirs(todir)\n if os.path.isdir(fnm):\n # beacuse shutil.copy2() is the default copy function\n # for shutil.copytree, this will also copy file metadata\n shutil.copytree(fnm, tofnm)\n else:\n shutil.copy(fnm, tofnm)\n\n logger.info('moving BUNDLE data files to Resource directory')\n\n # Mac OS X Code Signing does not work when .app bundle contains\n # data files in dir ./Contents/MacOS.\n #\n # Put all data files in ./Resources and create symlinks in ./MacOS.\n bin_dir = os.path.join(self.name, 'Contents', 'MacOS')\n res_dir = os.path.join(self.name, 'Contents', 'Resources')\n for inm, fnm in links:\n tofnm = os.path.join(res_dir, inm)\n todir = os.path.dirname(tofnm)\n if not os.path.exists(todir):\n os.makedirs(todir)\n if os.path.isdir(fnm):\n # beacuse shutil.copy2() is the default copy function\n # for shutil.copytree, this will also copy file metadata\n shutil.copytree(fnm, tofnm)\n else:\n shutil.copy(fnm, tofnm)\n base_path = os.path.split(inm)[0]\n if base_path:\n if not os.path.exists(os.path.join(bin_dir, inm)):\n path = ''\n for part in iter(base_path.split(os.path.sep)):\n # Build path from previous path and the next part of the base path\n path = os.path.join(path, part)\n try:\n relative_source_path = os.path.relpath(os.path.join(res_dir, path),\n os.path.split(os.path.join(bin_dir, path))[0])\n dest_path = os.path.join(bin_dir, path)\n os.symlink(relative_source_path, dest_path)\n break\n except FileExistsError:\n pass\n if not os.path.exists(os.path.join(bin_dir, inm)):\n relative_source_path = os.path.relpath(os.path.join(res_dir, inm),\n os.path.split(os.path.join(bin_dir, 
inm))[0])\n dest_path = os.path.join(bin_dir, inm)\n os.symlink(relative_source_path, dest_path)\n else: # If path is empty, e.g., a top level file, try to just symlink the file\n os.symlink(os.path.relpath(os.path.join(res_dir, inm),\n os.path.split(os.path.join(bin_dir, inm))[0]),\n os.path.join(bin_dir, inm))\n", "path": "PyInstaller/building/osx.py"}]}
| 3,593 | 191 |
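For the PyInstaller bundle record above: the accepted patch simply stops special-casing `base_library.zip`, so it goes through BUNDLE's existing move-to-Resources-and-symlink pass like any other data file. A rough standalone sketch of that relocation step is below; the helper name and bundle paths are assumptions for illustration, not PyInstaller API.

```python
import os
import shutil

def relocate_and_symlink(bundle_root, rel_name="base_library.zip"):
    """Move a data file out of Contents/MacOS and leave a relative symlink.

    Code signing and notarization reject plain data files in Contents/MacOS,
    so the real file lives in Contents/Resources and MacOS keeps only a link.
    """
    src = os.path.join(bundle_root, "Contents", "MacOS", rel_name)
    dst = os.path.join(bundle_root, "Contents", "Resources", rel_name)

    os.makedirs(os.path.dirname(dst), exist_ok=True)
    shutil.move(src, dst)

    # A relative link keeps the signed .app bundle relocatable.
    os.symlink(os.path.relpath(dst, os.path.dirname(src)), src)
```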
| gh_patches_debug_38853 | rasdani/github-patches | git_diff | interlegis__sapl-276 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Create stubs for PK equal to zero
During the migration, child records may point to a parent record whose PK equals zero. Currently this value is changed to None, but there are fields that cannot be nullable. In these cases, a parent record stub must be created and the child record's field must point to this new stub.
</issue>
<code>
[start of legacy/migration.py]
1 import re
2
3 import pkg_resources
4 import yaml
5 from django.apps import apps
6 from django.apps.config import AppConfig
7 from django.core.exceptions import ObjectDoesNotExist
8 from django.db import connections, models
9 from django.db.models.base import ModelBase
10 from model_mommy import mommy
11
12 from comissoes.models import Composicao, Participacao
13 from parlamentares.models import Parlamentar
14 from sessao.models import SessaoPlenaria
15
16 # BASE ######################################################################
17
18 # apps to be migrated, in app dependency order (very important)
19 appconfs = [apps.get_app_config(n) for n in [
20 'parlamentares',
21 'comissoes',
22 'materia',
23 'norma',
24 'sessao',
25 'lexml',
26 'protocoloadm', ]]
27
28 stubs_list = []
29
30 name_sets = [set(m.__name__ for m in ac.get_models()) for ac in appconfs]
31
32 # apps do not overlap
33 for s1 in name_sets:
34 for s2 in name_sets:
35 if s1 is not s2:
36 assert not s1.intersection(s2)
37
38 # apps include all legacy models
39 legacy_app = apps.get_app_config('legacy')
40 legacy_model_names = set(m.__name__ for m in legacy_app.get_models())
41
42 model_dict = {m.__name__: m for ac in appconfs for m in ac.get_models()}
43
44
45 # RENAMES ###################################################################
46
47 MODEL_RENAME_PATTERN = re.compile('(.+) \((.+)\)')
48
49
50 def get_renames():
51 field_renames = {}
52 model_renames = {}
53 for app in appconfs:
54 app_rename_data = yaml.load(
55 pkg_resources.resource_string(app.module.__name__, 'legacy.yaml'))
56 for model_name, renames in app_rename_data.items():
57 match = MODEL_RENAME_PATTERN.match(model_name)
58 if match:
59 model_name, old_name = match.groups()
60 else:
61 old_name = None
62 model = getattr(app.models_module, model_name)
63 if old_name:
64 model_renames[model] = old_name
65 field_renames[model] = renames
66
67 # collect renames from parent classes
68 for model, renames in field_renames.items():
69 if any(parent in field_renames for parent in model.__mro__[1:]):
70 renames = {}
71 for parent in reversed(model.__mro__):
72 if parent in field_renames:
73 renames.update(field_renames[parent])
74 field_renames[model] = renames
75
76 # remove abstract classes
77 field_renames = {m: r for m, r in field_renames.items()
78 if not m._meta.abstract}
79
80 return field_renames, model_renames
81
82
83 # MIGRATION #################################################################
84
85 def info(msg):
86 print('INFO: ' + msg)
87
88
89 def warn(msg):
90 print('WARNING! ' + msg)
91
92
93 def get_fk_related(field, value, label=None):
94 if value is not None:
95 try:
96 value = field.related_model.objects.get(id=value)
97 except ObjectDoesNotExist:
98 msg = 'FK [%s] not found for value %s ' \
99 '(in %s %s)' % (
100 field.name, value,
101 field.model.__name__, label or '---')
102 if value == 0:
103 # we interpret FK == 0 as actually FK == NONE
104 value = None
105 warn(msg + ' => using NONE for zero value')
106 else:
107 value = make_stub(field.related_model, value)
108 stubs_list.append((value.id, field))
109 warn(msg + ' => STUB CREATED')
110 else:
111 assert value
112 return value
113
114
115 def get_field(model, fieldname):
116 return model._meta.get_field(fieldname)
117
118
119 def exec_sql(sql, db='default'):
120 cursor = connections[db].cursor()
121 cursor.execute(sql)
122 return cursor
123
124
125 def iter_sql_records(sql, db):
126 class Record:
127 pass
128 cursor = exec_sql(sql, db)
129 fieldnames = [name[0] for name in cursor.description]
130 for row in cursor.fetchall():
131 record = Record()
132 record.__dict__.update(zip(fieldnames, row))
133 yield record
134
135
136 def save_with_id(new, id):
137 sequence_name = '%s_id_seq' % type(new)._meta.db_table
138 cursor = exec_sql('SELECT last_value from %s;' % sequence_name)
139 (last_value,) = cursor.fetchone()
140 if last_value == 1 or id != last_value + 1:
141 # we explicitly set the next id if last_value == 1
142 # because last_value == 1 for a table containing either 0 or 1 records
143 # (we would have trouble for id == 2 and a missing id == 1)
144 exec_sql('ALTER SEQUENCE %s RESTART WITH %s;' % (sequence_name, id))
145 new.save()
146 assert new.id == id, 'New id is different from provided!'
147
148
149 def make_stub(model, id):
150 new = mommy.prepare(model)
151 save_with_id(new, id)
152 return new
153
154
155 class DataMigrator:
156
157 def __init__(self):
158 self.field_renames, self.model_renames = get_renames()
159
160 def populate_renamed_fields(self, new, old):
161 renames = self.field_renames[type(new)]
162
163 for field in new._meta.fields:
164 old_field_name = renames.get(field.name)
165 field_type = field.get_internal_type()
166
167 if old_field_name:
168 old_value = getattr(old, old_field_name)
169 if isinstance(field, models.ForeignKey):
170 old_type = type(old) # not necessarily a model
171 if hasattr(old_type, '_meta') and \
172 old_type._meta.pk.name != 'id':
173 label = old.pk
174 else:
175 label = '-- WITHOUT PK --'
176 value = get_fk_related(field, old_value, label)
177 else:
178 value = getattr(old, old_field_name)
179 if field_type == 'CharField' or field_type == 'TextField':
180 if value is None:
181 warn(
182 "Field %s (%s) from model %s"
183 " => settig empty string '' for %s value" %
184 (field.name, field_type, field.model.__name__,
185 value))
186 value = ''
187 setattr(new, field.name, value)
188
189 def migrate(self, obj=appconfs):
190 # warning: model/app migration order is of utmost importance
191
192 self.to_delete = []
193 info('Starting %s migration...' % obj)
194 self._do_migrate(obj)
195 # exclude logically deleted in legacy base
196 info('Deleting models with ind_excluido...')
197 for obj in self.to_delete:
198 obj.delete()
199 info('Deleting unnecessary stubs...')
200 self.delete_stubs()
201
202 def _do_migrate(self, obj):
203 if isinstance(obj, AppConfig):
204 models_to_migrate = (model for model in obj.models.values()
205 if model in self.field_renames)
206 self._do_migrate(models_to_migrate)
207 elif isinstance(obj, ModelBase):
208 self.migrate_model(obj)
209 elif hasattr(obj, '__iter__'):
210 for item in obj:
211 self._do_migrate(item)
212 else:
213 raise TypeError(
214 'Parameter must be a Model, AppConfig or a sequence of them')
215
216 def migrate_model(self, model):
217 print('Migrating %s...' % model.__name__)
218
219 legacy_model_name = self.model_renames.get(model, model.__name__)
220 legacy_model = legacy_app.get_model(legacy_model_name)
221 legacy_pk_name = legacy_model._meta.pk.name
222
223 # Clear all model entries
224 # They may have been created in a previous migration attempt
225 model.objects.all().delete()
226
227 # setup migration strategy for tables with or without a pk
228 if legacy_pk_name == 'id':
229 # There is no pk in the legacy table
230 def save(new, old):
231 new.save()
232
233 old_records = iter_sql_records(
234 'select * from ' + legacy_model._meta.db_table, 'legacy')
235 else:
236 def save(new, old):
237 save_with_id(new, getattr(old, legacy_pk_name))
238
239 old_records = legacy_model.objects.all().order_by(legacy_pk_name)
240
241 adjust = MIGRATION_ADJUSTMENTS.get(model)
242
243 # convert old records to new ones
244 for old in old_records:
245 new = model()
246 self.populate_renamed_fields(new, old)
247 if adjust:
248 adjust(new, old)
249 save(new, old)
250 if getattr(old, 'ind_excluido', False):
251 self.to_delete.append(new)
252
253 def delete_stubs(self):
254 for line in stubs_list:
255 stub, field = line
256 # Filter all objects in model and delete from related model
257 # if quantity is equal to zero
258 if field.model.objects.filter(**{field.name: stub}).exists():
259 field.related_model.objects.get(**{'id': stub}).delete()
260
261
262 def migrate(obj=appconfs):
263 dm = DataMigrator()
264 dm.migrate(obj)
265
266
267 # MIGRATION_ADJUSTMENTS #####################################################
268
269 def adjust_participacao(new_participacao, old):
270 composicao = Composicao()
271 composicao.comissao, composicao.periodo = [
272 get_fk_related(Composicao._meta.get_field(name), value)
273 for name, value in (('comissao', old.cod_comissao),
274 ('periodo', old.cod_periodo_comp))]
275 # check if there is already an "equal" one in the db
276 already_created = Composicao.objects.filter(
277 comissao=composicao.comissao, periodo=composicao.periodo)
278 if already_created:
279 assert len(already_created) == 1 # we must never have made 2 copies
280 [composicao] = already_created
281 else:
282 composicao.save()
283 new_participacao.composicao = composicao
284
285
286 def adjust_parlamentar(new_parlamentar, old):
287 value = new_parlamentar.unidade_deliberativa
288 # Field is defined as not null in legacy db,
289 # but data includes null values
290 # => transform None to False
291 if value is None:
292 warn('null converted to False')
293 new_parlamentar.unidade_deliberativa = False
294
295
296 def adjust_sessaoplenaria(new, old):
297 assert not old.tip_expediente
298
299
300 MIGRATION_ADJUSTMENTS = {
301 Participacao: adjust_participacao,
302 Parlamentar: adjust_parlamentar,
303 SessaoPlenaria: adjust_sessaoplenaria,
304 }
305
306 # CHECKS ####################################################################
307
308
309 def get_ind_excluido(obj):
310 legacy_model = legacy_app.get_model(type(obj).__name__)
311 return getattr(legacy_model.objects.get(
312 **{legacy_model._meta.pk.name: obj.id}), 'ind_excluido', False)
313
314
315 def check_app_no_ind_excluido(app):
316 for model in app.models.values():
317 assert not any(get_ind_excluido(obj) for obj in model.objects.all())
318 print('OK!')
319
[end of legacy/migration.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/legacy/migration.py b/legacy/migration.py
--- a/legacy/migration.py
+++ b/legacy/migration.py
@@ -6,6 +6,7 @@
from django.apps.config import AppConfig
from django.core.exceptions import ObjectDoesNotExist
from django.db import connections, models
+from django.db.models import CharField, TextField
from django.db.models.base import ModelBase
from model_mommy import mommy
@@ -87,26 +88,41 @@
def warn(msg):
- print('WARNING! ' + msg)
+ print('CUIDADO! ' + msg)
def get_fk_related(field, value, label=None):
+ fields_dict = {}
+
+ if value is None and field.null is False:
+ value = 0
if value is not None:
try:
value = field.related_model.objects.get(id=value)
except ObjectDoesNotExist:
- msg = 'FK [%s] not found for value %s ' \
- '(in %s %s)' % (
+ msg = 'FK [%s] não encontrada para valor %s ' \
+ '(em %s %s)' % (
field.name, value,
field.model.__name__, label or '---')
if value == 0:
- # we interpret FK == 0 as actually FK == NONE
- value = None
- warn(msg + ' => using NONE for zero value')
+ # se FK == 0, criamos um stub e colocamos o valor '????????
+ # para qualquer CharField ou TextField que possa haver
+ if not field.null:
+ all_fields = field.related_model._meta.get_fields()
+ fields_dict = {f.name: '????????????'[:f.max_length]
+ for f in all_fields
+ if isinstance(f, (CharField, TextField)) and
+ not f.choices and not f.blank}
+ value = mommy.make(field.related_model,
+ **fields_dict)
+ warn(msg + ' => STUB criada para campos não nuláveis!')
+ else:
+ value = None
+ warn(msg + ' => usando None para valores iguais a zero!')
else:
value = make_stub(field.related_model, value)
stubs_list.append((value.id, field))
- warn(msg + ' => STUB CREATED')
+ warn(msg + ' => STUB criada!')
else:
assert value
return value
@@ -163,7 +179,6 @@
for field in new._meta.fields:
old_field_name = renames.get(field.name)
field_type = field.get_internal_type()
-
if old_field_name:
old_value = getattr(old, old_field_name)
if isinstance(field, models.ForeignKey):
|
{"golden_diff": "diff --git a/legacy/migration.py b/legacy/migration.py\n--- a/legacy/migration.py\n+++ b/legacy/migration.py\n@@ -6,6 +6,7 @@\n from django.apps.config import AppConfig\n from django.core.exceptions import ObjectDoesNotExist\n from django.db import connections, models\n+from django.db.models import CharField, TextField\n from django.db.models.base import ModelBase\n from model_mommy import mommy\n \n@@ -87,26 +88,41 @@\n \n \n def warn(msg):\n- print('WARNING! ' + msg)\n+ print('CUIDADO! ' + msg)\n \n \n def get_fk_related(field, value, label=None):\n+ fields_dict = {}\n+\n+ if value is None and field.null is False:\n+ value = 0\n if value is not None:\n try:\n value = field.related_model.objects.get(id=value)\n except ObjectDoesNotExist:\n- msg = 'FK [%s] not found for value %s ' \\\n- '(in %s %s)' % (\n+ msg = 'FK [%s] n\u00e3o encontrada para valor %s ' \\\n+ '(em %s %s)' % (\n field.name, value,\n field.model.__name__, label or '---')\n if value == 0:\n- # we interpret FK == 0 as actually FK == NONE\n- value = None\n- warn(msg + ' => using NONE for zero value')\n+ # se FK == 0, criamos um stub e colocamos o valor '????????\n+ # para qualquer CharField ou TextField que possa haver\n+ if not field.null:\n+ all_fields = field.related_model._meta.get_fields()\n+ fields_dict = {f.name: '????????????'[:f.max_length]\n+ for f in all_fields\n+ if isinstance(f, (CharField, TextField)) and\n+ not f.choices and not f.blank}\n+ value = mommy.make(field.related_model,\n+ **fields_dict)\n+ warn(msg + ' => STUB criada para campos n\u00e3o nul\u00e1veis!')\n+ else:\n+ value = None\n+ warn(msg + ' => usando None para valores iguais a zero!')\n else:\n value = make_stub(field.related_model, value)\n stubs_list.append((value.id, field))\n- warn(msg + ' => STUB CREATED')\n+ warn(msg + ' => STUB criada!')\n else:\n assert value\n return value\n@@ -163,7 +179,6 @@\n for field in new._meta.fields:\n old_field_name = renames.get(field.name)\n field_type = field.get_internal_type()\n-\n if old_field_name:\n old_value = getattr(old, old_field_name)\n if isinstance(field, models.ForeignKey):\n", "issue": "Criar stubs para PK igual a zero\nDurante a migra\u00e7\u00e3o, registros filhos podem apontar para um registro pai com PK igual a zero. Atualmente esse valor \u00e9 mudado para None, por\u00e9m h\u00e1 campos que n\u00e3o podem ser nul\u00e1veis. 
Nestes casos, um stub de registro pai deve ser criado e o valor do campo do registro filho deve apontar para este novo stub.\n\n", "before_files": [{"content": "import re\n\nimport pkg_resources\nimport yaml\nfrom django.apps import apps\nfrom django.apps.config import AppConfig\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.db import connections, models\nfrom django.db.models.base import ModelBase\nfrom model_mommy import mommy\n\nfrom comissoes.models import Composicao, Participacao\nfrom parlamentares.models import Parlamentar\nfrom sessao.models import SessaoPlenaria\n\n# BASE ######################################################################\n\n# apps to be migrated, in app dependency order (very important)\nappconfs = [apps.get_app_config(n) for n in [\n 'parlamentares',\n 'comissoes',\n 'materia',\n 'norma',\n 'sessao',\n 'lexml',\n 'protocoloadm', ]]\n\nstubs_list = []\n\nname_sets = [set(m.__name__ for m in ac.get_models()) for ac in appconfs]\n\n# apps do not overlap\nfor s1 in name_sets:\n for s2 in name_sets:\n if s1 is not s2:\n assert not s1.intersection(s2)\n\n# apps include all legacy models\nlegacy_app = apps.get_app_config('legacy')\nlegacy_model_names = set(m.__name__ for m in legacy_app.get_models())\n\nmodel_dict = {m.__name__: m for ac in appconfs for m in ac.get_models()}\n\n\n# RENAMES ###################################################################\n\nMODEL_RENAME_PATTERN = re.compile('(.+) \\((.+)\\)')\n\n\ndef get_renames():\n field_renames = {}\n model_renames = {}\n for app in appconfs:\n app_rename_data = yaml.load(\n pkg_resources.resource_string(app.module.__name__, 'legacy.yaml'))\n for model_name, renames in app_rename_data.items():\n match = MODEL_RENAME_PATTERN.match(model_name)\n if match:\n model_name, old_name = match.groups()\n else:\n old_name = None\n model = getattr(app.models_module, model_name)\n if old_name:\n model_renames[model] = old_name\n field_renames[model] = renames\n\n # collect renames from parent classes\n for model, renames in field_renames.items():\n if any(parent in field_renames for parent in model.__mro__[1:]):\n renames = {}\n for parent in reversed(model.__mro__):\n if parent in field_renames:\n renames.update(field_renames[parent])\n field_renames[model] = renames\n\n # remove abstract classes\n field_renames = {m: r for m, r in field_renames.items()\n if not m._meta.abstract}\n\n return field_renames, model_renames\n\n\n# MIGRATION #################################################################\n\ndef info(msg):\n print('INFO: ' + msg)\n\n\ndef warn(msg):\n print('WARNING! 
' + msg)\n\n\ndef get_fk_related(field, value, label=None):\n if value is not None:\n try:\n value = field.related_model.objects.get(id=value)\n except ObjectDoesNotExist:\n msg = 'FK [%s] not found for value %s ' \\\n '(in %s %s)' % (\n field.name, value,\n field.model.__name__, label or '---')\n if value == 0:\n # we interpret FK == 0 as actually FK == NONE\n value = None\n warn(msg + ' => using NONE for zero value')\n else:\n value = make_stub(field.related_model, value)\n stubs_list.append((value.id, field))\n warn(msg + ' => STUB CREATED')\n else:\n assert value\n return value\n\n\ndef get_field(model, fieldname):\n return model._meta.get_field(fieldname)\n\n\ndef exec_sql(sql, db='default'):\n cursor = connections[db].cursor()\n cursor.execute(sql)\n return cursor\n\n\ndef iter_sql_records(sql, db):\n class Record:\n pass\n cursor = exec_sql(sql, db)\n fieldnames = [name[0] for name in cursor.description]\n for row in cursor.fetchall():\n record = Record()\n record.__dict__.update(zip(fieldnames, row))\n yield record\n\n\ndef save_with_id(new, id):\n sequence_name = '%s_id_seq' % type(new)._meta.db_table\n cursor = exec_sql('SELECT last_value from %s;' % sequence_name)\n (last_value,) = cursor.fetchone()\n if last_value == 1 or id != last_value + 1:\n # we explicitly set the next id if last_value == 1\n # because last_value == 1 for a table containing either 0 or 1 records\n # (we would have trouble for id == 2 and a missing id == 1)\n exec_sql('ALTER SEQUENCE %s RESTART WITH %s;' % (sequence_name, id))\n new.save()\n assert new.id == id, 'New id is different from provided!'\n\n\ndef make_stub(model, id):\n new = mommy.prepare(model)\n save_with_id(new, id)\n return new\n\n\nclass DataMigrator:\n\n def __init__(self):\n self.field_renames, self.model_renames = get_renames()\n\n def populate_renamed_fields(self, new, old):\n renames = self.field_renames[type(new)]\n\n for field in new._meta.fields:\n old_field_name = renames.get(field.name)\n field_type = field.get_internal_type()\n\n if old_field_name:\n old_value = getattr(old, old_field_name)\n if isinstance(field, models.ForeignKey):\n old_type = type(old) # not necessarily a model\n if hasattr(old_type, '_meta') and \\\n old_type._meta.pk.name != 'id':\n label = old.pk\n else:\n label = '-- WITHOUT PK --'\n value = get_fk_related(field, old_value, label)\n else:\n value = getattr(old, old_field_name)\n if field_type == 'CharField' or field_type == 'TextField':\n if value is None:\n warn(\n \"Field %s (%s) from model %s\"\n \" => settig empty string '' for %s value\" %\n (field.name, field_type, field.model.__name__,\n value))\n value = ''\n setattr(new, field.name, value)\n\n def migrate(self, obj=appconfs):\n # warning: model/app migration order is of utmost importance\n\n self.to_delete = []\n info('Starting %s migration...' 
% obj)\n self._do_migrate(obj)\n # exclude logically deleted in legacy base\n info('Deleting models with ind_excluido...')\n for obj in self.to_delete:\n obj.delete()\n info('Deleting unnecessary stubs...')\n self.delete_stubs()\n\n def _do_migrate(self, obj):\n if isinstance(obj, AppConfig):\n models_to_migrate = (model for model in obj.models.values()\n if model in self.field_renames)\n self._do_migrate(models_to_migrate)\n elif isinstance(obj, ModelBase):\n self.migrate_model(obj)\n elif hasattr(obj, '__iter__'):\n for item in obj:\n self._do_migrate(item)\n else:\n raise TypeError(\n 'Parameter must be a Model, AppConfig or a sequence of them')\n\n def migrate_model(self, model):\n print('Migrating %s...' % model.__name__)\n\n legacy_model_name = self.model_renames.get(model, model.__name__)\n legacy_model = legacy_app.get_model(legacy_model_name)\n legacy_pk_name = legacy_model._meta.pk.name\n\n # Clear all model entries\n # They may have been created in a previous migration attempt\n model.objects.all().delete()\n\n # setup migration strategy for tables with or without a pk\n if legacy_pk_name == 'id':\n # There is no pk in the legacy table\n def save(new, old):\n new.save()\n\n old_records = iter_sql_records(\n 'select * from ' + legacy_model._meta.db_table, 'legacy')\n else:\n def save(new, old):\n save_with_id(new, getattr(old, legacy_pk_name))\n\n old_records = legacy_model.objects.all().order_by(legacy_pk_name)\n\n adjust = MIGRATION_ADJUSTMENTS.get(model)\n\n # convert old records to new ones\n for old in old_records:\n new = model()\n self.populate_renamed_fields(new, old)\n if adjust:\n adjust(new, old)\n save(new, old)\n if getattr(old, 'ind_excluido', False):\n self.to_delete.append(new)\n\n def delete_stubs(self):\n for line in stubs_list:\n stub, field = line\n # Filter all objects in model and delete from related model\n # if quantity is equal to zero\n if field.model.objects.filter(**{field.name: stub}).exists():\n field.related_model.objects.get(**{'id': stub}).delete()\n\n\ndef migrate(obj=appconfs):\n dm = DataMigrator()\n dm.migrate(obj)\n\n\n# MIGRATION_ADJUSTMENTS #####################################################\n\ndef adjust_participacao(new_participacao, old):\n composicao = Composicao()\n composicao.comissao, composicao.periodo = [\n get_fk_related(Composicao._meta.get_field(name), value)\n for name, value in (('comissao', old.cod_comissao),\n ('periodo', old.cod_periodo_comp))]\n # check if there is already an \"equal\" one in the db\n already_created = Composicao.objects.filter(\n comissao=composicao.comissao, periodo=composicao.periodo)\n if already_created:\n assert len(already_created) == 1 # we must never have made 2 copies\n [composicao] = already_created\n else:\n composicao.save()\n new_participacao.composicao = composicao\n\n\ndef adjust_parlamentar(new_parlamentar, old):\n value = new_parlamentar.unidade_deliberativa\n # Field is defined as not null in legacy db,\n # but data includes null values\n # => transform None to False\n if value is None:\n warn('null converted to False')\n new_parlamentar.unidade_deliberativa = False\n\n\ndef adjust_sessaoplenaria(new, old):\n assert not old.tip_expediente\n\n\nMIGRATION_ADJUSTMENTS = {\n Participacao: adjust_participacao,\n Parlamentar: adjust_parlamentar,\n SessaoPlenaria: adjust_sessaoplenaria,\n}\n\n# CHECKS ####################################################################\n\n\ndef get_ind_excluido(obj):\n legacy_model = legacy_app.get_model(type(obj).__name__)\n return 
getattr(legacy_model.objects.get(\n **{legacy_model._meta.pk.name: obj.id}), 'ind_excluido', False)\n\n\ndef check_app_no_ind_excluido(app):\n for model in app.models.values():\n assert not any(get_ind_excluido(obj) for obj in model.objects.all())\n print('OK!')\n", "path": "legacy/migration.py"}]}
| 3,878 | 611 |
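For the sapl migration record above: the stub-creation branch added to `get_fk_related` can be condensed into a rough standalone helper. It assumes Django's `_meta.get_fields()` and model_mommy's `mommy.make`; the function name is invented for illustration.

```python
from django.db.models import CharField, TextField
from model_mommy import mommy

def make_stub_for_zero_fk(related_model):
    """Build a placeholder parent row for child records that point at pk == 0.

    Required, choice-less Char/Text fields get a visible '????????????' marker
    (truncated to max_length) so the stub saves cleanly and is easy to spot later.
    """
    fields = {
        f.name: '????????????'[:f.max_length]
        for f in related_model._meta.get_fields()
        if isinstance(f, (CharField, TextField)) and not f.choices and not f.blank
    }
    return mommy.make(related_model, **fields)
```

In the patch itself only non-nullable foreign keys take this path; nullable ones still fall back to `None`.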
| gh_patches_debug_14221 | rasdani/github-patches | git_diff | mkdocs__mkdocs-1623 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Link with an absolute path (no domain) displays a warning
If I wrote `[link](/api/file.pdf)` in docs, it would show a warning like this:
```
WARNING - Documentation file 'forum.md' contains a link to 'api/file.pdf' which is not found in the documentation files.
```
The link works in the generated docs. The only problem is the warning message.
</issue>
<code>
[start of mkdocs/structure/pages.py]
1 # coding: utf-8
2
3 from __future__ import unicode_literals
4
5 import os
6 import io
7 import datetime
8 import logging
9
10 import markdown
11 from markdown.extensions import Extension
12 from markdown.treeprocessors import Treeprocessor
13 from markdown.util import AMP_SUBSTITUTE
14
15 from mkdocs.structure.toc import get_toc
16 from mkdocs.utils import meta, urlparse, urlunparse, urljoin, get_markdown_title, warning_filter
17
18 log = logging.getLogger(__name__)
19 log.addFilter(warning_filter)
20
21
22 class Page(object):
23 def __init__(self, title, file, config):
24 file.page = self
25 self.file = file
26 self.title = title
27
28 # Navigation attributes
29 self.parent = None
30 self.children = None
31 self.previous_page = None
32 self.next_page = None
33 self.active = False
34
35 self.is_section = False
36 self.is_page = True
37 self.is_link = False
38
39 # Support SOURCE_DATE_EPOCH environment variable for "reproducible" builds.
40 # See https://reproducible-builds.org/specs/source-date-epoch/
41 if 'SOURCE_DATE_EPOCH' in os.environ:
42 self.update_date = datetime.datetime.utcfromtimestamp(
43 int(os.environ['SOURCE_DATE_EPOCH'])
44 ).strftime("%Y-%m-%d")
45 else:
46 self.update_date = datetime.datetime.now().strftime("%Y-%m-%d")
47
48 self._set_canonical_url(config.get('site_url', None))
49 self._set_edit_url(config.get('repo_url', None), config.get('edit_uri', None))
50
51 # Placeholders to be filled in later in the build process.
52 self.markdown = None
53 self.content = None
54 self.toc = []
55 self.meta = {}
56
57 def __eq__(self, other):
58
59 def sub_dict(d):
60 return dict((key, value) for key, value in d.items() if key in ['title', 'file'])
61
62 return (isinstance(other, self.__class__) and sub_dict(self.__dict__) == sub_dict(other.__dict__))
63
64 def __ne__(self, other):
65 return not self.__eq__(other)
66
67 def __repr__(self):
68 title = "'{}'".format(self.title) if (self.title is not None) else '[blank]'
69 return "Page(title={}, url='{}')".format(title, self.abs_url or self.file.url)
70
71 def _indent_print(self, depth=0):
72 return '{}{}'.format(' ' * depth, repr(self))
73
74 def _get_active(self):
75 """ Return active status of page. """
76 return self.__active
77
78 def _set_active(self, value):
79 """ Set active status of page and ancestors. """
80 self.__active = bool(value)
81 if self.parent is not None:
82 self.parent.active = bool(value)
83
84 active = property(_get_active, _set_active)
85
86 @property
87 def is_index(self):
88 return self.file.name == 'index'
89
90 @property
91 def is_top_level(self):
92 return self.parent is None
93
94 @property
95 def is_homepage(self):
96 return self.is_top_level and self.is_index
97
98 @property
99 def url(self):
100 return '' if self.file.url == '.' else self.file.url
101
102 @property
103 def ancestors(self):
104 if self.parent is None:
105 return []
106 return [self.parent] + self.parent.ancestors
107
108 def _set_canonical_url(self, base):
109 if base:
110 if not base.endswith('/'):
111 base += '/'
112 self.canonical_url = urljoin(base, self.url)
113 self.abs_url = urlparse(self.canonical_url).path
114 else:
115 self.canonical_url = None
116 self.abs_url = None
117
118 def _set_edit_url(self, repo_url, edit_uri):
119 if repo_url and edit_uri:
120 src_path = self.file.src_path.replace('\\', '/')
121 self.edit_url = urljoin(repo_url, edit_uri + src_path)
122 else:
123 self.edit_url = None
124
125 def read_source(self, config):
126 source = config['plugins'].run_event('page_read_source', None, config=config, page=self)
127 if source is None:
128 try:
129 with io.open(self.file.abs_src_path, 'r', encoding='utf-8-sig', errors='strict') as f:
130 source = f.read()
131 except IOError:
132 log.error('File not found: {}'.format(self.file.src_path))
133 raise
134 except ValueError:
135 log.error('Encoding error reading file: {}'.format(self.file.src_path))
136 raise
137
138 self.markdown, self.meta = meta.get_data(source)
139 self._set_title()
140
141 def _set_title(self):
142 """
143 Set the title for a Markdown document.
144
145 Check these in order and use the first that returns a valid title:
146 - value provided on init (passed in from config)
147 - value of metadata 'title'
148 - content of the first H1 in Markdown content
149 - convert filename to title
150 """
151 if self.title is not None:
152 return
153
154 if 'title' in self.meta:
155 self.title = self.meta['title']
156 return
157
158 title = get_markdown_title(self.markdown)
159
160 if title is None:
161 if self.is_homepage:
162 title = 'Home'
163 else:
164 title = self.file.name.replace('-', ' ').replace('_', ' ')
165 # Capitalize if the filename was all lowercase, otherwise leave it as-is.
166 if title.lower() == title:
167 title = title.capitalize()
168
169 self.title = title
170
171 def render(self, config, files):
172 """
173 Convert the Markdown source file to HTML as per the config.
174 """
175
176 extensions = [
177 _RelativePathExtension(self.file, files)
178 ] + config['markdown_extensions']
179
180 md = markdown.Markdown(
181 extensions=extensions,
182 extension_configs=config['mdx_configs'] or {}
183 )
184 self.content = md.convert(self.markdown)
185 self.toc = get_toc(getattr(md, 'toc', ''))
186
187
188 class _RelativePathTreeprocessor(Treeprocessor):
189 def __init__(self, file, files):
190 self.file = file
191 self.files = files
192
193 def run(self, root):
194 """
195 Update urls on anchors and images to make them relative
196
197 Iterates through the full document tree looking for specific
198 tags and then makes them relative based on the site navigation
199 """
200 for element in root.iter():
201 if element.tag == 'a':
202 key = 'href'
203 elif element.tag == 'img':
204 key = 'src'
205 else:
206 continue
207
208 url = element.get(key)
209 new_url = self.path_to_url(url)
210 element.set(key, new_url)
211
212 return root
213
214 def path_to_url(self, url):
215 scheme, netloc, path, params, query, fragment = urlparse(url)
216
217 if scheme or netloc or not path or AMP_SUBSTITUTE in url or '.' not in os.path.split(path)[-1]:
218 # Ignore URLs unless they are a relative link to a source file.
219 # AMP_SUBSTITUTE is used internally by Markdown only for email.
220 # No '.' in the last part of a path indicates path does not point to a file.
221 return url
222
223 # Determine the filepath of the target.
224 target_path = os.path.join(os.path.dirname(self.file.src_path), path)
225 target_path = os.path.normpath(target_path).lstrip(os.sep)
226
227 # Validate that the target exists in files collection.
228 if target_path not in self.files:
229 log.warning(
230 "Documentation file '{}' contains a link to '{}' which is not found "
231 "in the documentation files.".format(self.file.src_path, target_path)
232 )
233 return url
234 target_file = self.files.get_file_from_path(target_path)
235 path = target_file.url_relative_to(self.file)
236 components = (scheme, netloc, path, params, query, fragment)
237 return urlunparse(components)
238
239
240 class _RelativePathExtension(Extension):
241 """
242 The Extension class is what we pass to markdown, it then
243 registers the Treeprocessor.
244 """
245
246 def __init__(self, file, files):
247 self.file = file
248 self.files = files
249
250 def extendMarkdown(self, md, md_globals):
251 relpath = _RelativePathTreeprocessor(self.file, self.files)
252 md.treeprocessors.add("relpath", relpath, "_end")
253
[end of mkdocs/structure/pages.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/mkdocs/structure/pages.py b/mkdocs/structure/pages.py
--- a/mkdocs/structure/pages.py
+++ b/mkdocs/structure/pages.py
@@ -214,7 +214,8 @@
def path_to_url(self, url):
scheme, netloc, path, params, query, fragment = urlparse(url)
- if scheme or netloc or not path or AMP_SUBSTITUTE in url or '.' not in os.path.split(path)[-1]:
+ if (scheme or netloc or not path or url.startswith('/')
+ or AMP_SUBSTITUTE in url or '.' not in os.path.split(path)[-1]):
# Ignore URLs unless they are a relative link to a source file.
# AMP_SUBSTITUTE is used internally by Markdown only for email.
# No '.' in the last part of a path indicates path does not point to a file.
|
{"golden_diff": "diff --git a/mkdocs/structure/pages.py b/mkdocs/structure/pages.py\n--- a/mkdocs/structure/pages.py\n+++ b/mkdocs/structure/pages.py\n@@ -214,7 +214,8 @@\n def path_to_url(self, url):\n scheme, netloc, path, params, query, fragment = urlparse(url)\n \n- if scheme or netloc or not path or AMP_SUBSTITUTE in url or '.' not in os.path.split(path)[-1]:\n+ if (scheme or netloc or not path or url.startswith('/')\n+ or AMP_SUBSTITUTE in url or '.' not in os.path.split(path)[-1]):\n # Ignore URLs unless they are a relative link to a source file.\n # AMP_SUBSTITUTE is used internally by Markdown only for email.\n # No '.' in the last part of a path indicates path does not point to a file.\n", "issue": "link of absolute path without domains will display a warning\nIf I wrote `[link](/api/file.pdf)` in docs, it would show a warning like this:\r\n\r\n```\r\nWARNING - Documentation file 'forum.md' contains a link to 'api/file.pdf' which is not found in the documentation files.\r\n```\r\n\r\nThe link works in the generated docs. The only problem is the warning message.\n", "before_files": [{"content": "# coding: utf-8\n\nfrom __future__ import unicode_literals\n\nimport os\nimport io\nimport datetime\nimport logging\n\nimport markdown\nfrom markdown.extensions import Extension\nfrom markdown.treeprocessors import Treeprocessor\nfrom markdown.util import AMP_SUBSTITUTE\n\nfrom mkdocs.structure.toc import get_toc\nfrom mkdocs.utils import meta, urlparse, urlunparse, urljoin, get_markdown_title, warning_filter\n\nlog = logging.getLogger(__name__)\nlog.addFilter(warning_filter)\n\n\nclass Page(object):\n def __init__(self, title, file, config):\n file.page = self\n self.file = file\n self.title = title\n\n # Navigation attributes\n self.parent = None\n self.children = None\n self.previous_page = None\n self.next_page = None\n self.active = False\n\n self.is_section = False\n self.is_page = True\n self.is_link = False\n\n # Support SOURCE_DATE_EPOCH environment variable for \"reproducible\" builds.\n # See https://reproducible-builds.org/specs/source-date-epoch/\n if 'SOURCE_DATE_EPOCH' in os.environ:\n self.update_date = datetime.datetime.utcfromtimestamp(\n int(os.environ['SOURCE_DATE_EPOCH'])\n ).strftime(\"%Y-%m-%d\")\n else:\n self.update_date = datetime.datetime.now().strftime(\"%Y-%m-%d\")\n\n self._set_canonical_url(config.get('site_url', None))\n self._set_edit_url(config.get('repo_url', None), config.get('edit_uri', None))\n\n # Placeholders to be filled in later in the build process.\n self.markdown = None\n self.content = None\n self.toc = []\n self.meta = {}\n\n def __eq__(self, other):\n\n def sub_dict(d):\n return dict((key, value) for key, value in d.items() if key in ['title', 'file'])\n\n return (isinstance(other, self.__class__) and sub_dict(self.__dict__) == sub_dict(other.__dict__))\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __repr__(self):\n title = \"'{}'\".format(self.title) if (self.title is not None) else '[blank]'\n return \"Page(title={}, url='{}')\".format(title, self.abs_url or self.file.url)\n\n def _indent_print(self, depth=0):\n return '{}{}'.format(' ' * depth, repr(self))\n\n def _get_active(self):\n \"\"\" Return active status of page. \"\"\"\n return self.__active\n\n def _set_active(self, value):\n \"\"\" Set active status of page and ancestors. 
\"\"\"\n self.__active = bool(value)\n if self.parent is not None:\n self.parent.active = bool(value)\n\n active = property(_get_active, _set_active)\n\n @property\n def is_index(self):\n return self.file.name == 'index'\n\n @property\n def is_top_level(self):\n return self.parent is None\n\n @property\n def is_homepage(self):\n return self.is_top_level and self.is_index\n\n @property\n def url(self):\n return '' if self.file.url == '.' else self.file.url\n\n @property\n def ancestors(self):\n if self.parent is None:\n return []\n return [self.parent] + self.parent.ancestors\n\n def _set_canonical_url(self, base):\n if base:\n if not base.endswith('/'):\n base += '/'\n self.canonical_url = urljoin(base, self.url)\n self.abs_url = urlparse(self.canonical_url).path\n else:\n self.canonical_url = None\n self.abs_url = None\n\n def _set_edit_url(self, repo_url, edit_uri):\n if repo_url and edit_uri:\n src_path = self.file.src_path.replace('\\\\', '/')\n self.edit_url = urljoin(repo_url, edit_uri + src_path)\n else:\n self.edit_url = None\n\n def read_source(self, config):\n source = config['plugins'].run_event('page_read_source', None, config=config, page=self)\n if source is None:\n try:\n with io.open(self.file.abs_src_path, 'r', encoding='utf-8-sig', errors='strict') as f:\n source = f.read()\n except IOError:\n log.error('File not found: {}'.format(self.file.src_path))\n raise\n except ValueError:\n log.error('Encoding error reading file: {}'.format(self.file.src_path))\n raise\n\n self.markdown, self.meta = meta.get_data(source)\n self._set_title()\n\n def _set_title(self):\n \"\"\"\n Set the title for a Markdown document.\n\n Check these in order and use the first that returns a valid title:\n - value provided on init (passed in from config)\n - value of metadata 'title'\n - content of the first H1 in Markdown content\n - convert filename to title\n \"\"\"\n if self.title is not None:\n return\n\n if 'title' in self.meta:\n self.title = self.meta['title']\n return\n\n title = get_markdown_title(self.markdown)\n\n if title is None:\n if self.is_homepage:\n title = 'Home'\n else:\n title = self.file.name.replace('-', ' ').replace('_', ' ')\n # Capitalize if the filename was all lowercase, otherwise leave it as-is.\n if title.lower() == title:\n title = title.capitalize()\n\n self.title = title\n\n def render(self, config, files):\n \"\"\"\n Convert the Markdown source file to HTML as per the config.\n \"\"\"\n\n extensions = [\n _RelativePathExtension(self.file, files)\n ] + config['markdown_extensions']\n\n md = markdown.Markdown(\n extensions=extensions,\n extension_configs=config['mdx_configs'] or {}\n )\n self.content = md.convert(self.markdown)\n self.toc = get_toc(getattr(md, 'toc', ''))\n\n\nclass _RelativePathTreeprocessor(Treeprocessor):\n def __init__(self, file, files):\n self.file = file\n self.files = files\n\n def run(self, root):\n \"\"\"\n Update urls on anchors and images to make them relative\n\n Iterates through the full document tree looking for specific\n tags and then makes them relative based on the site navigation\n \"\"\"\n for element in root.iter():\n if element.tag == 'a':\n key = 'href'\n elif element.tag == 'img':\n key = 'src'\n else:\n continue\n\n url = element.get(key)\n new_url = self.path_to_url(url)\n element.set(key, new_url)\n\n return root\n\n def path_to_url(self, url):\n scheme, netloc, path, params, query, fragment = urlparse(url)\n\n if scheme or netloc or not path or AMP_SUBSTITUTE in url or '.' 
not in os.path.split(path)[-1]:\n # Ignore URLs unless they are a relative link to a source file.\n # AMP_SUBSTITUTE is used internally by Markdown only for email.\n # No '.' in the last part of a path indicates path does not point to a file.\n return url\n\n # Determine the filepath of the target.\n target_path = os.path.join(os.path.dirname(self.file.src_path), path)\n target_path = os.path.normpath(target_path).lstrip(os.sep)\n\n # Validate that the target exists in files collection.\n if target_path not in self.files:\n log.warning(\n \"Documentation file '{}' contains a link to '{}' which is not found \"\n \"in the documentation files.\".format(self.file.src_path, target_path)\n )\n return url\n target_file = self.files.get_file_from_path(target_path)\n path = target_file.url_relative_to(self.file)\n components = (scheme, netloc, path, params, query, fragment)\n return urlunparse(components)\n\n\nclass _RelativePathExtension(Extension):\n \"\"\"\n The Extension class is what we pass to markdown, it then\n registers the Treeprocessor.\n \"\"\"\n\n def __init__(self, file, files):\n self.file = file\n self.files = files\n\n def extendMarkdown(self, md, md_globals):\n relpath = _RelativePathTreeprocessor(self.file, self.files)\n md.treeprocessors.add(\"relpath\", relpath, \"_end\")\n", "path": "mkdocs/structure/pages.py"}]}
| 3,118 | 191 |
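For the mkdocs record above: the patch only adds the `url.startswith('/')` guard to `path_to_url`. A rough self-contained predicate mirroring the full condition is sketched below; the function name is hypothetical and `AMP_SUBSTITUTE` is stubbed locally instead of being imported from `markdown.util`.

```python
import os
from urllib.parse import urlparse

AMP_SUBSTITUTE = "\x02amp\x03"  # local stand-in for markdown.util.AMP_SUBSTITUTE

def should_rewrite(url):
    """True only for relative links that appear to point at a source file."""
    scheme, netloc, path = urlparse(url)[:3]
    if scheme or netloc or not path:
        return False                       # external or empty link
    if url.startswith('/'):
        return False                       # site-absolute link: leave as-is, no warning
    if AMP_SUBSTITUTE in url:
        return False                       # Markdown's internal email encoding
    return '.' in os.path.split(path)[-1]  # last path segment must look like a file

assert should_rewrite('../api/file.md')
assert not should_rewrite('/api/file.pdf')             # the case from the issue
assert not should_rewrite('https://example.com/a.md')
```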
| gh_patches_debug_29538 | rasdani/github-patches | git_diff | readthedocs__readthedocs.org-596 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Edit on GitHub link is broken for branches with e.g. slash in the name
If you have docs built from a branch with e.g. slash in the name, like `v1.x/master`, then RTD will "slugify" it and use v1.x-master in URLs. That's OK.
The problem is that RTD uses this slugified branch name to construct the "Edit on GitHub" URL as well, which then becomes a broken link.
</issue>
<code>
[start of readthedocs/builds/utils.py]
1 import re
2
3 GH_REGEXS = [
4 re.compile('github.com/(.+)/(.+)(?:\.git){1}'),
5 re.compile('github.com/(.+)/(.+)'),
6 re.compile('github.com:(.+)/(.+).git'),
7 ]
8
9 BB_REGEXS = [
10 re.compile('bitbucket.org/(.+)/(.+)/'),
11 re.compile('bitbucket.org/(.+)/(.+)'),
12 re.compile('bitbucket.org:(.+)/(.+)\.git'),
13 ]
14
15 def get_github_username_repo(version):
16 repo_url = version.project.repo
17 if 'github' in repo_url:
18 for regex in GH_REGEXS:
19 match = regex.search(repo_url)
20 if match:
21 return match.groups()
22 return (None, None)
23
24 def get_bitbucket_username_repo(version):
25 repo_url = version.project.repo
26 if 'bitbucket' in repo_url:
27 for regex in BB_REGEXS:
28 match = regex.search(repo_url)
29 if match:
30 return match.groups()
31 return (None, None)
32
33 def get_vcs_version(version):
34 if version.slug == 'latest':
35 if version.project.default_branch:
36 return version.project.default_branch
37 else:
38 return version.project.vcs_repo().fallback_branch
39 else:
40 return version.slug
41
42 def get_conf_py_path(version):
43 conf_py_path = version.project.conf_file(version.slug)
44 conf_py_path = conf_py_path.replace(
45 version.project.checkout_path(version.slug), '')
46 return conf_py_path.replace('conf.py', '')
47
[end of readthedocs/builds/utils.py]
[start of readthedocs/doc_builder/backends/sphinx.py]
1 import os
2 import shutil
3 import codecs
4 import logging
5 import zipfile
6
7 from django.template import Template, Context
8 from django.contrib.auth.models import SiteProfileNotAvailable
9 from django.core.exceptions import ObjectDoesNotExist
10 from django.conf import settings
11
12 from builds import utils as version_utils
13 from core.utils import copy_to_app_servers, copy_file_to_app_servers
14 from doc_builder.base import BaseBuilder, restoring_chdir
15 from projects.utils import run
16 from tastyapi import apiv2
17
18 log = logging.getLogger(__name__)
19
20
21 RTD_CONF_ADDITIONS = """
22 {% load projects_tags %}
23 #Add RTD Template Path.
24 if 'templates_path' in globals():
25 templates_path.insert(0, '{{ template_path }}')
26 else:
27 templates_path = ['{{ template_path }}', 'templates', '_templates',
28 '.templates']
29
30 # Add RTD Static Path. Add to the end because it overwrites previous files.
31 if 'html_static_path' in globals():
32 html_static_path.append('{{ static_path }}')
33 else:
34 html_static_path = ['_static', '{{ static_path }}']
35
36 # Add RTD Theme Path.
37 if 'html_theme_path' in globals():
38 html_theme_path.append('{{ template_path }}')
39 else:
40 html_theme_path = ['_themes', '{{ template_path }}']
41
42 # Add RTD Theme only if they aren't overriding it already
43 using_rtd_theme = False
44 if 'html_theme' in globals():
45 if html_theme in ['default']:
46 # Allow people to bail with a hack of having an html_style
47 if not 'html_style' in globals():
48 html_theme = 'sphinx_rtd_theme'
49 html_style = None
50 html_theme_options = {}
51 using_rtd_theme = True
52 else:
53 html_theme = 'sphinx_rtd_theme'
54 html_style = None
55 html_theme_options = {}
56 using_rtd_theme = True
57
58 # Force theme on setting
59 if globals().get('RTD_NEW_THEME', False):
60 html_theme = 'sphinx_rtd_theme'
61 html_style = None
62 html_theme_options = {}
63 using_rtd_theme = True
64
65 if globals().get('RTD_OLD_THEME', False):
66 html_style = 'rtd.css'
67 html_theme = 'default'
68
69 #Add project information to the template context.
70 context = {
71 'using_theme': using_rtd_theme,
72 'html_theme': html_theme,
73 'current_version': "{{ current_version }}",
74 'MEDIA_URL': "{{ settings.MEDIA_URL }}",
75 'PRODUCTION_DOMAIN': "{{ settings.PRODUCTION_DOMAIN }}",
76 'versions': [{% for version in versions|sort_version_aware %}
77 ("{{ version.slug }}", "/en/{{ version.slug }}/"),{% endfor %}
78 ],
79 'downloads': [ {% for key, val in downloads.items %}
80 ("{{ key }}", "{{ val }}"),{% endfor %}
81 ],
82 'slug': '{{ project.slug }}',
83 'name': u'{{ project.name }}',
84 'rtd_language': u'{{ project.language }}',
85 'canonical_url': '{{ project.canonical_url }}',
86 'analytics_code': '{{ project.analytics_code }}',
87 'conf_py_path': '{{ conf_py_path }}',
88 'github_user': '{{ github_user }}',
89 'github_repo': '{{ github_repo }}',
90 'github_version': '{{ github_version }}',
91 'display_github': {{ display_github }},
92 'READTHEDOCS': True,
93 'using_theme': (html_theme == "default"),
94 'new_theme': (html_theme == "sphinx_rtd_theme"),
95 }
96 if 'html_context' in globals():
97 html_context.update(context)
98 else:
99 html_context = context
100
101 # Add custom RTD extension
102 if 'extensions' in globals():
103 extensions.append("readthedocs_ext.readthedocs")
104 extensions.append("readthedocs_ext.readthedocshtmldir")
105 else:
106 extensions = ["readthedocs_ext.readthedocs", "readthedocs_ext.readthedocshtmldir"]
107 """
108
109 TEMPLATE_DIR = '%s/readthedocs/templates/sphinx' % settings.SITE_ROOT
110 STATIC_DIR = '%s/_static' % TEMPLATE_DIR
111
112
113 class Builder(BaseBuilder):
114 """
115 The parent for most sphinx builders.
116
117 Also handles the default sphinx output of html.
118 """
119
120 def _whitelisted(self, **kwargs):
121 """Modify the given ``conf.py`` file from a whitelisted user's project.
122 """
123 project = self.version.project
124 #Open file for appending.
125 outfile = codecs.open(project.conf_file(self.version.slug),
126 encoding='utf-8', mode='a')
127 outfile.write("\n")
128 conf_py_path = version_utils.get_conf_py_path(self.version)
129 remote_version = version_utils.get_vcs_version(self.version)
130 github_info = version_utils.get_github_username_repo(self.version)
131 bitbucket_info = version_utils.get_bitbucket_username_repo(self.version)
132 if github_info[0] is None:
133 display_github = False
134 else:
135 display_github = True
136 if bitbucket_info[0] is None:
137 display_bitbucket = False
138 else:
139 display_bitbucket = True
140
141 rtd_ctx = Context({
142 'versions': project.api_versions(),
143 'downloads': self.version.get_downloads(pretty=True),
144 'current_version': self.version.slug,
145 'project': project,
146 'settings': settings,
147 'static_path': STATIC_DIR,
148 'template_path': TEMPLATE_DIR,
149 'conf_py_path': conf_py_path,
150 'downloads': apiv2.version(self.version.pk).downloads.get()['downloads'],
151 # GitHub
152 'github_user': github_info[0],
153 'github_repo': github_info[1],
154 'github_version': remote_version,
155 'display_github': display_github,
156 # BitBucket
157 'bitbucket_user': bitbucket_info[0],
158 'bitbucket_repo': bitbucket_info[1],
159 'bitbucket_version': remote_version,
160 'display_bitbucket': display_bitbucket,
161 })
162 rtd_string = Template(RTD_CONF_ADDITIONS).render(rtd_ctx)
163 outfile.write(rtd_string)
164
165 def clean(self, **kwargs):
166 try:
167 self._whitelisted()
168 except (OSError, SiteProfileNotAvailable, ObjectDoesNotExist):
169 log.error("Conf file not found. Error writing to disk.",
170 exc_info=True)
171 return ('', 'Conf file not found. Error writing to disk.', -1)
172
173 @restoring_chdir
174 def build(self, **kwargs):
175 project = self.version.project
176 os.chdir(project.conf_dir(self.version.slug))
177 force_str = " -E " if self.force else ""
178 if project.use_virtualenv:
179 build_command = "%s %s -b readthedocs -D language=%s . _build/html " % (
180 project.venv_bin(version=self.version.slug,
181 bin='sphinx-build'),
182 force_str,
183 project.language)
184 else:
185 build_command = ("sphinx-build %s -b readthedocs -D language=%s . _build/html"
186 % (force_str, project.language))
187 build_results = run(build_command, shell=True)
188 self._zip_html()
189 if 'no targets are out of date.' in build_results[1]:
190 self._changed = False
191 return build_results
192
193 @restoring_chdir
194 def _zip_html(self, **kwargs):
195 from_path = self.version.project.full_build_path(self.version.slug)
196 to_path = self.version.project.checkout_path(self.version.slug)
197 to_file = os.path.join(to_path, '%s.zip' % self.version.project.slug)
198
199 log.info("Creating zip file from %s" % from_path)
200 # Create a <slug>.zip file containing all files in file_path
201 os.chdir(from_path)
202 archive = zipfile.ZipFile(to_file, 'w')
203 for root, subfolders, files in os.walk('.'):
204 for file in files:
205 to_write = os.path.join(root, file)
206 archive.write(
207 filename=to_write,
208 arcname=os.path.join("%s-%s" % (self.version.project.slug,
209 self.version.slug),
210 to_write)
211 )
212 archive.close()
213 return to_file
214
215 def move(self, **kwargs):
216 project = self.version.project
217 if project.full_build_path(self.version.slug):
218 #Copy the html files.
219 target = project.rtd_build_path(self.version.slug)
220 if "_" in project.slug:
221 new_slug = project.slug.replace('_', '-')
222 new_target = target.replace(project.slug, new_slug)
223 #Only replace 1, so user_builds doesn't get replaced >:x
224 targets = [target, new_target]
225 else:
226 targets = [target]
227 for target in targets:
228 if getattr(settings, "MULTIPLE_APP_SERVERS", None):
229 log.info("Copying docs to remote server.")
230 copy_to_app_servers(
231 project.full_build_path(self.version.slug), target)
232 else:
233 if os.path.exists(target):
234 shutil.rmtree(target)
235 log.info("Copying docs on the local filesystem")
236 shutil.copytree(
237 project.full_build_path(self.version.slug), target)
238
239 #Copy the zip file.
240 to_path = os.path.join(settings.MEDIA_ROOT, 'htmlzip',
241 project.slug, self.version.slug)
242 to_file = os.path.join(to_path, '%s.zip' % project.slug)
243 from_path = project.checkout_path(self.version.slug)
244 from_file = os.path.join(from_path, '%s.zip' % project.slug)
245 if getattr(settings, "MULTIPLE_APP_SERVERS", None):
246 copy_file_to_app_servers(from_file, to_file)
247 else:
248 if not os.path.exists(to_path):
249 os.makedirs(to_path)
250 run('mv -f %s %s' % (from_file, to_file))
251 else:
252 log.warning("Not moving docs, because the build dir is unknown.")
253
[end of readthedocs/doc_builder/backends/sphinx.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/readthedocs/builds/utils.py b/readthedocs/builds/utils.py
--- a/readthedocs/builds/utils.py
+++ b/readthedocs/builds/utils.py
@@ -30,14 +30,22 @@
return match.groups()
return (None, None)
-def get_vcs_version(version):
+def get_vcs_version_slug(version):
+ slug = None
if version.slug == 'latest':
if version.project.default_branch:
- return version.project.default_branch
+ slug = version.project.default_branch
else:
- return version.project.vcs_repo().fallback_branch
+ slug = version.project.vcs_repo().fallback_branch
else:
- return version.slug
+ slug = version.slug
+ # https://github.com/rtfd/readthedocs.org/issues/561
+ # version identifiers with / characters in branch name need to un-slugify
+ # the branch name for remote links to work
+ if slug.replace('-', '/') in version.identifier:
+ slug = slug.replace('-', '/')
+ return slug
+
def get_conf_py_path(version):
conf_py_path = version.project.conf_file(version.slug)
diff --git a/readthedocs/doc_builder/backends/sphinx.py b/readthedocs/doc_builder/backends/sphinx.py
--- a/readthedocs/doc_builder/backends/sphinx.py
+++ b/readthedocs/doc_builder/backends/sphinx.py
@@ -126,7 +126,7 @@
encoding='utf-8', mode='a')
outfile.write("\n")
conf_py_path = version_utils.get_conf_py_path(self.version)
- remote_version = version_utils.get_vcs_version(self.version)
+ remote_version = version_utils.get_vcs_version_slug(self.version)
github_info = version_utils.get_github_username_repo(self.version)
bitbucket_info = version_utils.get_bitbucket_username_repo(self.version)
if github_info[0] is None:
|
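For reference, the second hunk of the diff above only produces working remote links because the first hunk un-slugifies branch names that contain a `/`. A minimal, self-contained sketch of that check, using made-up values:

```python
# Illustration of the un-slugify check added in get_vcs_version_slug(); values are made up.
identifier = "v1.x/master"   # branch name as the VCS reports it
slug = "v1.x-master"         # slugified form used in RTD URLs

if slug.replace('-', '/') in identifier:
    slug = slug.replace('-', '/')

print(slug)  # -> "v1.x/master", so the "Edit on GitHub" link points at the real branch
```

The second hunk then calls `get_vcs_version_slug()` so the Sphinx template context receives the real branch name instead of the slug.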
{"golden_diff": "diff --git a/readthedocs/builds/utils.py b/readthedocs/builds/utils.py\n--- a/readthedocs/builds/utils.py\n+++ b/readthedocs/builds/utils.py\n@@ -30,14 +30,22 @@\n return match.groups()\n return (None, None)\n \n-def get_vcs_version(version):\n+def get_vcs_version_slug(version):\n+ slug = None\n if version.slug == 'latest':\n if version.project.default_branch:\n- return version.project.default_branch\n+ slug = version.project.default_branch\n else:\n- return version.project.vcs_repo().fallback_branch\n+ slug = version.project.vcs_repo().fallback_branch\n else:\n- return version.slug\n+ slug = version.slug\n+ # https://github.com/rtfd/readthedocs.org/issues/561\n+ # version identifiers with / characters in branch name need to un-slugify\n+ # the branch name for remote links to work\n+ if slug.replace('-', '/') in version.identifier:\n+ slug = slug.replace('-', '/')\n+ return slug\n+\n \n def get_conf_py_path(version):\n conf_py_path = version.project.conf_file(version.slug)\ndiff --git a/readthedocs/doc_builder/backends/sphinx.py b/readthedocs/doc_builder/backends/sphinx.py\n--- a/readthedocs/doc_builder/backends/sphinx.py\n+++ b/readthedocs/doc_builder/backends/sphinx.py\n@@ -126,7 +126,7 @@\n encoding='utf-8', mode='a')\n outfile.write(\"\\n\")\n conf_py_path = version_utils.get_conf_py_path(self.version)\n- remote_version = version_utils.get_vcs_version(self.version)\n+ remote_version = version_utils.get_vcs_version_slug(self.version)\n github_info = version_utils.get_github_username_repo(self.version)\n bitbucket_info = version_utils.get_bitbucket_username_repo(self.version)\n if github_info[0] is None:\n", "issue": "Edit on GitHub link is broken for branches with e.g. slash in the name\nIf you have docs built from a branch with e.g. slash in the name, like `v1.x/master`, then RTD will \"slugify\" it and use v1.x-master in URLs. 
That's OK.\n\nThe problem is that RTD use this slugified branch name to construct the \"Edit on GitHub\" URL as well, which then becomes a broken link.\n\n", "before_files": [{"content": "import re\n\nGH_REGEXS = [\n re.compile('github.com/(.+)/(.+)(?:\\.git){1}'),\n re.compile('github.com/(.+)/(.+)'),\n re.compile('github.com:(.+)/(.+).git'),\n]\n\nBB_REGEXS = [\n re.compile('bitbucket.org/(.+)/(.+)/'),\n re.compile('bitbucket.org/(.+)/(.+)'),\n re.compile('bitbucket.org:(.+)/(.+)\\.git'),\n]\n\ndef get_github_username_repo(version):\n repo_url = version.project.repo\n if 'github' in repo_url:\n for regex in GH_REGEXS:\n match = regex.search(repo_url)\n if match:\n return match.groups()\n return (None, None)\n\ndef get_bitbucket_username_repo(version):\n repo_url = version.project.repo\n if 'bitbucket' in repo_url:\n for regex in BB_REGEXS:\n match = regex.search(repo_url)\n if match:\n return match.groups()\n return (None, None)\n\ndef get_vcs_version(version):\n if version.slug == 'latest':\n if version.project.default_branch:\n return version.project.default_branch\n else:\n return version.project.vcs_repo().fallback_branch\n else:\n return version.slug\n\ndef get_conf_py_path(version):\n conf_py_path = version.project.conf_file(version.slug)\n conf_py_path = conf_py_path.replace(\n version.project.checkout_path(version.slug), '')\n return conf_py_path.replace('conf.py', '')\n", "path": "readthedocs/builds/utils.py"}, {"content": "import os\nimport shutil\nimport codecs\nimport logging\nimport zipfile\n\nfrom django.template import Template, Context\nfrom django.contrib.auth.models import SiteProfileNotAvailable\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.conf import settings\n\nfrom builds import utils as version_utils\nfrom core.utils import copy_to_app_servers, copy_file_to_app_servers\nfrom doc_builder.base import BaseBuilder, restoring_chdir\nfrom projects.utils import run\nfrom tastyapi import apiv2\n\nlog = logging.getLogger(__name__)\n\n\nRTD_CONF_ADDITIONS = \"\"\"\n{% load projects_tags %}\n#Add RTD Template Path.\nif 'templates_path' in globals():\n templates_path.insert(0, '{{ template_path }}')\nelse:\n templates_path = ['{{ template_path }}', 'templates', '_templates',\n '.templates']\n\n# Add RTD Static Path. Add to the end because it overwrites previous files.\nif 'html_static_path' in globals():\n html_static_path.append('{{ static_path }}')\nelse:\n html_static_path = ['_static', '{{ static_path }}']\n\n# Add RTD Theme Path. 
\nif 'html_theme_path' in globals():\n html_theme_path.append('{{ template_path }}')\nelse:\n html_theme_path = ['_themes', '{{ template_path }}']\n\n# Add RTD Theme only if they aren't overriding it already\nusing_rtd_theme = False\nif 'html_theme' in globals():\n if html_theme in ['default']:\n # Allow people to bail with a hack of having an html_style\n if not 'html_style' in globals():\n html_theme = 'sphinx_rtd_theme'\n html_style = None\n html_theme_options = {}\n using_rtd_theme = True\nelse:\n html_theme = 'sphinx_rtd_theme'\n html_style = None\n html_theme_options = {}\n using_rtd_theme = True\n\n# Force theme on setting\nif globals().get('RTD_NEW_THEME', False):\n html_theme = 'sphinx_rtd_theme'\n html_style = None\n html_theme_options = {}\n using_rtd_theme = True\n\nif globals().get('RTD_OLD_THEME', False):\n html_style = 'rtd.css'\n html_theme = 'default'\n\n#Add project information to the template context.\ncontext = {\n 'using_theme': using_rtd_theme,\n 'html_theme': html_theme,\n 'current_version': \"{{ current_version }}\",\n 'MEDIA_URL': \"{{ settings.MEDIA_URL }}\",\n 'PRODUCTION_DOMAIN': \"{{ settings.PRODUCTION_DOMAIN }}\",\n 'versions': [{% for version in versions|sort_version_aware %}\n (\"{{ version.slug }}\", \"/en/{{ version.slug }}/\"),{% endfor %}\n ],\n 'downloads': [ {% for key, val in downloads.items %}\n (\"{{ key }}\", \"{{ val }}\"),{% endfor %}\n ],\n 'slug': '{{ project.slug }}',\n 'name': u'{{ project.name }}',\n 'rtd_language': u'{{ project.language }}',\n 'canonical_url': '{{ project.canonical_url }}',\n 'analytics_code': '{{ project.analytics_code }}',\n 'conf_py_path': '{{ conf_py_path }}',\n 'github_user': '{{ github_user }}',\n 'github_repo': '{{ github_repo }}',\n 'github_version': '{{ github_version }}',\n 'display_github': {{ display_github }},\n 'READTHEDOCS': True,\n 'using_theme': (html_theme == \"default\"),\n 'new_theme': (html_theme == \"sphinx_rtd_theme\"),\n}\nif 'html_context' in globals():\n html_context.update(context)\nelse:\n html_context = context\n\n# Add custom RTD extension\nif 'extensions' in globals():\n extensions.append(\"readthedocs_ext.readthedocs\")\n extensions.append(\"readthedocs_ext.readthedocshtmldir\")\nelse:\n extensions = [\"readthedocs_ext.readthedocs\", \"readthedocs_ext.readthedocshtmldir\"]\n\"\"\"\n\nTEMPLATE_DIR = '%s/readthedocs/templates/sphinx' % settings.SITE_ROOT\nSTATIC_DIR = '%s/_static' % TEMPLATE_DIR\n\n\nclass Builder(BaseBuilder):\n \"\"\"\n The parent for most sphinx builders.\n\n Also handles the default sphinx output of html.\n \"\"\"\n\n def _whitelisted(self, **kwargs):\n \"\"\"Modify the given ``conf.py`` file from a whitelisted user's project.\n \"\"\"\n project = self.version.project\n #Open file for appending.\n outfile = codecs.open(project.conf_file(self.version.slug),\n encoding='utf-8', mode='a')\n outfile.write(\"\\n\")\n conf_py_path = version_utils.get_conf_py_path(self.version)\n remote_version = version_utils.get_vcs_version(self.version)\n github_info = version_utils.get_github_username_repo(self.version)\n bitbucket_info = version_utils.get_bitbucket_username_repo(self.version)\n if github_info[0] is None:\n display_github = False\n else:\n display_github = True\n if bitbucket_info[0] is None:\n display_bitbucket = False\n else:\n display_bitbucket = True\n\n rtd_ctx = Context({\n 'versions': project.api_versions(),\n 'downloads': self.version.get_downloads(pretty=True),\n 'current_version': self.version.slug,\n 'project': project,\n 'settings': settings,\n 'static_path': 
STATIC_DIR,\n 'template_path': TEMPLATE_DIR,\n 'conf_py_path': conf_py_path,\n 'downloads': apiv2.version(self.version.pk).downloads.get()['downloads'],\n # GitHub\n 'github_user': github_info[0],\n 'github_repo': github_info[1],\n 'github_version': remote_version,\n 'display_github': display_github,\n # BitBucket\n 'bitbucket_user': bitbucket_info[0],\n 'bitbucket_repo': bitbucket_info[1],\n 'bitbucket_version': remote_version,\n 'display_bitbucket': display_bitbucket,\n })\n rtd_string = Template(RTD_CONF_ADDITIONS).render(rtd_ctx)\n outfile.write(rtd_string)\n\n def clean(self, **kwargs):\n try:\n self._whitelisted()\n except (OSError, SiteProfileNotAvailable, ObjectDoesNotExist):\n log.error(\"Conf file not found. Error writing to disk.\",\n exc_info=True)\n return ('', 'Conf file not found. Error writing to disk.', -1)\n\n @restoring_chdir\n def build(self, **kwargs):\n project = self.version.project\n os.chdir(project.conf_dir(self.version.slug))\n force_str = \" -E \" if self.force else \"\"\n if project.use_virtualenv:\n build_command = \"%s %s -b readthedocs -D language=%s . _build/html \" % (\n project.venv_bin(version=self.version.slug,\n bin='sphinx-build'),\n force_str,\n project.language)\n else:\n build_command = (\"sphinx-build %s -b readthedocs -D language=%s . _build/html\"\n % (force_str, project.language))\n build_results = run(build_command, shell=True)\n self._zip_html()\n if 'no targets are out of date.' in build_results[1]:\n self._changed = False\n return build_results\n\n @restoring_chdir\n def _zip_html(self, **kwargs):\n from_path = self.version.project.full_build_path(self.version.slug)\n to_path = self.version.project.checkout_path(self.version.slug)\n to_file = os.path.join(to_path, '%s.zip' % self.version.project.slug)\n\n log.info(\"Creating zip file from %s\" % from_path)\n # Create a <slug>.zip file containing all files in file_path\n os.chdir(from_path)\n archive = zipfile.ZipFile(to_file, 'w')\n for root, subfolders, files in os.walk('.'):\n for file in files:\n to_write = os.path.join(root, file)\n archive.write(\n filename=to_write,\n arcname=os.path.join(\"%s-%s\" % (self.version.project.slug,\n self.version.slug),\n to_write)\n )\n archive.close()\n return to_file\n\n def move(self, **kwargs):\n project = self.version.project\n if project.full_build_path(self.version.slug):\n #Copy the html files.\n target = project.rtd_build_path(self.version.slug)\n if \"_\" in project.slug:\n new_slug = project.slug.replace('_', '-')\n new_target = target.replace(project.slug, new_slug)\n #Only replace 1, so user_builds doesn't get replaced >:x\n targets = [target, new_target]\n else:\n targets = [target]\n for target in targets:\n if getattr(settings, \"MULTIPLE_APP_SERVERS\", None):\n log.info(\"Copying docs to remote server.\")\n copy_to_app_servers(\n project.full_build_path(self.version.slug), target)\n else:\n if os.path.exists(target):\n shutil.rmtree(target)\n log.info(\"Copying docs on the local filesystem\")\n shutil.copytree(\n project.full_build_path(self.version.slug), target)\n\n #Copy the zip file.\n to_path = os.path.join(settings.MEDIA_ROOT, 'htmlzip',\n project.slug, self.version.slug)\n to_file = os.path.join(to_path, '%s.zip' % project.slug)\n from_path = project.checkout_path(self.version.slug)\n from_file = os.path.join(from_path, '%s.zip' % project.slug)\n if getattr(settings, \"MULTIPLE_APP_SERVERS\", None):\n copy_file_to_app_servers(from_file, to_file)\n else:\n if not os.path.exists(to_path):\n os.makedirs(to_path)\n run('mv -f %s %s' % 
(from_file, to_file))\n else:\n log.warning(\"Not moving docs, because the build dir is unknown.\")\n", "path": "readthedocs/doc_builder/backends/sphinx.py"}]}
| 3,835 | 425 |
gh_patches_debug_22947
|
rasdani/github-patches
|
git_diff
|
goauthentik__authentik-7386
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Email stage links incorrectly encoded
**Describe the bug**
Links sent in emails are getting doubly encoded.
For example:
`http://localhost:9000/if/flow/signup/?query=&flow_token=W2kRqeEmVIdh0h...`
The issue is the `&` between querystring parameters.
**To Reproduce**
Steps to reproduce the behavior:
I'm seeing these on all signup flows and reset password flows where an email is triggered.
**Expected behavior**
I'd expect the above to instead be
`http://localhost:9000/if/flow/signup/?query=&flow_token=W2kRqeEmVIdh0h...`
**Version and Deployment (please complete the following information):**
- authentik version: 2023.10.2
- Deployment: only tested so far in docker-compose
</issue>
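As a side note (not part of the original report), the double encoding comes from keeping the already-encoded query string nested inside the flow executor's `query=` parameter and then URL-encoding the whole thing again when the email link is built. A minimal, self-contained sketch of that effect with made-up parameter values; it illustrates the mechanism, not authentik's exact output:

```python
from django.conf import settings

settings.configure()  # minimal standalone setup so QueryDict works outside a project
from django.http import QueryDict

# The real flow parameters live *inside* the executor's "query" value (made-up values here).
outer = QueryDict("query=next%3D%2Fapp%2F%26foo%3Dbar", mutable=True)

# Old approach: keep the nested "query" wrapper and re-encode it when building the link.
inner = QueryDict(outer["query"], mutable=True)  # next=/app/, foo=bar
outer["query"] = inner.urlencode()               # "next=%2Fapp%2F&foo=bar"
print(outer.urlencode())
# query=next%3D%252Fapp%252F%26foo%3Dbar   <- the inner values are now encoded twice

# Approach taken by the fix shown later in this row: expand the inner parameters directly,
# so each one ends up encoded exactly once in the final email link.
print(inner.urlencode())
# next=%2Fapp%2F&foo=bar
```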
<code>
[start of authentik/stages/email/stage.py]
1 """authentik multi-stage authentication engine"""
2 from datetime import timedelta
3
4 from django.contrib import messages
5 from django.http import HttpRequest, HttpResponse
6 from django.http.request import QueryDict
7 from django.urls import reverse
8 from django.utils.text import slugify
9 from django.utils.timezone import now
10 from django.utils.translation import gettext as _
11 from rest_framework.fields import CharField
12 from rest_framework.serializers import ValidationError
13
14 from authentik.flows.challenge import Challenge, ChallengeResponse, ChallengeTypes
15 from authentik.flows.models import FlowDesignation, FlowToken
16 from authentik.flows.planner import PLAN_CONTEXT_IS_RESTORED, PLAN_CONTEXT_PENDING_USER
17 from authentik.flows.stage import ChallengeStageView
18 from authentik.flows.views.executor import QS_KEY_TOKEN, QS_QUERY
19 from authentik.stages.email.models import EmailStage
20 from authentik.stages.email.tasks import send_mails
21 from authentik.stages.email.utils import TemplateEmailMessage
22
23 PLAN_CONTEXT_EMAIL_SENT = "email_sent"
24 PLAN_CONTEXT_EMAIL_OVERRIDE = "email"
25
26
27 class EmailChallenge(Challenge):
28 """Email challenge"""
29
30 component = CharField(default="ak-stage-email")
31
32
33 class EmailChallengeResponse(ChallengeResponse):
34     """Email challenge response. No fields. This challenge is
35 always declared invalid to give the user a chance to retry"""
36
37 component = CharField(default="ak-stage-email")
38
39 def validate(self, attrs):
40 raise ValidationError(detail="email-sent", code="email-sent")
41
42
43 class EmailStageView(ChallengeStageView):
44 """Email stage which sends Email for verification"""
45
46 response_class = EmailChallengeResponse
47
48 def get_full_url(self, **kwargs) -> str:
49 """Get full URL to be used in template"""
50 base_url = reverse(
51 "authentik_core:if-flow",
52 kwargs={"flow_slug": self.executor.flow.slug},
53 )
54 # Parse query string from current URL (full query string)
55 query_params = QueryDict(self.request.META.get("QUERY_STRING", ""), mutable=True)
56 query_params.pop(QS_KEY_TOKEN, None)
57
58 # Check for nested query string used by flow executor, and remove any
59 # kind of flow token from that
60 if QS_QUERY in query_params:
61 inner_query_params = QueryDict(query_params.get(QS_QUERY), mutable=True)
62 inner_query_params.pop(QS_KEY_TOKEN, None)
63 query_params[QS_QUERY] = inner_query_params.urlencode()
64
65 query_params.update(kwargs)
66 full_url = base_url
67 if len(query_params) > 0:
68 full_url = f"{full_url}?{query_params.urlencode()}"
69 return self.request.build_absolute_uri(full_url)
70
71 def get_token(self) -> FlowToken:
72 """Get token"""
73 pending_user = self.get_pending_user()
74 current_stage: EmailStage = self.executor.current_stage
75 valid_delta = timedelta(
76 minutes=current_stage.token_expiry + 1
77 ) # + 1 because django timesince always rounds down
78 identifier = slugify(f"ak-email-stage-{current_stage.name}-{pending_user}")
79 # Don't check for validity here, we only care if the token exists
80 tokens = FlowToken.objects.filter(identifier=identifier)
81 if not tokens.exists():
82 return FlowToken.objects.create(
83 expires=now() + valid_delta,
84 user=pending_user,
85 identifier=identifier,
86 flow=self.executor.flow,
87 _plan=FlowToken.pickle(self.executor.plan),
88 )
89 token = tokens.first()
90 # Check if token is expired and rotate key if so
91 if token.is_expired:
92 token.expire_action()
93 return token
94
95 def send_email(self):
96 """Helper function that sends the actual email. Implies that you've
97 already checked that there is a pending user."""
98 pending_user = self.get_pending_user()
99 if not pending_user.pk and self.executor.flow.designation == FlowDesignation.RECOVERY:
100 # Pending user does not have a primary key, and we're in a recovery flow,
101 # which means the user entered an invalid identifier, so we pretend to send the
102 # email, to not disclose if the user exists
103 return
104 email = self.executor.plan.context.get(PLAN_CONTEXT_EMAIL_OVERRIDE, None)
105 if not email:
106 email = pending_user.email
107 current_stage: EmailStage = self.executor.current_stage
108 token = self.get_token()
109 # Send mail to user
110 message = TemplateEmailMessage(
111 subject=_(current_stage.subject),
112 to=[email],
113 language=pending_user.locale(self.request),
114 template_name=current_stage.template,
115 template_context={
116 "url": self.get_full_url(**{QS_KEY_TOKEN: token.key}),
117 "user": pending_user,
118 "expires": token.expires,
119 },
120 )
121 send_mails(current_stage, message)
122
123 def get(self, request: HttpRequest, *args, **kwargs) -> HttpResponse:
124 # Check if the user came back from the email link to verify
125 restore_token: FlowToken = self.executor.plan.context.get(PLAN_CONTEXT_IS_RESTORED, None)
126 user = self.get_pending_user()
127 if restore_token:
128 if restore_token.user != user:
129 self.logger.warning("Flow token for non-matching user, denying request")
130 return self.executor.stage_invalid()
131 messages.success(request, _("Successfully verified Email."))
132 if self.executor.current_stage.activate_user_on_success:
133 user.is_active = True
134 user.save()
135 return self.executor.stage_ok()
136 if PLAN_CONTEXT_PENDING_USER not in self.executor.plan.context:
137 self.logger.debug("No pending user")
138 messages.error(self.request, _("No pending user."))
139 return self.executor.stage_invalid()
140 # Check if we've already sent the initial e-mail
141 if PLAN_CONTEXT_EMAIL_SENT not in self.executor.plan.context:
142 self.send_email()
143 self.executor.plan.context[PLAN_CONTEXT_EMAIL_SENT] = True
144 return super().get(request, *args, **kwargs)
145
146 def get_challenge(self) -> Challenge:
147 challenge = EmailChallenge(
148 data={
149 "type": ChallengeTypes.NATIVE.value,
150 "title": _("Email sent."),
151 }
152 )
153 return challenge
154
155 def challenge_valid(self, response: ChallengeResponse) -> HttpResponse:
156 return super().challenge_invalid(response)
157
158 def challenge_invalid(self, response: ChallengeResponse) -> HttpResponse:
159 if PLAN_CONTEXT_PENDING_USER not in self.executor.plan.context:
160 messages.error(self.request, _("No pending user."))
161 return super().challenge_invalid(response)
162 self.send_email()
163 messages.success(self.request, _("Email Successfully sent."))
164 # We can't call stage_ok yet, as we're still waiting
165 # for the user to click the link in the email
166 return super().challenge_invalid(response)
167
[end of authentik/stages/email/stage.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/authentik/stages/email/stage.py b/authentik/stages/email/stage.py
--- a/authentik/stages/email/stage.py
+++ b/authentik/stages/email/stage.py
@@ -52,17 +52,13 @@
kwargs={"flow_slug": self.executor.flow.slug},
)
# Parse query string from current URL (full query string)
- query_params = QueryDict(self.request.META.get("QUERY_STRING", ""), mutable=True)
+ # this view is only run within a flow executor, where we need to get the query string
+ # from the query= parameter (double encoded); but for the redirect
+ # we need to expand it since it'll go through the flow interface
+ query_params = QueryDict(self.request.GET.get(QS_QUERY), mutable=True)
query_params.pop(QS_KEY_TOKEN, None)
-
- # Check for nested query string used by flow executor, and remove any
- # kind of flow token from that
- if QS_QUERY in query_params:
- inner_query_params = QueryDict(query_params.get(QS_QUERY), mutable=True)
- inner_query_params.pop(QS_KEY_TOKEN, None)
- query_params[QS_QUERY] = inner_query_params.urlencode()
-
query_params.update(kwargs)
+ print(query_params)
full_url = base_url
if len(query_params) > 0:
full_url = f"{full_url}?{query_params.urlencode()}"
|
{"golden_diff": "diff --git a/authentik/stages/email/stage.py b/authentik/stages/email/stage.py\n--- a/authentik/stages/email/stage.py\n+++ b/authentik/stages/email/stage.py\n@@ -52,17 +52,13 @@\n kwargs={\"flow_slug\": self.executor.flow.slug},\n )\n # Parse query string from current URL (full query string)\n- query_params = QueryDict(self.request.META.get(\"QUERY_STRING\", \"\"), mutable=True)\n+ # this view is only run within a flow executor, where we need to get the query string\n+ # from the query= parameter (double encoded); but for the redirect\n+ # we need to expand it since it'll go through the flow interface\n+ query_params = QueryDict(self.request.GET.get(QS_QUERY), mutable=True)\n query_params.pop(QS_KEY_TOKEN, None)\n-\n- # Check for nested query string used by flow executor, and remove any\n- # kind of flow token from that\n- if QS_QUERY in query_params:\n- inner_query_params = QueryDict(query_params.get(QS_QUERY), mutable=True)\n- inner_query_params.pop(QS_KEY_TOKEN, None)\n- query_params[QS_QUERY] = inner_query_params.urlencode()\n-\n query_params.update(kwargs)\n+ print(query_params)\n full_url = base_url\n if len(query_params) > 0:\n full_url = f\"{full_url}?{query_params.urlencode()}\"\n", "issue": "Email stage links incorrectly encoded\n**Describe the bug**\r\nLinks sent in emails are getting doubly encoded.\r\n\r\nFor example:\r\n`http://localhost:9000/if/flow/signup/?query=&flow_token=W2kRqeEmVIdh0h...`\r\n\r\nThe issue is the `&` between querystring parameters.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n\r\nI'm seeing these on all signup flows and reset password flows where an email is triggered. \r\n\r\n**Expected behavior**\r\nI'd expect the above to instead be \r\n`http://localhost:9000/if/flow/signup/?query=&flow_token=W2kRqeEmVIdh0h...`\r\n\r\n**Version and Deployment (please complete the following information):**\r\n\r\n- authentik version: 2023.10.2\r\n- Deployment: only tested so far in docker-compose\r\n\r\n\n", "before_files": [{"content": "\"\"\"authentik multi-stage authentication engine\"\"\"\nfrom datetime import timedelta\n\nfrom django.contrib import messages\nfrom django.http import HttpRequest, HttpResponse\nfrom django.http.request import QueryDict\nfrom django.urls import reverse\nfrom django.utils.text import slugify\nfrom django.utils.timezone import now\nfrom django.utils.translation import gettext as _\nfrom rest_framework.fields import CharField\nfrom rest_framework.serializers import ValidationError\n\nfrom authentik.flows.challenge import Challenge, ChallengeResponse, ChallengeTypes\nfrom authentik.flows.models import FlowDesignation, FlowToken\nfrom authentik.flows.planner import PLAN_CONTEXT_IS_RESTORED, PLAN_CONTEXT_PENDING_USER\nfrom authentik.flows.stage import ChallengeStageView\nfrom authentik.flows.views.executor import QS_KEY_TOKEN, QS_QUERY\nfrom authentik.stages.email.models import EmailStage\nfrom authentik.stages.email.tasks import send_mails\nfrom authentik.stages.email.utils import TemplateEmailMessage\n\nPLAN_CONTEXT_EMAIL_SENT = \"email_sent\"\nPLAN_CONTEXT_EMAIL_OVERRIDE = \"email\"\n\n\nclass EmailChallenge(Challenge):\n \"\"\"Email challenge\"\"\"\n\n component = CharField(default=\"ak-stage-email\")\n\n\nclass EmailChallengeResponse(ChallengeResponse):\n \"\"\"Email challenge resposen. No fields. 
This challenge is\n always declared invalid to give the user a chance to retry\"\"\"\n\n component = CharField(default=\"ak-stage-email\")\n\n def validate(self, attrs):\n raise ValidationError(detail=\"email-sent\", code=\"email-sent\")\n\n\nclass EmailStageView(ChallengeStageView):\n \"\"\"Email stage which sends Email for verification\"\"\"\n\n response_class = EmailChallengeResponse\n\n def get_full_url(self, **kwargs) -> str:\n \"\"\"Get full URL to be used in template\"\"\"\n base_url = reverse(\n \"authentik_core:if-flow\",\n kwargs={\"flow_slug\": self.executor.flow.slug},\n )\n # Parse query string from current URL (full query string)\n query_params = QueryDict(self.request.META.get(\"QUERY_STRING\", \"\"), mutable=True)\n query_params.pop(QS_KEY_TOKEN, None)\n\n # Check for nested query string used by flow executor, and remove any\n # kind of flow token from that\n if QS_QUERY in query_params:\n inner_query_params = QueryDict(query_params.get(QS_QUERY), mutable=True)\n inner_query_params.pop(QS_KEY_TOKEN, None)\n query_params[QS_QUERY] = inner_query_params.urlencode()\n\n query_params.update(kwargs)\n full_url = base_url\n if len(query_params) > 0:\n full_url = f\"{full_url}?{query_params.urlencode()}\"\n return self.request.build_absolute_uri(full_url)\n\n def get_token(self) -> FlowToken:\n \"\"\"Get token\"\"\"\n pending_user = self.get_pending_user()\n current_stage: EmailStage = self.executor.current_stage\n valid_delta = timedelta(\n minutes=current_stage.token_expiry + 1\n ) # + 1 because django timesince always rounds down\n identifier = slugify(f\"ak-email-stage-{current_stage.name}-{pending_user}\")\n # Don't check for validity here, we only care if the token exists\n tokens = FlowToken.objects.filter(identifier=identifier)\n if not tokens.exists():\n return FlowToken.objects.create(\n expires=now() + valid_delta,\n user=pending_user,\n identifier=identifier,\n flow=self.executor.flow,\n _plan=FlowToken.pickle(self.executor.plan),\n )\n token = tokens.first()\n # Check if token is expired and rotate key if so\n if token.is_expired:\n token.expire_action()\n return token\n\n def send_email(self):\n \"\"\"Helper function that sends the actual email. 
Implies that you've\n already checked that there is a pending user.\"\"\"\n pending_user = self.get_pending_user()\n if not pending_user.pk and self.executor.flow.designation == FlowDesignation.RECOVERY:\n # Pending user does not have a primary key, and we're in a recovery flow,\n # which means the user entered an invalid identifier, so we pretend to send the\n # email, to not disclose if the user exists\n return\n email = self.executor.plan.context.get(PLAN_CONTEXT_EMAIL_OVERRIDE, None)\n if not email:\n email = pending_user.email\n current_stage: EmailStage = self.executor.current_stage\n token = self.get_token()\n # Send mail to user\n message = TemplateEmailMessage(\n subject=_(current_stage.subject),\n to=[email],\n language=pending_user.locale(self.request),\n template_name=current_stage.template,\n template_context={\n \"url\": self.get_full_url(**{QS_KEY_TOKEN: token.key}),\n \"user\": pending_user,\n \"expires\": token.expires,\n },\n )\n send_mails(current_stage, message)\n\n def get(self, request: HttpRequest, *args, **kwargs) -> HttpResponse:\n # Check if the user came back from the email link to verify\n restore_token: FlowToken = self.executor.plan.context.get(PLAN_CONTEXT_IS_RESTORED, None)\n user = self.get_pending_user()\n if restore_token:\n if restore_token.user != user:\n self.logger.warning(\"Flow token for non-matching user, denying request\")\n return self.executor.stage_invalid()\n messages.success(request, _(\"Successfully verified Email.\"))\n if self.executor.current_stage.activate_user_on_success:\n user.is_active = True\n user.save()\n return self.executor.stage_ok()\n if PLAN_CONTEXT_PENDING_USER not in self.executor.plan.context:\n self.logger.debug(\"No pending user\")\n messages.error(self.request, _(\"No pending user.\"))\n return self.executor.stage_invalid()\n # Check if we've already sent the initial e-mail\n if PLAN_CONTEXT_EMAIL_SENT not in self.executor.plan.context:\n self.send_email()\n self.executor.plan.context[PLAN_CONTEXT_EMAIL_SENT] = True\n return super().get(request, *args, **kwargs)\n\n def get_challenge(self) -> Challenge:\n challenge = EmailChallenge(\n data={\n \"type\": ChallengeTypes.NATIVE.value,\n \"title\": _(\"Email sent.\"),\n }\n )\n return challenge\n\n def challenge_valid(self, response: ChallengeResponse) -> HttpResponse:\n return super().challenge_invalid(response)\n\n def challenge_invalid(self, response: ChallengeResponse) -> HttpResponse:\n if PLAN_CONTEXT_PENDING_USER not in self.executor.plan.context:\n messages.error(self.request, _(\"No pending user.\"))\n return super().challenge_invalid(response)\n self.send_email()\n messages.success(self.request, _(\"Email Successfully sent.\"))\n # We can't call stage_ok yet, as we're still waiting\n # for the user to click the link in the email\n return super().challenge_invalid(response)\n", "path": "authentik/stages/email/stage.py"}]}
| 2,558 | 322 |
gh_patches_debug_5679
|
rasdani/github-patches
|
git_diff
|
goauthentik__authentik-4778
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
OAuth sources seem to be linked to every account
**Describe the bug**
Opening user settings and then going to "Connected services" seems to show everyone's connections instead of just those of the current authenticated user.
**To Reproduce**
Steps to reproduce the behavior:
1. Login using existing user (User A)
2. Go to account settings -> connected services to see no connected services
3. In another browser go to login page and login using OAuth (User B)
4. User A now goes to connected services to see that they have the same OAuth connected
5. Logging out of User A and logging in with OAuth will login as User B
6. Disconnecting OAuth from User A will not disconnect it from User B, but will appear to have done so
**Expected behavior**
OAuth management page (connected services) should only show oauth relations that the current user has.
**Screenshots**




**Logs**
Admin Interface -> Event Logs:
- Model Created from grant2 (User B)
- Source Linked from grant2 (User B)
- Model Deleted from grant (User A)
**Version and Deployment (please complete the following information):**
- authentik version: 2023.2.2
- Deployment: docker-compose
**Additional context**
Add any other context about the problem here.
</issue>
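As a side note (not part of the original report), the user-settings UI asks the API to filter connections by user, but with django-filter a query parameter is honoured only if the view declares it; undeclared parameters are silently ignored, which is why privileged users saw every connection. A minimal fragment sketching that behaviour, assuming an existing Django/DRF/django-filter project and made-up model and serializer names:

```python
# Fragment (assumes an existing Django + DRF + django-filter project); Connection and
# ConnectionSerializer are hypothetical stand-ins, not authentik classes.
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import mixins, viewsets

class ConnectionViewSet(mixins.ListModelMixin, viewsets.GenericViewSet):
    queryset = Connection.objects.all()
    serializer_class = ConnectionSerializer
    filter_backends = [DjangoFilterBackend]

    # Without this line, GET /connections/?user=<pk> silently ignores the "user" parameter
    # and returns the whole queryset; declaring it makes DjangoFilterBackend build a
    # FilterSet that actually applies the filter.
    filterset_fields = ["user"]
```

In the actual patch, this is the single `filterset_fields = ["user"]` line added to `UserSourceConnectionViewSet`.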
<code>
[start of authentik/core/api/sources.py]
1 """Source API Views"""
2 from typing import Iterable
3
4 from django_filters.rest_framework import DjangoFilterBackend
5 from drf_spectacular.utils import OpenApiResponse, extend_schema
6 from rest_framework import mixins
7 from rest_framework.decorators import action
8 from rest_framework.filters import OrderingFilter, SearchFilter
9 from rest_framework.parsers import MultiPartParser
10 from rest_framework.request import Request
11 from rest_framework.response import Response
12 from rest_framework.serializers import ModelSerializer, ReadOnlyField, SerializerMethodField
13 from rest_framework.viewsets import GenericViewSet
14 from structlog.stdlib import get_logger
15
16 from authentik.api.authorization import OwnerFilter, OwnerSuperuserPermissions
17 from authentik.api.decorators import permission_required
18 from authentik.core.api.used_by import UsedByMixin
19 from authentik.core.api.utils import MetaNameSerializer, TypeCreateSerializer
20 from authentik.core.models import Source, UserSourceConnection
21 from authentik.core.types import UserSettingSerializer
22 from authentik.lib.utils.file import (
23 FilePathSerializer,
24 FileUploadSerializer,
25 set_file,
26 set_file_url,
27 )
28 from authentik.lib.utils.reflection import all_subclasses
29 from authentik.policies.engine import PolicyEngine
30
31 LOGGER = get_logger()
32
33
34 class SourceSerializer(ModelSerializer, MetaNameSerializer):
35 """Source Serializer"""
36
37 managed = ReadOnlyField()
38 component = SerializerMethodField()
39 icon = ReadOnlyField(source="get_icon")
40
41 def get_component(self, obj: Source) -> str:
42 """Get object component so that we know how to edit the object"""
43 # pyright: reportGeneralTypeIssues=false
44 if obj.__class__ == Source:
45 return ""
46 return obj.component
47
48 class Meta:
49 model = Source
50 fields = [
51 "pk",
52 "name",
53 "slug",
54 "enabled",
55 "authentication_flow",
56 "enrollment_flow",
57 "component",
58 "verbose_name",
59 "verbose_name_plural",
60 "meta_model_name",
61 "policy_engine_mode",
62 "user_matching_mode",
63 "managed",
64 "user_path_template",
65 "icon",
66 ]
67
68
69 class SourceViewSet(
70 mixins.RetrieveModelMixin,
71 mixins.DestroyModelMixin,
72 UsedByMixin,
73 mixins.ListModelMixin,
74 GenericViewSet,
75 ):
76 """Source Viewset"""
77
78 queryset = Source.objects.none()
79 serializer_class = SourceSerializer
80 lookup_field = "slug"
81 search_fields = ["slug", "name"]
82 filterset_fields = ["slug", "name", "managed"]
83
84 def get_queryset(self): # pragma: no cover
85 return Source.objects.select_subclasses()
86
87 @permission_required("authentik_core.change_source")
88 @extend_schema(
89 request={
90 "multipart/form-data": FileUploadSerializer,
91 },
92 responses={
93 200: OpenApiResponse(description="Success"),
94 400: OpenApiResponse(description="Bad request"),
95 },
96 )
97 @action(
98 detail=True,
99 pagination_class=None,
100 filter_backends=[],
101 methods=["POST"],
102 parser_classes=(MultiPartParser,),
103 )
104 def set_icon(self, request: Request, slug: str):
105 """Set source icon"""
106 source: Source = self.get_object()
107 return set_file(request, source, "icon")
108
109 @permission_required("authentik_core.change_source")
110 @extend_schema(
111 request=FilePathSerializer,
112 responses={
113 200: OpenApiResponse(description="Success"),
114 400: OpenApiResponse(description="Bad request"),
115 },
116 )
117 @action(
118 detail=True,
119 pagination_class=None,
120 filter_backends=[],
121 methods=["POST"],
122 )
123 def set_icon_url(self, request: Request, slug: str):
124 """Set source icon (as URL)"""
125 source: Source = self.get_object()
126 return set_file_url(request, source, "icon")
127
128 @extend_schema(responses={200: TypeCreateSerializer(many=True)})
129 @action(detail=False, pagination_class=None, filter_backends=[])
130 def types(self, request: Request) -> Response:
131 """Get all creatable source types"""
132 data = []
133 for subclass in all_subclasses(self.queryset.model):
134 subclass: Source
135 component = ""
136 if len(subclass.__subclasses__()) > 0:
137 continue
138 if subclass._meta.abstract:
139 component = subclass.__bases__[0]().component
140 else:
141 component = subclass().component
142 # pyright: reportGeneralTypeIssues=false
143 data.append(
144 {
145 "name": subclass._meta.verbose_name,
146 "description": subclass.__doc__,
147 "component": component,
148 "model_name": subclass._meta.model_name,
149 }
150 )
151 return Response(TypeCreateSerializer(data, many=True).data)
152
153 @extend_schema(responses={200: UserSettingSerializer(many=True)})
154 @action(detail=False, pagination_class=None, filter_backends=[])
155 def user_settings(self, request: Request) -> Response:
156 """Get all sources the user can configure"""
157 _all_sources: Iterable[Source] = (
158 Source.objects.filter(enabled=True).select_subclasses().order_by("name")
159 )
160 matching_sources: list[UserSettingSerializer] = []
161 for source in _all_sources:
162 user_settings = source.ui_user_settings()
163 if not user_settings:
164 continue
165 policy_engine = PolicyEngine(source, request.user, request)
166 policy_engine.build()
167 if not policy_engine.passing:
168 continue
169 source_settings = source.ui_user_settings()
170 source_settings.initial_data["object_uid"] = source.slug
171 if not source_settings.is_valid():
172 LOGGER.warning(source_settings.errors)
173 matching_sources.append(source_settings.validated_data)
174 return Response(matching_sources)
175
176
177 class UserSourceConnectionSerializer(SourceSerializer):
178     """User-source connection Serializer"""
179
180 source = SourceSerializer(read_only=True)
181
182 class Meta:
183 model = UserSourceConnection
184 fields = [
185 "pk",
186 "user",
187 "source",
188 "created",
189 ]
190 extra_kwargs = {
191 "user": {"read_only": True},
192 "created": {"read_only": True},
193 }
194
195
196 class UserSourceConnectionViewSet(
197 mixins.RetrieveModelMixin,
198 mixins.UpdateModelMixin,
199 mixins.DestroyModelMixin,
200 UsedByMixin,
201 mixins.ListModelMixin,
202 GenericViewSet,
203 ):
204 """User-source connection Viewset"""
205
206 queryset = UserSourceConnection.objects.all()
207 serializer_class = UserSourceConnectionSerializer
208 permission_classes = [OwnerSuperuserPermissions]
209 filter_backends = [OwnerFilter, DjangoFilterBackend, OrderingFilter, SearchFilter]
210 ordering = ["pk"]
211
[end of authentik/core/api/sources.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/authentik/core/api/sources.py b/authentik/core/api/sources.py
--- a/authentik/core/api/sources.py
+++ b/authentik/core/api/sources.py
@@ -206,5 +206,6 @@
queryset = UserSourceConnection.objects.all()
serializer_class = UserSourceConnectionSerializer
permission_classes = [OwnerSuperuserPermissions]
+ filterset_fields = ["user"]
filter_backends = [OwnerFilter, DjangoFilterBackend, OrderingFilter, SearchFilter]
ordering = ["pk"]
|
{"golden_diff": "diff --git a/authentik/core/api/sources.py b/authentik/core/api/sources.py\n--- a/authentik/core/api/sources.py\n+++ b/authentik/core/api/sources.py\n@@ -206,5 +206,6 @@\n queryset = UserSourceConnection.objects.all()\n serializer_class = UserSourceConnectionSerializer\n permission_classes = [OwnerSuperuserPermissions]\n+ filterset_fields = [\"user\"]\n filter_backends = [OwnerFilter, DjangoFilterBackend, OrderingFilter, SearchFilter]\n ordering = [\"pk\"]\n", "issue": "OAuth sources seem to be linked to every account\n**Describe the bug**\r\nOpening user settings then going to \"Connected services\" seem to show everyone's connections instead of just the current authenticated user\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Login using existing user (User A)\r\n2. Go to account settings -> connected services to see no connected services\r\n3. In another browser go to login page and login using OAuth (User B)\r\n4. User A now goes to connected services to see that they have the same OAuth connected\r\n5. Logging out of User A and logging in with OAuth will login as User B\r\n6. Disconnecting OAuth from User A will not disconnect it from User B, but will appear to have\r\n\r\n**Expected behavior**\r\nOAuth management page (connected services) should only show oauth relations that the current user has.\r\n\r\n**Screenshots**\r\n\r\n\r\n\r\n\r\n\r\n\r\n**Logs**\r\nAdmin Interface -> Event Logs:\r\n- Model Created from grant2 (User B)\r\n- Source Linked from grant2 (User B)\r\n- Model Deleted from grant (User A)\r\n\r\n**Version and Deployment (please complete the following information):**\r\n - authentik version: 2023.2.2\r\n - Deployment: docker-compose\r\n\r\n**Additional context**\r\nAdd any other context about the problem here.\r\n\n", "before_files": [{"content": "\"\"\"Source API Views\"\"\"\nfrom typing import Iterable\n\nfrom django_filters.rest_framework import DjangoFilterBackend\nfrom drf_spectacular.utils import OpenApiResponse, extend_schema\nfrom rest_framework import mixins\nfrom rest_framework.decorators import action\nfrom rest_framework.filters import OrderingFilter, SearchFilter\nfrom rest_framework.parsers import MultiPartParser\nfrom rest_framework.request import Request\nfrom rest_framework.response import Response\nfrom rest_framework.serializers import ModelSerializer, ReadOnlyField, SerializerMethodField\nfrom rest_framework.viewsets import GenericViewSet\nfrom structlog.stdlib import get_logger\n\nfrom authentik.api.authorization import OwnerFilter, OwnerSuperuserPermissions\nfrom authentik.api.decorators import permission_required\nfrom authentik.core.api.used_by import UsedByMixin\nfrom authentik.core.api.utils import MetaNameSerializer, TypeCreateSerializer\nfrom authentik.core.models import Source, UserSourceConnection\nfrom authentik.core.types import UserSettingSerializer\nfrom authentik.lib.utils.file import (\n FilePathSerializer,\n FileUploadSerializer,\n set_file,\n set_file_url,\n)\nfrom authentik.lib.utils.reflection import all_subclasses\nfrom authentik.policies.engine import PolicyEngine\n\nLOGGER = get_logger()\n\n\nclass SourceSerializer(ModelSerializer, MetaNameSerializer):\n \"\"\"Source Serializer\"\"\"\n\n managed = ReadOnlyField()\n component = SerializerMethodField()\n icon = ReadOnlyField(source=\"get_icon\")\n\n def get_component(self, obj: Source) -> str:\n \"\"\"Get object component so that we know how to edit the object\"\"\"\n # pyright: reportGeneralTypeIssues=false\n if obj.__class__ == Source:\n 
return \"\"\n return obj.component\n\n class Meta:\n model = Source\n fields = [\n \"pk\",\n \"name\",\n \"slug\",\n \"enabled\",\n \"authentication_flow\",\n \"enrollment_flow\",\n \"component\",\n \"verbose_name\",\n \"verbose_name_plural\",\n \"meta_model_name\",\n \"policy_engine_mode\",\n \"user_matching_mode\",\n \"managed\",\n \"user_path_template\",\n \"icon\",\n ]\n\n\nclass SourceViewSet(\n mixins.RetrieveModelMixin,\n mixins.DestroyModelMixin,\n UsedByMixin,\n mixins.ListModelMixin,\n GenericViewSet,\n):\n \"\"\"Source Viewset\"\"\"\n\n queryset = Source.objects.none()\n serializer_class = SourceSerializer\n lookup_field = \"slug\"\n search_fields = [\"slug\", \"name\"]\n filterset_fields = [\"slug\", \"name\", \"managed\"]\n\n def get_queryset(self): # pragma: no cover\n return Source.objects.select_subclasses()\n\n @permission_required(\"authentik_core.change_source\")\n @extend_schema(\n request={\n \"multipart/form-data\": FileUploadSerializer,\n },\n responses={\n 200: OpenApiResponse(description=\"Success\"),\n 400: OpenApiResponse(description=\"Bad request\"),\n },\n )\n @action(\n detail=True,\n pagination_class=None,\n filter_backends=[],\n methods=[\"POST\"],\n parser_classes=(MultiPartParser,),\n )\n def set_icon(self, request: Request, slug: str):\n \"\"\"Set source icon\"\"\"\n source: Source = self.get_object()\n return set_file(request, source, \"icon\")\n\n @permission_required(\"authentik_core.change_source\")\n @extend_schema(\n request=FilePathSerializer,\n responses={\n 200: OpenApiResponse(description=\"Success\"),\n 400: OpenApiResponse(description=\"Bad request\"),\n },\n )\n @action(\n detail=True,\n pagination_class=None,\n filter_backends=[],\n methods=[\"POST\"],\n )\n def set_icon_url(self, request: Request, slug: str):\n \"\"\"Set source icon (as URL)\"\"\"\n source: Source = self.get_object()\n return set_file_url(request, source, \"icon\")\n\n @extend_schema(responses={200: TypeCreateSerializer(many=True)})\n @action(detail=False, pagination_class=None, filter_backends=[])\n def types(self, request: Request) -> Response:\n \"\"\"Get all creatable source types\"\"\"\n data = []\n for subclass in all_subclasses(self.queryset.model):\n subclass: Source\n component = \"\"\n if len(subclass.__subclasses__()) > 0:\n continue\n if subclass._meta.abstract:\n component = subclass.__bases__[0]().component\n else:\n component = subclass().component\n # pyright: reportGeneralTypeIssues=false\n data.append(\n {\n \"name\": subclass._meta.verbose_name,\n \"description\": subclass.__doc__,\n \"component\": component,\n \"model_name\": subclass._meta.model_name,\n }\n )\n return Response(TypeCreateSerializer(data, many=True).data)\n\n @extend_schema(responses={200: UserSettingSerializer(many=True)})\n @action(detail=False, pagination_class=None, filter_backends=[])\n def user_settings(self, request: Request) -> Response:\n \"\"\"Get all sources the user can configure\"\"\"\n _all_sources: Iterable[Source] = (\n Source.objects.filter(enabled=True).select_subclasses().order_by(\"name\")\n )\n matching_sources: list[UserSettingSerializer] = []\n for source in _all_sources:\n user_settings = source.ui_user_settings()\n if not user_settings:\n continue\n policy_engine = PolicyEngine(source, request.user, request)\n policy_engine.build()\n if not policy_engine.passing:\n continue\n source_settings = source.ui_user_settings()\n source_settings.initial_data[\"object_uid\"] = source.slug\n if not source_settings.is_valid():\n LOGGER.warning(source_settings.errors)\n 
matching_sources.append(source_settings.validated_data)\n return Response(matching_sources)\n\n\nclass UserSourceConnectionSerializer(SourceSerializer):\n \"\"\"OAuth Source Serializer\"\"\"\n\n source = SourceSerializer(read_only=True)\n\n class Meta:\n model = UserSourceConnection\n fields = [\n \"pk\",\n \"user\",\n \"source\",\n \"created\",\n ]\n extra_kwargs = {\n \"user\": {\"read_only\": True},\n \"created\": {\"read_only\": True},\n }\n\n\nclass UserSourceConnectionViewSet(\n mixins.RetrieveModelMixin,\n mixins.UpdateModelMixin,\n mixins.DestroyModelMixin,\n UsedByMixin,\n mixins.ListModelMixin,\n GenericViewSet,\n):\n \"\"\"User-source connection Viewset\"\"\"\n\n queryset = UserSourceConnection.objects.all()\n serializer_class = UserSourceConnectionSerializer\n permission_classes = [OwnerSuperuserPermissions]\n filter_backends = [OwnerFilter, DjangoFilterBackend, OrderingFilter, SearchFilter]\n ordering = [\"pk\"]\n", "path": "authentik/core/api/sources.py"}]}
| 2,979 | 116 |
gh_patches_debug_30079
|
rasdani/github-patches
|
git_diff
|
nautobot__nautobot-844
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Prefix with tags is created in 2 steps
<!--
NOTE: IF YOUR ISSUE DOES NOT FOLLOW THIS TEMPLATE, IT WILL BE CLOSED.
This form is only for reporting reproducible bugs. If you need assistance
with Nautobot installation, or if you have a general question, please start a
discussion instead: https://github.com/nautobot/nautobot/discussions
Please describe the environment in which you are running Nautobot. Be sure
that you are running an unmodified instance of the latest stable release
before submitting a bug report, and that any plugins have been disabled.
-->
### Environment
* Python version: 3.6.8
* Nautobot version: v1.0.0b2
<!--
Describe in detail the exact steps that someone else can take to reproduce
this bug using the current stable release of Nautobot. Begin with the
creation of any necessary database objects and call out every operation
being performed explicitly. If reporting a bug in the REST API, be sure to
reconstruct the raw HTTP request(s) being made: Don't rely on a client
library such as pynautobot.
-->
When creating a new prefix while specifying any tags, it is not created with them right away; it is first created without tags and then updated with them. The result is correct, having the prefix with tags created. But the process is wrong, because it triggers 2 webhooks (`type create` and `type update`), when there should only be a single one of `type create`, as this is a creation.
### Steps to Reproduce
1. create new prefix with tags

2. check changelog


<!-- What did you expect to happen? -->
### Expected Behavior
A single change log entry recording the creation of the prefix
<!-- What happened instead? -->
### Observed Behavior
A change log entry for the creation of the prefix and another one logging an update (adding tags)
</issue>
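As a side note (not part of the original issue), a prefix form that includes tags saves the object first (`post_save` fires with `created=True`) and assigns the many-to-many tags afterwards (`m2m_changed` fires with `post_add`), so the handlers in the file below record one create and one update. A small, self-contained toy sketch of collapsing those two events per request, shown only to illustrate the idea, not as Nautobot's actual fix:

```python
from collections import OrderedDict

# Signal sequence seen for a single "create prefix with tags" submission (assumed values).
events = [
    {"request_id": "req-1", "obj": "Prefix 10.0.0.0/24", "action": "create"},  # post_save
    {"request_id": "req-1", "obj": "Prefix 10.0.0.0/24", "action": "update"},  # m2m_changed (tags)
]

collapsed = OrderedDict()
for event in events:
    key = (event["request_id"], event["obj"])
    if key in collapsed and collapsed[key]["action"] == "create":
        continue  # the tag assignment belongs to the creation; don't log a second change
    collapsed[key] = event

print(list(collapsed.values()))  # -> only the single "create" event remains
```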
<code>
[start of nautobot/extras/signals.py]
1 import os
2 import random
3 import shutil
4 import uuid
5 import logging
6 from datetime import timedelta
7
8 from cacheops.signals import cache_invalidated, cache_read
9 from django.conf import settings
10 from django.contrib.contenttypes.models import ContentType
11 from django.db import transaction
12 from django.db.models.signals import m2m_changed, pre_delete
13 from django.dispatch import receiver
14 from django.utils import timezone
15 from django_prometheus.models import model_deletes, model_inserts, model_updates
16 from prometheus_client import Counter
17
18 from nautobot.extras.tasks import delete_custom_field_data, provision_field
19 from .choices import JobResultStatusChoices, ObjectChangeActionChoices
20 from .models import CustomField, GitRepository, JobResult, ObjectChange
21 from .webhooks import enqueue_webhooks
22
23 logger = logging.getLogger("nautobot.extras.signals")
24
25
26 #
27 # Change logging/webhooks
28 #
29
30
31 def _get_user_if_authenticated(request, objectchange):
32 """Return the user object associated with the request if the user is defined.
33
34     If the user is not defined, log a warning to indicate that the user couldn't be retrieved from the request.
35     This is a workaround for a recurring issue where the user is randomly not present in the request object.
36 A similar issue was reported in NetBox https://github.com/netbox-community/netbox/issues/5142
37 """
38 if request.user.is_authenticated:
39 return request.user
40 else:
41 logger.warning(f"Unable to retrieve the user while creating the changelog for {objectchange.changed_object}")
42
43
44 def _handle_changed_object(request, sender, instance, **kwargs):
45 """
46 Fires when an object is created or updated.
47 """
48 # Queue the object for processing once the request completes
49 if kwargs.get("created"):
50 action = ObjectChangeActionChoices.ACTION_CREATE
51 elif "created" in kwargs:
52 action = ObjectChangeActionChoices.ACTION_UPDATE
53 elif kwargs.get("action") in ["post_add", "post_remove"] and kwargs["pk_set"]:
54 # m2m_changed with objects added or removed
55 action = ObjectChangeActionChoices.ACTION_UPDATE
56 else:
57 return
58
59 # Record an ObjectChange if applicable
60 if hasattr(instance, "to_objectchange"):
61 objectchange = instance.to_objectchange(action)
62 objectchange.user = _get_user_if_authenticated(request, objectchange)
63 objectchange.request_id = request.id
64 objectchange.save()
65
66 # Enqueue webhooks
67 enqueue_webhooks(instance, request.user, request.id, action)
68
69 # Increment metric counters
70 if action == ObjectChangeActionChoices.ACTION_CREATE:
71 model_inserts.labels(instance._meta.model_name).inc()
72 elif action == ObjectChangeActionChoices.ACTION_UPDATE:
73 model_updates.labels(instance._meta.model_name).inc()
74
75 # Housekeeping: 0.1% chance of clearing out expired ObjectChanges
76 if settings.CHANGELOG_RETENTION and random.randint(1, 1000) == 1:
77 cutoff = timezone.now() - timedelta(days=settings.CHANGELOG_RETENTION)
78 ObjectChange.objects.filter(time__lt=cutoff).delete()
79
80
81 def _handle_deleted_object(request, sender, instance, **kwargs):
82 """
83 Fires when an object is deleted.
84 """
85 # Record an ObjectChange if applicable
86 if hasattr(instance, "to_objectchange"):
87 objectchange = instance.to_objectchange(ObjectChangeActionChoices.ACTION_DELETE)
88 objectchange.user = _get_user_if_authenticated(request, objectchange)
89 objectchange.request_id = request.id
90 objectchange.save()
91
92 # Enqueue webhooks
93 enqueue_webhooks(instance, request.user, request.id, ObjectChangeActionChoices.ACTION_DELETE)
94
95 # Increment metric counters
96 model_deletes.labels(instance._meta.model_name).inc()
97
98
99 #
100 # Custom fields
101 #
102
103
104 def handle_cf_removed_obj_types(instance, action, pk_set, **kwargs):
105 """
106 Handle the cleanup of old custom field data when a CustomField is removed from one or more ContentTypes.
107 """
108 if action == "post_remove":
109 # Existing content types have been removed from the custom field, delete their data
110 transaction.on_commit(lambda: delete_custom_field_data.delay(instance.name, pk_set))
111
112 elif action == "post_add":
113 # New content types have been added to the custom field, provision them
114 transaction.on_commit(lambda: provision_field.delay(instance.pk, pk_set))
115
116
117 m2m_changed.connect(handle_cf_removed_obj_types, sender=CustomField.content_types.through)
118
119
120 #
121 # Caching
122 #
123
124 cacheops_cache_hit = Counter("cacheops_cache_hit", "Number of cache hits")
125 cacheops_cache_miss = Counter("cacheops_cache_miss", "Number of cache misses")
126 cacheops_cache_invalidated = Counter("cacheops_cache_invalidated", "Number of cache invalidations")
127
128
129 def cache_read_collector(sender, func, hit, **kwargs):
130 if hit:
131 cacheops_cache_hit.inc()
132 else:
133 cacheops_cache_miss.inc()
134
135
136 def cache_invalidated_collector(sender, obj_dict, **kwargs):
137 cacheops_cache_invalidated.inc()
138
139
140 cache_read.connect(cache_read_collector)
141 cache_invalidated.connect(cache_invalidated_collector)
142
143
144 #
145 # Datasources
146 #
147
148
149 @receiver(pre_delete, sender=GitRepository)
150 def git_repository_pre_delete(instance, **kwargs):
151 """
152 When a GitRepository is deleted, invoke all registered callbacks, then remove it from the local filesystem.
153
154 Note that GitRepository create/update operations enqueue a background job to handle the sync/resync;
155 this operation, by contrast, happens in the foreground as it needs to complete before we allow the
156 GitRepository itself to be deleted.
157 """
158 from nautobot.extras.datasources import refresh_datasource_content
159
160 job_result = JobResult.objects.create(
161 name=instance.name,
162 obj_type=ContentType.objects.get_for_model(instance),
163 user=None,
164 job_id=uuid.uuid4(),
165 status=JobResultStatusChoices.STATUS_RUNNING,
166 )
167
168 refresh_datasource_content("extras.gitrepository", instance, None, job_result, delete=True)
169
170 if job_result.status not in JobResultStatusChoices.TERMINAL_STATE_CHOICES:
171 job_result.set_status(JobResultStatusChoices.STATUS_COMPLETED)
172 job_result.save()
173
174 # TODO: In a distributed Nautobot deployment, each Django instance and/or RQ worker instance may have its own clone
175 # of this repository; we need some way to ensure that all such clones are deleted.
176 # For now we just delete the one that we have locally and rely on other methods (notably get_jobs())
177 # to clean up other clones as they're encountered.
178 if os.path.isdir(instance.filesystem_path):
179 shutil.rmtree(instance.filesystem_path)
180
[end of nautobot/extras/signals.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/nautobot/extras/signals.py b/nautobot/extras/signals.py
--- a/nautobot/extras/signals.py
+++ b/nautobot/extras/signals.py
@@ -45,23 +45,33 @@
"""
Fires when an object is created or updated.
"""
- # Queue the object for processing once the request completes
+ m2m_changed = False
+
+ # Determine the type of change being made
if kwargs.get("created"):
action = ObjectChangeActionChoices.ACTION_CREATE
elif "created" in kwargs:
action = ObjectChangeActionChoices.ACTION_UPDATE
elif kwargs.get("action") in ["post_add", "post_remove"] and kwargs["pk_set"]:
# m2m_changed with objects added or removed
+ m2m_changed = True
action = ObjectChangeActionChoices.ACTION_UPDATE
else:
return
# Record an ObjectChange if applicable
if hasattr(instance, "to_objectchange"):
- objectchange = instance.to_objectchange(action)
- objectchange.user = _get_user_if_authenticated(request, objectchange)
- objectchange.request_id = request.id
- objectchange.save()
+ if m2m_changed:
+ ObjectChange.objects.filter(
+ changed_object_type=ContentType.objects.get_for_model(instance),
+ changed_object_id=instance.pk,
+ request_id=request.id,
+ ).update(object_data=instance.to_objectchange(action).object_data)
+ else:
+ objectchange = instance.to_objectchange(action)
+ objectchange.user = _get_user_if_authenticated(request, objectchange)
+ objectchange.request_id = request.id
+ objectchange.save()
# Enqueue webhooks
enqueue_webhooks(instance, request.user, request.id, action)
|
{"golden_diff": "diff --git a/nautobot/extras/signals.py b/nautobot/extras/signals.py\n--- a/nautobot/extras/signals.py\n+++ b/nautobot/extras/signals.py\n@@ -45,23 +45,33 @@\n \"\"\"\n Fires when an object is created or updated.\n \"\"\"\n- # Queue the object for processing once the request completes\n+ m2m_changed = False\n+\n+ # Determine the type of change being made\n if kwargs.get(\"created\"):\n action = ObjectChangeActionChoices.ACTION_CREATE\n elif \"created\" in kwargs:\n action = ObjectChangeActionChoices.ACTION_UPDATE\n elif kwargs.get(\"action\") in [\"post_add\", \"post_remove\"] and kwargs[\"pk_set\"]:\n # m2m_changed with objects added or removed\n+ m2m_changed = True\n action = ObjectChangeActionChoices.ACTION_UPDATE\n else:\n return\n \n # Record an ObjectChange if applicable\n if hasattr(instance, \"to_objectchange\"):\n- objectchange = instance.to_objectchange(action)\n- objectchange.user = _get_user_if_authenticated(request, objectchange)\n- objectchange.request_id = request.id\n- objectchange.save()\n+ if m2m_changed:\n+ ObjectChange.objects.filter(\n+ changed_object_type=ContentType.objects.get_for_model(instance),\n+ changed_object_id=instance.pk,\n+ request_id=request.id,\n+ ).update(object_data=instance.to_objectchange(action).object_data)\n+ else:\n+ objectchange = instance.to_objectchange(action)\n+ objectchange.user = _get_user_if_authenticated(request, objectchange)\n+ objectchange.request_id = request.id\n+ objectchange.save()\n \n # Enqueue webhooks\n enqueue_webhooks(instance, request.user, request.id, action)\n", "issue": "Prefix with tags is created in 2 steps\n<!--\r\n NOTE: IF YOUR ISSUE DOES NOT FOLLOW THIS TEMPLATE, IT WILL BE CLOSED.\r\n\r\n This form is only for reporting reproducible bugs. If you need assistance\r\n with Nautobot installation, or if you have a general question, please start a\r\n discussion instead: https://github.com/nautobot/nautobot/discussions\r\n\r\n Please describe the environment in which you are running Nautobot. Be sure\r\n that you are running an unmodified instance of the latest stable release\r\n before submitting a bug report, and that any plugins have been disabled.\r\n-->\r\n### Environment\r\n* Python version: 3.6.8\r\n* Nautobot version: v1.0.0b2\r\n\r\n<!--\r\n Describe in detail the exact steps that someone else can take to reproduce\r\n this bug using the current stable release of Nautobot. Begin with the\r\n creation of any necessary database objects and call out every operation\r\n being performed explicitly. If reporting a bug in the REST API, be sure to\r\n reconstruct the raw HTTP request(s) being made: Don't rely on a client\r\n library such as pynautobot.\r\n-->\r\n\r\nWhen creating a new prefix while specifying any tags, it is not created with it right away but first without tags and than it is updated with them. The result is correct, having prefix with tags created. But the process is wrong, because it triggers 2 webhooks (`type create` and `type update`), but there should only be single for `type update`, as this is creation.\r\n\r\n### Steps to Reproduce\r\n1. create new prefix with tags\r\n\r\n\r\n\r\n2. check changelog\r\n\r\n\r\n\r\n<!-- What did you expect to happen? -->\r\n### Expected Behavior\r\nSingle change logging creation of the prefix\r\n\r\n<!-- What happened instead? 
-->\r\n### Observed Behavior\r\nChange logging creation of the prefix and another one logging update (adding tags)\r\n\n", "before_files": [{"content": "import os\nimport random\nimport shutil\nimport uuid\nimport logging\nfrom datetime import timedelta\n\nfrom cacheops.signals import cache_invalidated, cache_read\nfrom django.conf import settings\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.db import transaction\nfrom django.db.models.signals import m2m_changed, pre_delete\nfrom django.dispatch import receiver\nfrom django.utils import timezone\nfrom django_prometheus.models import model_deletes, model_inserts, model_updates\nfrom prometheus_client import Counter\n\nfrom nautobot.extras.tasks import delete_custom_field_data, provision_field\nfrom .choices import JobResultStatusChoices, ObjectChangeActionChoices\nfrom .models import CustomField, GitRepository, JobResult, ObjectChange\nfrom .webhooks import enqueue_webhooks\n\nlogger = logging.getLogger(\"nautobot.extras.signals\")\n\n\n#\n# Change logging/webhooks\n#\n\n\ndef _get_user_if_authenticated(request, objectchange):\n \"\"\"Return the user object associated with the request if the user is defined.\n\n If the user is not defined, log a warning to indicate that the user couldn't be retrived from the request\n This is a workaround to fix a recurring issue where the user shouldn't be present in the request object randomly.\n A similar issue was reported in NetBox https://github.com/netbox-community/netbox/issues/5142\n \"\"\"\n if request.user.is_authenticated:\n return request.user\n else:\n logger.warning(f\"Unable to retrieve the user while creating the changelog for {objectchange.changed_object}\")\n\n\ndef _handle_changed_object(request, sender, instance, **kwargs):\n \"\"\"\n Fires when an object is created or updated.\n \"\"\"\n # Queue the object for processing once the request completes\n if kwargs.get(\"created\"):\n action = ObjectChangeActionChoices.ACTION_CREATE\n elif \"created\" in kwargs:\n action = ObjectChangeActionChoices.ACTION_UPDATE\n elif kwargs.get(\"action\") in [\"post_add\", \"post_remove\"] and kwargs[\"pk_set\"]:\n # m2m_changed with objects added or removed\n action = ObjectChangeActionChoices.ACTION_UPDATE\n else:\n return\n\n # Record an ObjectChange if applicable\n if hasattr(instance, \"to_objectchange\"):\n objectchange = instance.to_objectchange(action)\n objectchange.user = _get_user_if_authenticated(request, objectchange)\n objectchange.request_id = request.id\n objectchange.save()\n\n # Enqueue webhooks\n enqueue_webhooks(instance, request.user, request.id, action)\n\n # Increment metric counters\n if action == ObjectChangeActionChoices.ACTION_CREATE:\n model_inserts.labels(instance._meta.model_name).inc()\n elif action == ObjectChangeActionChoices.ACTION_UPDATE:\n model_updates.labels(instance._meta.model_name).inc()\n\n # Housekeeping: 0.1% chance of clearing out expired ObjectChanges\n if settings.CHANGELOG_RETENTION and random.randint(1, 1000) == 1:\n cutoff = timezone.now() - timedelta(days=settings.CHANGELOG_RETENTION)\n ObjectChange.objects.filter(time__lt=cutoff).delete()\n\n\ndef _handle_deleted_object(request, sender, instance, **kwargs):\n \"\"\"\n Fires when an object is deleted.\n \"\"\"\n # Record an ObjectChange if applicable\n if hasattr(instance, \"to_objectchange\"):\n objectchange = instance.to_objectchange(ObjectChangeActionChoices.ACTION_DELETE)\n objectchange.user = _get_user_if_authenticated(request, objectchange)\n objectchange.request_id = 
request.id\n objectchange.save()\n\n # Enqueue webhooks\n enqueue_webhooks(instance, request.user, request.id, ObjectChangeActionChoices.ACTION_DELETE)\n\n # Increment metric counters\n model_deletes.labels(instance._meta.model_name).inc()\n\n\n#\n# Custom fields\n#\n\n\ndef handle_cf_removed_obj_types(instance, action, pk_set, **kwargs):\n \"\"\"\n Handle the cleanup of old custom field data when a CustomField is removed from one or more ContentTypes.\n \"\"\"\n if action == \"post_remove\":\n # Existing content types have been removed from the custom field, delete their data\n transaction.on_commit(lambda: delete_custom_field_data.delay(instance.name, pk_set))\n\n elif action == \"post_add\":\n # New content types have been added to the custom field, provision them\n transaction.on_commit(lambda: provision_field.delay(instance.pk, pk_set))\n\n\nm2m_changed.connect(handle_cf_removed_obj_types, sender=CustomField.content_types.through)\n\n\n#\n# Caching\n#\n\ncacheops_cache_hit = Counter(\"cacheops_cache_hit\", \"Number of cache hits\")\ncacheops_cache_miss = Counter(\"cacheops_cache_miss\", \"Number of cache misses\")\ncacheops_cache_invalidated = Counter(\"cacheops_cache_invalidated\", \"Number of cache invalidations\")\n\n\ndef cache_read_collector(sender, func, hit, **kwargs):\n if hit:\n cacheops_cache_hit.inc()\n else:\n cacheops_cache_miss.inc()\n\n\ndef cache_invalidated_collector(sender, obj_dict, **kwargs):\n cacheops_cache_invalidated.inc()\n\n\ncache_read.connect(cache_read_collector)\ncache_invalidated.connect(cache_invalidated_collector)\n\n\n#\n# Datasources\n#\n\n\n@receiver(pre_delete, sender=GitRepository)\ndef git_repository_pre_delete(instance, **kwargs):\n \"\"\"\n When a GitRepository is deleted, invoke all registered callbacks, then remove it from the local filesystem.\n\n Note that GitRepository create/update operations enqueue a background job to handle the sync/resync;\n this operation, by contrast, happens in the foreground as it needs to complete before we allow the\n GitRepository itself to be deleted.\n \"\"\"\n from nautobot.extras.datasources import refresh_datasource_content\n\n job_result = JobResult.objects.create(\n name=instance.name,\n obj_type=ContentType.objects.get_for_model(instance),\n user=None,\n job_id=uuid.uuid4(),\n status=JobResultStatusChoices.STATUS_RUNNING,\n )\n\n refresh_datasource_content(\"extras.gitrepository\", instance, None, job_result, delete=True)\n\n if job_result.status not in JobResultStatusChoices.TERMINAL_STATE_CHOICES:\n job_result.set_status(JobResultStatusChoices.STATUS_COMPLETED)\n job_result.save()\n\n # TODO: In a distributed Nautobot deployment, each Django instance and/or RQ worker instance may have its own clone\n # of this repository; we need some way to ensure that all such clones are deleted.\n # For now we just delete the one that we have locally and rely on other methods (notably get_jobs())\n # to clean up other clones as they're encountered.\n if os.path.isdir(instance.filesystem_path):\n shutil.rmtree(instance.filesystem_path)\n", "path": "nautobot/extras/signals.py"}]}
| 2,993 | 391 |
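The golden diff in the row above resolves the double change-log entry by letting the `m2m_changed` path update the `ObjectChange` row that `post_save` already wrote for the same request, instead of saving a second one. A rough, framework-free sketch of that consolidation idea (plain Python with hypothetical `ChangeLog`/`record` names — not Nautobot's actual models or signal wiring) might look like this:

```python
# Simplified illustration of the consolidation shown in the diff above.
# ChangeLog/record are made-up names; Nautobot works with ObjectChange rows
# and Django signals instead of an in-memory list.

CREATE, UPDATE = "create", "update"


class ChangeLog:
    """In-memory stand-in for the ObjectChange table."""

    def __init__(self):
        self.entries = []

    def record(self, request_id, obj_id, action, data, from_m2m=False):
        if from_m2m:
            # Tag assignment arrives via m2m_changed right after post_save:
            # refresh the entry already written for this request/object
            # instead of appending a second change.
            for entry in self.entries:
                if entry["request_id"] == request_id and entry["obj_id"] == obj_id:
                    entry["data"] = data
                    return entry
            return None  # nothing recorded earlier in this request; leave it alone
        entry = {"request_id": request_id, "obj_id": obj_id, "action": action, "data": data}
        self.entries.append(entry)
        return entry


log = ChangeLog()
# post_save fires first (prefix created without tags) ...
log.record("req-1", "prefix-1", CREATE, {"prefix": "10.0.0.0/24", "tags": []})
# ... then m2m_changed adds the tags within the same request.
log.record("req-1", "prefix-1", UPDATE, {"prefix": "10.0.0.0/24", "tags": ["lab"]}, from_m2m=True)

assert len(log.entries) == 1
assert log.entries[0]["action"] == CREATE and log.entries[0]["data"]["tags"] == ["lab"]
```

In the real patch the lookup is a queryset filter on `changed_object_type`, `changed_object_id` and `request_id`, and only `object_data` is rewritten.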
gh_patches_debug_60264
|
rasdani/github-patches
|
git_diff
|
ARM-DOE__ACT-396
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add Google Analytics ID
Add a Google Analytics ID to the `conf.py` file used by sphinx. For those interested in having access to the analytics, you will need to send over your Gmail address
Fixes #396
</issue>
<code>
[start of docs/source/conf.py]
1 #!/usr/bin/env python3
2 # -*- coding: utf-8 -*-
3 #
4 # Atmospheric data Community Toolkit documentation build configuration file, created by
5 # sphinx-quickstart on Thu Jun 28 12:35:56 2018.
6 #
7 # This file is execfile()d with the current directory set to its
8 # containing dir.
9 #
10 # Note that not all possible configuration values are present in this
11 # autogenerated file.
12 #
13 # All configuration values have a default; values that are commented out
14 # serve to show the default.
15
16 # If extensions (or modules to document with autodoc) are in another directory,
17 # add these directories to sys.path here. If the directory is relative to the
18 # documentation root, use os.path.abspath to make it absolute, like shown here.
19 #
20 # import os
21 # import sys
22 # sys.path.insert(0, os.path.abspath('.'))
23
24
25 # -- General configuration ------------------------------------------------
26
27 # If your documentation needs a minimal Sphinx version, state it here.
28 #
29 # needs_sphinx = '1.0'
30
31 # Add any Sphinx extension module names here, as strings. They can be
32 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
33 # ones.
34 extensions = [
35 'sphinx.ext.autodoc',
36 'sphinx.ext.autosummary',
37 'sphinx.ext.githubpages',
38 'sphinx.ext.intersphinx',
39 'sphinx.ext.mathjax',
40 'sphinx.ext.viewcode',
41 'IPython.sphinxext.ipython_directive',
42 'IPython.sphinxext.ipython_console_highlighting',
43 'matplotlib.sphinxext.plot_directive',
44 'sphinx_copybutton',
45 'sphinx_gallery.gen_gallery',
46 'sphinx.ext.napoleon',
47 ]
48
49 exclude_patterns = ['_build', '**.ipynb_checkpoints']
50 sphinx_gallery_conf = {
51 'examples_dirs': '../../examples',
52 'gallery_dirs': 'source/auto_examples'
53 }
54
55 # Configuration options for plot_directive. See:
56 # https://github.com/matplotlib/matplotlib/blob/f3ed922d935751e08494e5fb5311d3050a3b637b/lib/matplotlib/sphinxext/plot_directive.py#L81
57 plot_html_show_source_link = False
58 plot_html_show_formats = False
59
60 # Generate the API documentation when building
61 autoclass_content = "both"
62 autosummary_generate = True
63 autosummary_imported_members = True
64
65 # Otherwise, the Return parameter list looks different from the Parameter list
66 napoleon_use_rtype = False
67 napoleon_use_ivar = True
68 napoleon_include_init_with_doc = False
69 napoleon_use_param = False
70
71 # Add any paths that contain templates here, relative to this directory.
72 templates_path = ['_templates']
73
74 # The suffix(es) of source filenames.
75 # You can specify multiple suffix as a list of string:
76 #
77 # source_suffix = ['.rst', '.md']
78 source_suffix = '.rst'
79
80 # The master toctree document.
81 master_doc = 'index'
82
83 # General information about the project.
84 project = 'Atmospheric data Community Toolkit'
85 copyright = '2018, ACT Developers'
86 author = 'ACT Developers'
87
88 # The version info for the project you're documenting, acts as replacement for
89 # |version| and |release|, also used in various other places throughout the
90 # built documents.
91 #
92 import act
93 # The short X.Y version.
94 version = act.__version__
95 # The full version, including alpha/beta/rc tags.
96 release = act.__version__
97
98 # The language for content autogenerated by Sphinx. Refer to documentation
99 # for a list of supported languages.
100 #
101 # This is also used if you do content translation via gettext catalogs.
102 # Usually you set "language" from the command line for these cases.
103 language = None
104
105 # List of patterns, relative to source directory, that match files and
106 # directories to ignore when looking for source files.
107 # This patterns also effect to html_static_path and html_extra_path
108
109 # The name of the Pygments (syntax highlighting) style to use.
110 pygments_style = 'sphinx'
111
112 # If true, `todo` and `todoList` produce output, else they produce nothing.
113 todo_include_todos = False
114
115
116 # -- Options for HTML output ----------------------------------------------
117
118 # The theme to use for HTML and HTML Help pages. See the documentation for
119 # a list of builtin themes.
120 #
121 html_theme = 'sphinx_rtd_theme'
122 import sphinx_rtd_theme
123 html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
124
125 # Theme options are theme-specific and customize the look and feel of a theme
126 # further. For a list of options available for each theme, see the
127 # documentation.
128 #
129 # html_theme_options = {}
130
131 # Add any paths that contain custom static files (such as style sheets) here,
132 # relative to this directory. They are copied after the builtin static files,
133 # so a file named "default.css" will overwrite the builtin "default.css".
134 html_static_path = ['_static']
135
136 # Custom sidebar templates, must be a dictionary that maps document names
137 # to template names.
138 #
139 # This is required for the alabaster theme
140 # refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
141 html_sidebars = {
142 '**': [
143 'relations.html', # needs 'show_related': True theme option to display
144 'searchbox.html',
145 ]
146 }
147
148
149 # -- Options for HTMLHelp output ------------------------------------------
150
151 # Output file base name for HTML help builder.
152 htmlhelp_basename = 'act'
153
154
155 # -- Options for LaTeX output ---------------------------------------------
156
157 latex_elements = {
158 # The paper size ('letterpaper' or 'a4paper').
159 #
160 # 'papersize': 'letterpaper',
161
162 # The font size ('10pt', '11pt' or '12pt').
163 #
164 # 'pointsize': '10pt',
165
166 # Additional stuff for the LaTeX preamble.
167 #
168 # 'preamble': '',
169
170 # Latex figure (float) alignment
171 #
172 # 'figure_align': 'htbp',
173 }
174
175 # Grouping the document tree into LaTeX files. List of tuples
176 # (source start file, target name, title,
177 # author, documentclass [howto, manual, or own class]).
178 latex_documents = [
179 (master_doc, 'act.tex', 'Atmospheric data Community Toolkit Documentation',
180 'Contributors', 'manual'),
181 ]
182
183
184 # -- Options for manual page output ---------------------------------------
185
186 # One entry per manual page. List of tuples
187 # (source start file, name, description, authors, manual section).
188 man_pages = [
189 (master_doc, 'act', 'Atmospheric data Community Toolkit Documentation',
190 [author], 1)
191 ]
192
193
194 # -- Options for Texinfo output -------------------------------------------
195
196 # Grouping the document tree into Texinfo files. List of tuples
197 # (source start file, target name, title, author,
198 # dir menu entry, description, category)
199 texinfo_documents = [
200 (master_doc, 'act', 'Atmospheric data Community Toolkit Documentation',
201 author, 'act', 'Package for connecting users to the data',
202 'Miscellaneous'),
203 ]
204
205
206
207
208 # Example configuration for intersphinx: refer to the Python standard library.
209 intersphinx_mapping = {
210 'python': ('https://docs.python.org/3/', None),
211 'numpy': ('https://docs.scipy.org/doc/numpy/', None),
212 'scipy': ('https://docs.scipy.org/doc/scipy/reference/', None),
213 'pandas': ('https://pandas.pydata.org/pandas-docs/stable', None),
214 'matplotlib': ('https://matplotlib.org', None),
215 }
216
[end of docs/source/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/docs/source/conf.py b/docs/source/conf.py
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -126,7 +126,9 @@
# further. For a list of options available for each theme, see the
# documentation.
#
-# html_theme_options = {}
+html_theme_options = {
+ 'google_analytics_id': 'UA-179020619-3',
+}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
|
{"golden_diff": "diff --git a/docs/source/conf.py b/docs/source/conf.py\n--- a/docs/source/conf.py\n+++ b/docs/source/conf.py\n@@ -126,7 +126,9 @@\n # further. For a list of options available for each theme, see the\n # documentation.\n #\n-# html_theme_options = {}\n+html_theme_options = {\n+ 'google_analytics_id': 'UA-179020619-3',\n+}\n \n # Add any paths that contain custom static files (such as style sheets) here,\n # relative to this directory. They are copied after the builtin static files,\n", "issue": "Add Google Analytics ID\nAdd a Google Analytics ID to the `conf.py` file used by sphinx. For those interested in having access the analytics, you will need to send over your gmail address\r\n\r\nFixes #396 \n", "before_files": [{"content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Atmospheric data Community Toolkit documentation build configuration file, created by\n# sphinx-quickstart on Thu Jun 28 12:35:56 2018.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n# import os\n# import sys\n# sys.path.insert(0, os.path.abspath('.'))\n\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.autosummary',\n 'sphinx.ext.githubpages',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.mathjax',\n 'sphinx.ext.viewcode',\n 'IPython.sphinxext.ipython_directive',\n 'IPython.sphinxext.ipython_console_highlighting',\n 'matplotlib.sphinxext.plot_directive',\n 'sphinx_copybutton',\n 'sphinx_gallery.gen_gallery',\n 'sphinx.ext.napoleon',\n]\n\nexclude_patterns = ['_build', '**.ipynb_checkpoints']\nsphinx_gallery_conf = {\n 'examples_dirs': '../../examples',\n 'gallery_dirs': 'source/auto_examples'\n}\n\n# Configuration options for plot_directive. 
See:\n# https://github.com/matplotlib/matplotlib/blob/f3ed922d935751e08494e5fb5311d3050a3b637b/lib/matplotlib/sphinxext/plot_directive.py#L81\nplot_html_show_source_link = False\nplot_html_show_formats = False\n\n# Generate the API documentation when building\nautoclass_content = \"both\"\nautosummary_generate = True\nautosummary_imported_members = True\n\n# Otherwise, the Return parameter list looks different from the Parameter list\nnapoleon_use_rtype = False\nnapoleon_use_ivar = True\nnapoleon_include_init_with_doc = False\nnapoleon_use_param = False\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = '.rst'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = 'Atmospheric data Community Toolkit'\ncopyright = '2018, ACT Developers'\nauthor = 'ACT Developers'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\nimport act\n# The short X.Y version.\nversion = act.__version__\n# The full version, including alpha/beta/rc tags.\nrelease = act.__version__\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = False\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'sphinx_rtd_theme'\nimport sphinx_rtd_theme\nhtml_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\n# html_theme_options = {}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n#\n# This is required for the alabaster theme\n# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars\nhtml_sidebars = {\n '**': [\n 'relations.html', # needs 'show_related': True theme option to display\n 'searchbox.html',\n ]\n}\n\n\n# -- Options for HTMLHelp output ------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'act'\n\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, 'act.tex', 'Atmospheric data Community Toolkit Documentation',\n 'Contributors', 'manual'),\n]\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (master_doc, 'act', 'Atmospheric data Community Toolkit Documentation',\n [author], 1)\n]\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, 'act', 'Atmospheric data Community Toolkit Documentation',\n author, 'act', 'Package for connecting users to the data',\n 'Miscellaneous'),\n]\n\n\n\n\n# Example configuration for intersphinx: refer to the Python standard library.\nintersphinx_mapping = {\n 'python': ('https://docs.python.org/3/', None),\n 'numpy': ('https://docs.scipy.org/doc/numpy/', None),\n 'scipy': ('https://docs.scipy.org/doc/scipy/reference/', None),\n 'pandas': ('https://pandas.pydata.org/pandas-docs/stable', None),\n 'matplotlib': ('https://matplotlib.org', None),\n}\n", "path": "docs/source/conf.py"}]}
| 2,795 | 134 |
gh_patches_debug_17288
|
rasdani/github-patches
|
git_diff
|
freedomofpress__securedrop-83
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add Unit and Integration Tests
If you want to contribute, this would be a good place to start. Lots of people are suggesting pretty substantial changes to the codebase, and tests would make sure those changes don't unintentionally break things.
</issue>
<code>
[start of modules/deaddrop/files/deaddrop/journalist.py]
1 # -*- coding: utf-8 -*-
2 import os, datetime, uuid
3 import web
4 import config, crypto, store, version
5
6 urls = (
7 '/', 'index',
8 '/reply/', 'reply',
9 '/([A-Z1-7]+)/', 'col',
10 '/([A-Z1-7]+)/([0-9]+\.[0-9]+(?:_msg|_doc|)\.gpg)', 'doc'
11 )
12
13 render = web.template.render(config.JOURNALIST_TEMPLATES_DIR, base='base',
14 globals={'version':version.__version__})
15
16 class index:
17 def GET(self):
18 dirs = os.listdir(config.STORE_DIR)
19 cols = []
20 for d in dirs:
21 if not os.listdir(store.path(d)): continue
22 cols.append(web.storage(name=d, codename=crypto.displayid(d), date=
23 str(datetime.datetime.fromtimestamp(
24 os.stat(store.path(d)).st_mtime
25 )).split('.')[0]
26 ))
27 cols.sort(lambda x,y: cmp(x.date, y.date), reverse=True)
28
29 web.header('Cache-Control', 'no-cache, no-store, must-revalidate')
30 web.header('Pragma', 'no-cache')
31 web.header('Expires', '-1')
32 return render.index(cols)
33
34 class col:
35 def GET(self, sid):
36 fns = os.listdir(store.path(sid))
37 docs = []
38 for f in fns:
39 docs.append(web.storage(
40 name=f,
41 date=str(datetime.datetime.fromtimestamp(os.stat(store.path(sid, f)).st_mtime))
42 ))
43 docs.sort(lambda x,y: cmp(x.date, y.date))
44
45 haskey = bool(crypto.getkey(sid))
46
47 web.header('Cache-Control', 'no-cache, no-store, must-revalidate')
48 web.header('Pragma', 'no-cache')
49 web.header('Expires', '-1')
50 return render.col(docs, sid, haskey, codename=crypto.displayid(sid))
51
52 class doc:
53 def GET(self, sid, fn):
54 web.header('Content-Disposition', 'attachment; filename="' +
55 crypto.displayid(sid).replace(' ', '_') + '_' + fn + '"')
56
57 web.header('Cache-Control', 'no-cache, no-store, must-revalidate')
58 web.header('Pragma', 'no-cache')
59 web.header('Expires', '-1')
60 return file(store.path(sid, fn)).read()
61
62 class reply:
63 def GET(self):
64 raise web.seeother('/')
65
66 def POST(self):
67 i = web.input('sid', 'msg')
68 crypto.encrypt(crypto.getkey(i.sid), i.msg, output=
69 store.path(i.sid, 'reply-%.2f.gpg' % (uuid.uuid4().int, ))
70 )
71
72 web.header('Cache-Control', 'no-cache, no-store, must-revalidate')
73 web.header('Pragma', 'no-cache')
74 web.header('Expires', '-1')
75 return render.reply(i.sid)
76
77
78 web.config.debug = False
79 app = web.application(urls, locals())
80 application = app.wsgifunc()
81
82 if __name__ == "__main__":
83 app.run()
84
[end of modules/deaddrop/files/deaddrop/journalist.py]
[start of modules/deaddrop/files/deaddrop/crypto.py]
1 # -*- coding: utf-8 -*-
2 import os
3 import bcrypt, subprocess, threading
4 from Crypto.Random import random
5 import random as badrandom
6 import gnupg
7 import config
8 import store
9 from base64 import b32encode
10
11 # to fix gpg error #78 on production
12 os.environ['USERNAME'] = 'www-data'
13
14 GPG_KEY_TYPE = "RSA"
15 if 'DEADDROPENV' in os.environ and os.environ['DEADDROPENV'] == 'test':
16 # Use small keys to speed up tests (and try to cheat and avoid issues with
17 # async key generation)
18 GPG_KEY_LENGTH = "1024"
19 else:
20 GPG_KEY_LENGTH = "4096"
21
22 DEFAULT_WORDS_IN_RANDOM_ID = 8
23
24 class CryptoException(Exception): pass
25
26 def clean(s, also=''):
27 """
28 >>> clean("Hello, world!")
29 Traceback (most recent call last):
30 ...
31 CryptoException: invalid input
32 >>> clean("Helloworld")
33 'Helloworld'
34 """
35 ok = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
36 for c in s:
37 if c not in ok and c not in also: raise CryptoException("invalid input")
38 return s
39
40 words = file(config.WORD_LIST).read().split('\n')
41 def genrandomid(words_in_random_id = DEFAULT_WORDS_IN_RANDOM_ID):
42 return ' '.join(random.choice(words) for x in range(words_in_random_id))
43
44 def displayid(n, words_in_random_id = DEFAULT_WORDS_IN_RANDOM_ID):
45 badrandom_value = badrandom.WichmannHill()
46 badrandom_value.seed(n)
47 return ' '.join(badrandom_value.choice(words) for x in range(words_in_random_id))
48
49 def shash(s):
50 """
51 >>> shash('Hello, world!')
52 'EQZGCJBRGISGOTC2NZVWG6LILJBHEV3CINNEWSCLLFTUWZLFHBTS6WLCHFHTOLRSGQXUQLRQHFMXKOKKOQ4WQ6SXGZXDAS3Z'
53 """
54 return b32encode(bcrypt.hashpw(s, config.BCRYPT_SALT))
55
56 GPG_BINARY = 'gpg2'
57 try:
58 p = subprocess.Popen([GPG_BINARY, '--version'], stdout=subprocess.PIPE)
59 except OSError:
60 GPG_BINARY = 'gpg'
61 p = subprocess.Popen([GPG_BINARY, '--version'], stdout=subprocess.PIPE)
62
63 assert p.stdout.readline().split()[-1].split('.')[0] == '2', "upgrade GPG to 2.0"
64 gpg = gnupg.GPG(gpgbinary=GPG_BINARY, gnupghome=config.GPG_KEY_DIR)
65
66 def genkeypair(name, secret):
67 """
68 >>> if not gpg.list_keys(shash('randomid')):
69 ... genkeypair(shash('randomid'), 'randomid').type
70 ... else:
71 ... u'P'
72 u'P'
73 """
74 name, secret = clean(name), clean(secret, ' ')
75 return gpg.gen_key(gpg.gen_key_input(
76 key_type=GPG_KEY_TYPE, key_length=GPG_KEY_LENGTH,
77 passphrase=secret,
78 name_email="%[email protected]" % name
79 ))
80
81 def getkey(name):
82 for key in gpg.list_keys():
83 for uid in key['uids']:
84 if ' <%s@' % name in uid: return key['fingerprint']
85 return None
86
87 def _shquote(s):
88 return "\\'".join("'" + p + "'" for p in s.split("'"))
89 _gpghacklock = threading.Lock()
90
91 def encrypt(fp, s, output=None, fn=None):
92 r"""
93 >>> key = genkeypair('randomid', 'randomid')
94 >>> encrypt('randomid', "Goodbye, cruel world!")[:45]
95 '-----BEGIN PGP MESSAGE-----\nVersion: GnuPG v2'
96 """
97 if output:
98 store.verify(output)
99 fp = fp.replace(' ', '')
100 if isinstance(s, unicode):
101 s = s.encode('utf8')
102 if isinstance(s, str):
103 out = gpg.encrypt(s, [fp], output=output, always_trust=True)
104 else:
105 if fn:
106 with _gpghacklock:
107 oldname = gpg.gpgbinary
108 gpg.gpgbinary += ' --set-filename ' + _shquote(fn)
109 out = gpg.encrypt_file(s, [fp], output=output, always_trust=True)
110 gpg.gpgbinary = oldname
111 else:
112 out = gpg.encrypt_file(s, [fp], output=output, always_trust=True)
113 if out.ok:
114 return out.data
115 else:
116 raise CryptoException(out.stderr)
117
118 def decrypt(name, secret, s):
119 """
120 >>> key = genkeypair('randomid', 'randomid')
121 >>> decrypt('randomid', 'randomid',
122 ... encrypt('randomid', 'Goodbye, cruel world!')
123 ... )
124 'Goodbye, cruel world!'
125 """
126 return gpg.decrypt(s, passphrase=secret).data
127
128 def secureunlink(fn):
129 store.verify(fn)
130 return subprocess.check_call(['srm', fn])
131
132 # crash if we don't have srm:
133 try:
134 subprocess.check_call(['srm'], stdout=subprocess.PIPE)
135 except subprocess.CalledProcessError:
136 pass
137
138 if __name__ == "__main__":
139 import doctest
140 doctest.testmod()
141
[end of modules/deaddrop/files/deaddrop/crypto.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/modules/deaddrop/files/deaddrop/crypto.py b/modules/deaddrop/files/deaddrop/crypto.py
--- a/modules/deaddrop/files/deaddrop/crypto.py
+++ b/modules/deaddrop/files/deaddrop/crypto.py
@@ -34,7 +34,8 @@
"""
ok = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
for c in s:
- if c not in ok and c not in also: raise CryptoException("invalid input")
+ if c not in ok and c not in also:
+ raise CryptoException("invalid input: %s" % s)
return s
words = file(config.WORD_LIST).read().split('\n')
diff --git a/modules/deaddrop/files/deaddrop/journalist.py b/modules/deaddrop/files/deaddrop/journalist.py
--- a/modules/deaddrop/files/deaddrop/journalist.py
+++ b/modules/deaddrop/files/deaddrop/journalist.py
@@ -51,6 +51,7 @@
class doc:
def GET(self, sid, fn):
+ web.header('Content-Type', 'application/octet-stream')
web.header('Content-Disposition', 'attachment; filename="' +
crypto.displayid(sid).replace(' ', '_') + '_' + fn + '"')
|
{"golden_diff": "diff --git a/modules/deaddrop/files/deaddrop/crypto.py b/modules/deaddrop/files/deaddrop/crypto.py\n--- a/modules/deaddrop/files/deaddrop/crypto.py\n+++ b/modules/deaddrop/files/deaddrop/crypto.py\n@@ -34,7 +34,8 @@\n \"\"\"\n ok = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'\n for c in s:\n- if c not in ok and c not in also: raise CryptoException(\"invalid input\")\n+ if c not in ok and c not in also:\n+ raise CryptoException(\"invalid input: %s\" % s)\n return s\n \n words = file(config.WORD_LIST).read().split('\\n')\ndiff --git a/modules/deaddrop/files/deaddrop/journalist.py b/modules/deaddrop/files/deaddrop/journalist.py\n--- a/modules/deaddrop/files/deaddrop/journalist.py\n+++ b/modules/deaddrop/files/deaddrop/journalist.py\n@@ -51,6 +51,7 @@\n \n class doc:\n def GET(self, sid, fn):\n+ web.header('Content-Type', 'application/octet-stream')\n web.header('Content-Disposition', 'attachment; filename=\"' + \n crypto.displayid(sid).replace(' ', '_') + '_' + fn + '\"')\n", "issue": "Add Unit and Integration Tests\nIf you want to contribute this would be a good place to start. Lots of people are suggesting pretty substantial changes to the codebase and tests would make sure those changes don't unintentionally break things.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport os, datetime, uuid\nimport web\nimport config, crypto, store, version\n\nurls = (\n '/', 'index',\n '/reply/', 'reply',\n '/([A-Z1-7]+)/', 'col',\n '/([A-Z1-7]+)/([0-9]+\\.[0-9]+(?:_msg|_doc|)\\.gpg)', 'doc' \n)\n\nrender = web.template.render(config.JOURNALIST_TEMPLATES_DIR, base='base', \n globals={'version':version.__version__})\n\nclass index:\n def GET(self):\n dirs = os.listdir(config.STORE_DIR)\n cols = []\n for d in dirs:\n if not os.listdir(store.path(d)): continue\n cols.append(web.storage(name=d, codename=crypto.displayid(d), date=\n str(datetime.datetime.fromtimestamp(\n os.stat(store.path(d)).st_mtime\n )).split('.')[0]\n ))\n cols.sort(lambda x,y: cmp(x.date, y.date), reverse=True)\n\n web.header('Cache-Control', 'no-cache, no-store, must-revalidate')\n web.header('Pragma', 'no-cache')\n web.header('Expires', '-1')\n return render.index(cols)\n\nclass col:\n def GET(self, sid):\n fns = os.listdir(store.path(sid))\n docs = []\n for f in fns:\n docs.append(web.storage(\n name=f, \n date=str(datetime.datetime.fromtimestamp(os.stat(store.path(sid, f)).st_mtime))\n ))\n docs.sort(lambda x,y: cmp(x.date, y.date))\n \n haskey = bool(crypto.getkey(sid))\n\n web.header('Cache-Control', 'no-cache, no-store, must-revalidate')\n web.header('Pragma', 'no-cache')\n web.header('Expires', '-1')\n return render.col(docs, sid, haskey, codename=crypto.displayid(sid))\n \nclass doc:\n def GET(self, sid, fn):\n web.header('Content-Disposition', 'attachment; filename=\"' + \n crypto.displayid(sid).replace(' ', '_') + '_' + fn + '\"')\n\n web.header('Cache-Control', 'no-cache, no-store, must-revalidate')\n web.header('Pragma', 'no-cache')\n web.header('Expires', '-1')\n return file(store.path(sid, fn)).read()\n\nclass reply:\n def GET(self):\n raise web.seeother('/')\n \n def POST(self):\n i = web.input('sid', 'msg')\n crypto.encrypt(crypto.getkey(i.sid), i.msg, output=\n store.path(i.sid, 'reply-%.2f.gpg' % (uuid.uuid4().int, ))\n )\n\n web.header('Cache-Control', 'no-cache, no-store, must-revalidate')\n web.header('Pragma', 'no-cache')\n web.header('Expires', '-1')\n return render.reply(i.sid)\n \n\nweb.config.debug = False\napp = web.application(urls, locals())\napplication 
= app.wsgifunc()\n\nif __name__ == \"__main__\":\n app.run()\n", "path": "modules/deaddrop/files/deaddrop/journalist.py"}, {"content": "# -*- coding: utf-8 -*-\nimport os\nimport bcrypt, subprocess, threading\nfrom Crypto.Random import random\nimport random as badrandom\nimport gnupg\nimport config\nimport store\nfrom base64 import b32encode\n\n# to fix gpg error #78 on production\nos.environ['USERNAME'] = 'www-data'\n\nGPG_KEY_TYPE = \"RSA\"\nif 'DEADDROPENV' in os.environ and os.environ['DEADDROPENV'] == 'test':\n # Use small keys to speed up tests (and try to cheat and avoid issues with\n # async key generation)\n GPG_KEY_LENGTH = \"1024\"\nelse:\n GPG_KEY_LENGTH = \"4096\"\n\nDEFAULT_WORDS_IN_RANDOM_ID = 8\n\nclass CryptoException(Exception): pass\n\ndef clean(s, also=''):\n \"\"\"\n >>> clean(\"Hello, world!\")\n Traceback (most recent call last):\n ...\n CryptoException: invalid input\n >>> clean(\"Helloworld\")\n 'Helloworld'\n \"\"\"\n ok = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'\n for c in s:\n if c not in ok and c not in also: raise CryptoException(\"invalid input\")\n return s\n\nwords = file(config.WORD_LIST).read().split('\\n')\ndef genrandomid(words_in_random_id = DEFAULT_WORDS_IN_RANDOM_ID):\n return ' '.join(random.choice(words) for x in range(words_in_random_id))\n\ndef displayid(n, words_in_random_id = DEFAULT_WORDS_IN_RANDOM_ID):\n badrandom_value = badrandom.WichmannHill()\n badrandom_value.seed(n)\n return ' '.join(badrandom_value.choice(words) for x in range(words_in_random_id))\n\ndef shash(s):\n \"\"\"\n >>> shash('Hello, world!')\n 'EQZGCJBRGISGOTC2NZVWG6LILJBHEV3CINNEWSCLLFTUWZLFHBTS6WLCHFHTOLRSGQXUQLRQHFMXKOKKOQ4WQ6SXGZXDAS3Z'\n \"\"\"\n return b32encode(bcrypt.hashpw(s, config.BCRYPT_SALT))\n\nGPG_BINARY = 'gpg2'\ntry:\n p = subprocess.Popen([GPG_BINARY, '--version'], stdout=subprocess.PIPE)\nexcept OSError:\n GPG_BINARY = 'gpg'\n p = subprocess.Popen([GPG_BINARY, '--version'], stdout=subprocess.PIPE)\n\nassert p.stdout.readline().split()[-1].split('.')[0] == '2', \"upgrade GPG to 2.0\"\ngpg = gnupg.GPG(gpgbinary=GPG_BINARY, gnupghome=config.GPG_KEY_DIR)\n\ndef genkeypair(name, secret):\n \"\"\"\n >>> if not gpg.list_keys(shash('randomid')):\n ... genkeypair(shash('randomid'), 'randomid').type\n ... else:\n ... 
u'P'\n u'P'\n \"\"\"\n name, secret = clean(name), clean(secret, ' ')\n return gpg.gen_key(gpg.gen_key_input(\n key_type=GPG_KEY_TYPE, key_length=GPG_KEY_LENGTH,\n passphrase=secret,\n name_email=\"%[email protected]\" % name\n ))\n\ndef getkey(name):\n for key in gpg.list_keys():\n for uid in key['uids']:\n if ' <%s@' % name in uid: return key['fingerprint']\n return None\n\ndef _shquote(s):\n return \"\\\\'\".join(\"'\" + p + \"'\" for p in s.split(\"'\"))\n_gpghacklock = threading.Lock()\n\ndef encrypt(fp, s, output=None, fn=None):\n r\"\"\"\n >>> key = genkeypair('randomid', 'randomid')\n >>> encrypt('randomid', \"Goodbye, cruel world!\")[:45]\n '-----BEGIN PGP MESSAGE-----\\nVersion: GnuPG v2'\n \"\"\"\n if output:\n store.verify(output)\n fp = fp.replace(' ', '')\n if isinstance(s, unicode):\n s = s.encode('utf8')\n if isinstance(s, str):\n out = gpg.encrypt(s, [fp], output=output, always_trust=True)\n else:\n if fn:\n with _gpghacklock:\n oldname = gpg.gpgbinary\n gpg.gpgbinary += ' --set-filename ' + _shquote(fn)\n out = gpg.encrypt_file(s, [fp], output=output, always_trust=True)\n gpg.gpgbinary = oldname\n else:\n out = gpg.encrypt_file(s, [fp], output=output, always_trust=True)\n if out.ok:\n return out.data\n else:\n raise CryptoException(out.stderr)\n\ndef decrypt(name, secret, s):\n \"\"\"\n >>> key = genkeypair('randomid', 'randomid')\n >>> decrypt('randomid', 'randomid',\n ... encrypt('randomid', 'Goodbye, cruel world!')\n ... )\n 'Goodbye, cruel world!'\n \"\"\"\n return gpg.decrypt(s, passphrase=secret).data\n\ndef secureunlink(fn):\n store.verify(fn)\n return subprocess.check_call(['srm', fn])\n\n# crash if we don't have srm:\ntry:\n subprocess.check_call(['srm'], stdout=subprocess.PIPE)\nexcept subprocess.CalledProcessError:\n pass\n\nif __name__ == \"__main__\":\n import doctest\n doctest.testmod()\n", "path": "modules/deaddrop/files/deaddrop/crypto.py"}]}
| 2,953 | 285 |
gh_patches_debug_8501
|
rasdani/github-patches
|
git_diff
|
pyro-ppl__pyro-3250
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Docs builds are failing
Docs builds seem to have been failing since the 1.8.5 release. We should fix this before the 1.8.6 release.
https://readthedocs.org/projects/pyro-ppl/builds/20847164/
<img width="812" alt="image" src="https://github.com/pyro-ppl/pyro/assets/648532/45149fae-a72d-481a-aaf9-73262d50aa92">
</issue>
<code>
[start of docs/source/conf.py]
1 # Copyright (c) 2017-2019 Uber Technologies, Inc.
2 # SPDX-License-Identifier: Apache-2.0
3
4 import os
5 import sys
6
7 import sphinx_rtd_theme
8
9 # import pkg_resources
10
11 # -*- coding: utf-8 -*-
12 #
13 # Pyro documentation build configuration file, created by
14 # sphinx-quickstart on Thu Jun 15 17:16:14 2017.
15 #
16 # This file is execfile()d with the current directory set to its
17 # containing dir.
18 #
19 # Note that not all possible configuration values are present in this
20 # autogenerated file.
21 #
22 # All configuration values have a default; values that are commented out
23 # serve to show the default.
24
25 # If extensions (or modules to document with autodoc) are in another directory,
26 # add these directories to sys.path here. If the directory is relative to the
27 # documentation root, use os.path.abspath to make it absolute, like shown here.
28 #
29 sys.path.insert(0, os.path.abspath("../.."))
30
31 # -- General configuration ------------------------------------------------
32
33 # If your documentation needs a minimal Sphinx version, state it here.
34 #
35 # needs_sphinx = '1.0'
36
37 # Add any Sphinx extension module names here, as strings. They can be
38 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
39 # ones.
40 extensions = [
41 "sphinx.ext.intersphinx", #
42 "sphinx.ext.todo", #
43 "sphinx.ext.mathjax", #
44 "sphinx.ext.ifconfig", #
45 "sphinx.ext.viewcode", #
46 "sphinx.ext.githubpages", #
47 "sphinx.ext.graphviz", #
48 "sphinx.ext.autodoc",
49 "sphinx.ext.doctest",
50 'sphinx.ext.napoleon',
51 ]
52
53 # Disable documentation inheritance so as to avoid inheriting
54 # docstrings in a different format, e.g. when the parent class
55 # is a PyTorch class.
56
57 autodoc_inherit_docstrings = False
58
59 # Add any paths that contain templates here, relative to this directory.
60 templates_path = ["_templates"]
61
62 # The suffix(es) of source filenames.
63 # You can specify multiple suffix as a list of string:
64 #
65 # source_suffix = ['.rst', '.md']
66 source_suffix = ".rst"
67
68 # The master toctree document.
69 master_doc = "index"
70
71 # General information about the project.
72 project = u"Pyro"
73 copyright = u"2017-2018, Uber Technologies, Inc"
74 author = u"Uber AI Labs"
75
76 # The version info for the project you're documenting, acts as replacement for
77 # |version| and |release|, also used in various other places throughout the
78 # built documents.
79
80 version = ""
81
82 if "READTHEDOCS" not in os.environ:
83 # if developing locally, use pyro.__version__ as version
84 from pyro import __version__ # noqaE402
85
86 version = __version__
87
88 # release version
89 release = version
90
91 # The language for content autogenerated by Sphinx. Refer to documentation
92 # for a list of supported languages.
93 #
94 # This is also used if you do content translation via gettext catalogs.
95 # Usually you set "language" from the command line for these cases.
96 language = "en"
97
98 # List of patterns, relative to source directory, that match files and
99 # directories to ignore when looking for source files.
100 # This patterns also effect to html_static_path and html_extra_path
101 exclude_patterns = []
102
103 # The name of the Pygments (syntax highlighting) style to use.
104 pygments_style = "sphinx"
105
106 # If true, `todo` and `todoList` produce output, else they produce nothing.
107 todo_include_todos = True
108
109 # do not prepend module name to functions
110 add_module_names = False
111
112 # -- Options for HTML output ----------------------------------------------
113
114 # logo
115 html_logo = "_static/img/pyro_logo_wide.png"
116
117 # logo
118 html_favicon = "_static/img/favicon/favicon.ico"
119
120 # The theme to use for HTML and HTML Help pages. See the documentation for
121 # a list of builtin themes.
122 #
123 html_theme = "sphinx_rtd_theme"
124 html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
125
126 # Theme options are theme-specific and customize the look and feel of a theme
127 # further. For a list of options available for each theme, see the
128 # documentation.
129
130 html_theme_options = {
131 "navigation_depth": 3,
132 "logo_only": True,
133 }
134
135 # Add any paths that contain custom static files (such as style sheets) here,
136 # relative to this directory. They are copied after the builtin static files,
137 # so a file named "default.css" will overwrite the builtin "default.css".
138 html_static_path = ["_static"]
139 html_style = "css/pyro.css"
140
141 # -- Options for HTMLHelp output ------------------------------------------
142
143 # Output file base name for HTML help builder.
144 htmlhelp_basename = "Pyrodoc"
145
146 # -- Options for LaTeX output ---------------------------------------------
147
148 latex_elements = {
149 # The paper size ('letterpaper' or 'a4paper').
150 #
151 # 'papersize': 'letterpaper',
152 # The font size ('10pt', '11pt' or '12pt').
153 #
154 # 'pointsize': '10pt',
155 # Additional stuff for the LaTeX preamble.
156 #
157 # 'preamble': '',
158 # Latex figure (float) alignment
159 #
160 # 'figure_align': 'htbp',
161 }
162
163 # Grouping the document tree into LaTeX files. List of tuples
164 # (source start file, target name, title,
165 # author, documentclass [howto, manual, or own class]).
166 latex_documents = [
167 (master_doc, "Pyro.tex", u"Pyro Documentation", u"Uber AI Labs", "manual"),
168 ]
169
170 # -- Options for manual page output ---------------------------------------
171
172 # One entry per manual page. List of tuples
173 # (source start file, name, description, authors, manual section).
174 man_pages = [(master_doc, "pyro", u"Pyro Documentation", [author], 1)]
175
176 # -- Options for Texinfo output -------------------------------------------
177
178 # Grouping the document tree into Texinfo files. List of tuples
179 # (source start file, target name, title, author,
180 # dir menu entry, description, category)
181 texinfo_documents = [
182 (
183 master_doc,
184 "Pyro",
185 u"Pyro Documentation",
186 author,
187 "Pyro",
188 "Deep Universal Probabilistic Programming.",
189 "Miscellaneous",
190 ),
191 ]
192
193 # Example configuration for intersphinx: refer to the Python standard library.
194 intersphinx_mapping = {
195 "python": ("https://docs.python.org/3/", None),
196 "torch": ("https://pytorch.org/docs/master/", None),
197 "funsor": ("http://funsor.pyro.ai/en/stable/", None),
198 "opt_einsum": ("https://optimized-einsum.readthedocs.io/en/stable/", None),
199 "scipy": ("https://docs.scipy.org/doc/scipy/reference/", None),
200 "Bio": ("https://biopython.org/docs/latest/api/", None),
201 "horovod": ("https://horovod.readthedocs.io/en/stable/", None),
202 "graphviz": ("https://graphviz.readthedocs.io/en/stable/", None),
203 }
204
205 # document class constructors (__init__ methods):
206 """ comment out this functionality for now;
207 def skip(app, what, name, obj, skip, options):
208 if name == "__init__":
209 return False
210 return skip
211 """
212
213
214 def setup(app):
215 app.add_css_file("css/pyro.css")
216
217
218 # app.connect("autodoc-skip-member", skip)
219
220
221 # @jpchen's hack to get rtd builder to install latest pytorch
222 # See similar line in the install section of .travis.yml
223 if "READTHEDOCS" in os.environ:
224 os.system("pip install numpy")
225 os.system(
226 "pip install torch==1.11.0+cpu torchvision==0.12.0+cpu "
227 "-f https://download.pytorch.org/whl/torch_stable.html"
228 )
229
[end of docs/source/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
golden_diff:
diff --git a/docs/source/conf.py b/docs/source/conf.py
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -164,7 +164,9 @@
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
- (master_doc, "Pyro.tex", u"Pyro Documentation", u"Uber AI Labs", "manual"),
+ # Disabled pdf builds to unblock readthedocs failed builds;
+ # see https://github.com/pyro-ppl/pyro/issues/3248
+ # (master_doc, "Pyro.tex", u"Pyro Documentation", u"Uber AI Labs", "manual"),
]
# -- Options for manual page output ---------------------------------------
verification_info:
{"golden_diff": "diff --git a/docs/source/conf.py b/docs/source/conf.py\n--- a/docs/source/conf.py\n+++ b/docs/source/conf.py\n@@ -164,7 +164,9 @@\n # (source start file, target name, title,\n # author, documentclass [howto, manual, or own class]).\n latex_documents = [\n- (master_doc, \"Pyro.tex\", u\"Pyro Documentation\", u\"Uber AI Labs\", \"manual\"),\n+ # Disabled pdf builds to unblock readthedocs failed builds;\n+ # see https://github.com/pyro-ppl/pyro/issues/3248\n+ # (master_doc, \"Pyro.tex\", u\"Pyro Documentation\", u\"Uber AI Labs\", \"manual\"),\n ]\n \n # -- Options for manual page output ---------------------------------------\n", "issue": "Docs builds are failing\nDocs builds seem to have been failing since 1.8.5 release. We should fix this before the 1.8.6 release.\r\n\r\nhttps://readthedocs.org/projects/pyro-ppl/builds/20847164/\r\n<img width=\"812\" alt=\"image\" src=\"https://github.com/pyro-ppl/pyro/assets/648532/45149fae-a72d-481a-aaf9-73262d50aa92\">\r\n\n", "before_files": [{"content": "# Copyright (c) 2017-2019 Uber Technologies, Inc.\n# SPDX-License-Identifier: Apache-2.0\n\nimport os\nimport sys\n\nimport sphinx_rtd_theme\n\n# import pkg_resources\n\n# -*- coding: utf-8 -*-\n#\n# Pyro documentation build configuration file, created by\n# sphinx-quickstart on Thu Jun 15 17:16:14 2017.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nsys.path.insert(0, os.path.abspath(\"../..\"))\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.intersphinx\", #\n \"sphinx.ext.todo\", #\n \"sphinx.ext.mathjax\", #\n \"sphinx.ext.ifconfig\", #\n \"sphinx.ext.viewcode\", #\n \"sphinx.ext.githubpages\", #\n \"sphinx.ext.graphviz\", #\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.doctest\",\n 'sphinx.ext.napoleon',\n]\n\n# Disable documentation inheritance so as to avoid inheriting\n# docstrings in a different format, e.g. 
when the parent class\n# is a PyTorch class.\n\nautodoc_inherit_docstrings = False\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = \".rst\"\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# General information about the project.\nproject = u\"Pyro\"\ncopyright = u\"2017-2018, Uber Technologies, Inc\"\nauthor = u\"Uber AI Labs\"\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n\nversion = \"\"\n\nif \"READTHEDOCS\" not in os.environ:\n # if developing locally, use pyro.__version__ as version\n from pyro import __version__ # noqaE402\n\n version = __version__\n\n# release version\nrelease = version\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = \"en\"\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\nexclude_patterns = []\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = \"sphinx\"\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = True\n\n# do not prepend module name to functions\nadd_module_names = False\n\n# -- Options for HTML output ----------------------------------------------\n\n# logo\nhtml_logo = \"_static/img/pyro_logo_wide.png\"\n\n# logo\nhtml_favicon = \"_static/img/favicon/favicon.ico\"\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"sphinx_rtd_theme\"\nhtml_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n\nhtml_theme_options = {\n \"navigation_depth\": 3,\n \"logo_only\": True,\n}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\nhtml_style = \"css/pyro.css\"\n\n# -- Options for HTMLHelp output ------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"Pyrodoc\"\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. 
List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, \"Pyro.tex\", u\"Pyro Documentation\", u\"Uber AI Labs\", \"manual\"),\n]\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [(master_doc, \"pyro\", u\"Pyro Documentation\", [author], 1)]\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (\n master_doc,\n \"Pyro\",\n u\"Pyro Documentation\",\n author,\n \"Pyro\",\n \"Deep Universal Probabilistic Programming.\",\n \"Miscellaneous\",\n ),\n]\n\n# Example configuration for intersphinx: refer to the Python standard library.\nintersphinx_mapping = {\n \"python\": (\"https://docs.python.org/3/\", None),\n \"torch\": (\"https://pytorch.org/docs/master/\", None),\n \"funsor\": (\"http://funsor.pyro.ai/en/stable/\", None),\n \"opt_einsum\": (\"https://optimized-einsum.readthedocs.io/en/stable/\", None),\n \"scipy\": (\"https://docs.scipy.org/doc/scipy/reference/\", None),\n \"Bio\": (\"https://biopython.org/docs/latest/api/\", None),\n \"horovod\": (\"https://horovod.readthedocs.io/en/stable/\", None),\n \"graphviz\": (\"https://graphviz.readthedocs.io/en/stable/\", None),\n}\n\n# document class constructors (__init__ methods):\n\"\"\" comment out this functionality for now;\ndef skip(app, what, name, obj, skip, options):\n if name == \"__init__\":\n return False\n return skip\n\"\"\"\n\n\ndef setup(app):\n app.add_css_file(\"css/pyro.css\")\n\n\n# app.connect(\"autodoc-skip-member\", skip)\n\n\n# @jpchen's hack to get rtd builder to install latest pytorch\n# See similar line in the install section of .travis.yml\nif \"READTHEDOCS\" in os.environ:\n os.system(\"pip install numpy\")\n os.system(\n \"pip install torch==1.11.0+cpu torchvision==0.12.0+cpu \"\n \"-f https://download.pytorch.org/whl/torch_stable.html\"\n )\n", "path": "docs/source/conf.py"}]}
num_tokens_prompt: 3,022 | num_tokens_diff: 173
gh_patches_debug_35401
|
rasdani/github-patches
|
git_diff
|
CTFd__CTFd-543
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bootstrap Beta v3
https://getbootstrap.com/docs/4.0/migration/#beta-3-changes
See if anything needs to be changed to support Bootstrap Betav3. Supposedly there won't be breaking changes between v3 and Stable so this is a good issue to look into.
</issue>
<code>
[start of CTFd/admin/challenges.py]
1 from flask import current_app as app, render_template, request, redirect, jsonify, url_for, Blueprint
2 from CTFd.utils import admins_only, is_admin, cache
3 from CTFd.models import db, Teams, Solves, Awards, Challenges, WrongKeys, Keys, Tags, Files, Tracking, Pages, Config, Hints, Unlocks, DatabaseError
4 from CTFd.plugins.keys import get_key_class, KEY_CLASSES
5 from CTFd.plugins.challenges import get_chal_class, CHALLENGE_CLASSES
6
7 from CTFd import utils
8
9 import os
10
11 admin_challenges = Blueprint('admin_challenges', __name__)
12
13
14 @admin_challenges.route('/admin/chal_types', methods=['GET'])
15 @admins_only
16 def admin_chal_types():
17 data = {}
18 for class_id in CHALLENGE_CLASSES:
19 challenge_class = CHALLENGE_CLASSES.get(class_id)
20 data[challenge_class.id] = {
21 'id': challenge_class.id,
22 'name': challenge_class.name,
23 'templates': challenge_class.templates,
24 'scripts': challenge_class.scripts,
25 }
26
27 return jsonify(data)
28
29
30 @admin_challenges.route('/admin/chals', methods=['POST', 'GET'])
31 @admins_only
32 def admin_chals():
33 if request.method == 'POST':
34 chals = Challenges.query.add_columns('id', 'type', 'name', 'value', 'description', 'category', 'hidden', 'max_attempts').order_by(Challenges.value).all()
35
36 json_data = {'game': []}
37 for x in chals:
38 type_class = CHALLENGE_CLASSES.get(x.type)
39 type_name = type_class.name if type_class else None
40
41 json_data['game'].append({
42 'id': x.id,
43 'name': x.name,
44 'value': x.value,
45 'description': x.description,
46 'category': x.category,
47 'hidden': x.hidden,
48 'max_attempts': x.max_attempts,
49 'type': x.type,
50 'type_name': type_name,
51 'type_data': {
52 'id': type_class.id,
53 'name': type_class.name,
54 'templates': type_class.templates,
55 'scripts': type_class.scripts,
56 }
57 })
58
59 db.session.close()
60 return jsonify(json_data)
61 else:
62 challenges = Challenges.query.all()
63 return render_template('admin/challenges.html', challenges=challenges)
64
65
66 @admin_challenges.route('/admin/chal/<int:chalid>', methods=['GET', 'POST'])
67 @admins_only
68 def admin_chal_detail(chalid):
69 chal = Challenges.query.filter_by(id=chalid).first_or_404()
70 chal_class = get_chal_class(chal.type)
71
72 if request.method == 'POST':
73 status, message = chal_class.attempt(chal, request)
74 if status:
75 return jsonify({'status': 1, 'message': message})
76 else:
77 return jsonify({'status': 0, 'message': message})
78 elif request.method == 'GET':
79 obj, data = chal_class.read(chal)
80 return jsonify(data)
81
82
83 @admin_challenges.route('/admin/tags/<int:chalid>', methods=['GET', 'POST'])
84 @admins_only
85 def admin_tags(chalid):
86 if request.method == 'GET':
87 tags = Tags.query.filter_by(chal=chalid).all()
88 json_data = {'tags': []}
89 for x in tags:
90 json_data['tags'].append({'id': x.id, 'chal': x.chal, 'tag': x.tag})
91 return jsonify(json_data)
92
93 elif request.method == 'POST':
94 newtags = request.form.getlist('tags[]')
95 for x in newtags:
96 tag = Tags(chalid, x)
97 db.session.add(tag)
98 db.session.commit()
99 db.session.close()
100 return '1'
101
102
103 @admin_challenges.route('/admin/tags/<int:tagid>/delete', methods=['POST'])
104 @admins_only
105 def admin_delete_tags(tagid):
106 if request.method == 'POST':
107 tag = Tags.query.filter_by(id=tagid).first_or_404()
108 db.session.delete(tag)
109 db.session.commit()
110 db.session.close()
111 return '1'
112
113
114 @admin_challenges.route('/admin/hints', defaults={'hintid': None}, methods=['POST', 'GET'])
115 @admin_challenges.route('/admin/hints/<int:hintid>', methods=['GET', 'POST', 'DELETE'])
116 @admins_only
117 def admin_hints(hintid):
118 if hintid:
119 hint = Hints.query.filter_by(id=hintid).first_or_404()
120
121 if request.method == 'POST':
122 hint.hint = request.form.get('hint')
123 hint.chal = int(request.form.get('chal'))
124 hint.cost = int(request.form.get('cost') or 0)
125 db.session.commit()
126
127 elif request.method == 'DELETE':
128 db.session.delete(hint)
129 db.session.commit()
130 db.session.close()
131 return ('', 204)
132
133 json_data = {
134 'hint': hint.hint,
135 'type': hint.type,
136 'chal': hint.chal,
137 'cost': hint.cost,
138 'id': hint.id
139 }
140 db.session.close()
141 return jsonify(json_data)
142 else:
143 if request.method == 'GET':
144 hints = Hints.query.all()
145 json_data = []
146 for hint in hints:
147 json_data.append({
148 'hint': hint.hint,
149 'type': hint.type,
150 'chal': hint.chal,
151 'cost': hint.cost,
152 'id': hint.id
153 })
154 return jsonify({'results': json_data})
155 elif request.method == 'POST':
156 hint = request.form.get('hint')
157 chalid = int(request.form.get('chal'))
158 cost = int(request.form.get('cost') or 0)
159 hint_type = request.form.get('type', 0)
160 hint = Hints(chal=chalid, hint=hint, cost=cost)
161 db.session.add(hint)
162 db.session.commit()
163 json_data = {
164 'hint': hint.hint,
165 'type': hint.type,
166 'chal': hint.chal,
167 'cost': hint.cost,
168 'id': hint.id
169 }
170 db.session.close()
171 return jsonify(json_data)
172
173
174 @admin_challenges.route('/admin/files/<int:chalid>', methods=['GET', 'POST'])
175 @admins_only
176 def admin_files(chalid):
177 if request.method == 'GET':
178 files = Files.query.filter_by(chal=chalid).all()
179 json_data = {'files': []}
180 for x in files:
181 json_data['files'].append({'id': x.id, 'file': x.location})
182 return jsonify(json_data)
183 if request.method == 'POST':
184 if request.form['method'] == "delete":
185 utils.delete_file(request.form['file'])
186
187 db.session.commit()
188 db.session.close()
189 return '1'
190 elif request.form['method'] == "upload":
191 files = request.files.getlist('files[]')
192
193 for f in files:
194 utils.upload_file(file=f, chalid=chalid)
195
196 db.session.commit()
197 db.session.close()
198 return '1'
199
200
201 @admin_challenges.route('/admin/chal/<int:chalid>/<prop>', methods=['GET'])
202 @admins_only
203 def admin_get_values(chalid, prop):
204 challenge = Challenges.query.filter_by(id=chalid).first_or_404()
205 if prop == 'keys':
206 chal_keys = Keys.query.filter_by(chal=challenge.id).all()
207 json_data = {'keys': []}
208 for x in chal_keys:
209 key_class = get_key_class(x.type)
210 json_data['keys'].append({
211 'id': x.id,
212 'key': x.flag,
213 'type': x.type,
214 'type_name': key_class.name,
215 'templates': key_class.templates,
216 })
217 return jsonify(json_data)
218 elif prop == 'tags':
219 tags = Tags.query.filter_by(chal=chalid).all()
220 json_data = {'tags': []}
221 for x in tags:
222 json_data['tags'].append({
223 'id': x.id,
224 'chal': x.chal,
225 'tag': x.tag
226 })
227 return jsonify(json_data)
228 elif prop == 'hints':
229 hints = Hints.query.filter_by(chal=chalid)
230 json_data = {'hints': []}
231 for hint in hints:
232 json_data['hints'].append({
233 'hint': hint.hint,
234 'type': hint.type,
235 'chal': hint.chal,
236 'cost': hint.cost,
237 'id': hint.id
238 })
239 return jsonify(json_data)
240
241
242 @admin_challenges.route('/admin/chal/new', methods=['GET', 'POST'])
243 @admins_only
244 def admin_create_chal():
245 if request.method == 'POST':
246 chal_type = request.form['chaltype']
247 chal_class = get_chal_class(chal_type)
248 chal_class.create(request)
249 return redirect(url_for('admin_challenges.admin_chals'))
250 else:
251 return render_template('admin/chals/create.html')
252
253
254 @admin_challenges.route('/admin/chal/delete', methods=['POST'])
255 @admins_only
256 def admin_delete_chal():
257 challenge = Challenges.query.filter_by(id=request.form['id']).first_or_404()
258 chal_class = get_chal_class(challenge.type)
259 chal_class.delete(challenge)
260 return '1'
261
262
263 @admin_challenges.route('/admin/chal/update', methods=['POST'])
264 @admins_only
265 def admin_update_chal():
266 challenge = Challenges.query.filter_by(id=request.form['id']).first_or_404()
267 chal_class = get_chal_class(challenge.type)
268 chal_class.update(challenge, request)
269 return redirect(url_for('admin_challenges.admin_chals'))
270
[end of CTFd/admin/challenges.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
golden_diff:
diff --git a/CTFd/admin/challenges.py b/CTFd/admin/challenges.py
--- a/CTFd/admin/challenges.py
+++ b/CTFd/admin/challenges.py
@@ -31,22 +31,31 @@
@admins_only
def admin_chals():
if request.method == 'POST':
- chals = Challenges.query.add_columns('id', 'type', 'name', 'value', 'description', 'category', 'hidden', 'max_attempts').order_by(Challenges.value).all()
+ chals = Challenges.query.order_by(Challenges.value).all()
json_data = {'game': []}
- for x in chals:
- type_class = CHALLENGE_CLASSES.get(x.type)
+ for chal in chals:
+ tags = [tag.tag for tag in Tags.query.add_columns('tag').filter_by(chal=chal.id).all()]
+ files = [str(f.location) for f in Files.query.filter_by(chal=chal.id).all()]
+ hints = []
+ for hint in Hints.query.filter_by(chal=chal.id).all():
+ hints.append({'id': hint.id, 'cost': hint.cost, 'hint': hint.hint})
+
+ type_class = CHALLENGE_CLASSES.get(chal.type)
type_name = type_class.name if type_class else None
json_data['game'].append({
- 'id': x.id,
- 'name': x.name,
- 'value': x.value,
- 'description': x.description,
- 'category': x.category,
- 'hidden': x.hidden,
- 'max_attempts': x.max_attempts,
- 'type': x.type,
+ 'id': chal.id,
+ 'name': chal.name,
+ 'value': chal.value,
+ 'description': chal.description,
+ 'category': chal.category,
+ 'files': files,
+ 'tags': tags,
+ 'hints': hints,
+ 'hidden': chal.hidden,
+ 'max_attempts': chal.max_attempts,
+ 'type': chal.type,
'type_name': type_name,
'type_data': {
'id': type_class.id,
@@ -77,6 +86,17 @@
return jsonify({'status': 0, 'message': message})
elif request.method == 'GET':
obj, data = chal_class.read(chal)
+
+ tags = [tag.tag for tag in Tags.query.add_columns('tag').filter_by(chal=chal.id).all()]
+ files = [str(f.location) for f in Files.query.filter_by(chal=chal.id).all()]
+ hints = []
+ for hint in Hints.query.filter_by(chal=chal.id).all():
+ hints.append({'id': hint.id, 'cost': hint.cost, 'hint': hint.hint})
+
+ data['tags'] = tags
+ data['files'] = files
+ data['hints'] = hints
+
return jsonify(data)
verification_info:
{"golden_diff": "diff --git a/CTFd/admin/challenges.py b/CTFd/admin/challenges.py\n--- a/CTFd/admin/challenges.py\n+++ b/CTFd/admin/challenges.py\n@@ -31,22 +31,31 @@\n @admins_only\n def admin_chals():\n if request.method == 'POST':\n- chals = Challenges.query.add_columns('id', 'type', 'name', 'value', 'description', 'category', 'hidden', 'max_attempts').order_by(Challenges.value).all()\n+ chals = Challenges.query.order_by(Challenges.value).all()\n \n json_data = {'game': []}\n- for x in chals:\n- type_class = CHALLENGE_CLASSES.get(x.type)\n+ for chal in chals:\n+ tags = [tag.tag for tag in Tags.query.add_columns('tag').filter_by(chal=chal.id).all()]\n+ files = [str(f.location) for f in Files.query.filter_by(chal=chal.id).all()]\n+ hints = []\n+ for hint in Hints.query.filter_by(chal=chal.id).all():\n+ hints.append({'id': hint.id, 'cost': hint.cost, 'hint': hint.hint})\n+\n+ type_class = CHALLENGE_CLASSES.get(chal.type)\n type_name = type_class.name if type_class else None\n \n json_data['game'].append({\n- 'id': x.id,\n- 'name': x.name,\n- 'value': x.value,\n- 'description': x.description,\n- 'category': x.category,\n- 'hidden': x.hidden,\n- 'max_attempts': x.max_attempts,\n- 'type': x.type,\n+ 'id': chal.id,\n+ 'name': chal.name,\n+ 'value': chal.value,\n+ 'description': chal.description,\n+ 'category': chal.category,\n+ 'files': files,\n+ 'tags': tags,\n+ 'hints': hints,\n+ 'hidden': chal.hidden,\n+ 'max_attempts': chal.max_attempts,\n+ 'type': chal.type,\n 'type_name': type_name,\n 'type_data': {\n 'id': type_class.id,\n@@ -77,6 +86,17 @@\n return jsonify({'status': 0, 'message': message})\n elif request.method == 'GET':\n obj, data = chal_class.read(chal)\n+\n+ tags = [tag.tag for tag in Tags.query.add_columns('tag').filter_by(chal=chal.id).all()]\n+ files = [str(f.location) for f in Files.query.filter_by(chal=chal.id).all()]\n+ hints = []\n+ for hint in Hints.query.filter_by(chal=chal.id).all():\n+ hints.append({'id': hint.id, 'cost': hint.cost, 'hint': hint.hint})\n+\n+ data['tags'] = tags\n+ data['files'] = files\n+ data['hints'] = hints\n+\n return jsonify(data)\n", "issue": "Bootstrap Beta v3\nhttps://getbootstrap.com/docs/4.0/migration/#beta-3-changes\r\n\r\nSee if anything needs to be changed to support Bootstrap Betav3. Supposedly there won't be breaking changes between v3 and Stable so this is a good issue to look into. 
\r\n\n", "before_files": [{"content": "from flask import current_app as app, render_template, request, redirect, jsonify, url_for, Blueprint\nfrom CTFd.utils import admins_only, is_admin, cache\nfrom CTFd.models import db, Teams, Solves, Awards, Challenges, WrongKeys, Keys, Tags, Files, Tracking, Pages, Config, Hints, Unlocks, DatabaseError\nfrom CTFd.plugins.keys import get_key_class, KEY_CLASSES\nfrom CTFd.plugins.challenges import get_chal_class, CHALLENGE_CLASSES\n\nfrom CTFd import utils\n\nimport os\n\nadmin_challenges = Blueprint('admin_challenges', __name__)\n\n\n@admin_challenges.route('/admin/chal_types', methods=['GET'])\n@admins_only\ndef admin_chal_types():\n data = {}\n for class_id in CHALLENGE_CLASSES:\n challenge_class = CHALLENGE_CLASSES.get(class_id)\n data[challenge_class.id] = {\n 'id': challenge_class.id,\n 'name': challenge_class.name,\n 'templates': challenge_class.templates,\n 'scripts': challenge_class.scripts,\n }\n\n return jsonify(data)\n\n\n@admin_challenges.route('/admin/chals', methods=['POST', 'GET'])\n@admins_only\ndef admin_chals():\n if request.method == 'POST':\n chals = Challenges.query.add_columns('id', 'type', 'name', 'value', 'description', 'category', 'hidden', 'max_attempts').order_by(Challenges.value).all()\n\n json_data = {'game': []}\n for x in chals:\n type_class = CHALLENGE_CLASSES.get(x.type)\n type_name = type_class.name if type_class else None\n\n json_data['game'].append({\n 'id': x.id,\n 'name': x.name,\n 'value': x.value,\n 'description': x.description,\n 'category': x.category,\n 'hidden': x.hidden,\n 'max_attempts': x.max_attempts,\n 'type': x.type,\n 'type_name': type_name,\n 'type_data': {\n 'id': type_class.id,\n 'name': type_class.name,\n 'templates': type_class.templates,\n 'scripts': type_class.scripts,\n }\n })\n\n db.session.close()\n return jsonify(json_data)\n else:\n challenges = Challenges.query.all()\n return render_template('admin/challenges.html', challenges=challenges)\n\n\n@admin_challenges.route('/admin/chal/<int:chalid>', methods=['GET', 'POST'])\n@admins_only\ndef admin_chal_detail(chalid):\n chal = Challenges.query.filter_by(id=chalid).first_or_404()\n chal_class = get_chal_class(chal.type)\n\n if request.method == 'POST':\n status, message = chal_class.attempt(chal, request)\n if status:\n return jsonify({'status': 1, 'message': message})\n else:\n return jsonify({'status': 0, 'message': message})\n elif request.method == 'GET':\n obj, data = chal_class.read(chal)\n return jsonify(data)\n\n\n@admin_challenges.route('/admin/tags/<int:chalid>', methods=['GET', 'POST'])\n@admins_only\ndef admin_tags(chalid):\n if request.method == 'GET':\n tags = Tags.query.filter_by(chal=chalid).all()\n json_data = {'tags': []}\n for x in tags:\n json_data['tags'].append({'id': x.id, 'chal': x.chal, 'tag': x.tag})\n return jsonify(json_data)\n\n elif request.method == 'POST':\n newtags = request.form.getlist('tags[]')\n for x in newtags:\n tag = Tags(chalid, x)\n db.session.add(tag)\n db.session.commit()\n db.session.close()\n return '1'\n\n\n@admin_challenges.route('/admin/tags/<int:tagid>/delete', methods=['POST'])\n@admins_only\ndef admin_delete_tags(tagid):\n if request.method == 'POST':\n tag = Tags.query.filter_by(id=tagid).first_or_404()\n db.session.delete(tag)\n db.session.commit()\n db.session.close()\n return '1'\n\n\n@admin_challenges.route('/admin/hints', defaults={'hintid': None}, methods=['POST', 'GET'])\n@admin_challenges.route('/admin/hints/<int:hintid>', methods=['GET', 'POST', 'DELETE'])\n@admins_only\ndef 
admin_hints(hintid):\n if hintid:\n hint = Hints.query.filter_by(id=hintid).first_or_404()\n\n if request.method == 'POST':\n hint.hint = request.form.get('hint')\n hint.chal = int(request.form.get('chal'))\n hint.cost = int(request.form.get('cost') or 0)\n db.session.commit()\n\n elif request.method == 'DELETE':\n db.session.delete(hint)\n db.session.commit()\n db.session.close()\n return ('', 204)\n\n json_data = {\n 'hint': hint.hint,\n 'type': hint.type,\n 'chal': hint.chal,\n 'cost': hint.cost,\n 'id': hint.id\n }\n db.session.close()\n return jsonify(json_data)\n else:\n if request.method == 'GET':\n hints = Hints.query.all()\n json_data = []\n for hint in hints:\n json_data.append({\n 'hint': hint.hint,\n 'type': hint.type,\n 'chal': hint.chal,\n 'cost': hint.cost,\n 'id': hint.id\n })\n return jsonify({'results': json_data})\n elif request.method == 'POST':\n hint = request.form.get('hint')\n chalid = int(request.form.get('chal'))\n cost = int(request.form.get('cost') or 0)\n hint_type = request.form.get('type', 0)\n hint = Hints(chal=chalid, hint=hint, cost=cost)\n db.session.add(hint)\n db.session.commit()\n json_data = {\n 'hint': hint.hint,\n 'type': hint.type,\n 'chal': hint.chal,\n 'cost': hint.cost,\n 'id': hint.id\n }\n db.session.close()\n return jsonify(json_data)\n\n\n@admin_challenges.route('/admin/files/<int:chalid>', methods=['GET', 'POST'])\n@admins_only\ndef admin_files(chalid):\n if request.method == 'GET':\n files = Files.query.filter_by(chal=chalid).all()\n json_data = {'files': []}\n for x in files:\n json_data['files'].append({'id': x.id, 'file': x.location})\n return jsonify(json_data)\n if request.method == 'POST':\n if request.form['method'] == \"delete\":\n utils.delete_file(request.form['file'])\n\n db.session.commit()\n db.session.close()\n return '1'\n elif request.form['method'] == \"upload\":\n files = request.files.getlist('files[]')\n\n for f in files:\n utils.upload_file(file=f, chalid=chalid)\n\n db.session.commit()\n db.session.close()\n return '1'\n\n\n@admin_challenges.route('/admin/chal/<int:chalid>/<prop>', methods=['GET'])\n@admins_only\ndef admin_get_values(chalid, prop):\n challenge = Challenges.query.filter_by(id=chalid).first_or_404()\n if prop == 'keys':\n chal_keys = Keys.query.filter_by(chal=challenge.id).all()\n json_data = {'keys': []}\n for x in chal_keys:\n key_class = get_key_class(x.type)\n json_data['keys'].append({\n 'id': x.id,\n 'key': x.flag,\n 'type': x.type,\n 'type_name': key_class.name,\n 'templates': key_class.templates,\n })\n return jsonify(json_data)\n elif prop == 'tags':\n tags = Tags.query.filter_by(chal=chalid).all()\n json_data = {'tags': []}\n for x in tags:\n json_data['tags'].append({\n 'id': x.id,\n 'chal': x.chal,\n 'tag': x.tag\n })\n return jsonify(json_data)\n elif prop == 'hints':\n hints = Hints.query.filter_by(chal=chalid)\n json_data = {'hints': []}\n for hint in hints:\n json_data['hints'].append({\n 'hint': hint.hint,\n 'type': hint.type,\n 'chal': hint.chal,\n 'cost': hint.cost,\n 'id': hint.id\n })\n return jsonify(json_data)\n\n\n@admin_challenges.route('/admin/chal/new', methods=['GET', 'POST'])\n@admins_only\ndef admin_create_chal():\n if request.method == 'POST':\n chal_type = request.form['chaltype']\n chal_class = get_chal_class(chal_type)\n chal_class.create(request)\n return redirect(url_for('admin_challenges.admin_chals'))\n else:\n return render_template('admin/chals/create.html')\n\n\n@admin_challenges.route('/admin/chal/delete', methods=['POST'])\n@admins_only\ndef 
admin_delete_chal():\n challenge = Challenges.query.filter_by(id=request.form['id']).first_or_404()\n chal_class = get_chal_class(challenge.type)\n chal_class.delete(challenge)\n return '1'\n\n\n@admin_challenges.route('/admin/chal/update', methods=['POST'])\n@admins_only\ndef admin_update_chal():\n challenge = Challenges.query.filter_by(id=request.form['id']).first_or_404()\n chal_class = get_chal_class(challenge.type)\n chal_class.update(challenge, request)\n return redirect(url_for('admin_challenges.admin_chals'))\n", "path": "CTFd/admin/challenges.py"}]}
num_tokens_prompt: 3,450 | num_tokens_diff: 665
problem_id: gh_patches_debug_28400 | source: rasdani/github-patches | task_type: git_diff | in_source_id: scrapy__scrapy-6269 | prompt:
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add brotlicffi support
Currently, brotli compression is supported when using `brotli` or `brotlipy` (deprecated). We should also support it through `brotlicffi`, the new name of `brotlipy`, which performs worse than `brotli` but works on PyPy.
</issue>
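Editor's aside: the fallback-import pattern this issue points at can be sketched in a few lines. This is only an illustration of the general technique, not the project's actual patch (that appears in the golden diff further down); it assumes the aliased module can stand in for `brotli` (the record's own diff uses the same aliasing), and the `brotli = None` sentinel is an assumption made for the sketch.

try:
    import brotli  # prefer the compiled brotli bindings when they are installed
except ImportError:
    try:
        import brotlicffi as brotli  # CFFI-based implementation; works on PyPy
    except ImportError:
        brotli = None  # neither package is available, so brotli decoding stays disabled

If neither import succeeds, callers can check the sentinel before advertising `br` support.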
<code>
[start of scrapy/downloadermiddlewares/httpcompression.py]
1 from __future__ import annotations
2
3 import warnings
4 from itertools import chain
5 from logging import getLogger
6 from typing import TYPE_CHECKING, List, Optional, Union
7
8 from scrapy import Request, Spider, signals
9 from scrapy.crawler import Crawler
10 from scrapy.exceptions import IgnoreRequest, NotConfigured
11 from scrapy.http import Response, TextResponse
12 from scrapy.responsetypes import responsetypes
13 from scrapy.statscollectors import StatsCollector
14 from scrapy.utils._compression import (
15 _DecompressionMaxSizeExceeded,
16 _inflate,
17 _unbrotli,
18 _unzstd,
19 )
20 from scrapy.utils.deprecate import ScrapyDeprecationWarning
21 from scrapy.utils.gz import gunzip
22
23 if TYPE_CHECKING:
24 # typing.Self requires Python 3.11
25 from typing_extensions import Self
26
27 logger = getLogger(__name__)
28
29 ACCEPTED_ENCODINGS: List[bytes] = [b"gzip", b"deflate"]
30
31 try:
32 import brotli # noqa: F401
33 except ImportError:
34 pass
35 else:
36 ACCEPTED_ENCODINGS.append(b"br")
37
38 try:
39 import zstandard # noqa: F401
40 except ImportError:
41 pass
42 else:
43 ACCEPTED_ENCODINGS.append(b"zstd")
44
45
46 class HttpCompressionMiddleware:
47 """This middleware allows compressed (gzip, deflate) traffic to be
48 sent/received from web sites"""
49
50 def __init__(
51 self,
52 stats: Optional[StatsCollector] = None,
53 *,
54 crawler: Optional[Crawler] = None,
55 ):
56 if not crawler:
57 self.stats = stats
58 self._max_size = 1073741824
59 self._warn_size = 33554432
60 return
61 self.stats = crawler.stats
62 self._max_size = crawler.settings.getint("DOWNLOAD_MAXSIZE")
63 self._warn_size = crawler.settings.getint("DOWNLOAD_WARNSIZE")
64 crawler.signals.connect(self.open_spider, signals.spider_opened)
65
66 @classmethod
67 def from_crawler(cls, crawler: Crawler) -> Self:
68 if not crawler.settings.getbool("COMPRESSION_ENABLED"):
69 raise NotConfigured
70 try:
71 return cls(crawler=crawler)
72 except TypeError:
73 warnings.warn(
74 "HttpCompressionMiddleware subclasses must either modify "
75 "their '__init__' method to support a 'crawler' parameter or "
76 "reimplement their 'from_crawler' method.",
77 ScrapyDeprecationWarning,
78 )
79 mw = cls()
80 mw.stats = crawler.stats
81 mw._max_size = crawler.settings.getint("DOWNLOAD_MAXSIZE")
82 mw._warn_size = crawler.settings.getint("DOWNLOAD_WARNSIZE")
83 crawler.signals.connect(mw.open_spider, signals.spider_opened)
84 return mw
85
86 def open_spider(self, spider):
87 if hasattr(spider, "download_maxsize"):
88 self._max_size = spider.download_maxsize
89 if hasattr(spider, "download_warnsize"):
90 self._warn_size = spider.download_warnsize
91
92 def process_request(
93 self, request: Request, spider: Spider
94 ) -> Union[Request, Response, None]:
95 request.headers.setdefault("Accept-Encoding", b", ".join(ACCEPTED_ENCODINGS))
96 return None
97
98 def process_response(
99 self, request: Request, response: Response, spider: Spider
100 ) -> Union[Request, Response]:
101 if request.method == "HEAD":
102 return response
103 if isinstance(response, Response):
104 content_encoding = response.headers.getlist("Content-Encoding")
105 if content_encoding:
106 max_size = request.meta.get("download_maxsize", self._max_size)
107 warn_size = request.meta.get("download_warnsize", self._warn_size)
108 try:
109 decoded_body, content_encoding = self._handle_encoding(
110 response.body, content_encoding, max_size
111 )
112 except _DecompressionMaxSizeExceeded:
113 raise IgnoreRequest(
114 f"Ignored response {response} because its body "
115 f"({len(response.body)} B compressed) exceeded "
116 f"DOWNLOAD_MAXSIZE ({max_size} B) during "
117 f"decompression."
118 )
119 if len(response.body) < warn_size <= len(decoded_body):
120 logger.warning(
121 f"{response} body size after decompression "
122 f"({len(decoded_body)} B) is larger than the "
123 f"download warning size ({warn_size} B)."
124 )
125 response.headers["Content-Encoding"] = content_encoding
126 if self.stats:
127 self.stats.inc_value(
128 "httpcompression/response_bytes",
129 len(decoded_body),
130 spider=spider,
131 )
132 self.stats.inc_value(
133 "httpcompression/response_count", spider=spider
134 )
135 respcls = responsetypes.from_args(
136 headers=response.headers, url=response.url, body=decoded_body
137 )
138 kwargs = {"cls": respcls, "body": decoded_body}
139 if issubclass(respcls, TextResponse):
140 # force recalculating the encoding until we make sure the
141 # responsetypes guessing is reliable
142 kwargs["encoding"] = None
143 response = response.replace(**kwargs)
144 if not content_encoding:
145 del response.headers["Content-Encoding"]
146
147 return response
148
149 def _handle_encoding(self, body, content_encoding, max_size):
150 to_decode, to_keep = self._split_encodings(content_encoding)
151 for encoding in to_decode:
152 body = self._decode(body, encoding, max_size)
153 return body, to_keep
154
155 def _split_encodings(self, content_encoding):
156 to_keep = [
157 encoding.strip().lower()
158 for encoding in chain.from_iterable(
159 encodings.split(b",") for encodings in content_encoding
160 )
161 ]
162 to_decode = []
163 while to_keep:
164 encoding = to_keep.pop()
165 if encoding not in ACCEPTED_ENCODINGS:
166 to_keep.append(encoding)
167 return to_decode, to_keep
168 to_decode.append(encoding)
169 return to_decode, to_keep
170
171 def _decode(self, body: bytes, encoding: bytes, max_size: int) -> bytes:
172 if encoding in {b"gzip", b"x-gzip"}:
173 return gunzip(body, max_size=max_size)
174 if encoding == b"deflate":
175 return _inflate(body, max_size=max_size)
176 if encoding == b"br" and b"br" in ACCEPTED_ENCODINGS:
177 return _unbrotli(body, max_size=max_size)
178 if encoding == b"zstd" and b"zstd" in ACCEPTED_ENCODINGS:
179 return _unzstd(body, max_size=max_size)
180 return body
181
[end of scrapy/downloadermiddlewares/httpcompression.py]
[start of scrapy/utils/_compression.py]
1 import zlib
2 from io import BytesIO
3 from warnings import warn
4
5 from scrapy.exceptions import ScrapyDeprecationWarning
6
7 try:
8 import brotli
9 except ImportError:
10 pass
11 else:
12 try:
13 brotli.Decompressor.process
14 except AttributeError:
15 warn(
16 (
17 "You have brotlipy installed, and Scrapy will use it, but "
18 "Scrapy support for brotlipy is deprecated and will stop "
19 "working in a future version of Scrapy. brotlipy itself is "
20 "deprecated, it has been superseded by brotlicffi (not "
21 "currently supported by Scrapy). Please, uninstall brotlipy "
22 "and install brotli instead. brotlipy has the same import "
23 "name as brotli, so keeping both installed is strongly "
24 "discouraged."
25 ),
26 ScrapyDeprecationWarning,
27 )
28
29 def _brotli_decompress(decompressor, data):
30 return decompressor.decompress(data)
31
32 else:
33
34 def _brotli_decompress(decompressor, data):
35 return decompressor.process(data)
36
37
38 try:
39 import zstandard
40 except ImportError:
41 pass
42
43
44 _CHUNK_SIZE = 65536 # 64 KiB
45
46
47 class _DecompressionMaxSizeExceeded(ValueError):
48 pass
49
50
51 def _inflate(data: bytes, *, max_size: int = 0) -> bytes:
52 decompressor = zlib.decompressobj()
53 raw_decompressor = zlib.decompressobj(wbits=-15)
54 input_stream = BytesIO(data)
55 output_stream = BytesIO()
56 output_chunk = b"."
57 decompressed_size = 0
58 while output_chunk:
59 input_chunk = input_stream.read(_CHUNK_SIZE)
60 try:
61 output_chunk = decompressor.decompress(input_chunk)
62 except zlib.error:
63 if decompressor != raw_decompressor:
64 # ugly hack to work with raw deflate content that may
65 # be sent by microsoft servers. For more information, see:
66 # http://carsten.codimi.de/gzip.yaws/
67 # http://www.port80software.com/200ok/archive/2005/10/31/868.aspx
68 # http://www.gzip.org/zlib/zlib_faq.html#faq38
69 decompressor = raw_decompressor
70 output_chunk = decompressor.decompress(input_chunk)
71 else:
72 raise
73 decompressed_size += len(output_chunk)
74 if max_size and decompressed_size > max_size:
75 raise _DecompressionMaxSizeExceeded(
76 f"The number of bytes decompressed so far "
77 f"({decompressed_size} B) exceed the specified maximum "
78 f"({max_size} B)."
79 )
80 output_stream.write(output_chunk)
81 output_stream.seek(0)
82 return output_stream.read()
83
84
85 def _unbrotli(data: bytes, *, max_size: int = 0) -> bytes:
86 decompressor = brotli.Decompressor()
87 input_stream = BytesIO(data)
88 output_stream = BytesIO()
89 output_chunk = b"."
90 decompressed_size = 0
91 while output_chunk:
92 input_chunk = input_stream.read(_CHUNK_SIZE)
93 output_chunk = _brotli_decompress(decompressor, input_chunk)
94 decompressed_size += len(output_chunk)
95 if max_size and decompressed_size > max_size:
96 raise _DecompressionMaxSizeExceeded(
97 f"The number of bytes decompressed so far "
98 f"({decompressed_size} B) exceed the specified maximum "
99 f"({max_size} B)."
100 )
101 output_stream.write(output_chunk)
102 output_stream.seek(0)
103 return output_stream.read()
104
105
106 def _unzstd(data: bytes, *, max_size: int = 0) -> bytes:
107 decompressor = zstandard.ZstdDecompressor()
108 stream_reader = decompressor.stream_reader(BytesIO(data))
109 output_stream = BytesIO()
110 output_chunk = b"."
111 decompressed_size = 0
112 while output_chunk:
113 output_chunk = stream_reader.read(_CHUNK_SIZE)
114 decompressed_size += len(output_chunk)
115 if max_size and decompressed_size > max_size:
116 raise _DecompressionMaxSizeExceeded(
117 f"The number of bytes decompressed so far "
118 f"({decompressed_size} B) exceed the specified maximum "
119 f"({max_size} B)."
120 )
121 output_stream.write(output_chunk)
122 output_stream.seek(0)
123 return output_stream.read()
124
[end of scrapy/utils/_compression.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
golden_diff:
diff --git a/scrapy/downloadermiddlewares/httpcompression.py b/scrapy/downloadermiddlewares/httpcompression.py
--- a/scrapy/downloadermiddlewares/httpcompression.py
+++ b/scrapy/downloadermiddlewares/httpcompression.py
@@ -29,7 +29,10 @@
ACCEPTED_ENCODINGS: List[bytes] = [b"gzip", b"deflate"]
try:
- import brotli # noqa: F401
+ try:
+ import brotli # noqa: F401
+ except ImportError:
+ import brotlicffi # noqa: F401
except ImportError:
pass
else:
diff --git a/scrapy/utils/_compression.py b/scrapy/utils/_compression.py
--- a/scrapy/utils/_compression.py
+++ b/scrapy/utils/_compression.py
@@ -5,7 +5,10 @@
from scrapy.exceptions import ScrapyDeprecationWarning
try:
- import brotli
+ try:
+ import brotli
+ except ImportError:
+ import brotlicffi as brotli
except ImportError:
pass
else:
@@ -17,9 +20,9 @@
"You have brotlipy installed, and Scrapy will use it, but "
"Scrapy support for brotlipy is deprecated and will stop "
"working in a future version of Scrapy. brotlipy itself is "
- "deprecated, it has been superseded by brotlicffi (not "
- "currently supported by Scrapy). Please, uninstall brotlipy "
- "and install brotli instead. brotlipy has the same import "
+ "deprecated, it has been superseded by brotlicffi. "
+ "Please, uninstall brotlipy "
+ "and install brotli or brotlicffi instead. brotlipy has the same import "
"name as brotli, so keeping both installed is strongly "
"discouraged."
),
verification_info:
{"golden_diff": "diff --git a/scrapy/downloadermiddlewares/httpcompression.py b/scrapy/downloadermiddlewares/httpcompression.py\n--- a/scrapy/downloadermiddlewares/httpcompression.py\n+++ b/scrapy/downloadermiddlewares/httpcompression.py\n@@ -29,7 +29,10 @@\n ACCEPTED_ENCODINGS: List[bytes] = [b\"gzip\", b\"deflate\"]\n \n try:\n- import brotli # noqa: F401\n+ try:\n+ import brotli # noqa: F401\n+ except ImportError:\n+ import brotlicffi # noqa: F401\n except ImportError:\n pass\n else:\ndiff --git a/scrapy/utils/_compression.py b/scrapy/utils/_compression.py\n--- a/scrapy/utils/_compression.py\n+++ b/scrapy/utils/_compression.py\n@@ -5,7 +5,10 @@\n from scrapy.exceptions import ScrapyDeprecationWarning\n \n try:\n- import brotli\n+ try:\n+ import brotli\n+ except ImportError:\n+ import brotlicffi as brotli\n except ImportError:\n pass\n else:\n@@ -17,9 +20,9 @@\n \"You have brotlipy installed, and Scrapy will use it, but \"\n \"Scrapy support for brotlipy is deprecated and will stop \"\n \"working in a future version of Scrapy. brotlipy itself is \"\n- \"deprecated, it has been superseded by brotlicffi (not \"\n- \"currently supported by Scrapy). Please, uninstall brotlipy \"\n- \"and install brotli instead. brotlipy has the same import \"\n+ \"deprecated, it has been superseded by brotlicffi. \"\n+ \"Please, uninstall brotlipy \"\n+ \"and install brotli or brotlicffi instead. brotlipy has the same import \"\n \"name as brotli, so keeping both installed is strongly \"\n \"discouraged.\"\n ),\n", "issue": "Add brotlicffi support\nCurrently, brotli compression is supported when using `brotli` or `brotlipy` (deprecated). We should also support it thorugh `brotlicffi`, the new name of `brotlipy`, which performs worse than `brotli` but works on PyPy.\n", "before_files": [{"content": "from __future__ import annotations\n\nimport warnings\nfrom itertools import chain\nfrom logging import getLogger\nfrom typing import TYPE_CHECKING, List, Optional, Union\n\nfrom scrapy import Request, Spider, signals\nfrom scrapy.crawler import Crawler\nfrom scrapy.exceptions import IgnoreRequest, NotConfigured\nfrom scrapy.http import Response, TextResponse\nfrom scrapy.responsetypes import responsetypes\nfrom scrapy.statscollectors import StatsCollector\nfrom scrapy.utils._compression import (\n _DecompressionMaxSizeExceeded,\n _inflate,\n _unbrotli,\n _unzstd,\n)\nfrom scrapy.utils.deprecate import ScrapyDeprecationWarning\nfrom scrapy.utils.gz import gunzip\n\nif TYPE_CHECKING:\n # typing.Self requires Python 3.11\n from typing_extensions import Self\n\nlogger = getLogger(__name__)\n\nACCEPTED_ENCODINGS: List[bytes] = [b\"gzip\", b\"deflate\"]\n\ntry:\n import brotli # noqa: F401\nexcept ImportError:\n pass\nelse:\n ACCEPTED_ENCODINGS.append(b\"br\")\n\ntry:\n import zstandard # noqa: F401\nexcept ImportError:\n pass\nelse:\n ACCEPTED_ENCODINGS.append(b\"zstd\")\n\n\nclass HttpCompressionMiddleware:\n \"\"\"This middleware allows compressed (gzip, deflate) traffic to be\n sent/received from web sites\"\"\"\n\n def __init__(\n self,\n stats: Optional[StatsCollector] = None,\n *,\n crawler: Optional[Crawler] = None,\n ):\n if not crawler:\n self.stats = stats\n self._max_size = 1073741824\n self._warn_size = 33554432\n return\n self.stats = crawler.stats\n self._max_size = crawler.settings.getint(\"DOWNLOAD_MAXSIZE\")\n self._warn_size = crawler.settings.getint(\"DOWNLOAD_WARNSIZE\")\n crawler.signals.connect(self.open_spider, signals.spider_opened)\n\n @classmethod\n def from_crawler(cls, crawler: Crawler) -> 
Self:\n if not crawler.settings.getbool(\"COMPRESSION_ENABLED\"):\n raise NotConfigured\n try:\n return cls(crawler=crawler)\n except TypeError:\n warnings.warn(\n \"HttpCompressionMiddleware subclasses must either modify \"\n \"their '__init__' method to support a 'crawler' parameter or \"\n \"reimplement their 'from_crawler' method.\",\n ScrapyDeprecationWarning,\n )\n mw = cls()\n mw.stats = crawler.stats\n mw._max_size = crawler.settings.getint(\"DOWNLOAD_MAXSIZE\")\n mw._warn_size = crawler.settings.getint(\"DOWNLOAD_WARNSIZE\")\n crawler.signals.connect(mw.open_spider, signals.spider_opened)\n return mw\n\n def open_spider(self, spider):\n if hasattr(spider, \"download_maxsize\"):\n self._max_size = spider.download_maxsize\n if hasattr(spider, \"download_warnsize\"):\n self._warn_size = spider.download_warnsize\n\n def process_request(\n self, request: Request, spider: Spider\n ) -> Union[Request, Response, None]:\n request.headers.setdefault(\"Accept-Encoding\", b\", \".join(ACCEPTED_ENCODINGS))\n return None\n\n def process_response(\n self, request: Request, response: Response, spider: Spider\n ) -> Union[Request, Response]:\n if request.method == \"HEAD\":\n return response\n if isinstance(response, Response):\n content_encoding = response.headers.getlist(\"Content-Encoding\")\n if content_encoding:\n max_size = request.meta.get(\"download_maxsize\", self._max_size)\n warn_size = request.meta.get(\"download_warnsize\", self._warn_size)\n try:\n decoded_body, content_encoding = self._handle_encoding(\n response.body, content_encoding, max_size\n )\n except _DecompressionMaxSizeExceeded:\n raise IgnoreRequest(\n f\"Ignored response {response} because its body \"\n f\"({len(response.body)} B compressed) exceeded \"\n f\"DOWNLOAD_MAXSIZE ({max_size} B) during \"\n f\"decompression.\"\n )\n if len(response.body) < warn_size <= len(decoded_body):\n logger.warning(\n f\"{response} body size after decompression \"\n f\"({len(decoded_body)} B) is larger than the \"\n f\"download warning size ({warn_size} B).\"\n )\n response.headers[\"Content-Encoding\"] = content_encoding\n if self.stats:\n self.stats.inc_value(\n \"httpcompression/response_bytes\",\n len(decoded_body),\n spider=spider,\n )\n self.stats.inc_value(\n \"httpcompression/response_count\", spider=spider\n )\n respcls = responsetypes.from_args(\n headers=response.headers, url=response.url, body=decoded_body\n )\n kwargs = {\"cls\": respcls, \"body\": decoded_body}\n if issubclass(respcls, TextResponse):\n # force recalculating the encoding until we make sure the\n # responsetypes guessing is reliable\n kwargs[\"encoding\"] = None\n response = response.replace(**kwargs)\n if not content_encoding:\n del response.headers[\"Content-Encoding\"]\n\n return response\n\n def _handle_encoding(self, body, content_encoding, max_size):\n to_decode, to_keep = self._split_encodings(content_encoding)\n for encoding in to_decode:\n body = self._decode(body, encoding, max_size)\n return body, to_keep\n\n def _split_encodings(self, content_encoding):\n to_keep = [\n encoding.strip().lower()\n for encoding in chain.from_iterable(\n encodings.split(b\",\") for encodings in content_encoding\n )\n ]\n to_decode = []\n while to_keep:\n encoding = to_keep.pop()\n if encoding not in ACCEPTED_ENCODINGS:\n to_keep.append(encoding)\n return to_decode, to_keep\n to_decode.append(encoding)\n return to_decode, to_keep\n\n def _decode(self, body: bytes, encoding: bytes, max_size: int) -> bytes:\n if encoding in {b\"gzip\", b\"x-gzip\"}:\n return gunzip(body, 
max_size=max_size)\n if encoding == b\"deflate\":\n return _inflate(body, max_size=max_size)\n if encoding == b\"br\" and b\"br\" in ACCEPTED_ENCODINGS:\n return _unbrotli(body, max_size=max_size)\n if encoding == b\"zstd\" and b\"zstd\" in ACCEPTED_ENCODINGS:\n return _unzstd(body, max_size=max_size)\n return body\n", "path": "scrapy/downloadermiddlewares/httpcompression.py"}, {"content": "import zlib\nfrom io import BytesIO\nfrom warnings import warn\n\nfrom scrapy.exceptions import ScrapyDeprecationWarning\n\ntry:\n import brotli\nexcept ImportError:\n pass\nelse:\n try:\n brotli.Decompressor.process\n except AttributeError:\n warn(\n (\n \"You have brotlipy installed, and Scrapy will use it, but \"\n \"Scrapy support for brotlipy is deprecated and will stop \"\n \"working in a future version of Scrapy. brotlipy itself is \"\n \"deprecated, it has been superseded by brotlicffi (not \"\n \"currently supported by Scrapy). Please, uninstall brotlipy \"\n \"and install brotli instead. brotlipy has the same import \"\n \"name as brotli, so keeping both installed is strongly \"\n \"discouraged.\"\n ),\n ScrapyDeprecationWarning,\n )\n\n def _brotli_decompress(decompressor, data):\n return decompressor.decompress(data)\n\n else:\n\n def _brotli_decompress(decompressor, data):\n return decompressor.process(data)\n\n\ntry:\n import zstandard\nexcept ImportError:\n pass\n\n\n_CHUNK_SIZE = 65536 # 64 KiB\n\n\nclass _DecompressionMaxSizeExceeded(ValueError):\n pass\n\n\ndef _inflate(data: bytes, *, max_size: int = 0) -> bytes:\n decompressor = zlib.decompressobj()\n raw_decompressor = zlib.decompressobj(wbits=-15)\n input_stream = BytesIO(data)\n output_stream = BytesIO()\n output_chunk = b\".\"\n decompressed_size = 0\n while output_chunk:\n input_chunk = input_stream.read(_CHUNK_SIZE)\n try:\n output_chunk = decompressor.decompress(input_chunk)\n except zlib.error:\n if decompressor != raw_decompressor:\n # ugly hack to work with raw deflate content that may\n # be sent by microsoft servers. 
For more information, see:\n # http://carsten.codimi.de/gzip.yaws/\n # http://www.port80software.com/200ok/archive/2005/10/31/868.aspx\n # http://www.gzip.org/zlib/zlib_faq.html#faq38\n decompressor = raw_decompressor\n output_chunk = decompressor.decompress(input_chunk)\n else:\n raise\n decompressed_size += len(output_chunk)\n if max_size and decompressed_size > max_size:\n raise _DecompressionMaxSizeExceeded(\n f\"The number of bytes decompressed so far \"\n f\"({decompressed_size} B) exceed the specified maximum \"\n f\"({max_size} B).\"\n )\n output_stream.write(output_chunk)\n output_stream.seek(0)\n return output_stream.read()\n\n\ndef _unbrotli(data: bytes, *, max_size: int = 0) -> bytes:\n decompressor = brotli.Decompressor()\n input_stream = BytesIO(data)\n output_stream = BytesIO()\n output_chunk = b\".\"\n decompressed_size = 0\n while output_chunk:\n input_chunk = input_stream.read(_CHUNK_SIZE)\n output_chunk = _brotli_decompress(decompressor, input_chunk)\n decompressed_size += len(output_chunk)\n if max_size and decompressed_size > max_size:\n raise _DecompressionMaxSizeExceeded(\n f\"The number of bytes decompressed so far \"\n f\"({decompressed_size} B) exceed the specified maximum \"\n f\"({max_size} B).\"\n )\n output_stream.write(output_chunk)\n output_stream.seek(0)\n return output_stream.read()\n\n\ndef _unzstd(data: bytes, *, max_size: int = 0) -> bytes:\n decompressor = zstandard.ZstdDecompressor()\n stream_reader = decompressor.stream_reader(BytesIO(data))\n output_stream = BytesIO()\n output_chunk = b\".\"\n decompressed_size = 0\n while output_chunk:\n output_chunk = stream_reader.read(_CHUNK_SIZE)\n decompressed_size += len(output_chunk)\n if max_size and decompressed_size > max_size:\n raise _DecompressionMaxSizeExceeded(\n f\"The number of bytes decompressed so far \"\n f\"({decompressed_size} B) exceed the specified maximum \"\n f\"({max_size} B).\"\n )\n output_stream.write(output_chunk)\n output_stream.seek(0)\n return output_stream.read()\n", "path": "scrapy/utils/_compression.py"}]}
num_tokens_prompt: 3,759 | num_tokens_diff: 433
problem_id: gh_patches_debug_11918 | source: rasdani/github-patches | task_type: git_diff | in_source_id: meltano__meltano-7198 | prompt:
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Proposed: Should we add explicit `virtualenv` dependency
Follows from:
- https://github.com/meltano/meltano/issues/6882
We used to recommend _first_ installing `virtualenv`, then using `virtualenv` to install Meltano. This meant that Meltano was able to use `virtualenv` for plugin installation even without any explicit dependency declaration.
Today, if the user installs with `pipx`, then they also get `virtualenv` installed, since `pipx` ships with `virtualenv`.
Question: Should we just create an explicit `virtualenv` dependency so we can be sure it is available during plugin installation?
A valid scenario where `pip` would be fine for Meltano installation would be a new, dedicated Docker image. Presumably, that would be a safe time to install with `pip` and skip `pipx` installation, without requiring virtualenv isolation for Meltano itself. However, even in those cases, Meltano still needs `virtualenv` for plugin installation, and plugin installations would likely fail unless the image already has `virtualenv` installed.
</issue>
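For illustration only: the proposal amounts to invoking a `virtualenv` executable directly when creating plugin environments, rather than relying on the stdlib `venv` module. A minimal sketch, assuming `virtualenv` is declared as an explicit dependency (and is therefore installed alongside the running interpreter) and reusing the `exec_async` helper defined in the module below:

```python
from pathlib import Path
import sys

async def create_plugin_venv(venv_root: Path):
    # Assumption: a `virtualenv` script sits next to the active interpreter,
    # which holds once virtualenv is an explicit dependency of Meltano.
    virtualenv_exe = Path(sys.executable).parent / "virtualenv"
    return await exec_async(str(virtualenv_exe), str(venv_root))
```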
<code>
[start of src/meltano/core/venv_service.py]
1 """Manage Python virtual environments."""
2
3 from __future__ import annotations
4
5 import asyncio
6 import hashlib
7 import logging
8 import os
9 import platform
10 import shutil
11 import subprocess
12 import sys
13 from asyncio.subprocess import Process
14 from collections import namedtuple
15 from collections.abc import Iterable
16 from pathlib import Path
17
18 from meltano.core.error import AsyncSubprocessError
19 from meltano.core.project import Project
20
21 logger = logging.getLogger(__name__)
22
23 VenvSpecs = namedtuple("VenvSpecs", ("lib_dir", "bin_dir", "site_packages_dir"))
24
25 POSIX = VenvSpecs(
26 lib_dir="lib",
27 bin_dir="bin",
28 site_packages_dir=os.path.join(
29 "lib",
30 f"python{'.'.join(str(part) for part in sys.version_info[:2])}",
31 "site-packages",
32 ),
33 )
34
35 NT = VenvSpecs(
36 lib_dir="Lib",
37 bin_dir="Scripts",
38 site_packages_dir=os.path.join("Lib", "site-packages"),
39 )
40
41 PLATFORM_SPECS = {"Linux": POSIX, "Darwin": POSIX, "Windows": NT}
42
43
44 def venv_platform_specs():
45 """Get virtual environment sub-path info for the current platform.
46
47 Raises:
48 Exception: This platform is not supported.
49
50 Returns:
51 Virtual environment sub-path info for the current platform.
52 """
53 system = platform.system()
54 try:
55 return PLATFORM_SPECS[system]
56 except KeyError as ex:
57 raise Exception(f"Platform {system!r} not supported.") from ex
58
59
60 PIP_PACKAGES = ("pip", "setuptools==57.5.0", "wheel")
61
62
63 class VirtualEnv:
64 """Info about a single virtual environment."""
65
66 def __init__(self, root: Path):
67 """Initialize the `VirtualEnv` instance.
68
69 Args:
70 root: The root directory of the virtual environment.
71 """
72 self.root = root.resolve()
73 self.specs = venv_platform_specs()
74
75 def __getattr__(self, key: str):
76 """Get a specific attribute from this instance.
77
78 Used to provide `VenvSpecs` attributes for this specific virtual environment.
79
80 Args:
81 key: The attribute name. Must be one of the `VenvSpecs` attributes.
82
83 Returns:
84 The root directory of this virtual environment joined to the requested
85 platform-specific path using this platform's `VenvSpecs` instance.
86 """
87 return self.root / getattr(self.specs, key)
88
89 def __str__(self):
90 """_summary_.
91
92 Returns:
93 _description_.
94 """
95 return str(self.root)
96
97
98 async def exec_async(*args, **kwargs) -> Process:
99 """Run an executable asyncronously in a subprocess.
100
101 Args:
102 args: Positional arguments for `asyncio.create_subprocess_exec`.
103 kwargs: Keyword arguments for `asyncio.create_subprocess_exec`.
104
105 Raises:
106 AsyncSubprocessError: The command failed.
107
108 Returns:
109 The subprocess.
110 """
111 run = await asyncio.create_subprocess_exec(
112 *args,
113 stdout=subprocess.PIPE,
114 stderr=subprocess.PIPE,
115 **kwargs,
116 )
117 await run.wait()
118
119 if run.returncode != 0:
120 raise AsyncSubprocessError("Command failed", run)
121
122 return run
123
124
125 def fingerprint(pip_install_args: Iterable[str]) -> str:
126 """Generate a hash identifying pip install args.
127
128 Arguments are sorted and deduplicated before the hash is generated.
129
130 Args:
131 pip_install_args: Arguments for `pip install`.
132
133 Returns:
134 The SHA256 hash hex digest of the sorted set of pip install args.
135 """
136 return hashlib.sha256(" ".join(sorted(set(pip_install_args))).encode()).hexdigest()
137
138
139 class VenvService: # noqa: WPS214
140 """Manages virtual environments.
141
142 The methods in this class are not threadsafe.
143 """
144
145 def __init__(self, project: Project, namespace: str = "", name: str = ""):
146 """Initialize the `VenvService`.
147
148 Args:
149 project: The Meltano project.
150 namespace: The namespace for the venv, e.g. a Plugin type.
151 name: The name of the venv, e.g. a Plugin name.
152 """
153 self.project = project
154 self.namespace = namespace
155 self.name = name
156 self.venv = VirtualEnv(self.project.venvs_dir(namespace, name))
157 self.plugin_fingerprint_path = self.venv.root / ".meltano_plugin_fingerprint"
158
159 async def install(self, pip_install_args: list[str], clean: bool = False) -> None:
160 """Configure a virtual environment, then run pip install with the given args.
161
162 Args:
163 pip_install_args: Arguments passed to `pip install`.
164 clean: Whether to not attempt to use an existing virtual environment.
165 """
166 if not clean and self.requires_clean_install(pip_install_args):
167 logger.debug(
168 f"Packages for '{self.namespace}/{self.name}' have changed so performing a clean install."
169 )
170 clean = True
171
172 self.clean_run_files()
173 await self._pip_install(pip_install_args=pip_install_args, clean=clean)
174 self.write_fingerprint(pip_install_args)
175
176 def requires_clean_install(self, pip_install_args: list[str]) -> bool:
177 """Determine whether a clean install is needed.
178
179 Args:
180 pip_install_args: The arguments being passed to `pip install`, used
181 for fingerprinting the installation.
182
183 Returns:
184 Whether virtual environment doesn't exist or can't be reused.
185 """
186 # A generator function is used to perform the checks lazily
187 def checks():
188 # The Python installation used to create this venv no longer exists
189 yield not self.exec_path("python").exists()
190 # The deprecated `meltano_venv.pth` feature is used by this venv
191 yield self.venv.site_packages_dir.joinpath("meltano_venv.pth").exists()
192 # The fingerprint of the venv does not match the pip install args
193 existing_fingerprint = self.read_fingerprint()
194 yield existing_fingerprint is None
195 yield existing_fingerprint != fingerprint(pip_install_args)
196
197 return any(checks())
198
199 def clean_run_files(self) -> None:
200 """Destroy cached configuration files, if they exist."""
201 try:
202 shutil.rmtree(self.project.run_dir(self.name, make_dirs=False))
203 except FileNotFoundError:
204 logger.debug("No cached configuration files to remove")
205
206 def clean(self) -> None:
207 """Destroy the virtual environment, if it exists."""
208 try:
209 shutil.rmtree(self.venv.root)
210 logger.debug(
211 "Removed old virtual environment for '%s/%s'", # noqa: WPS323
212 self.namespace,
213 self.name,
214 )
215 except FileNotFoundError:
216 # If the VirtualEnv has never been created before do nothing
217 logger.debug("No old virtual environment to remove")
218
219 async def create(self) -> Process:
220 """Create a new virtual environment.
221
222 Raises:
223 AsyncSubprocessError: The virtual environment could not be created.
224
225 Returns:
226 The Python process creating the virtual environment.
227 """
228 logger.debug(f"Creating virtual environment for '{self.namespace}/{self.name}'")
229 try:
230 return await exec_async(sys.executable, "-m", "venv", str(self.venv))
231 except AsyncSubprocessError as err:
232 raise AsyncSubprocessError(
233 f"Could not create the virtualenv for '{self.namespace}/{self.name}'",
234 err.process,
235 ) from err
236
237 async def upgrade_pip(self) -> Process:
238 """Upgrade the `pip` package to the latest version in the virtual environment.
239
240 Raises:
241 AsyncSubprocessError: Failed to upgrade pip to the latest version.
242
243 Returns:
244 The process running `pip install --upgrade ...`.
245 """
246 logger.debug(f"Upgrading pip for '{self.namespace}/{self.name}'")
247 try:
248 return await self._pip_install(["--upgrade", *PIP_PACKAGES])
249 except AsyncSubprocessError as err:
250 raise AsyncSubprocessError(
251 "Failed to upgrade pip to the latest version.", err.process
252 ) from err
253
254 def read_fingerprint(self) -> str | None:
255 """Get the fingerprint of the existing virtual environment.
256
257 Returns:
258 The fingerprint of the existing virtual environment if it exists.
259 `None` otherwise.
260 """
261 if not self.plugin_fingerprint_path.exists():
262 return None
263 with open(self.plugin_fingerprint_path) as fingerprint_file:
264 return fingerprint_file.read()
265
266 def write_fingerprint(self, pip_install_args: list[str]) -> None:
267 """Save the fingerprint for this installation.
268
269 Args:
270 pip_install_args: The arguments being passed to `pip install`.
271 """
272 with open(self.plugin_fingerprint_path, "w") as fingerprint_file:
273 fingerprint_file.write(fingerprint(pip_install_args))
274
275 def exec_path(self, executable: str) -> Path:
276 """Return the absolute path for the given executable in the virtual environment.
277
278 Args:
279 executable: The path to the executable relative to the venv bin directory.
280
281 Returns:
282 The venv bin directory joined to the provided executable.
283 """
284 absolute_executable = self.venv.bin_dir / executable
285 if platform.system() != "Windows":
286 return absolute_executable
287
288 # On Windows, try using the '.exe' suffixed version if it exists. Use the
289 # regular executable path as a fallback (and for backwards compatibility).
290 absolute_executable_windows = absolute_executable.with_suffix(".exe")
291 return (
292 absolute_executable_windows
293 if absolute_executable_windows.exists()
294 else absolute_executable
295 )
296
297 async def _pip_install(
298 self, pip_install_args: list[str], clean: bool = False
299 ) -> Process:
300 """Install a package using `pip` in the proper virtual environment.
301
302 Args:
303 pip_install_args: The arguments to pass to `pip install`.
304 clean: Whether the installation should be done in a clean venv.
305
306 Raises:
307 AsyncSubprocessError: The command failed.
308
309 Returns:
310 The process running `pip install` with the provided args.
311 """
312 if clean:
313 self.clean()
314 await self.create()
315 await self.upgrade_pip()
316
317 pip_install_args_str = " ".join(pip_install_args)
318 log_msg_prefix = (
319 f"Upgrading with args {pip_install_args_str!r} in existing"
320 if "--upgrade" in pip_install_args
321 else f"Installing with args {pip_install_args_str!r} into"
322 )
323 logger.debug(
324 f"{log_msg_prefix} virtual environment for '{self.namespace}/{self.name}'"
325 )
326
327 try:
328 return await exec_async(
329 str(self.exec_path("python")), "-m", "pip", "install", *pip_install_args
330 )
331 except AsyncSubprocessError as err:
332 raise AsyncSubprocessError(
333 f"Failed to install plugin '{self.name}'.", err.process
334 ) from err
335
[end of src/meltano/core/venv_service.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/meltano/core/venv_service.py b/src/meltano/core/venv_service.py
--- a/src/meltano/core/venv_service.py
+++ b/src/meltano/core/venv_service.py
@@ -227,7 +227,10 @@
"""
logger.debug(f"Creating virtual environment for '{self.namespace}/{self.name}'")
try:
- return await exec_async(sys.executable, "-m", "venv", str(self.venv))
+ return await exec_async(
+ str(Path(sys.executable).parent / "virtualenv"),
+ str(self.venv),
+ )
except AsyncSubprocessError as err:
raise AsyncSubprocessError(
f"Could not create the virtualenv for '{self.namespace}/{self.name}'",
|
{"golden_diff": "diff --git a/src/meltano/core/venv_service.py b/src/meltano/core/venv_service.py\n--- a/src/meltano/core/venv_service.py\n+++ b/src/meltano/core/venv_service.py\n@@ -227,7 +227,10 @@\n \"\"\"\n logger.debug(f\"Creating virtual environment for '{self.namespace}/{self.name}'\")\n try:\n- return await exec_async(sys.executable, \"-m\", \"venv\", str(self.venv))\n+ return await exec_async(\n+ str(Path(sys.executable).parent / \"virtualenv\"),\n+ str(self.venv),\n+ )\n except AsyncSubprocessError as err:\n raise AsyncSubprocessError(\n f\"Could not create the virtualenv for '{self.namespace}/{self.name}'\",\n", "issue": "Proposed: Should we add explicit `virtualenv` dependency\nFollows from:\r\n\r\n- https://github.com/meltano/meltano/issues/6882\r\n\r\nWe used to recommend _first_ installing `virtualenv`, then using `virtualenv` to install Meltano. This meant that Meltano was able to use `virtualenv` for plugin installation even without any explicit dependency declaration.\r\n\r\nToday, if the user installs with `pipx`, then they also get `virtualenv` installed, since `pipx` ships with `virtualenv`.\r\n\r\nQuestion: Should we just create an explicit `virtualenv` dependency so we can be sure it is available during plugin installation?\r\n\r\nA valid scenario where `pip` would be fine for Meltano installation would be on a new+dedicated docker image. Presumably, that would be a safe time to install with `pip` and skip `pipx` installation - without requiring virtualenv isolation for Meltano itself. However, even in those cases, Meltano still needs `virtualenv` for plugin installation, and plugin installations would likely fail unless the image already contains `virtualenv` installed.\r\n\n", "before_files": [{"content": "\"\"\"Manage Python virtual environments.\"\"\"\n\nfrom __future__ import annotations\n\nimport asyncio\nimport hashlib\nimport logging\nimport os\nimport platform\nimport shutil\nimport subprocess\nimport sys\nfrom asyncio.subprocess import Process\nfrom collections import namedtuple\nfrom collections.abc import Iterable\nfrom pathlib import Path\n\nfrom meltano.core.error import AsyncSubprocessError\nfrom meltano.core.project import Project\n\nlogger = logging.getLogger(__name__)\n\nVenvSpecs = namedtuple(\"VenvSpecs\", (\"lib_dir\", \"bin_dir\", \"site_packages_dir\"))\n\nPOSIX = VenvSpecs(\n lib_dir=\"lib\",\n bin_dir=\"bin\",\n site_packages_dir=os.path.join(\n \"lib\",\n f\"python{'.'.join(str(part) for part in sys.version_info[:2])}\",\n \"site-packages\",\n ),\n)\n\nNT = VenvSpecs(\n lib_dir=\"Lib\",\n bin_dir=\"Scripts\",\n site_packages_dir=os.path.join(\"Lib\", \"site-packages\"),\n)\n\nPLATFORM_SPECS = {\"Linux\": POSIX, \"Darwin\": POSIX, \"Windows\": NT}\n\n\ndef venv_platform_specs():\n \"\"\"Get virtual environment sub-path info for the current platform.\n\n Raises:\n Exception: This platform is not supported.\n\n Returns:\n Virtual environment sub-path info for the current platform.\n \"\"\"\n system = platform.system()\n try:\n return PLATFORM_SPECS[system]\n except KeyError as ex:\n raise Exception(f\"Platform {system!r} not supported.\") from ex\n\n\nPIP_PACKAGES = (\"pip\", \"setuptools==57.5.0\", \"wheel\")\n\n\nclass VirtualEnv:\n \"\"\"Info about a single virtual environment.\"\"\"\n\n def __init__(self, root: Path):\n \"\"\"Initialize the `VirtualEnv` instance.\n\n Args:\n root: The root directory of the virtual environment.\n \"\"\"\n self.root = root.resolve()\n self.specs = venv_platform_specs()\n\n def __getattr__(self, key: str):\n 
\"\"\"Get a specific attribute from this instance.\n\n Used to provide `VenvSpecs` attributes for this specific virtual environment.\n\n Args:\n key: The attribute name. Must be one of the `VenvSpecs` attributes.\n\n Returns:\n The root directory of this virtual environment joined to the requested\n platform-specific path using this platform's `VenvSpecs` instance.\n \"\"\"\n return self.root / getattr(self.specs, key)\n\n def __str__(self):\n \"\"\"_summary_.\n\n Returns:\n _description_.\n \"\"\"\n return str(self.root)\n\n\nasync def exec_async(*args, **kwargs) -> Process:\n \"\"\"Run an executable asyncronously in a subprocess.\n\n Args:\n args: Positional arguments for `asyncio.create_subprocess_exec`.\n kwargs: Keyword arguments for `asyncio.create_subprocess_exec`.\n\n Raises:\n AsyncSubprocessError: The command failed.\n\n Returns:\n The subprocess.\n \"\"\"\n run = await asyncio.create_subprocess_exec(\n *args,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n **kwargs,\n )\n await run.wait()\n\n if run.returncode != 0:\n raise AsyncSubprocessError(\"Command failed\", run)\n\n return run\n\n\ndef fingerprint(pip_install_args: Iterable[str]) -> str:\n \"\"\"Generate a hash identifying pip install args.\n\n Arguments are sorted and deduplicated before the hash is generated.\n\n Args:\n pip_install_args: Arguments for `pip install`.\n\n Returns:\n The SHA256 hash hex digest of the sorted set of pip install args.\n \"\"\"\n return hashlib.sha256(\" \".join(sorted(set(pip_install_args))).encode()).hexdigest()\n\n\nclass VenvService: # noqa: WPS214\n \"\"\"Manages virtual environments.\n\n The methods in this class are not threadsafe.\n \"\"\"\n\n def __init__(self, project: Project, namespace: str = \"\", name: str = \"\"):\n \"\"\"Initialize the `VenvService`.\n\n Args:\n project: The Meltano project.\n namespace: The namespace for the venv, e.g. a Plugin type.\n name: The name of the venv, e.g. 
a Plugin name.\n \"\"\"\n self.project = project\n self.namespace = namespace\n self.name = name\n self.venv = VirtualEnv(self.project.venvs_dir(namespace, name))\n self.plugin_fingerprint_path = self.venv.root / \".meltano_plugin_fingerprint\"\n\n async def install(self, pip_install_args: list[str], clean: bool = False) -> None:\n \"\"\"Configure a virtual environment, then run pip install with the given args.\n\n Args:\n pip_install_args: Arguments passed to `pip install`.\n clean: Whether to not attempt to use an existing virtual environment.\n \"\"\"\n if not clean and self.requires_clean_install(pip_install_args):\n logger.debug(\n f\"Packages for '{self.namespace}/{self.name}' have changed so performing a clean install.\"\n )\n clean = True\n\n self.clean_run_files()\n await self._pip_install(pip_install_args=pip_install_args, clean=clean)\n self.write_fingerprint(pip_install_args)\n\n def requires_clean_install(self, pip_install_args: list[str]) -> bool:\n \"\"\"Determine whether a clean install is needed.\n\n Args:\n pip_install_args: The arguments being passed to `pip install`, used\n for fingerprinting the installation.\n\n Returns:\n Whether virtual environment doesn't exist or can't be reused.\n \"\"\"\n # A generator function is used to perform the checks lazily\n def checks():\n # The Python installation used to create this venv no longer exists\n yield not self.exec_path(\"python\").exists()\n # The deprecated `meltano_venv.pth` feature is used by this venv\n yield self.venv.site_packages_dir.joinpath(\"meltano_venv.pth\").exists()\n # The fingerprint of the venv does not match the pip install args\n existing_fingerprint = self.read_fingerprint()\n yield existing_fingerprint is None\n yield existing_fingerprint != fingerprint(pip_install_args)\n\n return any(checks())\n\n def clean_run_files(self) -> None:\n \"\"\"Destroy cached configuration files, if they exist.\"\"\"\n try:\n shutil.rmtree(self.project.run_dir(self.name, make_dirs=False))\n except FileNotFoundError:\n logger.debug(\"No cached configuration files to remove\")\n\n def clean(self) -> None:\n \"\"\"Destroy the virtual environment, if it exists.\"\"\"\n try:\n shutil.rmtree(self.venv.root)\n logger.debug(\n \"Removed old virtual environment for '%s/%s'\", # noqa: WPS323\n self.namespace,\n self.name,\n )\n except FileNotFoundError:\n # If the VirtualEnv has never been created before do nothing\n logger.debug(\"No old virtual environment to remove\")\n\n async def create(self) -> Process:\n \"\"\"Create a new virtual environment.\n\n Raises:\n AsyncSubprocessError: The virtual environment could not be created.\n\n Returns:\n The Python process creating the virtual environment.\n \"\"\"\n logger.debug(f\"Creating virtual environment for '{self.namespace}/{self.name}'\")\n try:\n return await exec_async(sys.executable, \"-m\", \"venv\", str(self.venv))\n except AsyncSubprocessError as err:\n raise AsyncSubprocessError(\n f\"Could not create the virtualenv for '{self.namespace}/{self.name}'\",\n err.process,\n ) from err\n\n async def upgrade_pip(self) -> Process:\n \"\"\"Upgrade the `pip` package to the latest version in the virtual environment.\n\n Raises:\n AsyncSubprocessError: Failed to upgrade pip to the latest version.\n\n Returns:\n The process running `pip install --upgrade ...`.\n \"\"\"\n logger.debug(f\"Upgrading pip for '{self.namespace}/{self.name}'\")\n try:\n return await self._pip_install([\"--upgrade\", *PIP_PACKAGES])\n except AsyncSubprocessError as err:\n raise AsyncSubprocessError(\n \"Failed 
to upgrade pip to the latest version.\", err.process\n ) from err\n\n def read_fingerprint(self) -> str | None:\n \"\"\"Get the fingerprint of the existing virtual environment.\n\n Returns:\n The fingerprint of the existing virtual environment if it exists.\n `None` otherwise.\n \"\"\"\n if not self.plugin_fingerprint_path.exists():\n return None\n with open(self.plugin_fingerprint_path) as fingerprint_file:\n return fingerprint_file.read()\n\n def write_fingerprint(self, pip_install_args: list[str]) -> None:\n \"\"\"Save the fingerprint for this installation.\n\n Args:\n pip_install_args: The arguments being passed to `pip install`.\n \"\"\"\n with open(self.plugin_fingerprint_path, \"w\") as fingerprint_file:\n fingerprint_file.write(fingerprint(pip_install_args))\n\n def exec_path(self, executable: str) -> Path:\n \"\"\"Return the absolute path for the given executable in the virtual environment.\n\n Args:\n executable: The path to the executable relative to the venv bin directory.\n\n Returns:\n The venv bin directory joined to the provided executable.\n \"\"\"\n absolute_executable = self.venv.bin_dir / executable\n if platform.system() != \"Windows\":\n return absolute_executable\n\n # On Windows, try using the '.exe' suffixed version if it exists. Use the\n # regular executable path as a fallback (and for backwards compatibility).\n absolute_executable_windows = absolute_executable.with_suffix(\".exe\")\n return (\n absolute_executable_windows\n if absolute_executable_windows.exists()\n else absolute_executable\n )\n\n async def _pip_install(\n self, pip_install_args: list[str], clean: bool = False\n ) -> Process:\n \"\"\"Install a package using `pip` in the proper virtual environment.\n\n Args:\n pip_install_args: The arguments to pass to `pip install`.\n clean: Whether the installation should be done in a clean venv.\n\n Raises:\n AsyncSubprocessError: The command failed.\n\n Returns:\n The process running `pip install` with the provided args.\n \"\"\"\n if clean:\n self.clean()\n await self.create()\n await self.upgrade_pip()\n\n pip_install_args_str = \" \".join(pip_install_args)\n log_msg_prefix = (\n f\"Upgrading with args {pip_install_args_str!r} in existing\"\n if \"--upgrade\" in pip_install_args\n else f\"Installing with args {pip_install_args_str!r} into\"\n )\n logger.debug(\n f\"{log_msg_prefix} virtual environment for '{self.namespace}/{self.name}'\"\n )\n\n try:\n return await exec_async(\n str(self.exec_path(\"python\")), \"-m\", \"pip\", \"install\", *pip_install_args\n )\n except AsyncSubprocessError as err:\n raise AsyncSubprocessError(\n f\"Failed to install plugin '{self.name}'.\", err.process\n ) from err\n", "path": "src/meltano/core/venv_service.py"}]}
| 4,074 | 178 |
gh_patches_debug_18523 | rasdani/github-patches | git_diff | cloudtools__troposphere-1168 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add support for DockerVolumeConfiguration in Volume property of AWS::ECS::TaskDefinition
See here for more details: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ecs-taskdefinition-dockervolumeconfiguration.html
</issue>
<code>
[start of troposphere/ecs.py]
1 from . import AWSObject, AWSProperty
2 from .validators import boolean, integer, network_port, positive_integer
3
4
5 LAUNCH_TYPE_EC2 = 'EC2'
6 LAUNCH_TYPE_FARGATE = 'FARGATE'
7
8 SCHEDULING_STRATEGY_REPLICA = 'REPLICA'
9 SCHEDULING_STRATEGY_DAEMON = 'DAEMON'
10
11
12 class Cluster(AWSObject):
13 resource_type = "AWS::ECS::Cluster"
14
15 props = {
16 'ClusterName': (basestring, False),
17 }
18
19
20 class LoadBalancer(AWSProperty):
21 props = {
22 'ContainerName': (basestring, False),
23 'ContainerPort': (network_port, True),
24 'LoadBalancerName': (basestring, False),
25 'TargetGroupArn': (basestring, False),
26 }
27
28
29 class DeploymentConfiguration(AWSProperty):
30 props = {
31 'MaximumPercent': (positive_integer, False),
32 'MinimumHealthyPercent': (positive_integer, False),
33 }
34
35
36 def placement_strategy_validator(x):
37 valid_values = ['random', 'spread', 'binpack']
38 if x not in valid_values:
39 raise ValueError("Placement Strategy type must be one of: %s" %
40 ', '.join(valid_values))
41 return x
42
43
44 def placement_constraint_validator(x):
45 valid_values = ['distinctInstance', 'memberOf']
46 if x not in valid_values:
47 raise ValueError("Placement Constraint type must be one of: %s" %
48 ', '.join(valid_values))
49 return x
50
51
52 class PlacementConstraint(AWSProperty):
53 props = {
54 'Type': (placement_constraint_validator, True),
55 'Expression': (basestring, False),
56 }
57
58
59 class PlacementStrategy(AWSProperty):
60 props = {
61 'Type': (placement_strategy_validator, True),
62 'Field': (basestring, False),
63 }
64
65
66 class AwsvpcConfiguration(AWSProperty):
67 props = {
68 'AssignPublicIp': (basestring, False),
69 'SecurityGroups': (list, False),
70 'Subnets': (list, True),
71 }
72
73
74 class NetworkConfiguration(AWSProperty):
75 props = {
76 'AwsvpcConfiguration': (AwsvpcConfiguration, False),
77 }
78
79
80 def launch_type_validator(x):
81 valid_values = [LAUNCH_TYPE_EC2, LAUNCH_TYPE_FARGATE]
82 if x not in valid_values:
83 raise ValueError("Launch Type must be one of: %s" %
84 ', '.join(valid_values))
85 return x
86
87
88 class ServiceRegistry(AWSProperty):
89 props = {
90 'Port': (integer, False),
91 'RegistryArn': (basestring, False),
92 }
93
94
95 class Service(AWSObject):
96 resource_type = "AWS::ECS::Service"
97
98 props = {
99 'Cluster': (basestring, False),
100 'DeploymentConfiguration': (DeploymentConfiguration, False),
101 'DesiredCount': (positive_integer, False),
102 'HealthCheckGracePeriodSeconds': (positive_integer, False),
103 'LaunchType': (launch_type_validator, False),
104 'LoadBalancers': ([LoadBalancer], False),
105 'NetworkConfiguration': (NetworkConfiguration, False),
106 'Role': (basestring, False),
107 'PlacementConstraints': ([PlacementConstraint], False),
108 'PlacementStrategies': ([PlacementStrategy], False),
109 'PlatformVersion': (basestring, False),
110 'SchedulingStrategy': (basestring, False),
111 'ServiceName': (basestring, False),
112 'ServiceRegistries': ([ServiceRegistry], False),
113 'TaskDefinition': (basestring, True),
114 }
115
116
117 class Environment(AWSProperty):
118 props = {
119 'Name': (basestring, True),
120 'Value': (basestring, True),
121 }
122
123
124 class MountPoint(AWSProperty):
125 props = {
126 'ContainerPath': (basestring, True),
127 'SourceVolume': (basestring, True),
128 'ReadOnly': (boolean, False),
129 }
130
131
132 class PortMapping(AWSProperty):
133 props = {
134 'ContainerPort': (network_port, True),
135 'HostPort': (network_port, False),
136 'Protocol': (basestring, False),
137 }
138
139
140 class VolumesFrom(AWSProperty):
141 props = {
142 'SourceContainer': (basestring, True),
143 'ReadOnly': (boolean, False),
144 }
145
146
147 class HostEntry(AWSProperty):
148 props = {
149 'Hostname': (basestring, True),
150 'IpAddress': (basestring, True),
151 }
152
153
154 class Device(AWSProperty):
155 props = {
156 'ContainerPath': (basestring, False),
157 'HostPath': (basestring, False),
158 'Permissions': ([basestring], False),
159 }
160
161
162 class HealthCheck(AWSProperty):
163 props = {
164 'Command': ([basestring], True),
165 'Interval': (integer, False),
166 'Retries': (integer, False),
167 'StartPeriod': (integer, False),
168 'Timeout': (integer, False),
169 }
170
171
172 class KernelCapabilities(AWSProperty):
173 props = {
174 'Add': ([basestring], False),
175 'Drop': ([basestring], False),
176 }
177
178
179 class LinuxParameters(AWSProperty):
180 props = {
181 'Capabilities': (KernelCapabilities, False),
182 'Devices': ([Device], False),
183 'InitProcessEnabled': (boolean, False),
184 }
185
186
187 class LogConfiguration(AWSProperty):
188 props = {
189 'LogDriver': (basestring, True),
190 'Options': (dict, False),
191 }
192
193
194 class Ulimit(AWSProperty):
195 props = {
196 'HardLimit': (integer, True),
197 'Name': (basestring, False),
198 'SoftLimit': (integer, True),
199 }
200
201
202 class ContainerDefinition(AWSProperty):
203 props = {
204 'Command': ([basestring], False),
205 'Cpu': (positive_integer, False),
206 'DisableNetworking': (boolean, False),
207 'DnsSearchDomains': ([basestring], False),
208 'DnsServers': ([basestring], False),
209 'DockerLabels': (dict, False),
210 'DockerSecurityOptions': ([basestring], False),
211 'EntryPoint': ([basestring], False),
212 'Environment': ([Environment], False),
213 'Essential': (boolean, False),
214 'ExtraHosts': ([HostEntry], False),
215 'HealthCheck': (HealthCheck, False),
216 'Hostname': (basestring, False),
217 'Image': (basestring, True),
218 'Links': ([basestring], False),
219 'LinuxParameters': (LinuxParameters, False),
220 'LogConfiguration': (LogConfiguration, False),
221 'Memory': (positive_integer, False),
222 'MemoryReservation': (positive_integer, False),
223 'MountPoints': ([MountPoint], False),
224 'Name': (basestring, True),
225 'PortMappings': ([PortMapping], False),
226 'Privileged': (boolean, False),
227 'ReadonlyRootFilesystem': (boolean, False),
228 'Ulimits': ([Ulimit], False),
229 'User': (basestring, False),
230 'VolumesFrom': ([VolumesFrom], False),
231 'WorkingDirectory': (basestring, False),
232 }
233
234
235 class Host(AWSProperty):
236 props = {
237 'SourcePath': (basestring, False),
238 }
239
240
241 class Volume(AWSProperty):
242 props = {
243 'Name': (basestring, True),
244 'Host': (Host, False),
245 }
246
247
248 class TaskDefinition(AWSObject):
249 resource_type = "AWS::ECS::TaskDefinition"
250
251 props = {
252 'ContainerDefinitions': ([ContainerDefinition], True),
253 'Cpu': (basestring, False),
254 'ExecutionRoleArn': (basestring, False),
255 'Family': (basestring, False),
256 'Memory': (basestring, False),
257 'NetworkMode': (basestring, False),
258 'PlacementConstraints': ([PlacementConstraint], False),
259 'RequiresCompatibilities': ([basestring], False),
260 'TaskRoleArn': (basestring, False),
261 'Volumes': ([Volume], False),
262 }
263
[end of troposphere/ecs.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/troposphere/ecs.py b/troposphere/ecs.py
--- a/troposphere/ecs.py
+++ b/troposphere/ecs.py
@@ -49,6 +49,14 @@
return x
+def scope_validator(x):
+ valid_values = ['shared', 'task']
+ if x not in valid_values:
+ raise ValueError("Scope type must be one of: %s" %
+ ', '.join(valid_values))
+ return x
+
+
class PlacementConstraint(AWSProperty):
props = {
'Type': (placement_constraint_validator, True),
@@ -238,8 +246,19 @@
}
+class DockerVolumeConfiguration(AWSProperty):
+ props = {
+ 'Autoprovision': (boolean, False),
+ 'Driver': (basestring, False),
+ 'DriverOpts': ([basestring], False),
+ 'Labels': ([basestring], False),
+ 'Scope': (scope_validator, False)
+ }
+
+
class Volume(AWSProperty):
props = {
+ 'DockerVolumeConfiguration': (DockerVolumeConfiguration, False),
'Name': (basestring, True),
'Host': (Host, False),
}
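A hypothetical usage of the new property, based only on the fields added above (names and values are illustrative):

```python
from troposphere.ecs import DockerVolumeConfiguration, TaskDefinition, Volume

shared_volume = Volume(
    Name="shared-data",
    DockerVolumeConfiguration=DockerVolumeConfiguration(
        Autoprovision=True,
        Driver="local",
        Scope="shared",  # scope_validator accepts 'shared' or 'task'
    ),
)

task_definition = TaskDefinition(
    "ExampleTaskDefinition",
    ContainerDefinitions=[],
    Volumes=[shared_volume],
)
```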
|
{"golden_diff": "diff --git a/troposphere/ecs.py b/troposphere/ecs.py\n--- a/troposphere/ecs.py\n+++ b/troposphere/ecs.py\n@@ -49,6 +49,14 @@\n return x\n \n \n+def scope_validator(x):\n+ valid_values = ['shared', 'task']\n+ if x not in valid_values:\n+ raise ValueError(\"Scope type must be one of: %s\" %\n+ ', '.join(valid_values))\n+ return x\n+\n+\n class PlacementConstraint(AWSProperty):\n props = {\n 'Type': (placement_constraint_validator, True),\n@@ -238,8 +246,19 @@\n }\n \n \n+class DockerVolumeConfiguration(AWSProperty):\n+ props = {\n+ 'Autoprovision': (boolean, False),\n+ 'Driver': (basestring, False),\n+ 'DriverOpts': ([basestring], False),\n+ 'Labels': ([basestring], False),\n+ 'Scope': (scope_validator, False)\n+ }\n+\n+\n class Volume(AWSProperty):\n props = {\n+ 'DockerVolumeConfiguration': (DockerVolumeConfiguration, False),\n 'Name': (basestring, True),\n 'Host': (Host, False),\n }\n", "issue": "Add support for DockerVolumeConfiguration in Volume property of AWS::ECS::TaskDefinition\nSee here for more details: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ecs-taskdefinition-dockervolumeconfiguration.html\n", "before_files": [{"content": "from . import AWSObject, AWSProperty\nfrom .validators import boolean, integer, network_port, positive_integer\n\n\nLAUNCH_TYPE_EC2 = 'EC2'\nLAUNCH_TYPE_FARGATE = 'FARGATE'\n\nSCHEDULING_STRATEGY_REPLICA = 'REPLICA'\nSCHEDULING_STRATEGY_DAEMON = 'DAEMON'\n\n\nclass Cluster(AWSObject):\n resource_type = \"AWS::ECS::Cluster\"\n\n props = {\n 'ClusterName': (basestring, False),\n }\n\n\nclass LoadBalancer(AWSProperty):\n props = {\n 'ContainerName': (basestring, False),\n 'ContainerPort': (network_port, True),\n 'LoadBalancerName': (basestring, False),\n 'TargetGroupArn': (basestring, False),\n }\n\n\nclass DeploymentConfiguration(AWSProperty):\n props = {\n 'MaximumPercent': (positive_integer, False),\n 'MinimumHealthyPercent': (positive_integer, False),\n }\n\n\ndef placement_strategy_validator(x):\n valid_values = ['random', 'spread', 'binpack']\n if x not in valid_values:\n raise ValueError(\"Placement Strategy type must be one of: %s\" %\n ', '.join(valid_values))\n return x\n\n\ndef placement_constraint_validator(x):\n valid_values = ['distinctInstance', 'memberOf']\n if x not in valid_values:\n raise ValueError(\"Placement Constraint type must be one of: %s\" %\n ', '.join(valid_values))\n return x\n\n\nclass PlacementConstraint(AWSProperty):\n props = {\n 'Type': (placement_constraint_validator, True),\n 'Expression': (basestring, False),\n }\n\n\nclass PlacementStrategy(AWSProperty):\n props = {\n 'Type': (placement_strategy_validator, True),\n 'Field': (basestring, False),\n }\n\n\nclass AwsvpcConfiguration(AWSProperty):\n props = {\n 'AssignPublicIp': (basestring, False),\n 'SecurityGroups': (list, False),\n 'Subnets': (list, True),\n }\n\n\nclass NetworkConfiguration(AWSProperty):\n props = {\n 'AwsvpcConfiguration': (AwsvpcConfiguration, False),\n }\n\n\ndef launch_type_validator(x):\n valid_values = [LAUNCH_TYPE_EC2, LAUNCH_TYPE_FARGATE]\n if x not in valid_values:\n raise ValueError(\"Launch Type must be one of: %s\" %\n ', '.join(valid_values))\n return x\n\n\nclass ServiceRegistry(AWSProperty):\n props = {\n 'Port': (integer, False),\n 'RegistryArn': (basestring, False),\n }\n\n\nclass Service(AWSObject):\n resource_type = \"AWS::ECS::Service\"\n\n props = {\n 'Cluster': (basestring, False),\n 'DeploymentConfiguration': (DeploymentConfiguration, False),\n 'DesiredCount': (positive_integer, 
False),\n 'HealthCheckGracePeriodSeconds': (positive_integer, False),\n 'LaunchType': (launch_type_validator, False),\n 'LoadBalancers': ([LoadBalancer], False),\n 'NetworkConfiguration': (NetworkConfiguration, False),\n 'Role': (basestring, False),\n 'PlacementConstraints': ([PlacementConstraint], False),\n 'PlacementStrategies': ([PlacementStrategy], False),\n 'PlatformVersion': (basestring, False),\n 'SchedulingStrategy': (basestring, False),\n 'ServiceName': (basestring, False),\n 'ServiceRegistries': ([ServiceRegistry], False),\n 'TaskDefinition': (basestring, True),\n }\n\n\nclass Environment(AWSProperty):\n props = {\n 'Name': (basestring, True),\n 'Value': (basestring, True),\n }\n\n\nclass MountPoint(AWSProperty):\n props = {\n 'ContainerPath': (basestring, True),\n 'SourceVolume': (basestring, True),\n 'ReadOnly': (boolean, False),\n }\n\n\nclass PortMapping(AWSProperty):\n props = {\n 'ContainerPort': (network_port, True),\n 'HostPort': (network_port, False),\n 'Protocol': (basestring, False),\n }\n\n\nclass VolumesFrom(AWSProperty):\n props = {\n 'SourceContainer': (basestring, True),\n 'ReadOnly': (boolean, False),\n }\n\n\nclass HostEntry(AWSProperty):\n props = {\n 'Hostname': (basestring, True),\n 'IpAddress': (basestring, True),\n }\n\n\nclass Device(AWSProperty):\n props = {\n 'ContainerPath': (basestring, False),\n 'HostPath': (basestring, False),\n 'Permissions': ([basestring], False),\n }\n\n\nclass HealthCheck(AWSProperty):\n props = {\n 'Command': ([basestring], True),\n 'Interval': (integer, False),\n 'Retries': (integer, False),\n 'StartPeriod': (integer, False),\n 'Timeout': (integer, False),\n }\n\n\nclass KernelCapabilities(AWSProperty):\n props = {\n 'Add': ([basestring], False),\n 'Drop': ([basestring], False),\n }\n\n\nclass LinuxParameters(AWSProperty):\n props = {\n 'Capabilities': (KernelCapabilities, False),\n 'Devices': ([Device], False),\n 'InitProcessEnabled': (boolean, False),\n }\n\n\nclass LogConfiguration(AWSProperty):\n props = {\n 'LogDriver': (basestring, True),\n 'Options': (dict, False),\n }\n\n\nclass Ulimit(AWSProperty):\n props = {\n 'HardLimit': (integer, True),\n 'Name': (basestring, False),\n 'SoftLimit': (integer, True),\n }\n\n\nclass ContainerDefinition(AWSProperty):\n props = {\n 'Command': ([basestring], False),\n 'Cpu': (positive_integer, False),\n 'DisableNetworking': (boolean, False),\n 'DnsSearchDomains': ([basestring], False),\n 'DnsServers': ([basestring], False),\n 'DockerLabels': (dict, False),\n 'DockerSecurityOptions': ([basestring], False),\n 'EntryPoint': ([basestring], False),\n 'Environment': ([Environment], False),\n 'Essential': (boolean, False),\n 'ExtraHosts': ([HostEntry], False),\n 'HealthCheck': (HealthCheck, False),\n 'Hostname': (basestring, False),\n 'Image': (basestring, True),\n 'Links': ([basestring], False),\n 'LinuxParameters': (LinuxParameters, False),\n 'LogConfiguration': (LogConfiguration, False),\n 'Memory': (positive_integer, False),\n 'MemoryReservation': (positive_integer, False),\n 'MountPoints': ([MountPoint], False),\n 'Name': (basestring, True),\n 'PortMappings': ([PortMapping], False),\n 'Privileged': (boolean, False),\n 'ReadonlyRootFilesystem': (boolean, False),\n 'Ulimits': ([Ulimit], False),\n 'User': (basestring, False),\n 'VolumesFrom': ([VolumesFrom], False),\n 'WorkingDirectory': (basestring, False),\n }\n\n\nclass Host(AWSProperty):\n props = {\n 'SourcePath': (basestring, False),\n }\n\n\nclass Volume(AWSProperty):\n props = {\n 'Name': (basestring, True),\n 'Host': (Host, 
False),\n }\n\n\nclass TaskDefinition(AWSObject):\n resource_type = \"AWS::ECS::TaskDefinition\"\n\n props = {\n 'ContainerDefinitions': ([ContainerDefinition], True),\n 'Cpu': (basestring, False),\n 'ExecutionRoleArn': (basestring, False),\n 'Family': (basestring, False),\n 'Memory': (basestring, False),\n 'NetworkMode': (basestring, False),\n 'PlacementConstraints': ([PlacementConstraint], False),\n 'RequiresCompatibilities': ([basestring], False),\n 'TaskRoleArn': (basestring, False),\n 'Volumes': ([Volume], False),\n }\n", "path": "troposphere/ecs.py"}]}
| 3,017 | 274 |
gh_patches_debug_3522 | rasdani/github-patches | git_diff | pytorch__TensorRT-1953 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
✨[Converter] Implement aten::addmm
Torch op:
func: addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
Aten op: torch.ops.addmm.default
</issue>
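The op's semantics reduce to `beta * self + alpha * (mat1 @ mat2)`, so it can be lowered onto existing matmul and elementwise support. A plain-PyTorch reference sketch (illustrative only, not the TensorRT converter itself):

```python
import torch

def addmm_reference(input_, mat1, mat2, *, beta=1, alpha=1):
    # addmm(self, mat1, mat2, *, beta, alpha) == beta * self + alpha * (mat1 @ mat2)
    return torch.add(
        torch.mul(input_, beta), torch.mul(torch.matmul(mat1, mat2), alpha)
    )
```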
<code>
[start of py/torch_tensorrt/dynamo/backend/lowering/_decompositions.py]
1 import torch
2 from torch._decomp import register_decomposition, core_aten_decompositions
3
4
5 DECOMPOSITIONS = {**core_aten_decompositions()}
6
7 aten = torch.ops.aten
8
9
10 def replace_inplace_op(aten_op, outplace_op):
11 """Replace inplace operation with functional equivalent
12 Adapted from:
13 https://github.com/pytorch/pytorch/blob/3344d79e3f732dadd5c85b99a7aa1a022f187929/torch/_decomp/decompositions.py#L3355-L3361
14 """
15
16 @register_decomposition(aten_op, registry=DECOMPOSITIONS)
17 def inplace_op(*args, **kwargs):
18 out = outplace_op(*args, **kwargs)
19 return args[0].copy_(out)
20
21 return inplace_op
22
23
24 replace_inplace_op(aten.add_, aten.add)
25 replace_inplace_op(aten.addbmm_, aten.addbmm)
26 replace_inplace_op(aten.addmm_, aten.addmm)
27 replace_inplace_op(aten.addmv_, aten.addmv)
28 replace_inplace_op(aten.baddbmm_, aten.baddbmm)
29 replace_inplace_op(aten.cumprod_, aten.cumprod)
30 replace_inplace_op(aten.fill_, aten.fill)
31 replace_inplace_op(aten.gelu_, aten.gelu)
32 replace_inplace_op(aten.hardsigmoid_, aten.hardsigmoid)
33 replace_inplace_op(aten.index_put_, aten.index_put)
34 replace_inplace_op(aten.index_reduce_, aten.index_reduce)
35 replace_inplace_op(aten.logit_, aten.logit)
36 replace_inplace_op(aten.relu_, aten.relu)
37 replace_inplace_op(aten.renorm_, aten.renorm)
38 replace_inplace_op(aten.round_, aten.round)
39 replace_inplace_op(aten.scatter_, aten.scatter)
40 replace_inplace_op(aten.scatter_add_, aten.scatter_add)
41 replace_inplace_op(aten.scatter_reduce_, aten.scatter_reduce)
42
43
44 @register_decomposition(aten.std, registry=DECOMPOSITIONS)
45 def std_replacement(*args, **kwargs) -> torch.Tensor:
46 return torch.sqrt(torch.var(*args, **kwargs))
47
48
49 @register_decomposition(aten.rsqrt, registry=DECOMPOSITIONS)
50 def rsqrt_replacement(*args, **kwargs) -> torch.Tensor:
51 return torch.reciprocal(torch.sqrt(*args, **kwargs))
52
53
54 @register_decomposition(aten.alias, registry=DECOMPOSITIONS)
55 def alias_replacement(x: torch.Tensor) -> torch.Tensor:
56 return x
57
58
59 def get_decompositions():
60 return DECOMPOSITIONS
61
[end of py/torch_tensorrt/dynamo/backend/lowering/_decompositions.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/py/torch_tensorrt/dynamo/backend/lowering/_decompositions.py b/py/torch_tensorrt/dynamo/backend/lowering/_decompositions.py
--- a/py/torch_tensorrt/dynamo/backend/lowering/_decompositions.py
+++ b/py/torch_tensorrt/dynamo/backend/lowering/_decompositions.py
@@ -56,5 +56,14 @@
return x
+@register_decomposition(torch.ops.aten.addmm, registry=DECOMPOSITIONS)
+def addmm_replacement(
+ input_: torch.Tensor, mat1: torch.Tensor, mat2: torch.Tensor, *, beta=1, alpha=1
+) -> torch.Tensor:
+ return torch.add(
+ torch.mul(input_, beta), torch.mul(torch.matmul(mat1, mat2), alpha)
+ )
+
+
def get_decompositions():
return DECOMPOSITIONS
|
{"golden_diff": "diff --git a/py/torch_tensorrt/dynamo/backend/lowering/_decompositions.py b/py/torch_tensorrt/dynamo/backend/lowering/_decompositions.py\n--- a/py/torch_tensorrt/dynamo/backend/lowering/_decompositions.py\n+++ b/py/torch_tensorrt/dynamo/backend/lowering/_decompositions.py\n@@ -56,5 +56,14 @@\n return x\n \n \n+@register_decomposition(torch.ops.aten.addmm, registry=DECOMPOSITIONS)\n+def addmm_replacement(\n+ input_: torch.Tensor, mat1: torch.Tensor, mat2: torch.Tensor, *, beta=1, alpha=1\n+) -> torch.Tensor:\n+ return torch.add(\n+ torch.mul(input_, beta), torch.mul(torch.matmul(mat1, mat2), alpha)\n+ )\n+\n+\n def get_decompositions():\n return DECOMPOSITIONS\n", "issue": "\u2728[Converter] Implement aten::addmm\nTorch op:\r\nfunc: addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor\r\nAten op: torch.ops.addmm.default\n", "before_files": [{"content": "import torch\nfrom torch._decomp import register_decomposition, core_aten_decompositions\n\n\nDECOMPOSITIONS = {**core_aten_decompositions()}\n\naten = torch.ops.aten\n\n\ndef replace_inplace_op(aten_op, outplace_op):\n \"\"\"Replace inplace operation with functional equivalent\n Adapted from:\n https://github.com/pytorch/pytorch/blob/3344d79e3f732dadd5c85b99a7aa1a022f187929/torch/_decomp/decompositions.py#L3355-L3361\n \"\"\"\n\n @register_decomposition(aten_op, registry=DECOMPOSITIONS)\n def inplace_op(*args, **kwargs):\n out = outplace_op(*args, **kwargs)\n return args[0].copy_(out)\n\n return inplace_op\n\n\nreplace_inplace_op(aten.add_, aten.add)\nreplace_inplace_op(aten.addbmm_, aten.addbmm)\nreplace_inplace_op(aten.addmm_, aten.addmm)\nreplace_inplace_op(aten.addmv_, aten.addmv)\nreplace_inplace_op(aten.baddbmm_, aten.baddbmm)\nreplace_inplace_op(aten.cumprod_, aten.cumprod)\nreplace_inplace_op(aten.fill_, aten.fill)\nreplace_inplace_op(aten.gelu_, aten.gelu)\nreplace_inplace_op(aten.hardsigmoid_, aten.hardsigmoid)\nreplace_inplace_op(aten.index_put_, aten.index_put)\nreplace_inplace_op(aten.index_reduce_, aten.index_reduce)\nreplace_inplace_op(aten.logit_, aten.logit)\nreplace_inplace_op(aten.relu_, aten.relu)\nreplace_inplace_op(aten.renorm_, aten.renorm)\nreplace_inplace_op(aten.round_, aten.round)\nreplace_inplace_op(aten.scatter_, aten.scatter)\nreplace_inplace_op(aten.scatter_add_, aten.scatter_add)\nreplace_inplace_op(aten.scatter_reduce_, aten.scatter_reduce)\n\n\n@register_decomposition(aten.std, registry=DECOMPOSITIONS)\ndef std_replacement(*args, **kwargs) -> torch.Tensor:\n return torch.sqrt(torch.var(*args, **kwargs))\n\n\n@register_decomposition(aten.rsqrt, registry=DECOMPOSITIONS)\ndef rsqrt_replacement(*args, **kwargs) -> torch.Tensor:\n return torch.reciprocal(torch.sqrt(*args, **kwargs))\n\n\n@register_decomposition(aten.alias, registry=DECOMPOSITIONS)\ndef alias_replacement(x: torch.Tensor) -> torch.Tensor:\n return x\n\n\ndef get_decompositions():\n return DECOMPOSITIONS\n", "path": "py/torch_tensorrt/dynamo/backend/lowering/_decompositions.py"}]}
| 1,314 | 193 |
gh_patches_debug_65034 | rasdani/github-patches | git_diff | learningequality__kolibri-7238 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
import footer styling regression
### Observed behavior

### Expected behavior
button and text should be vertically centered, or the footer should be shorter in height
### Steps to reproduce
import
### Context
0.14 beta 3
</issue>
<code>
[start of kolibri/core/content/utils/channels.py]
1 import fnmatch
2 import logging
3 import os
4
5 from django.core.cache import cache
6 from sqlalchemy.exc import DatabaseError
7
8 from .paths import get_content_database_dir_path
9 from .sqlalchemybridge import Bridge
10 from kolibri.core.discovery.utils.filesystem import enumerate_mounted_disk_partitions
11 from kolibri.utils.uuids import is_valid_uuid
12
13 logger = logging.getLogger(__name__)
14
15
16 def get_channel_ids_for_content_dirs(content_dirs):
17 database_dir_paths = [
18 get_content_database_dir_path(contentfolder=path) for path in content_dirs
19 ]
20 channel_ids = set()
21 for path in database_dir_paths:
22 channel_ids.update(get_channel_ids_for_content_database_dir(path))
23 return list(channel_ids)
24
25
26 def get_channel_ids_for_content_database_dir(content_database_dir):
27 """
28 Returns a list of channel IDs for the channel databases that exist in a content database directory.
29 """
30
31 # immediately return an empty list if the content database directory doesn't exist
32 if not os.path.isdir(content_database_dir):
33 return []
34
35 # get a list of all the database files in the directory, and extract IDs
36 db_list = fnmatch.filter(os.listdir(content_database_dir), "*.sqlite3")
37 db_names = [db.split(".sqlite3", 1)[0] for db in db_list]
38
39 # determine which database names are valid, and only use those ones
40 valid_db_names = [name for name in db_names if is_valid_uuid(name)]
41 invalid_db_names = set(db_names) - set(valid_db_names)
42 if invalid_db_names:
43 logger.warning(
44 "Ignoring databases in content database directory '{directory}' with invalid names: {names}".format(
45 directory=content_database_dir, names=invalid_db_names
46 )
47 )
48
49 # nonexistent database files are created if we delete the files that have broken symbolic links;
50 # empty database files are created if we delete a database file while the server is running and connected to it;
51 # here, we delete and exclude such databases to avoid errors when we try to connect to them
52 db_files_to_remove = set({})
53 for db_name in valid_db_names:
54 filename = os.path.join(content_database_dir, "{}.sqlite3".format(db_name))
55 if not os.path.exists(filename) or os.path.getsize(filename) == 0:
56 db_files_to_remove.add(db_name)
57 os.remove(filename)
58
59 if db_files_to_remove:
60 err_msg = (
61 "Removing nonexistent or empty databases in content database directory "
62 "'{directory}' with IDs: {names}.\nPlease import the channels again."
63 )
64 logger.warning(
65 err_msg.format(directory=content_database_dir, names=db_files_to_remove)
66 )
67 valid_dbs = list(set(valid_db_names) - set(db_files_to_remove))
68
69 return valid_dbs
70
71
72 def enumerate_content_database_file_paths(content_database_dir):
73 full_dir_template = os.path.join(content_database_dir, "{}.sqlite3")
74 channel_ids = get_channel_ids_for_content_database_dir(content_database_dir)
75 return [full_dir_template.format(f) for f in channel_ids]
76
77
78 def read_channel_metadata_from_db_file(channeldbpath):
79 # import here to avoid circular imports whenever kolibri.core.content.models imports utils too
80 from kolibri.core.content.models import ChannelMetadata
81
82 source = Bridge(sqlite_file_path=channeldbpath)
83
84 ChannelMetadataClass = source.get_class(ChannelMetadata)
85
86 source_channel_metadata = source.session.query(ChannelMetadataClass).all()[0]
87
88 # Use the inferred version from the SQLAlchemy Bridge object, and set it as additional
89 # metadata on the channel data
90
91 source_channel_metadata.inferred_schema_version = source.schema_version
92
93 source.end()
94
95 # Adds an attribute `root_id` when `root_id` does not exist to match with
96 # the latest schema.
97 if not hasattr(source_channel_metadata, "root_id"):
98 setattr(
99 source_channel_metadata,
100 "root_id",
101 getattr(source_channel_metadata, "root_pk"),
102 )
103
104 return source_channel_metadata
105
106
107 def get_channels_for_data_folder(datafolder):
108 channels = []
109 for path in enumerate_content_database_file_paths(
110 get_content_database_dir_path(datafolder)
111 ):
112 try:
113 channel = read_channel_metadata_from_db_file(path)
114 except DatabaseError:
115 logger.warning(
116 "Tried to import channel from database file {}, but the file was corrupted.".format(
117 path
118 )
119 )
120 continue
121 channel_data = {
122 "path": path,
123 "id": channel.id,
124 "name": channel.name,
125 "description": channel.description,
126 "tagline": channel.tagline,
127 "thumbnail": channel.thumbnail,
128 "version": channel.version,
129 "root": channel.root_id,
130 "author": channel.author,
131 "last_updated": getattr(channel, "last_updated", None),
132 "lang_code": getattr(channel, "lang_code", None),
133 "lang_name": getattr(channel, "lang_name", None),
134 }
135 channels.append(channel_data)
136 return channels
137
138
139 # Use this to cache mounted drive information when
140 # it has already been fetched for querying by drive id
141 MOUNTED_DRIVES_CACHE_KEY = "mounted_drives_cache_key"
142
143
144 def get_mounted_drives_with_channel_info():
145 drives = enumerate_mounted_disk_partitions()
146 for drive in drives.values():
147 drive.metadata["channels"] = (
148 get_channels_for_data_folder(drive.datafolder) if drive.datafolder else []
149 )
150 cache.set(MOUNTED_DRIVES_CACHE_KEY, drives, 3600)
151 return drives
152
153
154 def get_mounted_drive_by_id(drive_id):
155 drives = cache.get(MOUNTED_DRIVES_CACHE_KEY)
156 if drives is None or drives.get(drive_id, None) is None:
157 drives = get_mounted_drives_with_channel_info()
158 return drives[drive_id]
159
[end of kolibri/core/content/utils/channels.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/kolibri/core/content/utils/channels.py b/kolibri/core/content/utils/channels.py
--- a/kolibri/core/content/utils/channels.py
+++ b/kolibri/core/content/utils/channels.py
@@ -123,7 +123,7 @@
"id": channel.id,
"name": channel.name,
"description": channel.description,
- "tagline": channel.tagline,
+ "tagline": getattr(channel, "tagline", ""),
"thumbnail": channel.thumbnail,
"version": channel.version,
"root": channel.root_id,
|
{"golden_diff": "diff --git a/kolibri/core/content/utils/channels.py b/kolibri/core/content/utils/channels.py\n--- a/kolibri/core/content/utils/channels.py\n+++ b/kolibri/core/content/utils/channels.py\n@@ -123,7 +123,7 @@\n \"id\": channel.id,\n \"name\": channel.name,\n \"description\": channel.description,\n- \"tagline\": channel.tagline,\n+ \"tagline\": getattr(channel, \"tagline\", \"\"),\n \"thumbnail\": channel.thumbnail,\n \"version\": channel.version,\n \"root\": channel.root_id,\n", "issue": "import footer styling regression\n\r\n\r\n### Observed behavior\r\n\r\n\r\n\r\n### Expected behavior\r\n\r\nbutton and text should be vertically centered, or the footer should be shorter in height\r\n\r\n\r\n\r\n### Steps to reproduce\r\n\r\nimport\r\n\r\n### Context\r\n\r\n\r\n0.14 beta 3\n", "before_files": [{"content": "import fnmatch\nimport logging\nimport os\n\nfrom django.core.cache import cache\nfrom sqlalchemy.exc import DatabaseError\n\nfrom .paths import get_content_database_dir_path\nfrom .sqlalchemybridge import Bridge\nfrom kolibri.core.discovery.utils.filesystem import enumerate_mounted_disk_partitions\nfrom kolibri.utils.uuids import is_valid_uuid\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_channel_ids_for_content_dirs(content_dirs):\n database_dir_paths = [\n get_content_database_dir_path(contentfolder=path) for path in content_dirs\n ]\n channel_ids = set()\n for path in database_dir_paths:\n channel_ids.update(get_channel_ids_for_content_database_dir(path))\n return list(channel_ids)\n\n\ndef get_channel_ids_for_content_database_dir(content_database_dir):\n \"\"\"\n Returns a list of channel IDs for the channel databases that exist in a content database directory.\n \"\"\"\n\n # immediately return an empty list if the content database directory doesn't exist\n if not os.path.isdir(content_database_dir):\n return []\n\n # get a list of all the database files in the directory, and extract IDs\n db_list = fnmatch.filter(os.listdir(content_database_dir), \"*.sqlite3\")\n db_names = [db.split(\".sqlite3\", 1)[0] for db in db_list]\n\n # determine which database names are valid, and only use those ones\n valid_db_names = [name for name in db_names if is_valid_uuid(name)]\n invalid_db_names = set(db_names) - set(valid_db_names)\n if invalid_db_names:\n logger.warning(\n \"Ignoring databases in content database directory '{directory}' with invalid names: {names}\".format(\n directory=content_database_dir, names=invalid_db_names\n )\n )\n\n # nonexistent database files are created if we delete the files that have broken symbolic links;\n # empty database files are created if we delete a database file while the server is running and connected to it;\n # here, we delete and exclude such databases to avoid errors when we try to connect to them\n db_files_to_remove = set({})\n for db_name in valid_db_names:\n filename = os.path.join(content_database_dir, \"{}.sqlite3\".format(db_name))\n if not os.path.exists(filename) or os.path.getsize(filename) == 0:\n db_files_to_remove.add(db_name)\n os.remove(filename)\n\n if db_files_to_remove:\n err_msg = (\n \"Removing nonexistent or empty databases in content database directory \"\n \"'{directory}' with IDs: {names}.\\nPlease import the channels again.\"\n )\n logger.warning(\n err_msg.format(directory=content_database_dir, names=db_files_to_remove)\n )\n valid_dbs = list(set(valid_db_names) - set(db_files_to_remove))\n\n return valid_dbs\n\n\ndef enumerate_content_database_file_paths(content_database_dir):\n full_dir_template = 
os.path.join(content_database_dir, \"{}.sqlite3\")\n channel_ids = get_channel_ids_for_content_database_dir(content_database_dir)\n return [full_dir_template.format(f) for f in channel_ids]\n\n\ndef read_channel_metadata_from_db_file(channeldbpath):\n # import here to avoid circular imports whenever kolibri.core.content.models imports utils too\n from kolibri.core.content.models import ChannelMetadata\n\n source = Bridge(sqlite_file_path=channeldbpath)\n\n ChannelMetadataClass = source.get_class(ChannelMetadata)\n\n source_channel_metadata = source.session.query(ChannelMetadataClass).all()[0]\n\n # Use the inferred version from the SQLAlchemy Bridge object, and set it as additional\n # metadata on the channel data\n\n source_channel_metadata.inferred_schema_version = source.schema_version\n\n source.end()\n\n # Adds an attribute `root_id` when `root_id` does not exist to match with\n # the latest schema.\n if not hasattr(source_channel_metadata, \"root_id\"):\n setattr(\n source_channel_metadata,\n \"root_id\",\n getattr(source_channel_metadata, \"root_pk\"),\n )\n\n return source_channel_metadata\n\n\ndef get_channels_for_data_folder(datafolder):\n channels = []\n for path in enumerate_content_database_file_paths(\n get_content_database_dir_path(datafolder)\n ):\n try:\n channel = read_channel_metadata_from_db_file(path)\n except DatabaseError:\n logger.warning(\n \"Tried to import channel from database file {}, but the file was corrupted.\".format(\n path\n )\n )\n continue\n channel_data = {\n \"path\": path,\n \"id\": channel.id,\n \"name\": channel.name,\n \"description\": channel.description,\n \"tagline\": channel.tagline,\n \"thumbnail\": channel.thumbnail,\n \"version\": channel.version,\n \"root\": channel.root_id,\n \"author\": channel.author,\n \"last_updated\": getattr(channel, \"last_updated\", None),\n \"lang_code\": getattr(channel, \"lang_code\", None),\n \"lang_name\": getattr(channel, \"lang_name\", None),\n }\n channels.append(channel_data)\n return channels\n\n\n# Use this to cache mounted drive information when\n# it has already been fetched for querying by drive id\nMOUNTED_DRIVES_CACHE_KEY = \"mounted_drives_cache_key\"\n\n\ndef get_mounted_drives_with_channel_info():\n drives = enumerate_mounted_disk_partitions()\n for drive in drives.values():\n drive.metadata[\"channels\"] = (\n get_channels_for_data_folder(drive.datafolder) if drive.datafolder else []\n )\n cache.set(MOUNTED_DRIVES_CACHE_KEY, drives, 3600)\n return drives\n\n\ndef get_mounted_drive_by_id(drive_id):\n drives = cache.get(MOUNTED_DRIVES_CACHE_KEY)\n if drives is None or drives.get(drive_id, None) is None:\n drives = get_mounted_drives_with_channel_info()\n return drives[drive_id]\n", "path": "kolibri/core/content/utils/channels.py"}]}
| 2,271 | 129 |
gh_patches_debug_26328
|
rasdani/github-patches
|
git_diff
|
mindsdb__mindsdb-1020
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Run tests on windows and OSX
Part of the reason we wanted tests with remote databases was to be able to test mindsdb on windows and OSX.
This is currently being done for native but not for mindsdb, current issues that stop us from testing on OSX and windows:
* `psutil.net_connections` requires root privileges on osx/windows
* `ssh` command will fail on windows
* ???
</issue>
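The macOS limitation can be sidestepped by aggregating per-process connections instead of calling `psutil.net_connections()` globally. The helper below is only a sketch of that idea, not code from this repository, and assumes psutil 5.x semantics (`process_iter` with an attrs list, `Process.connections`); the name `listening_ports` is illustrative.

```python
import psutil


def listening_ports():
    """Collect ports in LISTEN state without psutil.net_connections(),
    which requires elevated privileges on macOS."""
    ports = set()
    for proc in psutil.process_iter(["pid"]):
        try:
            for conn in proc.connections(kind="inet"):
                if conn.status == psutil.CONN_LISTEN:
                    ports.add(conn.laddr.port)
        except (psutil.AccessDenied, psutil.NoSuchProcess, psutil.ZombieProcess):
            continue  # skip processes we are not allowed to inspect
    return sorted(ports)


if __name__ == "__main__":
    print(listening_ports())
```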
<code>
[start of mindsdb/utilities/ps.py]
1 import psutil
2 import time
3
4
5 def is_port_in_use(port_num):
6 portsinuse = []
7 conns = psutil.net_connections()
8 portsinuse = [x.laddr[1] for x in conns if x.status == 'LISTEN']
9 portsinuse.sort()
10 return int(port_num) in portsinuse
11
12
13 def wait_func_is_true(func, timeout, *args, **kwargs):
14 start_time = time.time()
15
16 result = func(*args, **kwargs)
17 while result is False and (time.time() - start_time) < timeout:
18 time.sleep(2)
19 result = func(*args, **kwargs)
20
21 return result
22
23
24 def wait_port(port_num, timeout):
25 return wait_func_is_true(func=is_port_in_use, timeout=timeout, port_num=port_num)
26
27
28 def get_listen_ports(pid):
29 try:
30 p = psutil.Process(pid)
31 cons = p.connections()
32 cons = [x.laddr.port for x in cons]
33 except Exception:
34 return []
35 return cons
36
37
38 def is_pid_listen_port(pid, port):
39 ports = get_listen_ports(pid)
40 return int(port) in ports
41
[end of mindsdb/utilities/ps.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/mindsdb/utilities/ps.py b/mindsdb/utilities/ps.py
--- a/mindsdb/utilities/ps.py
+++ b/mindsdb/utilities/ps.py
@@ -1,11 +1,44 @@
-import psutil
+import sys
import time
+from collections import namedtuple
+import psutil
+
+
+def net_connections():
+ """Cross-platform psutil.net_connections like interface"""
+ if sys.platform.lower().startswith('linux'):
+ return psutil.net_connections()
+
+ all_connections = []
+ Pconn = None
+ for p in psutil.process_iter(['pid']):
+ try:
+ process = psutil.Process(p.pid)
+ connections = process.connections()
+ if connections:
+ for conn in connections:
+ # Adding pid to the returned instance
+ # for consistency with psutil.net_connections()
+ if Pconn is None:
+ fields = list(conn._fields)
+ fields.append('pid')
+ _conn = namedtuple('Pconn', fields)
+ for attr in conn._fields:
+ setattr(_conn, attr, getattr(conn, attr))
+ _conn.pid = p.pid
+ all_connections.append(_conn)
+
+ except (psutil.AccessDenied, psutil.ZombieProcess, psutil.NoSuchProcess):
+ pass
+ return all_connections
def is_port_in_use(port_num):
- portsinuse = []
- conns = psutil.net_connections()
- portsinuse = [x.laddr[1] for x in conns if x.status == 'LISTEN']
+ """Check does any of child process uses specified port."""
+ parent_process = psutil.Process()
+ child_pids = [x.pid for x in parent_process.children(recursive=True)]
+ conns = net_connections()
+ portsinuse = [x.laddr[1] for x in conns if x.pid in child_pids and x.status == 'LISTEN']
portsinuse.sort()
return int(port_num) in portsinuse
|
{"golden_diff": "diff --git a/mindsdb/utilities/ps.py b/mindsdb/utilities/ps.py\n--- a/mindsdb/utilities/ps.py\n+++ b/mindsdb/utilities/ps.py\n@@ -1,11 +1,44 @@\n-import psutil\n+import sys\n import time\n+from collections import namedtuple\n+import psutil\n+\n+\n+def net_connections():\n+ \"\"\"Cross-platform psutil.net_connections like interface\"\"\"\n+ if sys.platform.lower().startswith('linux'):\n+ return psutil.net_connections()\n+\n+ all_connections = []\n+ Pconn = None\n+ for p in psutil.process_iter(['pid']):\n+ try:\n+ process = psutil.Process(p.pid)\n+ connections = process.connections()\n+ if connections:\n+ for conn in connections:\n+ # Adding pid to the returned instance\n+ # for consistency with psutil.net_connections()\n+ if Pconn is None:\n+ fields = list(conn._fields)\n+ fields.append('pid')\n+ _conn = namedtuple('Pconn', fields)\n+ for attr in conn._fields:\n+ setattr(_conn, attr, getattr(conn, attr))\n+ _conn.pid = p.pid\n+ all_connections.append(_conn)\n+\n+ except (psutil.AccessDenied, psutil.ZombieProcess, psutil.NoSuchProcess):\n+ pass\n+ return all_connections\n \n \n def is_port_in_use(port_num):\n- portsinuse = []\n- conns = psutil.net_connections()\n- portsinuse = [x.laddr[1] for x in conns if x.status == 'LISTEN']\n+ \"\"\"Check does any of child process uses specified port.\"\"\"\n+ parent_process = psutil.Process()\n+ child_pids = [x.pid for x in parent_process.children(recursive=True)]\n+ conns = net_connections()\n+ portsinuse = [x.laddr[1] for x in conns if x.pid in child_pids and x.status == 'LISTEN']\n portsinuse.sort()\n return int(port_num) in portsinuse\n", "issue": "Run tests on windows and OSX\nPart of the reason we wanted tests with remote databases was to be able to test mindsdb on windows and OSX.\r\n\r\nThis is currently being done for native but not for mindsdb, current issues that stop us from testing on OSX and windows:\r\n\r\n* `psutil.net_connections` requires root privileges on osx/windows\r\n* `ssh` command will fail on windows\r\n* ??? \n", "before_files": [{"content": "import psutil\nimport time\n\n\ndef is_port_in_use(port_num):\n portsinuse = []\n conns = psutil.net_connections()\n portsinuse = [x.laddr[1] for x in conns if x.status == 'LISTEN']\n portsinuse.sort()\n return int(port_num) in portsinuse\n\n\ndef wait_func_is_true(func, timeout, *args, **kwargs):\n start_time = time.time()\n\n result = func(*args, **kwargs)\n while result is False and (time.time() - start_time) < timeout:\n time.sleep(2)\n result = func(*args, **kwargs)\n\n return result\n\n\ndef wait_port(port_num, timeout):\n return wait_func_is_true(func=is_port_in_use, timeout=timeout, port_num=port_num)\n\n\ndef get_listen_ports(pid):\n try:\n p = psutil.Process(pid)\n cons = p.connections()\n cons = [x.laddr.port for x in cons]\n except Exception:\n return []\n return cons\n\n\ndef is_pid_listen_port(pid, port):\n ports = get_listen_ports(pid)\n return int(port) in ports\n", "path": "mindsdb/utilities/ps.py"}]}
| 951 | 453 |
gh_patches_debug_29878
|
rasdani/github-patches
|
git_diff
|
translate__pootle-4260
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Enable sorting by contribution in contributor command
Currently contributors are sorted in alphabetical order. This is great for crediting. But it would be more helpful to allow sorting by contribution in cases where you want to use to list to make other decisions around the amount of contribution.
Thus add `--sort-by-contribution` and `--sort-by-name` options.
</issue>
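Both orderings fall straight out of `collections.Counter`, which the command already uses to aggregate counts. The snippet below is a standalone illustration with made-up contributor names and numbers, not code from the project:

```python
from collections import Counter

contribs = Counter({"alice": 12, "bob": 40, "carol": 7})

by_name = sorted(contribs.items())        # alphabetical, suits crediting
by_contribution = contribs.most_common()  # largest contribution first

print(by_name)          # [('alice', 12), ('bob', 40), ('carol', 7)]
print(by_contribution)  # [('bob', 40), ('alice', 12), ('carol', 7)]
```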
<code>
[start of pootle/apps/pootle_app/management/commands/contributors.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 #
4 # Copyright (C) Pootle contributors.
5 #
6 # This file is a part of the Pootle project. It is distributed under the GPL3
7 # or later license. See the LICENSE file for a copy of the license and the
8 # AUTHORS file for copyright and authorship information.
9
10 import os
11 from collections import Counter
12 from optparse import make_option
13
14 os.environ["DJANGO_SETTINGS_MODULE"] = "pootle.settings"
15
16 from django.contrib.auth import get_user_model
17
18 from pootle_store.models import Unit
19
20 from . import PootleCommand
21
22
23 User = get_user_model()
24
25
26 class Command(PootleCommand):
27 option_list = PootleCommand.option_list + (
28 make_option(
29 "--from-revision",
30 type=int,
31 default=0,
32 dest="revision",
33 help="Only count contributions newer than this revision",
34 ),
35 )
36
37 help = "Print a list of contributors."
38
39 def handle_all(self, **options):
40 system_user = User.objects.get_system_user()
41 units = Unit.objects.exclude(submitted_by=system_user) \
42 .exclude(submitted_by=None)
43
44 if options["revision"]:
45 units = units.filter(revision__gte=options["revision"])
46
47 if self.projects:
48 units = units.filter(
49 store__translation_project__project__code__in=self.projects,
50 )
51
52 if self.languages:
53 units = units.filter(
54 store__translation_project__language__code__in=self.languages,
55 )
56
57 contribs = Counter()
58 for v in units.values("submitted_by"):
59 contribs.update((v["submitted_by"], ))
60
61 self.list_contributions(contribs)
62
63 def list_contributions(self, contribs):
64 out = []
65 for id, count in contribs.items():
66 user = User.objects.get(id=id)
67 name = user.display_name
68 if user.email:
69 name += " <%s>" % (user.email)
70 out.append("%s (%i contributions)" % (name, count))
71
72 # Sort users alphabetically
73 for line in sorted(out):
74 self.stdout.write(line)
75
[end of pootle/apps/pootle_app/management/commands/contributors.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pootle/apps/pootle_app/management/commands/contributors.py b/pootle/apps/pootle_app/management/commands/contributors.py
--- a/pootle/apps/pootle_app/management/commands/contributors.py
+++ b/pootle/apps/pootle_app/management/commands/contributors.py
@@ -32,6 +32,15 @@
dest="revision",
help="Only count contributions newer than this revision",
),
+ make_option(
+ "--sort-by",
+ type="choice",
+ default="name",
+ choices=["name", "contributions"],
+ dest="sort_by",
+ help="Sort by specified item. Accepts name and contributions. "
+ "Default: %default",
+ ),
)
help = "Print a list of contributors."
@@ -58,17 +67,25 @@
for v in units.values("submitted_by"):
contribs.update((v["submitted_by"], ))
- self.list_contributions(contribs)
+ self.list_contributions(contribs, options["sort_by"])
+
+ def list_contributions(self, contribs, sort_by):
+ if sort_by == "name":
+ contributions = contribs.items()
+ else:
+ contributions = contribs.most_common()
- def list_contributions(self, contribs):
out = []
- for id, count in contribs.items():
+ for id, count in contributions:
user = User.objects.get(id=id)
name = user.display_name
if user.email:
name += " <%s>" % (user.email)
out.append("%s (%i contributions)" % (name, count))
- # Sort users alphabetically
- for line in sorted(out):
+ if sort_by == "name":
+ # Sort users alphabetically
+ out = sorted(out)
+
+ for line in out:
self.stdout.write(line)
|
{"golden_diff": "diff --git a/pootle/apps/pootle_app/management/commands/contributors.py b/pootle/apps/pootle_app/management/commands/contributors.py\n--- a/pootle/apps/pootle_app/management/commands/contributors.py\n+++ b/pootle/apps/pootle_app/management/commands/contributors.py\n@@ -32,6 +32,15 @@\n dest=\"revision\",\n help=\"Only count contributions newer than this revision\",\n ),\n+ make_option(\n+ \"--sort-by\",\n+ type=\"choice\",\n+ default=\"name\",\n+ choices=[\"name\", \"contributions\"],\n+ dest=\"sort_by\",\n+ help=\"Sort by specified item. Accepts name and contributions. \"\n+ \"Default: %default\",\n+ ),\n )\n \n help = \"Print a list of contributors.\"\n@@ -58,17 +67,25 @@\n for v in units.values(\"submitted_by\"):\n contribs.update((v[\"submitted_by\"], ))\n \n- self.list_contributions(contribs)\n+ self.list_contributions(contribs, options[\"sort_by\"])\n+\n+ def list_contributions(self, contribs, sort_by):\n+ if sort_by == \"name\":\n+ contributions = contribs.items()\n+ else:\n+ contributions = contribs.most_common()\n \n- def list_contributions(self, contribs):\n out = []\n- for id, count in contribs.items():\n+ for id, count in contributions:\n user = User.objects.get(id=id)\n name = user.display_name\n if user.email:\n name += \" <%s>\" % (user.email)\n out.append(\"%s (%i contributions)\" % (name, count))\n \n- # Sort users alphabetically\n- for line in sorted(out):\n+ if sort_by == \"name\":\n+ # Sort users alphabetically\n+ out = sorted(out)\n+\n+ for line in out:\n self.stdout.write(line)\n", "issue": "Enable sorting by contribution in contributor command\nCurrently contributors are sorted in alphabetical order. This is great for crediting. But it would be more helpful to allow sorting by contribution in cases where you want to use to list to make other decisions around the amount of contribution.\n\nThus add `--sort-by-contribution` and `--sort-by-name` options.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport os\nfrom collections import Counter\nfrom optparse import make_option\n\nos.environ[\"DJANGO_SETTINGS_MODULE\"] = \"pootle.settings\"\n\nfrom django.contrib.auth import get_user_model\n\nfrom pootle_store.models import Unit\n\nfrom . 
import PootleCommand\n\n\nUser = get_user_model()\n\n\nclass Command(PootleCommand):\n option_list = PootleCommand.option_list + (\n make_option(\n \"--from-revision\",\n type=int,\n default=0,\n dest=\"revision\",\n help=\"Only count contributions newer than this revision\",\n ),\n )\n\n help = \"Print a list of contributors.\"\n\n def handle_all(self, **options):\n system_user = User.objects.get_system_user()\n units = Unit.objects.exclude(submitted_by=system_user) \\\n .exclude(submitted_by=None)\n\n if options[\"revision\"]:\n units = units.filter(revision__gte=options[\"revision\"])\n\n if self.projects:\n units = units.filter(\n store__translation_project__project__code__in=self.projects,\n )\n\n if self.languages:\n units = units.filter(\n store__translation_project__language__code__in=self.languages,\n )\n\n contribs = Counter()\n for v in units.values(\"submitted_by\"):\n contribs.update((v[\"submitted_by\"], ))\n\n self.list_contributions(contribs)\n\n def list_contributions(self, contribs):\n out = []\n for id, count in contribs.items():\n user = User.objects.get(id=id)\n name = user.display_name\n if user.email:\n name += \" <%s>\" % (user.email)\n out.append(\"%s (%i contributions)\" % (name, count))\n\n # Sort users alphabetically\n for line in sorted(out):\n self.stdout.write(line)\n", "path": "pootle/apps/pootle_app/management/commands/contributors.py"}]}
| 1,237 | 433 |
gh_patches_debug_11685
|
rasdani/github-patches
|
git_diff
|
bokeh__bokeh-2025
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Update/refresh installation guide and quick start
Many parts are out of date or confusing.
</issue>
<code>
[start of bokeh/sphinxext/bokeh_plot.py]
1 """ Include Bokeh plots in Sphinx HTML documentation.
2
3 For other output types, the placeholder text ``[graph]`` will
4 be generated.
5
6 Usage
7 -----
8
9 The ``bokeh-plot`` directive can be used by either supplying:
10
11 1. **A path to a source file** as the argument to the directive::
12
13 .. bokeh-plot:: path/to/plot.py
14
15
16 2. **Inline code** as the content of the directive::
17
18 .. bokeh-plot::
19
20 from bokeh.plotting import figure, output_file, show
21
22 output_file("example.html")
23
24 x = [1, 2, 3, 4, 5]
25 y = [6, 7, 6, 4, 5]
26
27 p = figure(title="example", plot_width=300, plot_height=300)
28 p.line(x, y, line_width=2)
29 p.circle(x, y, size=10, fill_color="white")
30
31 show(p)
32
33 This directive also works in conjunction with Sphinx autodoc, when
34 used in docstrings.
35
36 Options
37 -------
38
39 The ``bokeh-plot`` directive accepts the following options:
40
41 source-position : enum('above', 'below', 'none')
42 Where to locate the the block of formatted source
43 code (if anywhere).
44
45 linenos : bool
46 Whether to display line numbers along with the source.
47
48 emphasize-lines : list[int]
49 A list of source code lines to emphasize.
50
51 Examples
52 --------
53
54 The inline example code above produces the following output:
55
56 ----
57
58 .. bokeh-plot::
59
60 from bokeh.plotting import figure, output_file, show
61
62 output_file("example.html")
63
64 x = [1, 2, 3, 4, 5]
65 y = [6, 7, 6, 4, 5]
66
67 p = figure(title="example", plot_width=300, plot_height=300)
68 p.line(x, y, line_width=2)
69 p.circle(x, y, size=10, fill_color="white")
70
71 show(p)
72
73 """
74 from __future__ import absolute_import
75
76 import hashlib
77 from os import makedirs
78 from os.path import basename, dirname, exists, isdir, join, relpath
79 import re
80 from shutil import copy
81 import sys
82 from tempfile import mkdtemp
83 import webbrowser
84
85 from docutils import nodes
86 from docutils.parsers.rst.directives import choice, flag, unchanged
87 from docutils.statemachine import ViewList
88
89 import jinja2
90
91 from sphinx.locale import _
92 from sphinx.util.compat import Directive
93
94 from .utils import out_of_date
95 from .. import plotting
96 from ..document import Document
97 from ..embed import autoload_static
98 from ..resources import CDN
99 from ..utils import decode_utf8
100
101
102 SOURCE_TEMPLATE = jinja2.Template(u"""
103 .. code-block:: python
104 {% if linenos %}:linenos:{% endif %}
105 {% if emphasize_lines %}:emphasize-lines: {{ emphasize_lines }}{% endif %}
106
107 {{ source|indent(3) }}
108
109 """)
110
111
112 SCRIPT_TEMPLATE = jinja2.Template(u"""
113 <table>
114 <tr>
115 <td>
116 {{ script|indent(4) }}
117 </td>
118 </tr>
119 </table>
120 """)
121
122
123 class bokeh_plot(nodes.General, nodes.Element):
124 pass
125
126
127 def _source_position(argument):
128 return choice(argument, ('below', 'above', 'none'))
129
130
131 class BokehPlotDirective(Directive):
132
133 has_content = True
134 optional_arguments = 2
135
136 option_spec = {
137 'source-position' : _source_position,
138 'linenos' : flag,
139 'emphasize-lines' : unchanged,
140 }
141
142 def run(self):
143 # filename *or* python code content, but not both
144 if self.arguments and self.content:
145 raise RuntimeError("bokeh-plot:: directive can't have both args and content")
146
147 env = self.state.document.settings.env
148 app = env.app
149
150 if not hasattr(env, 'bokeh_plot_tmpdir'):
151 env.bokeh_plot_tmpdir = mkdtemp()
152 app.verbose("creating new temp dir for bokeh-plot cache: %s" % env.bokeh_plot_tmpdir)
153 else:
154 tmpdir = env.bokeh_plot_tmpdir
155 if not exists(tmpdir) or not isdir(tmpdir):
156 app.verbose("creating new temp dir for bokeh-plot cache: %s" % env.bokeh_plot_tmpdir)
157 env.bokeh_plot_tmpdir = mkdtemp()
158 else:
159 app.verbose("using existing temp dir for bokeh-plot cache: %s" % env.bokeh_plot_tmpdir)
160
161 # TODO (bev) verify that this is always the correct thing
162 rst_source = self.state_machine.node.document['source']
163 rst_dir = dirname(rst_source)
164 rst_filename = basename(rst_source)
165
166 target_id = "%s.bokeh-plot-%d" % (rst_filename, env.new_serialno('bokeh-plot'))
167 target_node = nodes.target('', '', ids=[target_id])
168 result = [target_node]
169
170 try:
171 source = self._get_source()
172 except Exception:
173 node = nodes.error(None,
174 nodes.paragraph(text="Unable to generate Bokeh plot at %s:%d:" % (basename(rst_source), self.lineno)),
175 nodes.paragraph(text=str(sys.exc_info()[1])))
176 return [node]
177
178 source_position = self.options.get('source-position', 'below')
179
180 if source_position == 'above':
181 result += self._get_source_nodes(source)
182
183 node = bokeh_plot()
184 node['target_id'] = target_id
185 node['source'] = source
186 node['relpath'] = relpath(rst_dir, env.srcdir)
187 node['rst_source'] = rst_source
188 node['rst_lineno'] = self.lineno
189 if 'alt' in self.options:
190 node['alt'] = self.options['alt']
191 if self.arguments:
192 node['path'] = self.arguments[0]
193 env.note_dependency(node['path'])
194 if len(self.arguments) == 2:
195 node['symbol'] = self.arguments[1]
196 result += [node]
197
198 if source_position == 'below':
199 result += self._get_source_nodes(source)
200
201 return result
202
203 def _get_source(self):
204 if self.arguments:
205 source = open(self.arguments[0], "r").read()
206 source = decode_utf8(source)
207 else:
208 source = u""
209 for line in self.content:
210 source += "%s\n" % line
211 return source
212
213 def _get_source_nodes(self, source):
214 linenos = 'linenos' in self.options
215 emphasize_lines = self.options.get('emphasize-lines', False)
216 if emphasize_lines: linenos = True
217 result = ViewList()
218 text = SOURCE_TEMPLATE.render(source=source, linenos=linenos, emphasize_lines=emphasize_lines)
219 for line in text.split("\n"):
220 result.append(line, "<bokeh-plot>")
221 node = nodes.paragraph()
222 node.document = self.state.document
223 self.state.nested_parse(result, 0, node)
224 return node.children
225
226 # patch open and show and save to be no-ops
227 def _noop(*args, **kwargs):
228 pass
229
230 def _show(obj=None):
231 if obj:
232 plotting._obj = obj
233
234 webbrowser.open = _noop
235 plotting.save = _noop
236 plotting.show = _show
237
238 def _render_plot(source, symbol):
239 plotting._default_document = Document()
240 namespace = {}
241 # need to remove any encoding comment before compiling unicode
242 pat = re.compile(r"^# -\*- coding: (.*) -\*-$", re.M)
243 source = pat.sub("", source)
244 code = compile(source, "<string>", mode="exec")
245 eval(code, namespace)
246 # TODO (bev) remove this crap
247 if symbol is not None:
248 if 'bokeh.charts' in source:
249 obj = namespace[symbol].chart.plot
250 else:
251 obj = namespace[symbol]
252 else:
253 obj = plotting._obj
254 return obj
255
256 def html_visit_bokeh_plot(self, node):
257 env = self.builder.env
258 dest_dir = join(self.builder.outdir, node["relpath"])
259
260 try:
261 if node.has_key('path'):
262 path = node['path']
263 filename = "bokeh-plot-%s.js" % hashlib.md5(path.encode('utf-8')).hexdigest()
264 dest_path = join(dest_dir, filename)
265 tmpdir = join(env.bokeh_plot_tmpdir, node["relpath"])
266 if not exists(tmpdir): makedirs(tmpdir)
267 cached_path = join(tmpdir, filename)
268
269 if out_of_date(path, cached_path) or not exists(cached_path+".script"):
270 self.builder.app.verbose("generating new plot for '%s'" % path)
271 plot = _render_plot(node['source'], node.get('symbol'))
272 js, script = autoload_static(plot, CDN, filename)
273 with open(cached_path, "w") as f:
274 f.write(js)
275 with open(cached_path+".script", "w") as f:
276 f.write(script)
277 else:
278 self.builder.app.verbose("using cached plot for '%s'" % path)
279 script = open(cached_path+".script", "r").read()
280
281 if not exists(dest_dir): makedirs(dest_dir)
282 copy(cached_path, dest_path)
283 else:
284 filename = node['target_id'] + ".js"
285 dest_path = join(dest_dir, filename)
286 plot = _render_plot(node['source'], None)
287 js, script = autoload_static(plot, CDN, filename)
288 self.builder.app.verbose("saving inline plot at: %s" % dest_path)
289 with open(dest_path, "w") as f:
290 f.write(js)
291
292 html = SCRIPT_TEMPLATE.render(script=script)
293 self.body.append(html)
294 except Exception:
295 err_node = nodes.error(None,
296 nodes.paragraph(text="Unable to generate Bokeh plot at %s:%d:" % (node['rst_source'], node['rst_lineno'])),
297 nodes.paragraph(text=str(sys.exc_info()[1])))
298 node.children.append(err_node)
299 raise nodes.SkipDeparture
300 else:
301 raise nodes.SkipNode
302
303
304 def latex_visit_bokeh_plot(self, node):
305 if 'alt' in node.attributes:
306 self.body.append(_('[graph: %s]') % node['alt'])
307 else:
308 self.body.append(_('[graph]'))
309 raise nodes.SkipNode
310
311
312 def texinfo_visit_bokeh_plot(self, node):
313 if 'alt' in node.attributes:
314 self.body.append(_('[graph: %s]') % node['alt'])
315 else:
316 self.body.append(_('[graph]'))
317 raise nodes.SkipNode
318
319
320 def text_visit_bokeh_plot(self, node):
321 if 'alt' in node.attributes:
322 self.add_text(_('[graph: %s]') % node['alt'])
323 else:
324 self.add_text(_('[graph]'))
325 raise nodes.SkipNode
326
327
328 def man_visit_bokeh_plot(self, node):
329 if 'alt' in node.attributes:
330 self.body.append(_('[graph: %s]') % node['alt'])
331 else:
332 self.body.append(_('[graph]'))
333 raise nodes.SkipNode
334
335
336 def setup(app):
337 app.add_node(bokeh_plot,
338 html=(html_visit_bokeh_plot, None),
339 latex=(latex_visit_bokeh_plot, None),
340 texinfo=(texinfo_visit_bokeh_plot, None),
341 text=(text_visit_bokeh_plot, None),
342 man=(man_visit_bokeh_plot, None))
343 app.add_directive('bokeh-plot', BokehPlotDirective)
344
345
346
347
348
349
350
[end of bokeh/sphinxext/bokeh_plot.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/bokeh/sphinxext/bokeh_plot.py b/bokeh/sphinxext/bokeh_plot.py
--- a/bokeh/sphinxext/bokeh_plot.py
+++ b/bokeh/sphinxext/bokeh_plot.py
@@ -92,7 +92,7 @@
from sphinx.util.compat import Directive
from .utils import out_of_date
-from .. import plotting
+from .. import charts, io, plotting
from ..document import Document
from ..embed import autoload_static
from ..resources import CDN
@@ -232,6 +232,10 @@
plotting._obj = obj
webbrowser.open = _noop
+charts.save = _noop
+charts.show = _show
+io.save = _noop
+io.show = _show
plotting.save = _noop
plotting.show = _show
|
{"golden_diff": "diff --git a/bokeh/sphinxext/bokeh_plot.py b/bokeh/sphinxext/bokeh_plot.py\n--- a/bokeh/sphinxext/bokeh_plot.py\n+++ b/bokeh/sphinxext/bokeh_plot.py\n@@ -92,7 +92,7 @@\n from sphinx.util.compat import Directive\n \n from .utils import out_of_date\n-from .. import plotting\n+from .. import charts, io, plotting\n from ..document import Document\n from ..embed import autoload_static\n from ..resources import CDN\n@@ -232,6 +232,10 @@\n plotting._obj = obj\n \n webbrowser.open = _noop\n+charts.save = _noop\n+charts.show = _show\n+io.save = _noop\n+io.show = _show\n plotting.save = _noop\n plotting.show = _show\n", "issue": "Update/refresh installation guide and quick start\nMany parts are out of date or confusing. \n\n", "before_files": [{"content": "\"\"\" Include Bokeh plots in Sphinx HTML documentation.\n\nFor other output types, the placeholder text ``[graph]`` will\nbe generated.\n\nUsage\n-----\n\nThe ``bokeh-plot`` directive can be used by either supplying:\n\n1. **A path to a source file** as the argument to the directive::\n\n .. bokeh-plot:: path/to/plot.py\n\n\n2. **Inline code** as the content of the directive::\n\n .. bokeh-plot::\n\n from bokeh.plotting import figure, output_file, show\n\n output_file(\"example.html\")\n\n x = [1, 2, 3, 4, 5]\n y = [6, 7, 6, 4, 5]\n\n p = figure(title=\"example\", plot_width=300, plot_height=300)\n p.line(x, y, line_width=2)\n p.circle(x, y, size=10, fill_color=\"white\")\n\n show(p)\n\nThis directive also works in conjunction with Sphinx autodoc, when\nused in docstrings.\n\nOptions\n-------\n\nThe ``bokeh-plot`` directive accepts the following options:\n\nsource-position : enum('above', 'below', 'none')\n Where to locate the the block of formatted source\n code (if anywhere).\n\nlinenos : bool\n Whether to display line numbers along with the source.\n\nemphasize-lines : list[int]\n A list of source code lines to emphasize.\n\nExamples\n--------\n\nThe inline example code above produces the following output:\n\n----\n\n.. bokeh-plot::\n\n from bokeh.plotting import figure, output_file, show\n\n output_file(\"example.html\")\n\n x = [1, 2, 3, 4, 5]\n y = [6, 7, 6, 4, 5]\n\n p = figure(title=\"example\", plot_width=300, plot_height=300)\n p.line(x, y, line_width=2)\n p.circle(x, y, size=10, fill_color=\"white\")\n\n show(p)\n\n\"\"\"\nfrom __future__ import absolute_import\n\nimport hashlib\nfrom os import makedirs\nfrom os.path import basename, dirname, exists, isdir, join, relpath\nimport re\nfrom shutil import copy\nimport sys\nfrom tempfile import mkdtemp\nimport webbrowser\n\nfrom docutils import nodes\nfrom docutils.parsers.rst.directives import choice, flag, unchanged\nfrom docutils.statemachine import ViewList\n\nimport jinja2\n\nfrom sphinx.locale import _\nfrom sphinx.util.compat import Directive\n\nfrom .utils import out_of_date\nfrom .. import plotting\nfrom ..document import Document\nfrom ..embed import autoload_static\nfrom ..resources import CDN\nfrom ..utils import decode_utf8\n\n\nSOURCE_TEMPLATE = jinja2.Template(u\"\"\"\n.. 
code-block:: python\n {% if linenos %}:linenos:{% endif %}\n {% if emphasize_lines %}:emphasize-lines: {{ emphasize_lines }}{% endif %}\n\n {{ source|indent(3) }}\n\n\"\"\")\n\n\nSCRIPT_TEMPLATE = jinja2.Template(u\"\"\"\n<table>\n <tr>\n <td>\n {{ script|indent(4) }}\n </td>\n </tr>\n</table>\n\"\"\")\n\n\nclass bokeh_plot(nodes.General, nodes.Element):\n pass\n\n\ndef _source_position(argument):\n return choice(argument, ('below', 'above', 'none'))\n\n\nclass BokehPlotDirective(Directive):\n\n has_content = True\n optional_arguments = 2\n\n option_spec = {\n 'source-position' : _source_position,\n 'linenos' : flag,\n 'emphasize-lines' : unchanged,\n }\n\n def run(self):\n # filename *or* python code content, but not both\n if self.arguments and self.content:\n raise RuntimeError(\"bokeh-plot:: directive can't have both args and content\")\n\n env = self.state.document.settings.env\n app = env.app\n\n if not hasattr(env, 'bokeh_plot_tmpdir'):\n env.bokeh_plot_tmpdir = mkdtemp()\n app.verbose(\"creating new temp dir for bokeh-plot cache: %s\" % env.bokeh_plot_tmpdir)\n else:\n tmpdir = env.bokeh_plot_tmpdir\n if not exists(tmpdir) or not isdir(tmpdir):\n app.verbose(\"creating new temp dir for bokeh-plot cache: %s\" % env.bokeh_plot_tmpdir)\n env.bokeh_plot_tmpdir = mkdtemp()\n else:\n app.verbose(\"using existing temp dir for bokeh-plot cache: %s\" % env.bokeh_plot_tmpdir)\n\n # TODO (bev) verify that this is always the correct thing\n rst_source = self.state_machine.node.document['source']\n rst_dir = dirname(rst_source)\n rst_filename = basename(rst_source)\n\n target_id = \"%s.bokeh-plot-%d\" % (rst_filename, env.new_serialno('bokeh-plot'))\n target_node = nodes.target('', '', ids=[target_id])\n result = [target_node]\n\n try:\n source = self._get_source()\n except Exception:\n node = nodes.error(None,\n nodes.paragraph(text=\"Unable to generate Bokeh plot at %s:%d:\" % (basename(rst_source), self.lineno)),\n nodes.paragraph(text=str(sys.exc_info()[1])))\n return [node]\n\n source_position = self.options.get('source-position', 'below')\n\n if source_position == 'above':\n result += self._get_source_nodes(source)\n\n node = bokeh_plot()\n node['target_id'] = target_id\n node['source'] = source\n node['relpath'] = relpath(rst_dir, env.srcdir)\n node['rst_source'] = rst_source\n node['rst_lineno'] = self.lineno\n if 'alt' in self.options:\n node['alt'] = self.options['alt']\n if self.arguments:\n node['path'] = self.arguments[0]\n env.note_dependency(node['path'])\n if len(self.arguments) == 2:\n node['symbol'] = self.arguments[1]\n result += [node]\n\n if source_position == 'below':\n result += self._get_source_nodes(source)\n\n return result\n\n def _get_source(self):\n if self.arguments:\n source = open(self.arguments[0], \"r\").read()\n source = decode_utf8(source)\n else:\n source = u\"\"\n for line in self.content:\n source += \"%s\\n\" % line\n return source\n\n def _get_source_nodes(self, source):\n linenos = 'linenos' in self.options\n emphasize_lines = self.options.get('emphasize-lines', False)\n if emphasize_lines: linenos = True\n result = ViewList()\n text = SOURCE_TEMPLATE.render(source=source, linenos=linenos, emphasize_lines=emphasize_lines)\n for line in text.split(\"\\n\"):\n result.append(line, \"<bokeh-plot>\")\n node = nodes.paragraph()\n node.document = self.state.document\n self.state.nested_parse(result, 0, node)\n return node.children\n\n# patch open and show and save to be no-ops\ndef _noop(*args, **kwargs):\n pass\n\ndef _show(obj=None):\n if obj:\n 
plotting._obj = obj\n\nwebbrowser.open = _noop\nplotting.save = _noop\nplotting.show = _show\n\ndef _render_plot(source, symbol):\n plotting._default_document = Document()\n namespace = {}\n # need to remove any encoding comment before compiling unicode\n pat = re.compile(r\"^# -\\*- coding: (.*) -\\*-$\", re.M)\n source = pat.sub(\"\", source)\n code = compile(source, \"<string>\", mode=\"exec\")\n eval(code, namespace)\n # TODO (bev) remove this crap\n if symbol is not None:\n if 'bokeh.charts' in source:\n obj = namespace[symbol].chart.plot\n else:\n obj = namespace[symbol]\n else:\n obj = plotting._obj\n return obj\n\ndef html_visit_bokeh_plot(self, node):\n env = self.builder.env\n dest_dir = join(self.builder.outdir, node[\"relpath\"])\n\n try:\n if node.has_key('path'):\n path = node['path']\n filename = \"bokeh-plot-%s.js\" % hashlib.md5(path.encode('utf-8')).hexdigest()\n dest_path = join(dest_dir, filename)\n tmpdir = join(env.bokeh_plot_tmpdir, node[\"relpath\"])\n if not exists(tmpdir): makedirs(tmpdir)\n cached_path = join(tmpdir, filename)\n\n if out_of_date(path, cached_path) or not exists(cached_path+\".script\"):\n self.builder.app.verbose(\"generating new plot for '%s'\" % path)\n plot = _render_plot(node['source'], node.get('symbol'))\n js, script = autoload_static(plot, CDN, filename)\n with open(cached_path, \"w\") as f:\n f.write(js)\n with open(cached_path+\".script\", \"w\") as f:\n f.write(script)\n else:\n self.builder.app.verbose(\"using cached plot for '%s'\" % path)\n script = open(cached_path+\".script\", \"r\").read()\n\n if not exists(dest_dir): makedirs(dest_dir)\n copy(cached_path, dest_path)\n else:\n filename = node['target_id'] + \".js\"\n dest_path = join(dest_dir, filename)\n plot = _render_plot(node['source'], None)\n js, script = autoload_static(plot, CDN, filename)\n self.builder.app.verbose(\"saving inline plot at: %s\" % dest_path)\n with open(dest_path, \"w\") as f:\n f.write(js)\n\n html = SCRIPT_TEMPLATE.render(script=script)\n self.body.append(html)\n except Exception:\n err_node = nodes.error(None,\n nodes.paragraph(text=\"Unable to generate Bokeh plot at %s:%d:\" % (node['rst_source'], node['rst_lineno'])),\n nodes.paragraph(text=str(sys.exc_info()[1])))\n node.children.append(err_node)\n raise nodes.SkipDeparture\n else:\n raise nodes.SkipNode\n\n\ndef latex_visit_bokeh_plot(self, node):\n if 'alt' in node.attributes:\n self.body.append(_('[graph: %s]') % node['alt'])\n else:\n self.body.append(_('[graph]'))\n raise nodes.SkipNode\n\n\ndef texinfo_visit_bokeh_plot(self, node):\n if 'alt' in node.attributes:\n self.body.append(_('[graph: %s]') % node['alt'])\n else:\n self.body.append(_('[graph]'))\n raise nodes.SkipNode\n\n\ndef text_visit_bokeh_plot(self, node):\n if 'alt' in node.attributes:\n self.add_text(_('[graph: %s]') % node['alt'])\n else:\n self.add_text(_('[graph]'))\n raise nodes.SkipNode\n\n\ndef man_visit_bokeh_plot(self, node):\n if 'alt' in node.attributes:\n self.body.append(_('[graph: %s]') % node['alt'])\n else:\n self.body.append(_('[graph]'))\n raise nodes.SkipNode\n\n\ndef setup(app):\n app.add_node(bokeh_plot,\n html=(html_visit_bokeh_plot, None),\n latex=(latex_visit_bokeh_plot, None),\n texinfo=(texinfo_visit_bokeh_plot, None),\n text=(text_visit_bokeh_plot, None),\n man=(man_visit_bokeh_plot, None))\n app.add_directive('bokeh-plot', BokehPlotDirective)\n\n\n\n\n\n\n", "path": "bokeh/sphinxext/bokeh_plot.py"}]}
| 4,089 | 187 |
gh_patches_debug_15183
|
rasdani/github-patches
|
git_diff
|
activeloopai__deeplake-2472
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] Possibly unsafe conversion of DatasetDiff instance to bytes
## 🐛🐛 Bug Report
### ⚗️ Current Behavior
The code block
https://github.com/activeloopai/deeplake/blob/c88925d8afa94841972fe4fd6f1ddeb1ede8dde0/deeplake/core/version_control/dataset_diff.py#L37-L39
assumes `len(str.encode('utf-8')) == len(str)` which is not true in general.
### 🧰 Possible Solution
The code block should read:
```python
len(old_name_encoded).to_bytes(8, "big"),
len(new_name_encoded).to_bytes(8, "big"),
old_name_encoded + new_name_encoded,
```
where
```python
old_name_encoded = old_name.encode("utf-8")
new_name_encoded = new_name.encode("utf-8")
```
</issue>
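A quick standalone demonstration of why the two lengths diverge (the sample string is arbitrary):

```python
name = "datensätze"
encoded = name.encode("utf-8")

print(len(name))     # 10 characters
print(len(encoded))  # 11 bytes -- "ä" occupies two bytes in UTF-8

# A length prefix computed from len(name) under-counts the payload, so the
# header must describe the encoded byte string, as the suggestion above does.
```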
<code>
[start of deeplake/core/version_control/dataset_diff.py]
1 from deeplake.core.storage.deeplake_memory_object import DeepLakeMemoryObject
2 from deeplake.core.storage import LRUCache
3 from deeplake.util.keys import get_dataset_diff_key
4 import typing
5 from collections import OrderedDict
6
7
8 class DatasetDiff(DeepLakeMemoryObject):
9 def __init__(self) -> None:
10 self.is_dirty = False
11 self.info_updated = False
12 self.renamed: typing.OrderedDict = OrderedDict()
13 self.deleted: typing.List[str] = []
14
15 def tobytes(self) -> bytes:
16 """Returns bytes representation of the dataset diff
17
18 The format stores the following information in order:
19 1. The first byte is a boolean value indicating whether the Dataset info was modified or not.
20 2. The next 8 bytes give the number of renamed tensors, let's call this m.
21 3. Next, there will be m blocks of bytes with the following format:
22 1. 8 + 8 bytes giving the length of old and new names, let's call them x and y.
23 2. x bytes of old name.
24 3. y bytes of new name.
25 4. The next 8 bytes give the number of deleted tensors, let's call this n.
26 5. Next, there will be n blocks of bytes with the following format:
27 1. 8 bytes giving the length of the name of the deleted tensor, let's call this z.
28 2. n bytes of name of the deleted tensor.
29 """
30 return b"".join(
31 [
32 self.info_updated.to_bytes(1, "big"),
33 len(self.renamed).to_bytes(8, "big"),
34 *(
35 b"".join(
36 [
37 len(old_name).to_bytes(8, "big"),
38 len(new_name).to_bytes(8, "big"),
39 (old_name + new_name).encode("utf-8"),
40 ]
41 )
42 for old_name, new_name in self.renamed.items()
43 ),
44 len(self.deleted).to_bytes(8, "big"),
45 *(
46 b"".join([len(name).to_bytes(8, "big"), name.encode("utf-8")])
47 for name in self.deleted
48 ),
49 ]
50 )
51
52 @classmethod
53 def frombuffer(cls, data: bytes) -> "DatasetDiff":
54 """Creates a DatasetDiff object from bytes"""
55 dataset_diff = cls()
56 dataset_diff.info_updated = bool(int.from_bytes(data[:1], "big"))
57 len_renamed = int.from_bytes(data[1:9], "big")
58 pos = 9
59 for _ in range(len_renamed):
60 len_old, len_new = (
61 int.from_bytes(data[pos : pos + 8], "big"),
62 int.from_bytes(data[pos + 8 : pos + 16], "big"),
63 )
64 pos += 16
65 old_name, new_name = (
66 data[pos : pos + len_old].decode("utf-8"),
67 data[pos + len_old : pos + len_old + len_new].decode("utf-8"),
68 )
69 pos += len_old + len_new
70 dataset_diff.renamed[old_name] = new_name
71 len_deleted = int.from_bytes(data[pos : pos + 8], "big")
72 pos += 8
73 for _ in range(len_deleted):
74 len_name = int.from_bytes(data[pos : pos + 8], "big")
75 pos += 8
76 name = data[pos : pos + len_name].decode("utf-8")
77 pos += len_name
78 dataset_diff.deleted.append(name)
79 return dataset_diff
80
81 @property
82 def nbytes(self):
83 """Returns number of bytes required to store the dataset diff"""
84 return 1
85
86 def modify_info(self) -> None:
87 """Stores information that the info has changed"""
88 self.info_updated = True
89 self.is_dirty = True
90
91 def tensor_renamed(self, old_name, new_name):
92 """Adds old and new name of a tensor that was renamed to renamed"""
93 for old, new in self.renamed.items():
94 if old_name == new:
95 if old == new_name:
96 self.renamed.pop(old)
97 else:
98 self.renamed[old] = new_name
99 break
100 else:
101 self.renamed[old_name] = new_name
102
103 self.is_dirty = True
104
105 def tensor_deleted(self, name):
106 """Adds name of deleted tensor to deleted"""
107 if name not in self.deleted:
108 for old, new in self.renamed.items():
109 if name == new:
110 self.renamed.pop(old)
111 self.deleted.append(old)
112 break
113 else:
114 self.deleted.append(name)
115 self.is_dirty = True
116
117
118 def load_dataset_diff(dataset):
119 storage: LRUCache = dataset.storage
120 path = get_dataset_diff_key(dataset.version_state["commit_id"])
121 try:
122 diff = storage.get_deeplake_object(path, DatasetDiff)
123 except KeyError:
124 diff = DatasetDiff()
125 storage.register_deeplake_object(path, diff)
126 return diff
127
[end of deeplake/core/version_control/dataset_diff.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/deeplake/core/version_control/dataset_diff.py b/deeplake/core/version_control/dataset_diff.py
--- a/deeplake/core/version_control/dataset_diff.py
+++ b/deeplake/core/version_control/dataset_diff.py
@@ -36,10 +36,13 @@
[
len(old_name).to_bytes(8, "big"),
len(new_name).to_bytes(8, "big"),
- (old_name + new_name).encode("utf-8"),
+ (old_name + new_name),
]
)
- for old_name, new_name in self.renamed.items()
+ for old_name, new_name in map(
+ lambda n: (n[0].encode("utf-8"), n[1].encode("utf-8")),
+ self.renamed.items(),
+ )
),
len(self.deleted).to_bytes(8, "big"),
*(
|
{"golden_diff": "diff --git a/deeplake/core/version_control/dataset_diff.py b/deeplake/core/version_control/dataset_diff.py\n--- a/deeplake/core/version_control/dataset_diff.py\n+++ b/deeplake/core/version_control/dataset_diff.py\n@@ -36,10 +36,13 @@\n [\n len(old_name).to_bytes(8, \"big\"),\n len(new_name).to_bytes(8, \"big\"),\n- (old_name + new_name).encode(\"utf-8\"),\n+ (old_name + new_name),\n ]\n )\n- for old_name, new_name in self.renamed.items()\n+ for old_name, new_name in map(\n+ lambda n: (n[0].encode(\"utf-8\"), n[1].encode(\"utf-8\")),\n+ self.renamed.items(),\n+ )\n ),\n len(self.deleted).to_bytes(8, \"big\"),\n *(\n", "issue": "[BUG] Possibly unsafe conversion of DatasetDiff instance to bytes\n## \ud83d\udc1b\ud83d\udc1b Bug Report\r\n\r\n### \u2697\ufe0f Current Behavior\r\n\r\nThe code block\r\n\r\nhttps://github.com/activeloopai/deeplake/blob/c88925d8afa94841972fe4fd6f1ddeb1ede8dde0/deeplake/core/version_control/dataset_diff.py#L37-L39\r\n\r\nassumes `len(str.encode('utf-8')) == len(str)` which is not true in general.\r\n\r\n### \ud83e\uddf0 Possible Solution\r\n\r\nThe code block should read:\r\n```python\r\n len(old_name_encoded).to_bytes(8, \"big\"),\r\n len(new_name_encoded).to_bytes(8, \"big\"),\r\n old_name_encoded + new_name_encoded,\r\n```\r\nwhere\r\n```python\r\nold_name_encoded = old_name.encode(\"utf-8\")\r\nnew_name_encoded = new_name.encode(\"utf-8\")\r\n```\n", "before_files": [{"content": "from deeplake.core.storage.deeplake_memory_object import DeepLakeMemoryObject\nfrom deeplake.core.storage import LRUCache\nfrom deeplake.util.keys import get_dataset_diff_key\nimport typing\nfrom collections import OrderedDict\n\n\nclass DatasetDiff(DeepLakeMemoryObject):\n def __init__(self) -> None:\n self.is_dirty = False\n self.info_updated = False\n self.renamed: typing.OrderedDict = OrderedDict()\n self.deleted: typing.List[str] = []\n\n def tobytes(self) -> bytes:\n \"\"\"Returns bytes representation of the dataset diff\n\n The format stores the following information in order:\n 1. The first byte is a boolean value indicating whether the Dataset info was modified or not.\n 2. The next 8 bytes give the number of renamed tensors, let's call this m.\n 3. Next, there will be m blocks of bytes with the following format:\n 1. 8 + 8 bytes giving the length of old and new names, let's call them x and y.\n 2. x bytes of old name.\n 3. y bytes of new name.\n 4. The next 8 bytes give the number of deleted tensors, let's call this n.\n 5. Next, there will be n blocks of bytes with the following format:\n 1. 8 bytes giving the length of the name of the deleted tensor, let's call this z.\n 2. 
n bytes of name of the deleted tensor.\n \"\"\"\n return b\"\".join(\n [\n self.info_updated.to_bytes(1, \"big\"),\n len(self.renamed).to_bytes(8, \"big\"),\n *(\n b\"\".join(\n [\n len(old_name).to_bytes(8, \"big\"),\n len(new_name).to_bytes(8, \"big\"),\n (old_name + new_name).encode(\"utf-8\"),\n ]\n )\n for old_name, new_name in self.renamed.items()\n ),\n len(self.deleted).to_bytes(8, \"big\"),\n *(\n b\"\".join([len(name).to_bytes(8, \"big\"), name.encode(\"utf-8\")])\n for name in self.deleted\n ),\n ]\n )\n\n @classmethod\n def frombuffer(cls, data: bytes) -> \"DatasetDiff\":\n \"\"\"Creates a DatasetDiff object from bytes\"\"\"\n dataset_diff = cls()\n dataset_diff.info_updated = bool(int.from_bytes(data[:1], \"big\"))\n len_renamed = int.from_bytes(data[1:9], \"big\")\n pos = 9\n for _ in range(len_renamed):\n len_old, len_new = (\n int.from_bytes(data[pos : pos + 8], \"big\"),\n int.from_bytes(data[pos + 8 : pos + 16], \"big\"),\n )\n pos += 16\n old_name, new_name = (\n data[pos : pos + len_old].decode(\"utf-8\"),\n data[pos + len_old : pos + len_old + len_new].decode(\"utf-8\"),\n )\n pos += len_old + len_new\n dataset_diff.renamed[old_name] = new_name\n len_deleted = int.from_bytes(data[pos : pos + 8], \"big\")\n pos += 8\n for _ in range(len_deleted):\n len_name = int.from_bytes(data[pos : pos + 8], \"big\")\n pos += 8\n name = data[pos : pos + len_name].decode(\"utf-8\")\n pos += len_name\n dataset_diff.deleted.append(name)\n return dataset_diff\n\n @property\n def nbytes(self):\n \"\"\"Returns number of bytes required to store the dataset diff\"\"\"\n return 1\n\n def modify_info(self) -> None:\n \"\"\"Stores information that the info has changed\"\"\"\n self.info_updated = True\n self.is_dirty = True\n\n def tensor_renamed(self, old_name, new_name):\n \"\"\"Adds old and new name of a tensor that was renamed to renamed\"\"\"\n for old, new in self.renamed.items():\n if old_name == new:\n if old == new_name:\n self.renamed.pop(old)\n else:\n self.renamed[old] = new_name\n break\n else:\n self.renamed[old_name] = new_name\n\n self.is_dirty = True\n\n def tensor_deleted(self, name):\n \"\"\"Adds name of deleted tensor to deleted\"\"\"\n if name not in self.deleted:\n for old, new in self.renamed.items():\n if name == new:\n self.renamed.pop(old)\n self.deleted.append(old)\n break\n else:\n self.deleted.append(name)\n self.is_dirty = True\n\n\ndef load_dataset_diff(dataset):\n storage: LRUCache = dataset.storage\n path = get_dataset_diff_key(dataset.version_state[\"commit_id\"])\n try:\n diff = storage.get_deeplake_object(path, DatasetDiff)\n except KeyError:\n diff = DatasetDiff()\n storage.register_deeplake_object(path, diff)\n return diff\n", "path": "deeplake/core/version_control/dataset_diff.py"}]}
| 2,113 | 205 |
gh_patches_debug_21888
|
rasdani/github-patches
|
git_diff
|
wagtail__wagtail-651
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
"wagtail start project_name" fails to run on Windows 7
Hi. So everything is compiled perfectly inside the virtualenv and I'm trying to start a new project.
```
$ wagtail start wagtailtest
Creating a wagtail project called wagtailtest
Traceback (most recent call last):
File "d:\VirtualEnvs\wagtail_env\Scripts\wagtail-script.py", line 9, in <module>
load_entry_point('wagtail==0.6', 'console_scripts', 'wagtail')()
File "d:\VirtualEnvs\wagtail_env\lib\site-packages\wagtail\bin\wagtail.py", line 75, in main
COMMANDS[command](parser, options, args)
File "d:\VirtualEnvs\wagtail_env\lib\site-packages\wagtail\bin\wagtail.py", line 51, in create_project
project_name
File "C:\Python27\Lib\subprocess.py", line 522, in call
return Popen(*popenargs, **kwargs).wait()
File "C:\Python27\Lib\subprocess.py", line 710, in __init__
errread, errwrite)
File "C:\Python27\Lib\subprocess.py", line 958, in _execute_child
startupinfo)
WindowsError: [Error 193] %1 is not a valid Win32 application
```
Windows 7 x64, Python 2.7 x32.
</issue>
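Error 193 ("%1 is not a valid Win32 application") typically means Windows was asked to execute a Python script (`django-admin.py`) as if it were a binary. One portable direction is to drive Django's project machinery in-process instead of spawning a script via `subprocess`; the function below merely sketches that idea and is not the repository's code:

```python
from django.core.management import ManagementUtility


def start_project(template_path, project_name):
    utility = ManagementUtility([
        "django-admin.py", "startproject",
        "--template=" + template_path,
        "--name=Vagrantfile", "--ext=html,rst",
        project_name,
    ])
    utility.execute()
```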
<code>
[start of wagtail/bin/wagtail.py]
1 #!/usr/bin/env python
2 from __future__ import print_function, absolute_import
3
4 import os
5 import subprocess
6 import errno
7 import sys
8
9 from optparse import OptionParser
10
11
12 def create_project(parser, options, args):
13 # Validate args
14 if len(args) < 2:
15 parser.error("Please specify a name for your wagtail installation")
16 elif len(args) > 2:
17 parser.error("Too many arguments")
18
19 project_name = args[1]
20
21 # Make sure given name is not already in use by another python package/module.
22 try:
23 __import__(project_name)
24 except ImportError:
25 pass
26 else:
27 parser.error("'%s' conflicts with the name of an existing "
28 "Python module and cannot be used as a project "
29 "name. Please try another name." % project_name)
30
31 # Make sure directory does not already exist
32 if os.path.exists(project_name):
33 print('A directory called %(project_name)s already exists. \
34 Please choose another name for your wagtail project or remove the existing directory.' % {'project_name': project_name})
35 sys.exit(errno.EEXIST)
36
37 print("Creating a wagtail project called %(project_name)s" % {'project_name': project_name})
38
39 # Create the project from the wagtail template using startapp
40
41 # First find the path to wagtail
42 import wagtail
43 wagtail_path = os.path.dirname(wagtail.__file__)
44 template_path = os.path.join(wagtail_path, 'project_template')
45
46 # Call django-admin startproject
47 result = subprocess.call([
48 'django-admin.py', 'startproject',
49 '--template=' + template_path,
50 '--name=Vagrantfile', '--ext=html,rst',
51 project_name
52 ])
53
54 if result == 0:
55 print("Success! %(project_name)s is created" % {'project_name': project_name})
56
57
58 COMMANDS = {
59 'start': create_project,
60 }
61
62 def main():
63 # Parse options
64 parser = OptionParser(usage="Usage: %prog start project_name")
65 (options, args) = parser.parse_args()
66
67 # Find command
68 try:
69 command = args[0]
70 except IndexError:
71 parser.print_help()
72 return
73
74 if command in COMMANDS:
75 COMMANDS[command](parser, options, args)
76 else:
77 parser.error("Unrecognised command: " + command)
78
79 if __name__ == "__main__":
80 main()
81
[end of wagtail/bin/wagtail.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/wagtail/bin/wagtail.py b/wagtail/bin/wagtail.py
--- a/wagtail/bin/wagtail.py
+++ b/wagtail/bin/wagtail.py
@@ -2,11 +2,11 @@
from __future__ import print_function, absolute_import
import os
-import subprocess
import errno
import sys
from optparse import OptionParser
+from django.core.management import ManagementUtility
def create_project(parser, options, args):
@@ -44,15 +44,15 @@
template_path = os.path.join(wagtail_path, 'project_template')
# Call django-admin startproject
- result = subprocess.call([
+ utility = ManagementUtility([
'django-admin.py', 'startproject',
'--template=' + template_path,
'--name=Vagrantfile', '--ext=html,rst',
project_name
])
+ utility.execute()
- if result == 0:
- print("Success! %(project_name)s is created" % {'project_name': project_name})
+ print("Success! %(project_name)s is created" % {'project_name': project_name})
COMMANDS = {
|
{"golden_diff": "diff --git a/wagtail/bin/wagtail.py b/wagtail/bin/wagtail.py\n--- a/wagtail/bin/wagtail.py\n+++ b/wagtail/bin/wagtail.py\n@@ -2,11 +2,11 @@\n from __future__ import print_function, absolute_import\n \n import os\n-import subprocess\n import errno\n import sys\n \n from optparse import OptionParser\n+from django.core.management import ManagementUtility\n \n \n def create_project(parser, options, args):\n@@ -44,15 +44,15 @@\n template_path = os.path.join(wagtail_path, 'project_template')\n \n # Call django-admin startproject\n- result = subprocess.call([\n+ utility = ManagementUtility([\n 'django-admin.py', 'startproject',\n '--template=' + template_path,\n '--name=Vagrantfile', '--ext=html,rst',\n project_name\n ])\n+ utility.execute()\n \n- if result == 0:\n- print(\"Success! %(project_name)s is created\" % {'project_name': project_name})\n+ print(\"Success! %(project_name)s is created\" % {'project_name': project_name})\n \n \n COMMANDS = {\n", "issue": "\"wagtail start project_name\" fails to run on Windows 7\nHi. So everything is compiled perfectly inside the virtualenv and I'm trying to start a new project.\n\n```\n$ wagtail start wagtailtest\nCreating a wagtail project called wagtailtest\nTraceback (most recent call last):\n File \"d:\\VirtualEnvs\\wagtail_env\\Scripts\\wagtail-script.py\", line 9, in <module>\n load_entry_point('wagtail==0.6', 'console_scripts', 'wagtail')()\n File \"d:\\VirtualEnvs\\wagtail_env\\lib\\site-packages\\wagtail\\bin\\wagtail.py\", line 75, in main\n COMMANDS[command](parser, options, args)\n File \"d:\\VirtualEnvs\\wagtail_env\\lib\\site-packages\\wagtail\\bin\\wagtail.py\", line 51, in create_project\n project_name\n File \"C:\\Python27\\Lib\\subprocess.py\", line 522, in call\n return Popen(*popenargs, **kwargs).wait()\n File \"C:\\Python27\\Lib\\subprocess.py\", line 710, in __init__\n errread, errwrite)\n File \"C:\\Python27\\Lib\\subprocess.py\", line 958, in _execute_child\n startupinfo)\nWindowsError: [Error 193] %1 is not a valid Win32 application\n```\n\nWindows 7 x64, Python 2.7 x32.\n\n", "before_files": [{"content": "#!/usr/bin/env python\nfrom __future__ import print_function, absolute_import\n\nimport os\nimport subprocess\nimport errno\nimport sys\n\nfrom optparse import OptionParser\n\n\ndef create_project(parser, options, args):\n # Validate args\n if len(args) < 2:\n parser.error(\"Please specify a name for your wagtail installation\")\n elif len(args) > 2:\n parser.error(\"Too many arguments\")\n\n project_name = args[1]\n\n # Make sure given name is not already in use by another python package/module.\n try:\n __import__(project_name)\n except ImportError:\n pass\n else:\n parser.error(\"'%s' conflicts with the name of an existing \"\n \"Python module and cannot be used as a project \"\n \"name. Please try another name.\" % project_name)\n\n # Make sure directory does not already exist\n if os.path.exists(project_name):\n print('A directory called %(project_name)s already exists. \\\n Please choose another name for your wagtail project or remove the existing directory.' 
% {'project_name': project_name})\n sys.exit(errno.EEXIST)\n\n print(\"Creating a wagtail project called %(project_name)s\" % {'project_name': project_name})\n\n # Create the project from the wagtail template using startapp\n\n # First find the path to wagtail\n import wagtail\n wagtail_path = os.path.dirname(wagtail.__file__)\n template_path = os.path.join(wagtail_path, 'project_template')\n\n # Call django-admin startproject\n result = subprocess.call([\n 'django-admin.py', 'startproject',\n '--template=' + template_path,\n '--name=Vagrantfile', '--ext=html,rst',\n project_name\n ])\n\n if result == 0:\n print(\"Success! %(project_name)s is created\" % {'project_name': project_name})\n\n\nCOMMANDS = {\n 'start': create_project,\n}\n\ndef main():\n # Parse options\n parser = OptionParser(usage=\"Usage: %prog start project_name\")\n (options, args) = parser.parse_args()\n\n # Find command\n try:\n command = args[0]\n except IndexError:\n parser.print_help()\n return\n\n if command in COMMANDS:\n COMMANDS[command](parser, options, args)\n else:\n parser.error(\"Unrecognised command: \" + command)\n\nif __name__ == \"__main__\":\n main()\n", "path": "wagtail/bin/wagtail.py"}]}
| 1,576 | 258 |
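
For reference, the fix above swaps `subprocess.call` on the `django-admin.py` script (which Windows cannot execute directly as a Win32 application, hence `WindowsError 193`) for Django's in-process `ManagementUtility`. Below is a minimal standalone sketch of that approach; the `start_project` helper and the `my_project` name are placeholders for illustration, not code from the Wagtail repository.

```python
# Minimal sketch: run Django's "startproject" in-process instead of
# shelling out to the django-admin.py script, which Windows cannot
# execute directly as a Win32 application.
from django.core.management import ManagementUtility


def start_project(project_name, template_path=None):
    # argv mirrors sys.argv: program name, subcommand, options, target.
    argv = ["django-admin", "startproject"]
    if template_path:
        argv.append("--template=" + template_path)
    argv.append(project_name)
    utility = ManagementUtility(argv)
    utility.execute()  # raises/exits on failure instead of returning a code


if __name__ == "__main__":
    start_project("my_project")  # "my_project" is a placeholder name
```

Running the command in-process also means failures surface through Django's own error handling rather than as an opaque subprocess return code.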
gh_patches_debug_11078
|
rasdani/github-patches
|
git_diff
|
svthalia__concrexit-1717
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ValidationError when saving completed membership registration
In GitLab by _thaliatechnicie on Jun 5, 2019, 13:13
### One-sentence description
<!-- What breaks -->
### Current behaviour / Reproducing the bug
Click save while observing a completed registration.
Sentry Issue: [CONCREXIT-4](https://sentry.io/organizations/thalia/issues/1057927495/?referrer=gitlab_integration)
```
ValueError: 'RegistrationForm' has no field named 'email'.
(14 additional frame(s) were not displayed)
...
File "django/forms/forms.py", line 180, in errors
self.full_clean()
File "django/forms/forms.py", line 383, in full_clean
self._post_clean()
File "django/forms/models.py", line 405, in _post_clean
self._update_errors(e)
File "django/forms/models.py", line 379, in _update_errors
self.add_error(None, errors)
File "django/forms/forms.py", line 353, in add_error
"'%s' has no field named '%s'." % (self.__class__.__name__, field))
ValidationError: {'email': ['A user with that email address already exists. Login using the existing account and renew the membership by visiting the account settings.'], 'student_number': ['A user with that student number already exists. Login using the existing account and renew the membership by visiting the account settings.'], 'username': ['A user with that username already exists.']}
File "django/forms/models.py", line 403, in _post_clean
self.instance.full_clean(exclude=exclude, validate_unique=False)
File "django/db/models/base.py", line 1152, in full_clean
raise ValidationError(errors)
```
### Expected behaviour
No crash, maybe even no save button available.
</issue>
<code>
[start of website/registrations/admin.py]
1 """Registers admin interfaces for the registrations module."""
2 from functools import partial
3
4 from django.contrib import admin, messages
5 from django.contrib.admin.utils import model_ngettext
6 from django.forms import Field
7 from django.utils.translation import gettext_lazy as _
8
9 from payments.widgets import PaymentWidget
10 from . import services
11 from .forms import RegistrationAdminForm
12 from .models import Entry, Registration, Renewal, Reference
13
14
15 class ReferenceInline(admin.StackedInline):
16 model = Reference
17 extra = 0
18
19
20 def _show_message(model_admin, request, n, message, error):
21 """Show a message in the Django Admin."""
22 if n == 0:
23 model_admin.message_user(request, error, messages.ERROR)
24 else:
25 model_admin.message_user(
26 request,
27 message % {"count": n, "items": model_ngettext(model_admin.opts, n)},
28 messages.SUCCESS,
29 )
30
31
32 @admin.register(Registration)
33 class RegistrationAdmin(admin.ModelAdmin):
34 """Manage the registrations."""
35
36 list_display = (
37 "name",
38 "email",
39 "status",
40 "membership_type",
41 "contribution",
42 "created_at",
43 "payment",
44 "no_references",
45 "reference_count",
46 )
47 list_filter = (
48 "status",
49 "programme",
50 "membership_type",
51 "no_references",
52 "payment__type",
53 "contribution",
54 )
55 inlines = (ReferenceInline,)
56 search_fields = (
57 "first_name",
58 "last_name",
59 "email",
60 "phone_number",
61 "student_number",
62 )
63 date_hierarchy = "created_at"
64 fieldsets = (
65 (
66 _("Application information"),
67 {
68 "fields": (
69 "created_at",
70 "updated_at",
71 "username",
72 "length",
73 "contribution",
74 "membership_type",
75 "status",
76 "payment",
77 "remarks",
78 )
79 },
80 ),
81 (
82 _("Personal information"),
83 {
84 "fields": (
85 "first_name",
86 "last_name",
87 "birthday",
88 "optin_birthday",
89 "email",
90 "optin_mailinglist",
91 "phone_number",
92 )
93 },
94 ),
95 (
96 _("Address"),
97 {
98 "fields": (
99 "address_street",
100 "address_street2",
101 "address_postal_code",
102 "address_city",
103 "address_country",
104 )
105 },
106 ),
107 (
108 _("Financial"),
109 {"fields": ("direct_debit", "initials", "iban", "bic", "signature",)},
110 ),
111 (
112 _("University information"),
113 {"fields": ("student_number", "programme", "starting_year",)},
114 ),
115 )
116 actions = ["accept_selected", "reject_selected"]
117 form = RegistrationAdminForm
118
119 def reference_count(self, obj):
120 return obj.reference_set.count()
121
122 reference_count.short_description = _("references")
123
124 def get_form(self, request, obj=None, **kwargs):
125 return super().get_form(
126 request,
127 obj,
128 formfield_callback=partial(
129 self.formfield_for_dbfield, request=request, obj=obj
130 ),
131 **kwargs
132 )
133
134 def formfield_for_dbfield(self, db_field, request, obj=None, **kwargs):
135 field = super().formfield_for_dbfield(db_field, request, **kwargs)
136 if db_field.name == "payment":
137 return Field(
138 widget=PaymentWidget(obj=obj), initial=field.initial, required=False
139 )
140 return field
141
142 def changeform_view(self, request, object_id=None, form_url="", extra_context=None):
143 """Render the change formview.
144
145 Only allow when the entry has not been processed yet
146 """
147 obj = None
148 can_review = False
149 can_resend = False
150 can_revert = False
151 if object_id is not None and request.user.has_perm(
152 "registrations.review_entries"
153 ):
154 obj = Entry.objects.get(id=object_id)
155 can_review = obj.status == Entry.STATUS_REVIEW
156 can_revert = obj.status in [Entry.STATUS_ACCEPTED, Entry.STATUS_REJECTED]
157 try:
158 can_resend = obj.registration.status == Entry.STATUS_CONFIRM
159 except Registration.DoesNotExist:
160 pass
161 return super().changeform_view(
162 request,
163 object_id,
164 form_url,
165 {
166 "entry": obj,
167 "can_review": can_review,
168 "can_resend": can_resend,
169 "can_revert": can_revert,
170 },
171 )
172
173 def get_readonly_fields(self, request, obj=None):
174 if obj is None or not (
175 obj.status == Entry.STATUS_REJECTED
176 or obj.status == Entry.STATUS_ACCEPTED
177 or obj.status == Entry.STATUS_COMPLETED
178 ):
179 return ["status", "created_at", "updated_at", "payment", "contribution"]
180 return [
181 field.name
182 for field in self.model._meta.get_fields()
183 if field.name not in ["payment", "no_references"] and field.editable
184 ]
185
186 @staticmethod
187 def name(obj):
188 return obj.get_full_name()
189
190 def reject_selected(self, request, queryset):
191 """Reject the selected entries."""
192 if request.user.has_perm("registrations.review_entries"):
193 rows_updated = services.reject_entries(request.user.pk, queryset)
194 _show_message(
195 self,
196 request,
197 rows_updated,
198 message=_("Successfully rejected %(count)d %(items)s."),
199 error=_("The selected registration(s) could not be rejected."),
200 )
201
202 reject_selected.short_description = _("Reject selected registrations")
203 reject_selected.allowed_permissions = ("review",)
204
205 def accept_selected(self, request, queryset):
206 """Accept the selected entries."""
207 if request.user.has_perm("registrations.review_entries"):
208 rows_updated = services.accept_entries(request.user.pk, queryset)
209 _show_message(
210 self,
211 request,
212 rows_updated,
213 message=_("Successfully accepted %(count)d %(items)s."),
214 error=_("The selected registration(s) could not be accepted."),
215 )
216
217 accept_selected.short_description = _("Accept selected registrations")
218 accept_selected.allowed_permissions = ("review",)
219
220 def has_review_permission(self, request):
221 """Check if the user has the review permission."""
222 return request.user.has_perm("registrations.review_entries")
223
224 def save_model(self, request, obj, form, change):
225 if not (
226 obj.status == Entry.STATUS_REJECTED
227 or obj.status == Entry.STATUS_ACCEPTED
228 or obj.status == Entry.STATUS_COMPLETED
229 ):
230 super().save_model(request, obj, form, change)
231
232
233 @admin.register(Renewal)
234 class RenewalAdmin(RegistrationAdmin):
235 """Manage the renewals."""
236
237 list_display = (
238 "name",
239 "email",
240 "status",
241 "membership_type",
242 "contribution",
243 "created_at",
244 "payment",
245 "no_references",
246 "reference_count",
247 )
248 list_filter = (
249 "status",
250 "membership_type",
251 "no_references",
252 "payment__type",
253 "contribution",
254 )
255 search_fields = (
256 "member__first_name",
257 "member__last_name",
258 "member__email",
259 "member__profile__phone_number",
260 "member__profile__student_number",
261 )
262 date_hierarchy = "created_at"
263 fieldsets = (
264 (
265 _("Application information"),
266 {
267 "fields": (
268 "created_at",
269 "updated_at",
270 "length",
271 "contribution",
272 "membership_type",
273 "status",
274 "payment",
275 "remarks",
276 "member",
277 )
278 },
279 ),
280 )
281 actions = RegistrationAdmin.actions
282
283 def get_readonly_fields(self, request, obj=None):
284 """Make all fields read-only and add member if needed."""
285 fields = super().get_readonly_fields(request, obj)
286 if obj is None or not (
287 obj.status == Entry.STATUS_REJECTED
288 or obj.status == Entry.STATUS_ACCEPTED
289 or obj.status == Entry.STATUS_COMPLETED
290 ):
291 fields.remove("contribution")
292 if "member" not in fields and obj is not None:
293 return fields + ["member"]
294 return fields
295
296 @staticmethod
297 def name(obj):
298 return obj.member.get_full_name()
299
300 name.short_description = _("name")
301
302 @staticmethod
303 def email(obj):
304 return obj.member.email
305
[end of website/registrations/admin.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/website/registrations/admin.py b/website/registrations/admin.py
--- a/website/registrations/admin.py
+++ b/website/registrations/admin.py
@@ -221,6 +221,14 @@
"""Check if the user has the review permission."""
return request.user.has_perm("registrations.review_entries")
+ def has_change_permission(self, request, obj=None):
+ """Completed registrations are read-only."""
+ return (
+ False
+ if obj and obj.status == Entry.STATUS_COMPLETED
+ else super().has_change_permission(request, obj)
+ )
+
def save_model(self, request, obj, form, change):
if not (
obj.status == Entry.STATUS_REJECTED
|
{"golden_diff": "diff --git a/website/registrations/admin.py b/website/registrations/admin.py\n--- a/website/registrations/admin.py\n+++ b/website/registrations/admin.py\n@@ -221,6 +221,14 @@\n \"\"\"Check if the user has the review permission.\"\"\"\n return request.user.has_perm(\"registrations.review_entries\")\n \n+ def has_change_permission(self, request, obj=None):\n+ \"\"\"Completed registrations are read-only.\"\"\"\n+ return (\n+ False\n+ if obj and obj.status == Entry.STATUS_COMPLETED\n+ else super().has_change_permission(request, obj)\n+ )\n+\n def save_model(self, request, obj, form, change):\n if not (\n obj.status == Entry.STATUS_REJECTED\n", "issue": "ValidationError when saving completed membership registration\nIn GitLab by _thaliatechnicie on Jun 5, 2019, 13:13\n\n\n### One-sentence description\n\n<!-- What breaks -->\n\n### Current behaviour / Reproducing the bug\n\nClick save while observing a completed registration.\n\nSentry Issue: [CONCREXIT-4](https://sentry.io/organizations/thalia/issues/1057927495/?referrer=gitlab_integration)\n\n```\nValueError: 'RegistrationForm' has no field named 'email'.\n(14 additional frame(s) were not displayed)\n...\n File \"django/forms/forms.py\", line 180, in errors\n self.full_clean()\n File \"django/forms/forms.py\", line 383, in full_clean\n self._post_clean()\n File \"django/forms/models.py\", line 405, in _post_clean\n self._update_errors(e)\n File \"django/forms/models.py\", line 379, in _update_errors\n self.add_error(None, errors)\n File \"django/forms/forms.py\", line 353, in add_error\n \"'%s' has no field named '%s'.\" % (self.__class__.__name__, field))\n\nValidationError: {'email': ['A user with that email address already exists. Login using the existing account and renew the membership by visiting the account settings.'], 'student_number': ['A user with that student number already exists. Login using the existing account and renew the membership by visiting the account settings.'], 'username': ['A user with that username already exists.']}\n File \"django/forms/models.py\", line 403, in _post_clean\n self.instance.full_clean(exclude=exclude, validate_unique=False)\n File \"django/db/models/base.py\", line 1152, in full_clean\n raise ValidationError(errors)\n```\n\n### Expected behaviour\n\nNo crash, maybe even no save button available.\n\n", "before_files": [{"content": "\"\"\"Registers admin interfaces for the registrations module.\"\"\"\nfrom functools import partial\n\nfrom django.contrib import admin, messages\nfrom django.contrib.admin.utils import model_ngettext\nfrom django.forms import Field\nfrom django.utils.translation import gettext_lazy as _\n\nfrom payments.widgets import PaymentWidget\nfrom . 
import services\nfrom .forms import RegistrationAdminForm\nfrom .models import Entry, Registration, Renewal, Reference\n\n\nclass ReferenceInline(admin.StackedInline):\n model = Reference\n extra = 0\n\n\ndef _show_message(model_admin, request, n, message, error):\n \"\"\"Show a message in the Django Admin.\"\"\"\n if n == 0:\n model_admin.message_user(request, error, messages.ERROR)\n else:\n model_admin.message_user(\n request,\n message % {\"count\": n, \"items\": model_ngettext(model_admin.opts, n)},\n messages.SUCCESS,\n )\n\n\[email protected](Registration)\nclass RegistrationAdmin(admin.ModelAdmin):\n \"\"\"Manage the registrations.\"\"\"\n\n list_display = (\n \"name\",\n \"email\",\n \"status\",\n \"membership_type\",\n \"contribution\",\n \"created_at\",\n \"payment\",\n \"no_references\",\n \"reference_count\",\n )\n list_filter = (\n \"status\",\n \"programme\",\n \"membership_type\",\n \"no_references\",\n \"payment__type\",\n \"contribution\",\n )\n inlines = (ReferenceInline,)\n search_fields = (\n \"first_name\",\n \"last_name\",\n \"email\",\n \"phone_number\",\n \"student_number\",\n )\n date_hierarchy = \"created_at\"\n fieldsets = (\n (\n _(\"Application information\"),\n {\n \"fields\": (\n \"created_at\",\n \"updated_at\",\n \"username\",\n \"length\",\n \"contribution\",\n \"membership_type\",\n \"status\",\n \"payment\",\n \"remarks\",\n )\n },\n ),\n (\n _(\"Personal information\"),\n {\n \"fields\": (\n \"first_name\",\n \"last_name\",\n \"birthday\",\n \"optin_birthday\",\n \"email\",\n \"optin_mailinglist\",\n \"phone_number\",\n )\n },\n ),\n (\n _(\"Address\"),\n {\n \"fields\": (\n \"address_street\",\n \"address_street2\",\n \"address_postal_code\",\n \"address_city\",\n \"address_country\",\n )\n },\n ),\n (\n _(\"Financial\"),\n {\"fields\": (\"direct_debit\", \"initials\", \"iban\", \"bic\", \"signature\",)},\n ),\n (\n _(\"University information\"),\n {\"fields\": (\"student_number\", \"programme\", \"starting_year\",)},\n ),\n )\n actions = [\"accept_selected\", \"reject_selected\"]\n form = RegistrationAdminForm\n\n def reference_count(self, obj):\n return obj.reference_set.count()\n\n reference_count.short_description = _(\"references\")\n\n def get_form(self, request, obj=None, **kwargs):\n return super().get_form(\n request,\n obj,\n formfield_callback=partial(\n self.formfield_for_dbfield, request=request, obj=obj\n ),\n **kwargs\n )\n\n def formfield_for_dbfield(self, db_field, request, obj=None, **kwargs):\n field = super().formfield_for_dbfield(db_field, request, **kwargs)\n if db_field.name == \"payment\":\n return Field(\n widget=PaymentWidget(obj=obj), initial=field.initial, required=False\n )\n return field\n\n def changeform_view(self, request, object_id=None, form_url=\"\", extra_context=None):\n \"\"\"Render the change formview.\n\n Only allow when the entry has not been processed yet\n \"\"\"\n obj = None\n can_review = False\n can_resend = False\n can_revert = False\n if object_id is not None and request.user.has_perm(\n \"registrations.review_entries\"\n ):\n obj = Entry.objects.get(id=object_id)\n can_review = obj.status == Entry.STATUS_REVIEW\n can_revert = obj.status in [Entry.STATUS_ACCEPTED, Entry.STATUS_REJECTED]\n try:\n can_resend = obj.registration.status == Entry.STATUS_CONFIRM\n except Registration.DoesNotExist:\n pass\n return super().changeform_view(\n request,\n object_id,\n form_url,\n {\n \"entry\": obj,\n \"can_review\": can_review,\n \"can_resend\": can_resend,\n \"can_revert\": can_revert,\n },\n )\n\n def 
get_readonly_fields(self, request, obj=None):\n if obj is None or not (\n obj.status == Entry.STATUS_REJECTED\n or obj.status == Entry.STATUS_ACCEPTED\n or obj.status == Entry.STATUS_COMPLETED\n ):\n return [\"status\", \"created_at\", \"updated_at\", \"payment\", \"contribution\"]\n return [\n field.name\n for field in self.model._meta.get_fields()\n if field.name not in [\"payment\", \"no_references\"] and field.editable\n ]\n\n @staticmethod\n def name(obj):\n return obj.get_full_name()\n\n def reject_selected(self, request, queryset):\n \"\"\"Reject the selected entries.\"\"\"\n if request.user.has_perm(\"registrations.review_entries\"):\n rows_updated = services.reject_entries(request.user.pk, queryset)\n _show_message(\n self,\n request,\n rows_updated,\n message=_(\"Successfully rejected %(count)d %(items)s.\"),\n error=_(\"The selected registration(s) could not be rejected.\"),\n )\n\n reject_selected.short_description = _(\"Reject selected registrations\")\n reject_selected.allowed_permissions = (\"review\",)\n\n def accept_selected(self, request, queryset):\n \"\"\"Accept the selected entries.\"\"\"\n if request.user.has_perm(\"registrations.review_entries\"):\n rows_updated = services.accept_entries(request.user.pk, queryset)\n _show_message(\n self,\n request,\n rows_updated,\n message=_(\"Successfully accepted %(count)d %(items)s.\"),\n error=_(\"The selected registration(s) could not be accepted.\"),\n )\n\n accept_selected.short_description = _(\"Accept selected registrations\")\n accept_selected.allowed_permissions = (\"review\",)\n\n def has_review_permission(self, request):\n \"\"\"Check if the user has the review permission.\"\"\"\n return request.user.has_perm(\"registrations.review_entries\")\n\n def save_model(self, request, obj, form, change):\n if not (\n obj.status == Entry.STATUS_REJECTED\n or obj.status == Entry.STATUS_ACCEPTED\n or obj.status == Entry.STATUS_COMPLETED\n ):\n super().save_model(request, obj, form, change)\n\n\[email protected](Renewal)\nclass RenewalAdmin(RegistrationAdmin):\n \"\"\"Manage the renewals.\"\"\"\n\n list_display = (\n \"name\",\n \"email\",\n \"status\",\n \"membership_type\",\n \"contribution\",\n \"created_at\",\n \"payment\",\n \"no_references\",\n \"reference_count\",\n )\n list_filter = (\n \"status\",\n \"membership_type\",\n \"no_references\",\n \"payment__type\",\n \"contribution\",\n )\n search_fields = (\n \"member__first_name\",\n \"member__last_name\",\n \"member__email\",\n \"member__profile__phone_number\",\n \"member__profile__student_number\",\n )\n date_hierarchy = \"created_at\"\n fieldsets = (\n (\n _(\"Application information\"),\n {\n \"fields\": (\n \"created_at\",\n \"updated_at\",\n \"length\",\n \"contribution\",\n \"membership_type\",\n \"status\",\n \"payment\",\n \"remarks\",\n \"member\",\n )\n },\n ),\n )\n actions = RegistrationAdmin.actions\n\n def get_readonly_fields(self, request, obj=None):\n \"\"\"Make all fields read-only and add member if needed.\"\"\"\n fields = super().get_readonly_fields(request, obj)\n if obj is None or not (\n obj.status == Entry.STATUS_REJECTED\n or obj.status == Entry.STATUS_ACCEPTED\n or obj.status == Entry.STATUS_COMPLETED\n ):\n fields.remove(\"contribution\")\n if \"member\" not in fields and obj is not None:\n return fields + [\"member\"]\n return fields\n\n @staticmethod\n def name(obj):\n return obj.member.get_full_name()\n\n name.short_description = _(\"name\")\n\n @staticmethod\n def email(obj):\n return obj.member.email\n", "path": 
"website/registrations/admin.py"}]}
| 3,543 | 164 |
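
The diff above resolves the crash by denying change permission for completed entries, so Django's admin renders them read-only and never re-validates the form. Below is a minimal sketch of that general pattern; it assumes an existing Django project, and the `myapp` app, `Ticket` model, and `STATUS_DONE` constant are invented for illustration.

```python
# Minimal sketch of the pattern used in the fix: deny change permission
# for objects in a terminal state so the admin shows them read-only
# (no "Save" button, no POST that could trigger re-validation).
from django.contrib import admin

from myapp.models import Ticket  # hypothetical app and model

STATUS_DONE = "done"  # placeholder terminal state


@admin.register(Ticket)
class TicketAdmin(admin.ModelAdmin):
    def has_change_permission(self, request, obj=None):
        # obj is None on the changelist page; keep the default behaviour there.
        if obj is not None and obj.status == STATUS_DONE:
            return False  # admin now renders the object read-only
        return super().has_change_permission(request, obj)
```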
gh_patches_debug_21272
|
rasdani/github-patches
|
git_diff
|
ansible__molecule-2966
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
ansible_args passed to all steps
# Issue Type
- Bug report
# Molecule and Ansible details
```
molecule 3.2.0a0
ansible:2.9.7 python:3.8
delegated:3.2.0a0 from molecule
podman:0.3.0 from molecule_podman
vagrant:0.5 from molecule_vagrant
```
Molecule installation method (one of):
- pip
Ansible installation method (one of):
- OS package
# Desired Behavior
I have a particular use case where I want to test a full playbook and its roles. However, the playbook has to be called with specified tags. I would call the playbook in production like this:
```
ansible-playbook site.yml -t install
```
Therefore, I configured my `molecule.yml` like this:
```
---
dependency:
name: galaxy
driver:
name: vagrant
platforms:
- name: opensuse15
box: generic/opensuse15
- name: debian10
box: generic/debian10
provisioner:
name: ansible
ansible_args:
- --tags=install
playbooks:
converge: ../../site.yml
verifier:
name: ansible
```
I would expect molecule to pass these parameters to the converge step only.
# Actual Behaviour
Molecule passes the tags also to create and destroy, which makes my machines never start/stop.
A good create command looks like this
```
COMMAND: ansible-playbook --diff --inventory /home/t/.cache/molecule/playbook/default/inventory --skip-tags molecule-notest,notest /home/t/.local/lib/python3.8/site-packages/molecule_vagrant/playbooks/create.yml
```
With `ansible_args` it looks like that:
```
COMMAND: ansible-playbook --diff --inventory /home/t/.cache/molecule/playbook/default/inventory --skip-tags molecule-notest,notest --become --tags=install /home/t/.local/lib/python3.8/site-packages/molecule_default/playbooks/create.yml
```
The create playbook doesn't do anything with just this tag.
I have reproduced this with both the vagrant and podman drivers.
</issue>
<code>
[start of lib/molecule/provisioner/ansible_playbook.py]
1 # Copyright (c) 2015-2018 Cisco Systems, Inc.
2 #
3 # Permission is hereby granted, free of charge, to any person obtaining a copy
4 # of this software and associated documentation files (the "Software"), to
5 # deal in the Software without restriction, including without limitation the
6 # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
7 # sell copies of the Software, and to permit persons to whom the Software is
8 # furnished to do so, subject to the following conditions:
9 #
10 # The above copyright notice and this permission notice shall be included in
11 # all copies or substantial portions of the Software.
12 #
13 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
18 # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
19 # DEALINGS IN THE SOFTWARE.
20 """Ansible-Playbook Provisioner Module."""
21
22 from molecule import logger, util
23
24 LOG = logger.get_logger(__name__)
25
26
27 class AnsiblePlaybook(object):
28 """Privisioner Playbook."""
29
30 def __init__(self, playbook, config, out=LOG.out, err=LOG.error):
31 """
32 Set up the requirements to execute ``ansible-playbook`` and returns \
33 None.
34
35 :param playbook: A string containing the path to the playbook.
36 :param config: An instance of a Molecule config.
37 :param out: An optional function to process STDOUT for underlying
38 :func:``sh`` call.
39 :param err: An optional function to process STDERR for underlying
40 :func:``sh`` call.
41 :returns: None
42 """
43 self._ansible_command = None
44 self._playbook = playbook
45 self._config = config
46 self._out = out
47 self._err = err
48 self._cli = {}
49 self._env = self._config.provisioner.env
50
51 def bake(self):
52 """
53 Bake an ``ansible-playbook`` command so it's ready to execute and \
54 returns ``None``.
55
56 :return: None
57 """
58 if not self._playbook:
59 return
60
61 # Pass a directory as inventory to let Ansible merge the multiple
62 # inventory sources located under
63 self.add_cli_arg("inventory", self._config.provisioner.inventory_directory)
64 options = util.merge_dicts(self._config.provisioner.options, self._cli)
65 verbose_flag = util.verbose_flag(options)
66 if self._playbook != self._config.provisioner.playbooks.converge:
67 if options.get("become"):
68 del options["become"]
69
70 ansible_args = list(self._config.provisioner.ansible_args) + list(
71 self._config.ansible_args
72 )
73
74 # if ansible_args:
75 # if self._config.action not in ["create", "destroy"]:
76 # # inserts ansible_args at index 1
77 # self._ansible_command.cmd.extend(ansible_args)
78
79 self._ansible_command = util.BakedCommand(
80 cmd=[
81 "ansible-playbook",
82 *util.dict2args(options),
83 *util.bool2args(verbose_flag),
84 *ansible_args,
85 self._playbook, # must always go last
86 ],
87 cwd=self._config.scenario.directory,
88 env=self._env,
89 stdout=self._out,
90 stderr=self._err,
91 )
92
93 def execute(self):
94 """
95 Execute ``ansible-playbook`` and returns a string.
96
97 :return: str
98 """
99 if self._ansible_command is None:
100 self.bake()
101
102 if not self._playbook:
103 LOG.warning("Skipping, %s action has no playbook." % self._config.action)
104 return
105
106 self._config.driver.sanity_checks()
107 result = util.run_command(self._ansible_command, debug=self._config.debug)
108 if result.returncode != 0:
109 util.sysexit_with_message(
110 f"Ansible return code was {result.returncode}, command was: {result.args}",
111 result.returncode,
112 )
113
114 return result.stdout
115
116 def add_cli_arg(self, name, value):
117 """
118 Add argument to CLI passed to ansible-playbook and returns None.
119
120 :param name: A string containing the name of argument to be added.
121 :param value: The value of argument to be added.
122 :return: None
123 """
124 if value:
125 self._cli[name] = value
126
127 def add_env_arg(self, name, value):
128 """
129 Add argument to environment passed to ansible-playbook and returns \
130 None.
131
132 :param name: A string containing the name of argument to be added.
133 :param value: The value of argument to be added.
134 :return: None
135 """
136 self._env[name] = value
137
[end of lib/molecule/provisioner/ansible_playbook.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/lib/molecule/provisioner/ansible_playbook.py b/lib/molecule/provisioner/ansible_playbook.py
--- a/lib/molecule/provisioner/ansible_playbook.py
+++ b/lib/molecule/provisioner/ansible_playbook.py
@@ -67,14 +67,20 @@
if options.get("become"):
del options["become"]
- ansible_args = list(self._config.provisioner.ansible_args) + list(
- self._config.ansible_args
- )
-
- # if ansible_args:
- # if self._config.action not in ["create", "destroy"]:
- # # inserts ansible_args at index 1
- # self._ansible_command.cmd.extend(ansible_args)
+ # We do not pass user-specified Ansible arguments to the create and
+ # destroy invocations because playbooks involved in those two
+ # operations are not always provided by end users. And in those cases,
+ # custom Ansible arguments can break the creation and destruction
+ # processes.
+ #
+ # If users need to modify the creation of deletion, they can supply
+ # custom playbooks and specify them in the scenario configuration.
+ if self._config.action not in ["create", "destroy"]:
+ ansible_args = list(self._config.provisioner.ansible_args) + list(
+ self._config.ansible_args
+ )
+ else:
+ ansible_args = []
self._ansible_command = util.BakedCommand(
cmd=[
|
{"golden_diff": "diff --git a/lib/molecule/provisioner/ansible_playbook.py b/lib/molecule/provisioner/ansible_playbook.py\n--- a/lib/molecule/provisioner/ansible_playbook.py\n+++ b/lib/molecule/provisioner/ansible_playbook.py\n@@ -67,14 +67,20 @@\n if options.get(\"become\"):\n del options[\"become\"]\n \n- ansible_args = list(self._config.provisioner.ansible_args) + list(\n- self._config.ansible_args\n- )\n-\n- # if ansible_args:\n- # if self._config.action not in [\"create\", \"destroy\"]:\n- # # inserts ansible_args at index 1\n- # self._ansible_command.cmd.extend(ansible_args)\n+ # We do not pass user-specified Ansible arguments to the create and\n+ # destroy invocations because playbooks involved in those two\n+ # operations are not always provided by end users. And in those cases,\n+ # custom Ansible arguments can break the creation and destruction\n+ # processes.\n+ #\n+ # If users need to modify the creation of deletion, they can supply\n+ # custom playbooks and specify them in the scenario configuration.\n+ if self._config.action not in [\"create\", \"destroy\"]:\n+ ansible_args = list(self._config.provisioner.ansible_args) + list(\n+ self._config.ansible_args\n+ )\n+ else:\n+ ansible_args = []\n \n self._ansible_command = util.BakedCommand(\n cmd=[\n", "issue": "ansible_args passed to all steps\n# Issue Type\r\n\r\n- Bug report\r\n\r\n# Molecule and Ansible details\r\n\r\n```\r\nmolecule 3.2.0a0\r\n ansible:2.9.7 python:3.8\r\n delegated:3.2.0a0 from molecule\r\n podman:0.3.0 from molecule_podman\r\n vagrant:0.5 from molecule_vagrant\r\n```\r\n\r\nMolecule installation method (one of):\r\n\r\n- pip\r\n\r\nAnsible installation method (one of):\r\n\r\n- OS package\r\n\r\n# Desired Behavior\r\n\r\nI have a particular use case where I want to test a full playbook and it's roles. However, the playbook has to be called with specified tags. I would call the playbook in production like this:\r\n```\r\nansible-playbook site.yml -t install\r\n```\r\nTherefore, I configured my `molecule.yml` like this:\r\n```\r\n---\r\ndependency:\r\n name: galaxy\r\ndriver:\r\n name: vagrant\r\nplatforms:\r\n - name: opensuse15\r\n box: generic/opensuse15\r\n - name: debian10\r\n box: generic/debian10\r\nprovisioner:\r\n name: ansible\r\n ansible_args:\r\n - --tags=install\r\n playbooks:\r\n converge: ../../site.yml\r\nverifier:\r\n name: ansible\r\n```\r\nI would expect molecule to pass these parameters to the converge step only. 
\r\n\r\n# Actual Behaviour\r\nMolecule passes the tags also to create and destroy, which makes my machines never start/stop.\r\n\r\nA good create command looks like this\r\n```\r\nCOMMAND: ansible-playbook --diff --inventory /home/t/.cache/molecule/playbook/default/inventory --skip-tags molecule-notest,notest /home/t/.local/lib/python3.8/site-packages/molecule_vagrant/playbooks/create.yml\r\n```\r\nWith `ansible_args` it looks like that: \r\n```\r\nCOMMAND: ansible-playbook --diff --inventory /home/t/.cache/molecule/playbook/default/inventory --skip-tags molecule-notest,notest --become --tags=install /home/t/.local/lib/python3.8/site-packages/molecule_default/playbooks/create.yml\r\n```\r\nThe create playbook doesn't do anything with just this tag.\r\n\r\nI have reprodiced this with both vagrant and podman driver.\n", "before_files": [{"content": "# Copyright (c) 2015-2018 Cisco Systems, Inc.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to\n# deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n# sell copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n\"\"\"Ansible-Playbook Provisioner Module.\"\"\"\n\nfrom molecule import logger, util\n\nLOG = logger.get_logger(__name__)\n\n\nclass AnsiblePlaybook(object):\n \"\"\"Privisioner Playbook.\"\"\"\n\n def __init__(self, playbook, config, out=LOG.out, err=LOG.error):\n \"\"\"\n Set up the requirements to execute ``ansible-playbook`` and returns \\\n None.\n\n :param playbook: A string containing the path to the playbook.\n :param config: An instance of a Molecule config.\n :param out: An optional function to process STDOUT for underlying\n :func:``sh`` call.\n :param err: An optional function to process STDERR for underlying\n :func:``sh`` call.\n :returns: None\n \"\"\"\n self._ansible_command = None\n self._playbook = playbook\n self._config = config\n self._out = out\n self._err = err\n self._cli = {}\n self._env = self._config.provisioner.env\n\n def bake(self):\n \"\"\"\n Bake an ``ansible-playbook`` command so it's ready to execute and \\\n returns ``None``.\n\n :return: None\n \"\"\"\n if not self._playbook:\n return\n\n # Pass a directory as inventory to let Ansible merge the multiple\n # inventory sources located under\n self.add_cli_arg(\"inventory\", self._config.provisioner.inventory_directory)\n options = util.merge_dicts(self._config.provisioner.options, self._cli)\n verbose_flag = util.verbose_flag(options)\n if self._playbook != self._config.provisioner.playbooks.converge:\n if options.get(\"become\"):\n del options[\"become\"]\n\n ansible_args = list(self._config.provisioner.ansible_args) + list(\n self._config.ansible_args\n 
)\n\n # if ansible_args:\n # if self._config.action not in [\"create\", \"destroy\"]:\n # # inserts ansible_args at index 1\n # self._ansible_command.cmd.extend(ansible_args)\n\n self._ansible_command = util.BakedCommand(\n cmd=[\n \"ansible-playbook\",\n *util.dict2args(options),\n *util.bool2args(verbose_flag),\n *ansible_args,\n self._playbook, # must always go last\n ],\n cwd=self._config.scenario.directory,\n env=self._env,\n stdout=self._out,\n stderr=self._err,\n )\n\n def execute(self):\n \"\"\"\n Execute ``ansible-playbook`` and returns a string.\n\n :return: str\n \"\"\"\n if self._ansible_command is None:\n self.bake()\n\n if not self._playbook:\n LOG.warning(\"Skipping, %s action has no playbook.\" % self._config.action)\n return\n\n self._config.driver.sanity_checks()\n result = util.run_command(self._ansible_command, debug=self._config.debug)\n if result.returncode != 0:\n util.sysexit_with_message(\n f\"Ansible return code was {result.returncode}, command was: {result.args}\",\n result.returncode,\n )\n\n return result.stdout\n\n def add_cli_arg(self, name, value):\n \"\"\"\n Add argument to CLI passed to ansible-playbook and returns None.\n\n :param name: A string containing the name of argument to be added.\n :param value: The value of argument to be added.\n :return: None\n \"\"\"\n if value:\n self._cli[name] = value\n\n def add_env_arg(self, name, value):\n \"\"\"\n Add argument to environment passed to ansible-playbook and returns \\\n None.\n\n :param name: A string containing the name of argument to be added.\n :param value: The value of argument to be added.\n :return: None\n \"\"\"\n self._env[name] = value\n", "path": "lib/molecule/provisioner/ansible_playbook.py"}]}
| 2,438 | 342 |
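
The fix above only forwards user-supplied `ansible_args` when the action is neither `create` nor `destroy`. Below is a small, self-contained sketch of the same gating idea; the function and variable names are illustrative, not Molecule's real provisioner API.

```python
# Minimal sketch of gating user-supplied Ansible arguments by action so
# that options such as "--tags=install" reach converge but never the
# bundled create/destroy playbooks.
ACTIONS_WITHOUT_USER_ARGS = {"create", "destroy"}


def build_playbook_command(playbook, inventory, action, user_ansible_args=()):
    cmd = ["ansible-playbook", "--inventory", inventory]
    if action not in ACTIONS_WITHOUT_USER_ARGS:
        # Only converge/verify-style actions get the molecule.yml arguments.
        cmd.extend(user_ansible_args)
    cmd.append(playbook)  # the playbook path must stay last
    return cmd


print(build_playbook_command("create.yml", "inv/", "create", ["--tags=install"]))
print(build_playbook_command("site.yml", "inv/", "converge", ["--tags=install"]))
```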
gh_patches_debug_6267
|
rasdani/github-patches
|
git_diff
|
encode__starlette-33
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Error serving static files larger than 4096 bytes
Static files larger than 4096 bytes do not appear to be served correctly.
Here's a test I just wrote that illustrates the problem: https://github.com/simonw/starlette/commit/e2d6665fa5c32e77a3fe22836b14620a7f5999bb
Running that test gives me the following output:
```
(venv) starlette $ PYTHONPATH=. pytest -k test_large_staticfile
===================================================== test session starts ======================================================
platform darwin -- Python 3.6.5, pytest-3.6.1, py-1.5.3, pluggy-0.6.0
rootdir: /Users/simonw/Dropbox/Development/starlette, inifile:
collected 43 items / 42 deselected
tests/test_staticfiles.py F [100%]
=========================================================== FAILURES ===========================================================
____________________________________________________ test_large_staticfile _____________________________________________________
tmpdir = local('/private/var/folders/jj/fngnv0810tn2lt_kd3911pdc0000gp/T/pytest-of-simonw/pytest-8/test_large_staticfile0')
def test_large_staticfile(tmpdir):
path = os.path.join(tmpdir, "example.txt")
content = "this is a lot of content" * 200
print("content len = ", len(content))
with open(path, "w") as file:
file.write(content)
app = StaticFile(path=path)
client = TestClient(app)
response = client.get("/")
assert response.status_code == 200
> assert len(content) == len(response.text)
E AssertionError: assert 4800 == 4096
E + where 4800 = len('this is a lot of contentthis is a lot of contentthis is a lot of contentthis is a lot of contentthis is a lot of cont...is is a lot of contentthis is a lot of contentthis is a lot of contentthis is a lot of contentthis is a lot of content')
E + and 4096 = len(' contentthis is a lot of contentthis is a lot of contentthis is a lot of contentthis is a lot of contentthis is a lot...ontentthis is a lot of contentthis is a lot of contentthis is a lot of contentthis is a lot of contentthis is a lot of')
E + where ' contentthis is a lot of contentthis is a lot of contentthis is a lot of contentthis is a lot of contentthis is a lot...ontentthis is a lot of contentthis is a lot of contentthis is a lot of contentthis is a lot of contentthis is a lot of' = <Response [200]>.text
tests/test_staticfiles.py:30: AssertionError
```
</issue>
<code>
[start of starlette/response.py]
1 from aiofiles.os import stat as aio_stat
2 from email.utils import formatdate
3 from mimetypes import guess_type
4 from starlette.datastructures import MutableHeaders
5 from starlette.types import Receive, Send
6 import aiofiles
7 import json
8 import hashlib
9 import os
10 import stat
11 import typing
12
13
14 class Response:
15 media_type = None
16 charset = "utf-8"
17
18 def __init__(
19 self,
20 content: typing.Any,
21 status_code: int = 200,
22 headers: dict = None,
23 media_type: str = None,
24 ) -> None:
25 self.body = self.render(content)
26 self.status_code = status_code
27 if media_type is not None:
28 self.media_type = media_type
29 self.init_headers(headers)
30
31 def render(self, content: typing.Any) -> bytes:
32 if isinstance(content, bytes):
33 return content
34 return content.encode(self.charset)
35
36 def init_headers(self, headers):
37 if headers is None:
38 raw_headers = []
39 populate_content_length = True
40 populate_content_type = True
41 else:
42 raw_headers = [
43 (k.lower().encode("latin-1"), v.encode("latin-1"))
44 for k, v in headers.items()
45 ]
46 keys = [h[0] for h in raw_headers]
47 populate_content_length = b"content-length" in keys
48 populate_content_type = b"content-type" in keys
49
50 body = getattr(self, "body", None)
51 if body is not None and populate_content_length:
52 content_length = str(len(body))
53 raw_headers.append((b"content-length", content_length.encode("latin-1")))
54
55 content_type = self.media_type
56 if content_type is not None and populate_content_type:
57 if content_type.startswith("text/"):
58 content_type += "; charset=" + self.charset
59 raw_headers.append((b"content-type", content_type.encode("latin-1")))
60
61 self.raw_headers = raw_headers
62
63 @property
64 def headers(self):
65 if not hasattr(self, "_headers"):
66 self._headers = MutableHeaders(self.raw_headers)
67 return self._headers
68
69 async def __call__(self, receive: Receive, send: Send) -> None:
70 await send(
71 {
72 "type": "http.response.start",
73 "status": self.status_code,
74 "headers": self.raw_headers,
75 }
76 )
77 await send({"type": "http.response.body", "body": self.body})
78
79
80 class HTMLResponse(Response):
81 media_type = "text/html"
82
83
84 class PlainTextResponse(Response):
85 media_type = "text/plain"
86
87
88 class JSONResponse(Response):
89 media_type = "application/json"
90 options = {
91 "ensure_ascii": False,
92 "allow_nan": False,
93 "indent": None,
94 "separators": (",", ":"),
95 } # type: typing.Dict[str, typing.Any]
96
97 def render(self, content: typing.Any) -> bytes:
98 return json.dumps(content, **self.options).encode("utf-8")
99
100
101 class StreamingResponse(Response):
102 def __init__(
103 self,
104 content: typing.Any,
105 status_code: int = 200,
106 headers: dict = None,
107 media_type: str = None,
108 ) -> None:
109 self.body_iterator = content
110 self.status_code = status_code
111 self.media_type = self.media_type if media_type is None else media_type
112 self.init_headers(headers)
113
114 async def __call__(self, receive: Receive, send: Send) -> None:
115 await send(
116 {
117 "type": "http.response.start",
118 "status": self.status_code,
119 "headers": self.raw_headers,
120 }
121 )
122 async for chunk in self.body_iterator:
123 if not isinstance(chunk, bytes):
124 chunk = chunk.encode(self.charset)
125 await send({"type": "http.response.body", "body": chunk, "more_body": True})
126 await send({"type": "http.response.body", "body": b"", "more_body": False})
127
128
129 class FileResponse(Response):
130 chunk_size = 4096
131
132 def __init__(
133 self,
134 path: str,
135 headers: dict = None,
136 media_type: str = None,
137 filename: str = None,
138 stat_result: os.stat_result = None,
139 ) -> None:
140 self.path = path
141 self.status_code = 200
142 self.filename = filename
143 if media_type is None:
144 media_type = guess_type(filename or path)[0] or "text/plain"
145 self.media_type = media_type
146 self.init_headers(headers)
147 if self.filename is not None:
148 content_disposition = 'attachment; filename="{}"'.format(self.filename)
149 self.headers.setdefault("content-disposition", content_disposition)
150 self.stat_result = stat_result
151 if stat_result is not None:
152 self.set_stat_headers(stat_result)
153
154 def set_stat_headers(self, stat_result):
155 content_length = str(stat_result.st_size)
156 last_modified = formatdate(stat_result.st_mtime, usegmt=True)
157 etag_base = str(stat_result.st_mtime) + "-" + str(stat_result.st_size)
158 etag = hashlib.md5(etag_base.encode()).hexdigest()
159 self.headers.setdefault("content-length", content_length)
160 self.headers.setdefault("last-modified", last_modified)
161 self.headers.setdefault("etag", etag)
162
163 async def __call__(self, receive: Receive, send: Send) -> None:
164 if self.stat_result is None:
165 stat_result = await aio_stat(self.path)
166 self.set_stat_headers(stat_result)
167 await send(
168 {
169 "type": "http.response.start",
170 "status": self.status_code,
171 "headers": self.raw_headers,
172 }
173 )
174 async with aiofiles.open(self.path, mode="rb") as file:
175 more_body = True
176 while more_body:
177 chunk = await file.read(self.chunk_size)
178 more_body = len(chunk) == self.chunk_size
179 await send(
180 {"type": "http.response.body", "body": chunk, "more_body": False}
181 )
182
[end of starlette/response.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/starlette/response.py b/starlette/response.py
--- a/starlette/response.py
+++ b/starlette/response.py
@@ -177,5 +177,9 @@
chunk = await file.read(self.chunk_size)
more_body = len(chunk) == self.chunk_size
await send(
- {"type": "http.response.body", "body": chunk, "more_body": False}
+ {
+ "type": "http.response.body",
+ "body": chunk,
+ "more_body": more_body,
+ }
)
|
{"golden_diff": "diff --git a/starlette/response.py b/starlette/response.py\n--- a/starlette/response.py\n+++ b/starlette/response.py\n@@ -177,5 +177,9 @@\n chunk = await file.read(self.chunk_size)\n more_body = len(chunk) == self.chunk_size\n await send(\n- {\"type\": \"http.response.body\", \"body\": chunk, \"more_body\": False}\n+ {\n+ \"type\": \"http.response.body\",\n+ \"body\": chunk,\n+ \"more_body\": more_body,\n+ }\n )\n", "issue": "Error serving static files larger than 4096 bytes\nStatic files larger than 4096 bytes do not appear to be served correctly.\r\n\r\nHere's a test I just wrote that illustrates the problem: https://github.com/simonw/starlette/commit/e2d6665fa5c32e77a3fe22836b14620a7f5999bb\r\n\r\nRunning that test gives me the following output:\r\n\r\n```\r\n(venv) starlette $ PYTHONPATH=. pytest -k test_large_staticfile \r\n===================================================== test session starts ======================================================\r\nplatform darwin -- Python 3.6.5, pytest-3.6.1, py-1.5.3, pluggy-0.6.0\r\nrootdir: /Users/simonw/Dropbox/Development/starlette, inifile:\r\ncollected 43 items / 42 deselected \r\n\r\ntests/test_staticfiles.py F [100%]\r\n\r\n=========================================================== FAILURES ===========================================================\r\n____________________________________________________ test_large_staticfile _____________________________________________________\r\n\r\ntmpdir = local('/private/var/folders/jj/fngnv0810tn2lt_kd3911pdc0000gp/T/pytest-of-simonw/pytest-8/test_large_staticfile0')\r\n\r\n def test_large_staticfile(tmpdir):\r\n path = os.path.join(tmpdir, \"example.txt\")\r\n content = \"this is a lot of content\" * 200\r\n print(\"content len = \", len(content))\r\n with open(path, \"w\") as file:\r\n file.write(content)\r\n \r\n app = StaticFile(path=path)\r\n client = TestClient(app)\r\n response = client.get(\"/\")\r\n assert response.status_code == 200\r\n> assert len(content) == len(response.text)\r\nE AssertionError: assert 4800 == 4096\r\nE + where 4800 = len('this is a lot of contentthis is a lot of contentthis is a lot of contentthis is a lot of contentthis is a lot of cont...is is a lot of contentthis is a lot of contentthis is a lot of contentthis is a lot of contentthis is a lot of content')\r\nE + and 4096 = len(' contentthis is a lot of contentthis is a lot of contentthis is a lot of contentthis is a lot of contentthis is a lot...ontentthis is a lot of contentthis is a lot of contentthis is a lot of contentthis is a lot of contentthis is a lot of')\r\nE + where ' contentthis is a lot of contentthis is a lot of contentthis is a lot of contentthis is a lot of contentthis is a lot...ontentthis is a lot of contentthis is a lot of contentthis is a lot of contentthis is a lot of contentthis is a lot of' = <Response [200]>.text\r\n\r\ntests/test_staticfiles.py:30: AssertionError\r\n```\n", "before_files": [{"content": "from aiofiles.os import stat as aio_stat\nfrom email.utils import formatdate\nfrom mimetypes import guess_type\nfrom starlette.datastructures import MutableHeaders\nfrom starlette.types import Receive, Send\nimport aiofiles\nimport json\nimport hashlib\nimport os\nimport stat\nimport typing\n\n\nclass Response:\n media_type = None\n charset = \"utf-8\"\n\n def __init__(\n self,\n content: typing.Any,\n status_code: int = 200,\n headers: dict = None,\n media_type: str = None,\n ) -> None:\n self.body = self.render(content)\n self.status_code = status_code\n if media_type 
is not None:\n self.media_type = media_type\n self.init_headers(headers)\n\n def render(self, content: typing.Any) -> bytes:\n if isinstance(content, bytes):\n return content\n return content.encode(self.charset)\n\n def init_headers(self, headers):\n if headers is None:\n raw_headers = []\n populate_content_length = True\n populate_content_type = True\n else:\n raw_headers = [\n (k.lower().encode(\"latin-1\"), v.encode(\"latin-1\"))\n for k, v in headers.items()\n ]\n keys = [h[0] for h in raw_headers]\n populate_content_length = b\"content-length\" in keys\n populate_content_type = b\"content-type\" in keys\n\n body = getattr(self, \"body\", None)\n if body is not None and populate_content_length:\n content_length = str(len(body))\n raw_headers.append((b\"content-length\", content_length.encode(\"latin-1\")))\n\n content_type = self.media_type\n if content_type is not None and populate_content_type:\n if content_type.startswith(\"text/\"):\n content_type += \"; charset=\" + self.charset\n raw_headers.append((b\"content-type\", content_type.encode(\"latin-1\")))\n\n self.raw_headers = raw_headers\n\n @property\n def headers(self):\n if not hasattr(self, \"_headers\"):\n self._headers = MutableHeaders(self.raw_headers)\n return self._headers\n\n async def __call__(self, receive: Receive, send: Send) -> None:\n await send(\n {\n \"type\": \"http.response.start\",\n \"status\": self.status_code,\n \"headers\": self.raw_headers,\n }\n )\n await send({\"type\": \"http.response.body\", \"body\": self.body})\n\n\nclass HTMLResponse(Response):\n media_type = \"text/html\"\n\n\nclass PlainTextResponse(Response):\n media_type = \"text/plain\"\n\n\nclass JSONResponse(Response):\n media_type = \"application/json\"\n options = {\n \"ensure_ascii\": False,\n \"allow_nan\": False,\n \"indent\": None,\n \"separators\": (\",\", \":\"),\n } # type: typing.Dict[str, typing.Any]\n\n def render(self, content: typing.Any) -> bytes:\n return json.dumps(content, **self.options).encode(\"utf-8\")\n\n\nclass StreamingResponse(Response):\n def __init__(\n self,\n content: typing.Any,\n status_code: int = 200,\n headers: dict = None,\n media_type: str = None,\n ) -> None:\n self.body_iterator = content\n self.status_code = status_code\n self.media_type = self.media_type if media_type is None else media_type\n self.init_headers(headers)\n\n async def __call__(self, receive: Receive, send: Send) -> None:\n await send(\n {\n \"type\": \"http.response.start\",\n \"status\": self.status_code,\n \"headers\": self.raw_headers,\n }\n )\n async for chunk in self.body_iterator:\n if not isinstance(chunk, bytes):\n chunk = chunk.encode(self.charset)\n await send({\"type\": \"http.response.body\", \"body\": chunk, \"more_body\": True})\n await send({\"type\": \"http.response.body\", \"body\": b\"\", \"more_body\": False})\n\n\nclass FileResponse(Response):\n chunk_size = 4096\n\n def __init__(\n self,\n path: str,\n headers: dict = None,\n media_type: str = None,\n filename: str = None,\n stat_result: os.stat_result = None,\n ) -> None:\n self.path = path\n self.status_code = 200\n self.filename = filename\n if media_type is None:\n media_type = guess_type(filename or path)[0] or \"text/plain\"\n self.media_type = media_type\n self.init_headers(headers)\n if self.filename is not None:\n content_disposition = 'attachment; filename=\"{}\"'.format(self.filename)\n self.headers.setdefault(\"content-disposition\", content_disposition)\n self.stat_result = stat_result\n if stat_result is not None:\n 
self.set_stat_headers(stat_result)\n\n def set_stat_headers(self, stat_result):\n content_length = str(stat_result.st_size)\n last_modified = formatdate(stat_result.st_mtime, usegmt=True)\n etag_base = str(stat_result.st_mtime) + \"-\" + str(stat_result.st_size)\n etag = hashlib.md5(etag_base.encode()).hexdigest()\n self.headers.setdefault(\"content-length\", content_length)\n self.headers.setdefault(\"last-modified\", last_modified)\n self.headers.setdefault(\"etag\", etag)\n\n async def __call__(self, receive: Receive, send: Send) -> None:\n if self.stat_result is None:\n stat_result = await aio_stat(self.path)\n self.set_stat_headers(stat_result)\n await send(\n {\n \"type\": \"http.response.start\",\n \"status\": self.status_code,\n \"headers\": self.raw_headers,\n }\n )\n async with aiofiles.open(self.path, mode=\"rb\") as file:\n more_body = True\n while more_body:\n chunk = await file.read(self.chunk_size)\n more_body = len(chunk) == self.chunk_size\n await send(\n {\"type\": \"http.response.body\", \"body\": chunk, \"more_body\": False}\n )\n", "path": "starlette/response.py"}]}
| 2,944 | 123 |
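The record above concerns starlette's `FileResponse`: its chunked send loop hardcoded `"more_body": False`, so only the first `chunk_size` (4096-byte) read ever reached the client, and the golden diff simply propagates the computed `more_body` flag. A minimal sketch of the corrected streaming loop, for illustration only (the helper name `send_file` is mine, not starlette's):

```python
import aiofiles

CHUNK_SIZE = 4096  # matches FileResponse.chunk_size in the record above


async def send_file(path, send):
    """Stream a file as ASGI body messages until a short read signals the end."""
    async with aiofiles.open(path, mode="rb") as file:
        more_body = True
        while more_body:
            chunk = await file.read(CHUNK_SIZE)
            # A full-length read means more data may follow; a short read ends the loop.
            more_body = len(chunk) == CHUNK_SIZE
            await send({
                "type": "http.response.body",
                "body": chunk,
                "more_body": more_body,  # the bug was hardcoding False here
            })
```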
| gh_patches_debug_34546 | rasdani/github-patches | git_diff | lutris__lutris-2830 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Disable desktop effects does not work on Gentoo due to expanded $DESKTOP_SESSION variable
**Describe the bug**
[Disable desktop effects](https://github.com/lutris/lutris/blob/4efadf948c6af7611c44855444e56d3852d5a1c3/lutris/util/display.py#L131) uses $DESKTOP_SESSION to figure out which environment is running. On Gentoo with SDDM and Plasma 5, the variable expands to `/usr/share/xsessions/plasma` rather than just `plasma`, preventing the compositor disable command from working.
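
For illustration, here is the comparison that breaks on this setup next to a tolerant alternative — a minimal sketch, not the full `get_compositor_commands()` logic (the patch later in this record switches the equality checks to `str.endswith`), and it assumes `DESKTOP_SESSION` is set:

```python
import os

desktop_session = os.environ.get("DESKTOP_SESSION")
# On this Gentoo/SDDM setup: "/usr/share/xsessions/plasma"

is_plasma_strict = desktop_session == "plasma"          # False -> compositor never suspended
is_plasma_lenient = desktop_session.endswith("plasma")  # True  -> tolerates the full xsession path
```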
**Expected behavior**
KWin on Gentoo stops compositing once a game is launched with "Disable desktop effects" enabled
**Current behavior**
Compositing remains enabled
**Steps to reproduce**
- Emerge `lutris` from official gentoo repo
- Log into a KDE Plasma 5 session
- Launch Lutris
- Enable "Disable desktop effects"
**Lutris debugging output**
The terminal output of `lutris -d`.
```
INFO 2020-05-02 17:01:13,531 [application.do_command_line:245]:Running Lutris 0.5.6
INFO 2020-05-02 17:01:13,531 [startup.check_driver:62]:Using X.Org
INFO 2020-05-02 17:01:13,531 [startup.check_driver:67]:Running Mesa driver 20.0.6 on Radeon RX 560 Series (POLARIS11, DRM 3.36.0, 5.6.6-gentoo, LLVM 10.0.0) (0x67ff)
INFO 2020-05-02 17:01:13,531 [startup.check_driver:79]:GPU: 1002:67FF 1458:230A using amdgpu drivers
INFO 2020-05-02 17:01:13,546 [startup.check_vulkan:132]:Vulkan is supported
INFO 2020-05-02 17:01:13,547 [dxvk.get_dxvk_versions:22]:Updating DXVK versions
DEBUG 2020-05-02 17:01:13,772 [http.get:65]:GET https://lutris.net/api/runtime
INFO 2020-05-02 17:01:13,773 [lutriswindow.toggle_connection:642]:Connected to lutris.net as technohacker
DEBUG 2020-05-02 17:01:13,774 [http.get:65]:GET https://lutris.net/api/games/library/technohacker
DEBUG 2020-05-02 17:01:13,778 [store.get_missing_media:209]:Requesting missing icons from API for 15 games
DEBUG 2020-05-02 17:01:13,778 [http.get:65]:GET https://lutris.net/api/games
DEBUG 2020-05-02 17:01:15,691 [api.get_game_api_page:132]:Loaded 6 games from page 1
```
</issue>
<code>
[start of lutris/util/display.py]
1 """Module to deal with various aspects of displays"""
2 # Standard Library
3 import os
4 import subprocess
5
6 # Third Party Libraries
7 from dbus.exceptions import DBusException
8 from gi.repository import Gdk, GLib, GnomeDesktop
9
10 # Lutris Modules
11 from lutris.util import system
12 from lutris.util.graphics.displayconfig import MutterDisplayManager
13 from lutris.util.graphics.xrandr import LegacyDisplayManager, change_resolution, get_outputs
14 from lutris.util.log import logger
15
16
17 class NoScreenDetected(Exception):
18
19 """Raise this when unable to detect screens"""
20
21
22 def restore_gamma():
23 """Restores gamma to a normal level."""
24 xgamma_path = system.find_executable("xgamma")
25 try:
26 subprocess.Popen([xgamma_path, "-gamma", "1.0"])
27 except (FileNotFoundError, TypeError):
28 logger.warning("xgamma is not available on your system")
29 except PermissionError:
30 logger.warning("you do not have permission to call xgamma")
31
32
33 def _get_graphics_adapters():
34 """Return the list of graphics cards available on a system
35
36 Returns:
37 list: list of tuples containing PCI ID and description of the display controller
38 """
39 lspci_path = system.find_executable("lspci")
40 dev_subclasses = ["VGA", "XGA", "3D controller", "Display controller"]
41 if not lspci_path:
42 logger.warning("lspci is not available. List of graphics cards not available")
43 return []
44 return [
45 (pci_id, device_desc.split(": ")[1]) for pci_id, device_desc in [
46 line.split(maxsplit=1) for line in system.execute(lspci_path).split("\n")
47 if any(subclass in line for subclass in dev_subclasses)
48 ]
49 ]
50
51
52 class DisplayManager:
53
54 """Get display and resolution using GnomeDesktop"""
55
56 def __init__(self):
57 screen = Gdk.Screen.get_default()
58 if not screen:
59 raise NoScreenDetected
60 self.rr_screen = GnomeDesktop.RRScreen.new(screen)
61 self.rr_config = GnomeDesktop.RRConfig.new_current(self.rr_screen)
62 self.rr_config.load_current()
63
64 def get_display_names(self):
65 """Return names of connected displays"""
66 return [output_info.get_display_name() for output_info in self.rr_config.get_outputs()]
67
68 def get_resolutions(self):
69 """Return available resolutions"""
70 resolutions = ["%sx%s" % (mode.get_width(), mode.get_height()) for mode in self.rr_screen.list_modes()]
71 return sorted(set(resolutions), key=lambda x: int(x.split("x")[0]), reverse=True)
72
73 def _get_primary_output(self):
74 """Return the RROutput used as a primary display"""
75 for output in self.rr_screen.list_outputs():
76 if output.get_is_primary():
77 return output
78 return
79
80 def get_current_resolution(self):
81 """Return the current resolution for the primary display"""
82 output = self._get_primary_output()
83 if not output:
84 logger.error("Failed to get a default output")
85 return "", ""
86 current_mode = output.get_current_mode()
87 return str(current_mode.get_width()), str(current_mode.get_height())
88
89 @staticmethod
90 def set_resolution(resolution):
91 """Set the resolution of one or more displays.
92 The resolution can either be a string, which will be applied to the
93 primary display or a list of configurations as returned by `get_config`.
94 This method uses XrandR and will not work on Wayland.
95 """
96 return change_resolution(resolution)
97
98 @staticmethod
99 def get_config():
100 """Return the current display resolution
101 This method uses XrandR and will not work on wayland
102 The output can be fed in `set_resolution`
103 """
104 return get_outputs()
105
106
107 def get_display_manager():
108 """Return the appropriate display manager instance.
109 Defaults to Mutter if available. This is the only one to support Wayland.
110 """
111 try:
112 return MutterDisplayManager()
113 except DBusException as ex:
114 logger.debug("Mutter DBus service not reachable: %s", ex)
115 except Exception as ex: # pylint: disable=broad-except
116 logger.exception("Failed to instanciate MutterDisplayConfig. Please report with exception: %s", ex)
117 try:
118 return DisplayManager()
119 except (GLib.Error, NoScreenDetected):
120 return LegacyDisplayManager()
121
122
123 DISPLAY_MANAGER = get_display_manager()
124 USE_DRI_PRIME = len(_get_graphics_adapters()) > 1
125
126
127 def get_compositor_commands():
128 """Nominated for the worst function in lutris"""
129 start_compositor = None
130 stop_compositor = None
131 desktop_session = os.environ.get("DESKTOP_SESSION")
132 if desktop_session == "plasma":
133 stop_compositor = ("qdbus org.kde.KWin /Compositor org.kde.kwin.Compositing.suspend")
134 start_compositor = ("qdbus org.kde.KWin /Compositor org.kde.kwin.Compositing.resume")
135 elif (
136 desktop_session == "mate"
137 and system.execute("gsettings get org.mate.Marco.general compositing-manager", shell=True) == "true"
138 ):
139 stop_compositor = ("gsettings set org.mate.Marco.general compositing-manager false")
140 start_compositor = ("gsettings set org.mate.Marco.general compositing-manager true")
141 elif (
142 desktop_session == "xfce" and system.execute(
143 "xfconf-query --channel=xfwm4 --property=/general/use_compositing",
144 shell=True,
145 ) == "true"
146 ):
147 stop_compositor = ("xfconf-query --channel=xfwm4 --property=/general/use_compositing --set=false")
148 start_compositor = ("xfconf-query --channel=xfwm4 --property=/general/use_compositing --set=true")
149 elif (
150 desktop_session == "deepin" and system.execute(
151 "dbus-send --session --dest=com.deepin.WMSwitcher --type=method_call "
152 "--print-reply=literal /com/deepin/WMSwitcher com.deepin.WMSwitcher.CurrentWM",
153 shell=True,
154 ) == "deepin wm"
155 ):
156 start_compositor, stop_compositor = (
157 "dbus-send --session --dest=com.deepin.WMSwitcher --type=method_call "
158 "/com/deepin/WMSwitcher com.deepin.WMSwitcher.RequestSwitchWM",
159 ) * 2
160 return start_compositor, stop_compositor
161
[end of lutris/util/display.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/lutris/util/display.py b/lutris/util/display.py
--- a/lutris/util/display.py
+++ b/lutris/util/display.py
@@ -129,17 +129,17 @@
start_compositor = None
stop_compositor = None
desktop_session = os.environ.get("DESKTOP_SESSION")
- if desktop_session == "plasma":
+ if desktop_session.endswith("plasma"):
stop_compositor = ("qdbus org.kde.KWin /Compositor org.kde.kwin.Compositing.suspend")
start_compositor = ("qdbus org.kde.KWin /Compositor org.kde.kwin.Compositing.resume")
elif (
- desktop_session == "mate"
+ desktop_session.endswith("mate")
and system.execute("gsettings get org.mate.Marco.general compositing-manager", shell=True) == "true"
):
stop_compositor = ("gsettings set org.mate.Marco.general compositing-manager false")
start_compositor = ("gsettings set org.mate.Marco.general compositing-manager true")
elif (
- desktop_session == "xfce" and system.execute(
+ desktop_session.endswith("xfce") and system.execute(
"xfconf-query --channel=xfwm4 --property=/general/use_compositing",
shell=True,
) == "true"
@@ -147,7 +147,7 @@
stop_compositor = ("xfconf-query --channel=xfwm4 --property=/general/use_compositing --set=false")
start_compositor = ("xfconf-query --channel=xfwm4 --property=/general/use_compositing --set=true")
elif (
- desktop_session == "deepin" and system.execute(
+ desktop_session.endswith("deepin") and system.execute(
"dbus-send --session --dest=com.deepin.WMSwitcher --type=method_call "
"--print-reply=literal /com/deepin/WMSwitcher com.deepin.WMSwitcher.CurrentWM",
shell=True,
|
{"golden_diff": "diff --git a/lutris/util/display.py b/lutris/util/display.py\n--- a/lutris/util/display.py\n+++ b/lutris/util/display.py\n@@ -129,17 +129,17 @@\n start_compositor = None\n stop_compositor = None\n desktop_session = os.environ.get(\"DESKTOP_SESSION\")\n- if desktop_session == \"plasma\":\n+ if desktop_session.endswith(\"plasma\"):\n stop_compositor = (\"qdbus org.kde.KWin /Compositor org.kde.kwin.Compositing.suspend\")\n start_compositor = (\"qdbus org.kde.KWin /Compositor org.kde.kwin.Compositing.resume\")\n elif (\n- desktop_session == \"mate\"\n+ desktop_session.endswith(\"mate\")\n and system.execute(\"gsettings get org.mate.Marco.general compositing-manager\", shell=True) == \"true\"\n ):\n stop_compositor = (\"gsettings set org.mate.Marco.general compositing-manager false\")\n start_compositor = (\"gsettings set org.mate.Marco.general compositing-manager true\")\n elif (\n- desktop_session == \"xfce\" and system.execute(\n+ desktop_session.endswith(\"xfce\") and system.execute(\n \"xfconf-query --channel=xfwm4 --property=/general/use_compositing\",\n shell=True,\n ) == \"true\"\n@@ -147,7 +147,7 @@\n stop_compositor = (\"xfconf-query --channel=xfwm4 --property=/general/use_compositing --set=false\")\n start_compositor = (\"xfconf-query --channel=xfwm4 --property=/general/use_compositing --set=true\")\n elif (\n- desktop_session == \"deepin\" and system.execute(\n+ desktop_session.endswith(\"deepin\") and system.execute(\n \"dbus-send --session --dest=com.deepin.WMSwitcher --type=method_call \"\n \"--print-reply=literal /com/deepin/WMSwitcher com.deepin.WMSwitcher.CurrentWM\",\n shell=True,\n", "issue": "Disable desktop effects does not work on Gentoo due to expanded $DESKTOP_SESSION variable\n**Describe the bug**\r\n\r\n[Disable desktop effects](https://github.com/lutris/lutris/blob/4efadf948c6af7611c44855444e56d3852d5a1c3/lutris/util/display.py#L131) uses $DESKTOP_SESSION to figure out which environment is running. 
On Gentoo with SDDM and Plasma 5, the variable expands to `/usr/share/xsessions/plasma` rather than just `plasma`, preventing the compositor disable command from working.\r\n\r\n**Expected behavior**\r\n\r\nKWin on Gentoo stops compositing once a game is launched with \"Disable desktop effects\" enabled\r\n\r\n**Current behavior**\r\n\r\nCompositing remains enabled\r\n\r\n**Steps to reproduce**\r\n\r\n- Emerge `lutris` from official gentoo repo\r\n- Log into a KDE Plasma 5 session\r\n- Launch Lutris\r\n- Enable \"Disable desktop effects\"\r\n\r\n**Lutris debugging output**\r\n\r\nThe terminal output of `lutris -d`.\r\n\r\n```\r\nINFO 2020-05-02 17:01:13,531 [application.do_command_line:245]:Running Lutris 0.5.6\r\nINFO 2020-05-02 17:01:13,531 [startup.check_driver:62]:Using X.Org\r\nINFO 2020-05-02 17:01:13,531 [startup.check_driver:67]:Running Mesa driver 20.0.6 on Radeon RX 560 Series (POLARIS11, DRM 3.36.0, 5.6.6-gentoo, LLVM 10.0.0) (0x67ff)\r\nINFO 2020-05-02 17:01:13,531 [startup.check_driver:79]:GPU: 1002:67FF 1458:230A using amdgpu drivers\r\nINFO 2020-05-02 17:01:13,546 [startup.check_vulkan:132]:Vulkan is supported\r\nINFO 2020-05-02 17:01:13,547 [dxvk.get_dxvk_versions:22]:Updating DXVK versions\r\nDEBUG 2020-05-02 17:01:13,772 [http.get:65]:GET https://lutris.net/api/runtime\r\nINFO 2020-05-02 17:01:13,773 [lutriswindow.toggle_connection:642]:Connected to lutris.net as technohacker\r\nDEBUG 2020-05-02 17:01:13,774 [http.get:65]:GET https://lutris.net/api/games/library/technohacker\r\nDEBUG 2020-05-02 17:01:13,778 [store.get_missing_media:209]:Requesting missing icons from API for 15 games\r\nDEBUG 2020-05-02 17:01:13,778 [http.get:65]:GET https://lutris.net/api/games\r\nDEBUG 2020-05-02 17:01:15,691 [api.get_game_api_page:132]:Loaded 6 games from page 1\r\n```\r\n\n", "before_files": [{"content": "\"\"\"Module to deal with various aspects of displays\"\"\"\n# Standard Library\nimport os\nimport subprocess\n\n# Third Party Libraries\nfrom dbus.exceptions import DBusException\nfrom gi.repository import Gdk, GLib, GnomeDesktop\n\n# Lutris Modules\nfrom lutris.util import system\nfrom lutris.util.graphics.displayconfig import MutterDisplayManager\nfrom lutris.util.graphics.xrandr import LegacyDisplayManager, change_resolution, get_outputs\nfrom lutris.util.log import logger\n\n\nclass NoScreenDetected(Exception):\n\n \"\"\"Raise this when unable to detect screens\"\"\"\n\n\ndef restore_gamma():\n \"\"\"Restores gamma to a normal level.\"\"\"\n xgamma_path = system.find_executable(\"xgamma\")\n try:\n subprocess.Popen([xgamma_path, \"-gamma\", \"1.0\"])\n except (FileNotFoundError, TypeError):\n logger.warning(\"xgamma is not available on your system\")\n except PermissionError:\n logger.warning(\"you do not have permission to call xgamma\")\n\n\ndef _get_graphics_adapters():\n \"\"\"Return the list of graphics cards available on a system\n\n Returns:\n list: list of tuples containing PCI ID and description of the display controller\n \"\"\"\n lspci_path = system.find_executable(\"lspci\")\n dev_subclasses = [\"VGA\", \"XGA\", \"3D controller\", \"Display controller\"]\n if not lspci_path:\n logger.warning(\"lspci is not available. 
List of graphics cards not available\")\n return []\n return [\n (pci_id, device_desc.split(\": \")[1]) for pci_id, device_desc in [\n line.split(maxsplit=1) for line in system.execute(lspci_path).split(\"\\n\")\n if any(subclass in line for subclass in dev_subclasses)\n ]\n ]\n\n\nclass DisplayManager:\n\n \"\"\"Get display and resolution using GnomeDesktop\"\"\"\n\n def __init__(self):\n screen = Gdk.Screen.get_default()\n if not screen:\n raise NoScreenDetected\n self.rr_screen = GnomeDesktop.RRScreen.new(screen)\n self.rr_config = GnomeDesktop.RRConfig.new_current(self.rr_screen)\n self.rr_config.load_current()\n\n def get_display_names(self):\n \"\"\"Return names of connected displays\"\"\"\n return [output_info.get_display_name() for output_info in self.rr_config.get_outputs()]\n\n def get_resolutions(self):\n \"\"\"Return available resolutions\"\"\"\n resolutions = [\"%sx%s\" % (mode.get_width(), mode.get_height()) for mode in self.rr_screen.list_modes()]\n return sorted(set(resolutions), key=lambda x: int(x.split(\"x\")[0]), reverse=True)\n\n def _get_primary_output(self):\n \"\"\"Return the RROutput used as a primary display\"\"\"\n for output in self.rr_screen.list_outputs():\n if output.get_is_primary():\n return output\n return\n\n def get_current_resolution(self):\n \"\"\"Return the current resolution for the primary display\"\"\"\n output = self._get_primary_output()\n if not output:\n logger.error(\"Failed to get a default output\")\n return \"\", \"\"\n current_mode = output.get_current_mode()\n return str(current_mode.get_width()), str(current_mode.get_height())\n\n @staticmethod\n def set_resolution(resolution):\n \"\"\"Set the resolution of one or more displays.\n The resolution can either be a string, which will be applied to the\n primary display or a list of configurations as returned by `get_config`.\n This method uses XrandR and will not work on Wayland.\n \"\"\"\n return change_resolution(resolution)\n\n @staticmethod\n def get_config():\n \"\"\"Return the current display resolution\n This method uses XrandR and will not work on wayland\n The output can be fed in `set_resolution`\n \"\"\"\n return get_outputs()\n\n\ndef get_display_manager():\n \"\"\"Return the appropriate display manager instance.\n Defaults to Mutter if available. This is the only one to support Wayland.\n \"\"\"\n try:\n return MutterDisplayManager()\n except DBusException as ex:\n logger.debug(\"Mutter DBus service not reachable: %s\", ex)\n except Exception as ex: # pylint: disable=broad-except\n logger.exception(\"Failed to instanciate MutterDisplayConfig. 
Please report with exception: %s\", ex)\n try:\n return DisplayManager()\n except (GLib.Error, NoScreenDetected):\n return LegacyDisplayManager()\n\n\nDISPLAY_MANAGER = get_display_manager()\nUSE_DRI_PRIME = len(_get_graphics_adapters()) > 1\n\n\ndef get_compositor_commands():\n \"\"\"Nominated for the worst function in lutris\"\"\"\n start_compositor = None\n stop_compositor = None\n desktop_session = os.environ.get(\"DESKTOP_SESSION\")\n if desktop_session == \"plasma\":\n stop_compositor = (\"qdbus org.kde.KWin /Compositor org.kde.kwin.Compositing.suspend\")\n start_compositor = (\"qdbus org.kde.KWin /Compositor org.kde.kwin.Compositing.resume\")\n elif (\n desktop_session == \"mate\"\n and system.execute(\"gsettings get org.mate.Marco.general compositing-manager\", shell=True) == \"true\"\n ):\n stop_compositor = (\"gsettings set org.mate.Marco.general compositing-manager false\")\n start_compositor = (\"gsettings set org.mate.Marco.general compositing-manager true\")\n elif (\n desktop_session == \"xfce\" and system.execute(\n \"xfconf-query --channel=xfwm4 --property=/general/use_compositing\",\n shell=True,\n ) == \"true\"\n ):\n stop_compositor = (\"xfconf-query --channel=xfwm4 --property=/general/use_compositing --set=false\")\n start_compositor = (\"xfconf-query --channel=xfwm4 --property=/general/use_compositing --set=true\")\n elif (\n desktop_session == \"deepin\" and system.execute(\n \"dbus-send --session --dest=com.deepin.WMSwitcher --type=method_call \"\n \"--print-reply=literal /com/deepin/WMSwitcher com.deepin.WMSwitcher.CurrentWM\",\n shell=True,\n ) == \"deepin wm\"\n ):\n start_compositor, stop_compositor = (\n \"dbus-send --session --dest=com.deepin.WMSwitcher --type=method_call \"\n \"/com/deepin/WMSwitcher com.deepin.WMSwitcher.RequestSwitchWM\",\n ) * 2\n return start_compositor, stop_compositor\n", "path": "lutris/util/display.py"}]}
| 3,163 | 446 |
| gh_patches_debug_4310 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-1366 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Columns re-ordered after type change
## Reproduce
1. "New Table" > "Import Data" > "Copy and Paste Text"
1. Paste the following data and proceed to create and view the table (by clicking "Continue" and then "Finish Import").
```
id,a,b
1,100,200
2,101,201
```
1. Click on the column header for column `a` and change the type from "Number" to "Text", clicking "Save".
1. Expect columns to be ordered `id`, `a`, `b`.
1. Once, I did this and observed the columns to be ordered `a`, `id`, `b`; another time, I observed them ordered `id`, `b`, `a`. I'm not sure what affects the ordering of the columns, but the order seems to remain consistent (even if incorrect) after the type-change operation.
## Implementation
The backend should make sure that the column list in the API is always ordered by a suitable `index`.
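
A minimal sketch of that ordering at the queryset level — it mirrors the fix shown later in this record rather than prescribing it, and relies on the `attnum` field (the Postgres attribute number) already stored on the Django `Column` model:

```python
from mathesar.models import Column


class ColumnViewSetSketch:
    """Stripped-down stand-in for ColumnViewSet; kwargs normally come from DRF routing."""

    kwargs = {'table_pk': 1}

    def get_queryset(self):
        # Ordering by attnum keeps the API's column order stable across type changes.
        return Column.objects.filter(table=self.kwargs['table_pk']).order_by('attnum')
```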
## Additional details
- The front end displays the columns in the order it receives them from the columns API. In this case the API is returning the columns out-of-order, which is why I've marked this as a backend bug.
- A while back, we had a [discussion about whether the user should be able to re-order columns](https://github.com/centerofci/mathesar/discussions/803) in which we decided _not_ to support re-ordering columns, at least for now.
</issue>
<code>
[start of mathesar/api/db/viewsets/columns.py]
1 import warnings
2 from psycopg2.errors import DuplicateColumn
3 from rest_framework import status, viewsets
4 from rest_framework.exceptions import NotFound
5 from rest_framework.response import Response
6 from sqlalchemy.exc import ProgrammingError
7
8 from mathesar.api.exceptions.database_exceptions import (
9 exceptions as database_api_exceptions,
10 base_exceptions as database_base_api_exceptions,
11 )
12 from mathesar.api.exceptions.generic_exceptions import base_exceptions as base_api_exceptions
13 from db.columns.exceptions import (
14 DynamicDefaultWarning, InvalidDefaultError, InvalidTypeOptionError, InvalidTypeError,
15 )
16 from db.columns.operations.select import get_column_attnum_from_name
17 from db.types.exceptions import InvalidTypeParameters
18 from db.records.exceptions import UndefinedFunction
19 from mathesar.api.pagination import DefaultLimitOffsetPagination
20 from mathesar.api.serializers.columns import ColumnSerializer
21 from mathesar.api.utils import get_table_or_404
22 from mathesar.models import Column
23
24
25 class ColumnViewSet(viewsets.ModelViewSet):
26 serializer_class = ColumnSerializer
27 pagination_class = DefaultLimitOffsetPagination
28
29 def get_queryset(self):
30 return Column.objects.filter(table=self.kwargs['table_pk'])
31
32 def create(self, request, table_pk=None):
33 table = get_table_or_404(table_pk)
34 # We only support adding a single column through the API.
35 serializer = ColumnSerializer(data=request.data, context={'request': request})
36 serializer.is_valid(raise_exception=True)
37 type_options = request.data.get('type_options', None)
38 if type_options is not None:
39 scale = type_options.get('scale', None)
40 precision = type_options.get('precision', None)
41 if scale is not None and precision is None:
42 request.data['type_options']['precision'] = 1000
43 if 'source_column' in serializer.validated_data:
44 column = table.duplicate_column(
45 serializer.validated_data['source_column'],
46 serializer.validated_data['copy_source_data'],
47 serializer.validated_data['copy_source_constraints'],
48 serializer.validated_data.get('name'),
49 )
50 else:
51 try:
52 # TODO Refactor add_column to user serializer validated date instead of request data
53 column = table.add_column(request.data)
54 except ProgrammingError as e:
55 if type(e.orig) == DuplicateColumn:
56 name = request.data['name']
57 raise database_api_exceptions.DuplicateTableAPIException(
58 e,
59 message=f'Column {name} already exists',
60 field='name',
61 status_code=status.HTTP_400_BAD_REQUEST
62 )
63 else:
64 raise database_base_api_exceptions.ProgrammingAPIException(e)
65 except TypeError as e:
66 raise base_api_exceptions.TypeErrorAPIException(
67 e,
68 message="Unknown type_option passed",
69 status_code=status.HTTP_400_BAD_REQUEST
70 )
71 except InvalidDefaultError as e:
72 raise database_api_exceptions.InvalidDefaultAPIException(
73 e,
74 message=f'default "{request.data["default"]}" is invalid for type {request.data["type"]}',
75 status_code=status.HTTP_400_BAD_REQUEST
76 )
77 except (InvalidTypeOptionError, InvalidTypeParameters) as e:
78 type_options = request.data.get('type_options', '')
79 raise database_api_exceptions.InvalidTypeOptionAPIException(
80 e,
81 message=f'parameter dict {type_options} is invalid for type {request.data["type"]}',
82 field="type_options",
83 status_code=status.HTTP_400_BAD_REQUEST
84 )
85 except InvalidTypeError as e:
86 raise database_api_exceptions.InvalidTypeCastAPIException(
87 e,
88 message='This type casting is invalid.',
89 status_code=status.HTTP_400_BAD_REQUEST
90 )
91 dj_column = Column(
92 table=table,
93 attnum=get_column_attnum_from_name(table.oid, column.name, table.schema._sa_engine),
94 **serializer.validated_model_fields
95 )
96 dj_column.save()
97 out_serializer = ColumnSerializer(dj_column)
98 return Response(out_serializer.data, status=status.HTTP_201_CREATED)
99
100 def partial_update(self, request, pk=None, table_pk=None):
101 column_instance = self.get_object()
102 table = column_instance.table
103 serializer = ColumnSerializer(instance=column_instance, data=request.data, partial=True)
104 serializer.is_valid(raise_exception=True)
105 with warnings.catch_warnings():
106 warnings.filterwarnings("error", category=DynamicDefaultWarning)
107 try:
108 table.alter_column(column_instance._sa_column.column_attnum, serializer.validated_data)
109 except UndefinedFunction as e:
110 raise database_api_exceptions.UndefinedFunctionAPIException(
111 e,
112 message='This type cast is not implemented',
113 status_code=status.HTTP_400_BAD_REQUEST
114 )
115 except ProgrammingError as e:
116 raise database_base_api_exceptions.ProgrammingAPIException(
117 e,
118 status_code=status.HTTP_400_BAD_REQUEST
119 )
120 except IndexError as e:
121 raise base_api_exceptions.NotFoundAPIException(e)
122 except TypeError as e:
123 raise database_api_exceptions.InvalidTypeOptionAPIException(
124 e,
125 message="Unknown type_option passed",
126 status_code=status.HTTP_400_BAD_REQUEST
127 )
128 except InvalidDefaultError as e:
129 raise database_api_exceptions.InvalidDefaultAPIException(
130 e,
131 message=f'default "{request.data["default"]}" is invalid for this column',
132 status_code=status.HTTP_400_BAD_REQUEST
133 )
134 except DynamicDefaultWarning as e:
135 raise database_api_exceptions.DynamicDefaultAPIException(
136 e,
137 message='Changing type of columns with dynamically-generated defaults is not supported.'
138 'Delete or change the default first.',
139 status_code=status.HTTP_400_BAD_REQUEST
140 )
141 except (InvalidTypeOptionError, InvalidTypeParameters) as e:
142 type_options = request.data.get('type_options', '')
143 raise database_api_exceptions.InvalidTypeOptionAPIException(
144 e,
145 message=f'parameter dict {type_options} is invalid for type {request.data["type"]}',
146 status_code=status.HTTP_400_BAD_REQUEST
147 )
148 except InvalidTypeError as e:
149 raise database_api_exceptions.InvalidTypeCastAPIException(
150 e,
151 message='This type casting is invalid.',
152 status_code=status.HTTP_400_BAD_REQUEST
153 )
154 except Exception as e:
155 raise base_api_exceptions.MathesarAPIException(e)
156
157 serializer.update(column_instance, serializer.validated_model_fields)
158 # Invalidate the cache as the underlying columns have changed
159 column_instance = self.get_object()
160 out_serializer = ColumnSerializer(column_instance)
161 return Response(out_serializer.data)
162
163 def destroy(self, request, pk=None, table_pk=None):
164 column_instance = self.get_object()
165 table = column_instance.table
166 try:
167 table.drop_column(column_instance.attnum)
168 column_instance.delete()
169 except IndexError:
170 raise NotFound
171 return Response(status=status.HTTP_204_NO_CONTENT)
172
[end of mathesar/api/db/viewsets/columns.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/mathesar/api/db/viewsets/columns.py b/mathesar/api/db/viewsets/columns.py
--- a/mathesar/api/db/viewsets/columns.py
+++ b/mathesar/api/db/viewsets/columns.py
@@ -27,7 +27,7 @@
pagination_class = DefaultLimitOffsetPagination
def get_queryset(self):
- return Column.objects.filter(table=self.kwargs['table_pk'])
+ return Column.objects.filter(table=self.kwargs['table_pk']).order_by('attnum')
def create(self, request, table_pk=None):
table = get_table_or_404(table_pk)
|
{"golden_diff": "diff --git a/mathesar/api/db/viewsets/columns.py b/mathesar/api/db/viewsets/columns.py\n--- a/mathesar/api/db/viewsets/columns.py\n+++ b/mathesar/api/db/viewsets/columns.py\n@@ -27,7 +27,7 @@\n pagination_class = DefaultLimitOffsetPagination\n \n def get_queryset(self):\n- return Column.objects.filter(table=self.kwargs['table_pk'])\n+ return Column.objects.filter(table=self.kwargs['table_pk']).order_by('attnum')\n \n def create(self, request, table_pk=None):\n table = get_table_or_404(table_pk)\n", "issue": "Columns re-ordered after type change\n## Reproduce\r\n\r\n1. \"New Table\" > \"Import Data\" > \"Copy and Paste Text\"\r\n\r\n1. Paste the following data and proceed to create and view the table (by clicking \"Continue\" and then \"Finish Import\").\r\n\r\n ```\r\n id,a,b\r\n 1,100,200\r\n 2,101,201\r\n ```\r\n\r\n1. Click on the column header for column `a` and change the type from \"Number\" to \"Text\", clicking \"Save\".\r\n\r\n1. Expect columns to be ordered `id`, `a`, `b`.\r\n\r\n1. Once I did this and observed the columns to be ordered `a`, `id`, `b`. Another time. I observed the columns to be ordered `id`, `b`, `a`. I'm not sure what affects the ordering of the columns, but the order seems to remain consistent (even if incorrect) after the type-change operation.\r\n\r\n## Implementation\r\nThe backend should make sure that the column list in the API should always be ordered by suitable `index`.\r\n\r\n## Additional details\r\n\r\n- The front end displays the columns in the order it receives them from the columns API. In this case the API is returning the columns out-of-order, which is why I've marked this as a backend bug.\r\n- A while back, we had a [discussion about whether the user should be able to re-order columns](https://github.com/centerofci/mathesar/discussions/803) in which we decided _not_ to support re-ordering columns, at least for now.\r\n\n", "before_files": [{"content": "import warnings\nfrom psycopg2.errors import DuplicateColumn\nfrom rest_framework import status, viewsets\nfrom rest_framework.exceptions import NotFound\nfrom rest_framework.response import Response\nfrom sqlalchemy.exc import ProgrammingError\n\nfrom mathesar.api.exceptions.database_exceptions import (\n exceptions as database_api_exceptions,\n base_exceptions as database_base_api_exceptions,\n)\nfrom mathesar.api.exceptions.generic_exceptions import base_exceptions as base_api_exceptions\nfrom db.columns.exceptions import (\n DynamicDefaultWarning, InvalidDefaultError, InvalidTypeOptionError, InvalidTypeError,\n)\nfrom db.columns.operations.select import get_column_attnum_from_name\nfrom db.types.exceptions import InvalidTypeParameters\nfrom db.records.exceptions import UndefinedFunction\nfrom mathesar.api.pagination import DefaultLimitOffsetPagination\nfrom mathesar.api.serializers.columns import ColumnSerializer\nfrom mathesar.api.utils import get_table_or_404\nfrom mathesar.models import Column\n\n\nclass ColumnViewSet(viewsets.ModelViewSet):\n serializer_class = ColumnSerializer\n pagination_class = DefaultLimitOffsetPagination\n\n def get_queryset(self):\n return Column.objects.filter(table=self.kwargs['table_pk'])\n\n def create(self, request, table_pk=None):\n table = get_table_or_404(table_pk)\n # We only support adding a single column through the API.\n serializer = ColumnSerializer(data=request.data, context={'request': request})\n serializer.is_valid(raise_exception=True)\n type_options = request.data.get('type_options', None)\n if type_options is not None:\n scale 
= type_options.get('scale', None)\n precision = type_options.get('precision', None)\n if scale is not None and precision is None:\n request.data['type_options']['precision'] = 1000\n if 'source_column' in serializer.validated_data:\n column = table.duplicate_column(\n serializer.validated_data['source_column'],\n serializer.validated_data['copy_source_data'],\n serializer.validated_data['copy_source_constraints'],\n serializer.validated_data.get('name'),\n )\n else:\n try:\n # TODO Refactor add_column to user serializer validated date instead of request data\n column = table.add_column(request.data)\n except ProgrammingError as e:\n if type(e.orig) == DuplicateColumn:\n name = request.data['name']\n raise database_api_exceptions.DuplicateTableAPIException(\n e,\n message=f'Column {name} already exists',\n field='name',\n status_code=status.HTTP_400_BAD_REQUEST\n )\n else:\n raise database_base_api_exceptions.ProgrammingAPIException(e)\n except TypeError as e:\n raise base_api_exceptions.TypeErrorAPIException(\n e,\n message=\"Unknown type_option passed\",\n status_code=status.HTTP_400_BAD_REQUEST\n )\n except InvalidDefaultError as e:\n raise database_api_exceptions.InvalidDefaultAPIException(\n e,\n message=f'default \"{request.data[\"default\"]}\" is invalid for type {request.data[\"type\"]}',\n status_code=status.HTTP_400_BAD_REQUEST\n )\n except (InvalidTypeOptionError, InvalidTypeParameters) as e:\n type_options = request.data.get('type_options', '')\n raise database_api_exceptions.InvalidTypeOptionAPIException(\n e,\n message=f'parameter dict {type_options} is invalid for type {request.data[\"type\"]}',\n field=\"type_options\",\n status_code=status.HTTP_400_BAD_REQUEST\n )\n except InvalidTypeError as e:\n raise database_api_exceptions.InvalidTypeCastAPIException(\n e,\n message='This type casting is invalid.',\n status_code=status.HTTP_400_BAD_REQUEST\n )\n dj_column = Column(\n table=table,\n attnum=get_column_attnum_from_name(table.oid, column.name, table.schema._sa_engine),\n **serializer.validated_model_fields\n )\n dj_column.save()\n out_serializer = ColumnSerializer(dj_column)\n return Response(out_serializer.data, status=status.HTTP_201_CREATED)\n\n def partial_update(self, request, pk=None, table_pk=None):\n column_instance = self.get_object()\n table = column_instance.table\n serializer = ColumnSerializer(instance=column_instance, data=request.data, partial=True)\n serializer.is_valid(raise_exception=True)\n with warnings.catch_warnings():\n warnings.filterwarnings(\"error\", category=DynamicDefaultWarning)\n try:\n table.alter_column(column_instance._sa_column.column_attnum, serializer.validated_data)\n except UndefinedFunction as e:\n raise database_api_exceptions.UndefinedFunctionAPIException(\n e,\n message='This type cast is not implemented',\n status_code=status.HTTP_400_BAD_REQUEST\n )\n except ProgrammingError as e:\n raise database_base_api_exceptions.ProgrammingAPIException(\n e,\n status_code=status.HTTP_400_BAD_REQUEST\n )\n except IndexError as e:\n raise base_api_exceptions.NotFoundAPIException(e)\n except TypeError as e:\n raise database_api_exceptions.InvalidTypeOptionAPIException(\n e,\n message=\"Unknown type_option passed\",\n status_code=status.HTTP_400_BAD_REQUEST\n )\n except InvalidDefaultError as e:\n raise database_api_exceptions.InvalidDefaultAPIException(\n e,\n message=f'default \"{request.data[\"default\"]}\" is invalid for this column',\n status_code=status.HTTP_400_BAD_REQUEST\n )\n except DynamicDefaultWarning as e:\n raise 
database_api_exceptions.DynamicDefaultAPIException(\n e,\n message='Changing type of columns with dynamically-generated defaults is not supported.'\n 'Delete or change the default first.',\n status_code=status.HTTP_400_BAD_REQUEST\n )\n except (InvalidTypeOptionError, InvalidTypeParameters) as e:\n type_options = request.data.get('type_options', '')\n raise database_api_exceptions.InvalidTypeOptionAPIException(\n e,\n message=f'parameter dict {type_options} is invalid for type {request.data[\"type\"]}',\n status_code=status.HTTP_400_BAD_REQUEST\n )\n except InvalidTypeError as e:\n raise database_api_exceptions.InvalidTypeCastAPIException(\n e,\n message='This type casting is invalid.',\n status_code=status.HTTP_400_BAD_REQUEST\n )\n except Exception as e:\n raise base_api_exceptions.MathesarAPIException(e)\n\n serializer.update(column_instance, serializer.validated_model_fields)\n # Invalidate the cache as the underlying columns have changed\n column_instance = self.get_object()\n out_serializer = ColumnSerializer(column_instance)\n return Response(out_serializer.data)\n\n def destroy(self, request, pk=None, table_pk=None):\n column_instance = self.get_object()\n table = column_instance.table\n try:\n table.drop_column(column_instance.attnum)\n column_instance.delete()\n except IndexError:\n raise NotFound\n return Response(status=status.HTTP_204_NO_CONTENT)\n", "path": "mathesar/api/db/viewsets/columns.py"}]}
| 2,731 | 133 |
| gh_patches_debug_27903 | rasdani/github-patches | git_diff | pre-commit__pre-commit-233 |
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Failures when hook ids are non-ascii
```
$ pre-commit run ☃
An unexpected error has occurred: UnicodeDecodeError: 'ascii' codec can't decode byte 0xe2 in position 0: ordinal not in range(128)
Check the log at ~/.pre-commit/pre-commit.log
$ cat ~/.pre-commit/pre-commit.log
An unexpected error has occurred: UnicodeDecodeError: 'ascii' codec can't decode byte 0xe2 in position 0: ordinal not in range(128)
Traceback (most recent call last):
File "/home/asottile/workspace/pre-commit/pre_commit/error_handler.py", line 34, in error_handler
yield
File "/home/asottile/workspace/pre-commit/pre_commit/main.py", line 129, in main
return run(runner, args)
File "/home/asottile/workspace/pre-commit/pre_commit/commands/run.py", line 163, in run
return _run_hook(runner, args, write=write)
File "/home/asottile/workspace/pre-commit/pre_commit/commands/run.py", line 133, in _run_hook
write('No hook with id `{0}`\n'.format(hook_id))
UnicodeDecodeError: 'ascii' codec can't decode byte 0xe2 in position 0: ordinal not in range(128)
```
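
The traceback boils down to formatting a byte string into a unicode template: on Python 2, `sys.argv` arrives as UTF-8 encoded bytes, so formatting the hook id into the `No hook with id` message triggers an implicit ASCII decode. A minimal sketch of the coercion that avoids it — the helper name here is illustrative, though the patch in this record adds similar `to_text`/`to_bytes` helpers and converts `argv` at the top of `main()`:

```python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals


def to_text(s, encoding='UTF-8'):
    # Decode byte strings (Python 2 argv) to text; pass text through unchanged.
    return s if isinstance(s, type(u'')) else s.decode(encoding)


hook_id = to_text(b'\xe2\x98\x83')  # UTF-8 bytes for the snowman argument
message = 'No hook with id `{0}`\n'.format(hook_id)  # no UnicodeDecodeError
```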
</issue>
<code>
[start of pre_commit/output.py]
1 from __future__ import unicode_literals
2
3 import subprocess
4 import sys
5
6 from pre_commit import color
7 from pre_commit import five
8
9
10 # TODO: smell: import side-effects
11 try:
12 COLS = int(
13 subprocess.Popen(
14 ['tput', 'cols'], stdout=subprocess.PIPE,
15 ).communicate()[0] or
16 # Default in the case of no terminal
17 80
18 )
19 except OSError: # pragma: no cover (windows)
20 COLS = 80
21
22
23 def get_hook_message(
24 start,
25 postfix='',
26 end_msg=None,
27 end_len=0,
28 end_color=None,
29 use_color=None,
30 cols=COLS,
31 ):
32 """Prints a message for running a hook.
33
34 This currently supports three approaches:
35
36 # Print `start` followed by dots, leaving 6 characters at the end
37 >>> print_hook_message('start', end_len=6)
38 start...............................................................
39
40 # Print `start` followed by dots with the end message colored if coloring
41 # is specified and a newline afterwards
42 >>> print_hook_message(
43 'start',
44 end_msg='end',
45 end_color=color.RED,
46 use_color=True,
47 )
48 start...................................................................end
49
50 # Print `start` followed by dots, followed by the `postfix` message
51 # uncolored, followed by the `end_msg` colored if specified and a newline
52 # afterwards
53 >>> print_hook_message(
54 'start',
55 postfix='postfix ',
56 end_msg='end',
57 end_color=color.RED,
58 use_color=True,
59 )
60 start...........................................................postfix end
61 """
62 if bool(end_msg) == bool(end_len):
63 raise ValueError('Expected one of (`end_msg`, `end_len`)')
64 if end_msg is not None and (end_color is None or use_color is None):
65 raise ValueError(
66 '`end_color` and `use_color` are required with `end_msg`'
67 )
68
69 if end_len:
70 return start + '.' * (cols - len(start) - end_len - 1)
71 else:
72 return '{0}{1}{2}{3}\n'.format(
73 start,
74 '.' * (cols - len(start) - len(postfix) - len(end_msg) - 1),
75 postfix,
76 color.format_color(end_msg, end_color, use_color),
77 )
78
79
80 def sys_stdout_write_wrapper(s, stream=sys.stdout):
81 """Python 2.6 chokes on unicode being passed to sys.stdout.write.
82
83 This is an adapter because PY2 is ok with bytes and PY3 requires text.
84 """
85 assert type(s) is five.text
86 if five.PY2: # pragma: no cover (PY2)
87 s = s.encode('UTF-8')
88 stream.write(s)
89
[end of pre_commit/output.py]
[start of pre_commit/five.py]
1 from __future__ import unicode_literals
2
3 # pylint:disable=invalid-name
4 PY2 = str is bytes
5 PY3 = str is not bytes
6
7 if PY2: # pragma: no cover (PY2 only)
8 text = unicode # flake8: noqa
9
10 def n(s):
11 if isinstance(s, bytes):
12 return s
13 else:
14 return s.encode('UTF-8')
15 else: # pragma: no cover (PY3 only)
16 text = str
17
18 def n(s):
19 if isinstance(s, text):
20 return s
21 else:
22 return s.decode('UTF-8')
23
[end of pre_commit/five.py]
[start of pre_commit/main.py]
1 from __future__ import unicode_literals
2
3 import argparse
4 import os
5 import sys
6
7 import pkg_resources
8
9 from pre_commit import color
10 from pre_commit.commands.autoupdate import autoupdate
11 from pre_commit.commands.clean import clean
12 from pre_commit.commands.install_uninstall import install
13 from pre_commit.commands.install_uninstall import uninstall
14 from pre_commit.commands.run import run
15 from pre_commit.error_handler import error_handler
16 from pre_commit.runner import Runner
17
18
19 # https://github.com/pre-commit/pre-commit/issues/217
20 # On OSX, making a virtualenv using pyvenv at . causes `virtualenv` and `pip`
21 # to install packages to the wrong place. We don't want anything to deal with
22 # pyvenv
23 os.environ.pop('__PYVENV_LAUNCHER__', None)
24
25
26 def main(argv=None):
27 argv = argv if argv is not None else sys.argv[1:]
28 parser = argparse.ArgumentParser()
29
30 # http://stackoverflow.com/a/8521644/812183
31 parser.add_argument(
32 '-V', '--version',
33 action='version',
34 version='%(prog)s {0}'.format(
35 pkg_resources.get_distribution('pre-commit').version
36 )
37 )
38
39 subparsers = parser.add_subparsers(dest='command')
40
41 install_parser = subparsers.add_parser(
42 'install', help='Install the pre-commit script.',
43 )
44 install_parser.add_argument(
45 '-f', '--overwrite', action='store_true',
46 help='Overwrite existing hooks / remove migration mode.',
47 )
48 install_parser.add_argument(
49 '--install-hooks', action='store_true',
50 help=(
51 'Whether to install hook environments for all environments '
52 'in the config file.'
53 ),
54 )
55 install_parser.add_argument(
56 '-t', '--hook-type', choices=('pre-commit', 'pre-push'),
57 default='pre-commit',
58 )
59
60 uninstall_parser = subparsers.add_parser(
61 'uninstall', help='Uninstall the pre-commit script.',
62 )
63 uninstall_parser.add_argument(
64 '-t', '--hook-type', choices=('pre-commit', 'pre-push'),
65 default='pre-commit',
66 )
67
68 subparsers.add_parser('clean', help='Clean out pre-commit files.')
69
70 subparsers.add_parser(
71 'autoupdate',
72 help="Auto-update pre-commit config to the latest repos' versions.",
73 )
74
75 run_parser = subparsers.add_parser('run', help='Run hooks.')
76 run_parser.add_argument('hook', nargs='?', help='A single hook-id to run')
77 run_parser.add_argument(
78 '--color', default='auto', type=color.use_color,
79 help='Whether to use color in output. Defaults to `auto`',
80 )
81 run_parser.add_argument(
82 '--no-stash', default=False, action='store_true',
83 help='Use this option to prevent auto stashing of unstaged files.',
84 )
85 run_parser.add_argument(
86 '--verbose', '-v', action='store_true', default=False,
87 )
88
89 run_parser.add_argument(
90 '--origin', '-o',
91 help='The origin branch"s commit_id when using `git push`',
92 )
93 run_parser.add_argument(
94 '--source', '-s',
95 help='The remote branch"s commit_id when using `git push`',
96 )
97 run_parser.add_argument(
98 '--allow-unstaged-config', default=False, action='store_true',
99 help='Allow an unstaged config to be present. Note that this will'
100 'be stashed before parsing unless --no-stash is specified'
101 )
102 run_mutex_group = run_parser.add_mutually_exclusive_group(required=False)
103 run_mutex_group.add_argument(
104 '--all-files', '-a', action='store_true', default=False,
105 help='Run on all the files in the repo. Implies --no-stash.',
106 )
107 run_mutex_group.add_argument(
108 '--files', nargs='*', help='Specific filenames to run hooks on.',
109 )
110
111 help = subparsers.add_parser(
112 'help', help='Show help for a specific command.'
113 )
114 help.add_argument('help_cmd', nargs='?', help='Command to show help for.')
115
116 # Argparse doesn't really provide a way to use a `default` subparser
117 if len(argv) == 0:
118 argv = ['run']
119 args = parser.parse_args(argv)
120
121 if args.command == 'help':
122 if args.help_cmd:
123 parser.parse_args([args.help_cmd, '--help'])
124 else:
125 parser.parse_args(['--help'])
126
127 with error_handler():
128 runner = Runner.create()
129
130 if args.command == 'install':
131 return install(
132 runner, overwrite=args.overwrite, hooks=args.install_hooks,
133 hook_type=args.hook_type,
134 )
135 elif args.command == 'uninstall':
136 return uninstall(runner, hook_type=args.hook_type)
137 elif args.command == 'clean':
138 return clean(runner)
139 elif args.command == 'autoupdate':
140 return autoupdate(runner)
141 elif args.command == 'run':
142 return run(runner, args)
143 else:
144 raise NotImplementedError(
145 'Command {0} not implemented.'.format(args.command)
146 )
147
148 raise AssertionError(
149 'Command {0} failed to exit with a returncode'.format(args.command)
150 )
151
152
153 if __name__ == '__main__':
154 exit(main())
155
[end of pre_commit/main.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/pre_commit/five.py b/pre_commit/five.py
--- a/pre_commit/five.py
+++ b/pre_commit/five.py
@@ -20,3 +20,11 @@
return s
else:
return s.decode('UTF-8')
+
+
+def to_text(s):
+ return s if isinstance(s, text) else s.decode('UTF-8')
+
+
+def to_bytes(s):
+ return s if isinstance(s, bytes) else s.encode('UTF-8')
diff --git a/pre_commit/main.py b/pre_commit/main.py
--- a/pre_commit/main.py
+++ b/pre_commit/main.py
@@ -7,6 +7,7 @@
import pkg_resources
from pre_commit import color
+from pre_commit import five
from pre_commit.commands.autoupdate import autoupdate
from pre_commit.commands.clean import clean
from pre_commit.commands.install_uninstall import install
@@ -25,6 +26,7 @@
def main(argv=None):
argv = argv if argv is not None else sys.argv[1:]
+ argv = [five.to_text(arg) for arg in argv]
parser = argparse.ArgumentParser()
# http://stackoverflow.com/a/8521644/812183
diff --git a/pre_commit/output.py b/pre_commit/output.py
--- a/pre_commit/output.py
+++ b/pre_commit/output.py
@@ -77,12 +77,8 @@
)
-def sys_stdout_write_wrapper(s, stream=sys.stdout):
- """Python 2.6 chokes on unicode being passed to sys.stdout.write.
+stdout_byte_stream = getattr(sys.stdout, 'buffer', sys.stdout)
- This is an adapter because PY2 is ok with bytes and PY3 requires text.
- """
- assert type(s) is five.text
- if five.PY2: # pragma: no cover (PY2)
- s = s.encode('UTF-8')
- stream.write(s)
+
+def sys_stdout_write_wrapper(s, stream=stdout_byte_stream):
+ stream.write(five.to_bytes(s))
|
{"golden_diff": "diff --git a/pre_commit/five.py b/pre_commit/five.py\n--- a/pre_commit/five.py\n+++ b/pre_commit/five.py\n@@ -20,3 +20,11 @@\n return s\n else:\n return s.decode('UTF-8')\n+\n+\n+def to_text(s):\n+ return s if isinstance(s, text) else s.decode('UTF-8')\n+\n+\n+def to_bytes(s):\n+ return s if isinstance(s, bytes) else s.encode('UTF-8')\ndiff --git a/pre_commit/main.py b/pre_commit/main.py\n--- a/pre_commit/main.py\n+++ b/pre_commit/main.py\n@@ -7,6 +7,7 @@\n import pkg_resources\n \n from pre_commit import color\n+from pre_commit import five\n from pre_commit.commands.autoupdate import autoupdate\n from pre_commit.commands.clean import clean\n from pre_commit.commands.install_uninstall import install\n@@ -25,6 +26,7 @@\n \n def main(argv=None):\n argv = argv if argv is not None else sys.argv[1:]\n+ argv = [five.to_text(arg) for arg in argv]\n parser = argparse.ArgumentParser()\n \n # http://stackoverflow.com/a/8521644/812183\ndiff --git a/pre_commit/output.py b/pre_commit/output.py\n--- a/pre_commit/output.py\n+++ b/pre_commit/output.py\n@@ -77,12 +77,8 @@\n )\n \n \n-def sys_stdout_write_wrapper(s, stream=sys.stdout):\n- \"\"\"Python 2.6 chokes on unicode being passed to sys.stdout.write.\n+stdout_byte_stream = getattr(sys.stdout, 'buffer', sys.stdout)\n \n- This is an adapter because PY2 is ok with bytes and PY3 requires text.\n- \"\"\"\n- assert type(s) is five.text\n- if five.PY2: # pragma: no cover (PY2)\n- s = s.encode('UTF-8')\n- stream.write(s)\n+\n+def sys_stdout_write_wrapper(s, stream=stdout_byte_stream):\n+ stream.write(five.to_bytes(s))\n", "issue": "Failures when hook ids are non-ascii\n```\n$ pre-commit run \u2603\nAn unexpected error has occurred: UnicodeDecodeError: 'ascii' codec can't decode byte 0xe2 in position 0: ordinal not in range(128)\nCheck the log at ~/.pre-commit/pre-commit.log\n$ cat ~/.pre-commit/pre-commit.log \nAn unexpected error has occurred: UnicodeDecodeError: 'ascii' codec can't decode byte 0xe2 in position 0: ordinal not in range(128)\nTraceback (most recent call last):\n File \"/home/asottile/workspace/pre-commit/pre_commit/error_handler.py\", line 34, in error_handler\n yield\n File \"/home/asottile/workspace/pre-commit/pre_commit/main.py\", line 129, in main\n return run(runner, args)\n File \"/home/asottile/workspace/pre-commit/pre_commit/commands/run.py\", line 163, in run\n return _run_hook(runner, args, write=write)\n File \"/home/asottile/workspace/pre-commit/pre_commit/commands/run.py\", line 133, in _run_hook\n write('No hook with id `{0}`\\n'.format(hook_id))\nUnicodeDecodeError: 'ascii' codec can't decode byte 0xe2 in position 0: ordinal not in range(128)\n```\n\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport subprocess\nimport sys\n\nfrom pre_commit import color\nfrom pre_commit import five\n\n\n# TODO: smell: import side-effects\ntry:\n COLS = int(\n subprocess.Popen(\n ['tput', 'cols'], stdout=subprocess.PIPE,\n ).communicate()[0] or\n # Default in the case of no terminal\n 80\n )\nexcept OSError: # pragma: no cover (windows)\n COLS = 80\n\n\ndef get_hook_message(\n start,\n postfix='',\n end_msg=None,\n end_len=0,\n end_color=None,\n use_color=None,\n cols=COLS,\n):\n \"\"\"Prints a message for running a hook.\n\n This currently supports three approaches:\n\n # Print `start` followed by dots, leaving 6 characters at the end\n >>> print_hook_message('start', end_len=6)\n start...............................................................\n\n # Print `start` followed by dots with the 
end message colored if coloring\n # is specified and a newline afterwards\n >>> print_hook_message(\n 'start',\n end_msg='end',\n end_color=color.RED,\n use_color=True,\n )\n start...................................................................end\n\n # Print `start` followed by dots, followed by the `postfix` message\n # uncolored, followed by the `end_msg` colored if specified and a newline\n # afterwards\n >>> print_hook_message(\n 'start',\n postfix='postfix ',\n end_msg='end',\n end_color=color.RED,\n use_color=True,\n )\n start...........................................................postfix end\n \"\"\"\n if bool(end_msg) == bool(end_len):\n raise ValueError('Expected one of (`end_msg`, `end_len`)')\n if end_msg is not None and (end_color is None or use_color is None):\n raise ValueError(\n '`end_color` and `use_color` are required with `end_msg`'\n )\n\n if end_len:\n return start + '.' * (cols - len(start) - end_len - 1)\n else:\n return '{0}{1}{2}{3}\\n'.format(\n start,\n '.' * (cols - len(start) - len(postfix) - len(end_msg) - 1),\n postfix,\n color.format_color(end_msg, end_color, use_color),\n )\n\n\ndef sys_stdout_write_wrapper(s, stream=sys.stdout):\n \"\"\"Python 2.6 chokes on unicode being passed to sys.stdout.write.\n\n This is an adapter because PY2 is ok with bytes and PY3 requires text.\n \"\"\"\n assert type(s) is five.text\n if five.PY2: # pragma: no cover (PY2)\n s = s.encode('UTF-8')\n stream.write(s)\n", "path": "pre_commit/output.py"}, {"content": "from __future__ import unicode_literals\n\n# pylint:disable=invalid-name\nPY2 = str is bytes\nPY3 = str is not bytes\n\nif PY2: # pragma: no cover (PY2 only)\n text = unicode # flake8: noqa\n\n def n(s):\n if isinstance(s, bytes):\n return s\n else:\n return s.encode('UTF-8')\nelse: # pragma: no cover (PY3 only)\n text = str\n\n def n(s):\n if isinstance(s, text):\n return s\n else:\n return s.decode('UTF-8')\n", "path": "pre_commit/five.py"}, {"content": "from __future__ import unicode_literals\n\nimport argparse\nimport os\nimport sys\n\nimport pkg_resources\n\nfrom pre_commit import color\nfrom pre_commit.commands.autoupdate import autoupdate\nfrom pre_commit.commands.clean import clean\nfrom pre_commit.commands.install_uninstall import install\nfrom pre_commit.commands.install_uninstall import uninstall\nfrom pre_commit.commands.run import run\nfrom pre_commit.error_handler import error_handler\nfrom pre_commit.runner import Runner\n\n\n# https://github.com/pre-commit/pre-commit/issues/217\n# On OSX, making a virtualenv using pyvenv at . causes `virtualenv` and `pip`\n# to install packages to the wrong place. 
We don't want anything to deal with\n# pyvenv\nos.environ.pop('__PYVENV_LAUNCHER__', None)\n\n\ndef main(argv=None):\n argv = argv if argv is not None else sys.argv[1:]\n parser = argparse.ArgumentParser()\n\n # http://stackoverflow.com/a/8521644/812183\n parser.add_argument(\n '-V', '--version',\n action='version',\n version='%(prog)s {0}'.format(\n pkg_resources.get_distribution('pre-commit').version\n )\n )\n\n subparsers = parser.add_subparsers(dest='command')\n\n install_parser = subparsers.add_parser(\n 'install', help='Install the pre-commit script.',\n )\n install_parser.add_argument(\n '-f', '--overwrite', action='store_true',\n help='Overwrite existing hooks / remove migration mode.',\n )\n install_parser.add_argument(\n '--install-hooks', action='store_true',\n help=(\n 'Whether to install hook environments for all environments '\n 'in the config file.'\n ),\n )\n install_parser.add_argument(\n '-t', '--hook-type', choices=('pre-commit', 'pre-push'),\n default='pre-commit',\n )\n\n uninstall_parser = subparsers.add_parser(\n 'uninstall', help='Uninstall the pre-commit script.',\n )\n uninstall_parser.add_argument(\n '-t', '--hook-type', choices=('pre-commit', 'pre-push'),\n default='pre-commit',\n )\n\n subparsers.add_parser('clean', help='Clean out pre-commit files.')\n\n subparsers.add_parser(\n 'autoupdate',\n help=\"Auto-update pre-commit config to the latest repos' versions.\",\n )\n\n run_parser = subparsers.add_parser('run', help='Run hooks.')\n run_parser.add_argument('hook', nargs='?', help='A single hook-id to run')\n run_parser.add_argument(\n '--color', default='auto', type=color.use_color,\n help='Whether to use color in output. Defaults to `auto`',\n )\n run_parser.add_argument(\n '--no-stash', default=False, action='store_true',\n help='Use this option to prevent auto stashing of unstaged files.',\n )\n run_parser.add_argument(\n '--verbose', '-v', action='store_true', default=False,\n )\n\n run_parser.add_argument(\n '--origin', '-o',\n help='The origin branch\"s commit_id when using `git push`',\n )\n run_parser.add_argument(\n '--source', '-s',\n help='The remote branch\"s commit_id when using `git push`',\n )\n run_parser.add_argument(\n '--allow-unstaged-config', default=False, action='store_true',\n help='Allow an unstaged config to be present. Note that this will'\n 'be stashed before parsing unless --no-stash is specified'\n )\n run_mutex_group = run_parser.add_mutually_exclusive_group(required=False)\n run_mutex_group.add_argument(\n '--all-files', '-a', action='store_true', default=False,\n help='Run on all the files in the repo. 
Implies --no-stash.',\n )\n run_mutex_group.add_argument(\n '--files', nargs='*', help='Specific filenames to run hooks on.',\n )\n\n help = subparsers.add_parser(\n 'help', help='Show help for a specific command.'\n )\n help.add_argument('help_cmd', nargs='?', help='Command to show help for.')\n\n # Argparse doesn't really provide a way to use a `default` subparser\n if len(argv) == 0:\n argv = ['run']\n args = parser.parse_args(argv)\n\n if args.command == 'help':\n if args.help_cmd:\n parser.parse_args([args.help_cmd, '--help'])\n else:\n parser.parse_args(['--help'])\n\n with error_handler():\n runner = Runner.create()\n\n if args.command == 'install':\n return install(\n runner, overwrite=args.overwrite, hooks=args.install_hooks,\n hook_type=args.hook_type,\n )\n elif args.command == 'uninstall':\n return uninstall(runner, hook_type=args.hook_type)\n elif args.command == 'clean':\n return clean(runner)\n elif args.command == 'autoupdate':\n return autoupdate(runner)\n elif args.command == 'run':\n return run(runner, args)\n else:\n raise NotImplementedError(\n 'Command {0} not implemented.'.format(args.command)\n )\n\n raise AssertionError(\n 'Command {0} failed to exit with a returncode'.format(args.command)\n )\n\n\nif __name__ == '__main__':\n exit(main())\n", "path": "pre_commit/main.py"}]}
| 3,325 | 458 |
gh_patches_debug_13650
|
rasdani/github-patches
|
git_diff
|
ktbyers__netmiko-2043
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
cryptography 3.1 library breaks tplink drivers
I've just updated to netmiko 3.3.2 from 3.3.0, with paramiko staying at 2.7.2 both before and after the netmiko upgrade. My custom ssh driver worked fine with netmiko 3.3.0. The stack trace is interesting for a couple of reasons:
The actual error is "AttributeError: module 'cryptography.utils' has no attribute 'bit_length'". Looking back through the stack trace, there's a "raise e" from paramiko/transport.py line 660 in start_client(). The reported error is the actual exception raised at that point.
I don't get how the code goes from raising that exception to ending up in the new tplink driver.
I think I've installed all the latest module versions using pip to recreate the environment.
Debugging ideas are much appreciated!
```
Traceback (most recent call last):
(...deleted portion of stack trace...)
File "/usr/local/lib/python3.6/site-packages/netmiko/ssh_dispatcher.py, line 324, in ConnectHandler
return ConnectionClass(*args, **kwargs)
File "/usr/local/lib/python3.6/site-packages/netmiko/<vendor>/<driverfile>, line 189, in __init__
self.client.connect(self.host, port=22, username=self.username, password=self.password, look_for_keys=False, timeout=self.timeout)
File "/usr/local/lib/python3.6/site-packages/paramiko/client.py, line 406, in connect
t.start_client(timeout=timeout)
File "/usr/local/lib/python3.6/site-packages/paramiko/transport.py, line 660, in start_client
raise e
File "/usr/local/lib/python3.6/site-packages/paramiko/transport.py, line 2075, in run
self.kex_engine.parse_next(ptype, m)
File "/usr/local/lib/python3.6/site-packages/paramiko/kex_group1.py line 75, in parse_next
return self._parse_kexdh_reply(m)
File "/usr/local/lib/python3.6/site-packages/paramiko/kex_group1.py line 120, in _parse_kexdh_reply
self.transport._verify_key(host_key, sig)
File "/usr/local/lib/python3.6/site-packages/paramiko/transport.py, line 1886, in _verify_key
if not key.verify_ssh_sig(self.H, Message(sig)):
File "/usr/local/lib/python3.6/site-packages/paramiko/dsskey.py, line 153, in verify_ssh_sig
).public_key(backend=default_backend())
File "/usr/local/lib64/python3.6/site-packages/cryptography/hazmat/primitives/asymmetric/dsa.py, line 212, in public_key
return backend.load_dsa_public_numbers(self)
File "/usr/local/lib64/python3.6/site-packages/cryptography/hazmat/backends/openssl/backend.py, line 873, in load_dsa_public_numbers
dsa._check_dsa_parameters(numbers.parameter_numbers)
File "/usr/local/lib/python3.6/site-packages/netmiko/tplink/tplink_jetstream.py, line 145, in _override_check_dsa_parameters
if crypto_utils.bit_length(parameters.q) not in [160, 256]:
AttributeError: module 'cryptography.utils' has no attribute 'bit_length'
```
</issue>
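The chain of frames makes more sense once you notice that the tplink driver monkey-patches `dsa._check_dsa_parameters`, so paramiko's host-key verification ends up running code that lives in `netmiko/tplink/tplink_jetstream.py`. The failure itself is an API removal: `cryptography.utils.bit_length()` is gone in cryptography 3.1, while the same value is available directly from Python ints via `int.bit_length()`. A minimal sketch of the override using the plain-int method (the type hint is illustrative, not taken from the driver):

```python
from cryptography.hazmat.primitives.asymmetric import dsa


def _override_check_dsa_parameters(parameters: dsa.DSAParameterNumbers) -> None:
    # parameters.q is a plain Python int, so int.bit_length() replaces the
    # removed cryptography.utils.bit_length() helper.
    if parameters.q.bit_length() not in (160, 256):
        raise ValueError("q must be exactly 160 or 256 bits long")
    if not (1 < parameters.g < parameters.p):
        raise ValueError("g, p don't satisfy 1 < g < p.")


# Same hack as in the driver: swap in the relaxed check.
dsa._check_dsa_parameters = _override_check_dsa_parameters
```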
<code>
[start of netmiko/tplink/tplink_jetstream.py]
1 import re
2 import time
3
4 from cryptography import utils as crypto_utils
5 from cryptography.hazmat.primitives.asymmetric import dsa
6
7 from netmiko import log
8 from netmiko.cisco_base_connection import CiscoSSHConnection
9 from netmiko.ssh_exception import NetmikoTimeoutException
10
11
12 class TPLinkJetStreamBase(CiscoSSHConnection):
13 def __init__(self, **kwargs):
14 # TP-Link doesn't have a way to set terminal width which breaks cmd_verify
15 if kwargs.get("global_cmd_verify") is None:
16 kwargs["global_cmd_verify"] = False
17 # TP-Link uses "\r\n" as default_enter for SSH and Telnet
18 if kwargs.get("default_enter") is None:
19 kwargs["default_enter"] = "\r\n"
20 return super().__init__(**kwargs)
21
22 def session_preparation(self):
23 """
24 Prepare the session after the connection has been established.
25 """
26 delay_factor = self.select_delay_factor(delay_factor=0)
27 time.sleep(0.3 * delay_factor)
28 self.clear_buffer()
29 self._test_channel_read(pattern=r"[>#]")
30 self.set_base_prompt()
31 self.enable()
32 self.disable_paging()
33 # Clear the read buffer
34 time.sleep(0.3 * self.global_delay_factor)
35 self.clear_buffer()
36
37 def enable(self, cmd="", pattern="ssword", re_flags=re.IGNORECASE):
38 """
39 TPLink JetStream requires you to first execute "enable" and then execute "enable-admin".
40 This is necessary as "configure" is generally only available at "enable-admin" level
41
42 If the user does not have the Admin role, he will need to execute enable-admin to really
43 enable all functions.
44 """
45
46 # If end-user passes in "cmd" execute that using normal process.
47 if cmd:
48 return super().enable(cmd=cmd, pattern=pattern, re_flags=re_flags)
49
50 output = ""
51 msg = (
52 "Failed to enter enable mode. Please ensure you pass "
53 "the 'secret' argument to ConnectHandler."
54 )
55
56 cmds = ["enable", "enable-admin"]
57 if not self.check_enable_mode():
58 for cmd in cmds:
59 self.write_channel(self.normalize_cmd(cmd))
60 try:
61 output += self.read_until_prompt_or_pattern(
62 pattern=pattern, re_flags=re_flags
63 )
64 self.write_channel(self.normalize_cmd(self.secret))
65 output += self.read_until_prompt()
66 except NetmikoTimeoutException:
67 raise ValueError(msg)
68 if not self.check_enable_mode():
69 raise ValueError(msg)
70 return output
71
72 def config_mode(self, config_command="configure"):
73 """Enter configuration mode."""
74 return super().config_mode(config_command=config_command)
75
76 def exit_config_mode(self, exit_config="exit", pattern=r"#"):
77 """
78 Exit config mode.
79
80 Like the Mellanox equipment, the TP-Link Jetstream does not
81 support a single command to completely exit the configuration mode.
82
83 Consequently, need to keep checking and sending "exit".
84 """
85 output = ""
86 check_count = 12
87 while check_count >= 0:
88 if self.check_config_mode():
89 self.write_channel(self.normalize_cmd(exit_config))
90 output += self.read_until_pattern(pattern=pattern)
91 else:
92 break
93 check_count -= 1
94
95 if self.check_config_mode():
96 raise ValueError("Failed to exit configuration mode")
97 log.debug(f"exit_config_mode: {output}")
98
99 return output
100
101 def check_config_mode(self, check_string="(config", pattern=r"#"):
102 """Check whether device is in configuration mode. Return a boolean."""
103 return super().check_config_mode(check_string=check_string, pattern=pattern)
104
105 def set_base_prompt(
106 self, pri_prompt_terminator=">", alt_prompt_terminator="#", delay_factor=1
107 ):
108 """
109 Sets self.base_prompt
110
111 Used as delimiter for stripping of trailing prompt in output.
112
113 Should be set to something that is general and applies in multiple
114 contexts. For TP-Link this will be the router prompt with > or #
115 stripped off.
116
117 This will be set on logging in, but not when entering system-view
118 """
119 return super().set_base_prompt(
120 pri_prompt_terminator=pri_prompt_terminator,
121 alt_prompt_terminator=alt_prompt_terminator,
122 delay_factor=delay_factor,
123 )
124
125
126 class TPLinkJetStreamSSH(TPLinkJetStreamBase):
127 def _override_check_dsa_parameters(parameters):
128 """
129 Override check_dsa_parameters from cryptography's dsa.py
130
131 Without this the error below occurs:
132
133 ValueError: p must be exactly 1024, 2048, or 3072 bits long
134
135 Allows for shorter or longer parameters.p to be returned
136 from the server's host key. This is a HORRIBLE hack and a
137 security risk, please remove if possible!
138
139 By now, with firmware:
140
141 2.0.5 Build 20200109 Rel.36203(s)
142
143 It's still not possible to remove this hack.
144 """
145 if crypto_utils.bit_length(parameters.q) not in [160, 256]:
146 raise ValueError("q must be exactly 160 or 256 bits long")
147
148 if not (1 < parameters.g < parameters.p):
149 raise ValueError("g, p don't satisfy 1 < g < p.")
150
151 dsa._check_dsa_parameters = _override_check_dsa_parameters
152
153
154 class TPLinkJetStreamTelnet(TPLinkJetStreamBase):
155 def telnet_login(
156 self,
157 pri_prompt_terminator="#",
158 alt_prompt_terminator=">",
159 username_pattern=r"User:",
160 pwd_pattern=r"Password:",
161 delay_factor=1,
162 max_loops=60,
163 ):
164 """Telnet login: can be username/password or just password."""
165 super().telnet_login(
166 pri_prompt_terminator=pri_prompt_terminator,
167 alt_prompt_terminator=alt_prompt_terminator,
168 username_pattern=username_pattern,
169 pwd_pattern=pwd_pattern,
170 delay_factor=delay_factor,
171 max_loops=max_loops,
172 )
173
[end of netmiko/tplink/tplink_jetstream.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/netmiko/tplink/tplink_jetstream.py b/netmiko/tplink/tplink_jetstream.py
--- a/netmiko/tplink/tplink_jetstream.py
+++ b/netmiko/tplink/tplink_jetstream.py
@@ -1,7 +1,6 @@
import re
import time
-from cryptography import utils as crypto_utils
from cryptography.hazmat.primitives.asymmetric import dsa
from netmiko import log
@@ -142,7 +141,7 @@
It's still not possible to remove this hack.
"""
- if crypto_utils.bit_length(parameters.q) not in [160, 256]:
+ if parameters.q.bit_length() not in [160, 256]:
raise ValueError("q must be exactly 160 or 256 bits long")
if not (1 < parameters.g < parameters.p):
|
{"golden_diff": "diff --git a/netmiko/tplink/tplink_jetstream.py b/netmiko/tplink/tplink_jetstream.py\n--- a/netmiko/tplink/tplink_jetstream.py\n+++ b/netmiko/tplink/tplink_jetstream.py\n@@ -1,7 +1,6 @@\n import re\n import time\n \n-from cryptography import utils as crypto_utils\n from cryptography.hazmat.primitives.asymmetric import dsa\n \n from netmiko import log\n@@ -142,7 +141,7 @@\n \n It's still not possible to remove this hack.\n \"\"\"\n- if crypto_utils.bit_length(parameters.q) not in [160, 256]:\n+ if parameters.q.bit_length() not in [160, 256]:\n raise ValueError(\"q must be exactly 160 or 256 bits long\")\n \n if not (1 < parameters.g < parameters.p):\n", "issue": "cryptography 3.1 library breaks tplink drivers\nI've just updated to netmiko 3.3.2 from 3.3.0, and paramiko 2.7.2 before and after the netmiko upgrade. My custom ssh driver worked fine with netmiko 3.3.0. The stack trace is interesting for a couple reasons:\r\n\r\nThe actual error \"AttributeError: module 'cryptography.utils' has no attribute 'bit_length'\". Looking back through the stack trace, there's a \"raise e\" from paramiko/transport.py line 660 in start_client(). The reported error is the actual exception raised at that point.\r\n\r\nI don't get how the code goes from raising that exception and ends up in the new tplink driver.\r\n\r\nI think I've installed all the latest module versions using pip to recreate the environment.\r\n\r\nDebugging ideas are much appreciated!\r\n\r\n```\r\nTraceback (most recent call last):\r\n(...deleted portion of stack trace...)\r\n\u00a0File \"/usr/local/lib/python3.6/site-packages/netmiko/ssh_dispatcher.py, line 324, in ConnectHandler\r\n\u00a0\u00a0 return ConnectionClass(*args, **kwargs)\r\n\u00a0File \"/usr/local/lib/python3.6/site-packages/netmiko/<vendor>/<driverfile>, line 189, in __init__\r\n\u00a0\u00a0 self.client.connect(self.host, port=22, username=self.username, password=self.password, look_for_keys=False, timeout=self.timeout)\r\n\u00a0File \"/usr/local/lib/python3.6/site-packages/paramiko/client.py, line 406, in connect\r\n\u00a0\u00a0 t.start_client(timeout=timeout)\r\n\u00a0File \"/usr/local/lib/python3.6/site-packages/paramiko/transport.py, line 660, in start_client\r\n\u00a0\u00a0 raise e\r\n\u00a0File \"/usr/local/lib/python3.6/site-packages/paramiko/transport.py, line 2075, in run\r\n\u00a0\u00a0 self.kex_engine.parse_next(ptype, m)\r\n\u00a0File \"/usr/local/lib/python3.6/site-packages/paramiko/kex_group1.py line 75, in parse_next\r\n\u00a0\u00a0 return self._parse_kexdh_reply(m)\r\n\u00a0File \"/usr/local/lib/python3.6/site-packages/paramiko/kex_group1.py line 120, in _parse_kexdh_reply\r\n\u00a0\u00a0 self.transport._verify_key(host_key, sig)\r\n\u00a0File \"/usr/local/lib/python3.6/site-packages/paramiko/transport.py, line 1886, in _verify_key\r\n\u00a0\u00a0 if not key.verify_ssh_sig(self.H, Message(sig)):\r\n\u00a0File \"/usr/local/lib/python3.6/site-packages/paramiko/dsskey.py, line 153, in verify_ssh_sig\r\n\u00a0\u00a0 ).public_key(backend=default_backend())\r\n\u00a0File \"/usr/local/lib64/python3.6/site-packages/cryptography/hazmat/primitives/asymmetric/dsa.py, line 212, in public_key\r\n\u00a0\u00a0 return backend.load_dsa_public_numbers(self)\r\n\u00a0File \"/usr/local/lib64/python3.6/site-packages/cryptography/hazmat/backends/openssl/backend.py, line 873, in load_dsa_public_numbers\r\n\u00a0\u00a0 dsa._check_dsa_parameters(numbers.parameter_numbers)\r\n\u00a0File \"/usr/local/lib/python3.6/site-packages/netmiko/tplink/tplink_jetstream.py, 
line 145, in _override_check_dsa_parameters\r\n\u00a0\u00a0 if crypto_utils.bit_length(parameters.q) not in [160, 256]:\r\nAttributeError: module 'cryptography.utils' has no attribute 'bit_length'\r\n```\n", "before_files": [{"content": "import re\nimport time\n\nfrom cryptography import utils as crypto_utils\nfrom cryptography.hazmat.primitives.asymmetric import dsa\n\nfrom netmiko import log\nfrom netmiko.cisco_base_connection import CiscoSSHConnection\nfrom netmiko.ssh_exception import NetmikoTimeoutException\n\n\nclass TPLinkJetStreamBase(CiscoSSHConnection):\n def __init__(self, **kwargs):\n # TP-Link doesn't have a way to set terminal width which breaks cmd_verify\n if kwargs.get(\"global_cmd_verify\") is None:\n kwargs[\"global_cmd_verify\"] = False\n # TP-Link uses \"\\r\\n\" as default_enter for SSH and Telnet\n if kwargs.get(\"default_enter\") is None:\n kwargs[\"default_enter\"] = \"\\r\\n\"\n return super().__init__(**kwargs)\n\n def session_preparation(self):\n \"\"\"\n Prepare the session after the connection has been established.\n \"\"\"\n delay_factor = self.select_delay_factor(delay_factor=0)\n time.sleep(0.3 * delay_factor)\n self.clear_buffer()\n self._test_channel_read(pattern=r\"[>#]\")\n self.set_base_prompt()\n self.enable()\n self.disable_paging()\n # Clear the read buffer\n time.sleep(0.3 * self.global_delay_factor)\n self.clear_buffer()\n\n def enable(self, cmd=\"\", pattern=\"ssword\", re_flags=re.IGNORECASE):\n \"\"\"\n TPLink JetStream requires you to first execute \"enable\" and then execute \"enable-admin\".\n This is necessary as \"configure\" is generally only available at \"enable-admin\" level\n\n If the user does not have the Admin role, he will need to execute enable-admin to really\n enable all functions.\n \"\"\"\n\n # If end-user passes in \"cmd\" execute that using normal process.\n if cmd:\n return super().enable(cmd=cmd, pattern=pattern, re_flags=re_flags)\n\n output = \"\"\n msg = (\n \"Failed to enter enable mode. Please ensure you pass \"\n \"the 'secret' argument to ConnectHandler.\"\n )\n\n cmds = [\"enable\", \"enable-admin\"]\n if not self.check_enable_mode():\n for cmd in cmds:\n self.write_channel(self.normalize_cmd(cmd))\n try:\n output += self.read_until_prompt_or_pattern(\n pattern=pattern, re_flags=re_flags\n )\n self.write_channel(self.normalize_cmd(self.secret))\n output += self.read_until_prompt()\n except NetmikoTimeoutException:\n raise ValueError(msg)\n if not self.check_enable_mode():\n raise ValueError(msg)\n return output\n\n def config_mode(self, config_command=\"configure\"):\n \"\"\"Enter configuration mode.\"\"\"\n return super().config_mode(config_command=config_command)\n\n def exit_config_mode(self, exit_config=\"exit\", pattern=r\"#\"):\n \"\"\"\n Exit config mode.\n\n Like the Mellanox equipment, the TP-Link Jetstream does not\n support a single command to completely exit the configuration mode.\n\n Consequently, need to keep checking and sending \"exit\".\n \"\"\"\n output = \"\"\n check_count = 12\n while check_count >= 0:\n if self.check_config_mode():\n self.write_channel(self.normalize_cmd(exit_config))\n output += self.read_until_pattern(pattern=pattern)\n else:\n break\n check_count -= 1\n\n if self.check_config_mode():\n raise ValueError(\"Failed to exit configuration mode\")\n log.debug(f\"exit_config_mode: {output}\")\n\n return output\n\n def check_config_mode(self, check_string=\"(config\", pattern=r\"#\"):\n \"\"\"Check whether device is in configuration mode. 
Return a boolean.\"\"\"\n return super().check_config_mode(check_string=check_string, pattern=pattern)\n\n def set_base_prompt(\n self, pri_prompt_terminator=\">\", alt_prompt_terminator=\"#\", delay_factor=1\n ):\n \"\"\"\n Sets self.base_prompt\n\n Used as delimiter for stripping of trailing prompt in output.\n\n Should be set to something that is general and applies in multiple\n contexts. For TP-Link this will be the router prompt with > or #\n stripped off.\n\n This will be set on logging in, but not when entering system-view\n \"\"\"\n return super().set_base_prompt(\n pri_prompt_terminator=pri_prompt_terminator,\n alt_prompt_terminator=alt_prompt_terminator,\n delay_factor=delay_factor,\n )\n\n\nclass TPLinkJetStreamSSH(TPLinkJetStreamBase):\n def _override_check_dsa_parameters(parameters):\n \"\"\"\n Override check_dsa_parameters from cryptography's dsa.py\n\n Without this the error below occurs:\n\n ValueError: p must be exactly 1024, 2048, or 3072 bits long\n\n Allows for shorter or longer parameters.p to be returned\n from the server's host key. This is a HORRIBLE hack and a\n security risk, please remove if possible!\n\n By now, with firmware:\n\n 2.0.5 Build 20200109 Rel.36203(s)\n\n It's still not possible to remove this hack.\n \"\"\"\n if crypto_utils.bit_length(parameters.q) not in [160, 256]:\n raise ValueError(\"q must be exactly 160 or 256 bits long\")\n\n if not (1 < parameters.g < parameters.p):\n raise ValueError(\"g, p don't satisfy 1 < g < p.\")\n\n dsa._check_dsa_parameters = _override_check_dsa_parameters\n\n\nclass TPLinkJetStreamTelnet(TPLinkJetStreamBase):\n def telnet_login(\n self,\n pri_prompt_terminator=\"#\",\n alt_prompt_terminator=\">\",\n username_pattern=r\"User:\",\n pwd_pattern=r\"Password:\",\n delay_factor=1,\n max_loops=60,\n ):\n \"\"\"Telnet login: can be username/password or just password.\"\"\"\n super().telnet_login(\n pri_prompt_terminator=pri_prompt_terminator,\n alt_prompt_terminator=alt_prompt_terminator,\n username_pattern=username_pattern,\n pwd_pattern=pwd_pattern,\n delay_factor=delay_factor,\n max_loops=max_loops,\n )\n", "path": "netmiko/tplink/tplink_jetstream.py"}]}
| 3,073 | 207 |
gh_patches_debug_30556
|
rasdani/github-patches
|
git_diff
|
mdn__kuma-6929
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
T - the API to support subscription management
The backend pieces needed for https://github.com/mdn/kuma/issues/6703
That way, we can Reactify the subscription management page.
What we need is two endpoints (sketched below):
1. Getting your subscriptions (plural but it's probably never more than 1)
2. Cancel a subscription (or cancel them all if that's easier)
</issue>
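Both operations already have backend helpers in `kuma/users/stripe_utils.py` (shown below): `retrieve_and_synchronize_subscription_info` for reading the current subscription and `cancel_stripe_customer_subscriptions` for cancelling. A rough sketch of how two JSON endpoints could wrap them; view names, URL wiring and response shapes are assumptions for illustration, and the cancel view assumes the helper returns the canceled subscriptions, as in the patch for this entry:

```python
# Hypothetical wiring only; not the actual kuma view code.
from django.http import JsonResponse

from kuma.users.stripe_utils import (
    cancel_stripe_customer_subscriptions,
    retrieve_and_synchronize_subscription_info,
)


def subscriptions(request):
    """GET: return the caller's subscriptions (in practice at most one)."""
    info = retrieve_and_synchronize_subscription_info(request.user)
    return JsonResponse({"subscriptions": [info] if info else []})


def cancel_subscriptions(request):
    """POST: cancel every active subscription for the caller."""
    canceled = cancel_stripe_customer_subscriptions(request.user)
    return JsonResponse({"canceled": [s.id for s in canceled]})
```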
<code>
[start of kuma/users/stripe_utils.py]
1 from datetime import datetime
2
3 import stripe
4 from django.conf import settings
5 from django.utils import timezone
6
7 from kuma.core.urlresolvers import reverse
8 from kuma.wiki.templatetags.jinja_helpers import absolutify
9
10 from .models import UserSubscription
11
12
13 def retrieve_stripe_subscription(customer):
14 for subscription in customer.subscriptions.list().auto_paging_iter():
15 # We have to use array indexing syntax, as stripe uses dicts to
16 # represent its objects (dicts come with an .items method)
17 for item in subscription["items"].auto_paging_iter():
18 if item.plan.id == settings.STRIPE_PLAN_ID:
19 return subscription
20
21 return None
22
23
24 def retrieve_and_synchronize_subscription_info(user):
25 """For the given user, if it has as 'stripe_customer_id' retrieve the info
26 about the subscription if it's there. All packaged in a way that is
27 practical for the stripe_subscription.html template.
28
29 Also, whilst doing this check, we also verify that the UserSubscription record
30 for this user is right. Doing that check is a second-layer check in case
31 our webhooks have failed us.
32 """
33 subscription_info = None
34 stripe_customer = get_stripe_customer(user)
35 if stripe_customer:
36 stripe_subscription_info = get_stripe_subscription_info(stripe_customer)
37 if stripe_subscription_info:
38 source = stripe_customer.default_source
39 if source.object == "card":
40 card = source
41 elif source.object == "source":
42 card = source.card
43 else:
44 raise ValueError(
45 f"unexpected stripe customer default_source of type {source.object!r}"
46 )
47
48 subscription_info = {
49 "id": stripe_subscription_info.id,
50 "amount": stripe_subscription_info.plan.amount,
51 "brand": card.brand,
52 "expires_at": f"{card.exp_month}/{card.exp_year}",
53 "last4": card.last4,
54 # Cards that are part of a "source" don't have a zip
55 "zip": card.get("address_zip", None),
56 # TODO: Deprecated. Only used in the Edit Profile view
57 "next_payment_at": datetime.fromtimestamp(
58 stripe_subscription_info.current_period_end
59 ),
60 }
61
62 # To perfect the synchronization, take this opportunity to make sure
63 # we have an up-to-date record of this.
64 UserSubscription.set_active(user, stripe_subscription_info.id)
65 else:
66 # The user has a stripe_customer_id but no active subscription
67 # on the current settings.STRIPE_PLAN_ID! Perhaps it has been cancelled
68 # and not updated in our own records.
69 for user_subscription in UserSubscription.objects.filter(
70 user=user, canceled__isnull=True
71 ):
72 user_subscription.canceled = timezone.now()
73 user_subscription.save()
74
75 return subscription_info
76
77
78 def create_stripe_customer_and_subscription_for_user(user, email, stripe_token):
79 customer = (
80 stripe.Customer.retrieve(user.stripe_customer_id)
81 if user.stripe_customer_id
82 else None
83 )
84 if not customer or customer.email != email:
85 customer = stripe.Customer.create(email=email, source=stripe_token,)
86 user.stripe_customer_id = customer.id
87 user.save()
88
89 subscription = retrieve_stripe_subscription(customer)
90 if not subscription:
91 subscription = stripe.Subscription.create(
92 customer=customer.id, items=[{"plan": settings.STRIPE_PLAN_ID}],
93 )
94
95 UserSubscription.set_active(user, subscription.id)
96
97
98 def cancel_stripe_customer_subscriptions(user):
99 """Delete all subscriptions for a Stripe customer."""
100 assert user.stripe_customer_id
101 customer = stripe.Customer.retrieve(user.stripe_customer_id)
102 for sub in customer.subscriptions.data:
103 s = stripe.Subscription.retrieve(sub.id)
104 UserSubscription.set_canceled(user, s.id)
105 s.delete()
106
107
108 def get_stripe_customer(user):
109 if settings.STRIPE_PLAN_ID and user.stripe_customer_id:
110 return stripe.Customer.retrieve(
111 user.stripe_customer_id, expand=["default_source"]
112 )
113
114
115 def get_stripe_subscription_info(stripe_customer):
116 return retrieve_stripe_subscription(stripe_customer)
117
118
119 def create_missing_stripe_webhook():
120 url_path = reverse("users.stripe_hooks")
121 url = (
122 "https://" + settings.STRIPE_WEBHOOK_HOSTNAME + url_path
123 if settings.STRIPE_WEBHOOK_HOSTNAME
124 else absolutify(url_path)
125 )
126
127 # From https://stripe.com/docs/api/webhook_endpoints/create
128 events = (
129 # "Occurs whenever an invoice payment attempt succeeds."
130 "invoice.payment_succeeded",
131 # "Occurs whenever a customer’s subscription ends."
132 # Also, if you go into the Stripe Dashboard, click Billing, Subscriptions,
133 # and find a customer and click the "Cancel subscription" button, this
134 # triggers.
135 "customer.subscription.deleted",
136 )
137
138 for webhook in stripe.WebhookEndpoint.list().auto_paging_iter():
139 if webhook.url == url and set(events) == set(webhook.enabled_events):
140 return
141
142 stripe.WebhookEndpoint.create(
143 url=url, enabled_events=events,
144 )
145
[end of kuma/users/stripe_utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/kuma/users/stripe_utils.py b/kuma/users/stripe_utils.py
--- a/kuma/users/stripe_utils.py
+++ b/kuma/users/stripe_utils.py
@@ -64,7 +64,7 @@
UserSubscription.set_active(user, stripe_subscription_info.id)
else:
# The user has a stripe_customer_id but no active subscription
- # on the current settings.STRIPE_PLAN_ID! Perhaps it has been cancelled
+ # on the current settings.STRIPE_PLAN_ID! Perhaps it has been canceled
# and not updated in our own records.
for user_subscription in UserSubscription.objects.filter(
user=user, canceled__isnull=True
@@ -82,7 +82,7 @@
else None
)
if not customer or customer.email != email:
- customer = stripe.Customer.create(email=email, source=stripe_token,)
+ customer = stripe.Customer.create(email=email, source=stripe_token)
user.stripe_customer_id = customer.id
user.save()
@@ -99,10 +99,13 @@
"""Delete all subscriptions for a Stripe customer."""
assert user.stripe_customer_id
customer = stripe.Customer.retrieve(user.stripe_customer_id)
+ canceled = []
for sub in customer.subscriptions.data:
s = stripe.Subscription.retrieve(sub.id)
UserSubscription.set_canceled(user, s.id)
s.delete()
+ canceled.append(s)
+ return canceled
def get_stripe_customer(user):
|
{"golden_diff": "diff --git a/kuma/users/stripe_utils.py b/kuma/users/stripe_utils.py\n--- a/kuma/users/stripe_utils.py\n+++ b/kuma/users/stripe_utils.py\n@@ -64,7 +64,7 @@\n UserSubscription.set_active(user, stripe_subscription_info.id)\n else:\n # The user has a stripe_customer_id but no active subscription\n- # on the current settings.STRIPE_PLAN_ID! Perhaps it has been cancelled\n+ # on the current settings.STRIPE_PLAN_ID! Perhaps it has been canceled\n # and not updated in our own records.\n for user_subscription in UserSubscription.objects.filter(\n user=user, canceled__isnull=True\n@@ -82,7 +82,7 @@\n else None\n )\n if not customer or customer.email != email:\n- customer = stripe.Customer.create(email=email, source=stripe_token,)\n+ customer = stripe.Customer.create(email=email, source=stripe_token)\n user.stripe_customer_id = customer.id\n user.save()\n \n@@ -99,10 +99,13 @@\n \"\"\"Delete all subscriptions for a Stripe customer.\"\"\"\n assert user.stripe_customer_id\n customer = stripe.Customer.retrieve(user.stripe_customer_id)\n+ canceled = []\n for sub in customer.subscriptions.data:\n s = stripe.Subscription.retrieve(sub.id)\n UserSubscription.set_canceled(user, s.id)\n s.delete()\n+ canceled.append(s)\n+ return canceled\n \n \n def get_stripe_customer(user):\n", "issue": "T - the API to support subscription management\nThe backend pieces needed for https://github.com/mdn/kuma/issues/6703\r\n\r\nThat way we, can Reactify the subscription management page. \r\n\r\nWhat we need is to endpoints:\r\n\r\n1. Getting your subscriptions (plural but it's probably never more than 1)\r\n2. Cancel a subscription (or cancel them all if that's easier)\r\n\r\n\n", "before_files": [{"content": "from datetime import datetime\n\nimport stripe\nfrom django.conf import settings\nfrom django.utils import timezone\n\nfrom kuma.core.urlresolvers import reverse\nfrom kuma.wiki.templatetags.jinja_helpers import absolutify\n\nfrom .models import UserSubscription\n\n\ndef retrieve_stripe_subscription(customer):\n for subscription in customer.subscriptions.list().auto_paging_iter():\n # We have to use array indexing syntax, as stripe uses dicts to\n # represent its objects (dicts come with an .items method)\n for item in subscription[\"items\"].auto_paging_iter():\n if item.plan.id == settings.STRIPE_PLAN_ID:\n return subscription\n\n return None\n\n\ndef retrieve_and_synchronize_subscription_info(user):\n \"\"\"For the given user, if it has as 'stripe_customer_id' retrieve the info\n about the subscription if it's there. All packaged in a way that is\n practical for the stripe_subscription.html template.\n\n Also, whilst doing this check, we also verify that the UserSubscription record\n for this user is right. 
Doing that check is a second-layer check in case\n our webhooks have failed us.\n \"\"\"\n subscription_info = None\n stripe_customer = get_stripe_customer(user)\n if stripe_customer:\n stripe_subscription_info = get_stripe_subscription_info(stripe_customer)\n if stripe_subscription_info:\n source = stripe_customer.default_source\n if source.object == \"card\":\n card = source\n elif source.object == \"source\":\n card = source.card\n else:\n raise ValueError(\n f\"unexpected stripe customer default_source of type {source.object!r}\"\n )\n\n subscription_info = {\n \"id\": stripe_subscription_info.id,\n \"amount\": stripe_subscription_info.plan.amount,\n \"brand\": card.brand,\n \"expires_at\": f\"{card.exp_month}/{card.exp_year}\",\n \"last4\": card.last4,\n # Cards that are part of a \"source\" don't have a zip\n \"zip\": card.get(\"address_zip\", None),\n # TODO: Deprecated. Only used in the Edit Profile view\n \"next_payment_at\": datetime.fromtimestamp(\n stripe_subscription_info.current_period_end\n ),\n }\n\n # To perfect the synchronization, take this opportunity to make sure\n # we have an up-to-date record of this.\n UserSubscription.set_active(user, stripe_subscription_info.id)\n else:\n # The user has a stripe_customer_id but no active subscription\n # on the current settings.STRIPE_PLAN_ID! Perhaps it has been cancelled\n # and not updated in our own records.\n for user_subscription in UserSubscription.objects.filter(\n user=user, canceled__isnull=True\n ):\n user_subscription.canceled = timezone.now()\n user_subscription.save()\n\n return subscription_info\n\n\ndef create_stripe_customer_and_subscription_for_user(user, email, stripe_token):\n customer = (\n stripe.Customer.retrieve(user.stripe_customer_id)\n if user.stripe_customer_id\n else None\n )\n if not customer or customer.email != email:\n customer = stripe.Customer.create(email=email, source=stripe_token,)\n user.stripe_customer_id = customer.id\n user.save()\n\n subscription = retrieve_stripe_subscription(customer)\n if not subscription:\n subscription = stripe.Subscription.create(\n customer=customer.id, items=[{\"plan\": settings.STRIPE_PLAN_ID}],\n )\n\n UserSubscription.set_active(user, subscription.id)\n\n\ndef cancel_stripe_customer_subscriptions(user):\n \"\"\"Delete all subscriptions for a Stripe customer.\"\"\"\n assert user.stripe_customer_id\n customer = stripe.Customer.retrieve(user.stripe_customer_id)\n for sub in customer.subscriptions.data:\n s = stripe.Subscription.retrieve(sub.id)\n UserSubscription.set_canceled(user, s.id)\n s.delete()\n\n\ndef get_stripe_customer(user):\n if settings.STRIPE_PLAN_ID and user.stripe_customer_id:\n return stripe.Customer.retrieve(\n user.stripe_customer_id, expand=[\"default_source\"]\n )\n\n\ndef get_stripe_subscription_info(stripe_customer):\n return retrieve_stripe_subscription(stripe_customer)\n\n\ndef create_missing_stripe_webhook():\n url_path = reverse(\"users.stripe_hooks\")\n url = (\n \"https://\" + settings.STRIPE_WEBHOOK_HOSTNAME + url_path\n if settings.STRIPE_WEBHOOK_HOSTNAME\n else absolutify(url_path)\n )\n\n # From https://stripe.com/docs/api/webhook_endpoints/create\n events = (\n # \"Occurs whenever an invoice payment attempt succeeds.\"\n \"invoice.payment_succeeded\",\n # \"Occurs whenever a customer\u2019s subscription ends.\"\n # Also, if you go into the Stripe Dashboard, click Billing, Subscriptions,\n # and find a customer and click the \"Cancel subscription\" button, this\n # triggers.\n \"customer.subscription.deleted\",\n )\n\n for webhook 
in stripe.WebhookEndpoint.list().auto_paging_iter():\n if webhook.url == url and set(events) == set(webhook.enabled_events):\n return\n\n stripe.WebhookEndpoint.create(\n url=url, enabled_events=events,\n )\n", "path": "kuma/users/stripe_utils.py"}]}
| 2,036 | 323 |
gh_patches_debug_31465
|
rasdani/github-patches
|
git_diff
|
weecology__retriever-769
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Reinstate FIA script (Forest Inventory and Analysis)
Forest Inventory and Analysis changed the file locations.
New species table URL: https://apps.fs.usda.gov/fiadb-downloads/CSV/REF_SPECIES.csv
New main URL: see the `self.urls` mapping below.
The changes needed for FIA:
self.urls = {"main": "https://apps.fs.usda.gov/fiadb-downloads/CSV/CSV/",
'species': 'https://apps.fs.usda.gov/fiadb-downloads/CSV/REF_SPECIES.csv'}
</issue>
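The fix is confined to the `urls` mapping in the script's `__init__` (the patch for this entry also bumps the script version). The doubled `CSV/CSV/` in the issue text looks like a paste slip; the patch settles on a single `CSV/` path segment. A small sketch of how the script composes its download URLs from the new base, following the pattern used in `download()` below (constants here are illustrative):

```python
# Illustrative only; the values mirror the patch for this entry.
FIA_BASE = "https://apps.fs.usda.gov/fiadb-downloads/CSV/"
SPECIES_CSV = FIA_BASE + "REF_SPECIES.csv"


def state_table_zip(state: str, table: str) -> str:
    # e.g. state_table_zip("AL", "TREE") -> ".../CSV/AL_TREE.ZIP"
    return "{}{}_{}.ZIP".format(FIA_BASE, state, table)
```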
<code>
[start of scripts/forest-inventory-analysis.py]
1 #retriever
2 """Retriever script for Forest Inventory and Analysis
3
4 """
5 from __future__ import print_function
6 from future import standard_library
7 standard_library.install_aliases()
8
9 import os
10 import urllib.request, urllib.parse, urllib.error
11 import zipfile
12 from decimal import Decimal
13 from retriever.lib.templates import Script
14 from retriever.lib.models import Table, Cleanup, no_cleanup
15 from retriever import HOME_DIR, open_fr, open_fw
16
17
18 class main(Script):
19 def __init__(self, **kwargs):
20 Script.__init__(self, **kwargs)
21 self.name = "Forest Inventory and Analysis"
22 self.shortname = "forest-inventory-analysis"
23 self.retriever_minimum_version = '2.0.dev'
24 self.version = '1.2.0'
25 self.ref = "http://fia.fs.fed.us/"
26 self.urls = {"main": "http://apps.fs.fed.us/fiadb-downloads/CSV/",
27 'species': 'http://apps.fs.fed.us/fiadb-downloads/CSV/REF_SPECIES.csv'}
28 self.tags = ["plants", "continental-scale", "observational"]
29 self.citation = "DATEOFDOWNLOAD. Forest Inventory and Analysis Database, St. Paul, MN: U.S. Department of Agriculture, Forest Service, Northern Research Station. [Available only on internet: http://apps.fs.fed.us/fiadb-downloads/datamart.html]"
30 self.description = """WARNING: This dataset requires downloading many large files and will probably take several hours to finish installing."""
31 self.addendum = """This dataset requires downloading many large files - please be patient."""
32
33 def download(self, engine=None, debug=False):
34 Script.download(self, engine, debug)
35 engine = self.engine
36
37 # download and create species table
38 table = Table('species')
39 self.engine.auto_create_table(table, url=self.urls['species'])
40 self.engine.insert_data_from_url(self.urls['species'])
41
42 # State abbreviations with the year annual inventory began for that state
43 stateslist = [('AL', 2001), ('AK', 2004), ('AZ', 2001), ('AR', 2000),
44 ('CA', 2001), ('CO', 2002), ('CT', 2003), ('DE', 2004),
45 ('FL', 2003), ('GA', 1998), ('ID', 2004), ('IL', 2001),
46 ('IN', 1999), ('IA', 1999), ('KS', 2001), ('KY', 1999),
47 ('LA', 2001), ('ME', 1999), ('MD', 2004), ('MA', 2003),
48 ('MI', 2000), ('MN', 1999), ('MO', 1999), ('MS', 2006),
49 ('MT', 2003), ('NE', 2001), ('NV', 2004), ('NH', 2002),
50 ('NJ', 2004), ('NM', 1999), ('NY', 2002), ('NC', 2003),
51 ('ND', 2001), ('OH', 2001), ('OK', 2008), ('OR', 2001),
52 ('PA', 2000), ('RI', 2003), ('SC', 1999), ('SD', 2001),
53 ('TN', 2000), ('TX', 2001), ('UT', 2000), ('VT', 2003),
54 ('VA', 1998), ('WA', 2002), ('WV', 2004), ('WI', 2000),
55 ('WY', 2000), ('PR', 2001)]
56
57 tablelist = ["SURVEY", "PLOT", "COND", "SUBPLOT", "SUBP_COND", "TREE", "SEEDLING"]
58
59 for table in tablelist:
60 for state, year in stateslist:
61 engine.download_files_from_archive(self.urls["main"] + state + "_" + table + ".ZIP",
62 [state + "_" + table + ".csv"])
63
64 for table in tablelist:
65 print("Scanning data for table %s..." % table)
66 prep_file_name = "%s.csv" % table
67 prep_file = open_fw(engine.format_filename(prep_file_name))
68 this_file = open_fr(engine.format_filename(stateslist[0][0] + "_" + table + ".csv"))
69 col_names = this_file.readline()
70 prep_file.write(col_names)
71 column_names = [col.strip('"') for col in col_names.split(',')]
72 year_column = column_names.index("INVYR")
73 this_file.close()
74
75 for state, year in stateslist:
76 this_file = open_fr(engine.format_filename(state + "_" + table + ".csv"))
77 this_file.readline()
78 for line in this_file:
79 values = line.split(',')
80 this_year = values[year_column]
81 if int(this_year) >= year:
82 prep_file.write(line)
83 prep_file.close()
84 engine.auto_create_table(Table(table), filename=prep_file_name)
85
86 engine.insert_data_from_file(engine.format_filename(prep_file_name))
87
88 try:
89 os.remove(engine.format_filename(prep_file_name))
90 except:
91 pass
92
93 return engine
94
95
96 SCRIPT = main()
97
[end of scripts/forest-inventory-analysis.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/scripts/forest-inventory-analysis.py b/scripts/forest-inventory-analysis.py
--- a/scripts/forest-inventory-analysis.py
+++ b/scripts/forest-inventory-analysis.py
@@ -7,12 +7,10 @@
standard_library.install_aliases()
import os
-import urllib.request, urllib.parse, urllib.error
-import zipfile
-from decimal import Decimal
+
from retriever.lib.templates import Script
-from retriever.lib.models import Table, Cleanup, no_cleanup
-from retriever import HOME_DIR, open_fr, open_fw
+from retriever.lib.models import Table
+from retriever import open_fr, open_fw
class main(Script):
@@ -21,10 +19,10 @@
self.name = "Forest Inventory and Analysis"
self.shortname = "forest-inventory-analysis"
self.retriever_minimum_version = '2.0.dev'
- self.version = '1.2.0'
+ self.version = '1.3.0'
self.ref = "http://fia.fs.fed.us/"
- self.urls = {"main": "http://apps.fs.fed.us/fiadb-downloads/CSV/",
- 'species': 'http://apps.fs.fed.us/fiadb-downloads/CSV/REF_SPECIES.csv'}
+ self.urls = {"main": "https://apps.fs.usda.gov/fiadb-downloads/CSV/",
+ 'species': 'https://apps.fs.usda.gov/fiadb-downloads/CSV/REF_SPECIES.csv'}
self.tags = ["plants", "continental-scale", "observational"]
self.citation = "DATEOFDOWNLOAD. Forest Inventory and Analysis Database, St. Paul, MN: U.S. Department of Agriculture, Forest Service, Northern Research Station. [Available only on internet: http://apps.fs.fed.us/fiadb-downloads/datamart.html]"
self.description = """WARNING: This dataset requires downloading many large files and will probably take several hours to finish installing."""
|
{"golden_diff": "diff --git a/scripts/forest-inventory-analysis.py b/scripts/forest-inventory-analysis.py\n--- a/scripts/forest-inventory-analysis.py\n+++ b/scripts/forest-inventory-analysis.py\n@@ -7,12 +7,10 @@\n standard_library.install_aliases()\n \n import os\n-import urllib.request, urllib.parse, urllib.error\n-import zipfile\n-from decimal import Decimal\n+\n from retriever.lib.templates import Script\n-from retriever.lib.models import Table, Cleanup, no_cleanup\n-from retriever import HOME_DIR, open_fr, open_fw\n+from retriever.lib.models import Table\n+from retriever import open_fr, open_fw\n \n \n class main(Script):\n@@ -21,10 +19,10 @@\n self.name = \"Forest Inventory and Analysis\"\n self.shortname = \"forest-inventory-analysis\"\n self.retriever_minimum_version = '2.0.dev'\n- self.version = '1.2.0'\n+ self.version = '1.3.0'\n self.ref = \"http://fia.fs.fed.us/\"\n- self.urls = {\"main\": \"http://apps.fs.fed.us/fiadb-downloads/CSV/\",\n- 'species': 'http://apps.fs.fed.us/fiadb-downloads/CSV/REF_SPECIES.csv'}\n+ self.urls = {\"main\": \"https://apps.fs.usda.gov/fiadb-downloads/CSV/\",\n+ 'species': 'https://apps.fs.usda.gov/fiadb-downloads/CSV/REF_SPECIES.csv'}\n self.tags = [\"plants\", \"continental-scale\", \"observational\"]\n self.citation = \"DATEOFDOWNLOAD. Forest Inventory and Analysis Database, St. Paul, MN: U.S. Department of Agriculture, Forest Service, Northern Research Station. [Available only on internet: http://apps.fs.fed.us/fiadb-downloads/datamart.html]\"\n self.description = \"\"\"WARNING: This dataset requires downloading many large files and will probably take several hours to finish installing.\"\"\"\n", "issue": "Reinstate Fia script (Forest Inventory and Analysis)\nForest Inventory and Analysis changed the file locations.\r\nNew species table url: https://apps.fs.usda.gov/fiadb-downloads/CSV/REF_SPECIES.csv\r\nnew main:\r\n\r\nThe new changes needed for FIA\r\n\r\n self.urls = {\"main\": \"https://apps.fs.usda.gov/fiadb-downloads/CSV/CSV/\",\r\n 'species': 'https://apps.fs.usda.gov/fiadb-downloads/CSV/REF_SPECIES.csv'}\n", "before_files": [{"content": "#retriever\n\"\"\"Retriever script for Forest Inventory and Analysis\n\n\"\"\"\nfrom __future__ import print_function\nfrom future import standard_library\nstandard_library.install_aliases()\n\nimport os\nimport urllib.request, urllib.parse, urllib.error\nimport zipfile\nfrom decimal import Decimal\nfrom retriever.lib.templates import Script\nfrom retriever.lib.models import Table, Cleanup, no_cleanup\nfrom retriever import HOME_DIR, open_fr, open_fw\n\n\nclass main(Script):\n def __init__(self, **kwargs):\n Script.__init__(self, **kwargs)\n self.name = \"Forest Inventory and Analysis\"\n self.shortname = \"forest-inventory-analysis\"\n self.retriever_minimum_version = '2.0.dev'\n self.version = '1.2.0'\n self.ref = \"http://fia.fs.fed.us/\"\n self.urls = {\"main\": \"http://apps.fs.fed.us/fiadb-downloads/CSV/\",\n 'species': 'http://apps.fs.fed.us/fiadb-downloads/CSV/REF_SPECIES.csv'}\n self.tags = [\"plants\", \"continental-scale\", \"observational\"]\n self.citation = \"DATEOFDOWNLOAD. Forest Inventory and Analysis Database, St. Paul, MN: U.S. Department of Agriculture, Forest Service, Northern Research Station. 
[Available only on internet: http://apps.fs.fed.us/fiadb-downloads/datamart.html]\"\n self.description = \"\"\"WARNING: This dataset requires downloading many large files and will probably take several hours to finish installing.\"\"\"\n self.addendum = \"\"\"This dataset requires downloading many large files - please be patient.\"\"\"\n\n def download(self, engine=None, debug=False):\n Script.download(self, engine, debug)\n engine = self.engine\n\n # download and create species table\n table = Table('species')\n self.engine.auto_create_table(table, url=self.urls['species'])\n self.engine.insert_data_from_url(self.urls['species'])\n\n # State abbreviations with the year annual inventory began for that state\n stateslist = [('AL', 2001), ('AK', 2004), ('AZ', 2001), ('AR', 2000),\n ('CA', 2001), ('CO', 2002), ('CT', 2003), ('DE', 2004),\n ('FL', 2003), ('GA', 1998), ('ID', 2004), ('IL', 2001),\n ('IN', 1999), ('IA', 1999), ('KS', 2001), ('KY', 1999),\n ('LA', 2001), ('ME', 1999), ('MD', 2004), ('MA', 2003),\n ('MI', 2000), ('MN', 1999), ('MO', 1999), ('MS', 2006),\n ('MT', 2003), ('NE', 2001), ('NV', 2004), ('NH', 2002),\n ('NJ', 2004), ('NM', 1999), ('NY', 2002), ('NC', 2003),\n ('ND', 2001), ('OH', 2001), ('OK', 2008), ('OR', 2001),\n ('PA', 2000), ('RI', 2003), ('SC', 1999), ('SD', 2001),\n ('TN', 2000), ('TX', 2001), ('UT', 2000), ('VT', 2003),\n ('VA', 1998), ('WA', 2002), ('WV', 2004), ('WI', 2000),\n ('WY', 2000), ('PR', 2001)]\n\n tablelist = [\"SURVEY\", \"PLOT\", \"COND\", \"SUBPLOT\", \"SUBP_COND\", \"TREE\", \"SEEDLING\"]\n\n for table in tablelist:\n for state, year in stateslist:\n engine.download_files_from_archive(self.urls[\"main\"] + state + \"_\" + table + \".ZIP\",\n [state + \"_\" + table + \".csv\"])\n\n for table in tablelist:\n print(\"Scanning data for table %s...\" % table)\n prep_file_name = \"%s.csv\" % table\n prep_file = open_fw(engine.format_filename(prep_file_name))\n this_file = open_fr(engine.format_filename(stateslist[0][0] + \"_\" + table + \".csv\"))\n col_names = this_file.readline()\n prep_file.write(col_names)\n column_names = [col.strip('\"') for col in col_names.split(',')]\n year_column = column_names.index(\"INVYR\")\n this_file.close()\n\n for state, year in stateslist:\n this_file = open_fr(engine.format_filename(state + \"_\" + table + \".csv\"))\n this_file.readline()\n for line in this_file:\n values = line.split(',')\n this_year = values[year_column]\n if int(this_year) >= year:\n prep_file.write(line)\n prep_file.close()\n engine.auto_create_table(Table(table), filename=prep_file_name)\n\n engine.insert_data_from_file(engine.format_filename(prep_file_name))\n\n try:\n os.remove(engine.format_filename(prep_file_name))\n except:\n pass\n\n return engine\n\n\nSCRIPT = main()\n", "path": "scripts/forest-inventory-analysis.py"}]}
| 2,070 | 425 |
gh_patches_debug_17799
|
rasdani/github-patches
|
git_diff
|
internetarchive__openlibrary-4247
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
solrupdater getting randomly stuck
Solrupdater has been getting stuck randomly at various dates. It seems related to the infobase py3 deploy.
### Steps to Reproduce
<!-- What steps caused you to find the bug? -->
1. Do nothing
<!-- What actually happened after these steps? What did you expect to happen? -->
* Actual: It seems to occasionally get stuck in the log
* Expected: Solrupdater runs through the infobase logs, making updates
### Proposal & Constraints
<!-- What is the proposed solution / implementation? Is there a precedent of this approach succeeding elsewhere? -->
### Related files
<!-- Files related to this issue; this is super useful for new contributors who might want to help! If you're not sure, leave this blank; a maintainer will add them. -->
### Stakeholders
@seabelis
</issue>
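The updater tails the infobase edit log over HTTP (see `InfobaseLog.read_records` below) and relies on `socket.setdefaulttimeout` as its only guard against a hung connection. A quick way to check whether the log endpoint itself is what stalls is to fetch one page with an explicit per-request timeout; the host and offset here are placeholders, while the URL shape and response keys are taken from the class below:

```python
import json
from urllib.request import urlopen

INFOBASE_HOST = "localhost:7000"   # assumption: infobase host:port from your config
OFFSET = "2020-12-01:0"            # "<date>:<n>", same format as the state file

url = "http://%s/openlibrary.org/log/%s?limit=100" % (INFOBASE_HOST, OFFSET)
with urlopen(url, timeout=10) as resp:   # explicit timeout: a stall raises instead of hanging
    page = json.loads(resp.read())

print(len(page["data"]), "records; next offset:", page["offset"])
```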
<code>
[start of scripts/new-solr-updater.py]
1 """New script to handle solr updates.
2
3 Author: Anand Chitipothu
4
5 Changes:
6 2013-02-25: First version
7 2018-02-11: Use newer config method
8 """
9 import _init_path
10
11 from six.moves import urllib
12 import yaml
13 import logging
14 import json
15 import argparse
16 import datetime
17 import time
18 import web
19 import sys
20 import re
21 import socket
22
23 from openlibrary.solr import update_work
24 from openlibrary.config import load_config
25 from infogami import config
26
27 logger = logging.getLogger("openlibrary.solr-updater")
28
29 LOAD_IA_SCANS = False
30 COMMIT = True
31 args = {}
32
33
34 def parse_arguments():
35 parser = argparse.ArgumentParser()
36 parser.add_argument('-c', '--config')
37 parser.add_argument('--debugger', action="store_true", help="Wait for a debugger to attach before beginning.")
38 parser.add_argument('--state-file', default="solr-update.state")
39 parser.add_argument('--exclude-edits-containing', help="Don't index matching edits")
40 parser.add_argument('--ol-url', default="http://openlibrary.org/")
41 parser.add_argument('--socket-timeout', type=int, default=10)
42 parser.add_argument('--load-ia-scans', dest="load_ia_scans", action="store_true", default=False)
43 parser.add_argument('--no-commit', dest="commit", action="store_false", default=True)
44 return parser.parse_args()
45
46 def read_state_file(path):
47 try:
48 return open(path).read()
49 except IOError:
50 logger.error("State file %s is not found. Reading log from the beginning of today", path)
51 return datetime.date.today().isoformat() + ":0"
52
53 def get_default_offset():
54 return datetime.date.today().isoformat() + ":0"
55
56
57 class InfobaseLog:
58 def __init__(self, hostname, exclude=None):
59 """
60 :param str hostname:
61 :param str|None exclude: if specified, excludes records that include the string
62 """
63 self.base_url = 'http://%s/openlibrary.org/log' % hostname
64 self.offset = get_default_offset()
65 self.exclude = exclude
66
67 def tell(self):
68 return self.offset
69
70 def seek(self, offset):
71 self.offset = offset.strip()
72
73 def read_records(self, max_fetches=10):
74 """Reads all the available log records from the server.
75 """
76 for i in range(max_fetches):
77 url = "%s/%s?limit=100" % (self.base_url, self.offset)
78 logger.debug("Reading log from %s", url)
79 try:
80 jsontext = urllib.request.urlopen(url).read()
81 except urllib.error.URLError as e:
82 logger.error("Failed to open URL %s", url, exc_info=True)
83 if e.args and e.args[0].args == (111, 'Connection refused'):
84 logger.error('make sure infogami server is working, connection refused from %s', url)
85 sys.exit(1)
86 raise
87
88 try:
89 d = json.loads(jsontext)
90 except:
91 logger.error("Bad JSON: %s", jsontext)
92 raise
93 data = d['data']
94 # no more data is available
95 if not data:
96 logger.debug("no more records found")
97 return
98
99 for record in data:
100 if self.exclude and self.exclude in json.dumps(record):
101 continue
102 yield record
103
104 self.offset = d['offset']
105
106 def parse_log(records):
107 for rec in records:
108 action = rec.get('action')
109 if action == 'save':
110 key = rec['data'].get('key')
111 if key:
112 yield key
113 elif action == 'save_many':
114 changes = rec['data'].get('changeset', {}).get('changes', [])
115 for c in changes:
116 yield c['key']
117
118 elif action == 'store.put':
119 # A sample record looks like this:
120 # {
121 # "action": "store.put",
122 # "timestamp": "2011-12-01T00:00:44.241604",
123 # "data": {
124 # "data": {"borrowed": "false", "_key": "ebooks/books/OL5854888M", "_rev": "975708", "type": "ebook", "book_key": "/books/OL5854888M"},
125 # "key": "ebooks/books/OL5854888M"
126 # },
127 # "site": "openlibrary.org"
128 # }
129 data = rec.get('data', {}).get("data", {})
130 key = data.get("_key", "")
131 if data.get("type") == "ebook" and key.startswith("ebooks/books/"):
132 edition_key = data.get('book_key')
133 if edition_key:
134 yield edition_key
135 elif LOAD_IA_SCANS and data.get("type") == "ia-scan" and key.startswith("ia-scan/"):
136 identifier = data.get('identifier')
137 if identifier and is_allowed_itemid(identifier):
138 yield "/books/ia:" + identifier
139
140 # Hack to force updating something from admin interface
141 # The admin interface writes the keys to update to a document named
142 # 'solr-force-update' in the store and whatever keys are written to that
143 # are picked by this script
144 elif key == 'solr-force-update':
145 keys = data.get('keys')
146 for k in keys:
147 yield k
148
149 elif action == 'store.delete':
150 key = rec.get("data", {}).get("key")
151 # An ia-scan key is deleted when that book is deleted/darked from IA.
152 # Delete it from OL solr by updating that key
153 if key.startswith("ia-scan/"):
154 ol_key = "/works/ia:" + key.split("/")[-1]
155 yield ol_key
156
157 def is_allowed_itemid(identifier):
158 if not re.match("^[a-zA-Z0-9_.-]*$", identifier):
159 return False
160
161 # items starts with these prefixes are not books. Ignore them.
162 ignore_prefixes = config.get("ia_ignore_prefixes", [])
163 for prefix in ignore_prefixes:
164 if identifier.startswith(prefix):
165 return False
166
167 return True
168
169 def update_keys(keys):
170 if not keys:
171 return 0
172
173 # FIXME: Some kind of hack introduced to work around DB connectivity issue
174 global args
175 logger.debug("Args: %s" % str(args))
176 update_work.load_configs(args.ol_url, args.config, 'default')
177
178 keys = [k for k in keys if k.count("/") == 2 and k.split("/")[1] in ("books", "authors", "works")]
179
180 count = 0
181 for chunk in web.group(keys, 100):
182 chunk = list(chunk)
183 count += len(chunk)
184 update_work.do_updates(chunk)
185
186 if count:
187 logger.info("updated %d documents", count)
188
189 return count
190
191 class Solr:
192 def __init__(self):
193 self.reset()
194
195 def reset(self):
196 self.total_docs = 0
197 self.t_start = time.time()
198
199 def commit(self, ndocs):
200 """Performs solr commit only if there are sufficient number
201 of documents or enough time has been passed since last commit.
202 """
203 self.total_docs += ndocs
204
205 # no documents to commit
206 if not self.total_docs:
207 return
208
209 dt = time.time() - self.t_start
210 if self.total_docs > 100 or dt > 60:
211 logger.info("doing solr commit (%d docs updated, last commit was %0.1f seconds ago)", self.total_docs, dt)
212 self._solr_commit()
213 self.reset()
214 else:
215 logger.debug("skipping solr commit (%d docs updated, last commit was %0.1f seconds ago)", self.total_docs, dt)
216
217 def _solr_commit(self):
218 logger.info("BEGIN commit")
219 update_work.solr_update(['<commit/>'])
220 logger.info("END commit")
221
222
223 def process_args(args):
224 if args.debugger:
225 import ptvsd
226
227 logger.info("Enabling debugger attachment (attach if it hangs here)")
228 ptvsd.enable_attach(address=('0.0.0.0', 3000))
229 logger.info("Waiting for debugger to attach...")
230 ptvsd.wait_for_attach()
231 logger.info("Debugger attached to port 3000")
232
233 # Sometimes archive.org requests blocks forever.
234 # Setting a timeout will make the request fail instead of waiting forever.
235 socket.setdefaulttimeout(args.socket_timeout)
236
237 global LOAD_IA_SCANS, COMMIT
238 LOAD_IA_SCANS = args.load_ia_scans
239 COMMIT = args.commit
240
241
242 def main():
243 global args
244 FORMAT = "%(asctime)-15s %(levelname)s %(message)s"
245 logging.basicConfig(level=logging.INFO, format=FORMAT)
246
247 logger.info("BEGIN new-solr-updater")
248
249 args = parse_arguments()
250 process_args(args)
251
252 # set OL URL when running on a dev-instance
253 if args.ol_url:
254 host = web.lstrips(args.ol_url, "http://").strip("/")
255 update_work.set_query_host(host)
256
257 logger.info(str(args))
258 logger.info("loading config from %s", args.config)
259 load_config(args.config)
260
261 state_file = args.state_file
262 offset = read_state_file(state_file)
263
264 logfile = InfobaseLog(config.get('infobase_server'),
265 exclude=args.exclude_edits_containing)
266 logfile.seek(offset)
267
268 solr = Solr()
269
270 while True:
271 records = logfile.read_records()
272 keys = parse_log(records)
273 count = update_keys(keys)
274
275 if logfile.tell() != offset:
276 offset = logfile.tell()
277 logger.info("saving offset %s", offset)
278 with open(state_file, "w") as f:
279 f.write(offset)
280
281 if COMMIT:
282 solr.commit(ndocs=count)
283 else:
284 logger.info("not doing solr commit as commit is off")
285
286 # don't sleep after committing some records.
287 # While the commit was on, some more edits might have happened.
288 if count == 0:
289 logger.debug("No more log records available, sleeping...")
290 time.sleep(5)
291
292
293 if __name__ == "__main__":
294 main()
295
[end of scripts/new-solr-updater.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/scripts/new-solr-updater.py b/scripts/new-solr-updater.py
--- a/scripts/new-solr-updater.py
+++ b/scripts/new-solr-updater.py
@@ -94,6 +94,18 @@
# no more data is available
if not data:
logger.debug("no more records found")
+ # There's an infobase bug where we'll sometimes get 0 items, but the
+ # binary offset will have incremented...?
+ if 'offset' in d:
+ # There's _another_ infobase bug where if you query a future date,
+ # it'll return back 2020-12-01. To avoid solrupdater getting stuck
+ # in a loop, only update the offset if it's newer than the current
+ old_day, old_boffset = self.offset.split(':')
+ old_boffset = int(old_boffset)
+ new_day, new_boffset = d['offset'].split(':')
+ new_boffset = int(new_boffset)
+ if new_day >= old_day and new_boffset >= old_boffset:
+ self.offset = d['offset']
return
for record in data:
|
{"golden_diff": "diff --git a/scripts/new-solr-updater.py b/scripts/new-solr-updater.py\n--- a/scripts/new-solr-updater.py\n+++ b/scripts/new-solr-updater.py\n@@ -94,6 +94,18 @@\n # no more data is available\n if not data:\n logger.debug(\"no more records found\")\n+ # There's an infobase bug where we'll sometimes get 0 items, but the\n+ # binary offset will have incremented...?\n+ if 'offset' in d:\n+ # There's _another_ infobase bug where if you query a future date,\n+ # it'll return back 2020-12-01. To avoid solrupdater getting stuck\n+ # in a loop, only update the offset if it's newer than the current\n+ old_day, old_boffset = self.offset.split(':')\n+ old_boffset = int(old_boffset)\n+ new_day, new_boffset = d['offset'].split(':')\n+ new_boffset = int(new_boffset)\n+ if new_day >= old_day and new_boffset >= old_boffset:\n+ self.offset = d['offset']\n return\n \n for record in data:\n", "issue": "solrupdater getting randomly stuck\nSolrupdater has been getting stuck randomly at various dates. It seems related to the infobase py3 deploy.\r\n\r\n### Steps to Reproduce\r\n<!-- What steps caused you to find the bug? -->\r\n1. Do nothing\r\n\r\n<!-- What actually happened after these steps? What did you expect to happen? -->\r\n* Actual: It seems to occasionally get stuck in the log\r\n* Expected: Solrupdater runs through the infobase logs, making updates\r\n\r\n### Proposal & Constraints\r\n<!-- What is the proposed solution / implementation? Is there a precedent of this approach succeeding elsewhere? -->\r\n\r\n### Related files\r\n<!-- Files related to this issue; this is super useful for new contributors who might want to help! If you're not sure, leave this blank; a maintainer will add them. -->\r\n\r\n### Stakeholders\r\n@seabelis \r\n\n", "before_files": [{"content": "\"\"\"New script to handle solr updates.\n\nAuthor: Anand Chitipothu\n\nChanges:\n2013-02-25: First version\n2018-02-11: Use newer config method\n\"\"\"\nimport _init_path\n\nfrom six.moves import urllib\nimport yaml\nimport logging\nimport json\nimport argparse\nimport datetime\nimport time\nimport web\nimport sys\nimport re\nimport socket\n\nfrom openlibrary.solr import update_work\nfrom openlibrary.config import load_config\nfrom infogami import config\n\nlogger = logging.getLogger(\"openlibrary.solr-updater\")\n\nLOAD_IA_SCANS = False\nCOMMIT = True\nargs = {}\n\n\ndef parse_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument('-c', '--config')\n parser.add_argument('--debugger', action=\"store_true\", help=\"Wait for a debugger to attach before beginning.\")\n parser.add_argument('--state-file', default=\"solr-update.state\")\n parser.add_argument('--exclude-edits-containing', help=\"Don't index matching edits\")\n parser.add_argument('--ol-url', default=\"http://openlibrary.org/\")\n parser.add_argument('--socket-timeout', type=int, default=10)\n parser.add_argument('--load-ia-scans', dest=\"load_ia_scans\", action=\"store_true\", default=False)\n parser.add_argument('--no-commit', dest=\"commit\", action=\"store_false\", default=True)\n return parser.parse_args()\n\ndef read_state_file(path):\n try:\n return open(path).read()\n except IOError:\n logger.error(\"State file %s is not found. 
Reading log from the beginning of today\", path)\n return datetime.date.today().isoformat() + \":0\"\n\ndef get_default_offset():\n return datetime.date.today().isoformat() + \":0\"\n\n\nclass InfobaseLog:\n def __init__(self, hostname, exclude=None):\n \"\"\"\n :param str hostname:\n :param str|None exclude: if specified, excludes records that include the string\n \"\"\"\n self.base_url = 'http://%s/openlibrary.org/log' % hostname\n self.offset = get_default_offset()\n self.exclude = exclude\n\n def tell(self):\n return self.offset\n\n def seek(self, offset):\n self.offset = offset.strip()\n\n def read_records(self, max_fetches=10):\n \"\"\"Reads all the available log records from the server.\n \"\"\"\n for i in range(max_fetches):\n url = \"%s/%s?limit=100\" % (self.base_url, self.offset)\n logger.debug(\"Reading log from %s\", url)\n try:\n jsontext = urllib.request.urlopen(url).read()\n except urllib.error.URLError as e:\n logger.error(\"Failed to open URL %s\", url, exc_info=True)\n if e.args and e.args[0].args == (111, 'Connection refused'):\n logger.error('make sure infogami server is working, connection refused from %s', url)\n sys.exit(1)\n raise\n\n try:\n d = json.loads(jsontext)\n except:\n logger.error(\"Bad JSON: %s\", jsontext)\n raise\n data = d['data']\n # no more data is available\n if not data:\n logger.debug(\"no more records found\")\n return\n\n for record in data:\n if self.exclude and self.exclude in json.dumps(record):\n continue\n yield record\n\n self.offset = d['offset']\n\ndef parse_log(records):\n for rec in records:\n action = rec.get('action')\n if action == 'save':\n key = rec['data'].get('key')\n if key:\n yield key\n elif action == 'save_many':\n changes = rec['data'].get('changeset', {}).get('changes', [])\n for c in changes:\n yield c['key']\n\n elif action == 'store.put':\n # A sample record looks like this:\n # {\n # \"action\": \"store.put\",\n # \"timestamp\": \"2011-12-01T00:00:44.241604\",\n # \"data\": {\n # \"data\": {\"borrowed\": \"false\", \"_key\": \"ebooks/books/OL5854888M\", \"_rev\": \"975708\", \"type\": \"ebook\", \"book_key\": \"/books/OL5854888M\"},\n # \"key\": \"ebooks/books/OL5854888M\"\n # },\n # \"site\": \"openlibrary.org\"\n # }\n data = rec.get('data', {}).get(\"data\", {})\n key = data.get(\"_key\", \"\")\n if data.get(\"type\") == \"ebook\" and key.startswith(\"ebooks/books/\"):\n edition_key = data.get('book_key')\n if edition_key:\n yield edition_key\n elif LOAD_IA_SCANS and data.get(\"type\") == \"ia-scan\" and key.startswith(\"ia-scan/\"):\n identifier = data.get('identifier')\n if identifier and is_allowed_itemid(identifier):\n yield \"/books/ia:\" + identifier\n\n # Hack to force updating something from admin interface\n # The admin interface writes the keys to update to a document named\n # 'solr-force-update' in the store and whatever keys are written to that\n # are picked by this script\n elif key == 'solr-force-update':\n keys = data.get('keys')\n for k in keys:\n yield k\n\n elif action == 'store.delete':\n key = rec.get(\"data\", {}).get(\"key\")\n # An ia-scan key is deleted when that book is deleted/darked from IA.\n # Delete it from OL solr by updating that key\n if key.startswith(\"ia-scan/\"):\n ol_key = \"/works/ia:\" + key.split(\"/\")[-1]\n yield ol_key\n\ndef is_allowed_itemid(identifier):\n if not re.match(\"^[a-zA-Z0-9_.-]*$\", identifier):\n return False\n\n # items starts with these prefixes are not books. 
Ignore them.\n ignore_prefixes = config.get(\"ia_ignore_prefixes\", [])\n for prefix in ignore_prefixes:\n if identifier.startswith(prefix):\n return False\n\n return True\n\ndef update_keys(keys):\n if not keys:\n return 0\n\n # FIXME: Some kind of hack introduced to work around DB connectivity issue\n global args\n logger.debug(\"Args: %s\" % str(args))\n update_work.load_configs(args.ol_url, args.config, 'default')\n\n keys = [k for k in keys if k.count(\"/\") == 2 and k.split(\"/\")[1] in (\"books\", \"authors\", \"works\")]\n\n count = 0\n for chunk in web.group(keys, 100):\n chunk = list(chunk)\n count += len(chunk)\n update_work.do_updates(chunk)\n\n if count:\n logger.info(\"updated %d documents\", count)\n\n return count\n\nclass Solr:\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.total_docs = 0\n self.t_start = time.time()\n\n def commit(self, ndocs):\n \"\"\"Performs solr commit only if there are sufficient number\n of documents or enough time has been passed since last commit.\n \"\"\"\n self.total_docs += ndocs\n\n # no documents to commit\n if not self.total_docs:\n return\n\n dt = time.time() - self.t_start\n if self.total_docs > 100 or dt > 60:\n logger.info(\"doing solr commit (%d docs updated, last commit was %0.1f seconds ago)\", self.total_docs, dt)\n self._solr_commit()\n self.reset()\n else:\n logger.debug(\"skipping solr commit (%d docs updated, last commit was %0.1f seconds ago)\", self.total_docs, dt)\n\n def _solr_commit(self):\n logger.info(\"BEGIN commit\")\n update_work.solr_update(['<commit/>'])\n logger.info(\"END commit\")\n\n\ndef process_args(args):\n if args.debugger:\n import ptvsd\n\n logger.info(\"Enabling debugger attachment (attach if it hangs here)\")\n ptvsd.enable_attach(address=('0.0.0.0', 3000))\n logger.info(\"Waiting for debugger to attach...\")\n ptvsd.wait_for_attach()\n logger.info(\"Debugger attached to port 3000\")\n\n # Sometimes archive.org requests blocks forever.\n # Setting a timeout will make the request fail instead of waiting forever.\n socket.setdefaulttimeout(args.socket_timeout)\n\n global LOAD_IA_SCANS, COMMIT\n LOAD_IA_SCANS = args.load_ia_scans\n COMMIT = args.commit\n\n\ndef main():\n global args\n FORMAT = \"%(asctime)-15s %(levelname)s %(message)s\"\n logging.basicConfig(level=logging.INFO, format=FORMAT)\n\n logger.info(\"BEGIN new-solr-updater\")\n\n args = parse_arguments()\n process_args(args)\n\n # set OL URL when running on a dev-instance\n if args.ol_url:\n host = web.lstrips(args.ol_url, \"http://\").strip(\"/\")\n update_work.set_query_host(host)\n\n logger.info(str(args))\n logger.info(\"loading config from %s\", args.config)\n load_config(args.config)\n\n state_file = args.state_file\n offset = read_state_file(state_file)\n\n logfile = InfobaseLog(config.get('infobase_server'),\n exclude=args.exclude_edits_containing)\n logfile.seek(offset)\n\n solr = Solr()\n\n while True:\n records = logfile.read_records()\n keys = parse_log(records)\n count = update_keys(keys)\n\n if logfile.tell() != offset:\n offset = logfile.tell()\n logger.info(\"saving offset %s\", offset)\n with open(state_file, \"w\") as f:\n f.write(offset)\n\n if COMMIT:\n solr.commit(ndocs=count)\n else:\n logger.info(\"not doing solr commit as commit is off\")\n\n # don't sleep after committing some records.\n # While the commit was on, some more edits might have happened.\n if count == 0:\n logger.debug(\"No more log records available, sleeping...\")\n time.sleep(5)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": 
"scripts/new-solr-updater.py"}]}
| 3,787 | 273 |
gh_patches_debug_572
|
rasdani/github-patches
|
git_diff
|
hylang__hy-1343
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
REPL history is lost on (quit)
REPL history is not flushed to disk if the REPL is exited using `(quit)`.
A workaround is to remember to use `CTRL-D` to exit the REPL.
It would be nice if `(quit)` also worked.
</issue>
<code>
[start of hy/completer.py]
1 # Copyright 2017 the authors.
2 # This file is part of Hy, which is free software licensed under the Expat
3 # license. See the LICENSE.
4
5 import contextlib
6 import os
7 import re
8 import sys
9
10 import hy.macros
11 import hy.compiler
12 from hy._compat import builtins, string_types
13
14
15 docomplete = True
16
17 try:
18 import readline
19 except ImportError:
20 try:
21 import pyreadline.rlmain
22 import pyreadline.unicode_helper # NOQA
23 import readline
24 except ImportError:
25 docomplete = False
26
27 if sys.platform == 'darwin' and 'libedit' in readline.__doc__:
28 readline_bind = "bind ^I rl_complete"
29 else:
30 readline_bind = "tab: complete"
31
32
33 class Completer(object):
34
35 def __init__(self, namespace={}):
36 if not isinstance(namespace, dict):
37 raise TypeError('namespace must be a dictionary')
38 self.namespace = namespace
39 self.path = [hy.compiler._compile_table,
40 builtins.__dict__,
41 hy.macros._hy_macros[None],
42 namespace]
43 self.tag_path = [hy.macros._hy_tag[None]]
44 if '__name__' in namespace:
45 module_name = namespace['__name__']
46 self.path.append(hy.macros._hy_macros[module_name])
47 self.tag_path.append(hy.macros._hy_tag[module_name])
48
49 def attr_matches(self, text):
50 # Borrowed from IPython's completer
51 m = re.match(r"(\S+(\.[\w-]+)*)\.([\w-]*)$", text)
52
53 if m:
54 expr, attr = m.group(1, 3)
55 attr = attr.replace("-", "_")
56 expr = expr.replace("-", "_")
57 else:
58 return []
59
60 try:
61 obj = eval(expr, self.namespace)
62 words = dir(obj)
63 except Exception:
64 return []
65
66 n = len(attr)
67 matches = []
68 for w in words:
69 if w[:n] == attr:
70 matches.append("{}.{}".format(
71 expr.replace("_", "-"), w.replace("_", "-")))
72 return matches
73
74 def global_matches(self, text):
75 matches = []
76 for p in self.path:
77 for k in p.keys():
78 if isinstance(k, string_types):
79 k = k.replace("_", "-")
80 if k.startswith(text):
81 matches.append(k)
82 return matches
83
84 def tag_matches(self, text):
85 text = text[1:]
86 matches = []
87 for p in self.tag_path:
88 for k in p.keys():
89 if isinstance(k, string_types):
90 if k.startswith(text):
91 matches.append("#{}".format(k))
92 return matches
93
94 def complete(self, text, state):
95 if text.startswith("#"):
96 matches = self.tag_matches(text)
97 elif "." in text:
98 matches = self.attr_matches(text)
99 else:
100 matches = self.global_matches(text)
101 try:
102 return matches[state]
103 except IndexError:
104 return None
105
106
107 @contextlib.contextmanager
108 def completion(completer=None):
109 delims = "()[]{} "
110 if not completer:
111 completer = Completer()
112
113 if docomplete:
114 readline.set_completer(completer.complete)
115 readline.set_completer_delims(delims)
116
117 history = os.path.expanduser("~/.hy-history")
118 readline.parse_and_bind("set blink-matching-paren on")
119
120 try:
121 readline.read_history_file(history)
122 except IOError:
123 open(history, 'a').close()
124
125 readline.parse_and_bind(readline_bind)
126
127 yield
128
129 if docomplete:
130 readline.write_history_file(history)
131
[end of hy/completer.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/hy/completer.py b/hy/completer.py
--- a/hy/completer.py
+++ b/hy/completer.py
@@ -124,7 +124,8 @@
readline.parse_and_bind(readline_bind)
- yield
-
- if docomplete:
- readline.write_history_file(history)
+ try:
+ yield
+ finally:
+ if docomplete:
+ readline.write_history_file(history)
|
{"golden_diff": "diff --git a/hy/completer.py b/hy/completer.py\n--- a/hy/completer.py\n+++ b/hy/completer.py\n@@ -124,7 +124,8 @@\n \n readline.parse_and_bind(readline_bind)\n \n- yield\n-\n- if docomplete:\n- readline.write_history_file(history)\n+ try:\n+ yield\n+ finally:\n+ if docomplete:\n+ readline.write_history_file(history)\n", "issue": "REPL history is lost on (quit)\nREPL history is not flushed to disk if the REPL is exited using `(quit)`. \r\n\r\nA workaround is to remember to use `CTRL-D` to exit the REPL.\r\n\r\nWould be nice if `(quit)` also worked.\n", "before_files": [{"content": "# Copyright 2017 the authors.\n# This file is part of Hy, which is free software licensed under the Expat\n# license. See the LICENSE.\n\nimport contextlib\nimport os\nimport re\nimport sys\n\nimport hy.macros\nimport hy.compiler\nfrom hy._compat import builtins, string_types\n\n\ndocomplete = True\n\ntry:\n import readline\nexcept ImportError:\n try:\n import pyreadline.rlmain\n import pyreadline.unicode_helper # NOQA\n import readline\n except ImportError:\n docomplete = False\n\nif sys.platform == 'darwin' and 'libedit' in readline.__doc__:\n readline_bind = \"bind ^I rl_complete\"\nelse:\n readline_bind = \"tab: complete\"\n\n\nclass Completer(object):\n\n def __init__(self, namespace={}):\n if not isinstance(namespace, dict):\n raise TypeError('namespace must be a dictionary')\n self.namespace = namespace\n self.path = [hy.compiler._compile_table,\n builtins.__dict__,\n hy.macros._hy_macros[None],\n namespace]\n self.tag_path = [hy.macros._hy_tag[None]]\n if '__name__' in namespace:\n module_name = namespace['__name__']\n self.path.append(hy.macros._hy_macros[module_name])\n self.tag_path.append(hy.macros._hy_tag[module_name])\n\n def attr_matches(self, text):\n # Borrowed from IPython's completer\n m = re.match(r\"(\\S+(\\.[\\w-]+)*)\\.([\\w-]*)$\", text)\n\n if m:\n expr, attr = m.group(1, 3)\n attr = attr.replace(\"-\", \"_\")\n expr = expr.replace(\"-\", \"_\")\n else:\n return []\n\n try:\n obj = eval(expr, self.namespace)\n words = dir(obj)\n except Exception:\n return []\n\n n = len(attr)\n matches = []\n for w in words:\n if w[:n] == attr:\n matches.append(\"{}.{}\".format(\n expr.replace(\"_\", \"-\"), w.replace(\"_\", \"-\")))\n return matches\n\n def global_matches(self, text):\n matches = []\n for p in self.path:\n for k in p.keys():\n if isinstance(k, string_types):\n k = k.replace(\"_\", \"-\")\n if k.startswith(text):\n matches.append(k)\n return matches\n\n def tag_matches(self, text):\n text = text[1:]\n matches = []\n for p in self.tag_path:\n for k in p.keys():\n if isinstance(k, string_types):\n if k.startswith(text):\n matches.append(\"#{}\".format(k))\n return matches\n\n def complete(self, text, state):\n if text.startswith(\"#\"):\n matches = self.tag_matches(text)\n elif \".\" in text:\n matches = self.attr_matches(text)\n else:\n matches = self.global_matches(text)\n try:\n return matches[state]\n except IndexError:\n return None\n\n\[email protected]\ndef completion(completer=None):\n delims = \"()[]{} \"\n if not completer:\n completer = Completer()\n\n if docomplete:\n readline.set_completer(completer.complete)\n readline.set_completer_delims(delims)\n\n history = os.path.expanduser(\"~/.hy-history\")\n readline.parse_and_bind(\"set blink-matching-paren on\")\n\n try:\n readline.read_history_file(history)\n except IOError:\n open(history, 'a').close()\n\n readline.parse_and_bind(readline_bind)\n\n yield\n\n if docomplete:\n 
readline.write_history_file(history)\n", "path": "hy/completer.py"}]}
| 1,669 | 108 |
gh_patches_debug_26071
|
rasdani/github-patches
|
git_diff
|
localstack__localstack-2117
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Is it possible to support dynamodbstreams ListStreams?
See also https://github.com/localstack/localstack/blob/master/localstack/services/dynamodbstreams/dynamodbstreams_api.py#L70
</issue>
<code>
[start of localstack/services/dynamodbstreams/dynamodbstreams_api.py]
1 import json
2 import uuid
3 import hashlib
4 import six
5 from flask import Flask, jsonify, request, make_response
6 from localstack.services import generic_proxy
7 from localstack.utils.aws import aws_stack
8 from localstack.utils.common import to_str, to_bytes
9 from localstack.utils.analytics import event_publisher
10
11 APP_NAME = 'ddb_streams_api'
12
13 app = Flask(APP_NAME)
14
15 DDB_STREAMS = {}
16
17 DDB_KINESIS_STREAM_NAME_PREFIX = '__ddb_stream_'
18
19 ACTION_HEADER_PREFIX = 'DynamoDBStreams_20120810'
20
21 SEQUENCE_NUMBER_COUNTER = 1
22
23
24 def add_dynamodb_stream(table_name, latest_stream_label=None, view_type='NEW_AND_OLD_IMAGES', enabled=True):
25 if enabled:
26 # create kinesis stream as a backend
27 stream_name = get_kinesis_stream_name(table_name)
28 aws_stack.create_kinesis_stream(stream_name)
29 latest_stream_label = latest_stream_label or 'latest'
30 stream = {
31 'StreamArn': aws_stack.dynamodb_stream_arn(
32 table_name=table_name, latest_stream_label=latest_stream_label),
33 'TableName': table_name,
34 'StreamLabel': latest_stream_label,
35 'StreamStatus': 'ENABLED',
36 'KeySchema': [],
37 'Shards': []
38 }
39 table_arn = aws_stack.dynamodb_table_arn(table_name)
40 DDB_STREAMS[table_arn] = stream
41 # record event
42 event_publisher.fire_event(event_publisher.EVENT_DYNAMODB_CREATE_STREAM,
43 payload={'n': event_publisher.get_hash(table_name)})
44
45
46 def forward_events(records):
47 global SEQUENCE_NUMBER_COUNTER
48 kinesis = aws_stack.connect_to_service('kinesis')
49 for record in records:
50 if 'SequenceNumber' not in record['dynamodb']:
51 record['dynamodb']['SequenceNumber'] = str(SEQUENCE_NUMBER_COUNTER)
52 SEQUENCE_NUMBER_COUNTER += 1
53 table_arn = record['eventSourceARN']
54 stream = DDB_STREAMS.get(table_arn)
55 if stream:
56 table_name = table_name_from_stream_arn(stream['StreamArn'])
57 stream_name = get_kinesis_stream_name(table_name)
58 kinesis.put_record(StreamName=stream_name, Data=json.dumps(record), PartitionKey='TODO')
59
60
61 @app.route('/', methods=['POST'])
62 def post_request():
63 action = request.headers.get('x-amz-target')
64 data = json.loads(to_str(request.data))
65 result = {}
66 kinesis = aws_stack.connect_to_service('kinesis')
67 if action == '%s.ListStreams' % ACTION_HEADER_PREFIX:
68 result = {
69 'Streams': list(DDB_STREAMS.values()),
70 'LastEvaluatedStreamArn': 'TODO'
71 }
72 elif action == '%s.DescribeStream' % ACTION_HEADER_PREFIX:
73 for stream in DDB_STREAMS.values():
74 if stream['StreamArn'] == data['StreamArn']:
75 result = {
76 'StreamDescription': stream
77 }
78 # get stream details
79 dynamodb = aws_stack.connect_to_service('dynamodb')
80 table_name = table_name_from_stream_arn(stream['StreamArn'])
81 stream_name = get_kinesis_stream_name(table_name)
82 stream_details = kinesis.describe_stream(StreamName=stream_name)
83 table_details = dynamodb.describe_table(TableName=table_name)
84 stream['KeySchema'] = table_details['Table']['KeySchema']
85
86 # Replace Kinesis ShardIDs with ones that mimic actual
87 # DynamoDBStream ShardIDs.
88 stream_shards = stream_details['StreamDescription']['Shards']
89 for shard in stream_shards:
90 shard['ShardId'] = shard_id(stream_name, shard['ShardId'])
91 stream['Shards'] = stream_shards
92 break
93 if not result:
94 return error_response('Requested resource not found', error_type='ResourceNotFoundException')
95 elif action == '%s.GetShardIterator' % ACTION_HEADER_PREFIX:
96 # forward request to Kinesis API
97 stream_name = stream_name_from_stream_arn(data['StreamArn'])
98 stream_shard_id = kinesis_shard_id(data['ShardId'])
99 result = kinesis.get_shard_iterator(StreamName=stream_name,
100 ShardId=stream_shard_id, ShardIteratorType=data['ShardIteratorType'])
101 elif action == '%s.GetRecords' % ACTION_HEADER_PREFIX:
102 kinesis_records = kinesis.get_records(**data)
103 result = {'Records': [], 'NextShardIterator': kinesis_records.get('NextShardIterator')}
104 for record in kinesis_records['Records']:
105 result['Records'].append(json.loads(to_str(record['Data'])))
106 else:
107 print('WARNING: Unknown operation "%s"' % action)
108 return jsonify(result)
109
110
111 # -----------------
112 # HELPER FUNCTIONS
113 # -----------------
114
115 def error_response(message=None, error_type=None, code=400):
116 if not message:
117 message = 'Unknown error'
118 if not error_type:
119 error_type = 'UnknownError'
120 if 'com.amazonaws.dynamodb' not in error_type:
121 error_type = 'com.amazonaws.dynamodb.v20120810#%s' % error_type
122 content = {
123 'message': message,
124 '__type': error_type
125 }
126 return make_response(jsonify(content), code)
127
128
129 def get_kinesis_stream_name(table_name):
130 return DDB_KINESIS_STREAM_NAME_PREFIX + table_name
131
132
133 def table_name_from_stream_arn(stream_arn):
134 return stream_arn.split(':table/')[1].split('/')[0]
135
136
137 def stream_name_from_stream_arn(stream_arn):
138 table_name = table_name_from_stream_arn(stream_arn)
139 return get_kinesis_stream_name(table_name)
140
141
142 def random_id(stream_arn, kinesis_shard_id):
143 namespace = uuid.UUID(bytes=hashlib.sha1(to_bytes(stream_arn)).digest()[:16])
144 if six.PY2:
145 kinesis_shard_id = to_bytes(kinesis_shard_id, 'utf-8')
146 return uuid.uuid5(namespace, kinesis_shard_id).hex
147
148
149 def shard_id(stream_arn, kinesis_shard_id):
150 return '-'.join([kinesis_shard_id, random_id(stream_arn, kinesis_shard_id)])
151
152
153 def kinesis_shard_id(dynamodbstream_shard_id):
154 return dynamodbstream_shard_id.rsplit('-', 1)[0]
155
156
157 def serve(port, quiet=True):
158 generic_proxy.serve_flask_app(app=app, port=port, quiet=quiet)
159
[end of localstack/services/dynamodbstreams/dynamodbstreams_api.py]
[start of localstack/constants.py]
1 import os
2 import localstack_client.config
3
4 # LocalStack version
5 VERSION = '0.10.7'
6
7 # constant to represent the "local" region, i.e., local machine
8 REGION_LOCAL = 'local'
9
10 # dev environment
11 ENV_DEV = 'dev'
12
13 # backend service ports, for services that are behind a proxy (counting down from 4566)
14 DEFAULT_PORT_APIGATEWAY_BACKEND = 4566
15 DEFAULT_PORT_KINESIS_BACKEND = 4565
16 DEFAULT_PORT_DYNAMODB_BACKEND = 4564
17 DEFAULT_PORT_S3_BACKEND = 4563
18 DEFAULT_PORT_SNS_BACKEND = 4562
19 DEFAULT_PORT_SQS_BACKEND = 4561
20 DEFAULT_PORT_ELASTICSEARCH_BACKEND = 4560
21 DEFAULT_PORT_CLOUDFORMATION_BACKEND = 4559
22 DEFAULT_PORT_STEPFUNCTIONS_BACKEND = 4558
23 DEFAULT_PORT_IAM_BACKEND = 4557
24 DEFAULT_PORT_EC2_BACKEND = 4556
25 DEFAULT_PORT_KMS_BACKEND = 4555
26 DEFAULT_PORT_EVENTS_BACKEND = 4554
27 DEFAULT_PORT_LOGS_BACKEND = 4553
28
29 DEFAULT_PORT_WEB_UI = 8080
30
31 LOCALHOST = 'localhost'
32
33 # version of the Maven dependency with Java utility code
34 LOCALSTACK_MAVEN_VERSION = '0.2.0'
35
36 # map of default service APIs and ports to be spun up (fetch map from localstack_client)
37 DEFAULT_SERVICE_PORTS = localstack_client.config.get_service_ports()
38
39 # host to bind to when starting the services
40 BIND_HOST = '0.0.0.0'
41
42 # AWS user account ID used for tests
43 if 'TEST_AWS_ACCOUNT_ID' not in os.environ:
44 os.environ['TEST_AWS_ACCOUNT_ID'] = '000000000000'
45 TEST_AWS_ACCOUNT_ID = os.environ['TEST_AWS_ACCOUNT_ID']
46
47 # root code folder
48 LOCALSTACK_ROOT_FOLDER = os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))
49
50 # virtualenv folder
51 LOCALSTACK_VENV_FOLDER = os.path.join(LOCALSTACK_ROOT_FOLDER, '.venv')
52 if not os.path.isdir(LOCALSTACK_VENV_FOLDER):
53 # assuming this package lives here: <python>/lib/pythonX.X/site-packages/localstack/
54 LOCALSTACK_VENV_FOLDER = os.path.realpath(os.path.join(LOCALSTACK_ROOT_FOLDER, '..', '..', '..'))
55
56 # API Gateway path to indicate a user request sent to the gateway
57 PATH_USER_REQUEST = '_user_request_'
58
59 # name of LocalStack Docker image
60 DOCKER_IMAGE_NAME = 'localstack/localstack'
61
62 # backdoor API path used to retrieve or update config variables
63 CONFIG_UPDATE_PATH = '/?_config_'
64
65 # environment variable name to tag local test runs
66 ENV_INTERNAL_TEST_RUN = 'LOCALSTACK_INTERNAL_TEST_RUN'
67
68 # content types
69 APPLICATION_AMZ_JSON_1_0 = 'application/x-amz-json-1.0'
70 APPLICATION_AMZ_JSON_1_1 = 'application/x-amz-json-1.1'
71 APPLICATION_JSON = 'application/json'
72 APPLICATION_XML = 'application/xml'
73 APPLICATION_X_WWW_FORM_URLENCODED = 'application/x-www-form-urlencoded'
74
75 # strings to indicate truthy/falsy values
76 TRUE_STRINGS = ('1', 'true', 'True')
77 FALSE_STRINGS = ('0', 'false', 'False')
78
79 # Lambda defaults
80 LAMBDA_TEST_ROLE = 'arn:aws:iam::%s:role/lambda-test-role' % TEST_AWS_ACCOUNT_ID
81
82 # installation constants
83 ELASTICSEARCH_JAR_URL = 'https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-6.7.0.zip'
84 # See https://docs.aws.amazon.com/ja_jp/elasticsearch-service/latest/developerguide/aes-supported-plugins.html
85 ELASTICSEARCH_PLUGIN_LIST = ['analysis-icu', 'ingest-attachment', 'analysis-kuromoji',
86 'mapper-murmur3', 'mapper-size', 'analysis-phonetic', 'analysis-smartcn', 'analysis-stempel', 'analysis-ukrainian']
87 # Default ES modules to exclude (save apprx 66MB in the final image)
88 ELASTICSEARCH_DELETE_MODULES = ['ingest-geoip']
89 ELASTICMQ_JAR_URL = 'https://s3-eu-west-1.amazonaws.com/softwaremill-public/elasticmq-server-0.15.4.jar'
90 STS_JAR_URL = 'https://repo1.maven.org/maven2/com/amazonaws/aws-java-sdk-sts/1.11.14/aws-java-sdk-sts-1.11.14.jar'
91 STEPFUNCTIONS_ZIP_URL = 'https://s3.amazonaws.com/stepfunctionslocal/StepFunctionsLocal.zip'
92 KMS_URL_PATTERN = 'https://s3-eu-west-2.amazonaws.com/local-kms/localstack/v3/local-kms.<arch>.bin'
93
94 # TODO: Temporarily using a fixed version of DDB in Alpine, as we're hitting a SIGSEGV JVM crash with latest
95 DYNAMODB_JAR_URL_ALPINE = 'https://github.com/whummer/dynamodb-local/raw/master/etc/DynamoDBLocal.zip'
96 DYNAMODB_JAR_URL = 'https://s3-us-west-2.amazonaws.com/dynamodb-local/dynamodb_local_latest.zip'
97
98 # API endpoint for analytics events
99 API_ENDPOINT = os.environ.get('API_ENDPOINT') or 'https://api.localstack.cloud/v1'
100
101 # environment variable to indicates that this process is running the Web UI
102 LOCALSTACK_WEB_PROCESS = 'LOCALSTACK_WEB_PROCESS'
103 LOCALSTACK_INFRA_PROCESS = 'LOCALSTACK_INFRA_PROCESS'
104
105 # Hardcoded AWS account ID used by moto
106 MOTO_ACCOUNT_ID = '123456789012'
107
108 # Default lambda registry
109 DEFAULT_LAMBDA_CONTAINER_REGISTRY = 'lambci/lambda'
110
[end of localstack/constants.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/localstack/constants.py b/localstack/constants.py
--- a/localstack/constants.py
+++ b/localstack/constants.py
@@ -2,7 +2,7 @@
import localstack_client.config
# LocalStack version
-VERSION = '0.10.7'
+VERSION = '0.10.8'
# constant to represent the "local" region, i.e., local machine
REGION_LOCAL = 'local'
@@ -31,7 +31,7 @@
LOCALHOST = 'localhost'
# version of the Maven dependency with Java utility code
-LOCALSTACK_MAVEN_VERSION = '0.2.0'
+LOCALSTACK_MAVEN_VERSION = '0.2.1'
# map of default service APIs and ports to be spun up (fetch map from localstack_client)
DEFAULT_SERVICE_PORTS = localstack_client.config.get_service_ports()
diff --git a/localstack/services/dynamodbstreams/dynamodbstreams_api.py b/localstack/services/dynamodbstreams/dynamodbstreams_api.py
--- a/localstack/services/dynamodbstreams/dynamodbstreams_api.py
+++ b/localstack/services/dynamodbstreams/dynamodbstreams_api.py
@@ -66,8 +66,7 @@
kinesis = aws_stack.connect_to_service('kinesis')
if action == '%s.ListStreams' % ACTION_HEADER_PREFIX:
result = {
- 'Streams': list(DDB_STREAMS.values()),
- 'LastEvaluatedStreamArn': 'TODO'
+ 'Streams': list(DDB_STREAMS.values())
}
elif action == '%s.DescribeStream' % ACTION_HEADER_PREFIX:
for stream in DDB_STREAMS.values():
|
{"golden_diff": "diff --git a/localstack/constants.py b/localstack/constants.py\n--- a/localstack/constants.py\n+++ b/localstack/constants.py\n@@ -2,7 +2,7 @@\n import localstack_client.config\n \n # LocalStack version\n-VERSION = '0.10.7'\n+VERSION = '0.10.8'\n \n # constant to represent the \"local\" region, i.e., local machine\n REGION_LOCAL = 'local'\n@@ -31,7 +31,7 @@\n LOCALHOST = 'localhost'\n \n # version of the Maven dependency with Java utility code\n-LOCALSTACK_MAVEN_VERSION = '0.2.0'\n+LOCALSTACK_MAVEN_VERSION = '0.2.1'\n \n # map of default service APIs and ports to be spun up (fetch map from localstack_client)\n DEFAULT_SERVICE_PORTS = localstack_client.config.get_service_ports()\ndiff --git a/localstack/services/dynamodbstreams/dynamodbstreams_api.py b/localstack/services/dynamodbstreams/dynamodbstreams_api.py\n--- a/localstack/services/dynamodbstreams/dynamodbstreams_api.py\n+++ b/localstack/services/dynamodbstreams/dynamodbstreams_api.py\n@@ -66,8 +66,7 @@\n kinesis = aws_stack.connect_to_service('kinesis')\n if action == '%s.ListStreams' % ACTION_HEADER_PREFIX:\n result = {\n- 'Streams': list(DDB_STREAMS.values()),\n- 'LastEvaluatedStreamArn': 'TODO'\n+ 'Streams': list(DDB_STREAMS.values())\n }\n elif action == '%s.DescribeStream' % ACTION_HEADER_PREFIX:\n for stream in DDB_STREAMS.values():\n", "issue": "Is it possible to support dynamodbstreams ListStreams?\nSee also https://github.com/localstack/localstack/blob/master/localstack/services/dynamodbstreams/dynamodbstreams_api.py#L70 \n", "before_files": [{"content": "import json\nimport uuid\nimport hashlib\nimport six\nfrom flask import Flask, jsonify, request, make_response\nfrom localstack.services import generic_proxy\nfrom localstack.utils.aws import aws_stack\nfrom localstack.utils.common import to_str, to_bytes\nfrom localstack.utils.analytics import event_publisher\n\nAPP_NAME = 'ddb_streams_api'\n\napp = Flask(APP_NAME)\n\nDDB_STREAMS = {}\n\nDDB_KINESIS_STREAM_NAME_PREFIX = '__ddb_stream_'\n\nACTION_HEADER_PREFIX = 'DynamoDBStreams_20120810'\n\nSEQUENCE_NUMBER_COUNTER = 1\n\n\ndef add_dynamodb_stream(table_name, latest_stream_label=None, view_type='NEW_AND_OLD_IMAGES', enabled=True):\n if enabled:\n # create kinesis stream as a backend\n stream_name = get_kinesis_stream_name(table_name)\n aws_stack.create_kinesis_stream(stream_name)\n latest_stream_label = latest_stream_label or 'latest'\n stream = {\n 'StreamArn': aws_stack.dynamodb_stream_arn(\n table_name=table_name, latest_stream_label=latest_stream_label),\n 'TableName': table_name,\n 'StreamLabel': latest_stream_label,\n 'StreamStatus': 'ENABLED',\n 'KeySchema': [],\n 'Shards': []\n }\n table_arn = aws_stack.dynamodb_table_arn(table_name)\n DDB_STREAMS[table_arn] = stream\n # record event\n event_publisher.fire_event(event_publisher.EVENT_DYNAMODB_CREATE_STREAM,\n payload={'n': event_publisher.get_hash(table_name)})\n\n\ndef forward_events(records):\n global SEQUENCE_NUMBER_COUNTER\n kinesis = aws_stack.connect_to_service('kinesis')\n for record in records:\n if 'SequenceNumber' not in record['dynamodb']:\n record['dynamodb']['SequenceNumber'] = str(SEQUENCE_NUMBER_COUNTER)\n SEQUENCE_NUMBER_COUNTER += 1\n table_arn = record['eventSourceARN']\n stream = DDB_STREAMS.get(table_arn)\n if stream:\n table_name = table_name_from_stream_arn(stream['StreamArn'])\n stream_name = get_kinesis_stream_name(table_name)\n kinesis.put_record(StreamName=stream_name, Data=json.dumps(record), PartitionKey='TODO')\n\n\[email protected]('/', methods=['POST'])\ndef 
post_request():\n action = request.headers.get('x-amz-target')\n data = json.loads(to_str(request.data))\n result = {}\n kinesis = aws_stack.connect_to_service('kinesis')\n if action == '%s.ListStreams' % ACTION_HEADER_PREFIX:\n result = {\n 'Streams': list(DDB_STREAMS.values()),\n 'LastEvaluatedStreamArn': 'TODO'\n }\n elif action == '%s.DescribeStream' % ACTION_HEADER_PREFIX:\n for stream in DDB_STREAMS.values():\n if stream['StreamArn'] == data['StreamArn']:\n result = {\n 'StreamDescription': stream\n }\n # get stream details\n dynamodb = aws_stack.connect_to_service('dynamodb')\n table_name = table_name_from_stream_arn(stream['StreamArn'])\n stream_name = get_kinesis_stream_name(table_name)\n stream_details = kinesis.describe_stream(StreamName=stream_name)\n table_details = dynamodb.describe_table(TableName=table_name)\n stream['KeySchema'] = table_details['Table']['KeySchema']\n\n # Replace Kinesis ShardIDs with ones that mimic actual\n # DynamoDBStream ShardIDs.\n stream_shards = stream_details['StreamDescription']['Shards']\n for shard in stream_shards:\n shard['ShardId'] = shard_id(stream_name, shard['ShardId'])\n stream['Shards'] = stream_shards\n break\n if not result:\n return error_response('Requested resource not found', error_type='ResourceNotFoundException')\n elif action == '%s.GetShardIterator' % ACTION_HEADER_PREFIX:\n # forward request to Kinesis API\n stream_name = stream_name_from_stream_arn(data['StreamArn'])\n stream_shard_id = kinesis_shard_id(data['ShardId'])\n result = kinesis.get_shard_iterator(StreamName=stream_name,\n ShardId=stream_shard_id, ShardIteratorType=data['ShardIteratorType'])\n elif action == '%s.GetRecords' % ACTION_HEADER_PREFIX:\n kinesis_records = kinesis.get_records(**data)\n result = {'Records': [], 'NextShardIterator': kinesis_records.get('NextShardIterator')}\n for record in kinesis_records['Records']:\n result['Records'].append(json.loads(to_str(record['Data'])))\n else:\n print('WARNING: Unknown operation \"%s\"' % action)\n return jsonify(result)\n\n\n# -----------------\n# HELPER FUNCTIONS\n# -----------------\n\ndef error_response(message=None, error_type=None, code=400):\n if not message:\n message = 'Unknown error'\n if not error_type:\n error_type = 'UnknownError'\n if 'com.amazonaws.dynamodb' not in error_type:\n error_type = 'com.amazonaws.dynamodb.v20120810#%s' % error_type\n content = {\n 'message': message,\n '__type': error_type\n }\n return make_response(jsonify(content), code)\n\n\ndef get_kinesis_stream_name(table_name):\n return DDB_KINESIS_STREAM_NAME_PREFIX + table_name\n\n\ndef table_name_from_stream_arn(stream_arn):\n return stream_arn.split(':table/')[1].split('/')[0]\n\n\ndef stream_name_from_stream_arn(stream_arn):\n table_name = table_name_from_stream_arn(stream_arn)\n return get_kinesis_stream_name(table_name)\n\n\ndef random_id(stream_arn, kinesis_shard_id):\n namespace = uuid.UUID(bytes=hashlib.sha1(to_bytes(stream_arn)).digest()[:16])\n if six.PY2:\n kinesis_shard_id = to_bytes(kinesis_shard_id, 'utf-8')\n return uuid.uuid5(namespace, kinesis_shard_id).hex\n\n\ndef shard_id(stream_arn, kinesis_shard_id):\n return '-'.join([kinesis_shard_id, random_id(stream_arn, kinesis_shard_id)])\n\n\ndef kinesis_shard_id(dynamodbstream_shard_id):\n return dynamodbstream_shard_id.rsplit('-', 1)[0]\n\n\ndef serve(port, quiet=True):\n generic_proxy.serve_flask_app(app=app, port=port, quiet=quiet)\n", "path": "localstack/services/dynamodbstreams/dynamodbstreams_api.py"}, {"content": "import os\nimport 
localstack_client.config\n\n# LocalStack version\nVERSION = '0.10.7'\n\n# constant to represent the \"local\" region, i.e., local machine\nREGION_LOCAL = 'local'\n\n# dev environment\nENV_DEV = 'dev'\n\n# backend service ports, for services that are behind a proxy (counting down from 4566)\nDEFAULT_PORT_APIGATEWAY_BACKEND = 4566\nDEFAULT_PORT_KINESIS_BACKEND = 4565\nDEFAULT_PORT_DYNAMODB_BACKEND = 4564\nDEFAULT_PORT_S3_BACKEND = 4563\nDEFAULT_PORT_SNS_BACKEND = 4562\nDEFAULT_PORT_SQS_BACKEND = 4561\nDEFAULT_PORT_ELASTICSEARCH_BACKEND = 4560\nDEFAULT_PORT_CLOUDFORMATION_BACKEND = 4559\nDEFAULT_PORT_STEPFUNCTIONS_BACKEND = 4558\nDEFAULT_PORT_IAM_BACKEND = 4557\nDEFAULT_PORT_EC2_BACKEND = 4556\nDEFAULT_PORT_KMS_BACKEND = 4555\nDEFAULT_PORT_EVENTS_BACKEND = 4554\nDEFAULT_PORT_LOGS_BACKEND = 4553\n\nDEFAULT_PORT_WEB_UI = 8080\n\nLOCALHOST = 'localhost'\n\n# version of the Maven dependency with Java utility code\nLOCALSTACK_MAVEN_VERSION = '0.2.0'\n\n# map of default service APIs and ports to be spun up (fetch map from localstack_client)\nDEFAULT_SERVICE_PORTS = localstack_client.config.get_service_ports()\n\n# host to bind to when starting the services\nBIND_HOST = '0.0.0.0'\n\n# AWS user account ID used for tests\nif 'TEST_AWS_ACCOUNT_ID' not in os.environ:\n os.environ['TEST_AWS_ACCOUNT_ID'] = '000000000000'\nTEST_AWS_ACCOUNT_ID = os.environ['TEST_AWS_ACCOUNT_ID']\n\n# root code folder\nLOCALSTACK_ROOT_FOLDER = os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))\n\n# virtualenv folder\nLOCALSTACK_VENV_FOLDER = os.path.join(LOCALSTACK_ROOT_FOLDER, '.venv')\nif not os.path.isdir(LOCALSTACK_VENV_FOLDER):\n # assuming this package lives here: <python>/lib/pythonX.X/site-packages/localstack/\n LOCALSTACK_VENV_FOLDER = os.path.realpath(os.path.join(LOCALSTACK_ROOT_FOLDER, '..', '..', '..'))\n\n# API Gateway path to indicate a user request sent to the gateway\nPATH_USER_REQUEST = '_user_request_'\n\n# name of LocalStack Docker image\nDOCKER_IMAGE_NAME = 'localstack/localstack'\n\n# backdoor API path used to retrieve or update config variables\nCONFIG_UPDATE_PATH = '/?_config_'\n\n# environment variable name to tag local test runs\nENV_INTERNAL_TEST_RUN = 'LOCALSTACK_INTERNAL_TEST_RUN'\n\n# content types\nAPPLICATION_AMZ_JSON_1_0 = 'application/x-amz-json-1.0'\nAPPLICATION_AMZ_JSON_1_1 = 'application/x-amz-json-1.1'\nAPPLICATION_JSON = 'application/json'\nAPPLICATION_XML = 'application/xml'\nAPPLICATION_X_WWW_FORM_URLENCODED = 'application/x-www-form-urlencoded'\n\n# strings to indicate truthy/falsy values\nTRUE_STRINGS = ('1', 'true', 'True')\nFALSE_STRINGS = ('0', 'false', 'False')\n\n# Lambda defaults\nLAMBDA_TEST_ROLE = 'arn:aws:iam::%s:role/lambda-test-role' % TEST_AWS_ACCOUNT_ID\n\n# installation constants\nELASTICSEARCH_JAR_URL = 'https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-6.7.0.zip'\n# See https://docs.aws.amazon.com/ja_jp/elasticsearch-service/latest/developerguide/aes-supported-plugins.html\nELASTICSEARCH_PLUGIN_LIST = ['analysis-icu', 'ingest-attachment', 'analysis-kuromoji',\n 'mapper-murmur3', 'mapper-size', 'analysis-phonetic', 'analysis-smartcn', 'analysis-stempel', 'analysis-ukrainian']\n# Default ES modules to exclude (save apprx 66MB in the final image)\nELASTICSEARCH_DELETE_MODULES = ['ingest-geoip']\nELASTICMQ_JAR_URL = 'https://s3-eu-west-1.amazonaws.com/softwaremill-public/elasticmq-server-0.15.4.jar'\nSTS_JAR_URL = 
'https://repo1.maven.org/maven2/com/amazonaws/aws-java-sdk-sts/1.11.14/aws-java-sdk-sts-1.11.14.jar'\nSTEPFUNCTIONS_ZIP_URL = 'https://s3.amazonaws.com/stepfunctionslocal/StepFunctionsLocal.zip'\nKMS_URL_PATTERN = 'https://s3-eu-west-2.amazonaws.com/local-kms/localstack/v3/local-kms.<arch>.bin'\n\n# TODO: Temporarily using a fixed version of DDB in Alpine, as we're hitting a SIGSEGV JVM crash with latest\nDYNAMODB_JAR_URL_ALPINE = 'https://github.com/whummer/dynamodb-local/raw/master/etc/DynamoDBLocal.zip'\nDYNAMODB_JAR_URL = 'https://s3-us-west-2.amazonaws.com/dynamodb-local/dynamodb_local_latest.zip'\n\n# API endpoint for analytics events\nAPI_ENDPOINT = os.environ.get('API_ENDPOINT') or 'https://api.localstack.cloud/v1'\n\n# environment variable to indicates that this process is running the Web UI\nLOCALSTACK_WEB_PROCESS = 'LOCALSTACK_WEB_PROCESS'\nLOCALSTACK_INFRA_PROCESS = 'LOCALSTACK_INFRA_PROCESS'\n\n# Hardcoded AWS account ID used by moto\nMOTO_ACCOUNT_ID = '123456789012'\n\n# Default lambda registry\nDEFAULT_LAMBDA_CONTAINER_REGISTRY = 'lambci/lambda'\n", "path": "localstack/constants.py"}]}
| 3,856 | 349 |
gh_patches_debug_21107
|
rasdani/github-patches
|
git_diff
|
bridgecrewio__checkov-3043
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Checks IDs changing
Hi Team,
we would like to request that check IDs not be changed, since we allowlist some checks that we run in our environment.
E.g., https://docs.bridgecrew.io/docs/bc_aws_iam_45 says the check ID is CKV_AWS_61,
whereas the code for CKV_AWS_61 is different.
Thanks!
</issue>
<code>
[start of checkov/terraform/checks/resource/aws/IAMRoleAllowAssumeFromAccount.py]
1 import re
2
3 from checkov.common.models.enums import CheckResult, CheckCategories
4 from checkov.common.util.type_forcers import extract_policy_dict
5 from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
6 from typing import List
7
8
9 class IAMRoleAllowAssumeFromAccount(BaseResourceCheck):
10
11 def __init__(self):
12 name = "Ensure IAM role allows only specific principals in account to assume it"
13 id = "CKV_AWS_61"
14 supported_resources = ['aws_iam_role']
15 categories = [CheckCategories.IAM]
16 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
17
18 def scan_resource_conf(self, conf):
19 try:
20 assume_role_block = extract_policy_dict(conf['assume_role_policy'][0])
21 if assume_role_block and 'Statement' in assume_role_block.keys() \
22 and 'Principal' in assume_role_block['Statement'][0] \
23 and 'AWS' in assume_role_block['Statement'][0]['Principal']:
24 account_access = re.compile(r'\d{12}|arn:aws:iam::\d{12}:root')
25 if re.match(account_access, assume_role_block['Statement'][0]['Principal']['AWS']):
26 return CheckResult.FAILED
27 except Exception: # nosec
28 pass
29 return CheckResult.PASSED
30
31 def get_evaluated_keys(self) -> List[str]:
32 return ['assume_role_policy']
33
34
35 check = IAMRoleAllowAssumeFromAccount()
36
[end of checkov/terraform/checks/resource/aws/IAMRoleAllowAssumeFromAccount.py]
[start of checkov/cloudformation/checks/resource/aws/IAMRoleAllowAssumeFromAccount.py]
1 import json
2 import re
3
4 from checkov.cloudformation.checks.resource.base_resource_check import BaseResourceCheck
5 from checkov.common.models.enums import CheckResult, CheckCategories
6
7 ACCOUNT_ACCESS = re.compile(r'\d{12}|arn:aws:iam::\d{12}:root')
8
9 class IAMRoleAllowAssumeFromAccount(BaseResourceCheck):
10 def __init__(self):
11 name = "Ensure IAM role allows only specific principals in account to assume it"
12 id = "CKV_AWS_61"
13 supported_resources = ['AWS::IAM::Role']
14 categories = [CheckCategories.IAM]
15 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
16
17 def scan_resource_conf(self, conf):
18 if 'AssumeRolePolicyDocument' in conf['Properties']:
19 assume_role_policy_doc = conf['Properties']['AssumeRolePolicyDocument']
20 if isinstance(assume_role_policy_doc, dict) and 'Fn::Sub' in assume_role_policy_doc.keys():
21 policy_fn_sub_block = assume_role_policy_doc['Fn::Sub']
22 if isinstance(policy_fn_sub_block, list) and len(policy_fn_sub_block) == 2:
23 assume_role_block = json.loads(policy_fn_sub_block[0])
24 else:
25 assume_role_block = json.loads(policy_fn_sub_block)
26 elif isinstance(assume_role_policy_doc, str):
27 assume_role_block = json.loads(assume_role_policy_doc)
28 else:
29 assume_role_block = assume_role_policy_doc
30 else:
31 return CheckResult.UNKNOWN
32
33 if 'Statement' in assume_role_block.keys():
34 if isinstance(assume_role_block['Statement'], list) and 'Principal' in \
35 assume_role_block['Statement'][0]:
36 if 'AWS' in assume_role_block['Statement'][0]['Principal']:
37 if isinstance(assume_role_block['Statement'][0]['Principal']['AWS'],list) \
38 and isinstance(assume_role_block['Statement'][0]['Principal']['AWS'][0], str):
39 if re.match(ACCOUNT_ACCESS, assume_role_block['Statement'][0]['Principal']['AWS'][0]):
40 return CheckResult.FAILED
41
42 return CheckResult.PASSED
43
44
45 check = IAMRoleAllowAssumeFromAccount()
46
[end of checkov/cloudformation/checks/resource/aws/IAMRoleAllowAssumeFromAccount.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/checkov/cloudformation/checks/resource/aws/IAMRoleAllowAssumeFromAccount.py b/checkov/cloudformation/checks/resource/aws/IAMRoleAllowAssumeFromAccount.py
--- a/checkov/cloudformation/checks/resource/aws/IAMRoleAllowAssumeFromAccount.py
+++ b/checkov/cloudformation/checks/resource/aws/IAMRoleAllowAssumeFromAccount.py
@@ -8,7 +8,7 @@
class IAMRoleAllowAssumeFromAccount(BaseResourceCheck):
def __init__(self):
- name = "Ensure IAM role allows only specific principals in account to assume it"
+ name = "Ensure AWS IAM policy does not allow assume role permission across all services"
id = "CKV_AWS_61"
supported_resources = ['AWS::IAM::Role']
categories = [CheckCategories.IAM]
diff --git a/checkov/terraform/checks/resource/aws/IAMRoleAllowAssumeFromAccount.py b/checkov/terraform/checks/resource/aws/IAMRoleAllowAssumeFromAccount.py
--- a/checkov/terraform/checks/resource/aws/IAMRoleAllowAssumeFromAccount.py
+++ b/checkov/terraform/checks/resource/aws/IAMRoleAllowAssumeFromAccount.py
@@ -9,7 +9,7 @@
class IAMRoleAllowAssumeFromAccount(BaseResourceCheck):
def __init__(self):
- name = "Ensure IAM role allows only specific principals in account to assume it"
+ name = "Ensure AWS IAM policy does not allow assume role permission across all services"
id = "CKV_AWS_61"
supported_resources = ['aws_iam_role']
categories = [CheckCategories.IAM]
|
{"golden_diff": "diff --git a/checkov/cloudformation/checks/resource/aws/IAMRoleAllowAssumeFromAccount.py b/checkov/cloudformation/checks/resource/aws/IAMRoleAllowAssumeFromAccount.py\n--- a/checkov/cloudformation/checks/resource/aws/IAMRoleAllowAssumeFromAccount.py\n+++ b/checkov/cloudformation/checks/resource/aws/IAMRoleAllowAssumeFromAccount.py\n@@ -8,7 +8,7 @@\n \n class IAMRoleAllowAssumeFromAccount(BaseResourceCheck):\n def __init__(self):\n- name = \"Ensure IAM role allows only specific principals in account to assume it\"\n+ name = \"Ensure AWS IAM policy does not allow assume role permission across all services\"\n id = \"CKV_AWS_61\"\n supported_resources = ['AWS::IAM::Role']\n categories = [CheckCategories.IAM]\ndiff --git a/checkov/terraform/checks/resource/aws/IAMRoleAllowAssumeFromAccount.py b/checkov/terraform/checks/resource/aws/IAMRoleAllowAssumeFromAccount.py\n--- a/checkov/terraform/checks/resource/aws/IAMRoleAllowAssumeFromAccount.py\n+++ b/checkov/terraform/checks/resource/aws/IAMRoleAllowAssumeFromAccount.py\n@@ -9,7 +9,7 @@\n class IAMRoleAllowAssumeFromAccount(BaseResourceCheck):\n \n def __init__(self):\n- name = \"Ensure IAM role allows only specific principals in account to assume it\"\n+ name = \"Ensure AWS IAM policy does not allow assume role permission across all services\"\n id = \"CKV_AWS_61\"\n supported_resources = ['aws_iam_role']\n categories = [CheckCategories.IAM]\n", "issue": "Checks IDs changing\nHi Team,\r\n\r\nwould like to request the check IDs do not get changed since we allowlist some checks which we run in our environment.\r\nEg : https://docs.bridgecrew.io/docs/bc_aws_iam_45 Check ID says CKV_AWS_61\r\n\r\nwhereas code is different for CKV_AWS_61.\r\nThanks!\r\n\n", "before_files": [{"content": "import re\n\nfrom checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.common.util.type_forcers import extract_policy_dict\nfrom checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\nfrom typing import List\n\n\nclass IAMRoleAllowAssumeFromAccount(BaseResourceCheck):\n\n def __init__(self):\n name = \"Ensure IAM role allows only specific principals in account to assume it\"\n id = \"CKV_AWS_61\"\n supported_resources = ['aws_iam_role']\n categories = [CheckCategories.IAM]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf):\n try:\n assume_role_block = extract_policy_dict(conf['assume_role_policy'][0])\n if assume_role_block and 'Statement' in assume_role_block.keys() \\\n and 'Principal' in assume_role_block['Statement'][0] \\\n and 'AWS' in assume_role_block['Statement'][0]['Principal']:\n account_access = re.compile(r'\\d{12}|arn:aws:iam::\\d{12}:root')\n if re.match(account_access, assume_role_block['Statement'][0]['Principal']['AWS']):\n return CheckResult.FAILED\n except Exception: # nosec\n pass\n return CheckResult.PASSED\n\n def get_evaluated_keys(self) -> List[str]:\n return ['assume_role_policy']\n\n\ncheck = IAMRoleAllowAssumeFromAccount()\n", "path": "checkov/terraform/checks/resource/aws/IAMRoleAllowAssumeFromAccount.py"}, {"content": "import json\nimport re\n\nfrom checkov.cloudformation.checks.resource.base_resource_check import BaseResourceCheck\nfrom checkov.common.models.enums import CheckResult, CheckCategories\n\nACCOUNT_ACCESS = re.compile(r'\\d{12}|arn:aws:iam::\\d{12}:root')\n\nclass IAMRoleAllowAssumeFromAccount(BaseResourceCheck):\n def __init__(self):\n name = \"Ensure IAM role allows 
only specific principals in account to assume it\"\n id = \"CKV_AWS_61\"\n supported_resources = ['AWS::IAM::Role']\n categories = [CheckCategories.IAM]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf):\n if 'AssumeRolePolicyDocument' in conf['Properties']:\n assume_role_policy_doc = conf['Properties']['AssumeRolePolicyDocument']\n if isinstance(assume_role_policy_doc, dict) and 'Fn::Sub' in assume_role_policy_doc.keys():\n policy_fn_sub_block = assume_role_policy_doc['Fn::Sub']\n if isinstance(policy_fn_sub_block, list) and len(policy_fn_sub_block) == 2:\n assume_role_block = json.loads(policy_fn_sub_block[0])\n else:\n assume_role_block = json.loads(policy_fn_sub_block)\n elif isinstance(assume_role_policy_doc, str):\n assume_role_block = json.loads(assume_role_policy_doc)\n else:\n assume_role_block = assume_role_policy_doc\n else:\n return CheckResult.UNKNOWN\n\n if 'Statement' in assume_role_block.keys():\n if isinstance(assume_role_block['Statement'], list) and 'Principal' in \\\n assume_role_block['Statement'][0]:\n if 'AWS' in assume_role_block['Statement'][0]['Principal']:\n if isinstance(assume_role_block['Statement'][0]['Principal']['AWS'],list) \\\n and isinstance(assume_role_block['Statement'][0]['Principal']['AWS'][0], str):\n if re.match(ACCOUNT_ACCESS, assume_role_block['Statement'][0]['Principal']['AWS'][0]):\n return CheckResult.FAILED\n\n return CheckResult.PASSED\n\n\ncheck = IAMRoleAllowAssumeFromAccount()\n", "path": "checkov/cloudformation/checks/resource/aws/IAMRoleAllowAssumeFromAccount.py"}]}
| 1,622 | 357 |
gh_patches_debug_4476
|
rasdani/github-patches
|
git_diff
|
internetarchive__openlibrary-7789
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix git SHA on website + status page (dupe?)
<!-- What problem are we solving? What does the experience look like today? What are the symptoms? -->
The footer + https://openlibrary.org/status should both show the website's git sha. Somewhere down the road we had a regression.
### Evidence / Screenshot (if possible)
### Relevant url?
<!-- `https://openlibrary.org/...` -->
### Steps to Reproduce
<!-- What steps caused you to find the bug? -->
1. Go to ...
2. Do ...
<!-- What actually happened after these steps? What did you expect to happen? -->
* Actual:
* Expected:
### Details
- **Logged in (Y/N)?**
- **Browser type/version?**
- **Operating system?**
- **Environment (prod/dev/local)?** prod
<!-- If not sure, put prod -->
### Proposal & Constraints
<!-- What is the proposed solution / implementation? Is there a precedent of this approach succeeding elsewhere? -->
### Related files
<!-- Files related to this issue; this is super useful for new contributors who might want to help! If you're not sure, leave this blank; a maintainer will add them. -->
### Stakeholders
<!-- @ tag stakeholders of this bug -->
</issue>
<code>
[start of openlibrary/utils/__init__.py]
1 """Generic utilities"""
2
3 from enum import Enum
4 import re
5 from subprocess import run
6 from typing import TypeVar, Literal, Optional
7 from collections.abc import Iterable, Callable
8
9 to_drop = set(''';/?:@&=+$,<>#%"{}|\\^[]`\n\r''')
10
11
12 def str_to_key(s: str) -> str:
13 """
14 >>> str_to_key("?H$e##l{o}[0] -world!")
15 'helo0_-world!'
16 >>> str_to_key("".join(to_drop))
17 ''
18 >>> str_to_key("")
19 ''
20 """
21 return ''.join(c if c != ' ' else '_' for c in s.lower() if c not in to_drop)
22
23
24 def finddict(dicts, **filters):
25 """Find a dictionary that matches given filter conditions.
26
27 >>> dicts = [{"x": 1, "y": 2}, {"x": 3, "y": 4}]
28 >>> sorted(finddict(dicts, x=1).items())
29 [('x', 1), ('y', 2)]
30 """
31 for d in dicts:
32 if all(d.get(k) == v for k, v in filters.items()):
33 return d
34
35
36 T = TypeVar('T')
37
38
39 def uniq(values: Iterable[T], key=None) -> list[T]:
40 """Returns the unique entries from the given values in the original order.
41
42 The value of the optional `key` parameter should be a function that takes
43 a single argument and returns a key to test the uniqueness.
44 TODO: Moved this to core/utils.py
45
46 >>> uniq("abcbcddefefg")
47 ['a', 'b', 'c', 'd', 'e', 'f', 'g']
48 >>> uniq("011223344556677889")
49 ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
50 """
51 key = key or (lambda x: x)
52 s = set()
53 result = []
54 for v in values:
55 k = key(v)
56 if k not in s:
57 s.add(k)
58 result.append(v)
59 return result
60
61
62 def take_best(
63 items: list[T],
64 optimization: Literal["min", "max"],
65 scoring_fn: Callable[[T], float],
66 ) -> list[T]:
67 """
68 >>> take_best([], 'min', lambda x: x)
69 []
70 >>> take_best([3, 2, 1], 'min', lambda x: x)
71 [1]
72 >>> take_best([3, 4, 5], 'max', lambda x: x)
73 [5]
74 >>> take_best([4, 1, -1, -1], 'min', lambda x: x)
75 [-1, -1]
76 """
77 best_score = float("-inf") if optimization == "max" else float("inf")
78 besties = []
79 for item in items:
80 score = scoring_fn(item)
81 if (optimization == "max" and score > best_score) or (
82 optimization == "min" and score < best_score
83 ):
84 best_score = score
85 besties = [item]
86 elif score == best_score:
87 besties.append(item)
88 else:
89 continue
90 return besties
91
92
93 def multisort_best(
94 items: list[T], specs: list[tuple[Literal["min", "max"], Callable[[T], float]]]
95 ) -> Optional[T]:
96 """
97 Takes the best item, taking into account the multisorts
98
99 >>> multisort_best([], [])
100
101 >>> multisort_best([3,4,5], [('max', lambda x: x)])
102 5
103
104 >>> multisort_best([
105 ... {'provider': 'ia', 'size': 4},
106 ... {'provider': 'ia', 'size': 12},
107 ... {'provider': None, 'size': 42},
108 ... ], [
109 ... ('min', lambda x: 0 if x['provider'] == 'ia' else 1),
110 ... ('max', lambda x: x['size']),
111 ... ])
112 {'provider': 'ia', 'size': 12}
113 """
114 if not items:
115 return None
116 pool = items
117 for optimization, fn in specs:
118 # Shrink the pool down each time
119 pool = take_best(pool, optimization, fn)
120 return pool[0]
121
122
123 def dicthash(d):
124 """Dictionaries are not hashable. This function converts dictionary into nested
125 tuples, so that it can hashed.
126 """
127 if isinstance(d, dict):
128 return tuple((k, dicthash(d[k])) for k in sorted(d))
129 elif isinstance(d, list):
130 return tuple(dicthash(v) for v in d)
131 else:
132 return d
133
134
135 author_olid_embedded_re = re.compile(r'OL\d+A', re.IGNORECASE)
136
137
138 def find_author_olid_in_string(s):
139 """
140 >>> find_author_olid_in_string("ol123a")
141 'OL123A'
142 >>> find_author_olid_in_string("/authors/OL123A/edit")
143 'OL123A'
144 >>> find_author_olid_in_string("some random string")
145 """
146 found = re.search(author_olid_embedded_re, s)
147 return found and found.group(0).upper()
148
149
150 work_olid_embedded_re = re.compile(r'OL\d+W', re.IGNORECASE)
151
152
153 def find_work_olid_in_string(s):
154 """
155 >>> find_work_olid_in_string("ol123w")
156 'OL123W'
157 >>> find_work_olid_in_string("/works/OL123W/Title_of_book")
158 'OL123W'
159 >>> find_work_olid_in_string("some random string")
160 """
161 found = re.search(work_olid_embedded_re, s)
162 return found and found.group(0).upper()
163
164
165 def extract_numeric_id_from_olid(olid):
166 """
167 >>> extract_numeric_id_from_olid("OL123W")
168 '123'
169 >>> extract_numeric_id_from_olid("/authors/OL123A")
170 '123'
171 """
172 if '/' in olid:
173 olid = olid.split('/')[-1]
174 if olid.lower().startswith('ol'):
175 olid = olid[2:]
176 if not is_number(olid[-1].lower()):
177 olid = olid[:-1]
178 return olid
179
180
181 def is_number(s):
182 """
183 >>> all(is_number(n) for n in (1234, "1234", -1234, "-1234", 123.4, -123.4))
184 True
185 >>> not any(is_number(n) for n in ("123.4", "-123.4", "123a", "--1234"))
186 True
187 """
188 try:
189 int(s)
190 return True
191 except ValueError:
192 return False
193
194
195 def get_software_version() -> str:
196 cmd = "git rev-parse --short HEAD --".split()
197 return run(cmd, text=True).stdout
198
199
200 # See https://docs.python.org/3/library/enum.html#orderedenum
201 class OrderedEnum(Enum):
202 def __ge__(self, other):
203 if self.__class__ is other.__class__:
204 return self.value >= other.value
205 return NotImplemented
206
207 def __gt__(self, other):
208 if self.__class__ is other.__class__:
209 return self.value > other.value
210 return NotImplemented
211
212 def __le__(self, other):
213 if self.__class__ is other.__class__:
214 return self.value <= other.value
215 return NotImplemented
216
217 def __lt__(self, other):
218 if self.__class__ is other.__class__:
219 return self.value < other.value
220 return NotImplemented
221
[end of openlibrary/utils/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/openlibrary/utils/__init__.py b/openlibrary/utils/__init__.py
--- a/openlibrary/utils/__init__.py
+++ b/openlibrary/utils/__init__.py
@@ -193,8 +193,11 @@
def get_software_version() -> str:
+ """
+ assert get_software_version() # Should never return a falsy value
+ """
cmd = "git rev-parse --short HEAD --".split()
- return run(cmd, text=True).stdout
+ return run(cmd, capture_output=True, text=True).stdout.strip()
# See https://docs.python.org/3/library/enum.html#orderedenum
|
{"golden_diff": "diff --git a/openlibrary/utils/__init__.py b/openlibrary/utils/__init__.py\n--- a/openlibrary/utils/__init__.py\n+++ b/openlibrary/utils/__init__.py\n@@ -193,8 +193,11 @@\n \n \n def get_software_version() -> str:\n+ \"\"\"\n+ assert get_software_version() # Should never return a falsy value\n+ \"\"\"\n cmd = \"git rev-parse --short HEAD --\".split()\n- return run(cmd, text=True).stdout\n+ return run(cmd, capture_output=True, text=True).stdout.strip()\n \n \n # See https://docs.python.org/3/library/enum.html#orderedenum\n", "issue": "Fix git SHA on website + status page (dupe?)\n<!-- What problem are we solving? What does the experience look like today? What are the symptoms? -->\r\n\r\nThe footer + https://openlibrary.org/status should both show the website's git sha. Somewhere down the road we had a regression.\r\n\r\n### Evidence / Screenshot (if possible)\r\n\r\n### Relevant url?\r\n<!-- `https://openlibrary.org/...` -->\r\n\r\n### Steps to Reproduce\r\n<!-- What steps caused you to find the bug? -->\r\n1. Go to ...\r\n2. Do ...\r\n\r\n<!-- What actually happened after these steps? What did you expect to happen? -->\r\n* Actual:\r\n* Expected:\r\n\r\n### Details\r\n\r\n- **Logged in (Y/N)?**\r\n- **Browser type/version?**\r\n- **Operating system?**\r\n- **Environment (prod/dev/local)?** prod\r\n<!-- If not sure, put prod -->\r\n\r\n### Proposal & Constraints\r\n<!-- What is the proposed solution / implementation? Is there a precedent of this approach succeeding elsewhere? -->\r\n\r\n### Related files\r\n<!-- Files related to this issue; this is super useful for new contributors who might want to help! If you're not sure, leave this blank; a maintainer will add them. -->\r\n\r\n### Stakeholders\r\n<!-- @ tag stakeholders of this bug -->\r\n\n", "before_files": [{"content": "\"\"\"Generic utilities\"\"\"\n\nfrom enum import Enum\nimport re\nfrom subprocess import run\nfrom typing import TypeVar, Literal, Optional\nfrom collections.abc import Iterable, Callable\n\nto_drop = set(''';/?:@&=+$,<>#%\"{}|\\\\^[]`\\n\\r''')\n\n\ndef str_to_key(s: str) -> str:\n \"\"\"\n >>> str_to_key(\"?H$e##l{o}[0] -world!\")\n 'helo0_-world!'\n >>> str_to_key(\"\".join(to_drop))\n ''\n >>> str_to_key(\"\")\n ''\n \"\"\"\n return ''.join(c if c != ' ' else '_' for c in s.lower() if c not in to_drop)\n\n\ndef finddict(dicts, **filters):\n \"\"\"Find a dictionary that matches given filter conditions.\n\n >>> dicts = [{\"x\": 1, \"y\": 2}, {\"x\": 3, \"y\": 4}]\n >>> sorted(finddict(dicts, x=1).items())\n [('x', 1), ('y', 2)]\n \"\"\"\n for d in dicts:\n if all(d.get(k) == v for k, v in filters.items()):\n return d\n\n\nT = TypeVar('T')\n\n\ndef uniq(values: Iterable[T], key=None) -> list[T]:\n \"\"\"Returns the unique entries from the given values in the original order.\n\n The value of the optional `key` parameter should be a function that takes\n a single argument and returns a key to test the uniqueness.\n TODO: Moved this to core/utils.py\n\n >>> uniq(\"abcbcddefefg\")\n ['a', 'b', 'c', 'd', 'e', 'f', 'g']\n >>> uniq(\"011223344556677889\")\n ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']\n \"\"\"\n key = key or (lambda x: x)\n s = set()\n result = []\n for v in values:\n k = key(v)\n if k not in s:\n s.add(k)\n result.append(v)\n return result\n\n\ndef take_best(\n items: list[T],\n optimization: Literal[\"min\", \"max\"],\n scoring_fn: Callable[[T], float],\n) -> list[T]:\n \"\"\"\n >>> take_best([], 'min', lambda x: x)\n []\n >>> take_best([3, 2, 1], 'min', lambda x: x)\n [1]\n >>> 
take_best([3, 4, 5], 'max', lambda x: x)\n [5]\n >>> take_best([4, 1, -1, -1], 'min', lambda x: x)\n [-1, -1]\n \"\"\"\n best_score = float(\"-inf\") if optimization == \"max\" else float(\"inf\")\n besties = []\n for item in items:\n score = scoring_fn(item)\n if (optimization == \"max\" and score > best_score) or (\n optimization == \"min\" and score < best_score\n ):\n best_score = score\n besties = [item]\n elif score == best_score:\n besties.append(item)\n else:\n continue\n return besties\n\n\ndef multisort_best(\n items: list[T], specs: list[tuple[Literal[\"min\", \"max\"], Callable[[T], float]]]\n) -> Optional[T]:\n \"\"\"\n Takes the best item, taking into account the multisorts\n\n >>> multisort_best([], [])\n\n >>> multisort_best([3,4,5], [('max', lambda x: x)])\n 5\n\n >>> multisort_best([\n ... {'provider': 'ia', 'size': 4},\n ... {'provider': 'ia', 'size': 12},\n ... {'provider': None, 'size': 42},\n ... ], [\n ... ('min', lambda x: 0 if x['provider'] == 'ia' else 1),\n ... ('max', lambda x: x['size']),\n ... ])\n {'provider': 'ia', 'size': 12}\n \"\"\"\n if not items:\n return None\n pool = items\n for optimization, fn in specs:\n # Shrink the pool down each time\n pool = take_best(pool, optimization, fn)\n return pool[0]\n\n\ndef dicthash(d):\n \"\"\"Dictionaries are not hashable. This function converts dictionary into nested\n tuples, so that it can hashed.\n \"\"\"\n if isinstance(d, dict):\n return tuple((k, dicthash(d[k])) for k in sorted(d))\n elif isinstance(d, list):\n return tuple(dicthash(v) for v in d)\n else:\n return d\n\n\nauthor_olid_embedded_re = re.compile(r'OL\\d+A', re.IGNORECASE)\n\n\ndef find_author_olid_in_string(s):\n \"\"\"\n >>> find_author_olid_in_string(\"ol123a\")\n 'OL123A'\n >>> find_author_olid_in_string(\"/authors/OL123A/edit\")\n 'OL123A'\n >>> find_author_olid_in_string(\"some random string\")\n \"\"\"\n found = re.search(author_olid_embedded_re, s)\n return found and found.group(0).upper()\n\n\nwork_olid_embedded_re = re.compile(r'OL\\d+W', re.IGNORECASE)\n\n\ndef find_work_olid_in_string(s):\n \"\"\"\n >>> find_work_olid_in_string(\"ol123w\")\n 'OL123W'\n >>> find_work_olid_in_string(\"/works/OL123W/Title_of_book\")\n 'OL123W'\n >>> find_work_olid_in_string(\"some random string\")\n \"\"\"\n found = re.search(work_olid_embedded_re, s)\n return found and found.group(0).upper()\n\n\ndef extract_numeric_id_from_olid(olid):\n \"\"\"\n >>> extract_numeric_id_from_olid(\"OL123W\")\n '123'\n >>> extract_numeric_id_from_olid(\"/authors/OL123A\")\n '123'\n \"\"\"\n if '/' in olid:\n olid = olid.split('/')[-1]\n if olid.lower().startswith('ol'):\n olid = olid[2:]\n if not is_number(olid[-1].lower()):\n olid = olid[:-1]\n return olid\n\n\ndef is_number(s):\n \"\"\"\n >>> all(is_number(n) for n in (1234, \"1234\", -1234, \"-1234\", 123.4, -123.4))\n True\n >>> not any(is_number(n) for n in (\"123.4\", \"-123.4\", \"123a\", \"--1234\"))\n True\n \"\"\"\n try:\n int(s)\n return True\n except ValueError:\n return False\n\n\ndef get_software_version() -> str:\n cmd = \"git rev-parse --short HEAD --\".split()\n return run(cmd, text=True).stdout\n\n\n# See https://docs.python.org/3/library/enum.html#orderedenum\nclass OrderedEnum(Enum):\n def __ge__(self, other):\n if self.__class__ is other.__class__:\n return self.value >= other.value\n return NotImplemented\n\n def __gt__(self, other):\n if self.__class__ is other.__class__:\n return self.value > other.value\n return NotImplemented\n\n def __le__(self, other):\n if self.__class__ is other.__class__:\n 
return self.value <= other.value\n return NotImplemented\n\n def __lt__(self, other):\n if self.__class__ is other.__class__:\n return self.value < other.value\n return NotImplemented\n", "path": "openlibrary/utils/__init__.py"}]}
| 3,099 | 148 |
gh_patches_debug_18325
|
rasdani/github-patches
|
git_diff
|
matrix-org__synapse-5077
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
KeyError: 'expiry_template_html'
```
Traceback (most recent call last):
File "/usr/lib/python3.6/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/usr/lib/python3.6/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/home/rav/work/synapse/synapse/app/homeserver.py", line 661, in <module>
main()
File "/home/rav/work/synapse/synapse/app/homeserver.py", line 656, in main
hs = setup(sys.argv[1:])
File "/home/rav/work/synapse/synapse/app/homeserver.py", line 329, in setup
config_options,
File "/home/rav/work/synapse/synapse/config/_base.py", line 362, in load_or_generate_config
generate_keys=generate_keys,
File "/home/rav/work/synapse/synapse/config/_base.py", line 408, in read_config_files
self.parse_config_dict(config)
File "/home/rav/work/synapse/synapse/config/_base.py", line 411, in parse_config_dict
self.invoke_all("read_config", config_dict)
File "/home/rav/work/synapse/synapse/config/_base.py", line 146, in invoke_all
results.append(getattr(cls, name)(self, *args, **kargs))
File "/home/rav/work/synapse/synapse/config/emailconfig.py", line 74, in read_config
self.email_expiry_template_html = email_config["expiry_template_html"]
KeyError: 'expiry_template_html'
```
</issue>
<code>
[start of synapse/config/emailconfig.py]
1 # -*- coding: utf-8 -*-
2 # Copyright 2015, 2016 OpenMarket Ltd
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 from __future__ import print_function
17
18 # This file can't be called email.py because if it is, we cannot:
19 import email.utils
20 import logging
21 import os
22
23 import pkg_resources
24
25 from ._base import Config, ConfigError
26
27 logger = logging.getLogger(__name__)
28
29
30 class EmailConfig(Config):
31 def read_config(self, config):
32 self.email_enable_notifs = False
33
34 email_config = config.get("email", {})
35 self.email_enable_notifs = email_config.get("enable_notifs", False)
36
37 if self.email_enable_notifs:
38 # make sure we can import the required deps
39 import jinja2
40 import bleach
41 # prevent unused warnings
42 jinja2
43 bleach
44
45 required = [
46 "smtp_host",
47 "smtp_port",
48 "notif_from",
49 "notif_template_html",
50 "notif_template_text",
51 ]
52
53 missing = []
54 for k in required:
55 if k not in email_config:
56 missing.append(k)
57
58 if (len(missing) > 0):
59 raise RuntimeError(
60 "email.enable_notifs is True but required keys are missing: %s" %
61 (", ".join(["email." + k for k in missing]),)
62 )
63
64 if config.get("public_baseurl") is None:
65 raise RuntimeError(
66 "email.enable_notifs is True but no public_baseurl is set"
67 )
68
69 self.email_smtp_host = email_config["smtp_host"]
70 self.email_smtp_port = email_config["smtp_port"]
71 self.email_notif_from = email_config["notif_from"]
72 self.email_notif_template_html = email_config["notif_template_html"]
73 self.email_notif_template_text = email_config["notif_template_text"]
74 self.email_expiry_template_html = email_config["expiry_template_html"]
75 self.email_expiry_template_text = email_config["expiry_template_text"]
76
77 template_dir = email_config.get("template_dir")
78 # we need an absolute path, because we change directory after starting (and
79 # we don't yet know what auxilliary templates like mail.css we will need).
80 # (Note that loading as package_resources with jinja.PackageLoader doesn't
81 # work for the same reason.)
82 if not template_dir:
83 template_dir = pkg_resources.resource_filename(
84 'synapse', 'res/templates'
85 )
86 template_dir = os.path.abspath(template_dir)
87
88 for f in self.email_notif_template_text, self.email_notif_template_html:
89 p = os.path.join(template_dir, f)
90 if not os.path.isfile(p):
91 raise ConfigError("Unable to find email template file %s" % (p, ))
92 self.email_template_dir = template_dir
93
94 self.email_notif_for_new_users = email_config.get(
95 "notif_for_new_users", True
96 )
97 self.email_riot_base_url = email_config.get(
98 "riot_base_url", None
99 )
100 self.email_smtp_user = email_config.get(
101 "smtp_user", None
102 )
103 self.email_smtp_pass = email_config.get(
104 "smtp_pass", None
105 )
106 self.require_transport_security = email_config.get(
107 "require_transport_security", False
108 )
109 if "app_name" in email_config:
110 self.email_app_name = email_config["app_name"]
111 else:
112 self.email_app_name = "Matrix"
113
114 # make sure it's valid
115 parsed = email.utils.parseaddr(self.email_notif_from)
116 if parsed[1] == '':
117 raise RuntimeError("Invalid notif_from address")
118 else:
119 self.email_enable_notifs = False
120 # Not much point setting defaults for the rest: it would be an
121 # error for them to be used.
122
123 def default_config(self, config_dir_path, server_name, **kwargs):
124 return """
125 # Enable sending emails for notification events or expiry notices
126 # Defining a custom URL for Riot is only needed if email notifications
127 # should contain links to a self-hosted installation of Riot; when set
128 # the "app_name" setting is ignored.
129 #
130 # If your SMTP server requires authentication, the optional smtp_user &
131 # smtp_pass variables should be used
132 #
133 #email:
134 # enable_notifs: false
135 # smtp_host: "localhost"
136 # smtp_port: 25
137 # smtp_user: "exampleusername"
138 # smtp_pass: "examplepassword"
139 # require_transport_security: False
140 # notif_from: "Your Friendly %(app)s Home Server <[email protected]>"
141 # app_name: Matrix
142 # # if template_dir is unset, uses the example templates that are part of
143 # # the Synapse distribution.
144 # #template_dir: res/templates
145 # notif_template_html: notif_mail.html
146 # notif_template_text: notif_mail.txt
147 # # Templates for account expiry notices.
148 # expiry_template_html: notice_expiry.html
149 # expiry_template_text: notice_expiry.txt
150 # notif_for_new_users: True
151 # riot_base_url: "http://localhost/riot"
152 """
153
[end of synapse/config/emailconfig.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/synapse/config/emailconfig.py b/synapse/config/emailconfig.py
--- a/synapse/config/emailconfig.py
+++ b/synapse/config/emailconfig.py
@@ -71,8 +71,12 @@
self.email_notif_from = email_config["notif_from"]
self.email_notif_template_html = email_config["notif_template_html"]
self.email_notif_template_text = email_config["notif_template_text"]
- self.email_expiry_template_html = email_config["expiry_template_html"]
- self.email_expiry_template_text = email_config["expiry_template_text"]
+ self.email_expiry_template_html = email_config.get(
+ "expiry_template_html", "notice_expiry.html",
+ )
+ self.email_expiry_template_text = email_config.get(
+ "expiry_template_text", "notice_expiry.txt",
+ )
template_dir = email_config.get("template_dir")
# we need an absolute path, because we change directory after starting (and
|
{"golden_diff": "diff --git a/synapse/config/emailconfig.py b/synapse/config/emailconfig.py\n--- a/synapse/config/emailconfig.py\n+++ b/synapse/config/emailconfig.py\n@@ -71,8 +71,12 @@\n self.email_notif_from = email_config[\"notif_from\"]\n self.email_notif_template_html = email_config[\"notif_template_html\"]\n self.email_notif_template_text = email_config[\"notif_template_text\"]\n- self.email_expiry_template_html = email_config[\"expiry_template_html\"]\n- self.email_expiry_template_text = email_config[\"expiry_template_text\"]\n+ self.email_expiry_template_html = email_config.get(\n+ \"expiry_template_html\", \"notice_expiry.html\",\n+ )\n+ self.email_expiry_template_text = email_config.get(\n+ \"expiry_template_text\", \"notice_expiry.txt\",\n+ )\n \n template_dir = email_config.get(\"template_dir\")\n # we need an absolute path, because we change directory after starting (and\n", "issue": "KeyError: 'expiry_template_html'\n```\r\nTraceback (most recent call last):\r\n File \"/usr/lib/python3.6/runpy.py\", line 193, in _run_module_as_main\r\n \"__main__\", mod_spec)\r\n File \"/usr/lib/python3.6/runpy.py\", line 85, in _run_code\r\n exec(code, run_globals)\r\n File \"/home/rav/work/synapse/synapse/app/homeserver.py\", line 661, in <module>\r\n main()\r\n File \"/home/rav/work/synapse/synapse/app/homeserver.py\", line 656, in main\r\n hs = setup(sys.argv[1:])\r\n File \"/home/rav/work/synapse/synapse/app/homeserver.py\", line 329, in setup\r\n config_options,\r\n File \"/home/rav/work/synapse/synapse/config/_base.py\", line 362, in load_or_generate_config\r\n generate_keys=generate_keys,\r\n File \"/home/rav/work/synapse/synapse/config/_base.py\", line 408, in read_config_files\r\n self.parse_config_dict(config)\r\n File \"/home/rav/work/synapse/synapse/config/_base.py\", line 411, in parse_config_dict\r\n self.invoke_all(\"read_config\", config_dict)\r\n File \"/home/rav/work/synapse/synapse/config/_base.py\", line 146, in invoke_all\r\n results.append(getattr(cls, name)(self, *args, **kargs))\r\n File \"/home/rav/work/synapse/synapse/config/emailconfig.py\", line 74, in read_config\r\n self.email_expiry_template_html = email_config[\"expiry_template_html\"]\r\nKeyError: 'expiry_template_html'\r\n```\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2015, 2016 OpenMarket Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\n# This file can't be called email.py because if it is, we cannot:\nimport email.utils\nimport logging\nimport os\n\nimport pkg_resources\n\nfrom ._base import Config, ConfigError\n\nlogger = logging.getLogger(__name__)\n\n\nclass EmailConfig(Config):\n def read_config(self, config):\n self.email_enable_notifs = False\n\n email_config = config.get(\"email\", {})\n self.email_enable_notifs = email_config.get(\"enable_notifs\", False)\n\n if self.email_enable_notifs:\n # make sure we can import the required deps\n import jinja2\n import bleach\n # prevent unused warnings\n jinja2\n bleach\n\n required = 
[\n \"smtp_host\",\n \"smtp_port\",\n \"notif_from\",\n \"notif_template_html\",\n \"notif_template_text\",\n ]\n\n missing = []\n for k in required:\n if k not in email_config:\n missing.append(k)\n\n if (len(missing) > 0):\n raise RuntimeError(\n \"email.enable_notifs is True but required keys are missing: %s\" %\n (\", \".join([\"email.\" + k for k in missing]),)\n )\n\n if config.get(\"public_baseurl\") is None:\n raise RuntimeError(\n \"email.enable_notifs is True but no public_baseurl is set\"\n )\n\n self.email_smtp_host = email_config[\"smtp_host\"]\n self.email_smtp_port = email_config[\"smtp_port\"]\n self.email_notif_from = email_config[\"notif_from\"]\n self.email_notif_template_html = email_config[\"notif_template_html\"]\n self.email_notif_template_text = email_config[\"notif_template_text\"]\n self.email_expiry_template_html = email_config[\"expiry_template_html\"]\n self.email_expiry_template_text = email_config[\"expiry_template_text\"]\n\n template_dir = email_config.get(\"template_dir\")\n # we need an absolute path, because we change directory after starting (and\n # we don't yet know what auxilliary templates like mail.css we will need).\n # (Note that loading as package_resources with jinja.PackageLoader doesn't\n # work for the same reason.)\n if not template_dir:\n template_dir = pkg_resources.resource_filename(\n 'synapse', 'res/templates'\n )\n template_dir = os.path.abspath(template_dir)\n\n for f in self.email_notif_template_text, self.email_notif_template_html:\n p = os.path.join(template_dir, f)\n if not os.path.isfile(p):\n raise ConfigError(\"Unable to find email template file %s\" % (p, ))\n self.email_template_dir = template_dir\n\n self.email_notif_for_new_users = email_config.get(\n \"notif_for_new_users\", True\n )\n self.email_riot_base_url = email_config.get(\n \"riot_base_url\", None\n )\n self.email_smtp_user = email_config.get(\n \"smtp_user\", None\n )\n self.email_smtp_pass = email_config.get(\n \"smtp_pass\", None\n )\n self.require_transport_security = email_config.get(\n \"require_transport_security\", False\n )\n if \"app_name\" in email_config:\n self.email_app_name = email_config[\"app_name\"]\n else:\n self.email_app_name = \"Matrix\"\n\n # make sure it's valid\n parsed = email.utils.parseaddr(self.email_notif_from)\n if parsed[1] == '':\n raise RuntimeError(\"Invalid notif_from address\")\n else:\n self.email_enable_notifs = False\n # Not much point setting defaults for the rest: it would be an\n # error for them to be used.\n\n def default_config(self, config_dir_path, server_name, **kwargs):\n return \"\"\"\n # Enable sending emails for notification events or expiry notices\n # Defining a custom URL for Riot is only needed if email notifications\n # should contain links to a self-hosted installation of Riot; when set\n # the \"app_name\" setting is ignored.\n #\n # If your SMTP server requires authentication, the optional smtp_user &\n # smtp_pass variables should be used\n #\n #email:\n # enable_notifs: false\n # smtp_host: \"localhost\"\n # smtp_port: 25\n # smtp_user: \"exampleusername\"\n # smtp_pass: \"examplepassword\"\n # require_transport_security: False\n # notif_from: \"Your Friendly %(app)s Home Server <[email protected]>\"\n # app_name: Matrix\n # # if template_dir is unset, uses the example templates that are part of\n # # the Synapse distribution.\n # #template_dir: res/templates\n # notif_template_html: notif_mail.html\n # notif_template_text: notif_mail.txt\n # # Templates for account expiry notices.\n # 
expiry_template_html: notice_expiry.html\n # expiry_template_text: notice_expiry.txt\n # notif_for_new_users: True\n # riot_base_url: \"http://localhost/riot\"\n \"\"\"\n", "path": "synapse/config/emailconfig.py"}]}
| 2,535 | 209 |
gh_patches_debug_13349
|
rasdani/github-patches
|
git_diff
|
python-poetry__poetry-3583
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
--short has no effect when `poetry version` is passed a new version
<!--
Hi there! Thank you for discovering and submitting an issue.
Before you submit this; let's make sure of a few things.
Please make sure the following boxes are ticked if they are correct.
If not, please try and fulfill these first.
-->
<!-- Checked checkbox should look like this: [x] -->
- [x] I am on the [latest](https://github.com/python-poetry/poetry/releases/latest) Poetry version.
- [x] I have searched the [issues](https://github.com/python-poetry/poetry/issues) of this repo and believe that this is not a duplicate.
- [x] If an exception occurs when executing a command, I executed it again in debug mode (`-vvv` option).
<!--
Once those are done, if you're able to fill in the following list with your information,
it'd be very helpful to whoever handles the issue.
-->
- **OS version and name**: Ubuntu 16.04
- **Poetry version**: 1.1.4
- **Link of a [Gist](https://gist.github.com/) with the contents of your pyproject.toml file**: n/a
## Issue
<!-- Now feel free to write your issue, but please be descriptive! Thanks again 🙌 ❤️ -->
Hi there!
Assuming the current version of a project is 0.1.0, this is what happens when trying to update to the next patch version with the `--short` option:
```console
$ poetry version patch --short
Bumping version from 0.1.0 to 0.1.1
```
Instead, I would like this to be output (only the new version number):
```console
$ poetry version patch --short
0.1.1
```
My use case is scripting: if the user only provides a bump rule to my script, I'd like to be able to easily parse the new version computed by poetry so that I can e.g. create a new git tag easily, and so on.
Thanks!
</issue>
<code>
[start of poetry/console/commands/version.py]
1 from cleo import argument
2 from cleo import option
3
4 from .command import Command
5
6
7 class VersionCommand(Command):
8
9 name = "version"
10 description = (
11 "Shows the version of the project or bumps it when a valid "
12 "bump rule is provided."
13 )
14
15 arguments = [
16 argument(
17 "version",
18 "The version number or the rule to update the version.",
19 optional=True,
20 )
21 ]
22 options = [option("short", "s", "Output the version number only")]
23
24 help = """\
25 The version command shows the current version of the project or bumps the version of
26 the project and writes the new version back to <comment>pyproject.toml</> if a valid
27 bump rule is provided.
28
29 The new version should ideally be a valid semver string or a valid bump rule:
30 patch, minor, major, prepatch, preminor, premajor, prerelease.
31 """
32
33 RESERVED = {
34 "major",
35 "minor",
36 "patch",
37 "premajor",
38 "preminor",
39 "prepatch",
40 "prerelease",
41 }
42
43 def handle(self):
44 version = self.argument("version")
45
46 if version:
47 version = self.increment_version(
48 self.poetry.package.pretty_version, version
49 )
50
51 self.line(
52 "Bumping version from <b>{}</> to <fg=green>{}</>".format(
53 self.poetry.package.pretty_version, version
54 )
55 )
56
57 content = self.poetry.file.read()
58 poetry_content = content["tool"]["poetry"]
59 poetry_content["version"] = version.text
60
61 self.poetry.file.write(content)
62 else:
63 if self.option("short"):
64 self.line("{}".format(self.poetry.package.pretty_version))
65 else:
66 self.line(
67 "<comment>{}</> <info>{}</>".format(
68 self.poetry.package.name, self.poetry.package.pretty_version
69 )
70 )
71
72 def increment_version(self, version, rule):
73 from poetry.core.semver import Version
74
75 try:
76 version = Version.parse(version)
77 except ValueError:
78 raise ValueError("The project's version doesn't seem to follow semver")
79
80 if rule in {"major", "premajor"}:
81 new = version.next_major
82 if rule == "premajor":
83 new = new.first_prerelease
84 elif rule in {"minor", "preminor"}:
85 new = version.next_minor
86 if rule == "preminor":
87 new = new.first_prerelease
88 elif rule in {"patch", "prepatch"}:
89 new = version.next_patch
90 if rule == "prepatch":
91 new = new.first_prerelease
92 elif rule == "prerelease":
93 if version.is_prerelease():
94 pre = version.prerelease
95 new_prerelease = int(pre[1]) + 1
96 new = Version.parse(
97 "{}.{}.{}-{}".format(
98 version.major,
99 version.minor,
100 version.patch,
101 ".".join([pre[0], str(new_prerelease)]),
102 )
103 )
104 else:
105 new = version.next_patch.first_prerelease
106 else:
107 new = Version.parse(rule)
108
109 return new
110
[end of poetry/console/commands/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/poetry/console/commands/version.py b/poetry/console/commands/version.py
--- a/poetry/console/commands/version.py
+++ b/poetry/console/commands/version.py
@@ -48,11 +48,14 @@
self.poetry.package.pretty_version, version
)
- self.line(
- "Bumping version from <b>{}</> to <fg=green>{}</>".format(
- self.poetry.package.pretty_version, version
+ if self.option("short"):
+ self.line("{}".format(version))
+ else:
+ self.line(
+ "Bumping version from <b>{}</> to <fg=green>{}</>".format(
+ self.poetry.package.pretty_version, version
+ )
)
- )
content = self.poetry.file.read()
poetry_content = content["tool"]["poetry"]
|
{"golden_diff": "diff --git a/poetry/console/commands/version.py b/poetry/console/commands/version.py\n--- a/poetry/console/commands/version.py\n+++ b/poetry/console/commands/version.py\n@@ -48,11 +48,14 @@\n self.poetry.package.pretty_version, version\n )\n \n- self.line(\n- \"Bumping version from <b>{}</> to <fg=green>{}</>\".format(\n- self.poetry.package.pretty_version, version\n+ if self.option(\"short\"):\n+ self.line(\"{}\".format(version))\n+ else:\n+ self.line(\n+ \"Bumping version from <b>{}</> to <fg=green>{}</>\".format(\n+ self.poetry.package.pretty_version, version\n+ )\n )\n- )\n \n content = self.poetry.file.read()\n poetry_content = content[\"tool\"][\"poetry\"]\n", "issue": "--short has no effect when `poetry version` is passed a new version\n<!--\r\n Hi there! Thank you for discovering and submitting an issue.\r\n\r\n Before you submit this; let's make sure of a few things.\r\n Please make sure the following boxes are ticked if they are correct.\r\n If not, please try and fulfill these first.\r\n-->\r\n\r\n<!-- Checked checkbox should look like this: [x] -->\r\n- [x] I am on the [latest](https://github.com/python-poetry/poetry/releases/latest) Poetry version.\r\n- [x] I have searched the [issues](https://github.com/python-poetry/poetry/issues) of this repo and believe that this is not a duplicate.\r\n- [x] If an exception occurs when executing a command, I executed it again in debug mode (`-vvv` option).\r\n\r\n<!--\r\n Once those are done, if you're able to fill in the following list with your information,\r\n it'd be very helpful to whoever handles the issue.\r\n-->\r\n\r\n- **OS version and name**: Ubuntu 16.04\r\n- **Poetry version**: 1.1.4\r\n- **Link of a [Gist](https://gist.github.com/) with the contents of your pyproject.toml file**: n/a\r\n\r\n## Issue\r\n<!-- Now feel free to write your issue, but please be descriptive! Thanks again \ud83d\ude4c \u2764\ufe0f -->\r\n\r\nHi there!\r\n\r\nAssuming the current version of a project is 0.1.0, this is what happens when trying to update to the next patch version with the `--short` option:\r\n\r\n```console\r\n$ poetry version patch --short\r\nBumping version from 0.1.0 to 0.1.1\r\n```\r\n\r\nInstead, I would like this to be output (only the new version number):\r\n\r\n```console\r\n$ poetry version patch --short\r\n0.1.1\r\n```\r\n\r\nMy use case is scripting: if the user only provides a bump rule to my script, I'd like to be able to easily parse the new version computed by poetry so that I can e.g. 
create a new git tag easily, and so on.\r\n\r\nThanks!\n", "before_files": [{"content": "from cleo import argument\nfrom cleo import option\n\nfrom .command import Command\n\n\nclass VersionCommand(Command):\n\n name = \"version\"\n description = (\n \"Shows the version of the project or bumps it when a valid \"\n \"bump rule is provided.\"\n )\n\n arguments = [\n argument(\n \"version\",\n \"The version number or the rule to update the version.\",\n optional=True,\n )\n ]\n options = [option(\"short\", \"s\", \"Output the version number only\")]\n\n help = \"\"\"\\\nThe version command shows the current version of the project or bumps the version of\nthe project and writes the new version back to <comment>pyproject.toml</> if a valid\nbump rule is provided.\n\nThe new version should ideally be a valid semver string or a valid bump rule:\npatch, minor, major, prepatch, preminor, premajor, prerelease.\n\"\"\"\n\n RESERVED = {\n \"major\",\n \"minor\",\n \"patch\",\n \"premajor\",\n \"preminor\",\n \"prepatch\",\n \"prerelease\",\n }\n\n def handle(self):\n version = self.argument(\"version\")\n\n if version:\n version = self.increment_version(\n self.poetry.package.pretty_version, version\n )\n\n self.line(\n \"Bumping version from <b>{}</> to <fg=green>{}</>\".format(\n self.poetry.package.pretty_version, version\n )\n )\n\n content = self.poetry.file.read()\n poetry_content = content[\"tool\"][\"poetry\"]\n poetry_content[\"version\"] = version.text\n\n self.poetry.file.write(content)\n else:\n if self.option(\"short\"):\n self.line(\"{}\".format(self.poetry.package.pretty_version))\n else:\n self.line(\n \"<comment>{}</> <info>{}</>\".format(\n self.poetry.package.name, self.poetry.package.pretty_version\n )\n )\n\n def increment_version(self, version, rule):\n from poetry.core.semver import Version\n\n try:\n version = Version.parse(version)\n except ValueError:\n raise ValueError(\"The project's version doesn't seem to follow semver\")\n\n if rule in {\"major\", \"premajor\"}:\n new = version.next_major\n if rule == \"premajor\":\n new = new.first_prerelease\n elif rule in {\"minor\", \"preminor\"}:\n new = version.next_minor\n if rule == \"preminor\":\n new = new.first_prerelease\n elif rule in {\"patch\", \"prepatch\"}:\n new = version.next_patch\n if rule == \"prepatch\":\n new = new.first_prerelease\n elif rule == \"prerelease\":\n if version.is_prerelease():\n pre = version.prerelease\n new_prerelease = int(pre[1]) + 1\n new = Version.parse(\n \"{}.{}.{}-{}\".format(\n version.major,\n version.minor,\n version.patch,\n \".\".join([pre[0], str(new_prerelease)]),\n )\n )\n else:\n new = version.next_patch.first_prerelease\n else:\n new = Version.parse(rule)\n\n return new\n", "path": "poetry/console/commands/version.py"}]}
| 1,907 | 197 |
gh_patches_debug_3951
|
rasdani/github-patches
|
git_diff
|
ARM-DOE__ACT-837
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
AmeriFlux Documentation is not showing up in the API
The new act.io.ameriflux code is not showing up in the documentation.
</issue>
<code>
[start of act/io/__init__.py]
1 """
2 This module contains procedures for reading and writing various ARM datasets.
3
4 """
5
6 import lazy_loader as lazy
7
8 __getattr__, __dir__, __all__ = lazy.attach(
9 __name__,
10 submodules=[
11 'arm',
12 'ameriflux',
13 'text',
14 'icartt',
15 'mpl',
16 'neon',
17 'noaagml',
18 'noaapsl',
19 'pysp2',
20 'hysplit',
21 ],
22 submod_attrs={
23 'arm': [
24 'WriteDataset',
25 'check_arm_standards',
26 'create_ds_from_arm_dod',
27 'read_arm_netcdf',
28 'check_if_tar_gz_file',
29 'read_arm_mmcr',
30 ],
31 'ameriflux': ['format_as_ameriflux'],
32 'text': ['read_csv'],
33 'icartt': ['read_icartt'],
34 'mpl': ['proc_sigma_mplv5_read', 'read_sigma_mplv5'],
35 'neon': ['read_neon_csv'],
36 'noaagml': [
37 'read_gml',
38 'read_gml_co2',
39 'read_gml_halo',
40 'read_gml_met',
41 'read_gml_ozone',
42 'read_gml_radiation',
43 'read_surfrad',
44 ],
45 'noaapsl': [
46 'read_psl_wind_profiler',
47 'read_psl_wind_profiler_temperature',
48 'read_psl_parsivel',
49 'read_psl_radar_fmcw_moment',
50 'read_psl_surface_met',
51 ],
52 'pysp2': ['read_hk_file', 'read_sp2', 'read_sp2_dat'],
53 'sodar': ['read_mfas_sodar'],
54 'hysplit': ['read_hysplit'],
55 },
56 )
57
[end of act/io/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/act/io/__init__.py b/act/io/__init__.py
--- a/act/io/__init__.py
+++ b/act/io/__init__.py
@@ -28,7 +28,7 @@
'check_if_tar_gz_file',
'read_arm_mmcr',
],
- 'ameriflux': ['format_as_ameriflux'],
+ 'ameriflux': ['convert_to_ameriflux'],
'text': ['read_csv'],
'icartt': ['read_icartt'],
'mpl': ['proc_sigma_mplv5_read', 'read_sigma_mplv5'],
|
{"golden_diff": "diff --git a/act/io/__init__.py b/act/io/__init__.py\n--- a/act/io/__init__.py\n+++ b/act/io/__init__.py\n@@ -28,7 +28,7 @@\n 'check_if_tar_gz_file',\n 'read_arm_mmcr',\n ],\n- 'ameriflux': ['format_as_ameriflux'],\n+ 'ameriflux': ['convert_to_ameriflux'],\n 'text': ['read_csv'],\n 'icartt': ['read_icartt'],\n 'mpl': ['proc_sigma_mplv5_read', 'read_sigma_mplv5'],\n", "issue": "AmeriFlux Documentation is not showing up in the API\nThe new act.io.ameriflux code is not showing up in the documentation.\n", "before_files": [{"content": "\"\"\"\nThis module contains procedures for reading and writing various ARM datasets.\n\n\"\"\"\n\nimport lazy_loader as lazy\n\n__getattr__, __dir__, __all__ = lazy.attach(\n __name__,\n submodules=[\n 'arm',\n 'ameriflux',\n 'text',\n 'icartt',\n 'mpl',\n 'neon',\n 'noaagml',\n 'noaapsl',\n 'pysp2',\n 'hysplit',\n ],\n submod_attrs={\n 'arm': [\n 'WriteDataset',\n 'check_arm_standards',\n 'create_ds_from_arm_dod',\n 'read_arm_netcdf',\n 'check_if_tar_gz_file',\n 'read_arm_mmcr',\n ],\n 'ameriflux': ['format_as_ameriflux'],\n 'text': ['read_csv'],\n 'icartt': ['read_icartt'],\n 'mpl': ['proc_sigma_mplv5_read', 'read_sigma_mplv5'],\n 'neon': ['read_neon_csv'],\n 'noaagml': [\n 'read_gml',\n 'read_gml_co2',\n 'read_gml_halo',\n 'read_gml_met',\n 'read_gml_ozone',\n 'read_gml_radiation',\n 'read_surfrad',\n ],\n 'noaapsl': [\n 'read_psl_wind_profiler',\n 'read_psl_wind_profiler_temperature',\n 'read_psl_parsivel',\n 'read_psl_radar_fmcw_moment',\n 'read_psl_surface_met',\n ],\n 'pysp2': ['read_hk_file', 'read_sp2', 'read_sp2_dat'],\n 'sodar': ['read_mfas_sodar'],\n 'hysplit': ['read_hysplit'],\n },\n)\n", "path": "act/io/__init__.py"}]}
| 1,071 | 141 |
gh_patches_debug_13418
|
rasdani/github-patches
|
git_diff
|
scikit-hep__awkward-2111
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`fill_none` doesn't move through indexed types
### Version of Awkward Array
main
### Description and code to reproduce
The failure mode here is where we have a lazily carried `RecordArray`, which then contains an option. Our existing logic returns early if it doesn't see an option, union, or record.
</issue>
<code>
[start of src/awkward/operations/ak_fill_none.py]
1 # BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
2
3 import numbers
4
5 import awkward as ak
6
7 np = ak._nplikes.NumpyMetadata.instance()
8 cpu = ak._backends.NumpyBackend.instance()
9
10
11 def fill_none(array, value, axis=-1, *, highlevel=True, behavior=None):
12 """
13 Args:
14 array: Array-like data (anything #ak.to_layout recognizes).
15 value: Data with which to replace None.
16 axis (None or int): If None, replace all None values in the array
17 with the given value; if an int, The dimension at which this
18 operation is applied. The outermost dimension is `0`, followed
19 by `1`, etc., and negative values count backward from the
20 innermost: `-1` is the innermost dimension, `-2` is the next
21 level up, etc.
22 highlevel (bool): If True, return an #ak.Array; otherwise, return
23 a low-level #ak.contents.Content subclass.
24 behavior (None or dict): Custom #ak.behavior for the output array, if
25 high-level.
26
27 Replaces missing values (None) with a given `value`.
28
29 For example, in the following
30
31 >>> array = ak.Array([[1.1, None, 2.2], [], [None, 3.3, 4.4]])
32
33 The None values could be replaced with `0` by
34
35 >>> ak.fill_none(array, 0)
36 <Array [[1.1, 0, 2.2], [], [0, 3.3, 4.4]] type='3 * var * float64'>
37
38 The replacement value doesn't strictly need the same type as the
39 surrounding data. For example, the None values could also be replaced
40 by a string.
41
42 >>> ak.fill_none(array, "hi")
43 <Array [[1.1, 'hi', 2.2], [], ['hi', ...]] type='3 * var * union[float64, s...'>
44
45 The list content now has a union type:
46
47 >>> ak.fill_none(array, "hi").type.show()
48 3 * var * union[
49 float64,
50 string
51 ]
52
53 The values could be floating-point numbers or strings.
54 """
55 with ak._errors.OperationErrorContext(
56 "ak.fill_none",
57 dict(
58 array=array, value=value, axis=axis, highlevel=highlevel, behavior=behavior
59 ),
60 ):
61 return _impl(array, value, axis, highlevel, behavior)
62
63
64 def _impl(array, value, axis, highlevel, behavior):
65 arraylayout = ak.operations.to_layout(array, allow_record=True, allow_other=False)
66 behavior = ak._util.behavior_of(array, value, behavior=behavior)
67 backend = ak._backends.backend_of(arraylayout, default=cpu)
68
69 # Convert value type to appropriate layout
70 if (
71 isinstance(value, np.ndarray)
72 and issubclass(value.dtype.type, (np.bool_, np.number))
73 and len(value.shape) != 0
74 ):
75 valuelayout = ak.operations.to_layout(
76 backend.nplike.asarray(value)[np.newaxis],
77 allow_record=False,
78 allow_other=False,
79 )
80 elif isinstance(value, (bool, numbers.Number, np.bool_, np.number)) or (
81 isinstance(value, np.ndarray)
82 and issubclass(value.dtype.type, (np.bool_, np.number))
83 ):
84 valuelayout = ak.operations.to_layout(
85 backend.nplike.asarray(value), allow_record=False, allow_other=False
86 )
87 elif (
88 ak._util.is_sized_iterable(value)
89 and not (isinstance(value, (str, bytes)))
90 or isinstance(value, (ak.highlevel.Record, ak.record.Record))
91 ):
92 valuelayout = ak.operations.to_layout(
93 value, allow_record=True, allow_other=False
94 )
95 if isinstance(valuelayout, ak.record.Record):
96 valuelayout = valuelayout.array[valuelayout.at : valuelayout.at + 1]
97 elif len(valuelayout) == 0:
98 offsets = ak.index.Index64(
99 backend.index_nplike.array([0, 0], dtype=np.int64)
100 )
101 valuelayout = ak.contents.ListOffsetArray(offsets, valuelayout)
102 else:
103 valuelayout = ak.contents.RegularArray(valuelayout, len(valuelayout), 1)
104 else:
105 valuelayout = ak.operations.to_layout(
106 [value], allow_record=False, allow_other=False
107 )
108
109 if axis is None:
110
111 def action(layout, continuation, **kwargs):
112 if layout.is_option:
113 return ak._do.fill_none(continuation(), valuelayout)
114
115 else:
116
117 def action(layout, depth, **kwargs):
118 posaxis = ak._util.maybe_posaxis(layout, axis, depth)
119 if posaxis is not None and posaxis + 1 == depth:
120 if layout.is_union or layout.is_record:
121 return None
122 elif layout.is_option:
123 return ak._do.fill_none(layout, valuelayout)
124 else:
125 return layout
126
127 elif layout.is_leaf:
128 raise ak._errors.wrap_error(
129 np.AxisError(
130 f"axis={axis} exceeds the depth of this array ({depth})"
131 )
132 )
133
134 out = ak._do.recursively_apply(arraylayout, action, behavior)
135 return ak._util.wrap(out, behavior, highlevel)
136
[end of src/awkward/operations/ak_fill_none.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/awkward/operations/ak_fill_none.py b/src/awkward/operations/ak_fill_none.py
--- a/src/awkward/operations/ak_fill_none.py
+++ b/src/awkward/operations/ak_fill_none.py
@@ -117,10 +117,10 @@
def action(layout, depth, **kwargs):
posaxis = ak._util.maybe_posaxis(layout, axis, depth)
if posaxis is not None and posaxis + 1 == depth:
- if layout.is_union or layout.is_record:
- return None
- elif layout.is_option:
+ if layout.is_option:
return ak._do.fill_none(layout, valuelayout)
+ elif layout.is_union or layout.is_record or layout.is_indexed:
+ return None
else:
return layout
|
{"golden_diff": "diff --git a/src/awkward/operations/ak_fill_none.py b/src/awkward/operations/ak_fill_none.py\n--- a/src/awkward/operations/ak_fill_none.py\n+++ b/src/awkward/operations/ak_fill_none.py\n@@ -117,10 +117,10 @@\n def action(layout, depth, **kwargs):\n posaxis = ak._util.maybe_posaxis(layout, axis, depth)\n if posaxis is not None and posaxis + 1 == depth:\n- if layout.is_union or layout.is_record:\n- return None\n- elif layout.is_option:\n+ if layout.is_option:\n return ak._do.fill_none(layout, valuelayout)\n+ elif layout.is_union or layout.is_record or layout.is_indexed:\n+ return None\n else:\n return layout\n", "issue": "`fill_none` doesn't move through indexed types\n### Version of Awkward Array\n\nmain\n\n### Description and code to reproduce\n\nThe failure mode here is where we have a lazy carryd `RecordArray`, which then contains an option. Our existing logic returns early if it doesn't see an option, union, or record.\n", "before_files": [{"content": "# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE\n\nimport numbers\n\nimport awkward as ak\n\nnp = ak._nplikes.NumpyMetadata.instance()\ncpu = ak._backends.NumpyBackend.instance()\n\n\ndef fill_none(array, value, axis=-1, *, highlevel=True, behavior=None):\n \"\"\"\n Args:\n array: Array-like data (anything #ak.to_layout recognizes).\n value: Data with which to replace None.\n axis (None or int): If None, replace all None values in the array\n with the given value; if an int, The dimension at which this\n operation is applied. The outermost dimension is `0`, followed\n by `1`, etc., and negative values count backward from the\n innermost: `-1` is the innermost dimension, `-2` is the next\n level up, etc.\n highlevel (bool): If True, return an #ak.Array; otherwise, return\n a low-level #ak.contents.Content subclass.\n behavior (None or dict): Custom #ak.behavior for the output array, if\n high-level.\n\n Replaces missing values (None) with a given `value`.\n\n For example, in the following\n\n >>> array = ak.Array([[1.1, None, 2.2], [], [None, 3.3, 4.4]])\n\n The None values could be replaced with `0` by\n\n >>> ak.fill_none(array, 0)\n <Array [[1.1, 0, 2.2], [], [0, 3.3, 4.4]] type='3 * var * float64'>\n\n The replacement value doesn't strictly need the same type as the\n surrounding data. 
For example, the None values could also be replaced\n by a string.\n\n >>> ak.fill_none(array, \"hi\")\n <Array [[1.1, 'hi', 2.2], [], ['hi', ...]] type='3 * var * union[float64, s...'>\n\n The list content now has a union type:\n\n >>> ak.fill_none(array, \"hi\").type.show()\n 3 * var * union[\n float64,\n string\n ]\n\n The values could be floating-point numbers or strings.\n \"\"\"\n with ak._errors.OperationErrorContext(\n \"ak.fill_none\",\n dict(\n array=array, value=value, axis=axis, highlevel=highlevel, behavior=behavior\n ),\n ):\n return _impl(array, value, axis, highlevel, behavior)\n\n\ndef _impl(array, value, axis, highlevel, behavior):\n arraylayout = ak.operations.to_layout(array, allow_record=True, allow_other=False)\n behavior = ak._util.behavior_of(array, value, behavior=behavior)\n backend = ak._backends.backend_of(arraylayout, default=cpu)\n\n # Convert value type to appropriate layout\n if (\n isinstance(value, np.ndarray)\n and issubclass(value.dtype.type, (np.bool_, np.number))\n and len(value.shape) != 0\n ):\n valuelayout = ak.operations.to_layout(\n backend.nplike.asarray(value)[np.newaxis],\n allow_record=False,\n allow_other=False,\n )\n elif isinstance(value, (bool, numbers.Number, np.bool_, np.number)) or (\n isinstance(value, np.ndarray)\n and issubclass(value.dtype.type, (np.bool_, np.number))\n ):\n valuelayout = ak.operations.to_layout(\n backend.nplike.asarray(value), allow_record=False, allow_other=False\n )\n elif (\n ak._util.is_sized_iterable(value)\n and not (isinstance(value, (str, bytes)))\n or isinstance(value, (ak.highlevel.Record, ak.record.Record))\n ):\n valuelayout = ak.operations.to_layout(\n value, allow_record=True, allow_other=False\n )\n if isinstance(valuelayout, ak.record.Record):\n valuelayout = valuelayout.array[valuelayout.at : valuelayout.at + 1]\n elif len(valuelayout) == 0:\n offsets = ak.index.Index64(\n backend.index_nplike.array([0, 0], dtype=np.int64)\n )\n valuelayout = ak.contents.ListOffsetArray(offsets, valuelayout)\n else:\n valuelayout = ak.contents.RegularArray(valuelayout, len(valuelayout), 1)\n else:\n valuelayout = ak.operations.to_layout(\n [value], allow_record=False, allow_other=False\n )\n\n if axis is None:\n\n def action(layout, continuation, **kwargs):\n if layout.is_option:\n return ak._do.fill_none(continuation(), valuelayout)\n\n else:\n\n def action(layout, depth, **kwargs):\n posaxis = ak._util.maybe_posaxis(layout, axis, depth)\n if posaxis is not None and posaxis + 1 == depth:\n if layout.is_union or layout.is_record:\n return None\n elif layout.is_option:\n return ak._do.fill_none(layout, valuelayout)\n else:\n return layout\n\n elif layout.is_leaf:\n raise ak._errors.wrap_error(\n np.AxisError(\n f\"axis={axis} exceeds the depth of this array ({depth})\"\n )\n )\n\n out = ak._do.recursively_apply(arraylayout, action, behavior)\n return ak._util.wrap(out, behavior, highlevel)\n", "path": "src/awkward/operations/ak_fill_none.py"}]}
| 2,112 | 186 |
gh_patches_debug_2766
|
rasdani/github-patches
|
git_diff
|
DataDog__dd-trace-py-3119
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
aioredis integration: Tracing breaks pipeline as context managers
Hello,
looks like the `aioredis` integration breaks the interface of `Pipeline` objects as context managers:
```py
RuntimeWarning: coroutine 'traced_pipeline' was never awaited
```
and
```py
async with redis.pipeline(transaction=True) as pipe:
AttributeError: __aexit__
```
This is bad since the documented of usage is exactly as context managers (see https://aioredis.readthedocs.io/en/latest/migration/#pipelines-and-transactions-multiexec).
The fix for now is to just use pipelines outside of contexts, without relying on them as context managers, but that is less than ideal.
`ddtrace` is the latest version (`0.57.0`).
</issue>
<code>
[start of ddtrace/contrib/aioredis/patch.py]
1 import sys
2
3 import aioredis
4
5 from ddtrace import config
6 from ddtrace.internal.utils.wrappers import unwrap as _u
7 from ddtrace.pin import Pin
8 from ddtrace.vendor.wrapt import wrap_function_wrapper as _w
9
10 from .. import trace_utils
11 from ...constants import ANALYTICS_SAMPLE_RATE_KEY
12 from ...constants import SPAN_MEASURED_KEY
13 from ...ext import SpanTypes
14 from ...ext import net
15 from ...ext import redis as redisx
16 from ..redis.util import _trace_redis_cmd
17 from ..redis.util import _trace_redis_execute_pipeline
18 from ..redis.util import format_command_args
19
20
21 try:
22 from aioredis.commands.transaction import _RedisBuffer
23 except ImportError:
24 _RedisBuffer = None
25
26 config._add("aioredis", dict(_default_service="redis"))
27
28 aioredis_version_str = getattr(aioredis, "__version__", "0.0.0")
29 aioredis_version = tuple([int(i) for i in aioredis_version_str.split(".")])
30
31
32 def patch():
33 if getattr(aioredis, "_datadog_patch", False):
34 return
35 setattr(aioredis, "_datadog_patch", True)
36 pin = Pin()
37 if aioredis_version >= (2, 0):
38 _w("aioredis.client", "Redis.execute_command", traced_execute_command)
39 _w("aioredis.client", "Redis.pipeline", traced_pipeline)
40 _w("aioredis.client", "Pipeline.execute", traced_execute_pipeline)
41 pin.onto(aioredis.client.Redis)
42 else:
43 _w("aioredis", "Redis.execute", traced_13_execute_command)
44 _w("aioredis", "Redis.pipeline", traced_13_pipeline)
45 _w("aioredis.commands.transaction", "Pipeline.execute", traced_13_execute_pipeline)
46 pin.onto(aioredis.Redis)
47
48
49 def unpatch():
50 if not getattr(aioredis, "_datadog_patch", False):
51 return
52
53 setattr(aioredis, "_datadog_patch", False)
54 if aioredis_version >= (2, 0):
55 _u(aioredis.client.Redis, "execute_command")
56 _u(aioredis.client.Redis, "pipeline")
57 _u(aioredis.client.Pipeline, "execute")
58 else:
59 _u(aioredis.Redis, "execute")
60 _u(aioredis.Redis, "pipeline")
61 _u(aioredis.commands.transaction.Pipeline, "execute")
62
63
64 async def traced_execute_command(func, instance, args, kwargs):
65 pin = Pin.get_from(instance)
66 if not pin or not pin.enabled():
67 return await func(*args, **kwargs)
68
69 with _trace_redis_cmd(pin, config.aioredis, instance, args):
70 return await func(*args, **kwargs)
71
72
73 async def traced_pipeline(func, instance, args, kwargs):
74 pipeline = await func(*args, **kwargs)
75 pin = Pin.get_from(instance)
76 if pin:
77 pin.onto(pipeline)
78 return pipeline
79
80
81 async def traced_execute_pipeline(func, instance, args, kwargs):
82 pin = Pin.get_from(instance)
83 if not pin or not pin.enabled():
84 return await func(*args, **kwargs)
85
86 cmds = [format_command_args(c) for c, _ in instance.command_stack]
87 resource = "\n".join(cmds)
88 with _trace_redis_execute_pipeline(pin, config.aioredis, resource, instance):
89 return await func(*args, **kwargs)
90
91
92 def traced_13_pipeline(func, instance, args, kwargs):
93 pipeline = func(*args, **kwargs)
94 pin = Pin.get_from(instance)
95 if pin:
96 pin.onto(pipeline)
97 return pipeline
98
99
100 def traced_13_execute_command(func, instance, args, kwargs):
101 # If we have a _RedisBuffer then we are in a pipeline
102 if isinstance(instance.connection, _RedisBuffer):
103 return func(*args, **kwargs)
104
105 pin = Pin.get_from(instance)
106 if not pin or not pin.enabled():
107 return func(*args, **kwargs)
108
109 # Don't activate the span since this operation is performed as a future which concludes sometime later on in
110 # execution so subsequent operations in the stack are not necessarily semantically related
111 # (we don't want this span to be the parent of all other spans created before the future is resolved)
112 span = pin.tracer.start_span(
113 redisx.CMD, service=trace_utils.ext_service(pin, config.aioredis), span_type=SpanTypes.REDIS, activate=False
114 )
115
116 span.set_tag(SPAN_MEASURED_KEY)
117 query = format_command_args(args)
118 span.resource = query
119 span.set_tag(redisx.RAWCMD, query)
120 if pin.tags:
121 span.set_tags(pin.tags)
122
123 span.set_tags(
124 {
125 net.TARGET_HOST: instance.address[0],
126 net.TARGET_PORT: instance.address[1],
127 redisx.DB: instance.db or 0,
128 }
129 )
130 span.set_metric(redisx.ARGS_LEN, len(args))
131 # set analytics sample rate if enabled
132 span.set_tag(ANALYTICS_SAMPLE_RATE_KEY, config.aioredis.get_analytics_sample_rate())
133
134 def _finish_span(future):
135 try:
136 # Accessing the result will raise an exception if:
137 # - The future was cancelled
138 # - There was an error executing the future (`future.exception()`)
139 # - The future is in an invalid state
140 future.result()
141 except Exception:
142 span.set_exc_info(*sys.exc_info())
143 finally:
144 span.finish()
145
146 task = func(*args, **kwargs)
147 task.add_done_callback(_finish_span)
148 return task
149
150
151 async def traced_13_execute_pipeline(func, instance, args, kwargs):
152 pin = Pin.get_from(instance)
153 if not pin or not pin.enabled():
154 return await func(*args, **kwargs)
155
156 cmds = []
157 for _, cmd, cmd_args, _ in instance._pipeline:
158 parts = [cmd]
159 parts.extend(cmd_args)
160 cmds.append(format_command_args(parts))
161 resource = "\n".join(cmds)
162 with pin.tracer.trace(
163 redisx.CMD,
164 resource=resource,
165 service=trace_utils.ext_service(pin, config.aioredis),
166 span_type=SpanTypes.REDIS,
167 ) as span:
168
169 span.set_tags(
170 {
171 net.TARGET_HOST: instance._pool_or_conn.address[0],
172 net.TARGET_PORT: instance._pool_or_conn.address[1],
173 redisx.DB: instance._pool_or_conn.db or 0,
174 }
175 )
176
177 span.set_tag(SPAN_MEASURED_KEY)
178 span.set_tag(redisx.RAWCMD, resource)
179 span.set_metric(redisx.PIPELINE_LEN, len(instance._pipeline))
180 # set analytics sample rate if enabled
181 span.set_tag(ANALYTICS_SAMPLE_RATE_KEY, config.aioredis.get_analytics_sample_rate())
182
183 return await func(*args, **kwargs)
184
[end of ddtrace/contrib/aioredis/patch.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/ddtrace/contrib/aioredis/patch.py b/ddtrace/contrib/aioredis/patch.py
--- a/ddtrace/contrib/aioredis/patch.py
+++ b/ddtrace/contrib/aioredis/patch.py
@@ -70,8 +70,8 @@
return await func(*args, **kwargs)
-async def traced_pipeline(func, instance, args, kwargs):
- pipeline = await func(*args, **kwargs)
+def traced_pipeline(func, instance, args, kwargs):
+ pipeline = func(*args, **kwargs)
pin = Pin.get_from(instance)
if pin:
pin.onto(pipeline)
|
{"golden_diff": "diff --git a/ddtrace/contrib/aioredis/patch.py b/ddtrace/contrib/aioredis/patch.py\n--- a/ddtrace/contrib/aioredis/patch.py\n+++ b/ddtrace/contrib/aioredis/patch.py\n@@ -70,8 +70,8 @@\n return await func(*args, **kwargs)\n \n \n-async def traced_pipeline(func, instance, args, kwargs):\n- pipeline = await func(*args, **kwargs)\n+def traced_pipeline(func, instance, args, kwargs):\n+ pipeline = func(*args, **kwargs)\n pin = Pin.get_from(instance)\n if pin:\n pin.onto(pipeline)\n", "issue": "aioredis integration: Tracing breaks pipeline as context managers\nHello,\r\n\r\nlooks like the `aioredis` integration breaks the interface of `Pipeline` objects as context managers:\r\n\r\n```py\r\nRuntimeWarning: coroutine 'traced_pipeline' was never awaited\r\n```\r\n\r\nand\r\n\r\n```py\r\nasync with redis.pipeline(transaction=True) as pipe:\r\nAttributeError: __aexit__\r\n```\r\n\r\nThis is bad since the documented of usage is exactly as context managers (see https://aioredis.readthedocs.io/en/latest/migration/#pipelines-and-transactions-multiexec).\r\n\r\nThe fix for now is to just use pipelines outside of contexts, without relying on them as context managers, but that is less than ideal.\r\n\r\n`ddtrace` is the latest version (`0.57.0`).\n", "before_files": [{"content": "import sys\n\nimport aioredis\n\nfrom ddtrace import config\nfrom ddtrace.internal.utils.wrappers import unwrap as _u\nfrom ddtrace.pin import Pin\nfrom ddtrace.vendor.wrapt import wrap_function_wrapper as _w\n\nfrom .. import trace_utils\nfrom ...constants import ANALYTICS_SAMPLE_RATE_KEY\nfrom ...constants import SPAN_MEASURED_KEY\nfrom ...ext import SpanTypes\nfrom ...ext import net\nfrom ...ext import redis as redisx\nfrom ..redis.util import _trace_redis_cmd\nfrom ..redis.util import _trace_redis_execute_pipeline\nfrom ..redis.util import format_command_args\n\n\ntry:\n from aioredis.commands.transaction import _RedisBuffer\nexcept ImportError:\n _RedisBuffer = None\n\nconfig._add(\"aioredis\", dict(_default_service=\"redis\"))\n\naioredis_version_str = getattr(aioredis, \"__version__\", \"0.0.0\")\naioredis_version = tuple([int(i) for i in aioredis_version_str.split(\".\")])\n\n\ndef patch():\n if getattr(aioredis, \"_datadog_patch\", False):\n return\n setattr(aioredis, \"_datadog_patch\", True)\n pin = Pin()\n if aioredis_version >= (2, 0):\n _w(\"aioredis.client\", \"Redis.execute_command\", traced_execute_command)\n _w(\"aioredis.client\", \"Redis.pipeline\", traced_pipeline)\n _w(\"aioredis.client\", \"Pipeline.execute\", traced_execute_pipeline)\n pin.onto(aioredis.client.Redis)\n else:\n _w(\"aioredis\", \"Redis.execute\", traced_13_execute_command)\n _w(\"aioredis\", \"Redis.pipeline\", traced_13_pipeline)\n _w(\"aioredis.commands.transaction\", \"Pipeline.execute\", traced_13_execute_pipeline)\n pin.onto(aioredis.Redis)\n\n\ndef unpatch():\n if not getattr(aioredis, \"_datadog_patch\", False):\n return\n\n setattr(aioredis, \"_datadog_patch\", False)\n if aioredis_version >= (2, 0):\n _u(aioredis.client.Redis, \"execute_command\")\n _u(aioredis.client.Redis, \"pipeline\")\n _u(aioredis.client.Pipeline, \"execute\")\n else:\n _u(aioredis.Redis, \"execute\")\n _u(aioredis.Redis, \"pipeline\")\n _u(aioredis.commands.transaction.Pipeline, \"execute\")\n\n\nasync def traced_execute_command(func, instance, args, kwargs):\n pin = Pin.get_from(instance)\n if not pin or not pin.enabled():\n return await func(*args, **kwargs)\n\n with _trace_redis_cmd(pin, config.aioredis, instance, args):\n return await 
func(*args, **kwargs)\n\n\nasync def traced_pipeline(func, instance, args, kwargs):\n pipeline = await func(*args, **kwargs)\n pin = Pin.get_from(instance)\n if pin:\n pin.onto(pipeline)\n return pipeline\n\n\nasync def traced_execute_pipeline(func, instance, args, kwargs):\n pin = Pin.get_from(instance)\n if not pin or not pin.enabled():\n return await func(*args, **kwargs)\n\n cmds = [format_command_args(c) for c, _ in instance.command_stack]\n resource = \"\\n\".join(cmds)\n with _trace_redis_execute_pipeline(pin, config.aioredis, resource, instance):\n return await func(*args, **kwargs)\n\n\ndef traced_13_pipeline(func, instance, args, kwargs):\n pipeline = func(*args, **kwargs)\n pin = Pin.get_from(instance)\n if pin:\n pin.onto(pipeline)\n return pipeline\n\n\ndef traced_13_execute_command(func, instance, args, kwargs):\n # If we have a _RedisBuffer then we are in a pipeline\n if isinstance(instance.connection, _RedisBuffer):\n return func(*args, **kwargs)\n\n pin = Pin.get_from(instance)\n if not pin or not pin.enabled():\n return func(*args, **kwargs)\n\n # Don't activate the span since this operation is performed as a future which concludes sometime later on in\n # execution so subsequent operations in the stack are not necessarily semantically related\n # (we don't want this span to be the parent of all other spans created before the future is resolved)\n span = pin.tracer.start_span(\n redisx.CMD, service=trace_utils.ext_service(pin, config.aioredis), span_type=SpanTypes.REDIS, activate=False\n )\n\n span.set_tag(SPAN_MEASURED_KEY)\n query = format_command_args(args)\n span.resource = query\n span.set_tag(redisx.RAWCMD, query)\n if pin.tags:\n span.set_tags(pin.tags)\n\n span.set_tags(\n {\n net.TARGET_HOST: instance.address[0],\n net.TARGET_PORT: instance.address[1],\n redisx.DB: instance.db or 0,\n }\n )\n span.set_metric(redisx.ARGS_LEN, len(args))\n # set analytics sample rate if enabled\n span.set_tag(ANALYTICS_SAMPLE_RATE_KEY, config.aioredis.get_analytics_sample_rate())\n\n def _finish_span(future):\n try:\n # Accessing the result will raise an exception if:\n # - The future was cancelled\n # - There was an error executing the future (`future.exception()`)\n # - The future is in an invalid state\n future.result()\n except Exception:\n span.set_exc_info(*sys.exc_info())\n finally:\n span.finish()\n\n task = func(*args, **kwargs)\n task.add_done_callback(_finish_span)\n return task\n\n\nasync def traced_13_execute_pipeline(func, instance, args, kwargs):\n pin = Pin.get_from(instance)\n if not pin or not pin.enabled():\n return await func(*args, **kwargs)\n\n cmds = []\n for _, cmd, cmd_args, _ in instance._pipeline:\n parts = [cmd]\n parts.extend(cmd_args)\n cmds.append(format_command_args(parts))\n resource = \"\\n\".join(cmds)\n with pin.tracer.trace(\n redisx.CMD,\n resource=resource,\n service=trace_utils.ext_service(pin, config.aioredis),\n span_type=SpanTypes.REDIS,\n ) as span:\n\n span.set_tags(\n {\n net.TARGET_HOST: instance._pool_or_conn.address[0],\n net.TARGET_PORT: instance._pool_or_conn.address[1],\n redisx.DB: instance._pool_or_conn.db or 0,\n }\n )\n\n span.set_tag(SPAN_MEASURED_KEY)\n span.set_tag(redisx.RAWCMD, resource)\n span.set_metric(redisx.PIPELINE_LEN, len(instance._pipeline))\n # set analytics sample rate if enabled\n span.set_tag(ANALYTICS_SAMPLE_RATE_KEY, config.aioredis.get_analytics_sample_rate())\n\n return await func(*args, **kwargs)\n", "path": "ddtrace/contrib/aioredis/patch.py"}]}
| 2,663 | 146 |
gh_patches_debug_37498
|
rasdani/github-patches
|
git_diff
|
scikit-hep__pyhf-1965
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
staterror regressions blocking v0.7.0
Before `v0.7.0rc2` can go out and then `v0.7.0` we have to fix the regressions in `staterror`. This Issue is just to keep track of them and help us squash them all.
* [ ] Issue #1720
* [x] Issue #1944
</issue>
<code>
[start of src/pyhf/modifiers/staterror.py]
1 import logging
2 from typing import List
3
4 import pyhf
5 from pyhf import events
6 from pyhf.exceptions import InvalidModifier
7 from pyhf.parameters import ParamViewer
8 from pyhf.tensor.manager import get_backend
9
10 log = logging.getLogger(__name__)
11
12
13 def required_parset(sigmas, fixed: List[bool]):
14 n_parameters = len(sigmas)
15 return {
16 'paramset_type': 'constrained_by_normal',
17 'n_parameters': n_parameters,
18 'is_shared': True,
19 'is_scalar': False,
20 'inits': (1.0,) * n_parameters,
21 'bounds': ((1e-10, 10.0),) * n_parameters,
22 'fixed': tuple(fixed),
23 'auxdata': (1.0,) * n_parameters,
24 'sigmas': tuple(sigmas),
25 }
26
27
28 class staterror_builder:
29 """Builder class for collecting staterror modifier data"""
30
31 def __init__(self, config):
32 self.builder_data = {}
33 self.config = config
34 self.required_parsets = {}
35
36 def collect(self, thismod, nom):
37 uncrt = thismod['data'] if thismod else [0.0] * len(nom)
38 mask = [True if thismod else False] * len(nom)
39 return {'mask': mask, 'nom_data': nom, 'uncrt': uncrt}
40
41 def append(self, key, channel, sample, thismod, defined_samp):
42 self.builder_data.setdefault(key, {}).setdefault(sample, {}).setdefault(
43 'data', {'uncrt': [], 'nom_data': [], 'mask': []}
44 )
45 nom = (
46 defined_samp['data']
47 if defined_samp
48 else [0.0] * self.config.channel_nbins[channel]
49 )
50 moddata = self.collect(thismod, nom)
51 self.builder_data[key][sample]['data']['mask'].append(moddata['mask'])
52 self.builder_data[key][sample]['data']['uncrt'].append(moddata['uncrt'])
53 self.builder_data[key][sample]['data']['nom_data'].append(moddata['nom_data'])
54
55 def finalize(self):
56 default_backend = pyhf.default_backend
57
58 for modifier_name, modifier in self.builder_data.items():
59 for sample_name, sample in modifier.items():
60 sample["data"]["mask"] = default_backend.concatenate(
61 sample["data"]["mask"]
62 )
63 sample["data"]["uncrt"] = default_backend.concatenate(
64 sample["data"]["uncrt"]
65 )
66 sample["data"]["nom_data"] = default_backend.concatenate(
67 sample["data"]["nom_data"]
68 )
69 if len(sample["data"]["nom_data"]) != len(sample["data"]["uncrt"]):
70 _modifier_type, _modifier_name = modifier_name.split("/")
71 _sample_data_len = len(sample["data"]["nom_data"])
72 _uncrt_len = len(sample["data"]["uncrt"])
73 raise InvalidModifier(
74 f"The '{sample_name}' sample {_modifier_type} modifier"
75 + f" '{_modifier_name}' has data shape inconsistent with the sample.\n"
76 + f"{sample_name} has 'data' of length {_sample_data_len} but {_modifier_name}"
77 + f" has 'data' of length {_uncrt_len}."
78 )
79
80 for modname in self.builder_data.keys():
81 parname = modname.split('/')[1]
82
83 nomsall = default_backend.sum(
84 [
85 modifier_data['data']['nom_data']
86 for modifier_data in self.builder_data[modname].values()
87 if default_backend.astensor(modifier_data['data']['mask']).any()
88 ],
89 axis=0,
90 )
91 relerrs = default_backend.sum(
92 [
93 [
94 (modifier_data['data']['uncrt'][binnr] / nomsall[binnr]) ** 2
95 if nomsall[binnr] > 0
96 else 0.0
97 for binnr in range(len(modifier_data['data']['nom_data']))
98 ]
99 for modifier_data in self.builder_data[modname].values()
100 ],
101 axis=0,
102 )
103 relerrs = default_backend.sqrt(relerrs)
104
105 masks = {}
106 for modifier_data in self.builder_data[modname].values():
107 mask_this_sample = default_backend.astensor(
108 modifier_data['data']['mask'], dtype='bool'
109 )
110 if mask_this_sample.any():
111 if modname not in masks:
112 masks[modname] = mask_this_sample
113 else:
114 assert (mask_this_sample == masks[modname]).all()
115
116 for modifier_data in self.builder_data[modname].values():
117 modifier_data['data']['mask'] = masks[modname]
118 sigmas = relerrs[masks[modname]]
119 # list of bools, consistent with other modifiers (no numpy.bool_)
120 fixed = default_backend.tolist(sigmas == 0)
121 # ensures non-Nan constraint term, but in a future PR we need to remove constraints for these
122 sigmas[fixed] = 1.0
123 self.required_parsets.setdefault(parname, [required_parset(sigmas, fixed)])
124 return self.builder_data
125
126
127 class staterror_combined:
128 name = 'staterror'
129 op_code = 'multiplication'
130
131 def __init__(self, modifiers, pdfconfig, builder_data, batch_size=None):
132
133 default_backend = pyhf.default_backend
134 self.batch_size = batch_size
135
136 keys = [f'{mtype}/{m}' for m, mtype in modifiers]
137 self._staterr_mods = [m for m, _ in modifiers]
138
139 parfield_shape = (self.batch_size or 1, pdfconfig.npars)
140 self.param_viewer = ParamViewer(
141 parfield_shape, pdfconfig.par_map, self._staterr_mods
142 )
143
144 self._staterror_mask = [
145 [[builder_data[m][s]['data']['mask']] for s in pdfconfig.samples]
146 for m in keys
147 ]
148 self.__staterror_uncrt = default_backend.astensor(
149 [
150 [
151 [
152 builder_data[m][s]['data']['uncrt'],
153 builder_data[m][s]['data']['nom_data'],
154 ]
155 for s in pdfconfig.samples
156 ]
157 for m in keys
158 ]
159 )
160 global_concatenated_bin_indices = [
161 [[j for c in pdfconfig.channels for j in range(pdfconfig.channel_nbins[c])]]
162 ]
163
164 self._access_field = default_backend.tile(
165 global_concatenated_bin_indices,
166 (len(self._staterr_mods), self.batch_size or 1, 1),
167 )
168
169 self._reindex_access_field(pdfconfig)
170
171 self._precompute()
172 events.subscribe('tensorlib_changed')(self._precompute)
173
174 def _reindex_access_field(self, pdfconfig):
175 default_backend = pyhf.default_backend
176 for syst_index, syst_access in enumerate(self._access_field):
177 singular_sample_index = [
178 idx
179 for idx, syst in enumerate(
180 default_backend.astensor(self._staterror_mask)[syst_index, :, 0]
181 )
182 if any(syst)
183 ][-1]
184
185 for batch_index, batch_access in enumerate(syst_access):
186 selection = self.param_viewer.index_selection[syst_index][batch_index]
187 access_field_for_syst_and_batch = default_backend.zeros(
188 len(batch_access)
189 )
190
191 sample_mask = self._staterror_mask[syst_index][singular_sample_index][0]
192 access_field_for_syst_and_batch[sample_mask] = selection
193 self._access_field[
194 syst_index, batch_index
195 ] = access_field_for_syst_and_batch
196
197 def _precompute(self):
198 if not self.param_viewer.index_selection:
199 return
200 tensorlib, _ = get_backend()
201 self.staterror_mask = tensorlib.astensor(self._staterror_mask, dtype="bool")
202 self.staterror_mask = tensorlib.tile(
203 self.staterror_mask, (1, 1, self.batch_size or 1, 1)
204 )
205 self.access_field = tensorlib.astensor(self._access_field, dtype='int')
206 self.sample_ones = tensorlib.ones(tensorlib.shape(self.staterror_mask)[1])
207 self.staterror_default = tensorlib.ones(tensorlib.shape(self.staterror_mask))
208
209 def apply(self, pars):
210 if not self.param_viewer.index_selection:
211 return
212
213 tensorlib, _ = get_backend()
214 if self.batch_size is None:
215 flat_pars = pars
216 else:
217 flat_pars = tensorlib.reshape(pars, (-1,))
218 statfactors = tensorlib.gather(flat_pars, self.access_field)
219 results_staterr = tensorlib.einsum('mab,s->msab', statfactors, self.sample_ones)
220 results_staterr = tensorlib.where(
221 self.staterror_mask, results_staterr, self.staterror_default
222 )
223 return results_staterr
224
[end of src/pyhf/modifiers/staterror.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/pyhf/modifiers/staterror.py b/src/pyhf/modifiers/staterror.py
--- a/src/pyhf/modifiers/staterror.py
+++ b/src/pyhf/modifiers/staterror.py
@@ -100,8 +100,12 @@
],
axis=0,
)
+ # here relerrs still has all the bins, while the staterror are usually per-channel
+ # so we need to pick out the masks for this modifier to extract the
+ # modifier configuration (sigmas, etc..)
+ # so loop over samples and extract the first mask
+ # while making sure any subsequent mask is consistent
relerrs = default_backend.sqrt(relerrs)
-
masks = {}
for modifier_data in self.builder_data[modname].values():
mask_this_sample = default_backend.astensor(
@@ -113,12 +117,14 @@
else:
assert (mask_this_sample == masks[modname]).all()
- for modifier_data in self.builder_data[modname].values():
- modifier_data['data']['mask'] = masks[modname]
+ # extract sigmas using this modifiers mask
sigmas = relerrs[masks[modname]]
+
# list of bools, consistent with other modifiers (no numpy.bool_)
fixed = default_backend.tolist(sigmas == 0)
- # ensures non-Nan constraint term, but in a future PR we need to remove constraints for these
+ # FIXME: sigmas that are zero will be fixed to 1.0 arbitrarily to ensure
+ # non-Nan constraint term, but in a future PR need to remove constraints
+ # for these
sigmas[fixed] = 1.0
self.required_parsets.setdefault(parname, [required_parset(sigmas, fixed)])
return self.builder_data
@@ -145,18 +151,6 @@
[[builder_data[m][s]['data']['mask']] for s in pdfconfig.samples]
for m in keys
]
- self.__staterror_uncrt = default_backend.astensor(
- [
- [
- [
- builder_data[m][s]['data']['uncrt'],
- builder_data[m][s]['data']['nom_data'],
- ]
- for s in pdfconfig.samples
- ]
- for m in keys
- ]
- )
global_concatenated_bin_indices = [
[[j for c in pdfconfig.channels for j in range(pdfconfig.channel_nbins[c])]]
]
|
{"golden_diff": "diff --git a/src/pyhf/modifiers/staterror.py b/src/pyhf/modifiers/staterror.py\n--- a/src/pyhf/modifiers/staterror.py\n+++ b/src/pyhf/modifiers/staterror.py\n@@ -100,8 +100,12 @@\n ],\n axis=0,\n )\n+ # here relerrs still has all the bins, while the staterror are usually per-channel\n+ # so we need to pick out the masks for this modifier to extract the\n+ # modifier configuration (sigmas, etc..)\n+ # so loop over samples and extract the first mask\n+ # while making sure any subsequent mask is consistent\n relerrs = default_backend.sqrt(relerrs)\n-\n masks = {}\n for modifier_data in self.builder_data[modname].values():\n mask_this_sample = default_backend.astensor(\n@@ -113,12 +117,14 @@\n else:\n assert (mask_this_sample == masks[modname]).all()\n \n- for modifier_data in self.builder_data[modname].values():\n- modifier_data['data']['mask'] = masks[modname]\n+ # extract sigmas using this modifiers mask\n sigmas = relerrs[masks[modname]]\n+\n # list of bools, consistent with other modifiers (no numpy.bool_)\n fixed = default_backend.tolist(sigmas == 0)\n- # ensures non-Nan constraint term, but in a future PR we need to remove constraints for these\n+ # FIXME: sigmas that are zero will be fixed to 1.0 arbitrarily to ensure\n+ # non-Nan constraint term, but in a future PR need to remove constraints\n+ # for these\n sigmas[fixed] = 1.0\n self.required_parsets.setdefault(parname, [required_parset(sigmas, fixed)])\n return self.builder_data\n@@ -145,18 +151,6 @@\n [[builder_data[m][s]['data']['mask']] for s in pdfconfig.samples]\n for m in keys\n ]\n- self.__staterror_uncrt = default_backend.astensor(\n- [\n- [\n- [\n- builder_data[m][s]['data']['uncrt'],\n- builder_data[m][s]['data']['nom_data'],\n- ]\n- for s in pdfconfig.samples\n- ]\n- for m in keys\n- ]\n- )\n global_concatenated_bin_indices = [\n [[j for c in pdfconfig.channels for j in range(pdfconfig.channel_nbins[c])]]\n ]\n", "issue": "staterror regressions blocking v0.7.0\nBefore `v0.7.0rc2` can go out and then `v0.7.0` we have to fix the regressions in `staterror`. 
This Issue is just to keep track of them and help us squash them all.\r\n\r\n* [ ] Issue #1720\r\n* [x] Issue #1944\n", "before_files": [{"content": "import logging\nfrom typing import List\n\nimport pyhf\nfrom pyhf import events\nfrom pyhf.exceptions import InvalidModifier\nfrom pyhf.parameters import ParamViewer\nfrom pyhf.tensor.manager import get_backend\n\nlog = logging.getLogger(__name__)\n\n\ndef required_parset(sigmas, fixed: List[bool]):\n n_parameters = len(sigmas)\n return {\n 'paramset_type': 'constrained_by_normal',\n 'n_parameters': n_parameters,\n 'is_shared': True,\n 'is_scalar': False,\n 'inits': (1.0,) * n_parameters,\n 'bounds': ((1e-10, 10.0),) * n_parameters,\n 'fixed': tuple(fixed),\n 'auxdata': (1.0,) * n_parameters,\n 'sigmas': tuple(sigmas),\n }\n\n\nclass staterror_builder:\n \"\"\"Builder class for collecting staterror modifier data\"\"\"\n\n def __init__(self, config):\n self.builder_data = {}\n self.config = config\n self.required_parsets = {}\n\n def collect(self, thismod, nom):\n uncrt = thismod['data'] if thismod else [0.0] * len(nom)\n mask = [True if thismod else False] * len(nom)\n return {'mask': mask, 'nom_data': nom, 'uncrt': uncrt}\n\n def append(self, key, channel, sample, thismod, defined_samp):\n self.builder_data.setdefault(key, {}).setdefault(sample, {}).setdefault(\n 'data', {'uncrt': [], 'nom_data': [], 'mask': []}\n )\n nom = (\n defined_samp['data']\n if defined_samp\n else [0.0] * self.config.channel_nbins[channel]\n )\n moddata = self.collect(thismod, nom)\n self.builder_data[key][sample]['data']['mask'].append(moddata['mask'])\n self.builder_data[key][sample]['data']['uncrt'].append(moddata['uncrt'])\n self.builder_data[key][sample]['data']['nom_data'].append(moddata['nom_data'])\n\n def finalize(self):\n default_backend = pyhf.default_backend\n\n for modifier_name, modifier in self.builder_data.items():\n for sample_name, sample in modifier.items():\n sample[\"data\"][\"mask\"] = default_backend.concatenate(\n sample[\"data\"][\"mask\"]\n )\n sample[\"data\"][\"uncrt\"] = default_backend.concatenate(\n sample[\"data\"][\"uncrt\"]\n )\n sample[\"data\"][\"nom_data\"] = default_backend.concatenate(\n sample[\"data\"][\"nom_data\"]\n )\n if len(sample[\"data\"][\"nom_data\"]) != len(sample[\"data\"][\"uncrt\"]):\n _modifier_type, _modifier_name = modifier_name.split(\"/\")\n _sample_data_len = len(sample[\"data\"][\"nom_data\"])\n _uncrt_len = len(sample[\"data\"][\"uncrt\"])\n raise InvalidModifier(\n f\"The '{sample_name}' sample {_modifier_type} modifier\"\n + f\" '{_modifier_name}' has data shape inconsistent with the sample.\\n\"\n + f\"{sample_name} has 'data' of length {_sample_data_len} but {_modifier_name}\"\n + f\" has 'data' of length {_uncrt_len}.\"\n )\n\n for modname in self.builder_data.keys():\n parname = modname.split('/')[1]\n\n nomsall = default_backend.sum(\n [\n modifier_data['data']['nom_data']\n for modifier_data in self.builder_data[modname].values()\n if default_backend.astensor(modifier_data['data']['mask']).any()\n ],\n axis=0,\n )\n relerrs = default_backend.sum(\n [\n [\n (modifier_data['data']['uncrt'][binnr] / nomsall[binnr]) ** 2\n if nomsall[binnr] > 0\n else 0.0\n for binnr in range(len(modifier_data['data']['nom_data']))\n ]\n for modifier_data in self.builder_data[modname].values()\n ],\n axis=0,\n )\n relerrs = default_backend.sqrt(relerrs)\n\n masks = {}\n for modifier_data in self.builder_data[modname].values():\n mask_this_sample = default_backend.astensor(\n modifier_data['data']['mask'], 
dtype='bool'\n )\n if mask_this_sample.any():\n if modname not in masks:\n masks[modname] = mask_this_sample\n else:\n assert (mask_this_sample == masks[modname]).all()\n\n for modifier_data in self.builder_data[modname].values():\n modifier_data['data']['mask'] = masks[modname]\n sigmas = relerrs[masks[modname]]\n # list of bools, consistent with other modifiers (no numpy.bool_)\n fixed = default_backend.tolist(sigmas == 0)\n # ensures non-Nan constraint term, but in a future PR we need to remove constraints for these\n sigmas[fixed] = 1.0\n self.required_parsets.setdefault(parname, [required_parset(sigmas, fixed)])\n return self.builder_data\n\n\nclass staterror_combined:\n name = 'staterror'\n op_code = 'multiplication'\n\n def __init__(self, modifiers, pdfconfig, builder_data, batch_size=None):\n\n default_backend = pyhf.default_backend\n self.batch_size = batch_size\n\n keys = [f'{mtype}/{m}' for m, mtype in modifiers]\n self._staterr_mods = [m for m, _ in modifiers]\n\n parfield_shape = (self.batch_size or 1, pdfconfig.npars)\n self.param_viewer = ParamViewer(\n parfield_shape, pdfconfig.par_map, self._staterr_mods\n )\n\n self._staterror_mask = [\n [[builder_data[m][s]['data']['mask']] for s in pdfconfig.samples]\n for m in keys\n ]\n self.__staterror_uncrt = default_backend.astensor(\n [\n [\n [\n builder_data[m][s]['data']['uncrt'],\n builder_data[m][s]['data']['nom_data'],\n ]\n for s in pdfconfig.samples\n ]\n for m in keys\n ]\n )\n global_concatenated_bin_indices = [\n [[j for c in pdfconfig.channels for j in range(pdfconfig.channel_nbins[c])]]\n ]\n\n self._access_field = default_backend.tile(\n global_concatenated_bin_indices,\n (len(self._staterr_mods), self.batch_size or 1, 1),\n )\n\n self._reindex_access_field(pdfconfig)\n\n self._precompute()\n events.subscribe('tensorlib_changed')(self._precompute)\n\n def _reindex_access_field(self, pdfconfig):\n default_backend = pyhf.default_backend\n for syst_index, syst_access in enumerate(self._access_field):\n singular_sample_index = [\n idx\n for idx, syst in enumerate(\n default_backend.astensor(self._staterror_mask)[syst_index, :, 0]\n )\n if any(syst)\n ][-1]\n\n for batch_index, batch_access in enumerate(syst_access):\n selection = self.param_viewer.index_selection[syst_index][batch_index]\n access_field_for_syst_and_batch = default_backend.zeros(\n len(batch_access)\n )\n\n sample_mask = self._staterror_mask[syst_index][singular_sample_index][0]\n access_field_for_syst_and_batch[sample_mask] = selection\n self._access_field[\n syst_index, batch_index\n ] = access_field_for_syst_and_batch\n\n def _precompute(self):\n if not self.param_viewer.index_selection:\n return\n tensorlib, _ = get_backend()\n self.staterror_mask = tensorlib.astensor(self._staterror_mask, dtype=\"bool\")\n self.staterror_mask = tensorlib.tile(\n self.staterror_mask, (1, 1, self.batch_size or 1, 1)\n )\n self.access_field = tensorlib.astensor(self._access_field, dtype='int')\n self.sample_ones = tensorlib.ones(tensorlib.shape(self.staterror_mask)[1])\n self.staterror_default = tensorlib.ones(tensorlib.shape(self.staterror_mask))\n\n def apply(self, pars):\n if not self.param_viewer.index_selection:\n return\n\n tensorlib, _ = get_backend()\n if self.batch_size is None:\n flat_pars = pars\n else:\n flat_pars = tensorlib.reshape(pars, (-1,))\n statfactors = tensorlib.gather(flat_pars, self.access_field)\n results_staterr = tensorlib.einsum('mab,s->msab', statfactors, self.sample_ones)\n results_staterr = tensorlib.where(\n self.staterror_mask, 
results_staterr, self.staterror_default\n )\n return results_staterr\n", "path": "src/pyhf/modifiers/staterror.py"}]}
| 3,110 | 556 |
gh_patches_debug_19108
|
rasdani/github-patches
|
git_diff
|
arviz-devs__arviz-1533
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
plot_pair axis labels no longer include coords
**Describe the bug**
Using `plot_pair` with a selection of `coords`, I got a plot where the axis labels only show `var_names`, so different `coords` get the same label.
**To Reproduce**
```
test_idata = arviz.from_dict(
posterior = dict(a=numpy.random.rand(1, 10, 3), b=numpy.random.rand(1 ,10, 1)),
dims={'a':['a_dim',]},
coords=dict(a_dim=['test_A', 'test_B', 'test_C'])
)
arviz.plot_pair(
test_idata,
coords=dict(a_dim=['test_A', 'test_B']),
kind='kde'
);
```

**Expected behavior**
Axis labels should also include the `coords` to be unique (in this case a_test_A or similar).
**Additional context**
arviz_version : 0.11.0
</issue>
<code>
[start of arviz/plots/pairplot.py]
1 """Plot a scatter, kde and/or hexbin of sampled parameters."""
2 import warnings
3 from typing import List, Optional, Union
4
5 import numpy as np
6
7 from ..data import convert_to_dataset
8 from ..rcparams import rcParams
9 from ..utils import _var_names, get_coords
10 from .plot_utils import (
11 get_plotting_function,
12 xarray_to_ndarray,
13 filter_plotters_list,
14 xarray_var_iter,
15 )
16
17
18 def plot_pair(
19 data,
20 group="posterior",
21 var_names: Optional[List[str]] = None,
22 filter_vars: Optional[str] = None,
23 coords=None,
24 marginals=False,
25 figsize=None,
26 textsize=None,
27 kind: Union[str, List[str]] = "scatter",
28 gridsize="auto",
29 contour: Optional[bool] = None,
30 plot_kwargs=None,
31 fill_last=False,
32 divergences=False,
33 colorbar=False,
34 ax=None,
35 divergences_kwargs=None,
36 scatter_kwargs=None,
37 kde_kwargs=None,
38 hexbin_kwargs=None,
39 backend=None,
40 backend_kwargs=None,
41 marginal_kwargs=None,
42 point_estimate=None,
43 point_estimate_kwargs=None,
44 point_estimate_marker_kwargs=None,
45 reference_values=None,
46 reference_values_kwargs=None,
47 show=None,
48 ):
49 """
50 Plot a scatter, kde and/or hexbin matrix with (optional) marginals on the diagonal.
51
52 Parameters
53 ----------
54 data: obj
55 Any object that can be converted to an az.InferenceData object
56 Refer to documentation of az.convert_to_dataset for details
57 group: str, optional
58 Specifies which InferenceData group should be plotted. Defaults to 'posterior'.
59 var_names: list of variable names, optional
60 Variables to be plotted, if None all variable are plotted. Prefix the
61 variables by `~` when you want to exclude them from the plot.
62 filter_vars: {None, "like", "regex"}, optional, default=None
63 If `None` (default), interpret var_names as the real variables names. If "like",
64 interpret var_names as substrings of the real variables names. If "regex",
65 interpret var_names as regular expressions on the real variables names. A la
66 `pandas.filter`.
67 coords: mapping, optional
68 Coordinates of var_names to be plotted. Passed to `Dataset.sel`
69 marginals: bool, optional
70 If True pairplot will include marginal distributions for every variable
71 figsize: figure size tuple
72 If None, size is (8 + numvars, 8 + numvars)
73 textsize: int
74 Text size for labels. If None it will be autoscaled based on figsize.
75 kind : str or List[str]
76 Type of plot to display (scatter, kde and/or hexbin)
77 gridsize: int or (int, int), optional
78 Only works for kind=hexbin.
79 The number of hexagons in the x-direction. The corresponding number of hexagons in the
80 y-direction is chosen such that the hexagons are approximately regular.
81 Alternatively, gridsize can be a tuple with two elements specifying the number of hexagons
82 in the x-direction and the y-direction.
83 contour : bool, optional, deprecated, Defaults to True.
84 If True plot the 2D KDE using contours, otherwise plot a smooth 2D KDE. Defaults to True.
85 **Note:** this default is implemented in the body of the code, not in argument processing.
86 fill_last : bool
87 If True fill the last contour of the 2D KDE plot. Defaults to True.
88 divergences: Boolean
89 If True divergences will be plotted in a different color, only if group is either 'prior'
90 or 'posterior'.
91 colorbar: bool
92 If True a colorbar will be included as part of the plot (Defaults to False).
93 Only works when kind=hexbin
94 ax: axes, optional
95 Matplotlib axes or bokeh figures.
96 divergences_kwargs: dicts, optional
97 Additional keywords passed to ax.scatter for divergences
98 scatter_kwargs:
99 Additional keywords passed to ax.plot when using scatter kind
100 kde_kwargs: dict, optional
101 Additional keywords passed to az.plot_kde when using kde kind
102 hexbin_kwargs: dict, optional
103 Additional keywords passed to ax.hexbin when using hexbin kind
104 backend: str, optional
105 Select plotting backend {"matplotlib","bokeh"}. Default "matplotlib".
106 backend_kwargs: bool, optional
107 These are kwargs specific to the backend being used. For additional documentation
108 check the plotting method of the backend.
109 marginal_kwargs: dict, optional
110 Additional keywords passed to az.plot_dist, modifying the marginal distributions
111 plotted in the diagonal.
112 point_estimate: str, optional
113 Select point estimate from 'mean', 'mode' or 'median'. The point estimate will be
114 plotted using a scatter marker and vertical/horizontal lines.
115 point_estimate_kwargs: dict, optional
116 Additional keywords passed to ax.vline, ax.hline (matplotlib) or ax.square, Span (bokeh)
117 point_estimate_marker_kwargs: dict, optional
118 Additional keywords passed to ax.scatter in point estimate plot. Not available in bokeh
119 reference_values: dict, optional
120 Reference values for the plotted variables. The Reference values will be plotted
121 using a scatter marker
122 reference_values_kwargs: dict, optional
123 Additional keywords passed to ax.plot or ax.circle in reference values plot
124 show: bool, optional
125 Call backend show function.
126
127 Returns
128 -------
129 axes: matplotlib axes or bokeh figures
130
131 Examples
132 --------
133 KDE Pair Plot
134
135 .. plot::
136 :context: close-figs
137
138 >>> import arviz as az
139 >>> centered = az.load_arviz_data('centered_eight')
140 >>> coords = {'school': ['Choate', 'Deerfield']}
141 >>> az.plot_pair(centered,
142 >>> var_names=['theta', 'mu', 'tau'],
143 >>> kind='kde',
144 >>> coords=coords,
145 >>> divergences=True,
146 >>> textsize=18)
147
148 Hexbin pair plot
149
150 .. plot::
151 :context: close-figs
152
153 >>> az.plot_pair(centered,
154 >>> var_names=['theta', 'mu'],
155 >>> coords=coords,
156 >>> textsize=18,
157 >>> kind='hexbin')
158
159 Pair plot showing divergences and select variables with regular expressions
160
161 .. plot::
162 :context: close-figs
163
164 >>> az.plot_pair(centered,
165 ... var_names=['^t', 'mu'],
166 ... filter_vars="regex",
167 ... coords=coords,
168 ... divergences=True,
169 ... textsize=18)
170 """
171 valid_kinds = ["scatter", "kde", "hexbin"]
172 kind_boolean: Union[bool, List[bool]]
173 if isinstance(kind, str):
174 kind_boolean = kind in valid_kinds
175 else:
176 kind_boolean = [kind[i] in valid_kinds for i in range(len(kind))]
177 if not np.all(kind_boolean):
178 raise ValueError((f"Plot type {kind} not recognized." "Plot type must be in {valid_kinds}"))
179 if fill_last or contour:
180 warnings.warn(
181 "fill_last and contour will be deprecated. Please use kde_kwargs",
182 UserWarning,
183 )
184 if plot_kwargs:
185 warnings.warn(
186 "plot_kwargs will be deprecated."
187 " Please use scatter_kwargs, kde_kwargs and/or hexbin_kwargs",
188 UserWarning,
189 )
190
191 if coords is None:
192 coords = {}
193
194 # Get posterior draws and combine chains
195 dataset = convert_to_dataset(data, group=group)
196 var_names = _var_names(var_names, dataset, filter_vars)
197 plotters = filter_plotters_list(
198 list(xarray_var_iter(get_coords(dataset, coords), var_names=var_names, combined=True)),
199 "plot_pair",
200 )
201 flat_var_names = [plotter[0] for plotter in plotters]
202
203 divergent_data = None
204 diverging_mask = None
205
206 # Assigning divergence group based on group param
207 if group == "posterior":
208 divergent_group = "sample_stats"
209 elif group == "prior":
210 divergent_group = "sample_stats_prior"
211 else:
212 divergences = False
213
214 # Get diverging draws and combine chains
215 if divergences:
216 if hasattr(data, divergent_group) and hasattr(getattr(data, divergent_group), "diverging"):
217 divergent_data = convert_to_dataset(data, group=divergent_group)
218 _, diverging_mask = xarray_to_ndarray(
219 divergent_data, var_names=("diverging",), combined=True
220 )
221 diverging_mask = np.squeeze(diverging_mask)
222 else:
223 divergences = False
224 warnings.warn(
225 "Divergences data not found, plotting without divergences. "
226 "Make sure the sample method provides divergences data and "
227 "that it is present in the `diverging` field of `sample_stats` "
228 "or `sample_stats_prior` or set divergences=False",
229 UserWarning,
230 )
231
232 if gridsize == "auto":
233 gridsize = int(dataset.dims["draw"] ** 0.35)
234
235 numvars = len(flat_var_names)
236
237 if numvars < 2:
238 raise ValueError("Number of variables to be plotted must be 2 or greater.")
239
240 pairplot_kwargs = dict(
241 ax=ax,
242 plotters=plotters,
243 numvars=numvars,
244 figsize=figsize,
245 textsize=textsize,
246 kind=kind,
247 scatter_kwargs=scatter_kwargs,
248 kde_kwargs=kde_kwargs,
249 hexbin_kwargs=hexbin_kwargs,
250 gridsize=gridsize,
251 colorbar=colorbar,
252 divergences=divergences,
253 diverging_mask=diverging_mask,
254 divergences_kwargs=divergences_kwargs,
255 flat_var_names=flat_var_names,
256 backend_kwargs=backend_kwargs,
257 marginal_kwargs=marginal_kwargs,
258 show=show,
259 marginals=marginals,
260 point_estimate=point_estimate,
261 point_estimate_kwargs=point_estimate_kwargs,
262 point_estimate_marker_kwargs=point_estimate_marker_kwargs,
263 reference_values=reference_values,
264 reference_values_kwargs=reference_values_kwargs,
265 )
266
267 if backend is None:
268 backend = rcParams["plot.backend"]
269 backend = backend.lower()
270
271 # TODO: Add backend kwargs
272 plot = get_plotting_function("plot_pair", "pairplot", backend)
273 ax = plot(**pairplot_kwargs)
274 return ax
275
[end of arviz/plots/pairplot.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/arviz/plots/pairplot.py b/arviz/plots/pairplot.py
--- a/arviz/plots/pairplot.py
+++ b/arviz/plots/pairplot.py
@@ -10,8 +10,8 @@
from .plot_utils import (
get_plotting_function,
xarray_to_ndarray,
- filter_plotters_list,
xarray_var_iter,
+ make_label,
)
@@ -194,11 +194,10 @@
# Get posterior draws and combine chains
dataset = convert_to_dataset(data, group=group)
var_names = _var_names(var_names, dataset, filter_vars)
- plotters = filter_plotters_list(
- list(xarray_var_iter(get_coords(dataset, coords), var_names=var_names, combined=True)),
- "plot_pair",
+ plotters = list(
+ xarray_var_iter(get_coords(dataset, coords), var_names=var_names, combined=True)
)
- flat_var_names = [plotter[0] for plotter in plotters]
+ flat_var_names = [make_label(var_name, selection) for var_name, selection, _ in plotters]
divergent_data = None
diverging_mask = None
|
{"golden_diff": "diff --git a/arviz/plots/pairplot.py b/arviz/plots/pairplot.py\n--- a/arviz/plots/pairplot.py\n+++ b/arviz/plots/pairplot.py\n@@ -10,8 +10,8 @@\n from .plot_utils import (\n get_plotting_function,\n xarray_to_ndarray,\n- filter_plotters_list,\n xarray_var_iter,\n+ make_label,\n )\n \n \n@@ -194,11 +194,10 @@\n # Get posterior draws and combine chains\n dataset = convert_to_dataset(data, group=group)\n var_names = _var_names(var_names, dataset, filter_vars)\n- plotters = filter_plotters_list(\n- list(xarray_var_iter(get_coords(dataset, coords), var_names=var_names, combined=True)),\n- \"plot_pair\",\n+ plotters = list(\n+ xarray_var_iter(get_coords(dataset, coords), var_names=var_names, combined=True)\n )\n- flat_var_names = [plotter[0] for plotter in plotters]\n+ flat_var_names = [make_label(var_name, selection) for var_name, selection, _ in plotters]\n \n divergent_data = None\n diverging_mask = None\n", "issue": "plot_pair axis labels no longer include coords\n**Describe the bug**\r\nUsing `plot_pair` with a selection of `coords`, I got a plot where the axis labels only show `var_names`, so different `coords` get the same label.\r\n\r\n**To Reproduce**\r\n```\r\ntest_idata = arviz.from_dict(\r\n posterior = dict(a=numpy.random.rand(1, 10, 3), b=numpy.random.rand(1 ,10, 1)),\r\n dims={'a':['a_dim',]},\r\n coords=dict(a_dim=['test_A', 'test_B', 'test_C'])\r\n)\r\narviz.plot_pair(\r\n test_idata, \r\n coords=dict(a_dim=['test_A', 'test_B']),\r\n kind='kde'\r\n);\r\n```\r\n\r\n\r\n\r\n\r\n**Expected behavior**\r\nAxis labels should also include the `coords` to be unique (in this case a_test_A or similar).\r\n\r\n**Additional context**\r\narviz_version : 0.11.0\r\n\n", "before_files": [{"content": "\"\"\"Plot a scatter, kde and/or hexbin of sampled parameters.\"\"\"\nimport warnings\nfrom typing import List, Optional, Union\n\nimport numpy as np\n\nfrom ..data import convert_to_dataset\nfrom ..rcparams import rcParams\nfrom ..utils import _var_names, get_coords\nfrom .plot_utils import (\n get_plotting_function,\n xarray_to_ndarray,\n filter_plotters_list,\n xarray_var_iter,\n)\n\n\ndef plot_pair(\n data,\n group=\"posterior\",\n var_names: Optional[List[str]] = None,\n filter_vars: Optional[str] = None,\n coords=None,\n marginals=False,\n figsize=None,\n textsize=None,\n kind: Union[str, List[str]] = \"scatter\",\n gridsize=\"auto\",\n contour: Optional[bool] = None,\n plot_kwargs=None,\n fill_last=False,\n divergences=False,\n colorbar=False,\n ax=None,\n divergences_kwargs=None,\n scatter_kwargs=None,\n kde_kwargs=None,\n hexbin_kwargs=None,\n backend=None,\n backend_kwargs=None,\n marginal_kwargs=None,\n point_estimate=None,\n point_estimate_kwargs=None,\n point_estimate_marker_kwargs=None,\n reference_values=None,\n reference_values_kwargs=None,\n show=None,\n):\n \"\"\"\n Plot a scatter, kde and/or hexbin matrix with (optional) marginals on the diagonal.\n\n Parameters\n ----------\n data: obj\n Any object that can be converted to an az.InferenceData object\n Refer to documentation of az.convert_to_dataset for details\n group: str, optional\n Specifies which InferenceData group should be plotted. Defaults to 'posterior'.\n var_names: list of variable names, optional\n Variables to be plotted, if None all variable are plotted. Prefix the\n variables by `~` when you want to exclude them from the plot.\n filter_vars: {None, \"like\", \"regex\"}, optional, default=None\n If `None` (default), interpret var_names as the real variables names. 
If \"like\",\n interpret var_names as substrings of the real variables names. If \"regex\",\n interpret var_names as regular expressions on the real variables names. A la\n `pandas.filter`.\n coords: mapping, optional\n Coordinates of var_names to be plotted. Passed to `Dataset.sel`\n marginals: bool, optional\n If True pairplot will include marginal distributions for every variable\n figsize: figure size tuple\n If None, size is (8 + numvars, 8 + numvars)\n textsize: int\n Text size for labels. If None it will be autoscaled based on figsize.\n kind : str or List[str]\n Type of plot to display (scatter, kde and/or hexbin)\n gridsize: int or (int, int), optional\n Only works for kind=hexbin.\n The number of hexagons in the x-direction. The corresponding number of hexagons in the\n y-direction is chosen such that the hexagons are approximately regular.\n Alternatively, gridsize can be a tuple with two elements specifying the number of hexagons\n in the x-direction and the y-direction.\n contour : bool, optional, deprecated, Defaults to True.\n If True plot the 2D KDE using contours, otherwise plot a smooth 2D KDE. Defaults to True.\n **Note:** this default is implemented in the body of the code, not in argument processing.\n fill_last : bool\n If True fill the last contour of the 2D KDE plot. Defaults to True.\n divergences: Boolean\n If True divergences will be plotted in a different color, only if group is either 'prior'\n or 'posterior'.\n colorbar: bool\n If True a colorbar will be included as part of the plot (Defaults to False).\n Only works when kind=hexbin\n ax: axes, optional\n Matplotlib axes or bokeh figures.\n divergences_kwargs: dicts, optional\n Additional keywords passed to ax.scatter for divergences\n scatter_kwargs:\n Additional keywords passed to ax.plot when using scatter kind\n kde_kwargs: dict, optional\n Additional keywords passed to az.plot_kde when using kde kind\n hexbin_kwargs: dict, optional\n Additional keywords passed to ax.hexbin when using hexbin kind\n backend: str, optional\n Select plotting backend {\"matplotlib\",\"bokeh\"}. Default \"matplotlib\".\n backend_kwargs: bool, optional\n These are kwargs specific to the backend being used. For additional documentation\n check the plotting method of the backend.\n marginal_kwargs: dict, optional\n Additional keywords passed to az.plot_dist, modifying the marginal distributions\n plotted in the diagonal.\n point_estimate: str, optional\n Select point estimate from 'mean', 'mode' or 'median'. The point estimate will be\n plotted using a scatter marker and vertical/horizontal lines.\n point_estimate_kwargs: dict, optional\n Additional keywords passed to ax.vline, ax.hline (matplotlib) or ax.square, Span (bokeh)\n point_estimate_marker_kwargs: dict, optional\n Additional keywords passed to ax.scatter in point estimate plot. Not available in bokeh\n reference_values: dict, optional\n Reference values for the plotted variables. The Reference values will be plotted\n using a scatter marker\n reference_values_kwargs: dict, optional\n Additional keywords passed to ax.plot or ax.circle in reference values plot\n show: bool, optional\n Call backend show function.\n\n Returns\n -------\n axes: matplotlib axes or bokeh figures\n\n Examples\n --------\n KDE Pair Plot\n\n .. 
plot::\n :context: close-figs\n\n >>> import arviz as az\n >>> centered = az.load_arviz_data('centered_eight')\n >>> coords = {'school': ['Choate', 'Deerfield']}\n >>> az.plot_pair(centered,\n >>> var_names=['theta', 'mu', 'tau'],\n >>> kind='kde',\n >>> coords=coords,\n >>> divergences=True,\n >>> textsize=18)\n\n Hexbin pair plot\n\n .. plot::\n :context: close-figs\n\n >>> az.plot_pair(centered,\n >>> var_names=['theta', 'mu'],\n >>> coords=coords,\n >>> textsize=18,\n >>> kind='hexbin')\n\n Pair plot showing divergences and select variables with regular expressions\n\n .. plot::\n :context: close-figs\n\n >>> az.plot_pair(centered,\n ... var_names=['^t', 'mu'],\n ... filter_vars=\"regex\",\n ... coords=coords,\n ... divergences=True,\n ... textsize=18)\n \"\"\"\n valid_kinds = [\"scatter\", \"kde\", \"hexbin\"]\n kind_boolean: Union[bool, List[bool]]\n if isinstance(kind, str):\n kind_boolean = kind in valid_kinds\n else:\n kind_boolean = [kind[i] in valid_kinds for i in range(len(kind))]\n if not np.all(kind_boolean):\n raise ValueError((f\"Plot type {kind} not recognized.\" \"Plot type must be in {valid_kinds}\"))\n if fill_last or contour:\n warnings.warn(\n \"fill_last and contour will be deprecated. Please use kde_kwargs\",\n UserWarning,\n )\n if plot_kwargs:\n warnings.warn(\n \"plot_kwargs will be deprecated.\"\n \" Please use scatter_kwargs, kde_kwargs and/or hexbin_kwargs\",\n UserWarning,\n )\n\n if coords is None:\n coords = {}\n\n # Get posterior draws and combine chains\n dataset = convert_to_dataset(data, group=group)\n var_names = _var_names(var_names, dataset, filter_vars)\n plotters = filter_plotters_list(\n list(xarray_var_iter(get_coords(dataset, coords), var_names=var_names, combined=True)),\n \"plot_pair\",\n )\n flat_var_names = [plotter[0] for plotter in plotters]\n\n divergent_data = None\n diverging_mask = None\n\n # Assigning divergence group based on group param\n if group == \"posterior\":\n divergent_group = \"sample_stats\"\n elif group == \"prior\":\n divergent_group = \"sample_stats_prior\"\n else:\n divergences = False\n\n # Get diverging draws and combine chains\n if divergences:\n if hasattr(data, divergent_group) and hasattr(getattr(data, divergent_group), \"diverging\"):\n divergent_data = convert_to_dataset(data, group=divergent_group)\n _, diverging_mask = xarray_to_ndarray(\n divergent_data, var_names=(\"diverging\",), combined=True\n )\n diverging_mask = np.squeeze(diverging_mask)\n else:\n divergences = False\n warnings.warn(\n \"Divergences data not found, plotting without divergences. 
\"\n \"Make sure the sample method provides divergences data and \"\n \"that it is present in the `diverging` field of `sample_stats` \"\n \"or `sample_stats_prior` or set divergences=False\",\n UserWarning,\n )\n\n if gridsize == \"auto\":\n gridsize = int(dataset.dims[\"draw\"] ** 0.35)\n\n numvars = len(flat_var_names)\n\n if numvars < 2:\n raise ValueError(\"Number of variables to be plotted must be 2 or greater.\")\n\n pairplot_kwargs = dict(\n ax=ax,\n plotters=plotters,\n numvars=numvars,\n figsize=figsize,\n textsize=textsize,\n kind=kind,\n scatter_kwargs=scatter_kwargs,\n kde_kwargs=kde_kwargs,\n hexbin_kwargs=hexbin_kwargs,\n gridsize=gridsize,\n colorbar=colorbar,\n divergences=divergences,\n diverging_mask=diverging_mask,\n divergences_kwargs=divergences_kwargs,\n flat_var_names=flat_var_names,\n backend_kwargs=backend_kwargs,\n marginal_kwargs=marginal_kwargs,\n show=show,\n marginals=marginals,\n point_estimate=point_estimate,\n point_estimate_kwargs=point_estimate_kwargs,\n point_estimate_marker_kwargs=point_estimate_marker_kwargs,\n reference_values=reference_values,\n reference_values_kwargs=reference_values_kwargs,\n )\n\n if backend is None:\n backend = rcParams[\"plot.backend\"]\n backend = backend.lower()\n\n # TODO: Add backend kwargs\n plot = get_plotting_function(\"plot_pair\", \"pairplot\", backend)\n ax = plot(**pairplot_kwargs)\n return ax\n", "path": "arviz/plots/pairplot.py"}]}
| 3,849 | 280 |
gh_patches_debug_3468
|
rasdani/github-patches
|
git_diff
|
pallets__werkzeug-2612
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Increase default work factor for PBKDF2 to 600,000 iterations
https://github.com/pallets/werkzeug/blob/bba2109e276ab876b8d64e2b967412cb6da1d865/src/werkzeug/security.py#L12
The value has not been updated for a few years. I propose increasing it to the same number as the next release of Django will use.
See https://github.com/django/django/pull/16521 and https://cheatsheetseries.owasp.org/cheatsheets/Password_Storage_Cheat_Sheet.html#pbkdf2.
</issue>
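For context, PBKDF2's cost scales linearly with the iteration count, so the proposed change roughly doubles the hashing time. A minimal stdlib sketch (illustrative only; timings depend entirely on the machine) for comparing the current and proposed work factors:

```python
# Rough comparison of the two work factors using only hashlib.
import hashlib
import os
import time

salt = os.urandom(16)
for iterations in (260_000, 600_000):
    start = time.perf_counter()
    hashlib.pbkdf2_hmac("sha256", b"example-password", salt, iterations)
    print(f"{iterations:>7} iterations: {time.perf_counter() - start:.3f}s")
```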
<code>
[start of src/werkzeug/security.py]
1 import hashlib
2 import hmac
3 import os
4 import posixpath
5 import secrets
6 import typing as t
7
8 if t.TYPE_CHECKING:
9 pass
10
11 SALT_CHARS = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
12 DEFAULT_PBKDF2_ITERATIONS = 260000
13
14 _os_alt_seps: t.List[str] = list(
15 sep for sep in [os.sep, os.path.altsep] if sep is not None and sep != "/"
16 )
17
18
19 def gen_salt(length: int) -> str:
20 """Generate a random string of SALT_CHARS with specified ``length``."""
21 if length <= 0:
22 raise ValueError("Salt length must be positive")
23
24 return "".join(secrets.choice(SALT_CHARS) for _ in range(length))
25
26
27 def _hash_internal(method: str, salt: str, password: str) -> t.Tuple[str, str]:
28 """Internal password hash helper. Supports plaintext without salt,
29 unsalted and salted passwords. In case salted passwords are used
30 hmac is used.
31 """
32 if method == "plain":
33 return password, method
34
35 salt = salt.encode("utf-8")
36 password = password.encode("utf-8")
37
38 if method.startswith("pbkdf2:"):
39 if not salt:
40 raise ValueError("Salt is required for PBKDF2")
41
42 args = method[7:].split(":")
43
44 if len(args) not in (1, 2):
45 raise ValueError("Invalid number of arguments for PBKDF2")
46
47 method = args.pop(0)
48 iterations = int(args[0] or 0) if args else DEFAULT_PBKDF2_ITERATIONS
49 return (
50 hashlib.pbkdf2_hmac(method, password, salt, iterations).hex(),
51 f"pbkdf2:{method}:{iterations}",
52 )
53
54 if salt:
55 return hmac.new(salt, password, method).hexdigest(), method
56
57 return hashlib.new(method, password).hexdigest(), method
58
59
60 def generate_password_hash(
61 password: str, method: str = "pbkdf2:sha256", salt_length: int = 16
62 ) -> str:
63 """Hash a password with the given method and salt with a string of
64 the given length. The format of the string returned includes the method
65 that was used so that :func:`check_password_hash` can check the hash.
66
67 The format for the hashed string looks like this::
68
69 method$salt$hash
70
71 This method can **not** generate unsalted passwords but it is possible
72 to set param method='plain' in order to enforce plaintext passwords.
73 If a salt is used, hmac is used internally to salt the password.
74
75 If PBKDF2 is wanted it can be enabled by setting the method to
76 ``pbkdf2:method:iterations`` where iterations is optional::
77
78 pbkdf2:sha256:80000$salt$hash
79 pbkdf2:sha256$salt$hash
80
81 :param password: the password to hash.
82 :param method: the hash method to use (one that hashlib supports). Can
83 optionally be in the format ``pbkdf2:method:iterations``
84 to enable PBKDF2.
85 :param salt_length: the length of the salt in letters.
86 """
87 salt = gen_salt(salt_length) if method != "plain" else ""
88 h, actual_method = _hash_internal(method, salt, password)
89 return f"{actual_method}${salt}${h}"
90
91
92 def check_password_hash(pwhash: str, password: str) -> bool:
93 """Check a password against a given salted and hashed password value.
94 In order to support unsalted legacy passwords this method supports
95 plain text passwords, md5 and sha1 hashes (both salted and unsalted).
96
97 Returns `True` if the password matched, `False` otherwise.
98
99 :param pwhash: a hashed string like returned by
100 :func:`generate_password_hash`.
101 :param password: the plaintext password to compare against the hash.
102 """
103 if pwhash.count("$") < 2:
104 return False
105
106 method, salt, hashval = pwhash.split("$", 2)
107 return hmac.compare_digest(_hash_internal(method, salt, password)[0], hashval)
108
109
110 def safe_join(directory: str, *pathnames: str) -> t.Optional[str]:
111 """Safely join zero or more untrusted path components to a base
112 directory to avoid escaping the base directory.
113
114 :param directory: The trusted base directory.
115 :param pathnames: The untrusted path components relative to the
116 base directory.
117 :return: A safe path, otherwise ``None``.
118 """
119 if not directory:
120 # Ensure we end up with ./path if directory="" is given,
121 # otherwise the first untrusted part could become trusted.
122 directory = "."
123
124 parts = [directory]
125
126 for filename in pathnames:
127 if filename != "":
128 filename = posixpath.normpath(filename)
129
130 if (
131 any(sep in filename for sep in _os_alt_seps)
132 or os.path.isabs(filename)
133 or filename == ".."
134 or filename.startswith("../")
135 ):
136 return None
137
138 parts.append(filename)
139
140 return posixpath.join(*parts)
141
[end of src/werkzeug/security.py]
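A small usage sketch of the helpers listed above (illustrative only; assumes a Werkzeug install matching this version of `security.py`). The iteration count in the printed prefix reflects the module's current `DEFAULT_PBKDF2_ITERATIONS`:

```python
from werkzeug.security import check_password_hash, generate_password_hash

pwhash = generate_password_hash("correct horse battery staple")     # default method: pbkdf2:sha256
print(pwhash.split("$")[0])                                         # pbkdf2:sha256:260000
print(check_password_hash(pwhash, "correct horse battery staple"))  # True
print(check_password_hash(pwhash, "wrong password"))                # False
```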
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/werkzeug/security.py b/src/werkzeug/security.py
--- a/src/werkzeug/security.py
+++ b/src/werkzeug/security.py
@@ -9,7 +9,7 @@
pass
SALT_CHARS = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
-DEFAULT_PBKDF2_ITERATIONS = 260000
+DEFAULT_PBKDF2_ITERATIONS = 600000
_os_alt_seps: t.List[str] = list(
sep for sep in [os.sep, os.path.altsep] if sep is not None and sep != "/"
|
{"golden_diff": "diff --git a/src/werkzeug/security.py b/src/werkzeug/security.py\n--- a/src/werkzeug/security.py\n+++ b/src/werkzeug/security.py\n@@ -9,7 +9,7 @@\n pass\n \n SALT_CHARS = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789\"\n-DEFAULT_PBKDF2_ITERATIONS = 260000\n+DEFAULT_PBKDF2_ITERATIONS = 600000\n \n _os_alt_seps: t.List[str] = list(\n sep for sep in [os.sep, os.path.altsep] if sep is not None and sep != \"/\"\n", "issue": "Increase default work factor for PBKDF2 to 600,000 iterations\nhttps://github.com/pallets/werkzeug/blob/bba2109e276ab876b8d64e2b967412cb6da1d865/src/werkzeug/security.py#L12\r\n\r\nThe value has not been updated for a few years. I propose increasing it to the same number as the next release of Django will use.\r\n\r\nSee https://github.com/django/django/pull/16521 and https://cheatsheetseries.owasp.org/cheatsheets/Password_Storage_Cheat_Sheet.html#pbkdf2.\n", "before_files": [{"content": "import hashlib\nimport hmac\nimport os\nimport posixpath\nimport secrets\nimport typing as t\n\nif t.TYPE_CHECKING:\n pass\n\nSALT_CHARS = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789\"\nDEFAULT_PBKDF2_ITERATIONS = 260000\n\n_os_alt_seps: t.List[str] = list(\n sep for sep in [os.sep, os.path.altsep] if sep is not None and sep != \"/\"\n)\n\n\ndef gen_salt(length: int) -> str:\n \"\"\"Generate a random string of SALT_CHARS with specified ``length``.\"\"\"\n if length <= 0:\n raise ValueError(\"Salt length must be positive\")\n\n return \"\".join(secrets.choice(SALT_CHARS) for _ in range(length))\n\n\ndef _hash_internal(method: str, salt: str, password: str) -> t.Tuple[str, str]:\n \"\"\"Internal password hash helper. Supports plaintext without salt,\n unsalted and salted passwords. In case salted passwords are used\n hmac is used.\n \"\"\"\n if method == \"plain\":\n return password, method\n\n salt = salt.encode(\"utf-8\")\n password = password.encode(\"utf-8\")\n\n if method.startswith(\"pbkdf2:\"):\n if not salt:\n raise ValueError(\"Salt is required for PBKDF2\")\n\n args = method[7:].split(\":\")\n\n if len(args) not in (1, 2):\n raise ValueError(\"Invalid number of arguments for PBKDF2\")\n\n method = args.pop(0)\n iterations = int(args[0] or 0) if args else DEFAULT_PBKDF2_ITERATIONS\n return (\n hashlib.pbkdf2_hmac(method, password, salt, iterations).hex(),\n f\"pbkdf2:{method}:{iterations}\",\n )\n\n if salt:\n return hmac.new(salt, password, method).hexdigest(), method\n\n return hashlib.new(method, password).hexdigest(), method\n\n\ndef generate_password_hash(\n password: str, method: str = \"pbkdf2:sha256\", salt_length: int = 16\n) -> str:\n \"\"\"Hash a password with the given method and salt with a string of\n the given length. The format of the string returned includes the method\n that was used so that :func:`check_password_hash` can check the hash.\n\n The format for the hashed string looks like this::\n\n method$salt$hash\n\n This method can **not** generate unsalted passwords but it is possible\n to set param method='plain' in order to enforce plaintext passwords.\n If a salt is used, hmac is used internally to salt the password.\n\n If PBKDF2 is wanted it can be enabled by setting the method to\n ``pbkdf2:method:iterations`` where iterations is optional::\n\n pbkdf2:sha256:80000$salt$hash\n pbkdf2:sha256$salt$hash\n\n :param password: the password to hash.\n :param method: the hash method to use (one that hashlib supports). 
Can\n optionally be in the format ``pbkdf2:method:iterations``\n to enable PBKDF2.\n :param salt_length: the length of the salt in letters.\n \"\"\"\n salt = gen_salt(salt_length) if method != \"plain\" else \"\"\n h, actual_method = _hash_internal(method, salt, password)\n return f\"{actual_method}${salt}${h}\"\n\n\ndef check_password_hash(pwhash: str, password: str) -> bool:\n \"\"\"Check a password against a given salted and hashed password value.\n In order to support unsalted legacy passwords this method supports\n plain text passwords, md5 and sha1 hashes (both salted and unsalted).\n\n Returns `True` if the password matched, `False` otherwise.\n\n :param pwhash: a hashed string like returned by\n :func:`generate_password_hash`.\n :param password: the plaintext password to compare against the hash.\n \"\"\"\n if pwhash.count(\"$\") < 2:\n return False\n\n method, salt, hashval = pwhash.split(\"$\", 2)\n return hmac.compare_digest(_hash_internal(method, salt, password)[0], hashval)\n\n\ndef safe_join(directory: str, *pathnames: str) -> t.Optional[str]:\n \"\"\"Safely join zero or more untrusted path components to a base\n directory to avoid escaping the base directory.\n\n :param directory: The trusted base directory.\n :param pathnames: The untrusted path components relative to the\n base directory.\n :return: A safe path, otherwise ``None``.\n \"\"\"\n if not directory:\n # Ensure we end up with ./path if directory=\"\" is given,\n # otherwise the first untrusted part could become trusted.\n directory = \".\"\n\n parts = [directory]\n\n for filename in pathnames:\n if filename != \"\":\n filename = posixpath.normpath(filename)\n\n if (\n any(sep in filename for sep in _os_alt_seps)\n or os.path.isabs(filename)\n or filename == \"..\"\n or filename.startswith(\"../\")\n ):\n return None\n\n parts.append(filename)\n\n return posixpath.join(*parts)\n", "path": "src/werkzeug/security.py"}]}
| 2,170 | 137 |
gh_patches_debug_36322
|
rasdani/github-patches
|
git_diff
|
microsoft__ptvsd-1858
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Build failure on Windows when running setup_cython.py
```console
Traceback (most recent call last):
File "setup_cython.py", line 134, in <module>
build_extension("_pydevd_bundle", extension_name, target_pydevd_name, force_cython, extension_folder, True)
File "setup_cython.py", line 95, in build_extension
c_files,
UnboundLocalError: local variable 'c_files' referenced before assignment
```
</issue>
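The traceback boils down to a common Python pitfall: `c_files` is bound only inside the `if force_cython:` branch but read unconditionally afterwards. A minimal, self-contained reproduction (the function below is a simplified stand-in, not the real `build_extension`):

```python
def build_extension(force_cython: bool):
    if force_cython:
        c_files = ["pydevd_cython.c"]  # bound only on this branch
    return c_files                     # read unconditionally

build_extension(True)   # works
try:
    build_extension(False)
except UnboundLocalError as exc:
    # "local variable 'c_files' referenced before assignment" (wording varies by Python version)
    print(exc)
```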
<code>
[start of src/ptvsd/_vendored/pydevd/setup_cython.py]
1 '''
2 A simpler setup version just to compile the speedup module.
3
4 It should be used as:
5
6 python setup_cython build_ext --inplace
7
8 Note: the .c file and other generated files are regenerated from
9 the .pyx file by running "python build_tools/build.py"
10 '''
11
12 import os
13 import sys
14 from setuptools import setup
15
16 os.chdir(os.path.dirname(os.path.abspath(__file__)))
17
18 IS_PY36_OR_GREATER = sys.version_info > (3, 6)
19
20
21 def process_args():
22 extension_folder = None
23 target_pydevd_name = None
24 target_frame_eval = None
25 force_cython = False
26
27 for i, arg in enumerate(sys.argv[:]):
28 if arg == '--build-lib':
29 extension_folder = sys.argv[i + 1]
30 # It shouldn't be removed from sys.argv (among with --build-temp) because they're passed further to setup()
31 if arg.startswith('--target-pyd-name='):
32 sys.argv.remove(arg)
33 target_pydevd_name = arg[len('--target-pyd-name='):]
34 if arg.startswith('--target-pyd-frame-eval='):
35 sys.argv.remove(arg)
36 target_frame_eval = arg[len('--target-pyd-frame-eval='):]
37 if arg == '--force-cython':
38 sys.argv.remove(arg)
39 force_cython = True
40
41 return extension_folder, target_pydevd_name, target_frame_eval, force_cython
42
43
44 def build_extension(dir_name, extension_name, target_pydevd_name, force_cython, extended=False, has_pxd=False):
45 pyx_file = os.path.join(os.path.dirname(__file__), dir_name, "%s.pyx" % (extension_name,))
46
47 if target_pydevd_name != extension_name:
48 # It MUST be there in this case!
49 # (otherwise we'll have unresolved externals because the .c file had another name initially).
50 import shutil
51
52 # We must force cython in this case (but only in this case -- for the regular setup in the user machine, we
53 # should always compile the .c file).
54 force_cython = True
55
56 new_pyx_file = os.path.join(os.path.dirname(__file__), dir_name, "%s.pyx" % (target_pydevd_name,))
57 new_c_file = os.path.join(os.path.dirname(__file__), dir_name, "%s.c" % (target_pydevd_name,))
58 shutil.copy(pyx_file, new_pyx_file)
59 pyx_file = new_pyx_file
60 if has_pxd:
61 pxd_file = os.path.join(os.path.dirname(__file__), dir_name, "%s.pxd" % (extension_name,))
62 new_pxd_file = os.path.join(os.path.dirname(__file__), dir_name, "%s.pxd" % (target_pydevd_name,))
63 shutil.copy(pxd_file, new_pxd_file)
64 assert os.path.exists(pyx_file)
65
66 try:
67 if force_cython:
68 from Cython.Build import cythonize # @UnusedImport
69 # Generate the .c files in cythonize (will not compile at this point).
70 cythonize([
71 "%s/%s.pyx" % (dir_name, target_pydevd_name,),
72 ])
73
74 # This is needed in CPython 3.8 to access PyInterpreterState.eval_frame.
75 # i.e.: we change #include "pystate.h" to also #include "internal/pycore_pystate.h"
76 # if compiling on Python 3.8.
77 c_files = [os.path.join(dir_name, "%s.c" % target_pydevd_name), ]
78 for c_file in c_files:
79 with open(c_file, 'r') as stream:
80 c_file_contents = stream.read()
81
82 c_file_contents = c_file_contents.replace('#include "pystate.h"', '''#include "pystate.h"
83 #if PY_VERSION_HEX >= 0x03080000
84 #include "internal/pycore_pystate.h"
85 #endif
86 ''')
87 c_file_contents = c_file_contents.replace('\r\n', '\n').replace('\r', '\n')
88
89 with open(c_file, 'w') as stream:
90 stream.write(c_file_contents)
91
92 # Always compile the .c (and not the .pyx) file (which we should keep up-to-date by running build_tools/build.py).
93 from distutils.extension import Extension
94 ext_modules = [Extension("%s%s.%s" % (dir_name, "_ext" if extended else "", target_pydevd_name,),
95 c_files,
96 # uncomment to generate pdbs for visual studio.
97 # extra_compile_args=["-Zi", "/Od"],
98 # extra_link_args=["-debug"],
99 )]
100
101 # This is needed in CPython 3.8 to be able to include internal/pycore_pystate.h
102 # (needed to set PyInterpreterState.eval_frame).
103 for module in ext_modules:
104 module.define_macros = [('Py_BUILD_CORE_MODULE', '1')]
105 setup(
106 name='Cythonize',
107 ext_modules=ext_modules
108 )
109 finally:
110 if target_pydevd_name != extension_name:
111 try:
112 os.remove(new_pyx_file)
113 except:
114 import traceback
115 traceback.print_exc()
116 try:
117 os.remove(new_c_file)
118 except:
119 import traceback
120 traceback.print_exc()
121 if has_pxd:
122 try:
123 os.remove(new_pxd_file)
124 except:
125 import traceback
126 traceback.print_exc()
127
128
129 extension_folder, target_pydevd_name, target_frame_eval, force_cython = process_args()
130
131 extension_name = "pydevd_cython"
132 if target_pydevd_name is None:
133 target_pydevd_name = extension_name
134 build_extension("_pydevd_bundle", extension_name, target_pydevd_name, force_cython, extension_folder, True)
135
136 if IS_PY36_OR_GREATER:
137 extension_name = "pydevd_frame_evaluator"
138 if target_frame_eval is None:
139 target_frame_eval = extension_name
140 build_extension("_pydevd_frame_eval", extension_name, target_frame_eval, force_cython, extension_folder, True)
141
142 if extension_folder:
143 os.chdir(extension_folder)
144 for folder in [file for file in os.listdir(extension_folder) if
145 file != 'build' and os.path.isdir(os.path.join(extension_folder, file))]:
146 file = os.path.join(folder, "__init__.py")
147 if not os.path.exists(file):
148 open(file, 'a').close()
149
[end of src/ptvsd/_vendored/pydevd/setup_cython.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/src/ptvsd/_vendored/pydevd/setup_cython.py b/src/ptvsd/_vendored/pydevd/setup_cython.py
--- a/src/ptvsd/_vendored/pydevd/setup_cython.py
+++ b/src/ptvsd/_vendored/pydevd/setup_cython.py
@@ -64,7 +64,13 @@
assert os.path.exists(pyx_file)
try:
+ c_files = [os.path.join(dir_name, "%s.c" % target_pydevd_name), ]
if force_cython:
+ for c_file in c_files:
+ try:
+ os.remove(c_file)
+ except:
+ pass
from Cython.Build import cythonize # @UnusedImport
# Generate the .c files in cythonize (will not compile at this point).
cythonize([
@@ -74,17 +80,23 @@
# This is needed in CPython 3.8 to access PyInterpreterState.eval_frame.
# i.e.: we change #include "pystate.h" to also #include "internal/pycore_pystate.h"
# if compiling on Python 3.8.
- c_files = [os.path.join(dir_name, "%s.c" % target_pydevd_name), ]
for c_file in c_files:
with open(c_file, 'r') as stream:
c_file_contents = stream.read()
- c_file_contents = c_file_contents.replace('#include "pystate.h"', '''#include "pystate.h"
+ if '#include "internal/pycore_pystate.h"' not in c_file_contents:
+ c_file_contents = c_file_contents.replace('#include "pystate.h"', '''#include "pystate.h"
#if PY_VERSION_HEX >= 0x03080000
#include "internal/pycore_pystate.h"
#endif
''')
+
+ # We want the same output on Windows and Linux.
c_file_contents = c_file_contents.replace('\r\n', '\n').replace('\r', '\n')
+ c_file_contents = c_file_contents.replace(r'_pydevd_frame_eval\\release_mem.h', '_pydevd_frame_eval/release_mem.h')
+ c_file_contents = c_file_contents.replace(r'_pydevd_frame_eval\\pydevd_frame_evaluator.pyx', '_pydevd_frame_eval/pydevd_frame_evaluator.pyx')
+ c_file_contents = c_file_contents.replace(r'_pydevd_bundle\\pydevd_cython.pxd', '_pydevd_bundle/pydevd_cython.pxd')
+ c_file_contents = c_file_contents.replace(r'_pydevd_bundle\\pydevd_cython.pyx', '_pydevd_bundle/pydevd_cython.pyx')
with open(c_file, 'w') as stream:
stream.write(c_file_contents)
|
{"golden_diff": "diff --git a/src/ptvsd/_vendored/pydevd/setup_cython.py b/src/ptvsd/_vendored/pydevd/setup_cython.py\n--- a/src/ptvsd/_vendored/pydevd/setup_cython.py\n+++ b/src/ptvsd/_vendored/pydevd/setup_cython.py\n@@ -64,7 +64,13 @@\n assert os.path.exists(pyx_file)\n \n try:\n+ c_files = [os.path.join(dir_name, \"%s.c\" % target_pydevd_name), ]\n if force_cython:\n+ for c_file in c_files:\n+ try:\n+ os.remove(c_file)\n+ except:\n+ pass\n from Cython.Build import cythonize # @UnusedImport\n # Generate the .c files in cythonize (will not compile at this point).\n cythonize([\n@@ -74,17 +80,23 @@\n # This is needed in CPython 3.8 to access PyInterpreterState.eval_frame.\n # i.e.: we change #include \"pystate.h\" to also #include \"internal/pycore_pystate.h\"\n # if compiling on Python 3.8.\n- c_files = [os.path.join(dir_name, \"%s.c\" % target_pydevd_name), ]\n for c_file in c_files:\n with open(c_file, 'r') as stream:\n c_file_contents = stream.read()\n \n- c_file_contents = c_file_contents.replace('#include \"pystate.h\"', '''#include \"pystate.h\"\n+ if '#include \"internal/pycore_pystate.h\"' not in c_file_contents:\n+ c_file_contents = c_file_contents.replace('#include \"pystate.h\"', '''#include \"pystate.h\"\n #if PY_VERSION_HEX >= 0x03080000\n #include \"internal/pycore_pystate.h\"\n #endif\n ''')\n+\n+ # We want the same output on Windows and Linux.\n c_file_contents = c_file_contents.replace('\\r\\n', '\\n').replace('\\r', '\\n')\n+ c_file_contents = c_file_contents.replace(r'_pydevd_frame_eval\\\\release_mem.h', '_pydevd_frame_eval/release_mem.h')\n+ c_file_contents = c_file_contents.replace(r'_pydevd_frame_eval\\\\pydevd_frame_evaluator.pyx', '_pydevd_frame_eval/pydevd_frame_evaluator.pyx')\n+ c_file_contents = c_file_contents.replace(r'_pydevd_bundle\\\\pydevd_cython.pxd', '_pydevd_bundle/pydevd_cython.pxd')\n+ c_file_contents = c_file_contents.replace(r'_pydevd_bundle\\\\pydevd_cython.pyx', '_pydevd_bundle/pydevd_cython.pyx')\n \n with open(c_file, 'w') as stream:\n stream.write(c_file_contents)\n", "issue": "Build failure on Windows when running setup_cython.py\n```console\r\nTraceback (most recent call last):\r\n\r\n File \"setup_cython.py\", line 134, in <module>\r\n\r\n\r\n build_extension(\"_pydevd_bundle\", extension_name, target_pydevd_name, force_cython, extension_folder, True)\r\n\r\n File \"setup_cython.py\", line 95, in build_extension\r\n\r\n c_files,\r\n\r\nUnboundLocalError: local variable 'c_files' referenced before assignment\r\n\r\n```\n", "before_files": [{"content": "'''\nA simpler setup version just to compile the speedup module.\n\nIt should be used as:\n\npython setup_cython build_ext --inplace\n\nNote: the .c file and other generated files are regenerated from\nthe .pyx file by running \"python build_tools/build.py\"\n'''\n\nimport os\nimport sys\nfrom setuptools import setup\n\nos.chdir(os.path.dirname(os.path.abspath(__file__)))\n\nIS_PY36_OR_GREATER = sys.version_info > (3, 6)\n\n\ndef process_args():\n extension_folder = None\n target_pydevd_name = None\n target_frame_eval = None\n force_cython = False\n\n for i, arg in enumerate(sys.argv[:]):\n if arg == '--build-lib':\n extension_folder = sys.argv[i + 1]\n # It shouldn't be removed from sys.argv (among with --build-temp) because they're passed further to setup()\n if arg.startswith('--target-pyd-name='):\n sys.argv.remove(arg)\n target_pydevd_name = arg[len('--target-pyd-name='):]\n if arg.startswith('--target-pyd-frame-eval='):\n sys.argv.remove(arg)\n target_frame_eval = 
arg[len('--target-pyd-frame-eval='):]\n if arg == '--force-cython':\n sys.argv.remove(arg)\n force_cython = True\n\n return extension_folder, target_pydevd_name, target_frame_eval, force_cython\n\n\ndef build_extension(dir_name, extension_name, target_pydevd_name, force_cython, extended=False, has_pxd=False):\n pyx_file = os.path.join(os.path.dirname(__file__), dir_name, \"%s.pyx\" % (extension_name,))\n\n if target_pydevd_name != extension_name:\n # It MUST be there in this case!\n # (otherwise we'll have unresolved externals because the .c file had another name initially).\n import shutil\n\n # We must force cython in this case (but only in this case -- for the regular setup in the user machine, we\n # should always compile the .c file).\n force_cython = True\n\n new_pyx_file = os.path.join(os.path.dirname(__file__), dir_name, \"%s.pyx\" % (target_pydevd_name,))\n new_c_file = os.path.join(os.path.dirname(__file__), dir_name, \"%s.c\" % (target_pydevd_name,))\n shutil.copy(pyx_file, new_pyx_file)\n pyx_file = new_pyx_file\n if has_pxd:\n pxd_file = os.path.join(os.path.dirname(__file__), dir_name, \"%s.pxd\" % (extension_name,))\n new_pxd_file = os.path.join(os.path.dirname(__file__), dir_name, \"%s.pxd\" % (target_pydevd_name,))\n shutil.copy(pxd_file, new_pxd_file)\n assert os.path.exists(pyx_file)\n\n try:\n if force_cython:\n from Cython.Build import cythonize # @UnusedImport\n # Generate the .c files in cythonize (will not compile at this point).\n cythonize([\n \"%s/%s.pyx\" % (dir_name, target_pydevd_name,),\n ])\n\n # This is needed in CPython 3.8 to access PyInterpreterState.eval_frame.\n # i.e.: we change #include \"pystate.h\" to also #include \"internal/pycore_pystate.h\"\n # if compiling on Python 3.8.\n c_files = [os.path.join(dir_name, \"%s.c\" % target_pydevd_name), ]\n for c_file in c_files:\n with open(c_file, 'r') as stream:\n c_file_contents = stream.read()\n\n c_file_contents = c_file_contents.replace('#include \"pystate.h\"', '''#include \"pystate.h\"\n#if PY_VERSION_HEX >= 0x03080000\n#include \"internal/pycore_pystate.h\"\n#endif\n''')\n c_file_contents = c_file_contents.replace('\\r\\n', '\\n').replace('\\r', '\\n')\n\n with open(c_file, 'w') as stream:\n stream.write(c_file_contents)\n\n # Always compile the .c (and not the .pyx) file (which we should keep up-to-date by running build_tools/build.py).\n from distutils.extension import Extension\n ext_modules = [Extension(\"%s%s.%s\" % (dir_name, \"_ext\" if extended else \"\", target_pydevd_name,),\n c_files,\n # uncomment to generate pdbs for visual studio.\n # extra_compile_args=[\"-Zi\", \"/Od\"],\n # extra_link_args=[\"-debug\"],\n )]\n\n # This is needed in CPython 3.8 to be able to include internal/pycore_pystate.h\n # (needed to set PyInterpreterState.eval_frame).\n for module in ext_modules:\n module.define_macros = [('Py_BUILD_CORE_MODULE', '1')]\n setup(\n name='Cythonize',\n ext_modules=ext_modules\n )\n finally:\n if target_pydevd_name != extension_name:\n try:\n os.remove(new_pyx_file)\n except:\n import traceback\n traceback.print_exc()\n try:\n os.remove(new_c_file)\n except:\n import traceback\n traceback.print_exc()\n if has_pxd:\n try:\n os.remove(new_pxd_file)\n except:\n import traceback\n traceback.print_exc()\n\n\nextension_folder, target_pydevd_name, target_frame_eval, force_cython = process_args()\n\nextension_name = \"pydevd_cython\"\nif target_pydevd_name is None:\n target_pydevd_name = extension_name\nbuild_extension(\"_pydevd_bundle\", extension_name, target_pydevd_name, force_cython, 
extension_folder, True)\n\nif IS_PY36_OR_GREATER:\n extension_name = \"pydevd_frame_evaluator\"\n if target_frame_eval is None:\n target_frame_eval = extension_name\n build_extension(\"_pydevd_frame_eval\", extension_name, target_frame_eval, force_cython, extension_folder, True)\n\nif extension_folder:\n os.chdir(extension_folder)\n for folder in [file for file in os.listdir(extension_folder) if\n file != 'build' and os.path.isdir(os.path.join(extension_folder, file))]:\n file = os.path.join(folder, \"__init__.py\")\n if not os.path.exists(file):\n open(file, 'a').close()\n", "path": "src/ptvsd/_vendored/pydevd/setup_cython.py"}]}
| 2,395 | 632 |
gh_patches_debug_9595
|
rasdani/github-patches
|
git_diff
|
mirumee__ariadne-824
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Use `execute_sync` in `graphql_sync`
`graphql_sync` should use `execute_sync` instead of `execute`.
I run my code in a synchronous context, but a resolver was identified as asynchronous due to the fact that GraphQL core does a simple `hasattr(value, "__await__")` check instead of using `inspect.isawaitable`. This caused a false positive.
However, when using `graphql_sync` I expect that this check does not even happen. Consider using [`execute_sync`](https://github.com/graphql-python/graphql-core/blob/27768d3faac5bb8ebb2c6a151a7192b7fa498f2f/src/graphql/execution/execute.py#L1061) instead.
</issue>
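A minimal sketch of how such a false positive can arise (stdlib only; `LazyProxy` is a made-up class, not anything from GraphQL core): an object can expose an `__await__` attribute without actually satisfying the awaitable protocol.

```python
from inspect import isawaitable

class LazyProxy:
    """Toy object that carries an ``__await__`` attribute but is not awaitable."""
    def __init__(self):
        self.__await__ = None  # instance attribute only, not a real coroutine protocol

value = LazyProxy()
print(hasattr(value, "__await__"))  # True  -> a plain hasattr() check treats it as awaitable
print(isawaitable(value))           # False -> inspect.isawaitable() rejects it
```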
<code>
[start of ariadne/graphql.py]
1 from asyncio import ensure_future
2 from inspect import isawaitable
3 from typing import (
4 Any,
5 AsyncGenerator,
6 Awaitable,
7 Collection,
8 List,
9 Optional,
10 Sequence,
11 Type,
12 cast,
13 )
14
15 from graphql import (
16 DocumentNode,
17 ExecutionContext,
18 ExecutionResult,
19 GraphQLError,
20 GraphQLSchema,
21 TypeInfo,
22 execute,
23 parse,
24 subscribe as _subscribe,
25 )
26 from graphql.execution import MiddlewareManager
27 from graphql.validation import specified_rules, validate
28 from graphql.validation.rules import ASTValidationRule
29
30 from .extensions import ExtensionManager
31 from .format_error import format_error
32 from .logger import log_error
33 from .types import (
34 ErrorFormatter,
35 ExtensionList,
36 GraphQLResult,
37 RootValue,
38 SubscriptionResult,
39 ValidationRules,
40 )
41 from .validation.introspection_disabled import IntrospectionDisabledRule
42
43
44 async def graphql(
45 schema: GraphQLSchema,
46 data: Any,
47 *,
48 context_value: Optional[Any] = None,
49 root_value: Optional[RootValue] = None,
50 debug: bool = False,
51 introspection: bool = True,
52 logger: Optional[str] = None,
53 validation_rules: Optional[ValidationRules] = None,
54 error_formatter: ErrorFormatter = format_error,
55 middleware: Optional[MiddlewareManager] = None,
56 extensions: ExtensionList = None,
57 **kwargs,
58 ) -> GraphQLResult:
59 extension_manager = ExtensionManager(extensions, context_value)
60
61 with extension_manager.request():
62 try:
63 validate_data(data)
64 query, variables, operation_name = (
65 data["query"],
66 data.get("variables"),
67 data.get("operationName"),
68 )
69
70 document = parse_query(query)
71
72 if callable(validation_rules):
73 validation_rules = cast(
74 Optional[Collection[Type[ASTValidationRule]]],
75 validation_rules(context_value, document, data),
76 )
77
78 validation_errors = validate_query(
79 schema, document, validation_rules, enable_introspection=introspection
80 )
81 if validation_errors:
82 return handle_graphql_errors(
83 validation_errors,
84 logger=logger,
85 error_formatter=error_formatter,
86 debug=debug,
87 extension_manager=extension_manager,
88 )
89
90 if callable(root_value):
91 root_value = root_value(context_value, document)
92 if isawaitable(root_value):
93 root_value = await root_value
94
95 result = execute(
96 schema,
97 document,
98 root_value=root_value,
99 context_value=context_value,
100 variable_values=variables,
101 operation_name=operation_name,
102 execution_context_class=ExecutionContext,
103 middleware=extension_manager.as_middleware_manager(middleware),
104 **kwargs,
105 )
106
107 if isawaitable(result):
108 result = await cast(Awaitable[ExecutionResult], result)
109 except GraphQLError as error:
110 return handle_graphql_errors(
111 [error],
112 logger=logger,
113 error_formatter=error_formatter,
114 debug=debug,
115 extension_manager=extension_manager,
116 )
117 else:
118 return handle_query_result(
119 result,
120 logger=logger,
121 error_formatter=error_formatter,
122 debug=debug,
123 extension_manager=extension_manager,
124 )
125
126
127 def graphql_sync(
128 schema: GraphQLSchema,
129 data: Any,
130 *,
131 context_value: Optional[Any] = None,
132 root_value: Optional[RootValue] = None,
133 debug: bool = False,
134 introspection: bool = True,
135 logger: Optional[str] = None,
136 validation_rules: Optional[ValidationRules] = None,
137 error_formatter: ErrorFormatter = format_error,
138 middleware: Optional[MiddlewareManager] = None,
139 extensions: ExtensionList = None,
140 **kwargs,
141 ) -> GraphQLResult:
142 extension_manager = ExtensionManager(extensions, context_value)
143
144 with extension_manager.request():
145 try:
146 validate_data(data)
147 query, variables, operation_name = (
148 data["query"],
149 data.get("variables"),
150 data.get("operationName"),
151 )
152
153 document = parse_query(query)
154
155 if callable(validation_rules):
156 validation_rules = cast(
157 Optional[Collection[Type[ASTValidationRule]]],
158 validation_rules(context_value, document, data),
159 )
160
161 validation_errors = validate_query(
162 schema, document, validation_rules, enable_introspection=introspection
163 )
164 if validation_errors:
165 return handle_graphql_errors(
166 validation_errors,
167 logger=logger,
168 error_formatter=error_formatter,
169 debug=debug,
170 extension_manager=extension_manager,
171 )
172
173 if callable(root_value):
174 root_value = root_value(context_value, document)
175 if isawaitable(root_value):
176 ensure_future(root_value).cancel()
177 raise RuntimeError(
178 "Root value resolver can't be asynchronous "
179 "in synchronous query executor."
180 )
181
182 result = execute(
183 schema,
184 document,
185 root_value=root_value,
186 context_value=context_value,
187 variable_values=variables,
188 operation_name=operation_name,
189 execution_context_class=ExecutionContext,
190 middleware=extension_manager.as_middleware_manager(middleware),
191 **kwargs,
192 )
193
194 if isawaitable(result):
195 ensure_future(cast(Awaitable[ExecutionResult], result)).cancel()
196 raise RuntimeError(
197 "GraphQL execution failed to complete synchronously."
198 )
199 except GraphQLError as error:
200 return handle_graphql_errors(
201 [error],
202 logger=logger,
203 error_formatter=error_formatter,
204 debug=debug,
205 extension_manager=extension_manager,
206 )
207 else:
208 return handle_query_result(
209 result,
210 logger=logger,
211 error_formatter=error_formatter,
212 debug=debug,
213 extension_manager=extension_manager,
214 )
215
216
217 async def subscribe(
218 schema: GraphQLSchema,
219 data: Any,
220 *,
221 context_value: Optional[Any] = None,
222 root_value: Optional[RootValue] = None,
223 debug: bool = False,
224 introspection: bool = True,
225 logger: Optional[str] = None,
226 validation_rules: Optional[ValidationRules] = None,
227 error_formatter: ErrorFormatter = format_error,
228 **kwargs,
229 ) -> SubscriptionResult:
230 try:
231 validate_data(data)
232 query, variables, operation_name = (
233 data["query"],
234 data.get("variables"),
235 data.get("operationName"),
236 )
237
238 document = parse_query(query)
239
240 if callable(validation_rules):
241 validation_rules = cast(
242 Optional[Collection[Type[ASTValidationRule]]],
243 validation_rules(context_value, document, data),
244 )
245
246 validation_errors = validate_query(
247 schema, document, validation_rules, enable_introspection=introspection
248 )
249 if validation_errors:
250 for error_ in validation_errors: # mypy issue #5080
251 log_error(error_, logger)
252 return (
253 False,
254 [error_formatter(error, debug) for error in validation_errors],
255 )
256
257 if callable(root_value):
258 root_value = root_value(context_value, document)
259 if isawaitable(root_value):
260 root_value = await root_value
261
262 result = await _subscribe(
263 schema,
264 document,
265 root_value=root_value,
266 context_value=context_value,
267 variable_values=variables,
268 operation_name=operation_name,
269 **kwargs,
270 )
271 except GraphQLError as error:
272 log_error(error, logger)
273 return False, [error_formatter(error, debug)]
274 else:
275 if isinstance(result, ExecutionResult):
276 errors = cast(List[GraphQLError], result.errors)
277 for error_ in errors: # mypy issue #5080
278 log_error(error_, logger)
279 return False, [error_formatter(error, debug) for error in errors]
280 return True, cast(AsyncGenerator, result)
281
282
283 def handle_query_result(
284 result, *, logger, error_formatter, debug, extension_manager=None
285 ) -> GraphQLResult:
286 response = {"data": result.data}
287 if result.errors:
288 for error in result.errors:
289 log_error(error, logger)
290 response["errors"] = [error_formatter(error, debug) for error in result.errors]
291
292 if extension_manager:
293 if result.errors:
294 extension_manager.has_errors(result.errors)
295 add_extensions_to_response(extension_manager, response)
296 return True, response
297
298
299 def handle_graphql_errors(
300 errors: Sequence[GraphQLError],
301 *,
302 logger,
303 error_formatter,
304 debug,
305 extension_manager=None,
306 ) -> GraphQLResult:
307 for error in errors:
308 log_error(error, logger)
309 response = {"errors": [error_formatter(error, debug) for error in errors]}
310 if extension_manager:
311 extension_manager.has_errors(errors)
312 add_extensions_to_response(extension_manager, response)
313 return False, response
314
315
316 def parse_query(query):
317 try:
318 return parse(query)
319 except GraphQLError as error:
320 raise error
321 except Exception as error:
322 raise GraphQLError(str(error), original_error=error) from error
323
324
325 def add_extensions_to_response(extension_manager: ExtensionManager, response: dict):
326 formatted_extensions = extension_manager.format()
327 if formatted_extensions:
328 if "extensions" in response:
329 response["extensions"].update(formatted_extensions)
330 else:
331 response["extensions"] = formatted_extensions
332
333
334 def validate_query(
335 schema: GraphQLSchema,
336 document_ast: DocumentNode,
337 rules: Optional[Collection[Type[ASTValidationRule]]] = None,
338 max_errors: Optional[int] = None,
339 type_info: Optional[TypeInfo] = None,
340 enable_introspection: bool = True,
341 ) -> List[GraphQLError]:
342 if not enable_introspection:
343 rules = (
344 tuple(rules) + (IntrospectionDisabledRule,)
345 if rules is not None
346 else (IntrospectionDisabledRule,)
347 )
348 if rules:
349 # run validation against rules from spec and custom rules
350 supplemented_rules = specified_rules + tuple(rules)
351 return validate(
352 schema,
353 document_ast,
354 rules=supplemented_rules,
355 max_errors=max_errors,
356 type_info=type_info,
357 )
358 # run validation using spec rules only
359 return validate(schema, document_ast, rules=specified_rules, type_info=type_info)
360
361
362 def validate_data(data: Optional[dict]) -> None:
363 if not isinstance(data, dict):
364 raise GraphQLError("Operation data should be a JSON object")
365 validate_query_body(data.get("query"))
366 validate_variables(data.get("variables"))
367 validate_operation_name(data.get("operationName"))
368
369
370 def validate_query_body(query) -> None:
371 if not query or not isinstance(query, str):
372 raise GraphQLError("The query must be a string.")
373
374
375 def validate_variables(variables) -> None:
376 if variables is not None and not isinstance(variables, dict):
377 raise GraphQLError("Query variables must be a null or an object.")
378
379
380 def validate_operation_name(operation_name) -> None:
381 if operation_name is not None and not isinstance(operation_name, str):
382 raise GraphQLError('"%s" is not a valid operation name.' % operation_name)
383
[end of ariadne/graphql.py]
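For reference, a minimal end-to-end use of the synchronous entry point defined above (illustrative only; assumes Ariadne is installed and relies on its documented `QueryType`/`make_executable_schema` helpers):

```python
from ariadne import QueryType, graphql_sync, make_executable_schema

type_defs = """
    type Query {
        hello: String!
    }
"""

query = QueryType()

@query.field("hello")
def resolve_hello(*_):
    return "Hello world!"

schema = make_executable_schema(type_defs, query)
success, result = graphql_sync(schema, {"query": "{ hello }"})
print(success, result)  # True {'data': {'hello': 'Hello world!'}}
```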
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/ariadne/graphql.py b/ariadne/graphql.py
--- a/ariadne/graphql.py
+++ b/ariadne/graphql.py
@@ -20,6 +20,7 @@
GraphQLSchema,
TypeInfo,
execute,
+ execute_sync,
parse,
subscribe as _subscribe,
)
@@ -179,7 +180,7 @@
"in synchronous query executor."
)
- result = execute(
+ result = execute_sync(
schema,
document,
root_value=root_value,
|
{"golden_diff": "diff --git a/ariadne/graphql.py b/ariadne/graphql.py\n--- a/ariadne/graphql.py\n+++ b/ariadne/graphql.py\n@@ -20,6 +20,7 @@\n GraphQLSchema,\n TypeInfo,\n execute,\n+ execute_sync,\n parse,\n subscribe as _subscribe,\n )\n@@ -179,7 +180,7 @@\n \"in synchronous query executor.\"\n )\n \n- result = execute(\n+ result = execute_sync(\n schema,\n document,\n root_value=root_value,\n", "issue": "Use `execute_sync` in `graphql_sync`\n`graphql_sync` should use `execute_sync` instead of `execute`.\r\n\r\nI run my code in a synchronous context, but a resolver was identified as asynchronous due to the fact that GraphQL core does a simple `hasattr(value, \"__await__\")` check instead of using `inspect.isawaitable`. This caused a false positive.\r\n\r\nHowever, when using `graphql_sync` I expect that this check does not even happen. Consider using [`execute_sync`](https://github.com/graphql-python/graphql-core/blob/27768d3faac5bb8ebb2c6a151a7192b7fa498f2f/src/graphql/execution/execute.py#L1061) instead.\n", "before_files": [{"content": "from asyncio import ensure_future\nfrom inspect import isawaitable\nfrom typing import (\n Any,\n AsyncGenerator,\n Awaitable,\n Collection,\n List,\n Optional,\n Sequence,\n Type,\n cast,\n)\n\nfrom graphql import (\n DocumentNode,\n ExecutionContext,\n ExecutionResult,\n GraphQLError,\n GraphQLSchema,\n TypeInfo,\n execute,\n parse,\n subscribe as _subscribe,\n)\nfrom graphql.execution import MiddlewareManager\nfrom graphql.validation import specified_rules, validate\nfrom graphql.validation.rules import ASTValidationRule\n\nfrom .extensions import ExtensionManager\nfrom .format_error import format_error\nfrom .logger import log_error\nfrom .types import (\n ErrorFormatter,\n ExtensionList,\n GraphQLResult,\n RootValue,\n SubscriptionResult,\n ValidationRules,\n)\nfrom .validation.introspection_disabled import IntrospectionDisabledRule\n\n\nasync def graphql(\n schema: GraphQLSchema,\n data: Any,\n *,\n context_value: Optional[Any] = None,\n root_value: Optional[RootValue] = None,\n debug: bool = False,\n introspection: bool = True,\n logger: Optional[str] = None,\n validation_rules: Optional[ValidationRules] = None,\n error_formatter: ErrorFormatter = format_error,\n middleware: Optional[MiddlewareManager] = None,\n extensions: ExtensionList = None,\n **kwargs,\n) -> GraphQLResult:\n extension_manager = ExtensionManager(extensions, context_value)\n\n with extension_manager.request():\n try:\n validate_data(data)\n query, variables, operation_name = (\n data[\"query\"],\n data.get(\"variables\"),\n data.get(\"operationName\"),\n )\n\n document = parse_query(query)\n\n if callable(validation_rules):\n validation_rules = cast(\n Optional[Collection[Type[ASTValidationRule]]],\n validation_rules(context_value, document, data),\n )\n\n validation_errors = validate_query(\n schema, document, validation_rules, enable_introspection=introspection\n )\n if validation_errors:\n return handle_graphql_errors(\n validation_errors,\n logger=logger,\n error_formatter=error_formatter,\n debug=debug,\n extension_manager=extension_manager,\n )\n\n if callable(root_value):\n root_value = root_value(context_value, document)\n if isawaitable(root_value):\n root_value = await root_value\n\n result = execute(\n schema,\n document,\n root_value=root_value,\n context_value=context_value,\n variable_values=variables,\n operation_name=operation_name,\n execution_context_class=ExecutionContext,\n middleware=extension_manager.as_middleware_manager(middleware),\n **kwargs,\n )\n\n 
if isawaitable(result):\n result = await cast(Awaitable[ExecutionResult], result)\n except GraphQLError as error:\n return handle_graphql_errors(\n [error],\n logger=logger,\n error_formatter=error_formatter,\n debug=debug,\n extension_manager=extension_manager,\n )\n else:\n return handle_query_result(\n result,\n logger=logger,\n error_formatter=error_formatter,\n debug=debug,\n extension_manager=extension_manager,\n )\n\n\ndef graphql_sync(\n schema: GraphQLSchema,\n data: Any,\n *,\n context_value: Optional[Any] = None,\n root_value: Optional[RootValue] = None,\n debug: bool = False,\n introspection: bool = True,\n logger: Optional[str] = None,\n validation_rules: Optional[ValidationRules] = None,\n error_formatter: ErrorFormatter = format_error,\n middleware: Optional[MiddlewareManager] = None,\n extensions: ExtensionList = None,\n **kwargs,\n) -> GraphQLResult:\n extension_manager = ExtensionManager(extensions, context_value)\n\n with extension_manager.request():\n try:\n validate_data(data)\n query, variables, operation_name = (\n data[\"query\"],\n data.get(\"variables\"),\n data.get(\"operationName\"),\n )\n\n document = parse_query(query)\n\n if callable(validation_rules):\n validation_rules = cast(\n Optional[Collection[Type[ASTValidationRule]]],\n validation_rules(context_value, document, data),\n )\n\n validation_errors = validate_query(\n schema, document, validation_rules, enable_introspection=introspection\n )\n if validation_errors:\n return handle_graphql_errors(\n validation_errors,\n logger=logger,\n error_formatter=error_formatter,\n debug=debug,\n extension_manager=extension_manager,\n )\n\n if callable(root_value):\n root_value = root_value(context_value, document)\n if isawaitable(root_value):\n ensure_future(root_value).cancel()\n raise RuntimeError(\n \"Root value resolver can't be asynchronous \"\n \"in synchronous query executor.\"\n )\n\n result = execute(\n schema,\n document,\n root_value=root_value,\n context_value=context_value,\n variable_values=variables,\n operation_name=operation_name,\n execution_context_class=ExecutionContext,\n middleware=extension_manager.as_middleware_manager(middleware),\n **kwargs,\n )\n\n if isawaitable(result):\n ensure_future(cast(Awaitable[ExecutionResult], result)).cancel()\n raise RuntimeError(\n \"GraphQL execution failed to complete synchronously.\"\n )\n except GraphQLError as error:\n return handle_graphql_errors(\n [error],\n logger=logger,\n error_formatter=error_formatter,\n debug=debug,\n extension_manager=extension_manager,\n )\n else:\n return handle_query_result(\n result,\n logger=logger,\n error_formatter=error_formatter,\n debug=debug,\n extension_manager=extension_manager,\n )\n\n\nasync def subscribe(\n schema: GraphQLSchema,\n data: Any,\n *,\n context_value: Optional[Any] = None,\n root_value: Optional[RootValue] = None,\n debug: bool = False,\n introspection: bool = True,\n logger: Optional[str] = None,\n validation_rules: Optional[ValidationRules] = None,\n error_formatter: ErrorFormatter = format_error,\n **kwargs,\n) -> SubscriptionResult:\n try:\n validate_data(data)\n query, variables, operation_name = (\n data[\"query\"],\n data.get(\"variables\"),\n data.get(\"operationName\"),\n )\n\n document = parse_query(query)\n\n if callable(validation_rules):\n validation_rules = cast(\n Optional[Collection[Type[ASTValidationRule]]],\n validation_rules(context_value, document, data),\n )\n\n validation_errors = validate_query(\n schema, document, validation_rules, enable_introspection=introspection\n )\n if 
validation_errors:\n for error_ in validation_errors: # mypy issue #5080\n log_error(error_, logger)\n return (\n False,\n [error_formatter(error, debug) for error in validation_errors],\n )\n\n if callable(root_value):\n root_value = root_value(context_value, document)\n if isawaitable(root_value):\n root_value = await root_value\n\n result = await _subscribe(\n schema,\n document,\n root_value=root_value,\n context_value=context_value,\n variable_values=variables,\n operation_name=operation_name,\n **kwargs,\n )\n except GraphQLError as error:\n log_error(error, logger)\n return False, [error_formatter(error, debug)]\n else:\n if isinstance(result, ExecutionResult):\n errors = cast(List[GraphQLError], result.errors)\n for error_ in errors: # mypy issue #5080\n log_error(error_, logger)\n return False, [error_formatter(error, debug) for error in errors]\n return True, cast(AsyncGenerator, result)\n\n\ndef handle_query_result(\n result, *, logger, error_formatter, debug, extension_manager=None\n) -> GraphQLResult:\n response = {\"data\": result.data}\n if result.errors:\n for error in result.errors:\n log_error(error, logger)\n response[\"errors\"] = [error_formatter(error, debug) for error in result.errors]\n\n if extension_manager:\n if result.errors:\n extension_manager.has_errors(result.errors)\n add_extensions_to_response(extension_manager, response)\n return True, response\n\n\ndef handle_graphql_errors(\n errors: Sequence[GraphQLError],\n *,\n logger,\n error_formatter,\n debug,\n extension_manager=None,\n) -> GraphQLResult:\n for error in errors:\n log_error(error, logger)\n response = {\"errors\": [error_formatter(error, debug) for error in errors]}\n if extension_manager:\n extension_manager.has_errors(errors)\n add_extensions_to_response(extension_manager, response)\n return False, response\n\n\ndef parse_query(query):\n try:\n return parse(query)\n except GraphQLError as error:\n raise error\n except Exception as error:\n raise GraphQLError(str(error), original_error=error) from error\n\n\ndef add_extensions_to_response(extension_manager: ExtensionManager, response: dict):\n formatted_extensions = extension_manager.format()\n if formatted_extensions:\n if \"extensions\" in response:\n response[\"extensions\"].update(formatted_extensions)\n else:\n response[\"extensions\"] = formatted_extensions\n\n\ndef validate_query(\n schema: GraphQLSchema,\n document_ast: DocumentNode,\n rules: Optional[Collection[Type[ASTValidationRule]]] = None,\n max_errors: Optional[int] = None,\n type_info: Optional[TypeInfo] = None,\n enable_introspection: bool = True,\n) -> List[GraphQLError]:\n if not enable_introspection:\n rules = (\n tuple(rules) + (IntrospectionDisabledRule,)\n if rules is not None\n else (IntrospectionDisabledRule,)\n )\n if rules:\n # run validation against rules from spec and custom rules\n supplemented_rules = specified_rules + tuple(rules)\n return validate(\n schema,\n document_ast,\n rules=supplemented_rules,\n max_errors=max_errors,\n type_info=type_info,\n )\n # run validation using spec rules only\n return validate(schema, document_ast, rules=specified_rules, type_info=type_info)\n\n\ndef validate_data(data: Optional[dict]) -> None:\n if not isinstance(data, dict):\n raise GraphQLError(\"Operation data should be a JSON object\")\n validate_query_body(data.get(\"query\"))\n validate_variables(data.get(\"variables\"))\n validate_operation_name(data.get(\"operationName\"))\n\n\ndef validate_query_body(query) -> None:\n if not query or not isinstance(query, str):\n raise 
GraphQLError(\"The query must be a string.\")\n\n\ndef validate_variables(variables) -> None:\n if variables is not None and not isinstance(variables, dict):\n raise GraphQLError(\"Query variables must be a null or an object.\")\n\n\ndef validate_operation_name(operation_name) -> None:\n if operation_name is not None and not isinstance(operation_name, str):\n raise GraphQLError('\"%s\" is not a valid operation name.' % operation_name)\n", "path": "ariadne/graphql.py"}]}
| 4,073 | 122 |
gh_patches_debug_64988
|
rasdani/github-patches
|
git_diff
|
mampfes__hacs_waste_collection_schedule-911
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
(Berliner Stadtreinigung) BSR not working
Hi there, since about a day or two BSR integration isn't pulling data into HA.
Any idea what might be wrong? Just FYI the data from abfall.io (ALBA Berlin) is working just fine.
Sorry for not posting code / log, but I'm unsure, what I should be posting.
</issue>
<code>
[start of custom_components/waste_collection_schedule/waste_collection_schedule/source/bsr_de.py]
1 import urllib.parse
2
3 import requests
4 from waste_collection_schedule import Collection # type: ignore[attr-defined]
5 from waste_collection_schedule.service.ICS import ICS
6
7 TITLE = "Berliner Stadtreinigungsbetriebe"
8 DESCRIPTION = "Source for Berliner Stadtreinigungsbetriebe waste collection."
9 URL = "https://bsr.de"
10 TEST_CASES = {
11 "Bahnhofstr., 12159 Berlin (Tempelhof-Schöneberg)": {
12 "abf_strasse": "Bahnhofstr., 12159 Berlin (Tempelhof-Schöneberg)",
13 "abf_hausnr": 1,
14 },
15 "Am Ried, 13467 Berlin (Reinickendorf)": {
16 "abf_strasse": "Am Ried, 13467 Berlin (Reinickendorf)",
17 "abf_hausnr": "11G",
18 },
19 }
20
21
22 def myquote(s):
23 # bsr uses strange quoting
24 return urllib.parse.quote(s, safe=",()")
25
26
27 class Source:
28 def __init__(self, abf_strasse, abf_hausnr):
29 self._abf_strasse = abf_strasse
30 self._abf_hausnr = abf_hausnr
31 self._ics = ICS()
32
33 def fetch(self):
34 # get cookie
35 r = requests.get("https://www.bsr.de/abfuhrkalender-20520.php")
36 cookies = r.cookies
37
38 # get street name only (without PLZ)
39 street = self._abf_strasse.split(",")[0]
40
41 # start search using string name (without PLZ)
42 args = {"script": "dynamic_search", "step": 1, "q": street}
43 r = requests.get(
44 "https://www.bsr.de/abfuhrkalender_ajax.php", params=args, cookies=cookies
45 )
46
47 # retrieve house number list
48 args = {"script": "dynamic_search", "step": 2, "q": self._abf_strasse}
49 r = requests.get(
50 "https://www.bsr.de/abfuhrkalender_ajax.php", params=args, cookies=cookies
51 )
52
53 args = {
54 "abf_strasse": street,
55 "abf_hausnr": self._abf_hausnr,
56 "tab_control": "Jahr",
57 "abf_config_weihnachtsbaeume": "",
58 "abf_config_restmuell": "on",
59 "abf_config_biogut": "on",
60 "abf_config_wertstoffe": "on",
61 "abf_config_laubtonne": "on",
62 # "abf_selectmonth": "5 2020",
63 # "abf_datepicker": "28.04.2020",
64 # "listitems":7,
65 }
66 r = requests.post(
67 "https://www.bsr.de/abfuhrkalender_ajax.php?script=dynamic_kalender_ajax",
68 data=args,
69 cookies=cookies,
70 )
71
72 args = {
73 "script": "dynamic_iCal_ajax",
74 "abf_strasse": self._abf_strasse,
75 "abf_hausnr": self._abf_hausnr,
76 "tab_control": "Jahr",
77 "abf_config_weihnachtsbaeume": "",
78 "abf_config_restmuell": "on",
79 "abf_config_biogut": "on",
80 "abf_config_wertstoffe": "on",
81 "abf_config_laubtonne": "on",
82 # "abf_selectmonth": "5 2020",
83 # "listitems":7,
84 }
85
86 # create url using private url encoding
87 encoded = map(lambda key: f"{key}={myquote(str(args[key]))}", args.keys())
88 url = "https://www.bsr.de/abfuhrkalender_ajax.php?" + "&".join(encoded)
89 r = requests.get(url, cookies=cookies)
90
91 # parse ics file
92 dates = self._ics.convert(r.text)
93
94 entries = []
95 for d in dates:
96 entries.append(Collection(d[0], d[1]))
97 return entries
98
[end of custom_components/waste_collection_schedule/waste_collection_schedule/source/bsr_de.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/bsr_de.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/bsr_de.py
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/bsr_de.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/bsr_de.py
@@ -28,7 +28,7 @@
def __init__(self, abf_strasse, abf_hausnr):
self._abf_strasse = abf_strasse
self._abf_hausnr = abf_hausnr
- self._ics = ICS()
+ self._ics = ICS(offset=1)
def fetch(self):
# get cookie
|
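The one-line fix above passes `offset=1` to the `ICS` helper so every parsed collection date is shifted forward by one day. A rough sketch of the intended effect, assuming the helper applies the offset while converting events — the function below is illustrative, not the library's actual implementation:

```python
from datetime import date, timedelta

def apply_offset(events, offset=1):
    # events: list of (date, waste_type) pairs parsed from the iCal feed.
    # Shifting each date by `offset` days mirrors what ICS(offset=1) is
    # expected to do inside convert().
    return [(d + timedelta(days=offset), kind) for d, kind in events]

# Example: a pickup reported for 2019-05-20 would be surfaced as 2019-05-21.
print(apply_offset([(date(2019, 5, 20), "Restmüll")]))
```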
{"golden_diff": "diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/bsr_de.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/bsr_de.py\n--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/bsr_de.py\n+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/bsr_de.py\n@@ -28,7 +28,7 @@\n def __init__(self, abf_strasse, abf_hausnr):\n self._abf_strasse = abf_strasse\n self._abf_hausnr = abf_hausnr\n- self._ics = ICS()\n+ self._ics = ICS(offset=1)\n \n def fetch(self):\n # get cookie\n", "issue": "(Berliner Stadtreinigung) BSR not working\nHi there, since about a day or two BSR integration isn't pulling data into HA. \r\nAny idea what might be wrong? Just FYI the data from abfall.io (ALBA Berlin) is working just fine. \r\n\r\nSorry for not posting code / log, but I'm unsure, what I should be posting. \n", "before_files": [{"content": "import urllib.parse\n\nimport requests\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\nfrom waste_collection_schedule.service.ICS import ICS\n\nTITLE = \"Berliner Stadtreinigungsbetriebe\"\nDESCRIPTION = \"Source for Berliner Stadtreinigungsbetriebe waste collection.\"\nURL = \"https://bsr.de\"\nTEST_CASES = {\n \"Bahnhofstr., 12159 Berlin (Tempelhof-Sch\u00f6neberg)\": {\n \"abf_strasse\": \"Bahnhofstr., 12159 Berlin (Tempelhof-Sch\u00f6neberg)\",\n \"abf_hausnr\": 1,\n },\n \"Am Ried, 13467 Berlin (Reinickendorf)\": {\n \"abf_strasse\": \"Am Ried, 13467 Berlin (Reinickendorf)\",\n \"abf_hausnr\": \"11G\",\n },\n}\n\n\ndef myquote(s):\n # bsr uses strange quoting\n return urllib.parse.quote(s, safe=\",()\")\n\n\nclass Source:\n def __init__(self, abf_strasse, abf_hausnr):\n self._abf_strasse = abf_strasse\n self._abf_hausnr = abf_hausnr\n self._ics = ICS()\n\n def fetch(self):\n # get cookie\n r = requests.get(\"https://www.bsr.de/abfuhrkalender-20520.php\")\n cookies = r.cookies\n\n # get street name only (without PLZ)\n street = self._abf_strasse.split(\",\")[0]\n\n # start search using string name (without PLZ)\n args = {\"script\": \"dynamic_search\", \"step\": 1, \"q\": street}\n r = requests.get(\n \"https://www.bsr.de/abfuhrkalender_ajax.php\", params=args, cookies=cookies\n )\n\n # retrieve house number list\n args = {\"script\": \"dynamic_search\", \"step\": 2, \"q\": self._abf_strasse}\n r = requests.get(\n \"https://www.bsr.de/abfuhrkalender_ajax.php\", params=args, cookies=cookies\n )\n\n args = {\n \"abf_strasse\": street,\n \"abf_hausnr\": self._abf_hausnr,\n \"tab_control\": \"Jahr\",\n \"abf_config_weihnachtsbaeume\": \"\",\n \"abf_config_restmuell\": \"on\",\n \"abf_config_biogut\": \"on\",\n \"abf_config_wertstoffe\": \"on\",\n \"abf_config_laubtonne\": \"on\",\n # \"abf_selectmonth\": \"5 2020\",\n # \"abf_datepicker\": \"28.04.2020\",\n # \"listitems\":7,\n }\n r = requests.post(\n \"https://www.bsr.de/abfuhrkalender_ajax.php?script=dynamic_kalender_ajax\",\n data=args,\n cookies=cookies,\n )\n\n args = {\n \"script\": \"dynamic_iCal_ajax\",\n \"abf_strasse\": self._abf_strasse,\n \"abf_hausnr\": self._abf_hausnr,\n \"tab_control\": \"Jahr\",\n \"abf_config_weihnachtsbaeume\": \"\",\n \"abf_config_restmuell\": \"on\",\n \"abf_config_biogut\": \"on\",\n \"abf_config_wertstoffe\": \"on\",\n \"abf_config_laubtonne\": \"on\",\n # \"abf_selectmonth\": \"5 2020\",\n # \"listitems\":7,\n }\n\n # create url using private url encoding\n encoded = map(lambda key: 
f\"{key}={myquote(str(args[key]))}\", args.keys())\n url = \"https://www.bsr.de/abfuhrkalender_ajax.php?\" + \"&\".join(encoded)\n r = requests.get(url, cookies=cookies)\n\n # parse ics file\n dates = self._ics.convert(r.text)\n\n entries = []\n for d in dates:\n entries.append(Collection(d[0], d[1]))\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/bsr_de.py"}]}
| 1,767 | 166 |
gh_patches_debug_24298
|
rasdani/github-patches
|
git_diff
|
airctic__icevision-71
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Hub Detr fine tuning
Following the first relase of Detr on hub, it would be a very good idea to support fine tuning.
[this](https://github.com/facebookresearch/detr/issues/9) thread should be helpful, and [this](https://gist.github.com/mlk1337/651297e28199b4bb7907fc413c49f58f) gist has the high level overview on how to implement it.
</issue>
<code>
[start of examples/detr_wheat.py]
1 import pandas as pd
2 from mantisshrimp.imports import *
3 from mantisshrimp.hub.detr import *
4
5
6 class WheatParser(DetrBBoxParser):
7 def __init__(self, df, source):
8 self.df = df
9 self.source = source
10 self.imageid_map = IDMap()
11
12 def __iter__(self):
13 yield from self.df.itertuples()
14
15 def __len__(self):
16 return len(self.df)
17
18 def prepare(self, o):
19 self.bbox = BBox.from_xywh(*np.fromstring(o.bbox[1:-1], sep=","))
20
21 def imageid(self, o) -> int:
22 return self.imageid_map[o.image_id]
23
24 def filepath(self, o) -> Union[str, Path]:
25 return self.source / f"{o.image_id}.jpg"
26
27 def height(self, o) -> int:
28 return o.height
29
30 def width(self, o) -> int:
31 return o.width
32
33 def label(self, o) -> int:
34 return 1
35
36 def bbox(self, o) -> BBox:
37 return self.bbox
38
39 def area(self, o) -> float:
40 return self.bbox.area
41
42 def iscrowd(self, o) -> bool:
43 return 0
44
45
46 def get_datasets(args):
47 # parse records
48 source = Path(args.data_path)
49 df = pd.read_csv(source / "train.csv")
50 data_splitter = RandomSplitter([0.8, 0.2])
51 parser = WheatParser(df, source / "train")
52 train_rs, valid_rs = parser.parse(data_splitter)
53 # We use the transforms defined by the authors
54 train_tfm = detr_transform("train")
55 valid_tfm = detr_transform("val")
56 train_dataset = DetrDataset(train_rs, train_tfm)
57 valid_dataset = DetrDataset(valid_rs, valid_tfm)
58 return train_dataset, valid_dataset
59
60
61 if __name__ == "__main__":
62 # adds new arguments to original args_parser
63 args_parser = get_args_parser()
64 args_parser.add_argument("--data_path", type=str)
65 args = args_parser.parse_args()
66
67 train_dataset, valid_dataset = get_datasets(args)
68 run_detr(args=args, dataset_train=train_dataset, dataset_val=valid_dataset)
69
[end of examples/detr_wheat.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/examples/detr_wheat.py b/examples/detr_wheat.py
--- a/examples/detr_wheat.py
+++ b/examples/detr_wheat.py
@@ -30,17 +30,17 @@
def width(self, o) -> int:
return o.width
- def label(self, o) -> int:
- return 1
+ def label(self, o) -> List[int]:
+ return [1]
- def bbox(self, o) -> BBox:
- return self.bbox
+ def bbox(self, o) -> List[BBox]:
+ return [self.bbox]
- def area(self, o) -> float:
- return self.bbox.area
+ def area(self, o) -> List[float]:
+ return [self.bbox.area]
- def iscrowd(self, o) -> bool:
- return 0
+ def iscrowd(self, o) -> List[bool]:
+ return [0]
def get_datasets(args):
@@ -62,7 +62,12 @@
# adds new arguments to original args_parser
args_parser = get_args_parser()
args_parser.add_argument("--data_path", type=str)
+ args_parser.add_argument("--num_classes", type=int, default=None)
+ args_parser.add_argument("--fine_tune", action="store_true")
args = args_parser.parse_args()
+ if args.fine_tune:
+ args.resume = detr_pretrained_checkpoint_base()
+
train_dataset, valid_dataset = get_datasets(args)
run_detr(args=args, dataset_train=train_dataset, dataset_val=valid_dataset)
|
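The patch wires two new flags (`--num_classes` and `--fine_tune`) into the original DETR argument parser and, when fine-tuning, points `args.resume` at a pretrained checkpoint. The gist referenced in the issue follows the same route: reuse the COCO-pretrained weights but drop the classification head so a head sized for the new label set can be trained. A hedged sketch of that idea — the function name and checkpoint layout are assumptions, not code from this repository:

```python
import torch

def strip_class_head(checkpoint_path):
    # Load a pretrained DETR checkpoint and remove the class-embedding
    # weights, so load_state_dict(strict=False) keeps the backbone and
    # transformer but leaves a freshly initialised classification head.
    state = torch.load(checkpoint_path, map_location="cpu")
    model_state = state["model"]
    for key in [k for k in model_state if k.startswith("class_embed")]:
        del model_state[key]
    return state
```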
{"golden_diff": "diff --git a/examples/detr_wheat.py b/examples/detr_wheat.py\n--- a/examples/detr_wheat.py\n+++ b/examples/detr_wheat.py\n@@ -30,17 +30,17 @@\n def width(self, o) -> int:\n return o.width\n \n- def label(self, o) -> int:\n- return 1\n+ def label(self, o) -> List[int]:\n+ return [1]\n \n- def bbox(self, o) -> BBox:\n- return self.bbox\n+ def bbox(self, o) -> List[BBox]:\n+ return [self.bbox]\n \n- def area(self, o) -> float:\n- return self.bbox.area\n+ def area(self, o) -> List[float]:\n+ return [self.bbox.area]\n \n- def iscrowd(self, o) -> bool:\n- return 0\n+ def iscrowd(self, o) -> List[bool]:\n+ return [0]\n \n \n def get_datasets(args):\n@@ -62,7 +62,12 @@\n # adds new arguments to original args_parser\n args_parser = get_args_parser()\n args_parser.add_argument(\"--data_path\", type=str)\n+ args_parser.add_argument(\"--num_classes\", type=int, default=None)\n+ args_parser.add_argument(\"--fine_tune\", action=\"store_true\")\n args = args_parser.parse_args()\n \n+ if args.fine_tune:\n+ args.resume = detr_pretrained_checkpoint_base()\n+\n train_dataset, valid_dataset = get_datasets(args)\n run_detr(args=args, dataset_train=train_dataset, dataset_val=valid_dataset)\n", "issue": "Hub Detr fine tuning\nFollowing the first relase of Detr on hub, it would be a very good idea to support fine tuning.\r\n\r\n[this](https://github.com/facebookresearch/detr/issues/9) thread should be helpful, and [this](https://gist.github.com/mlk1337/651297e28199b4bb7907fc413c49f58f) gist has the high level overview on how to implement it.\n", "before_files": [{"content": "import pandas as pd\nfrom mantisshrimp.imports import *\nfrom mantisshrimp.hub.detr import *\n\n\nclass WheatParser(DetrBBoxParser):\n def __init__(self, df, source):\n self.df = df\n self.source = source\n self.imageid_map = IDMap()\n\n def __iter__(self):\n yield from self.df.itertuples()\n\n def __len__(self):\n return len(self.df)\n\n def prepare(self, o):\n self.bbox = BBox.from_xywh(*np.fromstring(o.bbox[1:-1], sep=\",\"))\n\n def imageid(self, o) -> int:\n return self.imageid_map[o.image_id]\n\n def filepath(self, o) -> Union[str, Path]:\n return self.source / f\"{o.image_id}.jpg\"\n\n def height(self, o) -> int:\n return o.height\n\n def width(self, o) -> int:\n return o.width\n\n def label(self, o) -> int:\n return 1\n\n def bbox(self, o) -> BBox:\n return self.bbox\n\n def area(self, o) -> float:\n return self.bbox.area\n\n def iscrowd(self, o) -> bool:\n return 0\n\n\ndef get_datasets(args):\n # parse records\n source = Path(args.data_path)\n df = pd.read_csv(source / \"train.csv\")\n data_splitter = RandomSplitter([0.8, 0.2])\n parser = WheatParser(df, source / \"train\")\n train_rs, valid_rs = parser.parse(data_splitter)\n # We use the transforms defined by the authors\n train_tfm = detr_transform(\"train\")\n valid_tfm = detr_transform(\"val\")\n train_dataset = DetrDataset(train_rs, train_tfm)\n valid_dataset = DetrDataset(valid_rs, valid_tfm)\n return train_dataset, valid_dataset\n\n\nif __name__ == \"__main__\":\n # adds new arguments to original args_parser\n args_parser = get_args_parser()\n args_parser.add_argument(\"--data_path\", type=str)\n args = args_parser.parse_args()\n\n train_dataset, valid_dataset = get_datasets(args)\n run_detr(args=args, dataset_train=train_dataset, dataset_val=valid_dataset)\n", "path": "examples/detr_wheat.py"}]}
| 1,276 | 363 |
gh_patches_debug_4773
|
rasdani/github-patches
|
git_diff
|
mozilla__bugbug-476
|
You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
defect_enhancement_task Taskcluster task is missing the artifact
From the task () logs:
```
[taskcluster 2019-05-21 08:37:51.436Z] === Task Finished ===
[taskcluster 2019-05-21 08:37:51.519Z] Artifact "public/defectenhancementtaskmodel.xz" not found at "/defectenhancementtaskmodel.xz"
[taskcluster 2019-05-21 08:37:51.927Z] Successful task run with exit code: 0 completed in 471.275 seconds
```
</issue>
<code>
[start of scripts/trainer.py]
1 # -*- coding: utf-8 -*-
2
3 import argparse
4 import lzma
5 import os
6 import shutil
7 from logging import INFO, basicConfig, getLogger
8 from urllib.request import urlretrieve
9
10 from bugbug.models import get_model_class
11
12 basicConfig(level=INFO)
13 logger = getLogger(__name__)
14
15 BASE_URL = "https://index.taskcluster.net/v1/task/project.relman.bugbug.data_{}.latest/artifacts/public"
16
17
18 class Trainer(object):
19 def decompress_file(self, path):
20 with lzma.open(f"{path}.xz", "rb") as input_f:
21 with open(path, "wb") as output_f:
22 shutil.copyfileobj(input_f, output_f)
23
24 def compress_file(self, path):
25 with open(path, "rb") as input_f:
26 with lzma.open(f"{path}.xz", "wb") as output_f:
27 shutil.copyfileobj(input_f, output_f)
28
29 def go(self, model_name):
30 # Download datasets that were built by bugbug_data.
31 os.makedirs("data", exist_ok=True)
32
33 # Bugs.json
34 logger.info("Downloading bugs database")
35 bugs_url = BASE_URL.format("bugs")
36 urlretrieve(f"{bugs_url}/bugs.json.xz", "data/bugs.json.xz")
37 logger.info("Decompressing bugs database")
38 self.decompress_file("data/bugs.json")
39
40 logger.info(f"Training *{model_name}* model")
41
42 model_class = get_model_class(model_name)
43 model = model_class()
44 model.train()
45
46 model_file_name = f"{model_name}model"
47 self.compress_file(model_file_name)
48
49
50 def main():
51 description = "Train the models"
52 parser = argparse.ArgumentParser(description=description)
53
54 parser.add_argument("model", help="Which model to train.")
55
56 args = parser.parse_args()
57
58 retriever = Trainer()
59 retriever.go(args.model)
60
[end of scripts/trainer.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch>
|
diff --git a/scripts/trainer.py b/scripts/trainer.py
--- a/scripts/trainer.py
+++ b/scripts/trainer.py
@@ -43,9 +43,14 @@
model = model_class()
model.train()
+ logger.info(f"Training done")
+
model_file_name = f"{model_name}model"
+ assert os.path.exists(model_file_name)
self.compress_file(model_file_name)
+ logger.info(f"Model compressed")
+
def main():
description = "Train the models"
|
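Besides the extra logging, the fix adds `assert os.path.exists(model_file_name)` before compression, so the task fails during training instead of only surfacing as a missing artifact at upload time. A small guard in the same spirit — the error message wording is an assumption, not taken from the repository:

```python
import os

def ensure_model_file(path):
    # Fail fast with a readable error rather than letting Taskcluster
    # report a missing artifact after the task has already "succeeded".
    if not os.path.exists(path):
        raise FileNotFoundError(f"Expected trained model at {path!r}; training did not produce it.")
```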
{"golden_diff": "diff --git a/scripts/trainer.py b/scripts/trainer.py\n--- a/scripts/trainer.py\n+++ b/scripts/trainer.py\n@@ -43,9 +43,14 @@\n model = model_class()\n model.train()\n \n+ logger.info(f\"Training done\")\n+\n model_file_name = f\"{model_name}model\"\n+ assert os.path.exists(model_file_name)\n self.compress_file(model_file_name)\n \n+ logger.info(f\"Model compressed\")\n+\n \n def main():\n description = \"Train the models\"\n", "issue": "defect_enhancement_task Taskcluster task is missing the artifact\nFrom the task () logs:\r\n```\r\n[taskcluster 2019-05-21 08:37:51.436Z] === Task Finished ===\r\n[taskcluster 2019-05-21 08:37:51.519Z] Artifact \"public/defectenhancementtaskmodel.xz\" not found at \"/defectenhancementtaskmodel.xz\"\r\n[taskcluster 2019-05-21 08:37:51.927Z] Successful task run with exit code: 0 completed in 471.275 seconds\r\n```\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport argparse\nimport lzma\nimport os\nimport shutil\nfrom logging import INFO, basicConfig, getLogger\nfrom urllib.request import urlretrieve\n\nfrom bugbug.models import get_model_class\n\nbasicConfig(level=INFO)\nlogger = getLogger(__name__)\n\nBASE_URL = \"https://index.taskcluster.net/v1/task/project.relman.bugbug.data_{}.latest/artifacts/public\"\n\n\nclass Trainer(object):\n def decompress_file(self, path):\n with lzma.open(f\"{path}.xz\", \"rb\") as input_f:\n with open(path, \"wb\") as output_f:\n shutil.copyfileobj(input_f, output_f)\n\n def compress_file(self, path):\n with open(path, \"rb\") as input_f:\n with lzma.open(f\"{path}.xz\", \"wb\") as output_f:\n shutil.copyfileobj(input_f, output_f)\n\n def go(self, model_name):\n # Download datasets that were built by bugbug_data.\n os.makedirs(\"data\", exist_ok=True)\n\n # Bugs.json\n logger.info(\"Downloading bugs database\")\n bugs_url = BASE_URL.format(\"bugs\")\n urlretrieve(f\"{bugs_url}/bugs.json.xz\", \"data/bugs.json.xz\")\n logger.info(\"Decompressing bugs database\")\n self.decompress_file(\"data/bugs.json\")\n\n logger.info(f\"Training *{model_name}* model\")\n\n model_class = get_model_class(model_name)\n model = model_class()\n model.train()\n\n model_file_name = f\"{model_name}model\"\n self.compress_file(model_file_name)\n\n\ndef main():\n description = \"Train the models\"\n parser = argparse.ArgumentParser(description=description)\n\n parser.add_argument(\"model\", help=\"Which model to train.\")\n\n args = parser.parse_args()\n\n retriever = Trainer()\n retriever.go(args.model)\n", "path": "scripts/trainer.py"}]}
| 1,216 | 115 |