Dataset columns (name, type, and observed size range):

  problem_id         string  18 to 22 chars
  source             string  1 distinct value
  task_type          string  1 distinct value
  in_source_id       string  13 to 58 chars
  prompt             string  1.71k to 18.9k chars
  golden_diff        string  145 to 5.13k chars
  verification_info  string  465 to 23.6k chars
  num_tokens_prompt  int64   556 to 4.1k tokens
  num_tokens_diff    int64   47 to 1.02k tokens
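
Each record pairs a GitHub issue plus partial code base (prompt) with a reference patch (golden_diff), a verification_info JSON string that repeats the diff, the issue text and the pre-patch files, and token counts for the prompt and the diff. Below is a minimal sketch of how rows like these could be loaded and unpacked with the Hugging Face datasets library; the repo id is copied from the source column and the split name is an assumption, so both may need adjusting.

```python
import json

from datasets import load_dataset

# Assumed repo id (taken from the `source` column of the sample rows) and
# assumed split name; adjust both to the actual dataset location.
ds = load_dataset("rasdani/github-patches", split="train")

row = ds[0]
print(row["problem_id"], row["in_source_id"], row["num_tokens_prompt"])

# `verification_info` is stored as a JSON string holding the reference diff,
# the issue text, and the pre-patch file contents.
info = json.loads(row["verification_info"])
print(info["issue"][:200])
print(info["golden_diff"])
for before in info["before_files"]:
    print(before["path"], len(before["content"]))
```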

Sample row 1
  problem_id:    gh_patches_debug_5077
  source:        rasdani/github-patches
  task_type:     git_diff
  in_source_id:  stephenmcd__mezzanine-358
  prompt:
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Dynamic page permissions aren't called on the content model The documentation mentions a `can_add` method to define on a a custom page model. When this is called in `page_tags.set_page_permissions`, it only calls in on the `Page` instance, never using `Page.get_content_model()`. I will submit a pull request to fix this, but I'm not sure where the check should go -- is it better in the Page class, having the base `can_add` method delegate to `get_content_model().can_add`, or should it go in the template tag? Dynamic page permissions aren't called on the content model The documentation mentions a `can_add` method to define on a a custom page model. When this is called in `page_tags.set_page_permissions`, it only calls in on the `Page` instance, never using `Page.get_content_model()`. I will submit a pull request to fix this, but I'm not sure where the check should go -- is it better in the Page class, having the base `can_add` method delegate to `get_content_model().can_add`, or should it go in the template tag? </issue> <code> [start of mezzanine/pages/templatetags/pages_tags.py] 1 2 from collections import defaultdict 3 4 from django.core.exceptions import ImproperlyConfigured 5 from django.core.urlresolvers import reverse, NoReverseMatch 6 from django.template import TemplateSyntaxError, Variable 7 from django.template.loader import get_template 8 from django.utils.translation import ugettext_lazy as _ 9 10 from mezzanine.conf import settings 11 from mezzanine.pages.models import Page 12 from mezzanine.utils.urls import admin_url 13 from mezzanine import template 14 15 16 register = template.Library() 17 18 19 @register.render_tag 20 def page_menu(context, token): 21 """ 22 Return a list of child pages for the given parent, storing all 23 pages in a dict in the context when first called using parents as keys 24 for retrieval on subsequent recursive calls from the menu template. 25 """ 26 # First arg could be the menu template file name, or the parent page. 27 # Also allow for both to be used. 28 template_name = None 29 parent_page = None 30 parts = token.split_contents()[1:] 31 for part in parts: 32 part = Variable(part).resolve(context) 33 if isinstance(part, unicode): 34 template_name = part 35 elif isinstance(part, Page): 36 parent_page = part 37 if template_name is None: 38 try: 39 template_name = context["menu_template_name"] 40 except KeyError: 41 error = "No template found for page_menu in: %s" % parts 42 raise TemplateSyntaxError(error) 43 context["menu_template_name"] = template_name 44 if "menu_pages" not in context: 45 try: 46 user = context["request"].user 47 slug = context["request"].path 48 except KeyError: 49 user = None 50 slug = "" 51 num_children = lambda id: lambda: len(context["menu_pages"][id]) 52 has_children = lambda id: lambda: num_children(id)() > 0 53 published = Page.objects.published(for_user=user) 54 if slug == admin_url(Page, "changelist"): 55 related = [m.__name__.lower() for m in Page.get_content_models()] 56 published = published.select_related(*related) 57 else: 58 published = published.select_related(depth=2) 59 # Store the current page being viewed in the context. Used 60 # for comparisons in page.set_menu_helpers. 
61 if "page" not in context: 62 try: 63 context["_current_page"] = published.get(slug=slug) 64 except Page.DoesNotExist: 65 context["_current_page"] = None 66 elif slug: 67 context["_current_page"] = context["page"] 68 # Maintain a dict of page IDs -> parent IDs for fast 69 # lookup in setting page.is_current_or_ascendant in 70 # page.set_menu_helpers. 71 context["_parent_page_ids"] = {} 72 pages = defaultdict(list) 73 for page in published.order_by("_order"): 74 page.set_helpers(context) 75 context["_parent_page_ids"][page.id] = page.parent_id 76 setattr(page, "num_children", num_children(page.id)) 77 setattr(page, "has_children", has_children(page.id)) 78 pages[page.parent_id].append(page) 79 context["menu_pages"] = pages 80 context["on_home"] = slug == reverse("home") 81 # ``branch_level`` must be stored against each page so that the 82 # calculation of it is correctly applied. This looks weird but if we do 83 # the ``branch_level`` as a separate arg to the template tag with the 84 # addition performed on it, the addition occurs each time the template 85 # tag is called rather than once per level. 86 context["branch_level"] = 0 87 parent_page_id = None 88 if parent_page is not None: 89 context["branch_level"] = getattr(parent_page, "branch_level", 0) + 1 90 parent_page_id = parent_page.id 91 92 context["page_branch"] = context["menu_pages"].get(parent_page_id, []) 93 context["page_branch_in_menu"] = False 94 for page in context["page_branch"]: 95 # footer/nav for backward compatibility. 96 page.in_footer = page.in_navigation = page.in_menu = True 97 for i, l, t in settings.PAGE_MENU_TEMPLATES: 98 if not unicode(i) in page.in_menus and t == template_name: 99 page.in_navigation = page.in_menu = False 100 if "footer" in template_name: 101 page.in_footer = False 102 break 103 if page.in_menu: 104 context["page_branch_in_menu"] = True 105 # Backwards compatibility 106 context['page_branch_in_navigation'] = context["page_branch_in_menu"] 107 context['page_branch_in_footer'] = (context["page_branch_in_menu"] and 108 template_name == "pages/menu/footer.html") 109 110 for i, page in enumerate(context["page_branch"]): 111 context["page_branch"][i].branch_level = context["branch_level"] 112 context["page_branch"][i].parent = parent_page 113 t = get_template(template_name) 114 return t.render(context) 115 116 117 @register.as_tag 118 def models_for_pages(*args): 119 """ 120 Create a select list containing each of the models that subclass the 121 ``Page`` model. 122 """ 123 page_models = [] 124 for model in Page.get_content_models(): 125 try: 126 admin_url(model, "add") 127 except NoReverseMatch: 128 continue 129 else: 130 setattr(model, "name", model._meta.verbose_name) 131 setattr(model, "add_url", admin_url(model, "add")) 132 page_models.append(model) 133 return page_models 134 135 136 @register.render_tag 137 def set_model_permissions(context, token): 138 """ 139 Assigns a permissions dict to the given model, much like Django 140 does with its dashboard app list. 141 142 Used within the change list for pages, to implement permission 143 checks for the navigation tree. 
144 """ 145 model = context[token.split_contents()[1]] 146 opts = model._meta 147 perm_name = opts.app_label + ".%s_" + opts.object_name.lower() 148 request = context["request"] 149 setattr(model, "perms", {}) 150 for perm_type in ("add", "change", "delete"): 151 model.perms[perm_type] = request.user.has_perm(perm_name % perm_type) 152 return "" 153 154 155 @register.render_tag 156 def set_page_permissions(context, token): 157 """ 158 Assigns a permissions dict to the given page instance, combining 159 Django's permission for the page's model and a permission check 160 against the instance itself calling the page's ``can_add``, 161 ``can_change`` and ``can_delete`` custom methods. 162 163 Used within the change list for pages, to implement permission 164 checks for the navigation tree. 165 """ 166 page = context[token.split_contents()[1]] 167 model = page.get_content_model() 168 try: 169 opts = model._meta 170 except AttributeError: 171 # A missing inner Meta class usually means the Page model 172 # hasn't been directly subclassed. 173 error = _("An error occured with the following class. Does " 174 "it subclass Page directly?") 175 raise ImproperlyConfigured(error + " '%s'" % model.__class__.__name__) 176 perm_name = opts.app_label + ".%s_" + opts.object_name.lower() 177 request = context["request"] 178 setattr(page, "perms", {}) 179 for perm_type in ("add", "change", "delete"): 180 perm = request.user.has_perm(perm_name % perm_type) 181 perm = perm and getattr(page, "can_%s" % perm_type)(request) 182 page.perms[perm_type] = perm 183 return "" 184 [end of mezzanine/pages/templatetags/pages_tags.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mezzanine/pages/templatetags/pages_tags.py b/mezzanine/pages/templatetags/pages_tags.py --- a/mezzanine/pages/templatetags/pages_tags.py +++ b/mezzanine/pages/templatetags/pages_tags.py @@ -178,6 +178,6 @@ setattr(page, "perms", {}) for perm_type in ("add", "change", "delete"): perm = request.user.has_perm(perm_name % perm_type) - perm = perm and getattr(page, "can_%s" % perm_type)(request) + perm = perm and getattr(model, "can_%s" % perm_type)(request) page.perms[perm_type] = perm return ""
{"golden_diff": "diff --git a/mezzanine/pages/templatetags/pages_tags.py b/mezzanine/pages/templatetags/pages_tags.py\n--- a/mezzanine/pages/templatetags/pages_tags.py\n+++ b/mezzanine/pages/templatetags/pages_tags.py\n@@ -178,6 +178,6 @@\n setattr(page, \"perms\", {})\n for perm_type in (\"add\", \"change\", \"delete\"):\n perm = request.user.has_perm(perm_name % perm_type)\n- perm = perm and getattr(page, \"can_%s\" % perm_type)(request)\n+ perm = perm and getattr(model, \"can_%s\" % perm_type)(request)\n page.perms[perm_type] = perm\n return \"\"\n", "issue": "Dynamic page permissions aren't called on the content model\nThe documentation mentions a `can_add` method to define on a a custom page model. When this is called in `page_tags.set_page_permissions`, it only calls in on the `Page` instance, never using `Page.get_content_model()`.\n\nI will submit a pull request to fix this, but I'm not sure where the check should go -- is it better in the Page class, having the base `can_add` method delegate to `get_content_model().can_add`, or should it go in the template tag?\n\nDynamic page permissions aren't called on the content model\nThe documentation mentions a `can_add` method to define on a a custom page model. When this is called in `page_tags.set_page_permissions`, it only calls in on the `Page` instance, never using `Page.get_content_model()`.\n\nI will submit a pull request to fix this, but I'm not sure where the check should go -- is it better in the Page class, having the base `can_add` method delegate to `get_content_model().can_add`, or should it go in the template tag?\n\n", "before_files": [{"content": "\nfrom collections import defaultdict\n\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.core.urlresolvers import reverse, NoReverseMatch\nfrom django.template import TemplateSyntaxError, Variable\nfrom django.template.loader import get_template\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom mezzanine.conf import settings\nfrom mezzanine.pages.models import Page\nfrom mezzanine.utils.urls import admin_url\nfrom mezzanine import template\n\n\nregister = template.Library()\n\n\[email protected]_tag\ndef page_menu(context, token):\n \"\"\"\n Return a list of child pages for the given parent, storing all\n pages in a dict in the context when first called using parents as keys\n for retrieval on subsequent recursive calls from the menu template.\n \"\"\"\n # First arg could be the menu template file name, or the parent page.\n # Also allow for both to be used.\n template_name = None\n parent_page = None\n parts = token.split_contents()[1:]\n for part in parts:\n part = Variable(part).resolve(context)\n if isinstance(part, unicode):\n template_name = part\n elif isinstance(part, Page):\n parent_page = part\n if template_name is None:\n try:\n template_name = context[\"menu_template_name\"]\n except KeyError:\n error = \"No template found for page_menu in: %s\" % parts\n raise TemplateSyntaxError(error)\n context[\"menu_template_name\"] = template_name\n if \"menu_pages\" not in context:\n try:\n user = context[\"request\"].user\n slug = context[\"request\"].path\n except KeyError:\n user = None\n slug = \"\"\n num_children = lambda id: lambda: len(context[\"menu_pages\"][id])\n has_children = lambda id: lambda: num_children(id)() > 0\n published = Page.objects.published(for_user=user)\n if slug == admin_url(Page, \"changelist\"):\n related = [m.__name__.lower() for m in Page.get_content_models()]\n published = published.select_related(*related)\n 
else:\n published = published.select_related(depth=2)\n # Store the current page being viewed in the context. Used\n # for comparisons in page.set_menu_helpers.\n if \"page\" not in context:\n try:\n context[\"_current_page\"] = published.get(slug=slug)\n except Page.DoesNotExist:\n context[\"_current_page\"] = None\n elif slug:\n context[\"_current_page\"] = context[\"page\"]\n # Maintain a dict of page IDs -> parent IDs for fast\n # lookup in setting page.is_current_or_ascendant in\n # page.set_menu_helpers.\n context[\"_parent_page_ids\"] = {}\n pages = defaultdict(list)\n for page in published.order_by(\"_order\"):\n page.set_helpers(context)\n context[\"_parent_page_ids\"][page.id] = page.parent_id\n setattr(page, \"num_children\", num_children(page.id))\n setattr(page, \"has_children\", has_children(page.id))\n pages[page.parent_id].append(page)\n context[\"menu_pages\"] = pages\n context[\"on_home\"] = slug == reverse(\"home\")\n # ``branch_level`` must be stored against each page so that the\n # calculation of it is correctly applied. This looks weird but if we do\n # the ``branch_level`` as a separate arg to the template tag with the\n # addition performed on it, the addition occurs each time the template\n # tag is called rather than once per level.\n context[\"branch_level\"] = 0\n parent_page_id = None\n if parent_page is not None:\n context[\"branch_level\"] = getattr(parent_page, \"branch_level\", 0) + 1\n parent_page_id = parent_page.id\n\n context[\"page_branch\"] = context[\"menu_pages\"].get(parent_page_id, [])\n context[\"page_branch_in_menu\"] = False\n for page in context[\"page_branch\"]:\n # footer/nav for backward compatibility.\n page.in_footer = page.in_navigation = page.in_menu = True\n for i, l, t in settings.PAGE_MENU_TEMPLATES:\n if not unicode(i) in page.in_menus and t == template_name:\n page.in_navigation = page.in_menu = False\n if \"footer\" in template_name:\n page.in_footer = False\n break\n if page.in_menu:\n context[\"page_branch_in_menu\"] = True\n # Backwards compatibility\n context['page_branch_in_navigation'] = context[\"page_branch_in_menu\"]\n context['page_branch_in_footer'] = (context[\"page_branch_in_menu\"] and\n template_name == \"pages/menu/footer.html\")\n\n for i, page in enumerate(context[\"page_branch\"]):\n context[\"page_branch\"][i].branch_level = context[\"branch_level\"]\n context[\"page_branch\"][i].parent = parent_page\n t = get_template(template_name)\n return t.render(context)\n\n\[email protected]_tag\ndef models_for_pages(*args):\n \"\"\"\n Create a select list containing each of the models that subclass the\n ``Page`` model.\n \"\"\"\n page_models = []\n for model in Page.get_content_models():\n try:\n admin_url(model, \"add\")\n except NoReverseMatch:\n continue\n else:\n setattr(model, \"name\", model._meta.verbose_name)\n setattr(model, \"add_url\", admin_url(model, \"add\"))\n page_models.append(model)\n return page_models\n\n\[email protected]_tag\ndef set_model_permissions(context, token):\n \"\"\"\n Assigns a permissions dict to the given model, much like Django\n does with its dashboard app list.\n\n Used within the change list for pages, to implement permission\n checks for the navigation tree.\n \"\"\"\n model = context[token.split_contents()[1]]\n opts = model._meta\n perm_name = opts.app_label + \".%s_\" + opts.object_name.lower()\n request = context[\"request\"]\n setattr(model, \"perms\", {})\n for perm_type in (\"add\", \"change\", \"delete\"):\n model.perms[perm_type] = request.user.has_perm(perm_name % 
perm_type)\n return \"\"\n\n\[email protected]_tag\ndef set_page_permissions(context, token):\n \"\"\"\n Assigns a permissions dict to the given page instance, combining\n Django's permission for the page's model and a permission check\n against the instance itself calling the page's ``can_add``,\n ``can_change`` and ``can_delete`` custom methods.\n\n Used within the change list for pages, to implement permission\n checks for the navigation tree.\n \"\"\"\n page = context[token.split_contents()[1]]\n model = page.get_content_model()\n try:\n opts = model._meta\n except AttributeError:\n # A missing inner Meta class usually means the Page model\n # hasn't been directly subclassed.\n error = _(\"An error occured with the following class. Does \"\n \"it subclass Page directly?\")\n raise ImproperlyConfigured(error + \" '%s'\" % model.__class__.__name__)\n perm_name = opts.app_label + \".%s_\" + opts.object_name.lower()\n request = context[\"request\"]\n setattr(page, \"perms\", {})\n for perm_type in (\"add\", \"change\", \"delete\"):\n perm = request.user.has_perm(perm_name % perm_type)\n perm = perm and getattr(page, \"can_%s\" % perm_type)(request)\n page.perms[perm_type] = perm\n return \"\"\n", "path": "mezzanine/pages/templatetags/pages_tags.py"}]}
  num_tokens_prompt:  2,826
  num_tokens_diff:    164

Sample row 2
  problem_id:    gh_patches_debug_15192
  source:        rasdani/github-patches
  task_type:     git_diff
  in_source_id:  SeldonIO__MLServer-339
  prompt:
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> mlserver --version fails (0.5.0) ``` mlserver --version Traceback (most recent call last): File "/home/clive/anaconda3/envs/mlserver/bin/mlserver", line 8, in <module> sys.exit(main()) File "/home/clive/anaconda3/envs/mlserver/lib/python3.8/site-packages/mlserver/cli/main.py", line 45, in main root() File "/home/clive/anaconda3/envs/mlserver/lib/python3.8/site-packages/click/core.py", line 829, in __call__ return self.main(*args, **kwargs) File "/home/clive/anaconda3/envs/mlserver/lib/python3.8/site-packages/click/core.py", line 781, in main with self.make_context(prog_name, args, **extra) as ctx: File "/home/clive/anaconda3/envs/mlserver/lib/python3.8/site-packages/click/core.py", line 700, in make_context self.parse_args(ctx, args) File "/home/clive/anaconda3/envs/mlserver/lib/python3.8/site-packages/click/core.py", line 1212, in parse_args rest = Command.parse_args(self, ctx, args) File "/home/clive/anaconda3/envs/mlserver/lib/python3.8/site-packages/click/core.py", line 1048, in parse_args value, args = param.handle_parse_result(ctx, opts, args) File "/home/clive/anaconda3/envs/mlserver/lib/python3.8/site-packages/click/core.py", line 1630, in handle_parse_result value = invoke_param_callback(self.callback, ctx, self, value) File "/home/clive/anaconda3/envs/mlserver/lib/python3.8/site-packages/click/core.py", line 123, in invoke_param_callback return callback(ctx, param, value) File "/home/clive/anaconda3/envs/mlserver/lib/python3.8/site-packages/click/decorators.py", line 295, in callback raise RuntimeError("Could not determine version") RuntimeError: Could not determine version (mlserver) /home/clive $ pip freeze | grep mlserver mlserver==0.5.0 ``` </issue> <code> [start of setup.py] 1 import os 2 3 from typing import Dict 4 from setuptools import setup, find_packages 5 6 ROOT_PATH = os.path.dirname(__file__) 7 PKG_NAME = "mlserver" 8 PKG_PATH = os.path.join(ROOT_PATH, PKG_NAME) 9 10 11 def _load_version() -> str: 12 version = "" 13 version_path = os.path.join(PKG_PATH, "version.py") 14 with open(version_path) as fp: 15 version_module: Dict[str, str] = {} 16 exec(fp.read(), version_module) 17 version = version_module["__version__"] 18 19 return version 20 21 22 def _load_description() -> str: 23 readme_path = os.path.join(ROOT_PATH, "README.md") 24 with open(readme_path) as fp: 25 return fp.read() 26 27 28 setup( 29 name=PKG_NAME, 30 version=_load_version(), 31 url="https://github.com/SeldonIO/MLServer.git", 32 author="Seldon Technologies Ltd.", 33 author_email="[email protected]", 34 description="ML server", 35 packages=find_packages(exclude=["tests", "tests.*"]), 36 install_requires=[ 37 "grpcio", 38 "protobuf", 39 # We pin version of fastapi 40 # check https://github.com/SeldonIO/MLServer/issues/340 41 "fastapi==0.68.2", 42 "uvicorn", 43 "click", 44 "numpy", 45 "pandas", 46 ], 47 extras_require={"all": ["orjson"]}, 48 entry_points={"console_scripts": ["mlserver=mlserver.cli:main"]}, 49 long_description=_load_description(), 50 long_description_content_type="text/markdown", 51 license="Apache 2.0", 52 ) 53 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -34,15 +34,16 @@ description="ML server", packages=find_packages(exclude=["tests", "tests.*"]), install_requires=[ - "grpcio", - "protobuf", + "click", # We pin version of fastapi # check https://github.com/SeldonIO/MLServer/issues/340 "fastapi==0.68.2", - "uvicorn", - "click", + "grpcio", + "importlib-metadata;python_version<'3.8'", "numpy", "pandas", + "protobuf", + "uvicorn", ], extras_require={"all": ["orjson"]}, entry_points={"console_scripts": ["mlserver=mlserver.cli:main"]},
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -34,15 +34,16 @@\n description=\"ML server\",\n packages=find_packages(exclude=[\"tests\", \"tests.*\"]),\n install_requires=[\n- \"grpcio\",\n- \"protobuf\",\n+ \"click\",\n # We pin version of fastapi\n # check https://github.com/SeldonIO/MLServer/issues/340\n \"fastapi==0.68.2\",\n- \"uvicorn\",\n- \"click\",\n+ \"grpcio\",\n+ \"importlib-metadata;python_version<'3.8'\",\n \"numpy\",\n \"pandas\",\n+ \"protobuf\",\n+ \"uvicorn\",\n ],\n extras_require={\"all\": [\"orjson\"]},\n entry_points={\"console_scripts\": [\"mlserver=mlserver.cli:main\"]},\n", "issue": "mlserver --version fails (0.5.0)\n```\r\nmlserver --version\r\nTraceback (most recent call last):\r\n File \"/home/clive/anaconda3/envs/mlserver/bin/mlserver\", line 8, in <module>\r\n sys.exit(main())\r\n File \"/home/clive/anaconda3/envs/mlserver/lib/python3.8/site-packages/mlserver/cli/main.py\", line 45, in main\r\n root()\r\n File \"/home/clive/anaconda3/envs/mlserver/lib/python3.8/site-packages/click/core.py\", line 829, in __call__\r\n return self.main(*args, **kwargs)\r\n File \"/home/clive/anaconda3/envs/mlserver/lib/python3.8/site-packages/click/core.py\", line 781, in main\r\n with self.make_context(prog_name, args, **extra) as ctx:\r\n File \"/home/clive/anaconda3/envs/mlserver/lib/python3.8/site-packages/click/core.py\", line 700, in make_context\r\n self.parse_args(ctx, args)\r\n File \"/home/clive/anaconda3/envs/mlserver/lib/python3.8/site-packages/click/core.py\", line 1212, in parse_args\r\n rest = Command.parse_args(self, ctx, args)\r\n File \"/home/clive/anaconda3/envs/mlserver/lib/python3.8/site-packages/click/core.py\", line 1048, in parse_args\r\n value, args = param.handle_parse_result(ctx, opts, args)\r\n File \"/home/clive/anaconda3/envs/mlserver/lib/python3.8/site-packages/click/core.py\", line 1630, in handle_parse_result\r\n value = invoke_param_callback(self.callback, ctx, self, value)\r\n File \"/home/clive/anaconda3/envs/mlserver/lib/python3.8/site-packages/click/core.py\", line 123, in invoke_param_callback\r\n return callback(ctx, param, value)\r\n File \"/home/clive/anaconda3/envs/mlserver/lib/python3.8/site-packages/click/decorators.py\", line 295, in callback\r\n raise RuntimeError(\"Could not determine version\")\r\nRuntimeError: Could not determine version\r\n(mlserver) /home/clive $ pip freeze | grep mlserver\r\nmlserver==0.5.0\r\n```\n", "before_files": [{"content": "import os\n\nfrom typing import Dict\nfrom setuptools import setup, find_packages\n\nROOT_PATH = os.path.dirname(__file__)\nPKG_NAME = \"mlserver\"\nPKG_PATH = os.path.join(ROOT_PATH, PKG_NAME)\n\n\ndef _load_version() -> str:\n version = \"\"\n version_path = os.path.join(PKG_PATH, \"version.py\")\n with open(version_path) as fp:\n version_module: Dict[str, str] = {}\n exec(fp.read(), version_module)\n version = version_module[\"__version__\"]\n\n return version\n\n\ndef _load_description() -> str:\n readme_path = os.path.join(ROOT_PATH, \"README.md\")\n with open(readme_path) as fp:\n return fp.read()\n\n\nsetup(\n name=PKG_NAME,\n version=_load_version(),\n url=\"https://github.com/SeldonIO/MLServer.git\",\n author=\"Seldon Technologies Ltd.\",\n author_email=\"[email protected]\",\n description=\"ML server\",\n packages=find_packages(exclude=[\"tests\", \"tests.*\"]),\n install_requires=[\n \"grpcio\",\n \"protobuf\",\n # We pin version of fastapi\n # check https://github.com/SeldonIO/MLServer/issues/340\n \"fastapi==0.68.2\",\n 
\"uvicorn\",\n \"click\",\n \"numpy\",\n \"pandas\",\n ],\n extras_require={\"all\": [\"orjson\"]},\n entry_points={\"console_scripts\": [\"mlserver=mlserver.cli:main\"]},\n long_description=_load_description(),\n long_description_content_type=\"text/markdown\",\n license=\"Apache 2.0\",\n)\n", "path": "setup.py"}]}
  num_tokens_prompt:  1,499
  num_tokens_diff:    195

Sample row 3
  problem_id:    gh_patches_debug_30223
  source:        rasdani/github-patches
  task_type:     git_diff
  in_source_id:  SeldonIO__MLServer-1346
  prompt:
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Support for NaN in np.ndarray codec? Sending/receiving a `numpy` array containing NaN values to/from an mlserver with the `NumpyCodec` results in a `ValueError: Out of range float values are not JSON compliant` error. Is this a known limitation, and are there any good workarounds that would still allow me to use a pre-packaged server? I understand that it would probably be bad to assume than `NaN` can be mapped to `null` in the JSON, but maybe that could be an option? </issue> <code> [start of mlserver/codecs/numpy.py] 1 import numpy as np 2 3 from typing import Any 4 5 from ..types import RequestInput, ResponseOutput, Parameters 6 7 from .base import InputCodec, register_input_codec, register_request_codec 8 from .utils import SingleInputRequestCodec, InputOrOutput, inject_batch_dimension 9 from .lists import is_list_of 10 from .string import encode_str 11 12 _DatatypeToNumpy = { 13 "BOOL": "bool", 14 "UINT8": "uint8", 15 "UINT16": "uint16", 16 "UINT32": "uint32", 17 "UINT64": "uint64", 18 "INT8": "int8", 19 "INT16": "int16", 20 "INT32": "int32", 21 "INT64": "int64", 22 "FP16": "float16", 23 "FP32": "float32", 24 "FP64": "float64", 25 "BYTES": "bytes", 26 } 27 28 _NumpyToDatatype = {value: key for key, value in _DatatypeToNumpy.items()} 29 30 # NOTE: numpy has more types than v2 protocol 31 _NumpyToDatatype["object"] = "BYTES" 32 _NumpyToDatatype["S"] = "BYTES" 33 _NumpyToDatatype["U"] = "BYTES" 34 35 36 def to_dtype(input_or_output: InputOrOutput) -> "np.dtype": 37 dtype = _DatatypeToNumpy[input_or_output.datatype] 38 39 if input_or_output.datatype == "BYTES": 40 data = getattr(input_or_output.data, "__root__", input_or_output.data) 41 if is_list_of(data, str): 42 # Handle special case of strings being treated as Numpy arrays 43 return np.dtype(str) 44 45 # bytes have variable size, so need to specify as part of type 46 # TODO: Make elem size variable (and not just the last dimension) 47 elem_size = input_or_output.shape[-1] 48 return np.dtype((dtype, elem_size)) 49 50 return np.dtype(dtype) 51 52 53 def to_datatype(dtype: np.dtype) -> str: 54 as_str = str(dtype) 55 56 if as_str not in _NumpyToDatatype: 57 # If not present, try with kind 58 as_str = getattr(dtype, "kind") 59 60 datatype = _NumpyToDatatype[as_str] 61 62 return datatype 63 64 65 def _to_ndarray(input_or_output: InputOrOutput) -> np.ndarray: 66 data = getattr(input_or_output.data, "__root__", input_or_output.data) 67 dtype = to_dtype(input_or_output) 68 69 if input_or_output.datatype == "BYTES": 70 if is_list_of(data, bytes): 71 # If the inputs is of type `BYTES`, there could be multiple "lists" 72 # serialised into multiple buffers. 73 # We will deserialise all of them and concatenate them together. 74 decoded = [np.frombuffer(buffer, dtype) for buffer in data] 75 return np.concatenate(decoded) 76 77 return np.array(data, dtype) 78 79 80 def _encode_data(data: np.ndarray, datatype: str) -> list: 81 if datatype == "BYTES": 82 if np.issubdtype(data.dtype, str): 83 # Handle special case of a string Numpy array, where the diff elems 84 # need to be encoded as well 85 as_list = data.flatten().tolist() 86 return list(map(encode_str, as_list)) 87 88 if np.issubdtype(data.dtype, bytes): 89 # `tobytes` is way faster than tolist, although it's harder to serialise 90 # and only makes sense for actual bytes inputs (#253). 
91 # Note that `.tobytes()` will return a single `bytes` payload, thus we 92 # need to encapsulate it into a list so that it's compatible. 93 return [data.tobytes()] 94 95 return data.flatten().tolist() 96 97 98 @register_input_codec 99 class NumpyCodec(InputCodec): 100 """ 101 Decodes an request input (response output) as a NumPy array. 102 """ 103 104 ContentType = "np" 105 TypeHint = np.ndarray 106 107 @classmethod 108 def can_encode(csl, payload: Any) -> bool: 109 return isinstance(payload, np.ndarray) 110 111 @classmethod 112 def encode_output(cls, name: str, payload: np.ndarray, **kwargs) -> ResponseOutput: 113 datatype = to_datatype(payload.dtype) 114 115 shape = inject_batch_dimension(list(payload.shape)) 116 117 return ResponseOutput( 118 name=name, 119 datatype=datatype, 120 shape=shape, 121 data=_encode_data(payload, datatype), 122 parameters=Parameters(content_type=cls.ContentType), 123 ) 124 125 @classmethod 126 def decode_output(cls, response_output: ResponseOutput) -> np.ndarray: 127 return cls.decode_input(response_output) # type: ignore 128 129 @classmethod 130 def encode_input(cls, name: str, payload: np.ndarray, **kwargs) -> RequestInput: 131 output = cls.encode_output(name=name, payload=payload) 132 133 return RequestInput( 134 name=output.name, 135 datatype=output.datatype, 136 shape=output.shape, 137 data=output.data, 138 parameters=Parameters(content_type=cls.ContentType), 139 ) 140 141 @classmethod 142 def decode_input(cls, request_input: RequestInput) -> np.ndarray: 143 model_data = _to_ndarray(request_input) 144 145 # TODO: Check if reshape not valid 146 return model_data.reshape(request_input.shape) 147 148 149 @register_request_codec 150 class NumpyRequestCodec(SingleInputRequestCodec): 151 """ 152 Decodes the first input (output) of request (response) as a NumPy array. 153 This codec can be useful for cases where the whole payload is a single 154 NumPy tensor. 
155 """ 156 157 InputCodec = NumpyCodec 158 ContentType = NumpyCodec.ContentType 159 [end of mlserver/codecs/numpy.py] [start of mlserver/codecs/pandas.py] 1 import pandas as pd 2 import numpy as np 3 4 from typing import Optional, Any, List, Tuple 5 6 from .base import RequestCodec, register_request_codec 7 from .numpy import to_datatype, to_dtype 8 from .string import encode_str, StringCodec 9 from .utils import get_decoded_or_raw, InputOrOutput, inject_batch_dimension 10 from .lists import ListElement 11 from ..types import ( 12 InferenceRequest, 13 InferenceResponse, 14 RequestInput, 15 ResponseOutput, 16 Parameters, 17 ) 18 19 20 def _to_series(input_or_output: InputOrOutput) -> pd.Series: 21 payload = get_decoded_or_raw(input_or_output) 22 23 if input_or_output.datatype == "BYTES": 24 # Don't convert the dtype of BYTES 25 return pd.Series(payload) 26 27 if isinstance(payload, np.ndarray): 28 # Necessary so that it's compatible with pd.Series 29 payload = list(payload) 30 31 dtype = to_dtype(input_or_output) 32 return pd.Series(payload, dtype=dtype) 33 34 35 def _to_response_output(series: pd.Series, use_bytes: bool = True) -> ResponseOutput: 36 datatype = to_datatype(series.dtype) 37 data = series.tolist() 38 content_type = None 39 40 if datatype == "BYTES": 41 data, content_type = _process_bytes(data, use_bytes) 42 43 shape = inject_batch_dimension(list(series.shape)) 44 parameters = None 45 if content_type: 46 parameters = Parameters(content_type=content_type) 47 48 return ResponseOutput( 49 name=series.name, 50 shape=shape, 51 data=data, 52 datatype=datatype, 53 parameters=parameters, 54 ) 55 56 57 def _process_bytes( 58 data: List[ListElement], use_bytes: bool = True 59 ) -> Tuple[List[ListElement], Optional[str]]: 60 # To ensure that "string" columns can be encoded in gRPC, we need to 61 # encode them as bytes. 62 # We'll also keep track of whether the list should be treated in the 63 # future as a list of strings. 64 processed = [] 65 content_type: Optional[str] = StringCodec.ContentType 66 for elem in data: 67 converted = elem 68 if not isinstance(elem, str): 69 # There was a non-string element, so we can't determine a content 70 # type 71 content_type = None 72 elif use_bytes: 73 converted = encode_str(elem) 74 75 processed.append(converted) 76 77 return processed, content_type 78 79 80 @register_request_codec 81 class PandasCodec(RequestCodec): 82 """ 83 Decodes a request (response) into a Pandas DataFrame, assuming each input 84 (output) head corresponds to a column of the DataFrame. 
85 """ 86 87 ContentType = "pd" 88 TypeHint = pd.DataFrame 89 90 @classmethod 91 def can_encode(cls, payload: Any) -> bool: 92 return isinstance(payload, pd.DataFrame) 93 94 @classmethod 95 def encode_response( 96 cls, 97 model_name: str, 98 payload: pd.DataFrame, 99 model_version: Optional[str] = None, 100 use_bytes: bool = True, 101 **kwargs 102 ) -> InferenceResponse: 103 outputs = cls.encode_outputs(payload, use_bytes=use_bytes) 104 105 return InferenceResponse( 106 model_name=model_name, 107 model_version=model_version, 108 parameters=Parameters(content_type=cls.ContentType), 109 outputs=outputs, 110 ) 111 112 @classmethod 113 def decode_response(cls, response: InferenceResponse) -> pd.DataFrame: 114 data = { 115 response_output.name: _to_series(response_output) 116 for response_output in response.outputs 117 } 118 119 return pd.DataFrame(data) 120 121 @classmethod 122 def encode_outputs( 123 cls, payload: pd.DataFrame, use_bytes: bool = True 124 ) -> List[ResponseOutput]: 125 return [ 126 _to_response_output(payload[col], use_bytes=use_bytes) for col in payload 127 ] 128 129 @classmethod 130 def encode_request( 131 cls, payload: pd.DataFrame, use_bytes: bool = True, **kwargs 132 ) -> InferenceRequest: 133 outputs = cls.encode_outputs(payload, use_bytes=use_bytes) 134 135 return InferenceRequest( 136 parameters=Parameters(content_type=cls.ContentType), 137 inputs=[ 138 RequestInput( 139 name=output.name, 140 datatype=output.datatype, 141 shape=output.shape, 142 data=output.data, 143 parameters=output.parameters, 144 ) 145 for output in outputs 146 ], 147 ) 148 149 @classmethod 150 def decode_request(cls, request: InferenceRequest) -> pd.DataFrame: 151 data = { 152 request_input.name: _to_series(request_input) 153 for request_input in request.inputs 154 } 155 156 return pd.DataFrame(data) 157 [end of mlserver/codecs/pandas.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mlserver/codecs/numpy.py b/mlserver/codecs/numpy.py --- a/mlserver/codecs/numpy.py +++ b/mlserver/codecs/numpy.py @@ -92,7 +92,24 @@ # need to encapsulate it into a list so that it's compatible. return [data.tobytes()] - return data.flatten().tolist() + flattened_list = data.flatten().tolist() + + # Replace NaN with null + if datatype != "BYTES": + # The `isnan` method doesn't work on Numpy arrays with non-numeric + # types + has_nan = np.isnan(data).any() + if has_nan: + flattened_list = list(map(convert_nan, flattened_list)) + + return flattened_list + + +def convert_nan(val): + if np.isnan(val): + return None + + return val @register_input_codec diff --git a/mlserver/codecs/pandas.py b/mlserver/codecs/pandas.py --- a/mlserver/codecs/pandas.py +++ b/mlserver/codecs/pandas.py @@ -4,7 +4,7 @@ from typing import Optional, Any, List, Tuple from .base import RequestCodec, register_request_codec -from .numpy import to_datatype, to_dtype +from .numpy import to_datatype, to_dtype, convert_nan from .string import encode_str, StringCodec from .utils import get_decoded_or_raw, InputOrOutput, inject_batch_dimension from .lists import ListElement @@ -35,8 +35,13 @@ def _to_response_output(series: pd.Series, use_bytes: bool = True) -> ResponseOutput: datatype = to_datatype(series.dtype) data = series.tolist() - content_type = None + # Replace NaN with null + has_nan = series.isnull().any() + if has_nan: + data = list(map(convert_nan, data)) + + content_type = None if datatype == "BYTES": data, content_type = _process_bytes(data, use_bytes)
{"golden_diff": "diff --git a/mlserver/codecs/numpy.py b/mlserver/codecs/numpy.py\n--- a/mlserver/codecs/numpy.py\n+++ b/mlserver/codecs/numpy.py\n@@ -92,7 +92,24 @@\n # need to encapsulate it into a list so that it's compatible.\n return [data.tobytes()]\n \n- return data.flatten().tolist()\n+ flattened_list = data.flatten().tolist()\n+\n+ # Replace NaN with null\n+ if datatype != \"BYTES\":\n+ # The `isnan` method doesn't work on Numpy arrays with non-numeric\n+ # types\n+ has_nan = np.isnan(data).any()\n+ if has_nan:\n+ flattened_list = list(map(convert_nan, flattened_list))\n+\n+ return flattened_list\n+\n+\n+def convert_nan(val):\n+ if np.isnan(val):\n+ return None\n+\n+ return val\n \n \n @register_input_codec\ndiff --git a/mlserver/codecs/pandas.py b/mlserver/codecs/pandas.py\n--- a/mlserver/codecs/pandas.py\n+++ b/mlserver/codecs/pandas.py\n@@ -4,7 +4,7 @@\n from typing import Optional, Any, List, Tuple\n \n from .base import RequestCodec, register_request_codec\n-from .numpy import to_datatype, to_dtype\n+from .numpy import to_datatype, to_dtype, convert_nan\n from .string import encode_str, StringCodec\n from .utils import get_decoded_or_raw, InputOrOutput, inject_batch_dimension\n from .lists import ListElement\n@@ -35,8 +35,13 @@\n def _to_response_output(series: pd.Series, use_bytes: bool = True) -> ResponseOutput:\n datatype = to_datatype(series.dtype)\n data = series.tolist()\n- content_type = None\n \n+ # Replace NaN with null\n+ has_nan = series.isnull().any()\n+ if has_nan:\n+ data = list(map(convert_nan, data))\n+\n+ content_type = None\n if datatype == \"BYTES\":\n data, content_type = _process_bytes(data, use_bytes)\n", "issue": "Support for NaN in np.ndarray codec?\nSending/receiving a `numpy` array containing NaN values to/from an mlserver with the `NumpyCodec` results in a `ValueError: Out of range float values are not JSON compliant` error. Is this a known limitation, and are there any good workarounds that would still allow me to use a pre-packaged server? 
I understand that it would probably be bad to assume than `NaN` can be mapped to `null` in the JSON, but maybe that could be an option?\n", "before_files": [{"content": "import numpy as np\n\nfrom typing import Any\n\nfrom ..types import RequestInput, ResponseOutput, Parameters\n\nfrom .base import InputCodec, register_input_codec, register_request_codec\nfrom .utils import SingleInputRequestCodec, InputOrOutput, inject_batch_dimension\nfrom .lists import is_list_of\nfrom .string import encode_str\n\n_DatatypeToNumpy = {\n \"BOOL\": \"bool\",\n \"UINT8\": \"uint8\",\n \"UINT16\": \"uint16\",\n \"UINT32\": \"uint32\",\n \"UINT64\": \"uint64\",\n \"INT8\": \"int8\",\n \"INT16\": \"int16\",\n \"INT32\": \"int32\",\n \"INT64\": \"int64\",\n \"FP16\": \"float16\",\n \"FP32\": \"float32\",\n \"FP64\": \"float64\",\n \"BYTES\": \"bytes\",\n}\n\n_NumpyToDatatype = {value: key for key, value in _DatatypeToNumpy.items()}\n\n# NOTE: numpy has more types than v2 protocol\n_NumpyToDatatype[\"object\"] = \"BYTES\"\n_NumpyToDatatype[\"S\"] = \"BYTES\"\n_NumpyToDatatype[\"U\"] = \"BYTES\"\n\n\ndef to_dtype(input_or_output: InputOrOutput) -> \"np.dtype\":\n dtype = _DatatypeToNumpy[input_or_output.datatype]\n\n if input_or_output.datatype == \"BYTES\":\n data = getattr(input_or_output.data, \"__root__\", input_or_output.data)\n if is_list_of(data, str):\n # Handle special case of strings being treated as Numpy arrays\n return np.dtype(str)\n\n # bytes have variable size, so need to specify as part of type\n # TODO: Make elem size variable (and not just the last dimension)\n elem_size = input_or_output.shape[-1]\n return np.dtype((dtype, elem_size))\n\n return np.dtype(dtype)\n\n\ndef to_datatype(dtype: np.dtype) -> str:\n as_str = str(dtype)\n\n if as_str not in _NumpyToDatatype:\n # If not present, try with kind\n as_str = getattr(dtype, \"kind\")\n\n datatype = _NumpyToDatatype[as_str]\n\n return datatype\n\n\ndef _to_ndarray(input_or_output: InputOrOutput) -> np.ndarray:\n data = getattr(input_or_output.data, \"__root__\", input_or_output.data)\n dtype = to_dtype(input_or_output)\n\n if input_or_output.datatype == \"BYTES\":\n if is_list_of(data, bytes):\n # If the inputs is of type `BYTES`, there could be multiple \"lists\"\n # serialised into multiple buffers.\n # We will deserialise all of them and concatenate them together.\n decoded = [np.frombuffer(buffer, dtype) for buffer in data]\n return np.concatenate(decoded)\n\n return np.array(data, dtype)\n\n\ndef _encode_data(data: np.ndarray, datatype: str) -> list:\n if datatype == \"BYTES\":\n if np.issubdtype(data.dtype, str):\n # Handle special case of a string Numpy array, where the diff elems\n # need to be encoded as well\n as_list = data.flatten().tolist()\n return list(map(encode_str, as_list))\n\n if np.issubdtype(data.dtype, bytes):\n # `tobytes` is way faster than tolist, although it's harder to serialise\n # and only makes sense for actual bytes inputs (#253).\n # Note that `.tobytes()` will return a single `bytes` payload, thus we\n # need to encapsulate it into a list so that it's compatible.\n return [data.tobytes()]\n\n return data.flatten().tolist()\n\n\n@register_input_codec\nclass NumpyCodec(InputCodec):\n \"\"\"\n Decodes an request input (response output) as a NumPy array.\n \"\"\"\n\n ContentType = \"np\"\n TypeHint = np.ndarray\n\n @classmethod\n def can_encode(csl, payload: Any) -> bool:\n return isinstance(payload, np.ndarray)\n\n @classmethod\n def encode_output(cls, name: str, payload: np.ndarray, **kwargs) -> 
ResponseOutput:\n datatype = to_datatype(payload.dtype)\n\n shape = inject_batch_dimension(list(payload.shape))\n\n return ResponseOutput(\n name=name,\n datatype=datatype,\n shape=shape,\n data=_encode_data(payload, datatype),\n parameters=Parameters(content_type=cls.ContentType),\n )\n\n @classmethod\n def decode_output(cls, response_output: ResponseOutput) -> np.ndarray:\n return cls.decode_input(response_output) # type: ignore\n\n @classmethod\n def encode_input(cls, name: str, payload: np.ndarray, **kwargs) -> RequestInput:\n output = cls.encode_output(name=name, payload=payload)\n\n return RequestInput(\n name=output.name,\n datatype=output.datatype,\n shape=output.shape,\n data=output.data,\n parameters=Parameters(content_type=cls.ContentType),\n )\n\n @classmethod\n def decode_input(cls, request_input: RequestInput) -> np.ndarray:\n model_data = _to_ndarray(request_input)\n\n # TODO: Check if reshape not valid\n return model_data.reshape(request_input.shape)\n\n\n@register_request_codec\nclass NumpyRequestCodec(SingleInputRequestCodec):\n \"\"\"\n Decodes the first input (output) of request (response) as a NumPy array.\n This codec can be useful for cases where the whole payload is a single\n NumPy tensor.\n \"\"\"\n\n InputCodec = NumpyCodec\n ContentType = NumpyCodec.ContentType\n", "path": "mlserver/codecs/numpy.py"}, {"content": "import pandas as pd\nimport numpy as np\n\nfrom typing import Optional, Any, List, Tuple\n\nfrom .base import RequestCodec, register_request_codec\nfrom .numpy import to_datatype, to_dtype\nfrom .string import encode_str, StringCodec\nfrom .utils import get_decoded_or_raw, InputOrOutput, inject_batch_dimension\nfrom .lists import ListElement\nfrom ..types import (\n InferenceRequest,\n InferenceResponse,\n RequestInput,\n ResponseOutput,\n Parameters,\n)\n\n\ndef _to_series(input_or_output: InputOrOutput) -> pd.Series:\n payload = get_decoded_or_raw(input_or_output)\n\n if input_or_output.datatype == \"BYTES\":\n # Don't convert the dtype of BYTES\n return pd.Series(payload)\n\n if isinstance(payload, np.ndarray):\n # Necessary so that it's compatible with pd.Series\n payload = list(payload)\n\n dtype = to_dtype(input_or_output)\n return pd.Series(payload, dtype=dtype)\n\n\ndef _to_response_output(series: pd.Series, use_bytes: bool = True) -> ResponseOutput:\n datatype = to_datatype(series.dtype)\n data = series.tolist()\n content_type = None\n\n if datatype == \"BYTES\":\n data, content_type = _process_bytes(data, use_bytes)\n\n shape = inject_batch_dimension(list(series.shape))\n parameters = None\n if content_type:\n parameters = Parameters(content_type=content_type)\n\n return ResponseOutput(\n name=series.name,\n shape=shape,\n data=data,\n datatype=datatype,\n parameters=parameters,\n )\n\n\ndef _process_bytes(\n data: List[ListElement], use_bytes: bool = True\n) -> Tuple[List[ListElement], Optional[str]]:\n # To ensure that \"string\" columns can be encoded in gRPC, we need to\n # encode them as bytes.\n # We'll also keep track of whether the list should be treated in the\n # future as a list of strings.\n processed = []\n content_type: Optional[str] = StringCodec.ContentType\n for elem in data:\n converted = elem\n if not isinstance(elem, str):\n # There was a non-string element, so we can't determine a content\n # type\n content_type = None\n elif use_bytes:\n converted = encode_str(elem)\n\n processed.append(converted)\n\n return processed, content_type\n\n\n@register_request_codec\nclass PandasCodec(RequestCodec):\n \"\"\"\n Decodes a 
request (response) into a Pandas DataFrame, assuming each input\n (output) head corresponds to a column of the DataFrame.\n \"\"\"\n\n ContentType = \"pd\"\n TypeHint = pd.DataFrame\n\n @classmethod\n def can_encode(cls, payload: Any) -> bool:\n return isinstance(payload, pd.DataFrame)\n\n @classmethod\n def encode_response(\n cls,\n model_name: str,\n payload: pd.DataFrame,\n model_version: Optional[str] = None,\n use_bytes: bool = True,\n **kwargs\n ) -> InferenceResponse:\n outputs = cls.encode_outputs(payload, use_bytes=use_bytes)\n\n return InferenceResponse(\n model_name=model_name,\n model_version=model_version,\n parameters=Parameters(content_type=cls.ContentType),\n outputs=outputs,\n )\n\n @classmethod\n def decode_response(cls, response: InferenceResponse) -> pd.DataFrame:\n data = {\n response_output.name: _to_series(response_output)\n for response_output in response.outputs\n }\n\n return pd.DataFrame(data)\n\n @classmethod\n def encode_outputs(\n cls, payload: pd.DataFrame, use_bytes: bool = True\n ) -> List[ResponseOutput]:\n return [\n _to_response_output(payload[col], use_bytes=use_bytes) for col in payload\n ]\n\n @classmethod\n def encode_request(\n cls, payload: pd.DataFrame, use_bytes: bool = True, **kwargs\n ) -> InferenceRequest:\n outputs = cls.encode_outputs(payload, use_bytes=use_bytes)\n\n return InferenceRequest(\n parameters=Parameters(content_type=cls.ContentType),\n inputs=[\n RequestInput(\n name=output.name,\n datatype=output.datatype,\n shape=output.shape,\n data=output.data,\n parameters=output.parameters,\n )\n for output in outputs\n ],\n )\n\n @classmethod\n def decode_request(cls, request: InferenceRequest) -> pd.DataFrame:\n data = {\n request_input.name: _to_series(request_input)\n for request_input in request.inputs\n }\n\n return pd.DataFrame(data)\n", "path": "mlserver/codecs/pandas.py"}]}
  num_tokens_prompt:  3,636
  num_tokens_diff:    461

Sample row 4
  problem_id:    gh_patches_debug_28083
  source:        rasdani/github-patches
  task_type:     git_diff
  in_source_id:  MycroftAI__mycroft-core-1546
  prompt:
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Initial load of skills fails if auto_update is true. On a new clone of the dev tree on an Ubuntu 16.04 machine **that's never run mycroft before.** I ran _msm/msm default_ and it downloaded 27 skills, but mycroft wouldn't load any skills except _pairing_. I looked at the skills loading code and saw it wasn't getting past this: ``` if (exists(SKILLS_DIR) and (self.next_download or not update)): ``` After I changed my config to _auto_update: false_, they loaded. </issue> <code> [start of mycroft/util/__init__.py] 1 # Copyright 2017 Mycroft AI Inc. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 # 15 from __future__ import absolute_import 16 import socket 17 import subprocess 18 from threading import Thread 19 from time import sleep 20 21 import json 22 import os.path 23 import psutil 24 from stat import S_ISREG, ST_MTIME, ST_MODE, ST_SIZE 25 26 import signal as sig 27 28 import mycroft.audio 29 import mycroft.configuration 30 from mycroft.util.format import nice_number 31 # Officially exported methods from this file: 32 # play_wav, play_mp3, get_cache_directory, 33 # resolve_resource_file, wait_while_speaking 34 from mycroft.util.log import LOG 35 from mycroft.util.parse import extract_datetime, extractnumber, normalize 36 from mycroft.util.signal import * 37 38 39 def resolve_resource_file(res_name): 40 """Convert a resource into an absolute filename. 41 42 Resource names are in the form: 'filename.ext' 43 or 'path/filename.ext' 44 45 The system wil look for ~/.mycroft/res_name first, and 46 if not found will look at /opt/mycroft/res_name, 47 then finally it will look for res_name in the 'mycroft/res' 48 folder of the source code package. 49 50 Example: 51 With mycroft running as the user 'bob', if you called 52 resolve_resource_file('snd/beep.wav') 53 it would return either '/home/bob/.mycroft/snd/beep.wav' or 54 '/opt/mycroft/snd/beep.wav' or '.../mycroft/res/snd/beep.wav', 55 where the '...' is replaced by the path where the package has 56 been installed. 57 58 Args: 59 res_name (str): a resource path/name 60 """ 61 62 # First look for fully qualified file (e.g. 
a user setting) 63 if os.path.isfile(res_name): 64 return res_name 65 66 # Now look for ~/.mycroft/res_name (in user folder) 67 filename = os.path.expanduser("~/.mycroft/" + res_name) 68 if os.path.isfile(filename): 69 return filename 70 71 # Next look for /opt/mycroft/res/res_name 72 filename = os.path.expanduser("/opt/mycroft/" + res_name) 73 if os.path.isfile(filename): 74 return filename 75 76 # Finally look for it in the source package 77 filename = os.path.join(os.path.dirname(__file__), '..', 'res', res_name) 78 filename = os.path.abspath(os.path.normpath(filename)) 79 if os.path.isfile(filename): 80 return filename 81 82 return None # Resource cannot be resolved 83 84 85 def play_wav(uri): 86 config = mycroft.configuration.Configuration.get() 87 play_cmd = config.get("play_wav_cmdline") 88 play_wav_cmd = str(play_cmd).split(" ") 89 for index, cmd in enumerate(play_wav_cmd): 90 if cmd == "%1": 91 play_wav_cmd[index] = (get_http(uri)) 92 return subprocess.Popen(play_wav_cmd) 93 94 95 def play_mp3(uri): 96 config = mycroft.configuration.Configuration.get() 97 play_cmd = config.get("play_mp3_cmdline") 98 play_mp3_cmd = str(play_cmd).split(" ") 99 for index, cmd in enumerate(play_mp3_cmd): 100 if cmd == "%1": 101 play_mp3_cmd[index] = (get_http(uri)) 102 return subprocess.Popen(play_mp3_cmd) 103 104 105 def record(file_path, duration, rate, channels): 106 if duration > 0: 107 return subprocess.Popen( 108 ["arecord", "-r", str(rate), "-c", str(channels), "-d", 109 str(duration), file_path]) 110 else: 111 return subprocess.Popen( 112 ["arecord", "-r", str(rate), "-c", str(channels), file_path]) 113 114 115 def get_http(uri): 116 return uri.replace("https://", "http://") 117 118 119 def remove_last_slash(url): 120 if url and url.endswith('/'): 121 url = url[:-1] 122 return url 123 124 125 def read_stripped_lines(filename): 126 with open(filename, 'r') as f: 127 return [line.strip() for line in f] 128 129 130 def read_dict(filename, div='='): 131 d = {} 132 with open(filename, 'r') as f: 133 for line in f: 134 (key, val) = line.split(div) 135 d[key.strip()] = val.strip() 136 return d 137 138 139 def connected(host="8.8.8.8", port=53, timeout=3): 140 """ 141 Thanks to 7h3rAm on 142 Host: 8.8.8.8 (google-public-dns-a.google.com) 143 OpenPort: 53/tcp 144 Service: domain (DNS/TCP) 145 146 NOTE: 147 This is no longer in use by this version 148 New method checks for a connection using ConnectionError only when 149 a question is asked 150 """ 151 try: 152 socket.setdefaulttimeout(timeout) 153 socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect((host, port)) 154 return True 155 except IOError: 156 try: 157 socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect( 158 ("8.8.4.4", port)) 159 return True 160 except IOError: 161 return False 162 163 164 def curate_cache(directory, min_free_percent=5.0, min_free_disk=50): 165 """Clear out the directory if needed 166 167 This assumes all the files in the directory can be deleted as freely 168 169 Args: 170 directory (str): directory path that holds cached files 171 min_free_percent (float): percentage (0.0-100.0) of drive to keep free, 172 default is 5% if not specified. 173 min_free_disk (float): minimum allowed disk space in MB, default 174 value is 50 MB if not specified. 175 """ 176 177 # Simpleminded implementation -- keep a certain percentage of the 178 # disk available. 179 # TODO: Would be easy to add more options, like whitelisted files, etc. 
180 space = psutil.disk_usage(directory) 181 182 # convert from MB to bytes 183 min_free_disk *= 1024 * 1024 184 # space.percent = space.used/space.total*100.0 185 percent_free = 100.0 - space.percent 186 if percent_free < min_free_percent and space.free < min_free_disk: 187 LOG.info('Low diskspace detected, cleaning cache') 188 # calculate how many bytes we need to delete 189 bytes_needed = (min_free_percent - percent_free) / 100.0 * space.total 190 bytes_needed = int(bytes_needed + 1.0) 191 192 # get all entries in the directory w/ stats 193 entries = (os.path.join(directory, fn) for fn in os.listdir(directory)) 194 entries = ((os.stat(path), path) for path in entries) 195 196 # leave only regular files, insert modification date 197 entries = ((stat[ST_MTIME], stat[ST_SIZE], path) 198 for stat, path in entries if S_ISREG(stat[ST_MODE])) 199 200 # delete files with oldest modification date until space is freed 201 space_freed = 0 202 for moddate, fsize, path in sorted(entries): 203 try: 204 os.remove(path) 205 space_freed += fsize 206 except: 207 pass 208 209 if space_freed > bytes_needed: 210 return # deleted enough! 211 212 213 def get_cache_directory(domain=None): 214 """Get a directory for caching data 215 216 This directory can be used to hold temporary caches of data to 217 speed up performance. This directory will likely be part of a 218 small RAM disk and may be cleared at any time. So code that 219 uses these cached files must be able to fallback and regenerate 220 the file. 221 222 Args: 223 domain (str): The cache domain. Basically just a subdirectory. 224 225 Return: 226 str: a path to the directory where you can cache data 227 """ 228 config = mycroft.configuration.Configuration.get() 229 dir = config.get("cache_path") 230 if not dir: 231 # If not defined, use /tmp/mycroft/cache 232 dir = os.path.join(tempfile.gettempdir(), "mycroft", "cache") 233 return ensure_directory_exists(dir, domain) 234 235 236 def validate_param(value, name): 237 if not value: 238 raise ValueError("Missing or empty %s in mycroft.conf " % name) 239 240 241 def is_speaking(): 242 """Determine if Text to Speech is occurring 243 244 Returns: 245 bool: True while still speaking 246 """ 247 LOG.info("mycroft.utils.is_speaking() is depreciated, use " 248 "mycroft.audio.is_speaking() instead.") 249 return mycroft.audio.is_speaking() 250 251 252 def wait_while_speaking(): 253 """Pause as long as Text to Speech is still happening 254 255 Pause while Text to Speech is still happening. This always pauses 256 briefly to ensure that any preceeding request to speak has time to 257 begin. 258 """ 259 LOG.info("mycroft.utils.wait_while_speaking() is depreciated, use " 260 "mycroft.audio.wait_while_speaking() instead.") 261 return mycroft.audio.wait_while_speaking() 262 263 264 def stop_speaking(): 265 # TODO: Less hacky approach to this once Audio Manager is implemented 266 # Skills should only be able to stop speech they've initiated 267 LOG.info("mycroft.utils.stop_speaking() is depreciated, use " 268 "mycroft.audio.stop_speaking() instead.") 269 mycroft.audio.stop_speaking() 270 271 272 def get_arch(): 273 """ Get architecture string of system. """ 274 return os.uname()[4] 275 276 277 def reset_sigint_handler(): 278 """ 279 Reset the sigint handler to the default. 
This fixes KeyboardInterrupt 280 not getting raised when started via start-mycroft.sh 281 """ 282 sig.signal(sig.SIGINT, sig.default_int_handler) 283 284 285 def create_daemon(target, args=(), kwargs=None): 286 """Helper to quickly create and start a thread with daemon = True""" 287 t = Thread(target=target, args=args, kwargs=kwargs) 288 t.daemon = True 289 t.start() 290 return t 291 292 293 def wait_for_exit_signal(): 294 """Blocks until KeyboardInterrupt is received""" 295 try: 296 while True: 297 sleep(100) 298 except KeyboardInterrupt: 299 pass 300 301 302 def create_echo_function(name, whitelist=None): 303 from mycroft.configuration import Configuration 304 blacklist = Configuration.get().get("ignore_logs") 305 306 def echo(message): 307 """Listen for messages and echo them for logging""" 308 try: 309 js_msg = json.loads(message) 310 311 if whitelist and js_msg.get("type") not in whitelist: 312 return 313 314 if blacklist and js_msg.get("type") in blacklist: 315 return 316 317 if js_msg.get("type") == "registration": 318 # do not log tokens from registration messages 319 js_msg["data"]["token"] = None 320 message = json.dumps(js_msg) 321 except Exception: 322 pass 323 LOG(name).debug(message) 324 return echo 325 [end of mycroft/util/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mycroft/util/__init__.py b/mycroft/util/__init__.py --- a/mycroft/util/__init__.py +++ b/mycroft/util/__init__.py @@ -22,6 +22,7 @@ import os.path import psutil from stat import S_ISREG, ST_MTIME, ST_MODE, ST_SIZE +import requests import signal as sig @@ -136,18 +137,41 @@ return d -def connected(host="8.8.8.8", port=53, timeout=3): +def connected(): + """ Check connection by connecting to 8.8.8.8, if this is + blocked/fails, Microsoft NCSI is used as a backup + + Returns: + True if internet connection can be detected + """ + return connected_dns() or connected_ncsi() + + +def connected_ncsi(): + """ Check internet connection by retrieving the Microsoft NCSI endpoint. + + Returns: + True if internet connection can be detected """ - Thanks to 7h3rAm on - Host: 8.8.8.8 (google-public-dns-a.google.com) - OpenPort: 53/tcp - Service: domain (DNS/TCP) - - NOTE: - This is no longer in use by this version - New method checks for a connection using ConnectionError only when - a question is asked + try: + r = requests.get('http://www.msftncsi.com/ncsi.txt') + if r.text == u'Microsoft NCSI': + return True + except Exception: + pass + return False + + +def connected_dns(host="8.8.8.8", port=53, timeout=3): + """ Check internet connection by connecting to DNS servers + + Returns: + True if internet connection can be detected """ + # Thanks to 7h3rAm on + # Host: 8.8.8.8 (google-public-dns-a.google.com) + # OpenPort: 53/tcp + # Service: domain (DNS/TCP) try: socket.setdefaulttimeout(timeout) socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect((host, port))
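For readers skimming the diff above, the net effect on callers is small; a hypothetical usage sketch follows. The function names come from the patch itself, while the surrounding script is illustrative only and assumes the patched `mycroft.util` is importable and `requests` (newly imported by the patch) is installed.

```python
# Sketch of how the patched connectivity helpers compose; illustrative only.
from mycroft.util import connected, connected_dns, connected_ncsi

if connected():  # tries a raw DNS socket first, then falls back to Microsoft NCSI
    print("internet reachable")
else:
    print("DNS probe:", connected_dns())
    print("NCSI probe:", connected_ncsi())
```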
{"golden_diff": "diff --git a/mycroft/util/__init__.py b/mycroft/util/__init__.py\n--- a/mycroft/util/__init__.py\n+++ b/mycroft/util/__init__.py\n@@ -22,6 +22,7 @@\n import os.path\n import psutil\n from stat import S_ISREG, ST_MTIME, ST_MODE, ST_SIZE\n+import requests\n \n import signal as sig\n \n@@ -136,18 +137,41 @@\n return d\n \n \n-def connected(host=\"8.8.8.8\", port=53, timeout=3):\n+def connected():\n+ \"\"\" Check connection by connecting to 8.8.8.8, if this is\n+ blocked/fails, Microsoft NCSI is used as a backup\n+\n+ Returns:\n+ True if internet connection can be detected\n+ \"\"\"\n+ return connected_dns() or connected_ncsi()\n+\n+\n+def connected_ncsi():\n+ \"\"\" Check internet connection by retrieving the Microsoft NCSI endpoint.\n+\n+ Returns:\n+ True if internet connection can be detected\n \"\"\"\n- Thanks to 7h3rAm on\n- Host: 8.8.8.8 (google-public-dns-a.google.com)\n- OpenPort: 53/tcp\n- Service: domain (DNS/TCP)\n-\n- NOTE:\n- This is no longer in use by this version\n- New method checks for a connection using ConnectionError only when\n- a question is asked\n+ try:\n+ r = requests.get('http://www.msftncsi.com/ncsi.txt')\n+ if r.text == u'Microsoft NCSI':\n+ return True\n+ except Exception:\n+ pass\n+ return False\n+\n+\n+def connected_dns(host=\"8.8.8.8\", port=53, timeout=3):\n+ \"\"\" Check internet connection by connecting to DNS servers\n+\n+ Returns:\n+ True if internet connection can be detected\n \"\"\"\n+ # Thanks to 7h3rAm on\n+ # Host: 8.8.8.8 (google-public-dns-a.google.com)\n+ # OpenPort: 53/tcp\n+ # Service: domain (DNS/TCP)\n try:\n socket.setdefaulttimeout(timeout)\n socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect((host, port))\n", "issue": "Initial load of skills fails if auto_update is true.\nOn a new clone of the dev tree on an Ubuntu 16.04 machine **that's never run mycroft before.**\r\n\r\nI ran _msm/msm default_ and it downloaded 27 skills, but mycroft wouldn't load any skills except _pairing_. 
I looked at the skills loading code and saw it wasn't getting past this:\r\n\r\n```\r\n if (exists(SKILLS_DIR) and\r\n (self.next_download or not update)):\r\n```\r\n\r\nAfter I changed my config to _auto_update: false_, they loaded.\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "# Copyright 2017 Mycroft AI Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nfrom __future__ import absolute_import\nimport socket\nimport subprocess\nfrom threading import Thread\nfrom time import sleep\n\nimport json\nimport os.path\nimport psutil\nfrom stat import S_ISREG, ST_MTIME, ST_MODE, ST_SIZE\n\nimport signal as sig\n\nimport mycroft.audio\nimport mycroft.configuration\nfrom mycroft.util.format import nice_number\n# Officially exported methods from this file:\n# play_wav, play_mp3, get_cache_directory,\n# resolve_resource_file, wait_while_speaking\nfrom mycroft.util.log import LOG\nfrom mycroft.util.parse import extract_datetime, extractnumber, normalize\nfrom mycroft.util.signal import *\n\n\ndef resolve_resource_file(res_name):\n \"\"\"Convert a resource into an absolute filename.\n\n Resource names are in the form: 'filename.ext'\n or 'path/filename.ext'\n\n The system wil look for ~/.mycroft/res_name first, and\n if not found will look at /opt/mycroft/res_name,\n then finally it will look for res_name in the 'mycroft/res'\n folder of the source code package.\n\n Example:\n With mycroft running as the user 'bob', if you called\n resolve_resource_file('snd/beep.wav')\n it would return either '/home/bob/.mycroft/snd/beep.wav' or\n '/opt/mycroft/snd/beep.wav' or '.../mycroft/res/snd/beep.wav',\n where the '...' is replaced by the path where the package has\n been installed.\n\n Args:\n res_name (str): a resource path/name\n \"\"\"\n\n # First look for fully qualified file (e.g. 
a user setting)\n if os.path.isfile(res_name):\n return res_name\n\n # Now look for ~/.mycroft/res_name (in user folder)\n filename = os.path.expanduser(\"~/.mycroft/\" + res_name)\n if os.path.isfile(filename):\n return filename\n\n # Next look for /opt/mycroft/res/res_name\n filename = os.path.expanduser(\"/opt/mycroft/\" + res_name)\n if os.path.isfile(filename):\n return filename\n\n # Finally look for it in the source package\n filename = os.path.join(os.path.dirname(__file__), '..', 'res', res_name)\n filename = os.path.abspath(os.path.normpath(filename))\n if os.path.isfile(filename):\n return filename\n\n return None # Resource cannot be resolved\n\n\ndef play_wav(uri):\n config = mycroft.configuration.Configuration.get()\n play_cmd = config.get(\"play_wav_cmdline\")\n play_wav_cmd = str(play_cmd).split(\" \")\n for index, cmd in enumerate(play_wav_cmd):\n if cmd == \"%1\":\n play_wav_cmd[index] = (get_http(uri))\n return subprocess.Popen(play_wav_cmd)\n\n\ndef play_mp3(uri):\n config = mycroft.configuration.Configuration.get()\n play_cmd = config.get(\"play_mp3_cmdline\")\n play_mp3_cmd = str(play_cmd).split(\" \")\n for index, cmd in enumerate(play_mp3_cmd):\n if cmd == \"%1\":\n play_mp3_cmd[index] = (get_http(uri))\n return subprocess.Popen(play_mp3_cmd)\n\n\ndef record(file_path, duration, rate, channels):\n if duration > 0:\n return subprocess.Popen(\n [\"arecord\", \"-r\", str(rate), \"-c\", str(channels), \"-d\",\n str(duration), file_path])\n else:\n return subprocess.Popen(\n [\"arecord\", \"-r\", str(rate), \"-c\", str(channels), file_path])\n\n\ndef get_http(uri):\n return uri.replace(\"https://\", \"http://\")\n\n\ndef remove_last_slash(url):\n if url and url.endswith('/'):\n url = url[:-1]\n return url\n\n\ndef read_stripped_lines(filename):\n with open(filename, 'r') as f:\n return [line.strip() for line in f]\n\n\ndef read_dict(filename, div='='):\n d = {}\n with open(filename, 'r') as f:\n for line in f:\n (key, val) = line.split(div)\n d[key.strip()] = val.strip()\n return d\n\n\ndef connected(host=\"8.8.8.8\", port=53, timeout=3):\n \"\"\"\n Thanks to 7h3rAm on\n Host: 8.8.8.8 (google-public-dns-a.google.com)\n OpenPort: 53/tcp\n Service: domain (DNS/TCP)\n\n NOTE:\n This is no longer in use by this version\n New method checks for a connection using ConnectionError only when\n a question is asked\n \"\"\"\n try:\n socket.setdefaulttimeout(timeout)\n socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect((host, port))\n return True\n except IOError:\n try:\n socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect(\n (\"8.8.4.4\", port))\n return True\n except IOError:\n return False\n\n\ndef curate_cache(directory, min_free_percent=5.0, min_free_disk=50):\n \"\"\"Clear out the directory if needed\n\n This assumes all the files in the directory can be deleted as freely\n\n Args:\n directory (str): directory path that holds cached files\n min_free_percent (float): percentage (0.0-100.0) of drive to keep free,\n default is 5% if not specified.\n min_free_disk (float): minimum allowed disk space in MB, default\n value is 50 MB if not specified.\n \"\"\"\n\n # Simpleminded implementation -- keep a certain percentage of the\n # disk available.\n # TODO: Would be easy to add more options, like whitelisted files, etc.\n space = psutil.disk_usage(directory)\n\n # convert from MB to bytes\n min_free_disk *= 1024 * 1024\n # space.percent = space.used/space.total*100.0\n percent_free = 100.0 - space.percent\n if percent_free < min_free_percent and space.free < 
min_free_disk:\n LOG.info('Low diskspace detected, cleaning cache')\n # calculate how many bytes we need to delete\n bytes_needed = (min_free_percent - percent_free) / 100.0 * space.total\n bytes_needed = int(bytes_needed + 1.0)\n\n # get all entries in the directory w/ stats\n entries = (os.path.join(directory, fn) for fn in os.listdir(directory))\n entries = ((os.stat(path), path) for path in entries)\n\n # leave only regular files, insert modification date\n entries = ((stat[ST_MTIME], stat[ST_SIZE], path)\n for stat, path in entries if S_ISREG(stat[ST_MODE]))\n\n # delete files with oldest modification date until space is freed\n space_freed = 0\n for moddate, fsize, path in sorted(entries):\n try:\n os.remove(path)\n space_freed += fsize\n except:\n pass\n\n if space_freed > bytes_needed:\n return # deleted enough!\n\n\ndef get_cache_directory(domain=None):\n \"\"\"Get a directory for caching data\n\n This directory can be used to hold temporary caches of data to\n speed up performance. This directory will likely be part of a\n small RAM disk and may be cleared at any time. So code that\n uses these cached files must be able to fallback and regenerate\n the file.\n\n Args:\n domain (str): The cache domain. Basically just a subdirectory.\n\n Return:\n str: a path to the directory where you can cache data\n \"\"\"\n config = mycroft.configuration.Configuration.get()\n dir = config.get(\"cache_path\")\n if not dir:\n # If not defined, use /tmp/mycroft/cache\n dir = os.path.join(tempfile.gettempdir(), \"mycroft\", \"cache\")\n return ensure_directory_exists(dir, domain)\n\n\ndef validate_param(value, name):\n if not value:\n raise ValueError(\"Missing or empty %s in mycroft.conf \" % name)\n\n\ndef is_speaking():\n \"\"\"Determine if Text to Speech is occurring\n\n Returns:\n bool: True while still speaking\n \"\"\"\n LOG.info(\"mycroft.utils.is_speaking() is depreciated, use \"\n \"mycroft.audio.is_speaking() instead.\")\n return mycroft.audio.is_speaking()\n\n\ndef wait_while_speaking():\n \"\"\"Pause as long as Text to Speech is still happening\n\n Pause while Text to Speech is still happening. This always pauses\n briefly to ensure that any preceeding request to speak has time to\n begin.\n \"\"\"\n LOG.info(\"mycroft.utils.wait_while_speaking() is depreciated, use \"\n \"mycroft.audio.wait_while_speaking() instead.\")\n return mycroft.audio.wait_while_speaking()\n\n\ndef stop_speaking():\n # TODO: Less hacky approach to this once Audio Manager is implemented\n # Skills should only be able to stop speech they've initiated\n LOG.info(\"mycroft.utils.stop_speaking() is depreciated, use \"\n \"mycroft.audio.stop_speaking() instead.\")\n mycroft.audio.stop_speaking()\n\n\ndef get_arch():\n \"\"\" Get architecture string of system. \"\"\"\n return os.uname()[4]\n\n\ndef reset_sigint_handler():\n \"\"\"\n Reset the sigint handler to the default. 
This fixes KeyboardInterrupt\n not getting raised when started via start-mycroft.sh\n \"\"\"\n sig.signal(sig.SIGINT, sig.default_int_handler)\n\n\ndef create_daemon(target, args=(), kwargs=None):\n \"\"\"Helper to quickly create and start a thread with daemon = True\"\"\"\n t = Thread(target=target, args=args, kwargs=kwargs)\n t.daemon = True\n t.start()\n return t\n\n\ndef wait_for_exit_signal():\n \"\"\"Blocks until KeyboardInterrupt is received\"\"\"\n try:\n while True:\n sleep(100)\n except KeyboardInterrupt:\n pass\n\n\ndef create_echo_function(name, whitelist=None):\n from mycroft.configuration import Configuration\n blacklist = Configuration.get().get(\"ignore_logs\")\n\n def echo(message):\n \"\"\"Listen for messages and echo them for logging\"\"\"\n try:\n js_msg = json.loads(message)\n\n if whitelist and js_msg.get(\"type\") not in whitelist:\n return\n\n if blacklist and js_msg.get(\"type\") in blacklist:\n return\n\n if js_msg.get(\"type\") == \"registration\":\n # do not log tokens from registration messages\n js_msg[\"data\"][\"token\"] = None\n message = json.dumps(js_msg)\n except Exception:\n pass\n LOG(name).debug(message)\n return echo\n", "path": "mycroft/util/__init__.py"}]}
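One clarifying note on the guard quoted in the issue: with auto_update enabled on a first run, the condition evaluates to False and no skills load. The stand-alone evaluation below uses assumed values for a fresh install; it is not mycroft code.

```python
# Assumed state of a brand-new install with auto_update: true (hypothetical values).
skills_dir_exists = True   # msm default already populated the skills directory
next_download = 0          # nothing scheduled yet on the very first run
update = True              # auto_update enabled in the configuration

if skills_dir_exists and (next_download or not update):
    print("skills would be loaded")
else:
    print("skills skipped")   # this branch is taken, matching the report
```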
num_tokens_prompt: 4,024
num_tokens_diff: 519

problem_id: gh_patches_debug_27651
source: rasdani/github-patches
task_type: git_diff
in_source_id: pypa__setuptools-1921
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Handling of find_links in setup.cfg in 42.0.0 Hi, I have a couple of `setup_requires` dependencies in my setup.py and my setup.cfg looks like this: ``` [easy_install] find_links = https://example.com ``` Looking at: https://github.com/pypa/setuptools/blob/e84f616a6507ec9115fad68b221cbf5333d9d2d9/setuptools/installer.py#L119 The `cmd` ends up being: ```python ['python', '-m', 'pip', '--disable-pip-version-check', 'wheel', '--no-deps', '-w', '/tmp/tmpWyNzjZ', '--quiet', '--find-links', 'h', '--find-links', 't', '--find-links', 't', '--find-links', 'p', '--find-links', 's', '--find-links', ':', '--find-links', '/', '--find-links', '/', '--find-links', 'e', '--find-links', 'x', '--find-links', 'a', '--find-links', 'm', '--find-links', 'p', '--find-links', 'l', '--find-links', 'e', '--find-links', '.', '--find-links', 'c', '--find-links', 'o', '--find-links', 'm', 'babel; extra == "i18n"'] ``` It seems the assumption was that `find_links` is a list but it ends up being a string. Additionally, since pip is unable to build/fetch a wheel, the following line throws an IndexError: https://github.com/pypa/setuptools/blob/e84f616a6507ec9115fad68b221cbf5333d9d2d9/setuptools/installer.py#L122 Perhaps this could be caught and a more user-friendly error displayed instead. </issue> <code> [start of setuptools/installer.py] 1 import glob 2 import os 3 import subprocess 4 import sys 5 from distutils import log 6 from distutils.errors import DistutilsError 7 8 import pkg_resources 9 from setuptools.command.easy_install import easy_install 10 from setuptools.wheel import Wheel 11 12 from .py31compat import TemporaryDirectory 13 14 15 def _legacy_fetch_build_egg(dist, req): 16 """Fetch an egg needed for building. 17 18 Legacy path using EasyInstall. 19 """ 20 tmp_dist = dist.__class__({'script_args': ['easy_install']}) 21 opts = tmp_dist.get_option_dict('easy_install') 22 opts.clear() 23 opts.update( 24 (k, v) 25 for k, v in dist.get_option_dict('easy_install').items() 26 if k in ( 27 # don't use any other settings 28 'find_links', 'site_dirs', 'index_url', 29 'optimize', 'site_dirs', 'allow_hosts', 30 )) 31 if dist.dependency_links: 32 links = dist.dependency_links[:] 33 if 'find_links' in opts: 34 links = opts['find_links'][1] + links 35 opts['find_links'] = ('setup', links) 36 install_dir = dist.get_egg_cache_dir() 37 cmd = easy_install( 38 tmp_dist, args=["x"], install_dir=install_dir, 39 exclude_scripts=True, 40 always_copy=False, build_directory=None, editable=False, 41 upgrade=False, multi_version=True, no_report=True, user=False 42 ) 43 cmd.ensure_finalized() 44 return cmd.easy_install(req) 45 46 47 def fetch_build_egg(dist, req): 48 """Fetch an egg needed for building. 49 50 Use pip/wheel to fetch/build a wheel.""" 51 # Check pip is available. 52 try: 53 pkg_resources.get_distribution('pip') 54 except pkg_resources.DistributionNotFound: 55 dist.announce( 56 'WARNING: The pip package is not available, falling back ' 57 'to EasyInstall for handling setup_requires/test_requires; ' 58 'this is deprecated and will be removed in a future version.' 59 , log.WARN 60 ) 61 return _legacy_fetch_build_egg(dist, req) 62 # Warn if wheel is not. 63 try: 64 pkg_resources.get_distribution('wheel') 65 except pkg_resources.DistributionNotFound: 66 dist.announce('WARNING: The wheel package is not available.', log.WARN) 67 # Ignore environment markers; if supplied, it is required. 
68 req = strip_marker(req) 69 # Take easy_install options into account, but do not override relevant 70 # pip environment variables (like PIP_INDEX_URL or PIP_QUIET); they'll 71 # take precedence. 72 opts = dist.get_option_dict('easy_install') 73 if 'allow_hosts' in opts: 74 raise DistutilsError('the `allow-hosts` option is not supported ' 75 'when using pip to install requirements.') 76 if 'PIP_QUIET' in os.environ or 'PIP_VERBOSE' in os.environ: 77 quiet = False 78 else: 79 quiet = True 80 if 'PIP_INDEX_URL' in os.environ: 81 index_url = None 82 elif 'index_url' in opts: 83 index_url = opts['index_url'][1] 84 else: 85 index_url = None 86 if 'find_links' in opts: 87 find_links = opts['find_links'][1][:] 88 else: 89 find_links = [] 90 if dist.dependency_links: 91 find_links.extend(dist.dependency_links) 92 eggs_dir = os.path.realpath(dist.get_egg_cache_dir()) 93 environment = pkg_resources.Environment() 94 for egg_dist in pkg_resources.find_distributions(eggs_dir): 95 if egg_dist in req and environment.can_add(egg_dist): 96 return egg_dist 97 with TemporaryDirectory() as tmpdir: 98 cmd = [ 99 sys.executable, '-m', 'pip', 100 '--disable-pip-version-check', 101 'wheel', '--no-deps', 102 '-w', tmpdir, 103 ] 104 if quiet: 105 cmd.append('--quiet') 106 if index_url is not None: 107 cmd.extend(('--index-url', index_url)) 108 if find_links is not None: 109 for link in find_links: 110 cmd.extend(('--find-links', link)) 111 # If requirement is a PEP 508 direct URL, directly pass 112 # the URL to pip, as `req @ url` does not work on the 113 # command line. 114 if req.url: 115 cmd.append(req.url) 116 else: 117 cmd.append(str(req)) 118 try: 119 subprocess.check_call(cmd) 120 except subprocess.CalledProcessError as e: 121 raise DistutilsError(str(e)) 122 wheel = Wheel(glob.glob(os.path.join(tmpdir, '*.whl'))[0]) 123 dist_location = os.path.join(eggs_dir, wheel.egg_name()) 124 wheel.install_as_egg(dist_location) 125 dist_metadata = pkg_resources.PathMetadata( 126 dist_location, os.path.join(dist_location, 'EGG-INFO')) 127 dist = pkg_resources.Distribution.from_filename( 128 dist_location, metadata=dist_metadata) 129 return dist 130 131 132 def strip_marker(req): 133 """ 134 Return a new requirement without the environment marker to avoid 135 calling pip with something like `babel; extra == "i18n"`, which 136 would always be ignored. 137 """ 138 # create a copy to avoid mutating the input 139 req = pkg_resources.Requirement.parse(str(req)) 140 req.marker = None 141 return req 142 [end of setuptools/installer.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
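The heart of the report is that a string read from setup.cfg is iterated character by character; a minimal reproduction outside setuptools (the URL is a hypothetical value) looks like this:

```python
# When find_links arrives as a plain string, extending the pip command
# iterates its characters instead of whole URLs.
find_links = "https://example.com"     # as parsed from [easy_install] in setup.cfg
cmd = ["pip", "wheel", "--no-deps"]
for link in find_links:                # yields 'h', 't', 't', 'p', ...
    cmd.extend(("--find-links", link))
print(cmd[3:9])   # ['--find-links', 'h', '--find-links', 't', '--find-links', 't']

# Splitting the whitespace-separated option first gives the intended command.
for link in find_links.split():
    print("--find-links", link)        # --find-links https://example.com
```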
diff --git a/setuptools/installer.py b/setuptools/installer.py --- a/setuptools/installer.py +++ b/setuptools/installer.py @@ -7,11 +7,20 @@ import pkg_resources from setuptools.command.easy_install import easy_install +from setuptools.extern import six from setuptools.wheel import Wheel from .py31compat import TemporaryDirectory +def _fixup_find_links(find_links): + """Ensure find-links option end-up being a list of strings.""" + if isinstance(find_links, six.string_types): + return find_links.split() + assert isinstance(find_links, (tuple, list)) + return find_links + + def _legacy_fetch_build_egg(dist, req): """Fetch an egg needed for building. @@ -31,7 +40,7 @@ if dist.dependency_links: links = dist.dependency_links[:] if 'find_links' in opts: - links = opts['find_links'][1] + links + links = _fixup_find_links(opts['find_links'][1]) + links opts['find_links'] = ('setup', links) install_dir = dist.get_egg_cache_dir() cmd = easy_install( @@ -84,7 +93,7 @@ else: index_url = None if 'find_links' in opts: - find_links = opts['find_links'][1][:] + find_links = _fixup_find_links(opts['find_links'][1])[:] else: find_links = [] if dist.dependency_links:
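As a quick check of `_fixup_find_links` as defined in the diff, it can be exercised on both input shapes in isolation. The function body below is reproduced from the patch; the second URL is a made-up example, and the vendored `setuptools.extern.six` import is the one the patch itself relies on.

```python
from setuptools.extern import six   # vendored six, as imported by the patch


def _fixup_find_links(find_links):
    """Ensure find-links option end-up being a list of strings."""
    if isinstance(find_links, six.string_types):
        return find_links.split()
    assert isinstance(find_links, (tuple, list))
    return find_links


print(_fixup_find_links("https://example.com https://mirror.example.org"))
# ['https://example.com', 'https://mirror.example.org']
print(_fixup_find_links(["https://example.com"]))
# ['https://example.com']
```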
{"golden_diff": "diff --git a/setuptools/installer.py b/setuptools/installer.py\n--- a/setuptools/installer.py\n+++ b/setuptools/installer.py\n@@ -7,11 +7,20 @@\n \n import pkg_resources\n from setuptools.command.easy_install import easy_install\n+from setuptools.extern import six\n from setuptools.wheel import Wheel\n \n from .py31compat import TemporaryDirectory\n \n \n+def _fixup_find_links(find_links):\n+ \"\"\"Ensure find-links option end-up being a list of strings.\"\"\"\n+ if isinstance(find_links, six.string_types):\n+ return find_links.split()\n+ assert isinstance(find_links, (tuple, list))\n+ return find_links\n+\n+\n def _legacy_fetch_build_egg(dist, req):\n \"\"\"Fetch an egg needed for building.\n \n@@ -31,7 +40,7 @@\n if dist.dependency_links:\n links = dist.dependency_links[:]\n if 'find_links' in opts:\n- links = opts['find_links'][1] + links\n+ links = _fixup_find_links(opts['find_links'][1]) + links\n opts['find_links'] = ('setup', links)\n install_dir = dist.get_egg_cache_dir()\n cmd = easy_install(\n@@ -84,7 +93,7 @@\n else:\n index_url = None\n if 'find_links' in opts:\n- find_links = opts['find_links'][1][:]\n+ find_links = _fixup_find_links(opts['find_links'][1])[:]\n else:\n find_links = []\n if dist.dependency_links:\n", "issue": "Handling of find_links in setup.cfg in 42.0.0\nHi,\r\n\r\nI have a couple of `setup_requires` dependencies in my setup.py and my setup.cfg looks like this:\r\n\r\n```\r\n[easy_install]\r\nfind_links = https://example.com\r\n```\r\n\r\nLooking at: https://github.com/pypa/setuptools/blob/e84f616a6507ec9115fad68b221cbf5333d9d2d9/setuptools/installer.py#L119\r\n\r\nThe `cmd` ends up being:\r\n\r\n```python\r\n['python', '-m', 'pip', '--disable-pip-version-check', 'wheel', '--no-deps', '-w', '/tmp/tmpWyNzjZ', '--quiet', '--find-links', 'h', '--find-links', 't', '--find-links', 't', '--find-links', 'p', '--find-links', 's', '--find-links', ':', '--find-links', '/', '--find-links', '/', '--find-links', 'e', '--find-links', 'x', '--find-links', 'a', '--find-links', 'm', '--find-links', 'p', '--find-links', 'l', '--find-links', 'e', '--find-links', '.', '--find-links', 'c', '--find-links', 'o', '--find-links', 'm', 'babel; extra == \"i18n\"']\r\n```\r\nIt seems the assumption was that `find_links` is a list but it ends up being a string.\r\n\r\nAdditionally, since pip is unable to build/fetch a wheel, the following line throws an IndexError:\r\nhttps://github.com/pypa/setuptools/blob/e84f616a6507ec9115fad68b221cbf5333d9d2d9/setuptools/installer.py#L122\r\n\r\nPerhaps this could be caught and a more user-friendly error displayed instead.\r\n\n", "before_files": [{"content": "import glob\nimport os\nimport subprocess\nimport sys\nfrom distutils import log\nfrom distutils.errors import DistutilsError\n\nimport pkg_resources\nfrom setuptools.command.easy_install import easy_install\nfrom setuptools.wheel import Wheel\n\nfrom .py31compat import TemporaryDirectory\n\n\ndef _legacy_fetch_build_egg(dist, req):\n \"\"\"Fetch an egg needed for building.\n\n Legacy path using EasyInstall.\n \"\"\"\n tmp_dist = dist.__class__({'script_args': ['easy_install']})\n opts = tmp_dist.get_option_dict('easy_install')\n opts.clear()\n opts.update(\n (k, v)\n for k, v in dist.get_option_dict('easy_install').items()\n if k in (\n # don't use any other settings\n 'find_links', 'site_dirs', 'index_url',\n 'optimize', 'site_dirs', 'allow_hosts',\n ))\n if dist.dependency_links:\n links = dist.dependency_links[:]\n if 'find_links' in opts:\n links = opts['find_links'][1] + 
links\n opts['find_links'] = ('setup', links)\n install_dir = dist.get_egg_cache_dir()\n cmd = easy_install(\n tmp_dist, args=[\"x\"], install_dir=install_dir,\n exclude_scripts=True,\n always_copy=False, build_directory=None, editable=False,\n upgrade=False, multi_version=True, no_report=True, user=False\n )\n cmd.ensure_finalized()\n return cmd.easy_install(req)\n\n\ndef fetch_build_egg(dist, req):\n \"\"\"Fetch an egg needed for building.\n\n Use pip/wheel to fetch/build a wheel.\"\"\"\n # Check pip is available.\n try:\n pkg_resources.get_distribution('pip')\n except pkg_resources.DistributionNotFound:\n dist.announce(\n 'WARNING: The pip package is not available, falling back '\n 'to EasyInstall for handling setup_requires/test_requires; '\n 'this is deprecated and will be removed in a future version.'\n , log.WARN\n )\n return _legacy_fetch_build_egg(dist, req)\n # Warn if wheel is not.\n try:\n pkg_resources.get_distribution('wheel')\n except pkg_resources.DistributionNotFound:\n dist.announce('WARNING: The wheel package is not available.', log.WARN)\n # Ignore environment markers; if supplied, it is required.\n req = strip_marker(req)\n # Take easy_install options into account, but do not override relevant\n # pip environment variables (like PIP_INDEX_URL or PIP_QUIET); they'll\n # take precedence.\n opts = dist.get_option_dict('easy_install')\n if 'allow_hosts' in opts:\n raise DistutilsError('the `allow-hosts` option is not supported '\n 'when using pip to install requirements.')\n if 'PIP_QUIET' in os.environ or 'PIP_VERBOSE' in os.environ:\n quiet = False\n else:\n quiet = True\n if 'PIP_INDEX_URL' in os.environ:\n index_url = None\n elif 'index_url' in opts:\n index_url = opts['index_url'][1]\n else:\n index_url = None\n if 'find_links' in opts:\n find_links = opts['find_links'][1][:]\n else:\n find_links = []\n if dist.dependency_links:\n find_links.extend(dist.dependency_links)\n eggs_dir = os.path.realpath(dist.get_egg_cache_dir())\n environment = pkg_resources.Environment()\n for egg_dist in pkg_resources.find_distributions(eggs_dir):\n if egg_dist in req and environment.can_add(egg_dist):\n return egg_dist\n with TemporaryDirectory() as tmpdir:\n cmd = [\n sys.executable, '-m', 'pip',\n '--disable-pip-version-check',\n 'wheel', '--no-deps',\n '-w', tmpdir,\n ]\n if quiet:\n cmd.append('--quiet')\n if index_url is not None:\n cmd.extend(('--index-url', index_url))\n if find_links is not None:\n for link in find_links:\n cmd.extend(('--find-links', link))\n # If requirement is a PEP 508 direct URL, directly pass\n # the URL to pip, as `req @ url` does not work on the\n # command line.\n if req.url:\n cmd.append(req.url)\n else:\n cmd.append(str(req))\n try:\n subprocess.check_call(cmd)\n except subprocess.CalledProcessError as e:\n raise DistutilsError(str(e))\n wheel = Wheel(glob.glob(os.path.join(tmpdir, '*.whl'))[0])\n dist_location = os.path.join(eggs_dir, wheel.egg_name())\n wheel.install_as_egg(dist_location)\n dist_metadata = pkg_resources.PathMetadata(\n dist_location, os.path.join(dist_location, 'EGG-INFO'))\n dist = pkg_resources.Distribution.from_filename(\n dist_location, metadata=dist_metadata)\n return dist\n\n\ndef strip_marker(req):\n \"\"\"\n Return a new requirement without the environment marker to avoid\n calling pip with something like `babel; extra == \"i18n\"`, which\n would always be ignored.\n \"\"\"\n # create a copy to avoid mutating the input\n req = pkg_resources.Requirement.parse(str(req))\n req.marker = None\n return req\n", "path": 
"setuptools/installer.py"}]}
num_tokens_prompt: 2,412
num_tokens_diff: 341

problem_id: gh_patches_debug_43501
source: rasdani/github-patches
task_type: git_diff
in_source_id: goauthentik__authentik-7264
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Bug: Authenticator SMS Challenge response doesn't have _errors attribute **Describe the bug** It seems that something has changed in the regards to Authenticator SMS Challenge Response. **To Reproduce** Steps to reproduce the behavior: 1. Have an perfectly working SMS Authenicator stage prior to upgrading to 2023.8.3 2. Upgrade 3. Don't see any immediate errors. 4. See error when user tries to register using said flow. **Expected behavior** I would have expected it to not error out. **Logs** <details> <summary>Stacktrace from authentik</summary> ``` Traceback (most recent call last): File "/authentik/flows/views/executor.py", line 298, in get stage_response = self.current_stage_view.get(request, *args, **kwargs) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/authentik/stages/authenticator_sms/stage.py", line 117, in get response._errors.setdefault("phone_number", []) ^^^^^^^^^^^^^^^^ builtins.AttributeError: 'AuthenticatorSMSChallengeResponse' object has no attribute '_errors' ``` </details> **Version and Deployment (please complete the following information):** - authentik version: 2023.8.3 - Deployment: Ansible with Docker </issue> <code> [start of authentik/stages/authenticator_sms/stage.py] 1 """SMS Setup stage""" 2 from typing import Optional 3 4 from django.db.models import Q 5 from django.http import HttpRequest, HttpResponse 6 from django.http.request import QueryDict 7 from django.utils.translation import gettext_lazy as _ 8 from rest_framework.exceptions import ValidationError 9 from rest_framework.fields import BooleanField, CharField, IntegerField 10 11 from authentik.flows.challenge import ( 12 Challenge, 13 ChallengeResponse, 14 ChallengeTypes, 15 ErrorDetailSerializer, 16 WithUserInfoChallenge, 17 ) 18 from authentik.flows.stage import ChallengeStageView 19 from authentik.stages.authenticator_sms.models import ( 20 AuthenticatorSMSStage, 21 SMSDevice, 22 hash_phone_number, 23 ) 24 from authentik.stages.prompt.stage import PLAN_CONTEXT_PROMPT 25 26 SESSION_KEY_SMS_DEVICE = "authentik/stages/authenticator_sms/sms_device" 27 28 29 class AuthenticatorSMSChallenge(WithUserInfoChallenge): 30 """SMS Setup challenge""" 31 32 # Set to true if no previous prompt stage set the phone number 33 # this stage will also check prompt_data.phone 34 phone_number_required = BooleanField(default=True) 35 component = CharField(default="ak-stage-authenticator-sms") 36 37 38 class AuthenticatorSMSChallengeResponse(ChallengeResponse): 39 """SMS Challenge response, device is set by get_response_instance""" 40 41 device: SMSDevice 42 43 code = IntegerField(required=False) 44 phone_number = CharField(required=False) 45 46 component = CharField(default="ak-stage-authenticator-sms") 47 48 def validate(self, attrs: dict) -> dict: 49 """Check""" 50 if "code" not in attrs: 51 self.device.phone_number = attrs["phone_number"] 52 self.stage.validate_and_send(attrs["phone_number"]) 53 return super().validate(attrs) 54 if not self.device.verify_token(str(attrs["code"])): 55 raise ValidationError(_("Code does not match")) 56 self.device.confirmed = True 57 return super().validate(attrs) 58 59 60 class AuthenticatorSMSStageView(ChallengeStageView): 61 """OTP sms Setup stage""" 62 63 response_class = AuthenticatorSMSChallengeResponse 64 65 def validate_and_send(self, phone_number: str): 66 """Validate phone number and send message""" 67 stage: AuthenticatorSMSStage = 
self.executor.current_stage 68 hashed_number = hash_phone_number(phone_number) 69 query = Q(phone_number=hashed_number) | Q(phone_number=phone_number) 70 if SMSDevice.objects.filter(query, stage=stage.pk).exists(): 71 raise ValidationError(_("Invalid phone number")) 72 # No code yet, but we have a phone number, so send a verification message 73 device: SMSDevice = self.request.session[SESSION_KEY_SMS_DEVICE] 74 stage.send(device.token, device) 75 76 def _has_phone_number(self) -> Optional[str]: 77 context = self.executor.plan.context 78 if "phone" in context.get(PLAN_CONTEXT_PROMPT, {}): 79 self.logger.debug("got phone number from plan context") 80 return context.get(PLAN_CONTEXT_PROMPT, {}).get("phone") 81 if SESSION_KEY_SMS_DEVICE in self.request.session: 82 self.logger.debug("got phone number from device in session") 83 device: SMSDevice = self.request.session[SESSION_KEY_SMS_DEVICE] 84 if device.phone_number == "": 85 return None 86 return device.phone_number 87 return None 88 89 def get_challenge(self, *args, **kwargs) -> Challenge: 90 return AuthenticatorSMSChallenge( 91 data={ 92 "type": ChallengeTypes.NATIVE.value, 93 "phone_number_required": self._has_phone_number() is None, 94 } 95 ) 96 97 def get_response_instance(self, data: QueryDict) -> ChallengeResponse: 98 response = super().get_response_instance(data) 99 response.device = self.request.session[SESSION_KEY_SMS_DEVICE] 100 return response 101 102 def get(self, request: HttpRequest, *args, **kwargs) -> HttpResponse: 103 user = self.get_pending_user() 104 105 stage: AuthenticatorSMSStage = self.executor.current_stage 106 107 if SESSION_KEY_SMS_DEVICE not in self.request.session: 108 device = SMSDevice(user=user, confirmed=False, stage=stage, name="SMS Device") 109 device.generate_token(commit=False) 110 self.request.session[SESSION_KEY_SMS_DEVICE] = device 111 if phone_number := self._has_phone_number(): 112 device.phone_number = phone_number 113 try: 114 self.validate_and_send(phone_number) 115 except ValidationError as exc: 116 response = AuthenticatorSMSChallengeResponse() 117 response._errors.setdefault("phone_number", []) 118 response._errors["phone_number"].append(ErrorDetailSerializer(exc.detail)) 119 return self.challenge_invalid(response) 120 return super().get(request, *args, **kwargs) 121 122 def challenge_valid(self, response: ChallengeResponse) -> HttpResponse: 123 """SMS Token is validated by challenge""" 124 device: SMSDevice = self.request.session[SESSION_KEY_SMS_DEVICE] 125 if not device.confirmed: 126 return self.challenge_invalid(response) 127 stage: AuthenticatorSMSStage = self.executor.current_stage 128 if stage.verify_only: 129 self.logger.debug("Hashing number on device") 130 device.set_hashed_number() 131 device.save() 132 del self.request.session[SESSION_KEY_SMS_DEVICE] 133 return self.executor.stage_ok() 134 [end of authentik/stages/authenticator_sms/stage.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
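For context on the traceback: Django REST Framework serializers only gain a `_errors` attribute once `is_valid()` has run, which is why touching `response._errors` on a freshly constructed response raises AttributeError. The snippet below is an illustration with made-up names, assumes a configured Django/DRF environment, and is not authentik code.

```python
from rest_framework import serializers


class DemoResponse(serializers.Serializer):
    # Stand-in for AuthenticatorSMSChallengeResponse; the field is illustrative.
    phone_number = serializers.CharField(required=False)


resp = DemoResponse()             # constructed without data, as in the stage code
print(hasattr(resp, "_errors"))   # False -> resp._errors.setdefault(...) raises AttributeError

resp = DemoResponse(data={})
resp.is_valid()                   # is_valid() is what creates and fills _errors
print(hasattr(resp, "_errors"))   # True
```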
diff --git a/authentik/stages/authenticator_sms/stage.py b/authentik/stages/authenticator_sms/stage.py --- a/authentik/stages/authenticator_sms/stage.py +++ b/authentik/stages/authenticator_sms/stage.py @@ -12,7 +12,6 @@ Challenge, ChallengeResponse, ChallengeTypes, - ErrorDetailSerializer, WithUserInfoChallenge, ) from authentik.flows.stage import ChallengeStageView @@ -24,6 +23,7 @@ from authentik.stages.prompt.stage import PLAN_CONTEXT_PROMPT SESSION_KEY_SMS_DEVICE = "authentik/stages/authenticator_sms/sms_device" +PLAN_CONTEXT_PHONE = "phone" class AuthenticatorSMSChallenge(WithUserInfoChallenge): @@ -48,6 +48,8 @@ def validate(self, attrs: dict) -> dict: """Check""" if "code" not in attrs: + if "phone_number" not in attrs: + raise ValidationError("phone_number required") self.device.phone_number = attrs["phone_number"] self.stage.validate_and_send(attrs["phone_number"]) return super().validate(attrs) @@ -67,6 +69,7 @@ stage: AuthenticatorSMSStage = self.executor.current_stage hashed_number = hash_phone_number(phone_number) query = Q(phone_number=hashed_number) | Q(phone_number=phone_number) + print(SMSDevice.objects.filter(query, stage=stage.pk)) if SMSDevice.objects.filter(query, stage=stage.pk).exists(): raise ValidationError(_("Invalid phone number")) # No code yet, but we have a phone number, so send a verification message @@ -75,9 +78,9 @@ def _has_phone_number(self) -> Optional[str]: context = self.executor.plan.context - if "phone" in context.get(PLAN_CONTEXT_PROMPT, {}): + if PLAN_CONTEXT_PHONE in context.get(PLAN_CONTEXT_PROMPT, {}): self.logger.debug("got phone number from plan context") - return context.get(PLAN_CONTEXT_PROMPT, {}).get("phone") + return context.get(PLAN_CONTEXT_PROMPT, {}).get(PLAN_CONTEXT_PHONE) if SESSION_KEY_SMS_DEVICE in self.request.session: self.logger.debug("got phone number from device in session") device: SMSDevice = self.request.session[SESSION_KEY_SMS_DEVICE] @@ -113,10 +116,17 @@ try: self.validate_and_send(phone_number) except ValidationError as exc: - response = AuthenticatorSMSChallengeResponse() - response._errors.setdefault("phone_number", []) - response._errors["phone_number"].append(ErrorDetailSerializer(exc.detail)) - return self.challenge_invalid(response) + # We had a phone number given already (at this point only possible from flow + # context), but an error occurred while sending a number (most likely) + # due to a duplicate device, so delete the number we got given, reset the state + # (ish) and retry + device.phone_number = "" + self.executor.plan.context.get(PLAN_CONTEXT_PROMPT, {}).pop( + PLAN_CONTEXT_PHONE, None + ) + self.request.session.pop(SESSION_KEY_SMS_DEVICE, None) + self.logger.warning("failed to send SMS message to pre-set number", exc=exc) + return self.get(request, *args, **kwargs) return super().get(request, *args, **kwargs) def challenge_valid(self, response: ChallengeResponse) -> HttpResponse:
{"golden_diff": "diff --git a/authentik/stages/authenticator_sms/stage.py b/authentik/stages/authenticator_sms/stage.py\n--- a/authentik/stages/authenticator_sms/stage.py\n+++ b/authentik/stages/authenticator_sms/stage.py\n@@ -12,7 +12,6 @@\n Challenge,\n ChallengeResponse,\n ChallengeTypes,\n- ErrorDetailSerializer,\n WithUserInfoChallenge,\n )\n from authentik.flows.stage import ChallengeStageView\n@@ -24,6 +23,7 @@\n from authentik.stages.prompt.stage import PLAN_CONTEXT_PROMPT\n \n SESSION_KEY_SMS_DEVICE = \"authentik/stages/authenticator_sms/sms_device\"\n+PLAN_CONTEXT_PHONE = \"phone\"\n \n \n class AuthenticatorSMSChallenge(WithUserInfoChallenge):\n@@ -48,6 +48,8 @@\n def validate(self, attrs: dict) -> dict:\n \"\"\"Check\"\"\"\n if \"code\" not in attrs:\n+ if \"phone_number\" not in attrs:\n+ raise ValidationError(\"phone_number required\")\n self.device.phone_number = attrs[\"phone_number\"]\n self.stage.validate_and_send(attrs[\"phone_number\"])\n return super().validate(attrs)\n@@ -67,6 +69,7 @@\n stage: AuthenticatorSMSStage = self.executor.current_stage\n hashed_number = hash_phone_number(phone_number)\n query = Q(phone_number=hashed_number) | Q(phone_number=phone_number)\n+ print(SMSDevice.objects.filter(query, stage=stage.pk))\n if SMSDevice.objects.filter(query, stage=stage.pk).exists():\n raise ValidationError(_(\"Invalid phone number\"))\n # No code yet, but we have a phone number, so send a verification message\n@@ -75,9 +78,9 @@\n \n def _has_phone_number(self) -> Optional[str]:\n context = self.executor.plan.context\n- if \"phone\" in context.get(PLAN_CONTEXT_PROMPT, {}):\n+ if PLAN_CONTEXT_PHONE in context.get(PLAN_CONTEXT_PROMPT, {}):\n self.logger.debug(\"got phone number from plan context\")\n- return context.get(PLAN_CONTEXT_PROMPT, {}).get(\"phone\")\n+ return context.get(PLAN_CONTEXT_PROMPT, {}).get(PLAN_CONTEXT_PHONE)\n if SESSION_KEY_SMS_DEVICE in self.request.session:\n self.logger.debug(\"got phone number from device in session\")\n device: SMSDevice = self.request.session[SESSION_KEY_SMS_DEVICE]\n@@ -113,10 +116,17 @@\n try:\n self.validate_and_send(phone_number)\n except ValidationError as exc:\n- response = AuthenticatorSMSChallengeResponse()\n- response._errors.setdefault(\"phone_number\", [])\n- response._errors[\"phone_number\"].append(ErrorDetailSerializer(exc.detail))\n- return self.challenge_invalid(response)\n+ # We had a phone number given already (at this point only possible from flow\n+ # context), but an error occurred while sending a number (most likely)\n+ # due to a duplicate device, so delete the number we got given, reset the state\n+ # (ish) and retry\n+ device.phone_number = \"\"\n+ self.executor.plan.context.get(PLAN_CONTEXT_PROMPT, {}).pop(\n+ PLAN_CONTEXT_PHONE, None\n+ )\n+ self.request.session.pop(SESSION_KEY_SMS_DEVICE, None)\n+ self.logger.warning(\"failed to send SMS message to pre-set number\", exc=exc)\n+ return self.get(request, *args, **kwargs)\n return super().get(request, *args, **kwargs)\n \n def challenge_valid(self, response: ChallengeResponse) -> HttpResponse:\n", "issue": "Bug: Authenticator SMS Challenge response doesn't have _errors attribute\n**Describe the bug**\r\nIt seems that something has changed in the regards to Authenticator SMS Challenge Response.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Have an perfectly working SMS Authenicator stage prior to upgrading to 2023.8.3\r\n2. Upgrade\r\n3. Don't see any immediate errors.\r\n4. 
See error when user tries to register using said flow.\r\n\r\n**Expected behavior**\r\nI would have expected it to not error out.\r\n\r\n**Logs**\r\n<details>\r\n <summary>Stacktrace from authentik</summary>\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/authentik/flows/views/executor.py\", line 298, in get\r\n stage_response = self.current_stage_view.get(request, *args, **kwargs)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/authentik/stages/authenticator_sms/stage.py\", line 117, in get\r\n response._errors.setdefault(\"phone_number\", [])\r\n ^^^^^^^^^^^^^^^^\r\nbuiltins.AttributeError: 'AuthenticatorSMSChallengeResponse' object has no attribute '_errors'\r\n```\r\n</details>\r\n\r\n\r\n**Version and Deployment (please complete the following information):**\r\n- authentik version: 2023.8.3\r\n- Deployment: Ansible with Docker\r\n \n", "before_files": [{"content": "\"\"\"SMS Setup stage\"\"\"\nfrom typing import Optional\n\nfrom django.db.models import Q\nfrom django.http import HttpRequest, HttpResponse\nfrom django.http.request import QueryDict\nfrom django.utils.translation import gettext_lazy as _\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework.fields import BooleanField, CharField, IntegerField\n\nfrom authentik.flows.challenge import (\n Challenge,\n ChallengeResponse,\n ChallengeTypes,\n ErrorDetailSerializer,\n WithUserInfoChallenge,\n)\nfrom authentik.flows.stage import ChallengeStageView\nfrom authentik.stages.authenticator_sms.models import (\n AuthenticatorSMSStage,\n SMSDevice,\n hash_phone_number,\n)\nfrom authentik.stages.prompt.stage import PLAN_CONTEXT_PROMPT\n\nSESSION_KEY_SMS_DEVICE = \"authentik/stages/authenticator_sms/sms_device\"\n\n\nclass AuthenticatorSMSChallenge(WithUserInfoChallenge):\n \"\"\"SMS Setup challenge\"\"\"\n\n # Set to true if no previous prompt stage set the phone number\n # this stage will also check prompt_data.phone\n phone_number_required = BooleanField(default=True)\n component = CharField(default=\"ak-stage-authenticator-sms\")\n\n\nclass AuthenticatorSMSChallengeResponse(ChallengeResponse):\n \"\"\"SMS Challenge response, device is set by get_response_instance\"\"\"\n\n device: SMSDevice\n\n code = IntegerField(required=False)\n phone_number = CharField(required=False)\n\n component = CharField(default=\"ak-stage-authenticator-sms\")\n\n def validate(self, attrs: dict) -> dict:\n \"\"\"Check\"\"\"\n if \"code\" not in attrs:\n self.device.phone_number = attrs[\"phone_number\"]\n self.stage.validate_and_send(attrs[\"phone_number\"])\n return super().validate(attrs)\n if not self.device.verify_token(str(attrs[\"code\"])):\n raise ValidationError(_(\"Code does not match\"))\n self.device.confirmed = True\n return super().validate(attrs)\n\n\nclass AuthenticatorSMSStageView(ChallengeStageView):\n \"\"\"OTP sms Setup stage\"\"\"\n\n response_class = AuthenticatorSMSChallengeResponse\n\n def validate_and_send(self, phone_number: str):\n \"\"\"Validate phone number and send message\"\"\"\n stage: AuthenticatorSMSStage = self.executor.current_stage\n hashed_number = hash_phone_number(phone_number)\n query = Q(phone_number=hashed_number) | Q(phone_number=phone_number)\n if SMSDevice.objects.filter(query, stage=stage.pk).exists():\n raise ValidationError(_(\"Invalid phone number\"))\n # No code yet, but we have a phone number, so send a verification message\n device: SMSDevice = self.request.session[SESSION_KEY_SMS_DEVICE]\n stage.send(device.token, device)\n\n def 
_has_phone_number(self) -> Optional[str]:\n context = self.executor.plan.context\n if \"phone\" in context.get(PLAN_CONTEXT_PROMPT, {}):\n self.logger.debug(\"got phone number from plan context\")\n return context.get(PLAN_CONTEXT_PROMPT, {}).get(\"phone\")\n if SESSION_KEY_SMS_DEVICE in self.request.session:\n self.logger.debug(\"got phone number from device in session\")\n device: SMSDevice = self.request.session[SESSION_KEY_SMS_DEVICE]\n if device.phone_number == \"\":\n return None\n return device.phone_number\n return None\n\n def get_challenge(self, *args, **kwargs) -> Challenge:\n return AuthenticatorSMSChallenge(\n data={\n \"type\": ChallengeTypes.NATIVE.value,\n \"phone_number_required\": self._has_phone_number() is None,\n }\n )\n\n def get_response_instance(self, data: QueryDict) -> ChallengeResponse:\n response = super().get_response_instance(data)\n response.device = self.request.session[SESSION_KEY_SMS_DEVICE]\n return response\n\n def get(self, request: HttpRequest, *args, **kwargs) -> HttpResponse:\n user = self.get_pending_user()\n\n stage: AuthenticatorSMSStage = self.executor.current_stage\n\n if SESSION_KEY_SMS_DEVICE not in self.request.session:\n device = SMSDevice(user=user, confirmed=False, stage=stage, name=\"SMS Device\")\n device.generate_token(commit=False)\n self.request.session[SESSION_KEY_SMS_DEVICE] = device\n if phone_number := self._has_phone_number():\n device.phone_number = phone_number\n try:\n self.validate_and_send(phone_number)\n except ValidationError as exc:\n response = AuthenticatorSMSChallengeResponse()\n response._errors.setdefault(\"phone_number\", [])\n response._errors[\"phone_number\"].append(ErrorDetailSerializer(exc.detail))\n return self.challenge_invalid(response)\n return super().get(request, *args, **kwargs)\n\n def challenge_valid(self, response: ChallengeResponse) -> HttpResponse:\n \"\"\"SMS Token is validated by challenge\"\"\"\n device: SMSDevice = self.request.session[SESSION_KEY_SMS_DEVICE]\n if not device.confirmed:\n return self.challenge_invalid(response)\n stage: AuthenticatorSMSStage = self.executor.current_stage\n if stage.verify_only:\n self.logger.debug(\"Hashing number on device\")\n device.set_hashed_number()\n device.save()\n del self.request.session[SESSION_KEY_SMS_DEVICE]\n return self.executor.stage_ok()\n", "path": "authentik/stages/authenticator_sms/stage.py"}]}
num_tokens_prompt: 2,217
num_tokens_diff: 768

problem_id: gh_patches_debug_15475
source: rasdani/github-patches
task_type: git_diff
in_source_id: DataDog__dd-trace-py-4198
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> aioredis raises CancelledError in _finish_span ### Which version of dd-trace-py are you using? ~~0.53.0~~ 0.58.0 ### Which version of pip are you using? 21.3.1 ### Which version of the libraries are you using? django==3.2.11 django-redis==5.0.0 channels==3.0.4 daphne==3.0.2 ### How can we reproduce your problem? I am using code similar to the following: asgi.py ``` import django from channels.routing import get_default_application from ddtrace.contrib.asgi import TraceMiddleware django.setup() application = TraceMiddleware(get_default_application()) ``` routing.py ``` from django.urls import re_path import my_app.consumers websocket_urlpatterns = [ re_path(r"^ws/test/$", consumers.TestConsumer.as_asgi()), ] ``` my_app/consumers.py ``` from channels.generic.websocket import WebsocketConsumer class TestConsumer(WebsocketConsumer): groups = ["broadcast"] def connect(self): self.accept() def receive(self, text_data=None, bytes_data=None): raise Exception("An test exception") ``` I am running the application with: `ddtrace-run daphne asgi:application --bind 0.0.0.0 --port 8001` ### What is the result that you get? I don't get any traces at all, and my logs show this: ``` handle: <Handle traced_13_execute_command.<locals>._finish_span(<Future cancelled>) at /usr/local/lib/python3.10/site-packages/ddtrace/contrib/aioredis/patch.py:140> Traceback (most recent call last): File "/usr/local/lib/python3.10/asyncio/events.py", line 80, in _run self._context.run(self._callback, *self._args) File "/usr/local/lib/python3.10/site-packages/ddtrace/contrib/aioredis/patch.py", line 146, in _finish_span future.result() asyncio.exceptions.CancelledError ``` ### What is the result that you expected? No errors </issue> <code> [start of ddtrace/contrib/aioredis/patch.py] 1 import asyncio 2 import sys 3 4 import aioredis 5 6 from ddtrace import config 7 from ddtrace.internal.utils.wrappers import unwrap as _u 8 from ddtrace.pin import Pin 9 from ddtrace.vendor.wrapt import wrap_function_wrapper as _w 10 11 from .. 
import trace_utils 12 from ...constants import ANALYTICS_SAMPLE_RATE_KEY 13 from ...constants import SPAN_MEASURED_KEY 14 from ...ext import SpanTypes 15 from ...ext import net 16 from ...ext import redis as redisx 17 from ..redis.util import _trace_redis_cmd 18 from ..redis.util import _trace_redis_execute_pipeline 19 from ..redis.util import format_command_args 20 21 22 try: 23 from aioredis.commands.transaction import _RedisBuffer 24 except ImportError: 25 _RedisBuffer = None 26 27 config._add("aioredis", dict(_default_service="redis")) 28 29 aioredis_version_str = getattr(aioredis, "__version__", "0.0.0") 30 aioredis_version = tuple([int(i) for i in aioredis_version_str.split(".")]) 31 32 33 def patch(): 34 if getattr(aioredis, "_datadog_patch", False): 35 return 36 setattr(aioredis, "_datadog_patch", True) 37 pin = Pin() 38 if aioredis_version >= (2, 0): 39 _w("aioredis.client", "Redis.execute_command", traced_execute_command) 40 _w("aioredis.client", "Redis.pipeline", traced_pipeline) 41 _w("aioredis.client", "Pipeline.execute", traced_execute_pipeline) 42 pin.onto(aioredis.client.Redis) 43 else: 44 _w("aioredis", "Redis.execute", traced_13_execute_command) 45 _w("aioredis", "Redis.pipeline", traced_13_pipeline) 46 _w("aioredis.commands.transaction", "Pipeline.execute", traced_13_execute_pipeline) 47 pin.onto(aioredis.Redis) 48 49 50 def unpatch(): 51 if not getattr(aioredis, "_datadog_patch", False): 52 return 53 54 setattr(aioredis, "_datadog_patch", False) 55 if aioredis_version >= (2, 0): 56 _u(aioredis.client.Redis, "execute_command") 57 _u(aioredis.client.Redis, "pipeline") 58 _u(aioredis.client.Pipeline, "execute") 59 else: 60 _u(aioredis.Redis, "execute") 61 _u(aioredis.Redis, "pipeline") 62 _u(aioredis.commands.transaction.Pipeline, "execute") 63 64 65 async def traced_execute_command(func, instance, args, kwargs): 66 pin = Pin.get_from(instance) 67 if not pin or not pin.enabled(): 68 return await func(*args, **kwargs) 69 70 with _trace_redis_cmd(pin, config.aioredis, instance, args): 71 return await func(*args, **kwargs) 72 73 74 def traced_pipeline(func, instance, args, kwargs): 75 pipeline = func(*args, **kwargs) 76 pin = Pin.get_from(instance) 77 if pin: 78 pin.onto(pipeline) 79 return pipeline 80 81 82 async def traced_execute_pipeline(func, instance, args, kwargs): 83 pin = Pin.get_from(instance) 84 if not pin or not pin.enabled(): 85 return await func(*args, **kwargs) 86 87 cmds = [format_command_args(c) for c, _ in instance.command_stack] 88 resource = "\n".join(cmds) 89 with _trace_redis_execute_pipeline(pin, config.aioredis, resource, instance): 90 return await func(*args, **kwargs) 91 92 93 def traced_13_pipeline(func, instance, args, kwargs): 94 pipeline = func(*args, **kwargs) 95 pin = Pin.get_from(instance) 96 if pin: 97 pin.onto(pipeline) 98 return pipeline 99 100 101 def traced_13_execute_command(func, instance, args, kwargs): 102 # If we have a _RedisBuffer then we are in a pipeline 103 if isinstance(instance.connection, _RedisBuffer): 104 return func(*args, **kwargs) 105 106 pin = Pin.get_from(instance) 107 if not pin or not pin.enabled(): 108 return func(*args, **kwargs) 109 110 # Don't activate the span since this operation is performed as a future which concludes sometime later on in 111 # execution so subsequent operations in the stack are not necessarily semantically related 112 # (we don't want this span to be the parent of all other spans created before the future is resolved) 113 parent = pin.tracer.current_span() 114 span = pin.tracer.start_span( 
115 redisx.CMD, 116 service=trace_utils.ext_service(pin, config.aioredis), 117 span_type=SpanTypes.REDIS, 118 activate=False, 119 child_of=parent, 120 ) 121 122 span.set_tag(SPAN_MEASURED_KEY) 123 query = format_command_args(args) 124 span.resource = query 125 span.set_tag(redisx.RAWCMD, query) 126 if pin.tags: 127 span.set_tags(pin.tags) 128 129 span.set_tags( 130 { 131 net.TARGET_HOST: instance.address[0], 132 net.TARGET_PORT: instance.address[1], 133 redisx.DB: instance.db or 0, 134 } 135 ) 136 span.set_metric(redisx.ARGS_LEN, len(args)) 137 # set analytics sample rate if enabled 138 span.set_tag(ANALYTICS_SAMPLE_RATE_KEY, config.aioredis.get_analytics_sample_rate()) 139 140 def _finish_span(future): 141 try: 142 # Accessing the result will raise an exception if: 143 # - The future was cancelled 144 # - There was an error executing the future (`future.exception()`) 145 # - The future is in an invalid state 146 future.result() 147 except Exception: 148 span.set_exc_info(*sys.exc_info()) 149 finally: 150 span.finish() 151 152 task = func(*args, **kwargs) 153 # Execute command returns a coroutine when no free connections are available 154 # https://github.com/aio-libs/aioredis-py/blob/v1.3.1/aioredis/pool.py#L191 155 task = asyncio.ensure_future(task) 156 task.add_done_callback(_finish_span) 157 return task 158 159 160 async def traced_13_execute_pipeline(func, instance, args, kwargs): 161 pin = Pin.get_from(instance) 162 if not pin or not pin.enabled(): 163 return await func(*args, **kwargs) 164 165 cmds = [] 166 for _, cmd, cmd_args, _ in instance._pipeline: 167 parts = [cmd] 168 parts.extend(cmd_args) 169 cmds.append(format_command_args(parts)) 170 resource = "\n".join(cmds) 171 with pin.tracer.trace( 172 redisx.CMD, 173 resource=resource, 174 service=trace_utils.ext_service(pin, config.aioredis), 175 span_type=SpanTypes.REDIS, 176 ) as span: 177 178 span.set_tags( 179 { 180 net.TARGET_HOST: instance._pool_or_conn.address[0], 181 net.TARGET_PORT: instance._pool_or_conn.address[1], 182 redisx.DB: instance._pool_or_conn.db or 0, 183 } 184 ) 185 186 span.set_tag(SPAN_MEASURED_KEY) 187 span.set_tag(redisx.RAWCMD, resource) 188 span.set_metric(redisx.PIPELINE_LEN, len(instance._pipeline)) 189 # set analytics sample rate if enabled 190 span.set_tag(ANALYTICS_SAMPLE_RATE_KEY, config.aioredis.get_analytics_sample_rate()) 191 192 return await func(*args, **kwargs) 193 [end of ddtrace/contrib/aioredis/patch.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ddtrace/contrib/aioredis/patch.py b/ddtrace/contrib/aioredis/patch.py --- a/ddtrace/contrib/aioredis/patch.py +++ b/ddtrace/contrib/aioredis/patch.py @@ -140,11 +140,12 @@ def _finish_span(future): try: # Accessing the result will raise an exception if: - # - The future was cancelled + # - The future was cancelled (CancelledError) # - There was an error executing the future (`future.exception()`) # - The future is in an invalid state future.result() - except Exception: + # CancelledError exceptions extend from BaseException as of Python 3.8, instead of usual Exception + except BaseException: span.set_exc_info(*sys.exc_info()) finally: span.finish()
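The accepted fix only widens the except clause in the done-callback; per the comment added in the diff, asyncio.CancelledError stopped inheriting from Exception in Python 3.8, so the old handler let cancelled futures escape and crash the callback. A small self-contained illustration of that interpreter behaviour (not dd-trace code) is:

```python
# On Python 3.8+ a cancelled future raises CancelledError from .result(), and
# CancelledError derives from BaseException only, so the narrow handler below
# is skipped and the broad one does the work, as in the patched _finish_span.
import asyncio


def finish(future: "asyncio.Future") -> None:
    try:
        future.result()
    except Exception:
        print("narrow handler (never reached for a cancelled future on 3.8+)")
    except BaseException:
        print("broad handler: cancellation observed, span could be finished")


async def main() -> None:
    fut = asyncio.get_running_loop().create_future()
    fut.add_done_callback(finish)
    fut.cancel()                 # schedules the done-callback on the loop
    await asyncio.sleep(0)       # let the callback run


asyncio.run(main())
```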
{"golden_diff": "diff --git a/ddtrace/contrib/aioredis/patch.py b/ddtrace/contrib/aioredis/patch.py\n--- a/ddtrace/contrib/aioredis/patch.py\n+++ b/ddtrace/contrib/aioredis/patch.py\n@@ -140,11 +140,12 @@\n def _finish_span(future):\n try:\n # Accessing the result will raise an exception if:\n- # - The future was cancelled\n+ # - The future was cancelled (CancelledError)\n # - There was an error executing the future (`future.exception()`)\n # - The future is in an invalid state\n future.result()\n- except Exception:\n+ # CancelledError exceptions extend from BaseException as of Python 3.8, instead of usual Exception\n+ except BaseException:\n span.set_exc_info(*sys.exc_info())\n finally:\n span.finish()\n", "issue": "aioredis raises CancelledError in _finish_span \n### Which version of dd-trace-py are you using?\r\n\r\n~~0.53.0~~ 0.58.0\r\n\r\n### Which version of pip are you using?\r\n\r\n21.3.1\r\n\r\n### Which version of the libraries are you using?\r\n\r\ndjango==3.2.11\r\ndjango-redis==5.0.0\r\nchannels==3.0.4\r\ndaphne==3.0.2\r\n\r\n### How can we reproduce your problem?\r\n\r\nI am using code similar to the following:\r\n\r\nasgi.py\r\n\r\n```\r\nimport django\r\nfrom channels.routing import get_default_application\r\nfrom ddtrace.contrib.asgi import TraceMiddleware\r\n\r\ndjango.setup()\r\napplication = TraceMiddleware(get_default_application())\r\n```\r\n\r\nrouting.py\r\n\r\n```\r\nfrom django.urls import re_path\r\nimport my_app.consumers\r\n\r\nwebsocket_urlpatterns = [\r\n re_path(r\"^ws/test/$\", consumers.TestConsumer.as_asgi()),\r\n]\r\n```\r\n\r\nmy_app/consumers.py\r\n\r\n```\r\nfrom channels.generic.websocket import WebsocketConsumer\r\n\r\nclass TestConsumer(WebsocketConsumer):\r\n groups = [\"broadcast\"]\r\n\r\n def connect(self):\r\n self.accept()\r\n\r\n def receive(self, text_data=None, bytes_data=None):\r\n raise Exception(\"An test exception\")\r\n```\r\n\r\nI am running the application with: `ddtrace-run daphne asgi:application --bind 0.0.0.0 --port 8001`\r\n\r\n### What is the result that you get?\r\n\r\nI don't get any traces at all, and my logs show this:\r\n\r\n```\r\nhandle: <Handle traced_13_execute_command.<locals>._finish_span(<Future cancelled>) at /usr/local/lib/python3.10/site-packages/ddtrace/contrib/aioredis/patch.py:140>\r\n\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.10/asyncio/events.py\", line 80, in _run\r\n self._context.run(self._callback, *self._args)\r\n File \"/usr/local/lib/python3.10/site-packages/ddtrace/contrib/aioredis/patch.py\", line 146, in _finish_span\r\n future.result()\r\nasyncio.exceptions.CancelledError\r\n```\r\n\r\n\r\n### What is the result that you expected?\r\n\r\nNo errors\r\n\n", "before_files": [{"content": "import asyncio\nimport sys\n\nimport aioredis\n\nfrom ddtrace import config\nfrom ddtrace.internal.utils.wrappers import unwrap as _u\nfrom ddtrace.pin import Pin\nfrom ddtrace.vendor.wrapt import wrap_function_wrapper as _w\n\nfrom .. 
import trace_utils\nfrom ...constants import ANALYTICS_SAMPLE_RATE_KEY\nfrom ...constants import SPAN_MEASURED_KEY\nfrom ...ext import SpanTypes\nfrom ...ext import net\nfrom ...ext import redis as redisx\nfrom ..redis.util import _trace_redis_cmd\nfrom ..redis.util import _trace_redis_execute_pipeline\nfrom ..redis.util import format_command_args\n\n\ntry:\n from aioredis.commands.transaction import _RedisBuffer\nexcept ImportError:\n _RedisBuffer = None\n\nconfig._add(\"aioredis\", dict(_default_service=\"redis\"))\n\naioredis_version_str = getattr(aioredis, \"__version__\", \"0.0.0\")\naioredis_version = tuple([int(i) for i in aioredis_version_str.split(\".\")])\n\n\ndef patch():\n if getattr(aioredis, \"_datadog_patch\", False):\n return\n setattr(aioredis, \"_datadog_patch\", True)\n pin = Pin()\n if aioredis_version >= (2, 0):\n _w(\"aioredis.client\", \"Redis.execute_command\", traced_execute_command)\n _w(\"aioredis.client\", \"Redis.pipeline\", traced_pipeline)\n _w(\"aioredis.client\", \"Pipeline.execute\", traced_execute_pipeline)\n pin.onto(aioredis.client.Redis)\n else:\n _w(\"aioredis\", \"Redis.execute\", traced_13_execute_command)\n _w(\"aioredis\", \"Redis.pipeline\", traced_13_pipeline)\n _w(\"aioredis.commands.transaction\", \"Pipeline.execute\", traced_13_execute_pipeline)\n pin.onto(aioredis.Redis)\n\n\ndef unpatch():\n if not getattr(aioredis, \"_datadog_patch\", False):\n return\n\n setattr(aioredis, \"_datadog_patch\", False)\n if aioredis_version >= (2, 0):\n _u(aioredis.client.Redis, \"execute_command\")\n _u(aioredis.client.Redis, \"pipeline\")\n _u(aioredis.client.Pipeline, \"execute\")\n else:\n _u(aioredis.Redis, \"execute\")\n _u(aioredis.Redis, \"pipeline\")\n _u(aioredis.commands.transaction.Pipeline, \"execute\")\n\n\nasync def traced_execute_command(func, instance, args, kwargs):\n pin = Pin.get_from(instance)\n if not pin or not pin.enabled():\n return await func(*args, **kwargs)\n\n with _trace_redis_cmd(pin, config.aioredis, instance, args):\n return await func(*args, **kwargs)\n\n\ndef traced_pipeline(func, instance, args, kwargs):\n pipeline = func(*args, **kwargs)\n pin = Pin.get_from(instance)\n if pin:\n pin.onto(pipeline)\n return pipeline\n\n\nasync def traced_execute_pipeline(func, instance, args, kwargs):\n pin = Pin.get_from(instance)\n if not pin or not pin.enabled():\n return await func(*args, **kwargs)\n\n cmds = [format_command_args(c) for c, _ in instance.command_stack]\n resource = \"\\n\".join(cmds)\n with _trace_redis_execute_pipeline(pin, config.aioredis, resource, instance):\n return await func(*args, **kwargs)\n\n\ndef traced_13_pipeline(func, instance, args, kwargs):\n pipeline = func(*args, **kwargs)\n pin = Pin.get_from(instance)\n if pin:\n pin.onto(pipeline)\n return pipeline\n\n\ndef traced_13_execute_command(func, instance, args, kwargs):\n # If we have a _RedisBuffer then we are in a pipeline\n if isinstance(instance.connection, _RedisBuffer):\n return func(*args, **kwargs)\n\n pin = Pin.get_from(instance)\n if not pin or not pin.enabled():\n return func(*args, **kwargs)\n\n # Don't activate the span since this operation is performed as a future which concludes sometime later on in\n # execution so subsequent operations in the stack are not necessarily semantically related\n # (we don't want this span to be the parent of all other spans created before the future is resolved)\n parent = pin.tracer.current_span()\n span = pin.tracer.start_span(\n redisx.CMD,\n service=trace_utils.ext_service(pin, config.aioredis),\n 
span_type=SpanTypes.REDIS,\n activate=False,\n child_of=parent,\n )\n\n span.set_tag(SPAN_MEASURED_KEY)\n query = format_command_args(args)\n span.resource = query\n span.set_tag(redisx.RAWCMD, query)\n if pin.tags:\n span.set_tags(pin.tags)\n\n span.set_tags(\n {\n net.TARGET_HOST: instance.address[0],\n net.TARGET_PORT: instance.address[1],\n redisx.DB: instance.db or 0,\n }\n )\n span.set_metric(redisx.ARGS_LEN, len(args))\n # set analytics sample rate if enabled\n span.set_tag(ANALYTICS_SAMPLE_RATE_KEY, config.aioredis.get_analytics_sample_rate())\n\n def _finish_span(future):\n try:\n # Accessing the result will raise an exception if:\n # - The future was cancelled\n # - There was an error executing the future (`future.exception()`)\n # - The future is in an invalid state\n future.result()\n except Exception:\n span.set_exc_info(*sys.exc_info())\n finally:\n span.finish()\n\n task = func(*args, **kwargs)\n # Execute command returns a coroutine when no free connections are available\n # https://github.com/aio-libs/aioredis-py/blob/v1.3.1/aioredis/pool.py#L191\n task = asyncio.ensure_future(task)\n task.add_done_callback(_finish_span)\n return task\n\n\nasync def traced_13_execute_pipeline(func, instance, args, kwargs):\n pin = Pin.get_from(instance)\n if not pin or not pin.enabled():\n return await func(*args, **kwargs)\n\n cmds = []\n for _, cmd, cmd_args, _ in instance._pipeline:\n parts = [cmd]\n parts.extend(cmd_args)\n cmds.append(format_command_args(parts))\n resource = \"\\n\".join(cmds)\n with pin.tracer.trace(\n redisx.CMD,\n resource=resource,\n service=trace_utils.ext_service(pin, config.aioredis),\n span_type=SpanTypes.REDIS,\n ) as span:\n\n span.set_tags(\n {\n net.TARGET_HOST: instance._pool_or_conn.address[0],\n net.TARGET_PORT: instance._pool_or_conn.address[1],\n redisx.DB: instance._pool_or_conn.db or 0,\n }\n )\n\n span.set_tag(SPAN_MEASURED_KEY)\n span.set_tag(redisx.RAWCMD, resource)\n span.set_metric(redisx.PIPELINE_LEN, len(instance._pipeline))\n # set analytics sample rate if enabled\n span.set_tag(ANALYTICS_SAMPLE_RATE_KEY, config.aioredis.get_analytics_sample_rate())\n\n return await func(*args, **kwargs)\n", "path": "ddtrace/contrib/aioredis/patch.py"}]}
3,102
202
gh_patches_debug_1887
rasdani/github-patches
git_diff
spotify__luigi-2679
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Is there a reason python-dateutil is pinned to v2.7.5? In this [commit](https://github.com/spotify/luigi/commit/ca0aa9afedecda539339e51974ef38cecf180d4b), I can see that python-dateutil has been pinned to version 2.7.5 - is this strictly necessary? Version 2.8.0 was released a couple of weeks ago and It's causing `ContextualVersionConflict` errors for us. </issue> <code> [start of setup.py] 1 # Copyright (c) 2012 Spotify AB 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); you may not 4 # use this file except in compliance with the License. You may obtain a copy of 5 # the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 11 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 12 # License for the specific language governing permissions and limitations under 13 # the License. 14 15 import os 16 import sys 17 18 from setuptools import setup 19 20 21 def get_static_files(path): 22 return [os.path.join(dirpath.replace("luigi/", ""), ext) 23 for (dirpath, dirnames, filenames) in os.walk(path) 24 for ext in ["*.html", "*.js", "*.css", "*.png", 25 "*.eot", "*.svg", "*.ttf", "*.woff", "*.woff2"]] 26 27 28 luigi_package_data = sum(map(get_static_files, ["luigi/static", "luigi/templates"]), []) 29 30 readme_note = """\ 31 .. note:: 32 33 For the latest source, discussion, etc, please visit the 34 `GitHub repository <https://github.com/spotify/luigi>`_\n\n 35 """ 36 37 with open('README.rst') as fobj: 38 long_description = readme_note + fobj.read() 39 40 install_requires = [ 41 'tornado>=4.0,<5', 42 # https://pagure.io/python-daemon/issue/18 43 'python-daemon<2.2.0', 44 'python-dateutil==2.7.5', 45 ] 46 47 # Note: To support older versions of setuptools, we're explicitly not 48 # using conditional syntax (i.e. 'enum34>1.1.0;python_version<"3.4"'). 49 # This syntax is a problem for setuptools as recent as `20.1.1`, 50 # published Feb 16, 2016. 
51 if sys.version_info[:2] < (3, 4): 52 install_requires.append('enum34>1.1.0') 53 54 if os.environ.get('READTHEDOCS', None) == 'True': 55 # So that we can build documentation for luigi.db_task_history and luigi.contrib.sqla 56 install_requires.append('sqlalchemy') 57 # readthedocs don't like python-daemon, see #1342 58 install_requires.remove('python-daemon<2.2.0') 59 install_requires.append('sphinx>=1.4.4') # Value mirrored in doc/conf.py 60 61 setup( 62 name='luigi', 63 version='2.8.3', 64 description='Workflow mgmgt + task scheduling + dependency resolution', 65 long_description=long_description, 66 author='The Luigi Authors', 67 url='https://github.com/spotify/luigi', 68 license='Apache License 2.0', 69 packages=[ 70 'luigi', 71 'luigi.configuration', 72 'luigi.contrib', 73 'luigi.contrib.hdfs', 74 'luigi.tools' 75 ], 76 package_data={ 77 'luigi': luigi_package_data 78 }, 79 entry_points={ 80 'console_scripts': [ 81 'luigi = luigi.cmdline:luigi_run', 82 'luigid = luigi.cmdline:luigid', 83 'luigi-grep = luigi.tools.luigi_grep:main', 84 'luigi-deps = luigi.tools.deps:main', 85 'luigi-deps-tree = luigi.tools.deps_tree:main' 86 ] 87 }, 88 install_requires=install_requires, 89 extras_require={ 90 'toml': ['toml<2.0.0'], 91 }, 92 classifiers=[ 93 'Development Status :: 5 - Production/Stable', 94 'Environment :: Console', 95 'Environment :: Web Environment', 96 'Intended Audience :: Developers', 97 'Intended Audience :: System Administrators', 98 'License :: OSI Approved :: Apache Software License', 99 'Programming Language :: Python :: 2.7', 100 'Programming Language :: Python :: 3.3', 101 'Programming Language :: Python :: 3.4', 102 'Programming Language :: Python :: 3.5', 103 'Programming Language :: Python :: 3.6', 104 'Programming Language :: Python :: 3.7', 105 'Topic :: System :: Monitoring', 106 ], 107 ) 108 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -41,7 +41,7 @@ 'tornado>=4.0,<5', # https://pagure.io/python-daemon/issue/18 'python-daemon<2.2.0', - 'python-dateutil==2.7.5', + 'python-dateutil>=2.7.5,<3', ] # Note: To support older versions of setuptools, we're explicitly not
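The fix relaxes the exact pin to a bounded range rather than chasing each dateutil release. As a quick sanity check of what the new specifier admits, the third-party packaging library (an extra dependency assumed here purely for illustration; it is not used by luigi's setup.py) evaluates it like this:

```python
# Compares the old exact pin with the relaxed range from the patch.
from packaging.specifiers import SpecifierSet
from packaging.version import Version

old_pin = SpecifierSet("==2.7.5")
new_range = SpecifierSet(">=2.7.5,<3")

for release in ("2.7.5", "2.8.0", "3.0.0"):
    v = Version(release)
    print(release, v in old_pin, v in new_range)

# 2.7.5 True True
# 2.8.0 False True   (the release behind the ContextualVersionConflict)
# 3.0.0 False False
```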
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -41,7 +41,7 @@\n 'tornado>=4.0,<5',\n # https://pagure.io/python-daemon/issue/18\n 'python-daemon<2.2.0',\n- 'python-dateutil==2.7.5',\n+ 'python-dateutil>=2.7.5,<3',\n ]\n \n # Note: To support older versions of setuptools, we're explicitly not\n", "issue": "Is there a reason python-dateutil is pinned to v2.7.5?\nIn this [commit](https://github.com/spotify/luigi/commit/ca0aa9afedecda539339e51974ef38cecf180d4b), I can see that python-dateutil has been pinned to version 2.7.5 - is this strictly necessary? Version 2.8.0 was released a couple of weeks ago and It's causing `ContextualVersionConflict` errors for us.\r\n\r\n\n", "before_files": [{"content": "# Copyright (c) 2012 Spotify AB\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not\n# use this file except in compliance with the License. You may obtain a copy of\n# the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations under\n# the License.\n\nimport os\nimport sys\n\nfrom setuptools import setup\n\n\ndef get_static_files(path):\n return [os.path.join(dirpath.replace(\"luigi/\", \"\"), ext)\n for (dirpath, dirnames, filenames) in os.walk(path)\n for ext in [\"*.html\", \"*.js\", \"*.css\", \"*.png\",\n \"*.eot\", \"*.svg\", \"*.ttf\", \"*.woff\", \"*.woff2\"]]\n\n\nluigi_package_data = sum(map(get_static_files, [\"luigi/static\", \"luigi/templates\"]), [])\n\nreadme_note = \"\"\"\\\n.. note::\n\n For the latest source, discussion, etc, please visit the\n `GitHub repository <https://github.com/spotify/luigi>`_\\n\\n\n\"\"\"\n\nwith open('README.rst') as fobj:\n long_description = readme_note + fobj.read()\n\ninstall_requires = [\n 'tornado>=4.0,<5',\n # https://pagure.io/python-daemon/issue/18\n 'python-daemon<2.2.0',\n 'python-dateutil==2.7.5',\n]\n\n# Note: To support older versions of setuptools, we're explicitly not\n# using conditional syntax (i.e. 
'enum34>1.1.0;python_version<\"3.4\"').\n# This syntax is a problem for setuptools as recent as `20.1.1`,\n# published Feb 16, 2016.\nif sys.version_info[:2] < (3, 4):\n install_requires.append('enum34>1.1.0')\n\nif os.environ.get('READTHEDOCS', None) == 'True':\n # So that we can build documentation for luigi.db_task_history and luigi.contrib.sqla\n install_requires.append('sqlalchemy')\n # readthedocs don't like python-daemon, see #1342\n install_requires.remove('python-daemon<2.2.0')\n install_requires.append('sphinx>=1.4.4') # Value mirrored in doc/conf.py\n\nsetup(\n name='luigi',\n version='2.8.3',\n description='Workflow mgmgt + task scheduling + dependency resolution',\n long_description=long_description,\n author='The Luigi Authors',\n url='https://github.com/spotify/luigi',\n license='Apache License 2.0',\n packages=[\n 'luigi',\n 'luigi.configuration',\n 'luigi.contrib',\n 'luigi.contrib.hdfs',\n 'luigi.tools'\n ],\n package_data={\n 'luigi': luigi_package_data\n },\n entry_points={\n 'console_scripts': [\n 'luigi = luigi.cmdline:luigi_run',\n 'luigid = luigi.cmdline:luigid',\n 'luigi-grep = luigi.tools.luigi_grep:main',\n 'luigi-deps = luigi.tools.deps:main',\n 'luigi-deps-tree = luigi.tools.deps_tree:main'\n ]\n },\n install_requires=install_requires,\n extras_require={\n 'toml': ['toml<2.0.0'],\n },\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: System :: Monitoring',\n ],\n)\n", "path": "setup.py"}]}
1,828
116
gh_patches_debug_24795
rasdani/github-patches
git_diff
CiviWiki__OpenCiviWiki-1381
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> add django-debug-toolbar ## Task - [ ] install `django-debug-toolbar` with the command `poetry add django-debug-toolbar --group dev` - [ ] follow the [remaining installation instructions](https://django-debug-toolbar.readthedocs.io/en/latest/installation.html) </issue> <code> [start of project/core/urls.py] 1 """civiwiki URL Configuration 2 3 The `urlpatterns` list routes URLs to views. For more information please see: 4 https://docs.djangoproject.com/en/3.2/topics/http/urls/ 5 Examples: 6 Function views 7 1. Add an import: from my_app import views 8 2. Add a URL to urlpatterns: path('', views.home, name='home') 9 Class-based views 10 1. Add an import: from other_app.views import Home 11 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') 12 Including another URLconf 13 1. Import the include() function: from django.urls import include, path 14 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) 15 """ 16 17 from django.conf import settings 18 from django.conf.urls.static import static 19 from django.contrib import admin 20 from django.urls import include, path, re_path 21 from django.views.generic.base import RedirectView 22 from django.views.static import serve 23 24 from core.router import CiviWikiRouter 25 26 27 urlpatterns = [ 28 path("admin/", admin.site.urls), 29 path("api/v1/", include(CiviWikiRouter.urls)), 30 path("api/", include("accounts.urls.api")), 31 path("api/", include("threads.urls.api")), 32 path("", include("accounts.urls.urls")), 33 path("", include("threads.urls.urls")), 34 path( 35 "inbox/notifications/", 36 include("notifications.urls", namespace="notifications"), 37 ), 38 path("favicon.ico", RedirectView.as_view(url="/static/favicon/favicon.ico")), 39 path( 40 "favicon-32x32.png", 41 RedirectView.as_view(url="/static/favicon/favicon-32x32.png"), 42 ), 43 path( 44 "apple-touch-icon.png", 45 RedirectView.as_view(url="/static/favicon/apple-touch-icon.png"), 46 ), 47 path( 48 "mstile-144x144.png", 49 RedirectView.as_view(url="/static/favicon/mstile-144x144.png"), 50 ), 51 re_path(r"^media/(?P<path>.*)$", serve, {"document_root": settings.MEDIA_ROOT}), 52 ] 53 54 urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT) 55 urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) 56 [end of project/core/urls.py] [start of project/core/settings.py] 1 """ 2 Django settings for civiwiki project. 3 Darius Calliet May 12, 2016 4 5 Production settings file to select proper environment variables. 
6 """ 7 import os 8 9 # False if not in os.environ 10 DEBUG = os.getenv("DEBUG", False) 11 12 # defaults to second value if not found in os.environ 13 DJANGO_HOST = os.getenv("DJANGO_HOST", "LOCALHOST") 14 15 BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) 16 SECRET_KEY = os.getenv("DJANGO_SECRET_KEY", "TEST_KEY_FOR_DEVELOPMENT") 17 ALLOWED_HOSTS = [".herokuapp.com", ".civiwiki.org", "127.0.0.1", "localhost", "0.0.0.0"] 18 19 INSTALLED_APPS = ( 20 "django.contrib.admin", 21 "django.contrib.auth", 22 "django.contrib.contenttypes", 23 "django.contrib.sessions", 24 "django.contrib.messages", 25 "django.contrib.staticfiles", 26 "django_extensions", 27 "storages", 28 "core", 29 "rest_framework", 30 "accounts.apps.AccountsConfig", 31 "threads", 32 "notifications", 33 "corsheaders", 34 "taggit", 35 "categories", 36 "notification", 37 ) 38 39 MIDDLEWARE = [ 40 "corsheaders.middleware.CorsMiddleware", 41 "django.middleware.security.SecurityMiddleware", 42 "whitenoise.middleware.WhiteNoiseMiddleware", 43 "django.contrib.sessions.middleware.SessionMiddleware", 44 "django.middleware.common.CommonMiddleware", 45 "django.middleware.csrf.CsrfViewMiddleware", 46 "django.contrib.auth.middleware.AuthenticationMiddleware", 47 # 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 48 "django.contrib.messages.middleware.MessageMiddleware", 49 "django.middleware.clickjacking.XFrameOptionsMiddleware", 50 ] 51 52 CSRF_USE_SESSIONS = ( 53 True # Store the CSRF token in the users session instead of in a cookie 54 ) 55 56 CORS_ORIGIN_ALLOW_ALL = True 57 ROOT_URLCONF = "core.urls" 58 59 # SSL Setup 60 if DJANGO_HOST != "LOCALHOST": 61 SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https") 62 SECURE_SSL_REDIRECT = True 63 SESSION_COOKIE_SECURE = True 64 CSRF_COOKIE_SECURE = True 65 66 # Internationalization & Localization 67 LANGUAGE_CODE = "en-us" 68 TIME_ZONE = "UTC" 69 USE_I18N = True 70 USE_L10N = True 71 USE_TZ = True 72 73 TEMPLATES = [ 74 { 75 "BACKEND": "django.template.backends.django.DjangoTemplates", 76 "DIRS": [ 77 os.path.join(BASE_DIR, "threads/templates/threads"), 78 os.path.join(BASE_DIR, "accounts/templates/accounts"), 79 ], # TODO: Add non-webapp template directory 80 "APP_DIRS": True, 81 "OPTIONS": { 82 "context_processors": [ 83 "django.template.context_processors.debug", 84 "django.template.context_processors.request", 85 "django.contrib.auth.context_processors.auth", 86 "django.contrib.messages.context_processors.messages", 87 ], 88 }, 89 }, 90 ] 91 92 WSGI_APPLICATION = "core.wsgi.application" 93 94 # Apex Contact for Production Errors 95 ADMINS = [("Development Team", "[email protected]")] 96 97 STATIC_URL = "/static/" 98 STATICFILES_DIRS = (os.path.join(BASE_DIR, "core/templates/static"),) 99 STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles") 100 101 MEDIA_ROOT = os.path.join(BASE_DIR, "media") 102 MEDIA_URL = "/media/" 103 104 # TODO: re-organize and simplify staticfiles settings 105 if "CIVIWIKI_LOCAL_NAME" not in os.environ: 106 STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage" 107 108 # Use DATABASE_URL in production 109 DATABASE_URL = os.getenv("DATABASE_URL") 110 111 if DATABASE_URL is not None: 112 DATABASES = {"default": DATABASE_URL} 113 else: 114 # Default to sqlite for simplicity in development 115 DATABASES = { 116 "default": { 117 "ENGINE": "django.db.backends.sqlite3", 118 "NAME": BASE_DIR + "/" + "db.sqlite3", 119 } 120 } 121 122 # Email Backend Setup 123 if "EMAIL_HOST" not in os.environ: 124 
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend" 125 EMAIL_HOST_USER = "[email protected]" 126 else: 127 EMAIL_BACKEND = "django.core.mail.backends.smtp.EmailBackend" 128 EMAIL_HOST = os.getenv("EMAIL_HOST") 129 EMAIL_PORT = os.getenv("EMAIL_PORT") 130 EMAIL_HOST_USER = os.getenv("EMAIL_HOST_USER") 131 EMAIL_HOST_PASSWORD = os.getenv("EMAIL_HOST_PASSWORD") 132 EMAIL_USE_SSL = True 133 DEFAULT_FROM_EMAIL = EMAIL_HOST 134 135 # Notification API Settings 136 NOTIFICATIONS_SOFT_DELETE = True 137 NOTIFICATIONS_USE_JSONFIELD = True 138 139 # Django REST API Settings 140 DEFAULT_RENDERER_CLASSES = ("rest_framework.renderers.JSONRenderer",) 141 142 if DEBUG: 143 # Browsable HTML - Enabled only in Debug mode (dev) 144 DEFAULT_RENDERER_CLASSES = DEFAULT_RENDERER_CLASSES + ( 145 "rest_framework.renderers.BrowsableAPIRenderer", 146 ) 147 148 REST_FRAMEWORK = { 149 "DEFAULT_PERMISSION_CLASSES": ("rest_framework.permissions.IsAuthenticated",), 150 "DEFAULT_RENDERER_CLASSES": DEFAULT_RENDERER_CLASSES, 151 "DEFAULT_AUTHENTICATION_CLASSES": ( 152 "rest_framework.authentication.BasicAuthentication", 153 "rest_framework.authentication.SessionAuthentication", 154 ), 155 } 156 157 # CORS Settings 158 CORS_ORIGIN_ALLOW_ALL = True 159 160 # Custom User model 161 AUTH_USER_MODEL = "accounts.User" 162 163 DEFAULT_AUTO_FIELD = "django.db.models.AutoField" 164 165 # Login Logout URLS 166 LOGIN_URL = "login/" 167 LOGIN_REDIRECT_URL = "/" 168 LOGOUT_REDIRECT_URL = "/" 169 170 AUTH_PASSWORD_VALIDATORS = [ 171 { 172 "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator", # noqa: E501 173 }, 174 { 175 "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator", 176 "OPTIONS": { 177 "min_length": 4, 178 }, 179 }, 180 { 181 "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator", 182 }, 183 { 184 "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator", 185 }, 186 ] 187 188 LOGGING = { 189 "version": 1, 190 "disable_existing_loggers": False, 191 "formatters": {"rich": {"datefmt": "[%X]"}}, 192 "handlers": { 193 "console": { 194 "class": "rich.logging.RichHandler", 195 "formatter": "rich", 196 "level": "WARNING", 197 # "filters": ["require_debug_true"], 198 "rich_tracebacks": True, 199 "tracebacks_show_locals": True, 200 } 201 }, 202 "loggers": {"django": {"handlers": ["console"]}}, 203 } 204 [end of project/core/settings.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/project/core/settings.py b/project/core/settings.py --- a/project/core/settings.py +++ b/project/core/settings.py @@ -34,9 +34,11 @@ "taggit", "categories", "notification", + "debug_toolbar", ) MIDDLEWARE = [ + "debug_toolbar.middleware.DebugToolbarMiddleware", "corsheaders.middleware.CorsMiddleware", "django.middleware.security.SecurityMiddleware", "whitenoise.middleware.WhiteNoiseMiddleware", @@ -49,6 +51,10 @@ "django.middleware.clickjacking.XFrameOptionsMiddleware", ] +INTERNAL_IPS = [ + "127.0.0.1", +] + CSRF_USE_SESSIONS = ( True # Store the CSRF token in the users session instead of in a cookie ) diff --git a/project/core/urls.py b/project/core/urls.py --- a/project/core/urls.py +++ b/project/core/urls.py @@ -49,6 +49,7 @@ RedirectView.as_view(url="/static/favicon/mstile-144x144.png"), ), re_path(r"^media/(?P<path>.*)$", serve, {"document_root": settings.MEDIA_ROOT}), + path("__debug__/", include("debug_toolbar.urls")), ] urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
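The diff wires the toolbar up unconditionally, with the middleware first in the list and a static INTERNAL_IPS entry. A common variation, shown here only as a sketch and not as what the issue asks for, gates the same changes on DEBUG so production deployments never need the package importable:

```python
# Sketch only; mirrors the patched settings.py/urls.py but behind a DEBUG check.
# settings.py (INSTALLED_APPS is a tuple in this project, MIDDLEWARE a list)
if DEBUG:
    INSTALLED_APPS += ("debug_toolbar",)
    MIDDLEWARE.insert(0, "debug_toolbar.middleware.DebugToolbarMiddleware")
    INTERNAL_IPS = ["127.0.0.1"]

# urls.py
from django.conf import settings
from django.urls import include, path

if settings.DEBUG:
    urlpatterns += [path("__debug__/", include("debug_toolbar.urls"))]
```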
{"golden_diff": "diff --git a/project/core/settings.py b/project/core/settings.py\n--- a/project/core/settings.py\n+++ b/project/core/settings.py\n@@ -34,9 +34,11 @@\n \"taggit\",\n \"categories\",\n \"notification\",\n+ \"debug_toolbar\",\n )\n \n MIDDLEWARE = [\n+ \"debug_toolbar.middleware.DebugToolbarMiddleware\",\n \"corsheaders.middleware.CorsMiddleware\",\n \"django.middleware.security.SecurityMiddleware\",\n \"whitenoise.middleware.WhiteNoiseMiddleware\",\n@@ -49,6 +51,10 @@\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n ]\n \n+INTERNAL_IPS = [\n+ \"127.0.0.1\",\n+]\n+\n CSRF_USE_SESSIONS = (\n True # Store the CSRF token in the users session instead of in a cookie\n )\ndiff --git a/project/core/urls.py b/project/core/urls.py\n--- a/project/core/urls.py\n+++ b/project/core/urls.py\n@@ -49,6 +49,7 @@\n RedirectView.as_view(url=\"/static/favicon/mstile-144x144.png\"),\n ),\n re_path(r\"^media/(?P<path>.*)$\", serve, {\"document_root\": settings.MEDIA_ROOT}),\n+ path(\"__debug__/\", include(\"debug_toolbar.urls\")),\n ]\n \n urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n", "issue": "add django-debug-toolbar\n## Task\r\n\r\n- [ ] install `django-debug-toolbar` with the command `poetry add django-debug-toolbar --group dev`\r\n- [ ] follow the [remaining installation instructions](https://django-debug-toolbar.readthedocs.io/en/latest/installation.html)\n", "before_files": [{"content": "\"\"\"civiwiki URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\n\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.contrib import admin\nfrom django.urls import include, path, re_path\nfrom django.views.generic.base import RedirectView\nfrom django.views.static import serve\n\nfrom core.router import CiviWikiRouter\n\n\nurlpatterns = [\n path(\"admin/\", admin.site.urls),\n path(\"api/v1/\", include(CiviWikiRouter.urls)),\n path(\"api/\", include(\"accounts.urls.api\")),\n path(\"api/\", include(\"threads.urls.api\")),\n path(\"\", include(\"accounts.urls.urls\")),\n path(\"\", include(\"threads.urls.urls\")),\n path(\n \"inbox/notifications/\",\n include(\"notifications.urls\", namespace=\"notifications\"),\n ),\n path(\"favicon.ico\", RedirectView.as_view(url=\"/static/favicon/favicon.ico\")),\n path(\n \"favicon-32x32.png\",\n RedirectView.as_view(url=\"/static/favicon/favicon-32x32.png\"),\n ),\n path(\n \"apple-touch-icon.png\",\n RedirectView.as_view(url=\"/static/favicon/apple-touch-icon.png\"),\n ),\n path(\n \"mstile-144x144.png\",\n RedirectView.as_view(url=\"/static/favicon/mstile-144x144.png\"),\n ),\n re_path(r\"^media/(?P<path>.*)$\", serve, {\"document_root\": settings.MEDIA_ROOT}),\n]\n\nurlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\nurlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n", "path": "project/core/urls.py"}, {"content": "\"\"\"\nDjango settings for civiwiki project.\nDarius Calliet May 12, 2016\n\nProduction settings file to select proper environment variables.\n\"\"\"\nimport os\n\n# False if not in os.environ\nDEBUG = os.getenv(\"DEBUG\", False)\n\n# defaults to second value if not found in os.environ\nDJANGO_HOST = os.getenv(\"DJANGO_HOST\", \"LOCALHOST\")\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nSECRET_KEY = os.getenv(\"DJANGO_SECRET_KEY\", \"TEST_KEY_FOR_DEVELOPMENT\")\nALLOWED_HOSTS = [\".herokuapp.com\", \".civiwiki.org\", \"127.0.0.1\", \"localhost\", \"0.0.0.0\"]\n\nINSTALLED_APPS = (\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"django_extensions\",\n \"storages\",\n \"core\",\n \"rest_framework\",\n \"accounts.apps.AccountsConfig\",\n \"threads\",\n \"notifications\",\n \"corsheaders\",\n \"taggit\",\n \"categories\",\n \"notification\",\n)\n\nMIDDLEWARE = [\n \"corsheaders.middleware.CorsMiddleware\",\n \"django.middleware.security.SecurityMiddleware\",\n \"whitenoise.middleware.WhiteNoiseMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n # 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n]\n\nCSRF_USE_SESSIONS = (\n True # Store the CSRF token in the users session instead of in a cookie\n)\n\nCORS_ORIGIN_ALLOW_ALL = True\nROOT_URLCONF = \"core.urls\"\n\n# SSL Setup\nif DJANGO_HOST != \"LOCALHOST\":\n SECURE_PROXY_SSL_HEADER = (\"HTTP_X_FORWARDED_PROTO\", \"https\")\n SECURE_SSL_REDIRECT = True\n SESSION_COOKIE_SECURE = True\n CSRF_COOKIE_SECURE = True\n\n# Internationalization & Localization\nLANGUAGE_CODE = \"en-us\"\nTIME_ZONE = \"UTC\"\nUSE_I18N = 
True\nUSE_L10N = True\nUSE_TZ = True\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [\n os.path.join(BASE_DIR, \"threads/templates/threads\"),\n os.path.join(BASE_DIR, \"accounts/templates/accounts\"),\n ], # TODO: Add non-webapp template directory\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n ],\n },\n },\n]\n\nWSGI_APPLICATION = \"core.wsgi.application\"\n\n# Apex Contact for Production Errors\nADMINS = [(\"Development Team\", \"[email protected]\")]\n\nSTATIC_URL = \"/static/\"\nSTATICFILES_DIRS = (os.path.join(BASE_DIR, \"core/templates/static\"),)\nSTATIC_ROOT = os.path.join(BASE_DIR, \"staticfiles\")\n\nMEDIA_ROOT = os.path.join(BASE_DIR, \"media\")\nMEDIA_URL = \"/media/\"\n\n# TODO: re-organize and simplify staticfiles settings\nif \"CIVIWIKI_LOCAL_NAME\" not in os.environ:\n STATICFILES_STORAGE = \"whitenoise.storage.CompressedManifestStaticFilesStorage\"\n\n# Use DATABASE_URL in production\nDATABASE_URL = os.getenv(\"DATABASE_URL\")\n\nif DATABASE_URL is not None:\n DATABASES = {\"default\": DATABASE_URL}\nelse:\n # Default to sqlite for simplicity in development\n DATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.sqlite3\",\n \"NAME\": BASE_DIR + \"/\" + \"db.sqlite3\",\n }\n }\n\n# Email Backend Setup\nif \"EMAIL_HOST\" not in os.environ:\n EMAIL_BACKEND = \"django.core.mail.backends.console.EmailBackend\"\n EMAIL_HOST_USER = \"[email protected]\"\nelse:\n EMAIL_BACKEND = \"django.core.mail.backends.smtp.EmailBackend\"\n EMAIL_HOST = os.getenv(\"EMAIL_HOST\")\n EMAIL_PORT = os.getenv(\"EMAIL_PORT\")\n EMAIL_HOST_USER = os.getenv(\"EMAIL_HOST_USER\")\n EMAIL_HOST_PASSWORD = os.getenv(\"EMAIL_HOST_PASSWORD\")\n EMAIL_USE_SSL = True\n DEFAULT_FROM_EMAIL = EMAIL_HOST\n\n# Notification API Settings\nNOTIFICATIONS_SOFT_DELETE = True\nNOTIFICATIONS_USE_JSONFIELD = True\n\n# Django REST API Settings\nDEFAULT_RENDERER_CLASSES = (\"rest_framework.renderers.JSONRenderer\",)\n\nif DEBUG:\n # Browsable HTML - Enabled only in Debug mode (dev)\n DEFAULT_RENDERER_CLASSES = DEFAULT_RENDERER_CLASSES + (\n \"rest_framework.renderers.BrowsableAPIRenderer\",\n )\n\nREST_FRAMEWORK = {\n \"DEFAULT_PERMISSION_CLASSES\": (\"rest_framework.permissions.IsAuthenticated\",),\n \"DEFAULT_RENDERER_CLASSES\": DEFAULT_RENDERER_CLASSES,\n \"DEFAULT_AUTHENTICATION_CLASSES\": (\n \"rest_framework.authentication.BasicAuthentication\",\n \"rest_framework.authentication.SessionAuthentication\",\n ),\n}\n\n# CORS Settings\nCORS_ORIGIN_ALLOW_ALL = True\n\n# Custom User model\nAUTH_USER_MODEL = \"accounts.User\"\n\nDEFAULT_AUTO_FIELD = \"django.db.models.AutoField\"\n\n# Login Logout URLS\nLOGIN_URL = \"login/\"\nLOGIN_REDIRECT_URL = \"/\"\nLOGOUT_REDIRECT_URL = \"/\"\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\", # noqa: E501\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n \"OPTIONS\": {\n \"min_length\": 4,\n },\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n]\n\nLOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"formatters\": {\"rich\": 
{\"datefmt\": \"[%X]\"}},\n \"handlers\": {\n \"console\": {\n \"class\": \"rich.logging.RichHandler\",\n \"formatter\": \"rich\",\n \"level\": \"WARNING\",\n # \"filters\": [\"require_debug_true\"],\n \"rich_tracebacks\": True,\n \"tracebacks_show_locals\": True,\n }\n },\n \"loggers\": {\"django\": {\"handlers\": [\"console\"]}},\n}\n", "path": "project/core/settings.py"}]}
3,084
298
gh_patches_debug_31048
rasdani/github-patches
git_diff
Cog-Creators__Red-DiscordBot-1193
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> AttributeError: 'MissingRequiredArgument' object has no attribute 'original' https://sentry.io/will-tekulve/bot-development/issues/418646879/ ``` AttributeError: 'MissingRequiredArgument' object has no attribute 'original' File "redbot/core/events.py", line 187, in on_command_error exc_info=error.original) Exception in on_on_command_error ``` </issue> <code> [start of redbot/core/events.py] 1 import sys 2 import codecs 3 import datetime 4 import logging 5 import pkg_resources 6 import traceback 7 from pkg_resources import DistributionNotFound 8 9 10 import discord 11 from discord.ext import commands 12 13 from . import __version__ 14 from .data_manager import storage_type 15 from .utils.chat_formatting import inline, bordered 16 from .rpc import initialize 17 from colorama import Fore, Style, init 18 19 log = logging.getLogger("red") 20 sentry_log = logging.getLogger("red.sentry") 21 init() 22 23 INTRO = """ 24 ______ _ ______ _ _ ______ _ 25 | ___ \ | | | _ (_) | | | ___ \ | | 26 | |_/ /___ __| | ______ | | | |_ ___ ___ ___ _ __ __| | | |_/ / ___ | |_ 27 | // _ \/ _` | |______| | | | | / __|/ __/ _ \| '__/ _` | | ___ \/ _ \| __| 28 | |\ \ __/ (_| | | |/ /| \__ \ (_| (_) | | | (_| | | |_/ / (_) | |_ 29 \_| \_\___|\__,_| |___/ |_|___/\___\___/|_| \__,_| \____/ \___/ \__| 30 """ 31 32 33 def init_events(bot, cli_flags): 34 35 @bot.event 36 async def on_connect(): 37 if bot.uptime is None: 38 print("Connected to Discord. Getting ready...") 39 40 @bot.event 41 async def on_ready(): 42 if bot.uptime is not None: 43 return 44 45 bot.uptime = datetime.datetime.utcnow() 46 47 if cli_flags.no_cogs is False: 48 print("Loading packages...") 49 failed = [] 50 packages = await bot.db.packages() 51 52 for package in packages: 53 try: 54 spec = await bot.cog_mgr.find_cog(package) 55 bot.load_extension(spec) 56 except Exception as e: 57 log.exception("Failed to load package {}".format(package), 58 exc_info=e) 59 await bot.remove_loaded_package(package) 60 if packages: 61 print("Loaded packages: " + ", ".join(packages)) 62 63 guilds = len(bot.guilds) 64 users = len(set([m for m in bot.get_all_members()])) 65 66 try: 67 data = await bot.application_info() 68 invite_url = discord.utils.oauth_url(data.id) 69 except: 70 if bot.user.bot: 71 invite_url = "Could not fetch invite url" 72 else: 73 invite_url = None 74 75 prefixes = await bot.db.prefix() 76 lang = await bot.db.locale() 77 red_version = __version__ 78 red_pkg = pkg_resources.get_distribution("Red-DiscordBot") 79 dpy_version = discord.__version__ 80 81 INFO = [str(bot.user), "Prefixes: {}".format(', '.join(prefixes)), 82 'Language: {}'.format(lang), 83 "Red Bot Version: {}".format(red_version), 84 "Discord.py Version: {}".format(dpy_version), 85 "Shards: {}".format(bot.shard_count)] 86 87 if guilds: 88 INFO.extend(("Servers: {}".format(guilds), "Users: {}".format(users))) 89 else: 90 print("Ready. 
I'm not in any server yet!") 91 92 INFO.append('{} cogs with {} commands'.format(len(bot.cogs), len(bot.commands))) 93 94 INFO2 = [] 95 96 sentry = await bot.db.enable_sentry() 97 mongo_enabled = storage_type() != "JSON" 98 reqs_installed = { 99 "voice": None, 100 "docs": None, 101 "test": None 102 } 103 for key in reqs_installed.keys(): 104 reqs = [x.name for x in red_pkg._dep_map[key]] 105 try: 106 pkg_resources.require(reqs) 107 except DistributionNotFound: 108 reqs_installed[key] = False 109 else: 110 reqs_installed[key] = True 111 112 options = ( 113 ("Error Reporting", sentry), 114 ("MongoDB", mongo_enabled), 115 ("Voice", reqs_installed["voice"]), 116 ("Docs", reqs_installed["docs"]), 117 ("Tests", reqs_installed["test"]) 118 ) 119 120 on_symbol, off_symbol = _get_settings_symbols() 121 122 for option, enabled in options: 123 enabled = on_symbol if enabled else off_symbol 124 INFO2.append("{} {}".format(enabled, option)) 125 126 print(Fore.RED + INTRO) 127 print(Style.RESET_ALL) 128 print(bordered(INFO, INFO2)) 129 130 if invite_url: 131 print("\nInvite URL: {}\n".format(invite_url)) 132 133 if bot.rpc_enabled: 134 await initialize(bot) 135 136 @bot.event 137 async def on_error(event_method, *args, **kwargs): 138 sentry_log.exception("Exception in on_{}".format(event_method)) 139 140 @bot.event 141 async def on_command_error(ctx, error): 142 if isinstance(error, commands.MissingRequiredArgument): 143 await ctx.send_help() 144 elif isinstance(error, commands.BadArgument): 145 await ctx.send_help() 146 elif isinstance(error, commands.DisabledCommand): 147 await ctx.send("That command is disabled.") 148 elif isinstance(error, commands.CommandInvokeError): 149 # Need to test if the following still works 150 """ 151 no_dms = "Cannot send messages to this user" 152 is_help_cmd = ctx.command.qualified_name == "help" 153 is_forbidden = isinstance(error.original, discord.Forbidden) 154 if is_help_cmd and is_forbidden and error.original.text == no_dms: 155 msg = ("I couldn't send the help message to you in DM. Either" 156 " you blocked me or you disabled DMs in this server.") 157 await ctx.send(msg) 158 return 159 """ 160 log.exception("Exception in command '{}'" 161 "".format(ctx.command.qualified_name), 162 exc_info=error.original) 163 message = ("Error in command '{}'. Check your console or " 164 "logs for details." 165 "".format(ctx.command.qualified_name)) 166 exception_log = ("Exception in command '{}'\n" 167 "".format(ctx.command.qualified_name)) 168 exception_log += "".join(traceback.format_exception(type(error), 169 error, error.__traceback__)) 170 bot._last_exception = exception_log 171 await ctx.send(inline(message)) 172 elif isinstance(error, commands.CommandNotFound): 173 pass 174 elif isinstance(error, commands.CheckFailure): 175 await ctx.send("⛔ You are not authorized to issue that command.") 176 elif isinstance(error, commands.NoPrivateMessage): 177 await ctx.send("That command is not available in DMs.") 178 elif isinstance(error, commands.CommandOnCooldown): 179 await ctx.send("This command is on cooldown. 
" 180 "Try again in {:.2f}s" 181 "".format(error.retry_after)) 182 else: 183 log.exception(type(error).__name__, exc_info=error) 184 185 sentry_log.exception("Exception in command '{}'" 186 "".format(ctx.command.qualified_name), 187 exc_info=error.original) 188 189 @bot.event 190 async def on_message(message): 191 bot.counter["messages_read"] += 1 192 await bot.process_commands(message) 193 194 @bot.event 195 async def on_resumed(): 196 bot.counter["sessions_resumed"] += 1 197 198 @bot.event 199 async def on_command(command): 200 bot.counter["processed_commands"] += 1 201 202 def _get_settings_symbols(): 203 """Get symbols for displaying settings on stdout. 204 205 This is so we don't get encoding errors when trying to print unicode 206 emojis to stdout (particularly with Windows Command Prompt). 207 """ 208 encoder = codecs.getencoder(sys.stdout.encoding) 209 check_mark = "\N{SQUARE ROOT}" 210 try: 211 encoder(check_mark) 212 except UnicodeEncodeError: 213 on_symbol = "[X]" 214 off_symbol = "[ ]" 215 else: 216 on_symbol = check_mark 217 off_symbol = "X" 218 219 return on_symbol, off_symbol 220 [end of redbot/core/events.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/redbot/core/events.py b/redbot/core/events.py --- a/redbot/core/events.py +++ b/redbot/core/events.py @@ -135,7 +135,7 @@ @bot.event async def on_error(event_method, *args, **kwargs): - sentry_log.exception("Exception in on_{}".format(event_method)) + sentry_log.exception("Exception in {}".format(event_method)) @bot.event async def on_command_error(ctx, error): @@ -160,6 +160,10 @@ log.exception("Exception in command '{}'" "".format(ctx.command.qualified_name), exc_info=error.original) + sentry_log.exception("Exception in command '{}'" + "".format(ctx.command.qualified_name), + exc_info=error.original) + message = ("Error in command '{}'. Check your console or " "logs for details." "".format(ctx.command.qualified_name)) @@ -181,10 +185,13 @@ "".format(error.retry_after)) else: log.exception(type(error).__name__, exc_info=error) + try: + sentry_error = error.original + except AttributeError: + sentry_error = error - sentry_log.exception("Exception in command '{}'" - "".format(ctx.command.qualified_name), - exc_info=error.original) + sentry_log.exception("Unhandled command error.", + exc_info=sentry_error) @bot.event async def on_message(message):
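The traceback comes from the generic else-branch assuming every command error carries .original, which only wrapped errors such as CommandInvokeError actually provide; MissingRequiredArgument does not. The merged fix falls back via try/except AttributeError; an equivalent, slightly terser form (a sketch, not the code the repo adopted) is:

```python
# getattr with a fallback: use the wrapped exception when present, otherwise
# report the command error itself, so Sentry logging never raises AttributeError.
def unwrap_command_error(error: Exception) -> Exception:
    return getattr(error, "original", error)

# e.g. sentry_log.exception("Unhandled command error.",
#                           exc_info=unwrap_command_error(error))
```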
{"golden_diff": "diff --git a/redbot/core/events.py b/redbot/core/events.py\n--- a/redbot/core/events.py\n+++ b/redbot/core/events.py\n@@ -135,7 +135,7 @@\n \n @bot.event\n async def on_error(event_method, *args, **kwargs):\n- sentry_log.exception(\"Exception in on_{}\".format(event_method))\n+ sentry_log.exception(\"Exception in {}\".format(event_method))\n \n @bot.event\n async def on_command_error(ctx, error):\n@@ -160,6 +160,10 @@\n log.exception(\"Exception in command '{}'\"\n \"\".format(ctx.command.qualified_name),\n exc_info=error.original)\n+ sentry_log.exception(\"Exception in command '{}'\"\n+ \"\".format(ctx.command.qualified_name),\n+ exc_info=error.original)\n+\n message = (\"Error in command '{}'. Check your console or \"\n \"logs for details.\"\n \"\".format(ctx.command.qualified_name))\n@@ -181,10 +185,13 @@\n \"\".format(error.retry_after))\n else:\n log.exception(type(error).__name__, exc_info=error)\n+ try:\n+ sentry_error = error.original\n+ except AttributeError:\n+ sentry_error = error\n \n- sentry_log.exception(\"Exception in command '{}'\"\n- \"\".format(ctx.command.qualified_name),\n- exc_info=error.original)\n+ sentry_log.exception(\"Unhandled command error.\",\n+ exc_info=sentry_error)\n \n @bot.event\n async def on_message(message):\n", "issue": "AttributeError: 'MissingRequiredArgument' object has no attribute 'original'\nhttps://sentry.io/will-tekulve/bot-development/issues/418646879/\n\n```\nAttributeError: 'MissingRequiredArgument' object has no attribute 'original'\n File \"redbot/core/events.py\", line 187, in on_command_error\n exc_info=error.original)\n\nException in on_on_command_error\n```\n", "before_files": [{"content": "import sys\nimport codecs\nimport datetime\nimport logging\nimport pkg_resources\nimport traceback\nfrom pkg_resources import DistributionNotFound\n\n\nimport discord\nfrom discord.ext import commands\n\nfrom . import __version__\nfrom .data_manager import storage_type\nfrom .utils.chat_formatting import inline, bordered\nfrom .rpc import initialize\nfrom colorama import Fore, Style, init\n\nlog = logging.getLogger(\"red\")\nsentry_log = logging.getLogger(\"red.sentry\")\ninit()\n\nINTRO = \"\"\"\n______ _ ______ _ _ ______ _ \n| ___ \\ | | | _ (_) | | | ___ \\ | | \n| |_/ /___ __| | ______ | | | |_ ___ ___ ___ _ __ __| | | |_/ / ___ | |_ \n| // _ \\/ _` | |______| | | | | / __|/ __/ _ \\| '__/ _` | | ___ \\/ _ \\| __|\n| |\\ \\ __/ (_| | | |/ /| \\__ \\ (_| (_) | | | (_| | | |_/ / (_) | |_ \n\\_| \\_\\___|\\__,_| |___/ |_|___/\\___\\___/|_| \\__,_| \\____/ \\___/ \\__|\n\"\"\"\n\n\ndef init_events(bot, cli_flags):\n\n @bot.event\n async def on_connect():\n if bot.uptime is None:\n print(\"Connected to Discord. 
Getting ready...\")\n\n @bot.event\n async def on_ready():\n if bot.uptime is not None:\n return\n\n bot.uptime = datetime.datetime.utcnow()\n\n if cli_flags.no_cogs is False:\n print(\"Loading packages...\")\n failed = []\n packages = await bot.db.packages()\n\n for package in packages:\n try:\n spec = await bot.cog_mgr.find_cog(package)\n bot.load_extension(spec)\n except Exception as e:\n log.exception(\"Failed to load package {}\".format(package),\n exc_info=e)\n await bot.remove_loaded_package(package)\n if packages:\n print(\"Loaded packages: \" + \", \".join(packages))\n\n guilds = len(bot.guilds)\n users = len(set([m for m in bot.get_all_members()]))\n\n try:\n data = await bot.application_info()\n invite_url = discord.utils.oauth_url(data.id)\n except:\n if bot.user.bot:\n invite_url = \"Could not fetch invite url\"\n else:\n invite_url = None\n\n prefixes = await bot.db.prefix()\n lang = await bot.db.locale()\n red_version = __version__\n red_pkg = pkg_resources.get_distribution(\"Red-DiscordBot\")\n dpy_version = discord.__version__\n\n INFO = [str(bot.user), \"Prefixes: {}\".format(', '.join(prefixes)),\n 'Language: {}'.format(lang),\n \"Red Bot Version: {}\".format(red_version),\n \"Discord.py Version: {}\".format(dpy_version),\n \"Shards: {}\".format(bot.shard_count)]\n\n if guilds:\n INFO.extend((\"Servers: {}\".format(guilds), \"Users: {}\".format(users)))\n else:\n print(\"Ready. I'm not in any server yet!\")\n\n INFO.append('{} cogs with {} commands'.format(len(bot.cogs), len(bot.commands)))\n\n INFO2 = []\n\n sentry = await bot.db.enable_sentry()\n mongo_enabled = storage_type() != \"JSON\"\n reqs_installed = {\n \"voice\": None,\n \"docs\": None,\n \"test\": None\n }\n for key in reqs_installed.keys():\n reqs = [x.name for x in red_pkg._dep_map[key]]\n try:\n pkg_resources.require(reqs)\n except DistributionNotFound:\n reqs_installed[key] = False\n else:\n reqs_installed[key] = True\n\n options = (\n (\"Error Reporting\", sentry),\n (\"MongoDB\", mongo_enabled),\n (\"Voice\", reqs_installed[\"voice\"]),\n (\"Docs\", reqs_installed[\"docs\"]),\n (\"Tests\", reqs_installed[\"test\"])\n )\n\n on_symbol, off_symbol = _get_settings_symbols()\n\n for option, enabled in options:\n enabled = on_symbol if enabled else off_symbol\n INFO2.append(\"{} {}\".format(enabled, option))\n\n print(Fore.RED + INTRO)\n print(Style.RESET_ALL)\n print(bordered(INFO, INFO2))\n\n if invite_url:\n print(\"\\nInvite URL: {}\\n\".format(invite_url))\n\n if bot.rpc_enabled:\n await initialize(bot)\n\n @bot.event\n async def on_error(event_method, *args, **kwargs):\n sentry_log.exception(\"Exception in on_{}\".format(event_method))\n\n @bot.event\n async def on_command_error(ctx, error):\n if isinstance(error, commands.MissingRequiredArgument):\n await ctx.send_help()\n elif isinstance(error, commands.BadArgument):\n await ctx.send_help()\n elif isinstance(error, commands.DisabledCommand):\n await ctx.send(\"That command is disabled.\")\n elif isinstance(error, commands.CommandInvokeError):\n # Need to test if the following still works\n \"\"\"\n no_dms = \"Cannot send messages to this user\"\n is_help_cmd = ctx.command.qualified_name == \"help\"\n is_forbidden = isinstance(error.original, discord.Forbidden)\n if is_help_cmd and is_forbidden and error.original.text == no_dms:\n msg = (\"I couldn't send the help message to you in DM. 
Either\"\n \" you blocked me or you disabled DMs in this server.\")\n await ctx.send(msg)\n return\n \"\"\"\n log.exception(\"Exception in command '{}'\"\n \"\".format(ctx.command.qualified_name),\n exc_info=error.original)\n message = (\"Error in command '{}'. Check your console or \"\n \"logs for details.\"\n \"\".format(ctx.command.qualified_name))\n exception_log = (\"Exception in command '{}'\\n\"\n \"\".format(ctx.command.qualified_name))\n exception_log += \"\".join(traceback.format_exception(type(error),\n error, error.__traceback__))\n bot._last_exception = exception_log\n await ctx.send(inline(message))\n elif isinstance(error, commands.CommandNotFound):\n pass\n elif isinstance(error, commands.CheckFailure):\n await ctx.send(\"\u26d4 You are not authorized to issue that command.\")\n elif isinstance(error, commands.NoPrivateMessage):\n await ctx.send(\"That command is not available in DMs.\")\n elif isinstance(error, commands.CommandOnCooldown):\n await ctx.send(\"This command is on cooldown. \"\n \"Try again in {:.2f}s\"\n \"\".format(error.retry_after))\n else:\n log.exception(type(error).__name__, exc_info=error)\n\n sentry_log.exception(\"Exception in command '{}'\"\n \"\".format(ctx.command.qualified_name),\n exc_info=error.original)\n\n @bot.event\n async def on_message(message):\n bot.counter[\"messages_read\"] += 1\n await bot.process_commands(message)\n\n @bot.event\n async def on_resumed():\n bot.counter[\"sessions_resumed\"] += 1\n\n @bot.event\n async def on_command(command):\n bot.counter[\"processed_commands\"] += 1\n\ndef _get_settings_symbols():\n \"\"\"Get symbols for displaying settings on stdout.\n\n This is so we don't get encoding errors when trying to print unicode\n emojis to stdout (particularly with Windows Command Prompt).\n \"\"\"\n encoder = codecs.getencoder(sys.stdout.encoding)\n check_mark = \"\\N{SQUARE ROOT}\"\n try:\n encoder(check_mark)\n except UnicodeEncodeError:\n on_symbol = \"[X]\"\n off_symbol = \"[ ]\"\n else:\n on_symbol = check_mark\n off_symbol = \"X\"\n\n return on_symbol, off_symbol\n", "path": "redbot/core/events.py"}]}
2,853
339
gh_patches_debug_19906
rasdani/github-patches
git_diff
mitmproxy__mitmproxy-4246
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> v4 --replacements vs v5 --modify-headers I'm trying to replace the `User-Agent` request header if it contains a certain string. This works with "mitmproxy-4.0.4-linux": ``` ./mitmproxy --replacements ":~hq User-Agent:Mozilla(.+):CUSTOMAGENT" ``` With "mitmproxy-5.2-linux", this at least replaces the `User-Agent`, but is missing my "certain string condition": ``` ./mitmproxy --modify-headers "|~hq .+|User-Agent|CUSTOMAGENT" ``` How do I add my `Mozilla` condition in v5? None of these work: ``` ./mitmproxy --modify-headers "|~hq ^(.*?)Mozilla(.*?)$|User-Agent|CUSTOMAGENT" ./mitmproxy --modify-headers "/~hq .*?Mozilla.*?/User-Agent/CUSTOMAGENT" ./mitmproxy --modify-headers "|~hq Mozilla|User-Agent|CUSTOMAGENT" ./mitmproxy --modify-headers "|~hq User-Agent: Mozilla|User-Agent|CUSTOMAGENT" ./mitmproxy --modify-headers "|~hq \"^(.*?)Mozilla(.*?)$\"|User-Agent|CUSTOMAGENT" ``` I've been trying for hours, and I feel like I've tried every variation under the sun. There's a very small chance it's a bug, but most likely I'm just doing it wrong. If it matters, this system is Ubuntu 16.04. </issue> <code> [start of mitmproxy/addons/modifyheaders.py] 1 import re 2 import typing 3 from pathlib import Path 4 5 from mitmproxy import ctx, exceptions, flowfilter, http 6 from mitmproxy.net.http import Headers 7 from mitmproxy.utils import strutils 8 from mitmproxy.utils.spec import parse_spec 9 10 11 class ModifySpec(typing.NamedTuple): 12 matches: flowfilter.TFilter 13 subject: bytes 14 replacement_str: str 15 16 def read_replacement(self) -> bytes: 17 """ 18 Process the replacement str. This usually just involves converting it to bytes. 19 However, if it starts with `@`, we interpret the rest as a file path to read from. 20 21 Raises: 22 - IOError if the file cannot be read. 23 """ 24 if self.replacement_str.startswith("@"): 25 return Path(self.replacement_str[1:]).expanduser().read_bytes() 26 else: 27 # We could cache this at some point, but unlikely to be a problem. 28 return strutils.escaped_str_to_bytes(self.replacement_str) 29 30 31 def parse_modify_spec(option: str, subject_is_regex: bool) -> ModifySpec: 32 flow_filter, subject_str, replacement = parse_spec(option) 33 34 subject = strutils.escaped_str_to_bytes(subject_str) 35 if subject_is_regex: 36 try: 37 re.compile(subject) 38 except re.error as e: 39 raise ValueError(f"Invalid regular expression {subject!r} ({e})") 40 41 spec = ModifySpec(flow_filter, subject, replacement) 42 43 try: 44 spec.read_replacement() 45 except OSError as e: 46 raise ValueError(f"Invalid file path: {replacement[1:]} ({e})") 47 48 return spec 49 50 51 class ModifyHeaders: 52 def __init__(self): 53 self.replacements: typing.List[ModifySpec] = [] 54 55 def load(self, loader): 56 loader.add_option( 57 "modify_headers", typing.Sequence[str], [], 58 """ 59 Header modify pattern of the form "[/flow-filter]/header-name/[@]header-value", where the 60 separator can be any character. The @ allows to provide a file path that is used to read 61 the header value string. An empty header-value removes existing header-name headers. 
62 """ 63 ) 64 65 def configure(self, updated): 66 if "modify_headers" in updated: 67 self.replacements = [] 68 for option in ctx.options.modify_headers: 69 try: 70 spec = parse_modify_spec(option, False) 71 except ValueError as e: 72 raise exceptions.OptionsError(f"Cannot parse modify_headers option {option}: {e}") from e 73 self.replacements.append(spec) 74 75 def request(self, flow): 76 if flow.response or flow.error or flow.reply.state == "taken": 77 return 78 self.run(flow, flow.request.headers) 79 80 def response(self, flow): 81 if flow.error or flow.reply.state == "taken": 82 return 83 self.run(flow, flow.response.headers) 84 85 def run(self, flow: http.HTTPFlow, hdrs: Headers) -> None: 86 # unset all specified headers 87 for spec in self.replacements: 88 if spec.matches(flow): 89 hdrs.pop(spec.subject, None) 90 91 # set all specified headers if the replacement string is not empty 92 for spec in self.replacements: 93 if spec.matches(flow): 94 try: 95 replacement = spec.read_replacement() 96 except OSError as e: 97 ctx.log.warn(f"Could not read replacement file: {e}") 98 continue 99 else: 100 if replacement: 101 hdrs.add(spec.subject, replacement) 102 [end of mitmproxy/addons/modifyheaders.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mitmproxy/addons/modifyheaders.py b/mitmproxy/addons/modifyheaders.py --- a/mitmproxy/addons/modifyheaders.py +++ b/mitmproxy/addons/modifyheaders.py @@ -83,14 +83,21 @@ self.run(flow, flow.response.headers) def run(self, flow: http.HTTPFlow, hdrs: Headers) -> None: - # unset all specified headers + matches = [] + + # first check all the filters against the original, unmodified flow for spec in self.replacements: - if spec.matches(flow): + matches.append(spec.matches(flow)) + + # unset all specified headers + for i, spec in enumerate(self.replacements): + if matches[i]: hdrs.pop(spec.subject, None) # set all specified headers if the replacement string is not empty - for spec in self.replacements: - if spec.matches(flow): + + for i, spec in enumerate(self.replacements): + if matches[i]: try: replacement = spec.read_replacement() except OSError as e:
{"golden_diff": "diff --git a/mitmproxy/addons/modifyheaders.py b/mitmproxy/addons/modifyheaders.py\n--- a/mitmproxy/addons/modifyheaders.py\n+++ b/mitmproxy/addons/modifyheaders.py\n@@ -83,14 +83,21 @@\n self.run(flow, flow.response.headers)\n \n def run(self, flow: http.HTTPFlow, hdrs: Headers) -> None:\n- # unset all specified headers\n+ matches = []\n+\n+ # first check all the filters against the original, unmodified flow\n for spec in self.replacements:\n- if spec.matches(flow):\n+ matches.append(spec.matches(flow))\n+\n+ # unset all specified headers\n+ for i, spec in enumerate(self.replacements):\n+ if matches[i]:\n hdrs.pop(spec.subject, None)\n \n # set all specified headers if the replacement string is not empty\n- for spec in self.replacements:\n- if spec.matches(flow):\n+\n+ for i, spec in enumerate(self.replacements):\n+ if matches[i]:\n try:\n replacement = spec.read_replacement()\n except OSError as e:\n", "issue": "v4 --replacements vs v5 --modify-headers\nI'm trying to replace the `User-Agent` request header if it contains a certain string.\r\n\r\nThis works with \"mitmproxy-4.0.4-linux\":\r\n\r\n```\r\n./mitmproxy --replacements \":~hq User-Agent:Mozilla(.+):CUSTOMAGENT\"\r\n```\r\n\r\nWith \"mitmproxy-5.2-linux\", this at least replaces the `User-Agent`, but is missing my \"certain string condition\":\r\n\r\n```\r\n./mitmproxy --modify-headers \"|~hq .+|User-Agent|CUSTOMAGENT\"\r\n```\r\n\r\nHow do I add my `Mozilla` condition in v5?\r\n\r\nNone of these work:\r\n\r\n```\r\n./mitmproxy --modify-headers \"|~hq ^(.*?)Mozilla(.*?)$|User-Agent|CUSTOMAGENT\"\r\n\r\n./mitmproxy --modify-headers \"/~hq .*?Mozilla.*?/User-Agent/CUSTOMAGENT\"\r\n\r\n./mitmproxy --modify-headers \"|~hq Mozilla|User-Agent|CUSTOMAGENT\"\r\n\r\n./mitmproxy --modify-headers \"|~hq User-Agent: Mozilla|User-Agent|CUSTOMAGENT\"\r\n\r\n./mitmproxy --modify-headers \"|~hq \\\"^(.*?)Mozilla(.*?)$\\\"|User-Agent|CUSTOMAGENT\"\r\n```\r\n\r\nI've been trying for hours, and I feel like I've tried every variation under the sun. There's a very small chance it's a bug, but most likely I'm just doing it wrong. If it matters, this system is Ubuntu 16.04.\r\n\r\n\r\n\n", "before_files": [{"content": "import re\nimport typing\nfrom pathlib import Path\n\nfrom mitmproxy import ctx, exceptions, flowfilter, http\nfrom mitmproxy.net.http import Headers\nfrom mitmproxy.utils import strutils\nfrom mitmproxy.utils.spec import parse_spec\n\n\nclass ModifySpec(typing.NamedTuple):\n matches: flowfilter.TFilter\n subject: bytes\n replacement_str: str\n\n def read_replacement(self) -> bytes:\n \"\"\"\n Process the replacement str. 
This usually just involves converting it to bytes.\n However, if it starts with `@`, we interpret the rest as a file path to read from.\n\n Raises:\n - IOError if the file cannot be read.\n \"\"\"\n if self.replacement_str.startswith(\"@\"):\n return Path(self.replacement_str[1:]).expanduser().read_bytes()\n else:\n # We could cache this at some point, but unlikely to be a problem.\n return strutils.escaped_str_to_bytes(self.replacement_str)\n\n\ndef parse_modify_spec(option: str, subject_is_regex: bool) -> ModifySpec:\n flow_filter, subject_str, replacement = parse_spec(option)\n\n subject = strutils.escaped_str_to_bytes(subject_str)\n if subject_is_regex:\n try:\n re.compile(subject)\n except re.error as e:\n raise ValueError(f\"Invalid regular expression {subject!r} ({e})\")\n\n spec = ModifySpec(flow_filter, subject, replacement)\n\n try:\n spec.read_replacement()\n except OSError as e:\n raise ValueError(f\"Invalid file path: {replacement[1:]} ({e})\")\n\n return spec\n\n\nclass ModifyHeaders:\n def __init__(self):\n self.replacements: typing.List[ModifySpec] = []\n\n def load(self, loader):\n loader.add_option(\n \"modify_headers\", typing.Sequence[str], [],\n \"\"\"\n Header modify pattern of the form \"[/flow-filter]/header-name/[@]header-value\", where the\n separator can be any character. The @ allows to provide a file path that is used to read\n the header value string. An empty header-value removes existing header-name headers.\n \"\"\"\n )\n\n def configure(self, updated):\n if \"modify_headers\" in updated:\n self.replacements = []\n for option in ctx.options.modify_headers:\n try:\n spec = parse_modify_spec(option, False)\n except ValueError as e:\n raise exceptions.OptionsError(f\"Cannot parse modify_headers option {option}: {e}\") from e\n self.replacements.append(spec)\n\n def request(self, flow):\n if flow.response or flow.error or flow.reply.state == \"taken\":\n return\n self.run(flow, flow.request.headers)\n\n def response(self, flow):\n if flow.error or flow.reply.state == \"taken\":\n return\n self.run(flow, flow.response.headers)\n\n def run(self, flow: http.HTTPFlow, hdrs: Headers) -> None:\n # unset all specified headers\n for spec in self.replacements:\n if spec.matches(flow):\n hdrs.pop(spec.subject, None)\n\n # set all specified headers if the replacement string is not empty\n for spec in self.replacements:\n if spec.matches(flow):\n try:\n replacement = spec.read_replacement()\n except OSError as e:\n ctx.log.warn(f\"Could not read replacement file: {e}\")\n continue\n else:\n if replacement:\n hdrs.add(spec.subject, replacement)\n", "path": "mitmproxy/addons/modifyheaders.py"}]}
1,801
247
gh_patches_debug_38553
rasdani/github-patches
git_diff
ansible__awx-9295
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> awx.awx.tower_settings does not return expected changes when running in check mode with diff <!-- Issues are for **concrete, actionable bugs and feature requests** only - if you're just asking for debugging help or technical support, please use: - http://webchat.freenode.net/?channels=ansible-awx - https://groups.google.com/forum/#!forum/awx-project We have to limit this because of limited volunteer time to respond to issues! --> ##### ISSUE TYPE - Bug Report ##### SUMMARY Running the module with check and diff mode simultaneously does not return the list of expected changes. It only returns that the module will change some unknown thing (by setting the `changed` variable true). As check and diff mode is mainly used for, erm.. checking, this makes it hard to actually check the correct invocation of the module, especially when multiple settings are set. ##### ENVIRONMENT * AWX version: 15.0.1 * AWX install method: docker on linux * Ansible version: 2.9.10 * Operating System: Debian 10 * Web Browser: Chrome ##### STEPS TO REPRODUCE Task extract: ``` - name: Set LDAP tower_settings: settings: AUTH_LDAP_SERVER_URI: "ldap://ldap.example.com:389" AUTH_LDAP_BIND_DN: CN=user,DC=example,DC=com AUTH_LDAP_BIND_PASSWORD: password AUTH_LDAP_START_TLS: true ``` Make sure that some of these settings have a different value in AWX. Run the task with and without check&diff mode. ##### EXPECTED RESULTS Expected at least some insight why the module reports changed status when running with check&diff mode. For eg if AWX server's AUTH_LDAP_SERVER_URI is `ldap://nonexistent.com:389`, then running check&diff mode could produce similar output when running in normal mode: ``` changed: [localhost] => { "changed": true, "invocation": { "module_args": { "settings": { "AUTH_LDAP_SERVER_URI": "ldap://ldap.example.com:389", "AUTH_LDAP_BIND_DN": "CN=user,DC=example,DC=com", "AUTH_LDAP_BIND_PASSWORD": "password", "AUTH_LDAP_START_TLS": true }, <...other module args ...> } }, "old_values": { "AUTH_LDAP_SERVER_URI": "ldap://nonexistent.com:389" }, "values": { "AUTH_LDAP_SERVER_URI": "ldap://ldap.example.com:389" } } ``` ##### ACTUAL RESULTS Running the module with check&diff mode produces less usable output regarding the variables to be changed: ``` changed: [localhost] => { "changed": true, "invocation": { "module_args": { "settings": { "AUTH_LDAP_SERVER_URI": "ldap://ldap.example.com:389", "AUTH_LDAP_BIND_DN": "CN=user,DC=example,DC=com", "AUTH_LDAP_BIND_PASSWORD": "password", "AUTH_LDAP_START_TLS": true }, <...other module args ...> } } ``` ##### ADDITIONAL INFORMATION As i see the main reason behind such behaviour is because a [new json_response dict is created](https://github.com/ansible/awx/blob/29926ba5d99b760b32de35e07c211fd3e7691c8d/awx_collection/plugins/modules/tower_settings.py#L136) in the module's code, but the [check_mode exit point](https://github.com/ansible/awx/blob/29926ba5d99b760b32de35e07c211fd3e7691c8d/awx_collection/plugins/module_utils/tower_api.py#L85) uses its own `json_output` dict. Edit: Completely forgot about diff mode, but ofc thats where i expect to see the detailed changes. 
</issue> <code> [start of awx_collection/plugins/modules/tower_settings.py] 1 #!/usr/bin/python 2 # coding: utf-8 -*- 3 4 # (c) 2018, Nikhil Jain <[email protected]> 5 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) 6 7 from __future__ import absolute_import, division, print_function 8 __metaclass__ = type 9 10 11 ANSIBLE_METADATA = {'metadata_version': '1.1', 12 'status': ['preview'], 13 'supported_by': 'community'} 14 15 16 DOCUMENTATION = ''' 17 --- 18 module: tower_settings 19 author: "Nikhil Jain (@jainnikhil30)" 20 short_description: Modify Ansible Tower settings. 21 description: 22 - Modify Ansible Tower settings. See 23 U(https://www.ansible.com/tower) for an overview. 24 options: 25 name: 26 description: 27 - Name of setting to modify 28 type: str 29 value: 30 description: 31 - Value to be modified for given setting. 32 - If given a non-string type, will make best effort to cast it to type API expects. 33 - For better control over types, use the C(settings) param instead. 34 type: str 35 settings: 36 description: 37 - A data structure to be sent into the settings endpoint 38 type: dict 39 requirements: 40 - pyyaml 41 extends_documentation_fragment: awx.awx.auth 42 ''' 43 44 EXAMPLES = ''' 45 - name: Set the value of AWX_PROOT_BASE_PATH 46 tower_settings: 47 name: AWX_PROOT_BASE_PATH 48 value: "/tmp" 49 register: testing_settings 50 51 - name: Set the value of AWX_PROOT_SHOW_PATHS 52 tower_settings: 53 name: "AWX_PROOT_SHOW_PATHS" 54 value: "'/var/lib/awx/projects/', '/tmp'" 55 register: testing_settings 56 57 - name: Set the LDAP Auth Bind Password 58 tower_settings: 59 name: "AUTH_LDAP_BIND_PASSWORD" 60 value: "Password" 61 no_log: true 62 63 - name: Set all the LDAP Auth Bind Params 64 tower_settings: 65 settings: 66 AUTH_LDAP_BIND_PASSWORD: "password" 67 AUTH_LDAP_USER_ATTR_MAP: 68 email: "mail" 69 first_name: "givenName" 70 last_name: "surname" 71 ''' 72 73 from ..module_utils.tower_api import TowerAPIModule 74 75 try: 76 import yaml 77 HAS_YAML = True 78 except ImportError: 79 HAS_YAML = False 80 81 82 def coerce_type(module, value): 83 # If our value is already None we can just return directly 84 if value is None: 85 return value 86 87 yaml_ish = bool(( 88 value.startswith('{') and value.endswith('}') 89 ) or ( 90 value.startswith('[') and value.endswith(']')) 91 ) 92 if yaml_ish: 93 if not HAS_YAML: 94 module.fail_json(msg="yaml is not installed, try 'pip install pyyaml'") 95 return yaml.safe_load(value) 96 elif value.lower in ('true', 'false', 't', 'f'): 97 return {'t': True, 'f': False}[value[0].lower()] 98 try: 99 return int(value) 100 except ValueError: 101 pass 102 return value 103 104 105 def main(): 106 # Any additional arguments that are not fields of the item can be added here 107 argument_spec = dict( 108 name=dict(), 109 value=dict(), 110 settings=dict(type='dict'), 111 ) 112 113 # Create a module for ourselves 114 module = TowerAPIModule( 115 argument_spec=argument_spec, 116 required_one_of=[['name', 'settings']], 117 mutually_exclusive=[['name', 'settings']], 118 required_if=[['name', 'present', ['value']]] 119 ) 120 121 # Extract our parameters 122 name = module.params.get('name') 123 value = module.params.get('value') 124 new_settings = module.params.get('settings') 125 126 # If we were given a name/value pair we will just make settings out of that and proceed normally 127 if new_settings is None: 128 new_value = coerce_type(module, value) 129 130 new_settings = {name: new_value} 131 132 # Load the existing 
settings 133 existing_settings = module.get_endpoint('settings/all')['json'] 134 135 # Begin a json response 136 json_response = {'changed': False, 'old_values': {}} 137 138 # Check any of the settings to see if anything needs to be updated 139 needs_update = False 140 for a_setting in new_settings: 141 if a_setting not in existing_settings or existing_settings[a_setting] != new_settings[a_setting]: 142 # At least one thing is different so we need to patch 143 needs_update = True 144 json_response['old_values'][a_setting] = existing_settings[a_setting] 145 146 # If nothing needs an update we can simply exit with the response (as not changed) 147 if not needs_update: 148 module.exit_json(**json_response) 149 150 # Make the call to update the settings 151 response = module.patch_endpoint('settings/all', **{'data': new_settings}) 152 153 if response['status_code'] == 200: 154 # Set the changed response to True 155 json_response['changed'] = True 156 157 # To deal with the old style values we need to return 'value' in the response 158 new_values = {} 159 for a_setting in new_settings: 160 new_values[a_setting] = response['json'][a_setting] 161 162 # If we were using a name we will just add a value of a string, otherwise we will return an array in values 163 if name is not None: 164 json_response['value'] = new_values[name] 165 else: 166 json_response['values'] = new_values 167 168 module.exit_json(**json_response) 169 elif 'json' in response and '__all__' in response['json']: 170 module.fail_json(msg=response['json']['__all__']) 171 else: 172 module.fail_json(**{'msg': "Unable to update settings, see response", 'response': response}) 173 174 175 if __name__ == '__main__': 176 main() 177 [end of awx_collection/plugins/modules/tower_settings.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/awx_collection/plugins/modules/tower_settings.py b/awx_collection/plugins/modules/tower_settings.py --- a/awx_collection/plugins/modules/tower_settings.py +++ b/awx_collection/plugins/modules/tower_settings.py @@ -133,7 +133,7 @@ existing_settings = module.get_endpoint('settings/all')['json'] # Begin a json response - json_response = {'changed': False, 'old_values': {}} + json_output = {'changed': False, 'old_values': {}, 'new_values': {}} # Check any of the settings to see if anything needs to be updated needs_update = False @@ -141,18 +141,29 @@ if a_setting not in existing_settings or existing_settings[a_setting] != new_settings[a_setting]: # At least one thing is different so we need to patch needs_update = True - json_response['old_values'][a_setting] = existing_settings[a_setting] + json_output['old_values'][a_setting] = existing_settings[a_setting] + json_output['new_values'][a_setting] = new_settings[a_setting] + + if module._diff: + json_output['diff'] = { + 'before': json_output['old_values'], + 'after': json_output['new_values'] + } # If nothing needs an update we can simply exit with the response (as not changed) if not needs_update: - module.exit_json(**json_response) + module.exit_json(**json_output) + + if module.check_mode and module._diff: + json_output['changed'] = True + module.exit_json(**json_output) # Make the call to update the settings response = module.patch_endpoint('settings/all', **{'data': new_settings}) if response['status_code'] == 200: # Set the changed response to True - json_response['changed'] = True + json_output['changed'] = True # To deal with the old style values we need to return 'value' in the response new_values = {} @@ -161,11 +172,11 @@ # If we were using a name we will just add a value of a string, otherwise we will return an array in values if name is not None: - json_response['value'] = new_values[name] + json_output['value'] = new_values[name] else: - json_response['values'] = new_values + json_output['values'] = new_values - module.exit_json(**json_response) + module.exit_json(**json_output) elif 'json' in response and '__all__' in response['json']: module.fail_json(msg=response['json']['__all__']) else:
{"golden_diff": "diff --git a/awx_collection/plugins/modules/tower_settings.py b/awx_collection/plugins/modules/tower_settings.py\n--- a/awx_collection/plugins/modules/tower_settings.py\n+++ b/awx_collection/plugins/modules/tower_settings.py\n@@ -133,7 +133,7 @@\n existing_settings = module.get_endpoint('settings/all')['json']\n \n # Begin a json response\n- json_response = {'changed': False, 'old_values': {}}\n+ json_output = {'changed': False, 'old_values': {}, 'new_values': {}}\n \n # Check any of the settings to see if anything needs to be updated\n needs_update = False\n@@ -141,18 +141,29 @@\n if a_setting not in existing_settings or existing_settings[a_setting] != new_settings[a_setting]:\n # At least one thing is different so we need to patch\n needs_update = True\n- json_response['old_values'][a_setting] = existing_settings[a_setting]\n+ json_output['old_values'][a_setting] = existing_settings[a_setting]\n+ json_output['new_values'][a_setting] = new_settings[a_setting]\n+\n+ if module._diff:\n+ json_output['diff'] = {\n+ 'before': json_output['old_values'],\n+ 'after': json_output['new_values']\n+ }\n \n # If nothing needs an update we can simply exit with the response (as not changed)\n if not needs_update:\n- module.exit_json(**json_response)\n+ module.exit_json(**json_output)\n+\n+ if module.check_mode and module._diff:\n+ json_output['changed'] = True\n+ module.exit_json(**json_output)\n \n # Make the call to update the settings\n response = module.patch_endpoint('settings/all', **{'data': new_settings})\n \n if response['status_code'] == 200:\n # Set the changed response to True\n- json_response['changed'] = True\n+ json_output['changed'] = True\n \n # To deal with the old style values we need to return 'value' in the response\n new_values = {}\n@@ -161,11 +172,11 @@\n \n # If we were using a name we will just add a value of a string, otherwise we will return an array in values\n if name is not None:\n- json_response['value'] = new_values[name]\n+ json_output['value'] = new_values[name]\n else:\n- json_response['values'] = new_values\n+ json_output['values'] = new_values\n \n- module.exit_json(**json_response)\n+ module.exit_json(**json_output)\n elif 'json' in response and '__all__' in response['json']:\n module.fail_json(msg=response['json']['__all__'])\n else:\n", "issue": "awx.awx.tower_settings does not return expected changes when running in check mode with diff\n<!-- Issues are for **concrete, actionable bugs and feature requests** only - if you're just asking for debugging help or technical support, please use:\r\n\r\n- http://webchat.freenode.net/?channels=ansible-awx\r\n- https://groups.google.com/forum/#!forum/awx-project\r\n\r\nWe have to limit this because of limited volunteer time to respond to issues! -->\r\n\r\n##### ISSUE TYPE\r\n - Bug Report\r\n\r\n##### SUMMARY\r\nRunning the module with check and diff mode simultaneously does not return the list of expected changes. It only returns that the module will change some unknown thing (by setting the `changed` variable true).\r\nAs check and diff mode is mainly used for, erm.. 
checking, this makes it hard to actually check the correct invocation of the module, especially when multiple settings are set.\r\n\r\n##### ENVIRONMENT\r\n* AWX version: 15.0.1\r\n* AWX install method: docker on linux\r\n* Ansible version: 2.9.10\r\n* Operating System: Debian 10\r\n* Web Browser: Chrome\r\n\r\n##### STEPS TO REPRODUCE\r\n\r\nTask extract:\r\n```\r\n- name: Set LDAP\r\n tower_settings:\r\n settings:\r\n AUTH_LDAP_SERVER_URI: \"ldap://ldap.example.com:389\"\r\n AUTH_LDAP_BIND_DN: CN=user,DC=example,DC=com\r\n AUTH_LDAP_BIND_PASSWORD: password\r\n AUTH_LDAP_START_TLS: true\r\n```\r\nMake sure that some of these settings have a different value in AWX.\r\nRun the task with and without check&diff mode.\r\n\r\n##### EXPECTED RESULTS\r\nExpected at least some insight why the module reports changed status when running with check&diff mode.\r\n\r\nFor eg if AWX server's AUTH_LDAP_SERVER_URI is `ldap://nonexistent.com:389`, then running check&diff mode could produce similar output when running in normal mode:\r\n```\r\nchanged: [localhost] => {\r\n \"changed\": true,\r\n \"invocation\": {\r\n \"module_args\": {\r\n \"settings\": {\r\n \"AUTH_LDAP_SERVER_URI\": \"ldap://ldap.example.com:389\",\r\n \"AUTH_LDAP_BIND_DN\": \"CN=user,DC=example,DC=com\",\r\n \"AUTH_LDAP_BIND_PASSWORD\": \"password\",\r\n \"AUTH_LDAP_START_TLS\": true\r\n },\r\n <...other module args ...>\r\n }\r\n },\r\n \"old_values\": {\r\n \"AUTH_LDAP_SERVER_URI\": \"ldap://nonexistent.com:389\"\r\n },\r\n \"values\": {\r\n \"AUTH_LDAP_SERVER_URI\": \"ldap://ldap.example.com:389\"\r\n }\r\n}\r\n```\r\n\r\n##### ACTUAL RESULTS\r\nRunning the module with check&diff mode produces less usable output regarding the variables to be changed:\r\n```\r\nchanged: [localhost] => {\r\n \"changed\": true,\r\n \"invocation\": {\r\n \"module_args\": {\r\n \"settings\": {\r\n \"AUTH_LDAP_SERVER_URI\": \"ldap://ldap.example.com:389\",\r\n \"AUTH_LDAP_BIND_DN\": \"CN=user,DC=example,DC=com\",\r\n \"AUTH_LDAP_BIND_PASSWORD\": \"password\",\r\n \"AUTH_LDAP_START_TLS\": true\r\n },\r\n <...other module args ...>\r\n }\r\n}\r\n```\r\n##### ADDITIONAL INFORMATION\r\n\r\nAs i see the main reason behind such behaviour is because a [new json_response dict is created](https://github.com/ansible/awx/blob/29926ba5d99b760b32de35e07c211fd3e7691c8d/awx_collection/plugins/modules/tower_settings.py#L136) in the module's code, but the [check_mode exit point](https://github.com/ansible/awx/blob/29926ba5d99b760b32de35e07c211fd3e7691c8d/awx_collection/plugins/module_utils/tower_api.py#L85) uses its own `json_output` dict.\r\n\r\nEdit:\r\nCompletely forgot about diff mode, but ofc thats where i expect to see the detailed changes.\n", "before_files": [{"content": "#!/usr/bin/python\n# coding: utf-8 -*-\n\n# (c) 2018, Nikhil Jain <[email protected]>\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nANSIBLE_METADATA = {'metadata_version': '1.1',\n 'status': ['preview'],\n 'supported_by': 'community'}\n\n\nDOCUMENTATION = '''\n---\nmodule: tower_settings\nauthor: \"Nikhil Jain (@jainnikhil30)\"\nshort_description: Modify Ansible Tower settings.\ndescription:\n - Modify Ansible Tower settings. 
See\n U(https://www.ansible.com/tower) for an overview.\noptions:\n name:\n description:\n - Name of setting to modify\n type: str\n value:\n description:\n - Value to be modified for given setting.\n - If given a non-string type, will make best effort to cast it to type API expects.\n - For better control over types, use the C(settings) param instead.\n type: str\n settings:\n description:\n - A data structure to be sent into the settings endpoint\n type: dict\nrequirements:\n - pyyaml\nextends_documentation_fragment: awx.awx.auth\n'''\n\nEXAMPLES = '''\n- name: Set the value of AWX_PROOT_BASE_PATH\n tower_settings:\n name: AWX_PROOT_BASE_PATH\n value: \"/tmp\"\n register: testing_settings\n\n- name: Set the value of AWX_PROOT_SHOW_PATHS\n tower_settings:\n name: \"AWX_PROOT_SHOW_PATHS\"\n value: \"'/var/lib/awx/projects/', '/tmp'\"\n register: testing_settings\n\n- name: Set the LDAP Auth Bind Password\n tower_settings:\n name: \"AUTH_LDAP_BIND_PASSWORD\"\n value: \"Password\"\n no_log: true\n\n- name: Set all the LDAP Auth Bind Params\n tower_settings:\n settings:\n AUTH_LDAP_BIND_PASSWORD: \"password\"\n AUTH_LDAP_USER_ATTR_MAP:\n email: \"mail\"\n first_name: \"givenName\"\n last_name: \"surname\"\n'''\n\nfrom ..module_utils.tower_api import TowerAPIModule\n\ntry:\n import yaml\n HAS_YAML = True\nexcept ImportError:\n HAS_YAML = False\n\n\ndef coerce_type(module, value):\n # If our value is already None we can just return directly\n if value is None:\n return value\n\n yaml_ish = bool((\n value.startswith('{') and value.endswith('}')\n ) or (\n value.startswith('[') and value.endswith(']'))\n )\n if yaml_ish:\n if not HAS_YAML:\n module.fail_json(msg=\"yaml is not installed, try 'pip install pyyaml'\")\n return yaml.safe_load(value)\n elif value.lower in ('true', 'false', 't', 'f'):\n return {'t': True, 'f': False}[value[0].lower()]\n try:\n return int(value)\n except ValueError:\n pass\n return value\n\n\ndef main():\n # Any additional arguments that are not fields of the item can be added here\n argument_spec = dict(\n name=dict(),\n value=dict(),\n settings=dict(type='dict'),\n )\n\n # Create a module for ourselves\n module = TowerAPIModule(\n argument_spec=argument_spec,\n required_one_of=[['name', 'settings']],\n mutually_exclusive=[['name', 'settings']],\n required_if=[['name', 'present', ['value']]]\n )\n\n # Extract our parameters\n name = module.params.get('name')\n value = module.params.get('value')\n new_settings = module.params.get('settings')\n\n # If we were given a name/value pair we will just make settings out of that and proceed normally\n if new_settings is None:\n new_value = coerce_type(module, value)\n\n new_settings = {name: new_value}\n\n # Load the existing settings\n existing_settings = module.get_endpoint('settings/all')['json']\n\n # Begin a json response\n json_response = {'changed': False, 'old_values': {}}\n\n # Check any of the settings to see if anything needs to be updated\n needs_update = False\n for a_setting in new_settings:\n if a_setting not in existing_settings or existing_settings[a_setting] != new_settings[a_setting]:\n # At least one thing is different so we need to patch\n needs_update = True\n json_response['old_values'][a_setting] = existing_settings[a_setting]\n\n # If nothing needs an update we can simply exit with the response (as not changed)\n if not needs_update:\n module.exit_json(**json_response)\n\n # Make the call to update the settings\n response = module.patch_endpoint('settings/all', **{'data': new_settings})\n\n if 
response['status_code'] == 200:\n # Set the changed response to True\n json_response['changed'] = True\n\n # To deal with the old style values we need to return 'value' in the response\n new_values = {}\n for a_setting in new_settings:\n new_values[a_setting] = response['json'][a_setting]\n\n # If we were using a name we will just add a value of a string, otherwise we will return an array in values\n if name is not None:\n json_response['value'] = new_values[name]\n else:\n json_response['values'] = new_values\n\n module.exit_json(**json_response)\n elif 'json' in response and '__all__' in response['json']:\n module.fail_json(msg=response['json']['__all__'])\n else:\n module.fail_json(**{'msg': \"Unable to update settings, see response\", 'response': response})\n\n\nif __name__ == '__main__':\n main()\n", "path": "awx_collection/plugins/modules/tower_settings.py"}]}
3,165
618
gh_patches_debug_23291
rasdani/github-patches
git_diff
scikit-hep__awkward-3115
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> typing ak.Array for numba.cuda.jit signature ### Version of Awkward Array 2.6.2 ### Description and code to reproduce Hey guys, I followed a hint from the discussion in [#696](https://github.com/scikit-hep/awkward/discussions/696#discussion-2571850) to type `ak.Array` for numba signatures. So I tried something like ```python import awkward as ak import numba as nb from numba import types cpu_arr_type = ak.Array([[[0, 1], [2, 3]], [[4, 5]]], backend='cpu').numba_type @nb.njit(types.void(cpu_arr_type)) def cpu_kernel(arr): do_something_with_arr ``` and this works like a charm. However, I'm interested in the same case but with a cuda kernel. So I tried what appeared more natural to do: ```python gpu_arr_type = ak.Array([[[0, 1], [2, 3]], [[4, 5]]], backend='cuda').numba_type @nb.cuda.jit(types.void(gpu_arr_type), extensions=[ak.numba.cuda]) def cuda_kernel(arr): do_something_with_arr ``` This time, I get the error: ```python self = <awkward._connect.numba.arrayview_cuda.ArrayViewArgHandler object at 0x784afbc13fa0> ty = ak.ArrayView(ak.ListArrayType(array(int64, 1d, C), ak.ListArrayType(array(int64, 1d, C), ak.NumpyArrayType(array(int64, 1d, C), {}), {}), {}), None, ()) val = <Array [[[4, 1], [2, -1]], [...], [[4, 0]]] type='3 * var * var * int64'> stream = 0, retr = [] def prepare_args(self, ty, val, stream, retr): if isinstance(val, ak.Array): if isinstance(val.layout.backend, CupyBackend): # Use uint64 for pos, start, stop, the array pointers values, and the pylookup value tys = numba.types.UniTuple(numba.types.uint64, 5) > start = val._numbaview.start E AttributeError: 'NoneType' object has no attribute 'start' .../site-packages/awkward/_connect/numba/arrayview_cuda.py:21: AttributeError ``` How should this latter case be correctly treated? Note that, without typing, the thing works as expected: ```python @nb.cuda.jit(extensions=[ak.numba.cuda]) def cuda_kernel_no_typing(arr): do_something_with_arr ``` However, I'm interested in `ak.Array`s with the 3D layout of integers (as above) and would like to take advantage of numba's eager compilation. I'm passing the `arr` for testing as ```python backend = 'cpu' # or 'cuda' arr = ak.to_backend( ak.Array([ [[4, 1], [2, -1]], [[0, -1], [1, 1], [3, -1]], [[4, 0]] ]), backend ) ``` Any help is appreciated! 
</issue> <code> [start of src/awkward/_connect/numba/arrayview_cuda.py] 1 # BSD 3-Clause License; see https://github.com/scikit-hep/awkward/blob/main/LICENSE 2 3 from __future__ import annotations 4 5 import numba 6 from numba.core.errors import NumbaTypeError 7 8 import awkward as ak 9 from awkward._backends.cupy import CupyBackend 10 11 ########## ArrayView Arguments Handler for CUDA JIT 12 13 14 class ArrayViewArgHandler: 15 def prepare_args(self, ty, val, stream, retr): 16 if isinstance(val, ak.Array): 17 if isinstance(val.layout.backend, CupyBackend): 18 # Use uint64 for pos, start, stop, the array pointers values, and the pylookup value 19 tys = numba.types.UniTuple(numba.types.uint64, 5) 20 21 start = val._numbaview.start 22 stop = val._numbaview.stop 23 pos = val._numbaview.pos 24 arrayptrs = val._numbaview.lookup.arrayptrs.data.ptr 25 pylookup = 0 26 27 return tys, (pos, start, stop, arrayptrs, pylookup) 28 else: 29 raise NumbaTypeError( 30 '`ak.to_backend` should be called with `backend="cuda"` to put ' 31 "the array on the GPU before using it: " 32 'ak.to_backend(array, backend="cuda")' 33 ) 34 35 else: 36 return ty, val 37 [end of src/awkward/_connect/numba/arrayview_cuda.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/awkward/_connect/numba/arrayview_cuda.py b/src/awkward/_connect/numba/arrayview_cuda.py --- a/src/awkward/_connect/numba/arrayview_cuda.py +++ b/src/awkward/_connect/numba/arrayview_cuda.py @@ -15,13 +15,22 @@ def prepare_args(self, ty, val, stream, retr): if isinstance(val, ak.Array): if isinstance(val.layout.backend, CupyBackend): + if ty is not val.numba_type: + raise NumbaTypeError( + f"the array type: {val.numba_type} does not match " + f"the kernel signature type: {ty}" + ) + # Use uint64 for pos, start, stop, the array pointers values, and the pylookup value tys = numba.types.UniTuple(numba.types.uint64, 5) - start = val._numbaview.start - stop = val._numbaview.stop - pos = val._numbaview.pos - arrayptrs = val._numbaview.lookup.arrayptrs.data.ptr + view = val._numbaview + assert view is not None + + start = view.start + stop = view.stop + pos = view.pos + arrayptrs = view.lookup.arrayptrs.data.ptr pylookup = 0 return tys, (pos, start, stop, arrayptrs, pylookup)
{"golden_diff": "diff --git a/src/awkward/_connect/numba/arrayview_cuda.py b/src/awkward/_connect/numba/arrayview_cuda.py\n--- a/src/awkward/_connect/numba/arrayview_cuda.py\n+++ b/src/awkward/_connect/numba/arrayview_cuda.py\n@@ -15,13 +15,22 @@\n def prepare_args(self, ty, val, stream, retr):\n if isinstance(val, ak.Array):\n if isinstance(val.layout.backend, CupyBackend):\n+ if ty is not val.numba_type:\n+ raise NumbaTypeError(\n+ f\"the array type: {val.numba_type} does not match \"\n+ f\"the kernel signature type: {ty}\"\n+ )\n+\n # Use uint64 for pos, start, stop, the array pointers values, and the pylookup value\n tys = numba.types.UniTuple(numba.types.uint64, 5)\n \n- start = val._numbaview.start\n- stop = val._numbaview.stop\n- pos = val._numbaview.pos\n- arrayptrs = val._numbaview.lookup.arrayptrs.data.ptr\n+ view = val._numbaview\n+ assert view is not None\n+\n+ start = view.start\n+ stop = view.stop\n+ pos = view.pos\n+ arrayptrs = view.lookup.arrayptrs.data.ptr\n pylookup = 0\n \n return tys, (pos, start, stop, arrayptrs, pylookup)\n", "issue": "typing ak.Array for numba.cuda.jit signature\n### Version of Awkward Array\n\n2.6.2\n\n### Description and code to reproduce\n\nHey guys, I followed a hint from the discussion in [#696](https://github.com/scikit-hep/awkward/discussions/696#discussion-2571850) to type `ak.Array` for numba signatures. So I tried something like\r\n\r\n```python\r\nimport awkward as ak\r\nimport numba as nb\r\nfrom numba import types\r\n\r\ncpu_arr_type = ak.Array([[[0, 1], [2, 3]], [[4, 5]]], backend='cpu').numba_type\r\n\r\[email protected](types.void(cpu_arr_type))\r\ndef cpu_kernel(arr):\r\n do_something_with_arr\r\n```\r\nand this works like a charm.\r\n\r\nHowever, I'm interested in the same case but with a cuda kernel. So I tried what appeared more natural to do:\r\n```python\r\ngpu_arr_type = ak.Array([[[0, 1], [2, 3]], [[4, 5]]], backend='cuda').numba_type\r\n\r\[email protected](types.void(gpu_arr_type), extensions=[ak.numba.cuda])\r\ndef cuda_kernel(arr):\r\n do_something_with_arr\r\n```\r\nThis time, I get the error:\r\n```python\r\nself = <awkward._connect.numba.arrayview_cuda.ArrayViewArgHandler object at 0x784afbc13fa0>\r\nty = ak.ArrayView(ak.ListArrayType(array(int64, 1d, C), ak.ListArrayType(array(int64, 1d, C), ak.NumpyArrayType(array(int64, 1d, C), {}), {}), {}), None, ())\r\nval = <Array [[[4, 1], [2, -1]], [...], [[4, 0]]] type='3 * var * var * int64'>\r\nstream = 0, retr = []\r\n\r\n def prepare_args(self, ty, val, stream, retr):\r\n if isinstance(val, ak.Array):\r\n if isinstance(val.layout.backend, CupyBackend):\r\n # Use uint64 for pos, start, stop, the array pointers values, and the pylookup value\r\n tys = numba.types.UniTuple(numba.types.uint64, 5)\r\n \r\n> start = val._numbaview.start\r\nE AttributeError: 'NoneType' object has no attribute 'start'\r\n\r\n.../site-packages/awkward/_connect/numba/arrayview_cuda.py:21: AttributeError\r\n```\r\nHow should this latter case be correctly treated? Note that, without typing, the thing works as expected:\r\n```python\r\[email protected](extensions=[ak.numba.cuda])\r\ndef cuda_kernel_no_typing(arr):\r\n do_something_with_arr\r\n```\r\nHowever, I'm interested in `ak.Array`s with the 3D layout of integers (as above) and would like to take advantage of numba's eager compilation. 
I'm passing the `arr` for testing as\r\n```python\r\nbackend = 'cpu' # or 'cuda'\r\narr = ak.to_backend(\r\n ak.Array([\r\n [[4, 1], [2, -1]],\r\n [[0, -1], [1, 1], [3, -1]],\r\n [[4, 0]]\r\n ]),\r\n backend\r\n)\r\n```\r\nAny help is appreciated!\r\n\n", "before_files": [{"content": "# BSD 3-Clause License; see https://github.com/scikit-hep/awkward/blob/main/LICENSE\n\nfrom __future__ import annotations\n\nimport numba\nfrom numba.core.errors import NumbaTypeError\n\nimport awkward as ak\nfrom awkward._backends.cupy import CupyBackend\n\n########## ArrayView Arguments Handler for CUDA JIT\n\n\nclass ArrayViewArgHandler:\n def prepare_args(self, ty, val, stream, retr):\n if isinstance(val, ak.Array):\n if isinstance(val.layout.backend, CupyBackend):\n # Use uint64 for pos, start, stop, the array pointers values, and the pylookup value\n tys = numba.types.UniTuple(numba.types.uint64, 5)\n\n start = val._numbaview.start\n stop = val._numbaview.stop\n pos = val._numbaview.pos\n arrayptrs = val._numbaview.lookup.arrayptrs.data.ptr\n pylookup = 0\n\n return tys, (pos, start, stop, arrayptrs, pylookup)\n else:\n raise NumbaTypeError(\n '`ak.to_backend` should be called with `backend=\"cuda\"` to put '\n \"the array on the GPU before using it: \"\n 'ak.to_backend(array, backend=\"cuda\")'\n )\n\n else:\n return ty, val\n", "path": "src/awkward/_connect/numba/arrayview_cuda.py"}]}
1,646
342
gh_patches_debug_4491
rasdani/github-patches
git_diff
fossasia__open-event-server-4616
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Generated reset password link is correct but not clickable **I'm submitting a ...** (check one with "x") - [x] bug report - [ ] feature request - [ ] support request => Please do not submit support requests here, instead ask your query in out Gitter channel at https://gitter.im/fossasia/open-event-server **Current behavior:** <!-- Describe how the bug manifests. --> ![image](https://user-images.githubusercontent.com/17252805/35488202-a4284b54-04aa-11e8-981a-f8bf29de8615.png) The link appears as static text in the email. **Expected behavior:** <!-- Describe what the behavior would be without the bug. --> The link should be clickable. </issue> <code> [start of app/api/helpers/system_mails.py] 1 """ 2 All the System mails 3 Register a mail here before using it 4 """ 5 from app.models.mail import INVITE_PAPERS, NEW_SESSION, USER_CONFIRM, \ 6 USER_REGISTER, PASSWORD_RESET, EVENT_ROLE, SESSION_ACCEPT_REJECT, \ 7 SESSION_SCHEDULE, NEXT_EVENT, EVENT_PUBLISH, AFTER_EVENT, USER_CHANGE_EMAIL, USER_REGISTER_WITH_PASSWORD, \ 8 TICKET_PURCHASED, EVENT_EXPORTED, EVENT_EXPORT_FAIL, MAIL_TO_EXPIRED_ORDERS, MONTHLY_PAYMENT_EMAIL, \ 9 MONTHLY_PAYMENT_FOLLOWUP_EMAIL, EVENT_IMPORTED, EVENT_IMPORT_FAIL, TICKET_PURCHASED_ORGANIZER, TICKET_CANCELLED, \ 10 TICKET_PURCHASED_ATTENDEE, PASSWORD_CHANGE 11 12 MAILS = { 13 EVENT_PUBLISH: { 14 'recipient': 'Organizer, Speaker', 15 'subject': u'{event_name} is Live', 16 'message': ( 17 u"Hi {email}<br/>" + 18 u"Event, {event_name}, is up and running and ready for action. Go ahead and check it out." + 19 u"<br/> Visit this link to view it: {link}" 20 ) 21 }, 22 INVITE_PAPERS: { 23 'recipient': 'Speaker', 24 'subject': u'Invitation to Submit Papers for {event_name}', 25 'message': ( 26 u"Hi {email}<br/>" + 27 u"You are invited to submit papers for event: {event_name}" + 28 u"<br/> Visit this link to fill up details: {link}" 29 ) 30 }, 31 SESSION_ACCEPT_REJECT: { 32 'recipient': 'Speaker', 33 'subject': u'Session {session_name} has been {acceptance}', 34 'message': ( 35 u"Hi {email},<br/>" + 36 u"The session <strong>{session_name}</strong> has been <strong>{acceptance}</strong> by the organizer. " + 37 u"<br/> Visit this link to view the session: {link}" 38 ) 39 }, 40 SESSION_SCHEDULE: { 41 'recipient': 'Organizer, Speaker', 42 'subject': u'Schedule for Session {session_name} has been changed', 43 'message': ( 44 u"Hi {email},<br/>" + 45 u"The schedule for session <strong>{session_name}</strong> has been changed. " + 46 u"<br/> Visit this link to view the session: {link}" 47 ) 48 }, 49 NEXT_EVENT: { 50 'recipient': 'Organizer, Speaker', 51 'subject': u'Event {event_name} is coming soon', 52 'message': ( 53 u"Hi {email},<br/>" + 54 u"Here are the upcoming events: {up_coming_events} .Get ready!! " + 55 u"<br/> Visit this link to view the event: {link}" 56 ) 57 }, 58 AFTER_EVENT: { 59 'recipient': 'Organizer, Speaker', 60 'subject': u'Event {event_name} is over', 61 'message': ( 62 u"Hi {email},<br/>" + 63 u"Thank You for participating in our event. We hope you enjoyed it. " 64 u"Please check the list of more upcoming events" + 65 u"Here are the upcoming events: {upcoming_events} .Get ready!! " 66 ), 67 'sent_at': '1 day after the event' 68 }, 69 NEW_SESSION: { 70 'recipient': 'Organizer', 71 'subject': u'New session proposal for {event_name}', 72 'message': ( 73 u"Hi {email},<br/>" + 74 u"The event <strong>{event_name}</strong> has received a new session proposal. 
" + 75 u"<br/> Visit this link to view the session: {link}" 76 ) 77 }, 78 USER_REGISTER: { 79 'recipient': 'User', 80 'subject': u'Account Created on {app_name}', 81 'message': ( 82 u"Your Account Has Been Created! Congratulations!" + 83 u"<br/> Your login: {email}" 84 ) 85 }, 86 USER_REGISTER_WITH_PASSWORD: { 87 'recipient': 'User', 88 'subject': u'Welcome to {app_name}', 89 'message': ( 90 u"Your Account Has Been Created! Congratulations!" + 91 u"<br/> <strong>Your login:</strong><br><strong>Email:</strong> {email}<br>" 92 ) 93 }, 94 USER_CONFIRM: { 95 'recipient': 'User', 96 'subject': u'Email Confirmation to Create Account for Open-Event', 97 'message': ( 98 u"Hi {email},<br/>" + 99 u"Please visit this link to confirm your email: {link}" 100 ) 101 }, 102 USER_CHANGE_EMAIL: { 103 'recipient': 'User', 104 'subject': u'Your email has been already changed', 105 'message': ( 106 u"Hi {email},<br/>" + 107 u"Your email has been already changed from {email} to {new_email}. You should verify your new email" 108 ) 109 }, 110 PASSWORD_RESET: { 111 'recipient': 'User', 112 'subject': u'{app_name}: Password Reset', 113 'message': ( 114 u"Please use the following link to reset your password.<br> {link}" 115 ) 116 }, 117 PASSWORD_CHANGE: { 118 'recipient': 'User', 119 'subject': u'{app_name}: Password Change', 120 'message': ( 121 u"Your password has been successfully changed. Please login with your new password." 122 ) 123 }, 124 EVENT_ROLE: { 125 'recipient': 'User', 126 'subject': u'Invitation to be {role} at {event}', 127 'message': ( 128 u"Hello {email},<br><br>" + 129 u"You've been invited to be a <strong>{role}</strong> at <strong>{event}</strong>.<br>" + 130 u"To accept the role please sign up using the following link: <a href='{link}' target='_blank'>Link</a>." 131 ) 132 }, 133 TICKET_PURCHASED: { 134 'recipient': 'User', 135 'subject': u'Your order invoice and tickets for {event_name} ({invoice_id}) ', 136 'message': ( 137 u"Hi, this is a confirmation mail of your tickets for the event {event_name}" 138 u"<br/>Your order has been processed successfully." + 139 u"<br/> <a href='{pdf_url}'>Click here</a> to view/download your invoice." 140 u"<br><br><em>Looking forward to seeing you at the event." 141 u"<br/>Login to manage your orders at https://eventyay.com </em>" 142 ) 143 }, 144 TICKET_PURCHASED_ATTENDEE: { 145 'recipient': 'Attendee', 146 'subject': u'Your tickets for {event_name} ({invoice_id}) ', 147 'message': ( 148 u"Hi, this is a confirmation mail of your tickets for the event {event_name}" 149 u"<br/>Your order has been processed successfully." + 150 u"<br/> <a href='{pdf_url}'>Click here</a> to view/download your ticket." 151 u"<br><br><em>Looking forward to seeing you at the event." 152 ) 153 }, 154 155 TICKET_PURCHASED_ORGANIZER: { 156 'recipient': 'Organizer, Coorganizer', 157 'subject': u'New ticket purchase for {event_name} by {buyer_email} ({invoice_id}) ', 158 'message': ( 159 u"Hi, {buyer_email} just bought tickets for the event {event_name}" 160 u"<br/>The order has been processed successfully." + 161 u"<br/> <a href='{order_url}'>Click here</a> to view/download the invoice." 
162 u"<br/>Login to manage the orders at https://eventyay.com </em>" 163 ) 164 }, 165 TICKET_CANCELLED: { 166 'recipient': 'User', 167 'subject': u'Your order for {event_name} has been cancelled ({invoice_id})', 168 'message': ( 169 u"Hi,Your order for {event_name} has been cancelled has been cancelled by the organizer" 170 u"<br/>Please contact the organizer for more info" + 171 u"<br/>Message from the organizer: {cancel_note}" 172 u"<br/> <a href='{order_url}'>Click here</a> to view/download the invoice." 173 u"<br/>Login to manage the orders at https://eventyay.com </em>" 174 ) 175 }, 176 EVENT_EXPORTED: { 177 'recipient': 'User', 178 'subject': u'Event {event_name} has been exported', 179 'message': ( 180 u"Click on the following link to download the event." + 181 u"<br> <a href='{download_url}'>Download</a>" 182 ) 183 }, 184 EVENT_EXPORT_FAIL: { 185 'recipient': 'User', 186 'subject': u'Export of event {event_name} failed', 187 'message': ( 188 u"The error was as follows - <br>" + 189 u"<pre>{error_text}</pre>" 190 ) 191 }, 192 MAIL_TO_EXPIRED_ORDERS: { 193 'recipient': 'User', 194 'subject': u'Tickets for {event_name} are still available ', 195 'message': ( 196 u"This is just a gentle reminder that the payment for your order {invoice_id} is still left." + 197 u"<br/> The tickets for this event are still available. <a href='{order_url}'>Click here</a> to " 198 u"purchase your ticket for this event." 199 u"<br><br><em>Looking forward to seeing you at the event.</em>" 200 ) 201 }, 202 MONTHLY_PAYMENT_EMAIL: { 203 'recipient': 'Organizer', 204 'subject': u'{date} - Monthly service fee invoice for {event_name}', 205 'message': ( 206 u"The total service fee for the ticket sales of {event_name} in the month of {date} is {amount}." + 207 u"<br/> That payment for the same has to be made in two weeks. <a href='{payment_url}'>Click here</a> to " 208 u"view your invoice and complete the payment." 209 u"<br><br><em>Thank you for using {app_name}.</em>" 210 ), 211 'sent_at': '1st day of the month' 212 }, 213 MONTHLY_PAYMENT_FOLLOWUP_EMAIL: { 214 'recipient': 'Organizer', 215 'subject': u'Past Due: {date} - Monthly service fee invoice for {event_name}', 216 'message': ( 217 u"The total service fee for the ticket sales of {event_name} in the month of {date} is {amount}." + 218 u"<br/> That payment for the same is past the due date. <a href='{payment_url}'>Click here</a> to " 219 u"view your invoice and complete the payment to prevent loss of functionality." 220 u"<br><br><em>Thank you for using {app_name}.</em>" 221 ), 222 'sent_at': '15th day of the month' 223 }, 224 EVENT_IMPORTED: { 225 'recipient': 'User', 226 'subject': u'Event {event_name} has been imported', 227 'message': ( 228 u"Click on the following link to manage your event" + 229 u"<br> <a href='{event_url}'>Link</a>" 230 ) 231 }, 232 EVENT_IMPORT_FAIL: { 233 'recipient': 'User', 234 'subject': u'Import of event failed', 235 'message': ( 236 u"The error was as follows - <br>" + 237 u"<pre>{error_text}</pre>" 238 ) 239 } 240 } 241 [end of app/api/helpers/system_mails.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/app/api/helpers/system_mails.py b/app/api/helpers/system_mails.py --- a/app/api/helpers/system_mails.py +++ b/app/api/helpers/system_mails.py @@ -111,7 +111,7 @@ 'recipient': 'User', 'subject': u'{app_name}: Password Reset', 'message': ( - u"Please use the following link to reset your password.<br> {link}" + u"Please use the following link to reset your password.<br> <a href='{link}' target='_blank'>{link}</a>" ) }, PASSWORD_CHANGE: {
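The patch only touches the PASSWORD_RESET template string; the rest of the mail pipeline is unchanged. A minimal sketch of why the wrapping matters, assuming the placeholder is filled with str.format() and the body is delivered as HTML (the reset URL below is made up):

```python
# Minimal sketch, not the project's real mail-sending code: the MAILS entries
# are plain format strings rendered into an HTML email body, so the link only
# becomes clickable once it sits inside an <a> tag.
old_template = u"Please use the following link to reset your password.<br> {link}"
new_template = (u"Please use the following link to reset your password.<br> "
                u"<a href='{link}' target='_blank'>{link}</a>")

link = "https://example.com/reset-password?token=abc123"  # made-up reset URL

print(old_template.format(link=link))  # URL shows up as static text in the body
print(new_template.format(link=link))  # URL is rendered as a clickable hyperlink
```

Mail clients render the second body as a hyperlink because the URL is inside an anchor element rather than bare text after the `<br>`.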
{"golden_diff": "diff --git a/app/api/helpers/system_mails.py b/app/api/helpers/system_mails.py\n--- a/app/api/helpers/system_mails.py\n+++ b/app/api/helpers/system_mails.py\n@@ -111,7 +111,7 @@\n 'recipient': 'User',\n 'subject': u'{app_name}: Password Reset',\n 'message': (\n- u\"Please use the following link to reset your password.<br> {link}\"\n+ u\"Please use the following link to reset your password.<br> <a href='{link}' target='_blank'>{link}</a>\"\n )\n },\n PASSWORD_CHANGE: {\n", "issue": "Generated reset password link is correct but not clickable\n**I'm submitting a ...** (check one with \"x\")\r\n- [x] bug report\r\n- [ ] feature request\r\n- [ ] support request => Please do not submit support requests here, instead ask your query in out Gitter channel at https://gitter.im/fossasia/open-event-server\r\n\r\n**Current behavior:**\r\n<!-- Describe how the bug manifests. -->\r\n![image](https://user-images.githubusercontent.com/17252805/35488202-a4284b54-04aa-11e8-981a-f8bf29de8615.png)\r\nThe link appears as static text in the email.\r\n**Expected behavior:**\r\n<!-- Describe what the behavior would be without the bug. -->\r\nThe link should be clickable.\r\n\n", "before_files": [{"content": "\"\"\"\nAll the System mails\nRegister a mail here before using it\n\"\"\"\nfrom app.models.mail import INVITE_PAPERS, NEW_SESSION, USER_CONFIRM, \\\n USER_REGISTER, PASSWORD_RESET, EVENT_ROLE, SESSION_ACCEPT_REJECT, \\\n SESSION_SCHEDULE, NEXT_EVENT, EVENT_PUBLISH, AFTER_EVENT, USER_CHANGE_EMAIL, USER_REGISTER_WITH_PASSWORD, \\\n TICKET_PURCHASED, EVENT_EXPORTED, EVENT_EXPORT_FAIL, MAIL_TO_EXPIRED_ORDERS, MONTHLY_PAYMENT_EMAIL, \\\n MONTHLY_PAYMENT_FOLLOWUP_EMAIL, EVENT_IMPORTED, EVENT_IMPORT_FAIL, TICKET_PURCHASED_ORGANIZER, TICKET_CANCELLED, \\\n TICKET_PURCHASED_ATTENDEE, PASSWORD_CHANGE\n\nMAILS = {\n EVENT_PUBLISH: {\n 'recipient': 'Organizer, Speaker',\n 'subject': u'{event_name} is Live',\n 'message': (\n u\"Hi {email}<br/>\" +\n u\"Event, {event_name}, is up and running and ready for action. Go ahead and check it out.\" +\n u\"<br/> Visit this link to view it: {link}\"\n )\n },\n INVITE_PAPERS: {\n 'recipient': 'Speaker',\n 'subject': u'Invitation to Submit Papers for {event_name}',\n 'message': (\n u\"Hi {email}<br/>\" +\n u\"You are invited to submit papers for event: {event_name}\" +\n u\"<br/> Visit this link to fill up details: {link}\"\n )\n },\n SESSION_ACCEPT_REJECT: {\n 'recipient': 'Speaker',\n 'subject': u'Session {session_name} has been {acceptance}',\n 'message': (\n u\"Hi {email},<br/>\" +\n u\"The session <strong>{session_name}</strong> has been <strong>{acceptance}</strong> by the organizer. \" +\n u\"<br/> Visit this link to view the session: {link}\"\n )\n },\n SESSION_SCHEDULE: {\n 'recipient': 'Organizer, Speaker',\n 'subject': u'Schedule for Session {session_name} has been changed',\n 'message': (\n u\"Hi {email},<br/>\" +\n u\"The schedule for session <strong>{session_name}</strong> has been changed. \" +\n u\"<br/> Visit this link to view the session: {link}\"\n )\n },\n NEXT_EVENT: {\n 'recipient': 'Organizer, Speaker',\n 'subject': u'Event {event_name} is coming soon',\n 'message': (\n u\"Hi {email},<br/>\" +\n u\"Here are the upcoming events: {up_coming_events} .Get ready!! \" +\n u\"<br/> Visit this link to view the event: {link}\"\n )\n },\n AFTER_EVENT: {\n 'recipient': 'Organizer, Speaker',\n 'subject': u'Event {event_name} is over',\n 'message': (\n u\"Hi {email},<br/>\" +\n u\"Thank You for participating in our event. We hope you enjoyed it. 
\"\n u\"Please check the list of more upcoming events\" +\n u\"Here are the upcoming events: {upcoming_events} .Get ready!! \"\n ),\n 'sent_at': '1 day after the event'\n },\n NEW_SESSION: {\n 'recipient': 'Organizer',\n 'subject': u'New session proposal for {event_name}',\n 'message': (\n u\"Hi {email},<br/>\" +\n u\"The event <strong>{event_name}</strong> has received a new session proposal. \" +\n u\"<br/> Visit this link to view the session: {link}\"\n )\n },\n USER_REGISTER: {\n 'recipient': 'User',\n 'subject': u'Account Created on {app_name}',\n 'message': (\n u\"Your Account Has Been Created! Congratulations!\" +\n u\"<br/> Your login: {email}\"\n )\n },\n USER_REGISTER_WITH_PASSWORD: {\n 'recipient': 'User',\n 'subject': u'Welcome to {app_name}',\n 'message': (\n u\"Your Account Has Been Created! Congratulations!\" +\n u\"<br/> <strong>Your login:</strong><br><strong>Email:</strong> {email}<br>\"\n )\n },\n USER_CONFIRM: {\n 'recipient': 'User',\n 'subject': u'Email Confirmation to Create Account for Open-Event',\n 'message': (\n u\"Hi {email},<br/>\" +\n u\"Please visit this link to confirm your email: {link}\"\n )\n },\n USER_CHANGE_EMAIL: {\n 'recipient': 'User',\n 'subject': u'Your email has been already changed',\n 'message': (\n u\"Hi {email},<br/>\" +\n u\"Your email has been already changed from {email} to {new_email}. You should verify your new email\"\n )\n },\n PASSWORD_RESET: {\n 'recipient': 'User',\n 'subject': u'{app_name}: Password Reset',\n 'message': (\n u\"Please use the following link to reset your password.<br> {link}\"\n )\n },\n PASSWORD_CHANGE: {\n 'recipient': 'User',\n 'subject': u'{app_name}: Password Change',\n 'message': (\n u\"Your password has been successfully changed. Please login with your new password.\"\n )\n },\n EVENT_ROLE: {\n 'recipient': 'User',\n 'subject': u'Invitation to be {role} at {event}',\n 'message': (\n u\"Hello {email},<br><br>\" +\n u\"You've been invited to be a <strong>{role}</strong> at <strong>{event}</strong>.<br>\" +\n u\"To accept the role please sign up using the following link: <a href='{link}' target='_blank'>Link</a>.\"\n )\n },\n TICKET_PURCHASED: {\n 'recipient': 'User',\n 'subject': u'Your order invoice and tickets for {event_name} ({invoice_id}) ',\n 'message': (\n u\"Hi, this is a confirmation mail of your tickets for the event {event_name}\"\n u\"<br/>Your order has been processed successfully.\" +\n u\"<br/> <a href='{pdf_url}'>Click here</a> to view/download your invoice.\"\n u\"<br><br><em>Looking forward to seeing you at the event.\"\n u\"<br/>Login to manage your orders at https://eventyay.com </em>\"\n )\n },\n TICKET_PURCHASED_ATTENDEE: {\n 'recipient': 'Attendee',\n 'subject': u'Your tickets for {event_name} ({invoice_id}) ',\n 'message': (\n u\"Hi, this is a confirmation mail of your tickets for the event {event_name}\"\n u\"<br/>Your order has been processed successfully.\" +\n u\"<br/> <a href='{pdf_url}'>Click here</a> to view/download your ticket.\"\n u\"<br><br><em>Looking forward to seeing you at the event.\"\n )\n },\n\n TICKET_PURCHASED_ORGANIZER: {\n 'recipient': 'Organizer, Coorganizer',\n 'subject': u'New ticket purchase for {event_name} by {buyer_email} ({invoice_id}) ',\n 'message': (\n u\"Hi, {buyer_email} just bought tickets for the event {event_name}\"\n u\"<br/>The order has been processed successfully.\" +\n u\"<br/> <a href='{order_url}'>Click here</a> to view/download the invoice.\"\n u\"<br/>Login to manage the orders at https://eventyay.com </em>\"\n )\n },\n TICKET_CANCELLED: {\n 
'recipient': 'User',\n 'subject': u'Your order for {event_name} has been cancelled ({invoice_id})',\n 'message': (\n u\"Hi,Your order for {event_name} has been cancelled has been cancelled by the organizer\"\n u\"<br/>Please contact the organizer for more info\" +\n u\"<br/>Message from the organizer: {cancel_note}\"\n u\"<br/> <a href='{order_url}'>Click here</a> to view/download the invoice.\"\n u\"<br/>Login to manage the orders at https://eventyay.com </em>\"\n )\n },\n EVENT_EXPORTED: {\n 'recipient': 'User',\n 'subject': u'Event {event_name} has been exported',\n 'message': (\n u\"Click on the following link to download the event.\" +\n u\"<br> <a href='{download_url}'>Download</a>\"\n )\n },\n EVENT_EXPORT_FAIL: {\n 'recipient': 'User',\n 'subject': u'Export of event {event_name} failed',\n 'message': (\n u\"The error was as follows - <br>\" +\n u\"<pre>{error_text}</pre>\"\n )\n },\n MAIL_TO_EXPIRED_ORDERS: {\n 'recipient': 'User',\n 'subject': u'Tickets for {event_name} are still available ',\n 'message': (\n u\"This is just a gentle reminder that the payment for your order {invoice_id} is still left.\" +\n u\"<br/> The tickets for this event are still available. <a href='{order_url}'>Click here</a> to \"\n u\"purchase your ticket for this event.\"\n u\"<br><br><em>Looking forward to seeing you at the event.</em>\"\n )\n },\n MONTHLY_PAYMENT_EMAIL: {\n 'recipient': 'Organizer',\n 'subject': u'{date} - Monthly service fee invoice for {event_name}',\n 'message': (\n u\"The total service fee for the ticket sales of {event_name} in the month of {date} is {amount}.\" +\n u\"<br/> That payment for the same has to be made in two weeks. <a href='{payment_url}'>Click here</a> to \"\n u\"view your invoice and complete the payment.\"\n u\"<br><br><em>Thank you for using {app_name}.</em>\"\n ),\n 'sent_at': '1st day of the month'\n },\n MONTHLY_PAYMENT_FOLLOWUP_EMAIL: {\n 'recipient': 'Organizer',\n 'subject': u'Past Due: {date} - Monthly service fee invoice for {event_name}',\n 'message': (\n u\"The total service fee for the ticket sales of {event_name} in the month of {date} is {amount}.\" +\n u\"<br/> That payment for the same is past the due date. <a href='{payment_url}'>Click here</a> to \"\n u\"view your invoice and complete the payment to prevent loss of functionality.\"\n u\"<br><br><em>Thank you for using {app_name}.</em>\"\n ),\n 'sent_at': '15th day of the month'\n },\n EVENT_IMPORTED: {\n 'recipient': 'User',\n 'subject': u'Event {event_name} has been imported',\n 'message': (\n u\"Click on the following link to manage your event\" +\n u\"<br> <a href='{event_url}'>Link</a>\"\n )\n },\n EVENT_IMPORT_FAIL: {\n 'recipient': 'User',\n 'subject': u'Import of event failed',\n 'message': (\n u\"The error was as follows - <br>\" +\n u\"<pre>{error_text}</pre>\"\n )\n }\n}\n", "path": "app/api/helpers/system_mails.py"}]}
3,812
136
gh_patches_debug_21885
rasdani/github-patches
git_diff
numba__numba-3578
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> simulate bug func_or_sig vs fn_or_sig named parameter There seems to be a difference in the named parameter func_or_sig/fn_or_sig between the cuda.jit() in the simulator vs gpu code. </issue> <code> [start of numba/cuda/simulator/api.py] 1 ''' 2 Contains CUDA API functions 3 ''' 4 from __future__ import absolute_import 5 6 from contextlib import contextmanager 7 from .cudadrv.devices import require_context, reset, gpus 8 from .kernel import FakeCUDAKernel 9 from numba.typing import Signature 10 from warnings import warn 11 from ..args import In, Out, InOut 12 13 14 def select_device(dev=0): 15 assert dev == 0, 'Only a single device supported by the simulator' 16 17 18 class stream(object): 19 ''' 20 The stream API is supported in the simulator - however, all execution 21 occurs synchronously, so synchronization requires no operation. 22 ''' 23 @contextmanager 24 def auto_synchronize(self): 25 yield 26 27 def synchronize(self): 28 pass 29 30 31 def synchronize(): 32 pass 33 34 def close(): 35 gpus.closed = True 36 37 38 def declare_device(*args, **kwargs): 39 pass 40 41 42 def detect(): 43 print('Found 1 CUDA devices') 44 print('id %d %20s %40s' % (0, 'SIMULATOR', '[SUPPORTED]')) 45 print('%40s: 5.2' % 'compute capability') 46 47 48 def list_devices(): 49 return gpus 50 51 52 # Events 53 54 class Event(object): 55 ''' 56 The simulator supports the event API, but they do not record timing info, 57 and all simulation is synchronous. Execution time is not recorded. 58 ''' 59 def record(self, stream=0): 60 pass 61 62 def wait(self, stream=0): 63 pass 64 65 def synchronize(self): 66 pass 67 68 def elapsed_time(self, event): 69 warn('Simulator timings are bogus') 70 return 0.0 71 72 event = Event 73 74 75 def jit(fn_or_sig=None, device=False, debug=False, argtypes=None, inline=False, restype=None, 76 fastmath=False, link=None): 77 if link is not None: 78 raise NotImplementedError('Cannot link PTX in the simulator') 79 # Check for first argument specifying types - in that case the 80 # decorator is not being passed a function 81 if fn_or_sig is None or isinstance(fn_or_sig, (str, tuple, Signature)): 82 def jitwrapper(fn): 83 return FakeCUDAKernel(fn, 84 device=device, 85 fastmath=fastmath) 86 return jitwrapper 87 return FakeCUDAKernel(fn_or_sig, device=device) 88 89 autojit = jit 90 91 92 @contextmanager 93 def defer_cleanup(): 94 # No effect for simulator 95 yield 96 [end of numba/cuda/simulator/api.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/numba/cuda/simulator/api.py b/numba/cuda/simulator/api.py --- a/numba/cuda/simulator/api.py +++ b/numba/cuda/simulator/api.py @@ -72,19 +72,19 @@ event = Event -def jit(fn_or_sig=None, device=False, debug=False, argtypes=None, inline=False, restype=None, - fastmath=False, link=None): +def jit(func_or_sig=None, device=False, debug=False, argtypes=None, + inline=False, restype=None, fastmath=False, link=None): if link is not None: raise NotImplementedError('Cannot link PTX in the simulator') # Check for first argument specifying types - in that case the # decorator is not being passed a function - if fn_or_sig is None or isinstance(fn_or_sig, (str, tuple, Signature)): + if func_or_sig is None or isinstance(func_or_sig, (str, tuple, Signature)): def jitwrapper(fn): return FakeCUDAKernel(fn, device=device, fastmath=fastmath) return jitwrapper - return FakeCUDAKernel(fn_or_sig, device=device) + return FakeCUDAKernel(func_or_sig, device=device) autojit = jit
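With the parameter renamed, decorator calls written against the real numba.cuda.jit keyword also run under the simulator. A rough usage sketch, assuming the simulator is enabled (for example with NUMBA_ENABLE_CUDASIM=1); the kernel itself is made up for illustration:

```python
# Before the patch this raised TypeError under the simulator, because its jit()
# only knew the keyword fn_or_sig; the real cuda.jit() uses func_or_sig.
import numpy as np
from numba import cuda

@cuda.jit(func_or_sig="void(float32[:])")
def increment(arr):
    i = cuda.grid(1)
    if i < arr.size:
        arr[i] += 1.0

data = np.zeros(8, dtype=np.float32)
increment[1, 8](data)
print(data)  # all ones, whether run on the simulator or on a real device
```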
{"golden_diff": "diff --git a/numba/cuda/simulator/api.py b/numba/cuda/simulator/api.py\n--- a/numba/cuda/simulator/api.py\n+++ b/numba/cuda/simulator/api.py\n@@ -72,19 +72,19 @@\n event = Event\n \n \n-def jit(fn_or_sig=None, device=False, debug=False, argtypes=None, inline=False, restype=None,\n- fastmath=False, link=None):\n+def jit(func_or_sig=None, device=False, debug=False, argtypes=None,\n+ inline=False, restype=None, fastmath=False, link=None):\n if link is not None:\n raise NotImplementedError('Cannot link PTX in the simulator')\n # Check for first argument specifying types - in that case the\n # decorator is not being passed a function\n- if fn_or_sig is None or isinstance(fn_or_sig, (str, tuple, Signature)):\n+ if func_or_sig is None or isinstance(func_or_sig, (str, tuple, Signature)):\n def jitwrapper(fn):\n return FakeCUDAKernel(fn,\n device=device,\n fastmath=fastmath)\n return jitwrapper\n- return FakeCUDAKernel(fn_or_sig, device=device)\n+ return FakeCUDAKernel(func_or_sig, device=device)\n \n autojit = jit\n", "issue": "simulate bug func_or_sig vs fn_or_sig named parameter\nThere seems to be a difference in the named parameter func_or_sig/fn_or_sig between the cuda.jit() in the simulator vs gpu code. \n", "before_files": [{"content": "'''\nContains CUDA API functions\n'''\nfrom __future__ import absolute_import\n\nfrom contextlib import contextmanager\nfrom .cudadrv.devices import require_context, reset, gpus\nfrom .kernel import FakeCUDAKernel\nfrom numba.typing import Signature\nfrom warnings import warn\nfrom ..args import In, Out, InOut\n\n\ndef select_device(dev=0):\n assert dev == 0, 'Only a single device supported by the simulator'\n\n\nclass stream(object):\n '''\n The stream API is supported in the simulator - however, all execution\n occurs synchronously, so synchronization requires no operation.\n '''\n @contextmanager\n def auto_synchronize(self):\n yield\n\n def synchronize(self):\n pass\n\n\ndef synchronize():\n pass\n\ndef close():\n gpus.closed = True\n\n\ndef declare_device(*args, **kwargs):\n pass\n\n\ndef detect():\n print('Found 1 CUDA devices')\n print('id %d %20s %40s' % (0, 'SIMULATOR', '[SUPPORTED]'))\n print('%40s: 5.2' % 'compute capability')\n\n\ndef list_devices():\n return gpus\n\n\n# Events\n\nclass Event(object):\n '''\n The simulator supports the event API, but they do not record timing info,\n and all simulation is synchronous. Execution time is not recorded.\n '''\n def record(self, stream=0):\n pass\n\n def wait(self, stream=0):\n pass\n\n def synchronize(self):\n pass\n\n def elapsed_time(self, event):\n warn('Simulator timings are bogus')\n return 0.0\n\nevent = Event\n\n\ndef jit(fn_or_sig=None, device=False, debug=False, argtypes=None, inline=False, restype=None,\n fastmath=False, link=None):\n if link is not None:\n raise NotImplementedError('Cannot link PTX in the simulator')\n # Check for first argument specifying types - in that case the\n # decorator is not being passed a function\n if fn_or_sig is None or isinstance(fn_or_sig, (str, tuple, Signature)):\n def jitwrapper(fn):\n return FakeCUDAKernel(fn,\n device=device,\n fastmath=fastmath)\n return jitwrapper\n return FakeCUDAKernel(fn_or_sig, device=device)\n\nautojit = jit\n\n\n@contextmanager\ndef defer_cleanup():\n # No effect for simulator\n yield\n", "path": "numba/cuda/simulator/api.py"}]}
1,304
286
gh_patches_debug_31503
rasdani/github-patches
git_diff
translate__pootle-3631
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add default project descriptions In order to get `initdb` running on `core` we had to drop the project [descriptions] since these move to staticpages on core. This bug is to add those back. To achieve that initdb should create static pages for these default projects that contain the content previously held in the [descriptions](https://github.com/translate/pootle/blob/96edf539/pootle/core/initdb.py#L300-L306). </issue> <code> [start of pootle/core/initdb.py] 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 # 4 # Copyright 2013 Zuza Software Foundation 5 # Copyright 2014-2015 Evernote Corporation 6 # 7 # This file is part of Pootle. 8 # 9 # Pootle is free software; you can redistribute it and/or modify it under the 10 # terms of the GNU General Public License as published by the Free Software 11 # Foundation; either version 2 of the License, or (at your option) any later 12 # version. 13 # 14 # Pootle is distributed in the hope that it will be useful, but WITHOUT ANY 15 # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR 16 # A PARTICULAR PURPOSE. See the GNU General Public License for more details. 17 # 18 # You should have received a copy of the GNU General Public License along with 19 # Pootle; if not, see <http://www.gnu.org/licenses/>. 20 21 22 from django.contrib.auth import get_user_model 23 from django.contrib.auth.models import Permission 24 from django.contrib.contenttypes.models import ContentType 25 from django.utils.translation import ugettext_noop as _ 26 27 from pootle.core.models import Revision 28 from pootle_app.models import Directory 29 from pootle_app.models.permissions import PermissionSet, get_pootle_permission 30 from pootle_language.models import Language 31 from pootle_project.models import Project 32 33 34 def initdb(): 35 """Populate the database with default initial data. 36 37 This creates the default database to get a working Pootle installation. 38 """ 39 create_revision() 40 create_essential_users() 41 create_root_directories() 42 create_template_languages() 43 create_terminology_project() 44 create_pootle_permissions() 45 create_pootle_permission_sets() 46 47 create_default_projects() 48 create_default_languages() 49 create_default_admin() 50 51 52 def create_revision(): 53 Revision.initialize() 54 55 56 def create_essential_users(): 57 """Create the 'default' and 'nobody' User instances. 58 59 These users are required for Pootle's permission system. 60 """ 61 User = get_user_model() 62 63 # The nobody user is used to represent an anonymous user in cases where 64 # we need to associate model information with such a user. An example is 65 # in the permission system: we need a way to store rights for anonymous 66 # users; thus we use the nobody user. 67 criteria = { 68 'username': u"nobody", 69 'full_name': u"any anonymous user", 70 'is_active': True, 71 } 72 nobody, created = User.objects.get_or_create(**criteria) 73 if created: 74 nobody.set_unusable_password() 75 nobody.save() 76 77 # The 'default' user represents any valid, non-anonymous user and is used 78 # to associate information any such user. An example is in the permission 79 # system: we need a way to store default rights for users. We use the 80 # 'default' user for this. 81 # 82 # In a future version of Pootle we should think about using Django's 83 # groups to do better permissions handling. 
84 criteria = { 85 'username': u"default", 86 'full_name': u"any authenticated user", 87 'is_active': True, 88 } 89 default, created = User.objects.get_or_create(**criteria) 90 if created: 91 default.set_unusable_password() 92 default.save() 93 94 # The system user represents a system, and is used to 95 # associate updates done by bulk commands as update_stores. 96 criteria = { 97 'username': u"system", 98 'full_name': u"system user", 99 'is_active': True, 100 } 101 system, created = User.objects.get_or_create(**criteria) 102 if created: 103 system.set_unusable_password() 104 system.save() 105 106 107 def create_pootle_permissions(): 108 """Create Pootle's directory level permissions.""" 109 110 args = { 111 'app_label': "pootle_app", 112 'model': "directory", 113 } 114 pootle_content_type, created = ContentType.objects.get_or_create(**args) 115 pootle_content_type.name = 'pootle' 116 pootle_content_type.save() 117 118 # Create the permissions. 119 permissions = [ 120 { 121 'name': _("Can access a project"), 122 'codename': "view", 123 }, 124 { 125 'name': _("Cannot access a project"), 126 'codename': "hide", 127 }, 128 { 129 'name': _("Can make a suggestion for a translation"), 130 'codename': "suggest", 131 }, 132 { 133 'name': _("Can submit a translation"), 134 'codename': "translate", 135 }, 136 { 137 'name': _("Can review translations"), 138 'codename': "review", 139 }, 140 { 141 'name': _("Can administrate a translation project"), 142 'codename': "administrate", 143 }, 144 ] 145 146 criteria = { 147 'content_type': pootle_content_type, 148 } 149 150 for permission in permissions: 151 criteria.update(permission) 152 obj, created = Permission.objects.get_or_create(**criteria) 153 154 155 def create_pootle_permission_sets(): 156 """Create the default permission set for the 'nobody' and 'default' users. 157 158 'nobody' is the anonymous (non-logged in) user, and 'default' is the logged 159 in user. 160 """ 161 User = get_user_model() 162 163 nobody = User.objects.get(username='nobody') 164 default = User.objects.get(username='default') 165 166 view = get_pootle_permission('view') 167 suggest = get_pootle_permission('suggest') 168 translate = get_pootle_permission('translate') 169 170 # Default permissions for tree root. 171 criteria = { 172 'user': nobody, 173 'directory': Directory.objects.root, 174 } 175 permission_set, created = PermissionSet.objects.get_or_create(**criteria) 176 if created: 177 permission_set.positive_permissions = [view, suggest] 178 permission_set.save() 179 180 criteria['user'] = default 181 permission_set, created = PermissionSet.objects.get_or_create(**criteria) 182 if created: 183 permission_set.positive_permissions = [view, suggest, translate] 184 permission_set.save() 185 186 # Default permissions for templates language. 187 # Override with no permissions for templates language. 
188 criteria = { 189 'user': nobody, 190 'directory': Directory.objects.get(pootle_path="/templates/"), 191 } 192 permission_set, created = PermissionSet.objects.get_or_create(**criteria) 193 if created: 194 permission_set.positive_permissions = [] 195 permission_set.save() 196 197 criteria['user'] = default 198 permission_set, created = PermissionSet.objects.get_or_create(**criteria) 199 if created: 200 permission_set.positive_permissions = [] 201 permission_set.save() 202 203 204 def require_english(): 205 """Create the English Language item.""" 206 criteria = { 207 'code': "en", 208 'fullname': u"English", 209 'nplurals': 2, 210 'pluralequation': "(n != 1)", 211 } 212 en, created = Language.objects.get_or_create(**criteria) 213 return en 214 215 216 def create_root_directories(): 217 """Create the root Directory items.""" 218 root, created = Directory.objects.get_or_create(name='') 219 projects, created = Directory.objects.get_or_create(name='projects', 220 parent=root) 221 222 223 def create_template_languages(): 224 """Create the 'templates' and English languages. 225 226 The 'templates' language is used to give users access to the untranslated 227 template files. 228 """ 229 templates, created = Language.objects.get_or_create(code="templates", 230 fullname=u'Templates') 231 require_english() 232 233 234 def create_terminology_project(): 235 """Create the terminology project. 236 237 The terminology project is used to display terminology suggestions while 238 translating. 239 """ 240 criteria = { 241 'code': "terminology", 242 'fullname': u"Terminology", 243 'source_language': require_english(), 244 'checkstyle': "terminology", 245 } 246 terminology, created = Project.objects.get_or_create(**criteria) 247 248 249 def create_default_projects(): 250 """Create the default projects that we host. 251 252 You might want to add your projects here, although you can also add things 253 through the web interface later. 254 """ 255 from pootle_project.models import Project 256 257 en = require_english() 258 259 #criteria = { 260 # 'code': u"pootle", 261 # 'source_language': en, 262 # 'fullname': u"Pootle", 263 # 'description': ('<div dir="ltr" lang="en">Interface translations for ' 264 # 'Pootle.<br />See the <a href="http://' 265 # 'pootle.locamotion.org">official Pootle server</a> ' 266 # 'for the translations of Pootle.</div>') 267 # 'checkstyle': "standard", 268 # 'localfiletype': "po", 269 # 'treestyle': "auto", 270 #} 271 #pootle = Project(**criteria) 272 #pootle.save() 273 274 criteria = { 275 'code': u"tutorial", 276 'source_language': en, 277 'fullname': u"Tutorial", 278 'checkstyle': "standard", 279 'localfiletype': "po", 280 'treestyle': "auto", 281 } 282 tutorial = Project(**criteria) 283 tutorial.save() 284 285 286 def create_default_languages(): 287 """Create the default languages.""" 288 from translate.lang import data, factory 289 290 from pootle_language.models import Language 291 292 # import languages from toolkit 293 for code in data.languages.keys(): 294 try: 295 tk_lang = factory.getlanguage(code) 296 criteria = { 297 'code': code, 298 'fullname': tk_lang.fullname, 299 'nplurals': tk_lang.nplurals, 300 'pluralequation': tk_lang.pluralequation, 301 } 302 try: 303 criteria['specialchars'] = tk_lang.specialchars 304 except AttributeError: 305 pass 306 lang, created = Language.objects.get_or_create(**criteria) 307 except: 308 pass 309 310 311 def create_default_admin(): 312 """Create the default admin user for Pootle. 
313 314 You definitely want to change the admin account so that your default 315 install is not accessible with the default credentials. The users 'noboby' 316 and 'default' should be left as is. 317 """ 318 User = get_user_model() 319 320 criteria = { 321 'username': u"admin", 322 'full_name': u"Administrator", 323 'is_active': True, 324 'is_superuser': True, 325 } 326 admin = User(**criteria) 327 admin.set_password("admin") 328 admin.save() 329 [end of pootle/core/initdb.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pootle/core/initdb.py b/pootle/core/initdb.py --- a/pootle/core/initdb.py +++ b/pootle/core/initdb.py @@ -29,6 +29,7 @@ from pootle_app.models.permissions import PermissionSet, get_pootle_permission from pootle_language.models import Language from pootle_project.models import Project +from staticpages.models import StaticPage as Announcement def initdb(): @@ -256,21 +257,6 @@ en = require_english() - #criteria = { - # 'code': u"pootle", - # 'source_language': en, - # 'fullname': u"Pootle", - # 'description': ('<div dir="ltr" lang="en">Interface translations for ' - # 'Pootle.<br />See the <a href="http://' - # 'pootle.locamotion.org">official Pootle server</a> ' - # 'for the translations of Pootle.</div>') - # 'checkstyle': "standard", - # 'localfiletype': "po", - # 'treestyle': "auto", - #} - #pootle = Project(**criteria) - #pootle.save() - criteria = { 'code': u"tutorial", 'source_language': en, @@ -282,6 +268,20 @@ tutorial = Project(**criteria) tutorial.save() + criteria = { + 'active': True, + 'title': "Project instructions", + 'body': ('<div dir="ltr" lang="en">Tutorial project where users can ' + 'play with Pootle and learn more about translation and ' + 'localisation.<br />For more help on localisation, visit the ' + '<a href="http://docs.translatehouse.org/projects/' + 'localization-guide/en/latest/guide/start.html">localisation ' + 'guide</a>.</div>'), + 'virtual_path': "announcements/projects/"+tutorial.code, + } + ann = Announcement(**criteria) + ann.save() + def create_default_languages(): """Create the default languages."""
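After this change initdb() still creates the tutorial project, but its old description now lives in a StaticPage announcement keyed by the project code. A sketch of what that leaves in the database, assuming a configured Pootle/Django environment where initdb() has already run; the model and field names are the ones used in the diff:

```python
# Illustrative check of the objects created by initdb() after the patch.
from pootle_project.models import Project
from staticpages.models import StaticPage as Announcement

tutorial = Project.objects.get(code="tutorial")
ann = Announcement.objects.get(
    virtual_path="announcements/projects/" + tutorial.code)

assert ann.active
print(ann.title)  # "Project instructions"
print(ann.body)   # the tutorial description that used to sit on the project itself
```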
{"golden_diff": "diff --git a/pootle/core/initdb.py b/pootle/core/initdb.py\n--- a/pootle/core/initdb.py\n+++ b/pootle/core/initdb.py\n@@ -29,6 +29,7 @@\n from pootle_app.models.permissions import PermissionSet, get_pootle_permission\n from pootle_language.models import Language\n from pootle_project.models import Project\n+from staticpages.models import StaticPage as Announcement\n \n \n def initdb():\n@@ -256,21 +257,6 @@\n \n en = require_english()\n \n- #criteria = {\n- # 'code': u\"pootle\",\n- # 'source_language': en,\n- # 'fullname': u\"Pootle\",\n- # 'description': ('<div dir=\"ltr\" lang=\"en\">Interface translations for '\n- # 'Pootle.<br />See the <a href=\"http://'\n- # 'pootle.locamotion.org\">official Pootle server</a> '\n- # 'for the translations of Pootle.</div>')\n- # 'checkstyle': \"standard\",\n- # 'localfiletype': \"po\",\n- # 'treestyle': \"auto\",\n- #}\n- #pootle = Project(**criteria)\n- #pootle.save()\n-\n criteria = {\n 'code': u\"tutorial\",\n 'source_language': en,\n@@ -282,6 +268,20 @@\n tutorial = Project(**criteria)\n tutorial.save()\n \n+ criteria = {\n+ 'active': True,\n+ 'title': \"Project instructions\",\n+ 'body': ('<div dir=\"ltr\" lang=\"en\">Tutorial project where users can '\n+ 'play with Pootle and learn more about translation and '\n+ 'localisation.<br />For more help on localisation, visit the '\n+ '<a href=\"http://docs.translatehouse.org/projects/'\n+ 'localization-guide/en/latest/guide/start.html\">localisation '\n+ 'guide</a>.</div>'),\n+ 'virtual_path': \"announcements/projects/\"+tutorial.code,\n+ }\n+ ann = Announcement(**criteria)\n+ ann.save()\n+\n \n def create_default_languages():\n \"\"\"Create the default languages.\"\"\"\n", "issue": "Add default project descriptions\nIn order to get `initdb` running on `core` we had to drop the project [descriptions] since these move to staticpages on core. This bug is to add those back.\n\nTo achieve that initdb should create static pages for these default projects that contain the content previously held in the [descriptions](https://github.com/translate/pootle/blob/96edf539/pootle/core/initdb.py#L300-L306).\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright 2013 Zuza Software Foundation\n# Copyright 2014-2015 Evernote Corporation\n#\n# This file is part of Pootle.\n#\n# Pootle is free software; you can redistribute it and/or modify it under the\n# terms of the GNU General Public License as published by the Free Software\n# Foundation; either version 2 of the License, or (at your option) any later\n# version.\n#\n# Pootle is distributed in the hope that it will be useful, but WITHOUT ANY\n# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR\n# A PARTICULAR PURPOSE. 
See the GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License along with\n# Pootle; if not, see <http://www.gnu.org/licenses/>.\n\n\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.models import Permission\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.utils.translation import ugettext_noop as _\n\nfrom pootle.core.models import Revision\nfrom pootle_app.models import Directory\nfrom pootle_app.models.permissions import PermissionSet, get_pootle_permission\nfrom pootle_language.models import Language\nfrom pootle_project.models import Project\n\n\ndef initdb():\n \"\"\"Populate the database with default initial data.\n\n This creates the default database to get a working Pootle installation.\n \"\"\"\n create_revision()\n create_essential_users()\n create_root_directories()\n create_template_languages()\n create_terminology_project()\n create_pootle_permissions()\n create_pootle_permission_sets()\n\n create_default_projects()\n create_default_languages()\n create_default_admin()\n\n\ndef create_revision():\n Revision.initialize()\n\n\ndef create_essential_users():\n \"\"\"Create the 'default' and 'nobody' User instances.\n\n These users are required for Pootle's permission system.\n \"\"\"\n User = get_user_model()\n\n # The nobody user is used to represent an anonymous user in cases where\n # we need to associate model information with such a user. An example is\n # in the permission system: we need a way to store rights for anonymous\n # users; thus we use the nobody user.\n criteria = {\n 'username': u\"nobody\",\n 'full_name': u\"any anonymous user\",\n 'is_active': True,\n }\n nobody, created = User.objects.get_or_create(**criteria)\n if created:\n nobody.set_unusable_password()\n nobody.save()\n\n # The 'default' user represents any valid, non-anonymous user and is used\n # to associate information any such user. An example is in the permission\n # system: we need a way to store default rights for users. 
We use the\n # 'default' user for this.\n #\n # In a future version of Pootle we should think about using Django's\n # groups to do better permissions handling.\n criteria = {\n 'username': u\"default\",\n 'full_name': u\"any authenticated user\",\n 'is_active': True,\n }\n default, created = User.objects.get_or_create(**criteria)\n if created:\n default.set_unusable_password()\n default.save()\n\n # The system user represents a system, and is used to\n # associate updates done by bulk commands as update_stores.\n criteria = {\n 'username': u\"system\",\n 'full_name': u\"system user\",\n 'is_active': True,\n }\n system, created = User.objects.get_or_create(**criteria)\n if created:\n system.set_unusable_password()\n system.save()\n\n\ndef create_pootle_permissions():\n \"\"\"Create Pootle's directory level permissions.\"\"\"\n\n args = {\n 'app_label': \"pootle_app\",\n 'model': \"directory\",\n }\n pootle_content_type, created = ContentType.objects.get_or_create(**args)\n pootle_content_type.name = 'pootle'\n pootle_content_type.save()\n\n # Create the permissions.\n permissions = [\n {\n 'name': _(\"Can access a project\"),\n 'codename': \"view\",\n },\n {\n 'name': _(\"Cannot access a project\"),\n 'codename': \"hide\",\n },\n {\n 'name': _(\"Can make a suggestion for a translation\"),\n 'codename': \"suggest\",\n },\n {\n 'name': _(\"Can submit a translation\"),\n 'codename': \"translate\",\n },\n {\n 'name': _(\"Can review translations\"),\n 'codename': \"review\",\n },\n {\n 'name': _(\"Can administrate a translation project\"),\n 'codename': \"administrate\",\n },\n ]\n\n criteria = {\n 'content_type': pootle_content_type,\n }\n\n for permission in permissions:\n criteria.update(permission)\n obj, created = Permission.objects.get_or_create(**criteria)\n\n\ndef create_pootle_permission_sets():\n \"\"\"Create the default permission set for the 'nobody' and 'default' users.\n\n 'nobody' is the anonymous (non-logged in) user, and 'default' is the logged\n in user.\n \"\"\"\n User = get_user_model()\n\n nobody = User.objects.get(username='nobody')\n default = User.objects.get(username='default')\n\n view = get_pootle_permission('view')\n suggest = get_pootle_permission('suggest')\n translate = get_pootle_permission('translate')\n\n # Default permissions for tree root.\n criteria = {\n 'user': nobody,\n 'directory': Directory.objects.root,\n }\n permission_set, created = PermissionSet.objects.get_or_create(**criteria)\n if created:\n permission_set.positive_permissions = [view, suggest]\n permission_set.save()\n\n criteria['user'] = default\n permission_set, created = PermissionSet.objects.get_or_create(**criteria)\n if created:\n permission_set.positive_permissions = [view, suggest, translate]\n permission_set.save()\n\n # Default permissions for templates language.\n # Override with no permissions for templates language.\n criteria = {\n 'user': nobody,\n 'directory': Directory.objects.get(pootle_path=\"/templates/\"),\n }\n permission_set, created = PermissionSet.objects.get_or_create(**criteria)\n if created:\n permission_set.positive_permissions = []\n permission_set.save()\n\n criteria['user'] = default\n permission_set, created = PermissionSet.objects.get_or_create(**criteria)\n if created:\n permission_set.positive_permissions = []\n permission_set.save()\n\n\ndef require_english():\n \"\"\"Create the English Language item.\"\"\"\n criteria = {\n 'code': \"en\",\n 'fullname': u\"English\",\n 'nplurals': 2,\n 'pluralequation': \"(n != 1)\",\n }\n en, created = 
Language.objects.get_or_create(**criteria)\n return en\n\n\ndef create_root_directories():\n \"\"\"Create the root Directory items.\"\"\"\n root, created = Directory.objects.get_or_create(name='')\n projects, created = Directory.objects.get_or_create(name='projects',\n parent=root)\n\n\ndef create_template_languages():\n \"\"\"Create the 'templates' and English languages.\n\n The 'templates' language is used to give users access to the untranslated\n template files.\n \"\"\"\n templates, created = Language.objects.get_or_create(code=\"templates\",\n fullname=u'Templates')\n require_english()\n\n\ndef create_terminology_project():\n \"\"\"Create the terminology project.\n\n The terminology project is used to display terminology suggestions while\n translating.\n \"\"\"\n criteria = {\n 'code': \"terminology\",\n 'fullname': u\"Terminology\",\n 'source_language': require_english(),\n 'checkstyle': \"terminology\",\n }\n terminology, created = Project.objects.get_or_create(**criteria)\n\n\ndef create_default_projects():\n \"\"\"Create the default projects that we host.\n\n You might want to add your projects here, although you can also add things\n through the web interface later.\n \"\"\"\n from pootle_project.models import Project\n\n en = require_english()\n\n #criteria = {\n # 'code': u\"pootle\",\n # 'source_language': en,\n # 'fullname': u\"Pootle\",\n # 'description': ('<div dir=\"ltr\" lang=\"en\">Interface translations for '\n # 'Pootle.<br />See the <a href=\"http://'\n # 'pootle.locamotion.org\">official Pootle server</a> '\n # 'for the translations of Pootle.</div>')\n # 'checkstyle': \"standard\",\n # 'localfiletype': \"po\",\n # 'treestyle': \"auto\",\n #}\n #pootle = Project(**criteria)\n #pootle.save()\n\n criteria = {\n 'code': u\"tutorial\",\n 'source_language': en,\n 'fullname': u\"Tutorial\",\n 'checkstyle': \"standard\",\n 'localfiletype': \"po\",\n 'treestyle': \"auto\",\n }\n tutorial = Project(**criteria)\n tutorial.save()\n\n\ndef create_default_languages():\n \"\"\"Create the default languages.\"\"\"\n from translate.lang import data, factory\n\n from pootle_language.models import Language\n\n # import languages from toolkit\n for code in data.languages.keys():\n try:\n tk_lang = factory.getlanguage(code)\n criteria = {\n 'code': code,\n 'fullname': tk_lang.fullname,\n 'nplurals': tk_lang.nplurals,\n 'pluralequation': tk_lang.pluralequation,\n }\n try:\n criteria['specialchars'] = tk_lang.specialchars\n except AttributeError:\n pass\n lang, created = Language.objects.get_or_create(**criteria)\n except:\n pass\n\n\ndef create_default_admin():\n \"\"\"Create the default admin user for Pootle.\n\n You definitely want to change the admin account so that your default\n install is not accessible with the default credentials. The users 'noboby'\n and 'default' should be left as is.\n \"\"\"\n User = get_user_model()\n\n criteria = {\n 'username': u\"admin\",\n 'full_name': u\"Administrator\",\n 'is_active': True,\n 'is_superuser': True,\n }\n admin = User(**criteria)\n admin.set_password(\"admin\")\n admin.save()\n", "path": "pootle/core/initdb.py"}]}
3,804
511
gh_patches_debug_26820
rasdani/github-patches
git_diff
googleapis__python-bigquery-571
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> ConnectionError in Client.insert_rows_json() We have a http Cloud Function that does some data processing and then streams to BQ. The function errors out sometimes because of either the bq client losing connection or it is the insert_rows that can't connect. See below an example of a stack trace captured in the GCP logs. ``` File "/layers/google.python.pip/pip/lib/python3.8/site-packages/flask/app.py", line 2447, in wsgi_app response = self.full_dispatch_request() File "/layers/google.python.pip/pip/lib/python3.8/site-packages/flask/app.py", line 1952, in full_dispatch_request rv = self.handle_user_exception(e) File "/layers/google.python.pip/pip/lib/python3.8/site-packages/flask/app.py", line 1821, in handle_user_exception reraise(exc_type, exc_value, tb) File "/layers/google.python.pip/pip/lib/python3.8/site-packages/flask/_compat.py", line 39, in reraise raise value File "/layers/google.python.pip/pip/lib/python3.8/site-packages/flask/app.py", line 1950, in full_dispatch_request rv = self.dispatch_request() File "/layers/google.python.pip/pip/lib/python3.8/site-packages/flask/app.py", line 1936, in dispatch_request return self.view_functions[rule.endpoint](**req.view_args) File "/layers/google.python.functions-framework/functions-framework/lib/python3.8/site-packages/functions_framework/__init__.py", line 66, in view_func return function(request._get_current_object()) File "/workspace/main.py", line 162, in stream_tax errors = bq.insert_rows_json(table=dataset_table, File "/layers/google.python.pip/pip/lib/python3.8/site-packages/google/cloud/bigquery/client.py", line 3013, in insert_rows_json response = self._call_api( File "/layers/google.python.pip/pip/lib/python3.8/site-packages/google/cloud/bigquery/client.py", line 636, in _call_api return call() File "/layers/google.python.pip/pip/lib/python3.8/site-packages/google/api_core/retry.py", line 281, in retry_wrapped_func return retry_target( File "/layers/google.python.pip/pip/lib/python3.8/site-packages/google/api_core/retry.py", line 184, in retry_target return target() File "/layers/google.python.pip/pip/lib/python3.8/site-packages/google/cloud/_http.py", line 427, in api_request response = self._make_request( File "/layers/google.python.pip/pip/lib/python3.8/site-packages/google/cloud/_http.py", line 291, in _make_request return self._do_request( File "/layers/google.python.pip/pip/lib/python3.8/site-packages/google/cloud/_http.py", line 329, in _do_request return self.http.request( File "/layers/google.python.pip/pip/lib/python3.8/site-packages/google/auth/transport/requests.py", line 464, in request response = super(AuthorizedSession, self).request( File "/layers/google.python.pip/pip/lib/python3.8/site-packages/requests/sessions.py", line 542, in request resp = self.send(prep, **send_kwargs) File "/layers/google.python.pip/pip/lib/python3.8/site-packages/requests/sessions.py", line 655, in send r = adapter.send(request, **kwargs) File "/layers/google.python.pip/pip/lib/python3.8/site-packages/requests/adapters.py", line 498, in send raise ConnectionError(err, request=request) requests.exceptions.ConnectionError: ('Connection aborted.', ConnectionResetError(104, 'Connection reset by peer')) ``` - `bq` (=`bigquery.Client()`) in the trace is instantiated as a global variable as recommended here: https://cloud.google.com/functions/docs/bestpractices/networking#accessing_google_apis - error is logged 30 secs after function 
is invoked - so can't be the 60s default timeout in `-http` Thoughts ? </issue> <code> [start of setup.py] 1 # Copyright 2018 Google LLC 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 import io 16 import os 17 18 import setuptools 19 20 21 # Package metadata. 22 23 name = "google-cloud-bigquery" 24 description = "Google BigQuery API client library" 25 26 # Should be one of: 27 # 'Development Status :: 3 - Alpha' 28 # 'Development Status :: 4 - Beta' 29 # 'Development Status :: 5 - Production/Stable' 30 release_status = "Development Status :: 5 - Production/Stable" 31 dependencies = [ 32 "google-api-core[grpc] >= 1.23.0, < 2.0.0dev", 33 "proto-plus >= 1.10.0", 34 "google-cloud-core >= 1.4.1, < 2.0dev", 35 "google-resumable-media >= 0.6.0, < 2.0dev", 36 "packaging >= 14.3", 37 "protobuf >= 3.12.0", 38 ] 39 extras = { 40 "bqstorage": [ 41 "google-cloud-bigquery-storage >= 2.0.0, <3.0.0dev", 42 # Due to an issue in pip's dependency resolver, the `grpc` extra is not 43 # installed, even though `google-cloud-bigquery-storage` specifies it 44 # as `google-api-core[grpc]`. We thus need to explicitly specify it here. 45 # See: https://github.com/googleapis/python-bigquery/issues/83 The 46 # grpc.Channel.close() method isn't added until 1.32.0. 47 # https://github.com/grpc/grpc/pull/15254 48 "grpcio >= 1.32.0, < 2.0dev", 49 "pyarrow >= 1.0.0, < 4.0dev", 50 ], 51 "pandas": ["pandas>=0.23.0", "pyarrow >= 1.0.0, < 4.0dev"], 52 "bignumeric_type": ["pyarrow >= 3.0.0, < 4.0dev"], 53 "tqdm": ["tqdm >= 4.7.4, <5.0.0dev"], 54 "opentelemetry": [ 55 "opentelemetry-api==0.11b0", 56 "opentelemetry-sdk==0.11b0", 57 "opentelemetry-instrumentation==0.11b0", 58 ], 59 } 60 61 all_extras = [] 62 63 for extra in extras: 64 # Exclude this extra from all to avoid overly strict dependencies on core 65 # libraries such as pyarrow. 66 # https://github.com/googleapis/python-bigquery/issues/563 67 if extra in {"bignumeric_type"}: 68 continue 69 all_extras.extend(extras[extra]) 70 71 extras["all"] = all_extras 72 73 # Setup boilerplate below this line. 74 75 package_root = os.path.abspath(os.path.dirname(__file__)) 76 77 readme_filename = os.path.join(package_root, "README.rst") 78 with io.open(readme_filename, encoding="utf-8") as readme_file: 79 readme = readme_file.read() 80 81 version = {} 82 with open(os.path.join(package_root, "google/cloud/bigquery/version.py")) as fp: 83 exec(fp.read(), version) 84 version = version["__version__"] 85 86 # Only include packages under the 'google' namespace. Do not include tests, 87 # benchmarks, etc. 88 packages = [ 89 package 90 for package in setuptools.PEP420PackageFinder.find() 91 if package.startswith("google") 92 ] 93 94 # Determine which namespaces are needed. 
95 namespaces = ["google"] 96 if "google.cloud" in packages: 97 namespaces.append("google.cloud") 98 99 100 setuptools.setup( 101 name=name, 102 version=version, 103 description=description, 104 long_description=readme, 105 author="Google LLC", 106 author_email="[email protected]", 107 license="Apache 2.0", 108 url="https://github.com/googleapis/python-bigquery", 109 classifiers=[ 110 release_status, 111 "Intended Audience :: Developers", 112 "License :: OSI Approved :: Apache Software License", 113 "Programming Language :: Python", 114 "Programming Language :: Python :: 3", 115 "Programming Language :: Python :: 3.6", 116 "Programming Language :: Python :: 3.7", 117 "Programming Language :: Python :: 3.8", 118 "Programming Language :: Python :: 3.9", 119 "Operating System :: OS Independent", 120 "Topic :: Internet", 121 ], 122 platforms="Posix; MacOS X; Windows", 123 packages=packages, 124 namespace_packages=namespaces, 125 install_requires=dependencies, 126 extras_require=extras, 127 python_requires=">=3.6, <3.10", 128 include_package_data=True, 129 zip_safe=False, 130 ) 131 [end of setup.py] [start of google/cloud/bigquery/retry.py] 1 # Copyright 2018 Google LLC 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 from google.api_core import exceptions 16 from google.api_core import retry 17 18 19 _RETRYABLE_REASONS = frozenset( 20 ["rateLimitExceeded", "backendError", "internalError", "badGateway"] 21 ) 22 23 _UNSTRUCTURED_RETRYABLE_TYPES = ( 24 exceptions.TooManyRequests, 25 exceptions.InternalServerError, 26 exceptions.BadGateway, 27 ) 28 29 30 def _should_retry(exc): 31 """Predicate for determining when to retry. 32 33 We retry if and only if the 'reason' is 'backendError' 34 or 'rateLimitExceeded'. 35 """ 36 if not hasattr(exc, "errors"): 37 return False 38 39 if len(exc.errors) == 0: 40 # Check for unstructured error returns, e.g. from GFE 41 return isinstance(exc, _UNSTRUCTURED_RETRYABLE_TYPES) 42 43 reason = exc.errors[0]["reason"] 44 return reason in _RETRYABLE_REASONS 45 46 47 DEFAULT_RETRY = retry.Retry(predicate=_should_retry) 48 """The default retry object. 49 50 Any method with a ``retry`` parameter will be retried automatically, 51 with reasonable defaults. To disable retry, pass ``retry=None``. 52 To modify the default retry behavior, call a ``with_XXX`` method 53 on ``DEFAULT_RETRY``. For example, to change the deadline to 30 seconds, 54 pass ``retry=bigquery.DEFAULT_RETRY.with_deadline(30)``. 55 """ 56 [end of google/cloud/bigquery/retry.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/google/cloud/bigquery/retry.py b/google/cloud/bigquery/retry.py --- a/google/cloud/bigquery/retry.py +++ b/google/cloud/bigquery/retry.py @@ -14,6 +14,7 @@ from google.api_core import exceptions from google.api_core import retry +import requests.exceptions _RETRYABLE_REASONS = frozenset( @@ -21,9 +22,11 @@ ) _UNSTRUCTURED_RETRYABLE_TYPES = ( + ConnectionError, exceptions.TooManyRequests, exceptions.InternalServerError, exceptions.BadGateway, + requests.exceptions.ConnectionError, ) @@ -33,10 +36,7 @@ We retry if and only if the 'reason' is 'backendError' or 'rateLimitExceeded'. """ - if not hasattr(exc, "errors"): - return False - - if len(exc.errors) == 0: + if not hasattr(exc, "errors") or len(exc.errors) == 0: # Check for unstructured error returns, e.g. from GFE return isinstance(exc, _UNSTRUCTURED_RETRYABLE_TYPES) diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -35,6 +35,7 @@ "google-resumable-media >= 0.6.0, < 2.0dev", "packaging >= 14.3", "protobuf >= 3.12.0", + "requests >= 2.18.0, < 3.0.0dev", ] extras = { "bqstorage": [
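The diff above makes transport-level failures retryable by letting the predicate fall through to the unstructured-exception check, which now includes `requests.exceptions.ConnectionError`. A minimal illustrative sketch, not taken from the repository, assuming the patched `google/cloud/bigquery/retry.py` is importable and using an invented stand-in exception:

```python
# Sketch: exercising the patched predicate with a bare connection reset.
import requests.exceptions

from google.cloud.bigquery.retry import _should_retry

# A transport failure carries no BigQuery `errors` payload, so the unpatched
# predicate returned False before ever looking at the exception type.
exc = requests.exceptions.ConnectionError(
    "Connection aborted.", ConnectionResetError(104, "Connection reset by peer")
)
print(_should_retry(exc))  # True with the patch applied, False without it
```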
{"golden_diff": "diff --git a/google/cloud/bigquery/retry.py b/google/cloud/bigquery/retry.py\n--- a/google/cloud/bigquery/retry.py\n+++ b/google/cloud/bigquery/retry.py\n@@ -14,6 +14,7 @@\n \n from google.api_core import exceptions\n from google.api_core import retry\n+import requests.exceptions\n \n \n _RETRYABLE_REASONS = frozenset(\n@@ -21,9 +22,11 @@\n )\n \n _UNSTRUCTURED_RETRYABLE_TYPES = (\n+ ConnectionError,\n exceptions.TooManyRequests,\n exceptions.InternalServerError,\n exceptions.BadGateway,\n+ requests.exceptions.ConnectionError,\n )\n \n \n@@ -33,10 +36,7 @@\n We retry if and only if the 'reason' is 'backendError'\n or 'rateLimitExceeded'.\n \"\"\"\n- if not hasattr(exc, \"errors\"):\n- return False\n-\n- if len(exc.errors) == 0:\n+ if not hasattr(exc, \"errors\") or len(exc.errors) == 0:\n # Check for unstructured error returns, e.g. from GFE\n return isinstance(exc, _UNSTRUCTURED_RETRYABLE_TYPES)\n \ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -35,6 +35,7 @@\n \"google-resumable-media >= 0.6.0, < 2.0dev\",\n \"packaging >= 14.3\",\n \"protobuf >= 3.12.0\",\n+ \"requests >= 2.18.0, < 3.0.0dev\",\n ]\n extras = {\n \"bqstorage\": [\n", "issue": "ConnectionError in Client.insert_rows_json()\nWe have a http Cloud Function that does some data processing and then streams to BQ. The function errors out sometimes because of either the bq client losing connection or it is the insert_rows that can't connect. \r\nSee below an example of a stack trace captured in the GCP logs.\r\n\r\n\r\n```\r\n File \"/layers/google.python.pip/pip/lib/python3.8/site-packages/flask/app.py\", line 2447, in wsgi_app\r\n response = self.full_dispatch_request()\r\n File \"/layers/google.python.pip/pip/lib/python3.8/site-packages/flask/app.py\", line 1952, in full_dispatch_request\r\n rv = self.handle_user_exception(e)\r\n File \"/layers/google.python.pip/pip/lib/python3.8/site-packages/flask/app.py\", line 1821, in handle_user_exception\r\n reraise(exc_type, exc_value, tb)\r\n File \"/layers/google.python.pip/pip/lib/python3.8/site-packages/flask/_compat.py\", line 39, in reraise\r\n raise value\r\n File \"/layers/google.python.pip/pip/lib/python3.8/site-packages/flask/app.py\", line 1950, in full_dispatch_request\r\n rv = self.dispatch_request()\r\n File \"/layers/google.python.pip/pip/lib/python3.8/site-packages/flask/app.py\", line 1936, in dispatch_request\r\n return self.view_functions[rule.endpoint](**req.view_args)\r\n File \"/layers/google.python.functions-framework/functions-framework/lib/python3.8/site-packages/functions_framework/__init__.py\", line 66, in view_func\r\n return function(request._get_current_object())\r\n File \"/workspace/main.py\", line 162, in stream_tax\r\n errors = bq.insert_rows_json(table=dataset_table,\r\n File \"/layers/google.python.pip/pip/lib/python3.8/site-packages/google/cloud/bigquery/client.py\", line 3013, in insert_rows_json\r\n response = self._call_api(\r\n File \"/layers/google.python.pip/pip/lib/python3.8/site-packages/google/cloud/bigquery/client.py\", line 636, in _call_api\r\n return call()\r\n File \"/layers/google.python.pip/pip/lib/python3.8/site-packages/google/api_core/retry.py\", line 281, in retry_wrapped_func\r\n return retry_target(\r\n File \"/layers/google.python.pip/pip/lib/python3.8/site-packages/google/api_core/retry.py\", line 184, in retry_target\r\n return target()\r\n File \"/layers/google.python.pip/pip/lib/python3.8/site-packages/google/cloud/_http.py\", line 427, in api_request\r\n response = 
self._make_request(\r\n File \"/layers/google.python.pip/pip/lib/python3.8/site-packages/google/cloud/_http.py\", line 291, in _make_request\r\n return self._do_request(\r\n File \"/layers/google.python.pip/pip/lib/python3.8/site-packages/google/cloud/_http.py\", line 329, in _do_request\r\n return self.http.request(\r\n File \"/layers/google.python.pip/pip/lib/python3.8/site-packages/google/auth/transport/requests.py\", line 464, in request\r\n response = super(AuthorizedSession, self).request(\r\n File \"/layers/google.python.pip/pip/lib/python3.8/site-packages/requests/sessions.py\", line 542, in request\r\n resp = self.send(prep, **send_kwargs)\r\n File \"/layers/google.python.pip/pip/lib/python3.8/site-packages/requests/sessions.py\", line 655, in send\r\n r = adapter.send(request, **kwargs)\r\n File \"/layers/google.python.pip/pip/lib/python3.8/site-packages/requests/adapters.py\", line 498, in send\r\n raise ConnectionError(err, request=request)\r\nrequests.exceptions.ConnectionError: ('Connection aborted.', ConnectionResetError(104, 'Connection reset by peer'))\r\n```\r\n\r\n\r\n- `bq` (=`bigquery.Client()`) in the trace is instantiated as a global variable as recommended here: https://cloud.google.com/functions/docs/bestpractices/networking#accessing_google_apis\r\n\r\n- error is logged 30 secs after function is invoked - so can't be the 60s default timeout in `-http`\r\n\r\nThoughts ?\n", "before_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nimport os\n\nimport setuptools\n\n\n# Package metadata.\n\nname = \"google-cloud-bigquery\"\ndescription = \"Google BigQuery API client library\"\n\n# Should be one of:\n# 'Development Status :: 3 - Alpha'\n# 'Development Status :: 4 - Beta'\n# 'Development Status :: 5 - Production/Stable'\nrelease_status = \"Development Status :: 5 - Production/Stable\"\ndependencies = [\n \"google-api-core[grpc] >= 1.23.0, < 2.0.0dev\",\n \"proto-plus >= 1.10.0\",\n \"google-cloud-core >= 1.4.1, < 2.0dev\",\n \"google-resumable-media >= 0.6.0, < 2.0dev\",\n \"packaging >= 14.3\",\n \"protobuf >= 3.12.0\",\n]\nextras = {\n \"bqstorage\": [\n \"google-cloud-bigquery-storage >= 2.0.0, <3.0.0dev\",\n # Due to an issue in pip's dependency resolver, the `grpc` extra is not\n # installed, even though `google-cloud-bigquery-storage` specifies it\n # as `google-api-core[grpc]`. 
We thus need to explicitly specify it here.\n # See: https://github.com/googleapis/python-bigquery/issues/83 The\n # grpc.Channel.close() method isn't added until 1.32.0.\n # https://github.com/grpc/grpc/pull/15254\n \"grpcio >= 1.32.0, < 2.0dev\",\n \"pyarrow >= 1.0.0, < 4.0dev\",\n ],\n \"pandas\": [\"pandas>=0.23.0\", \"pyarrow >= 1.0.0, < 4.0dev\"],\n \"bignumeric_type\": [\"pyarrow >= 3.0.0, < 4.0dev\"],\n \"tqdm\": [\"tqdm >= 4.7.4, <5.0.0dev\"],\n \"opentelemetry\": [\n \"opentelemetry-api==0.11b0\",\n \"opentelemetry-sdk==0.11b0\",\n \"opentelemetry-instrumentation==0.11b0\",\n ],\n}\n\nall_extras = []\n\nfor extra in extras:\n # Exclude this extra from all to avoid overly strict dependencies on core\n # libraries such as pyarrow.\n # https://github.com/googleapis/python-bigquery/issues/563\n if extra in {\"bignumeric_type\"}:\n continue\n all_extras.extend(extras[extra])\n\nextras[\"all\"] = all_extras\n\n# Setup boilerplate below this line.\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nreadme_filename = os.path.join(package_root, \"README.rst\")\nwith io.open(readme_filename, encoding=\"utf-8\") as readme_file:\n readme = readme_file.read()\n\nversion = {}\nwith open(os.path.join(package_root, \"google/cloud/bigquery/version.py\")) as fp:\n exec(fp.read(), version)\nversion = version[\"__version__\"]\n\n# Only include packages under the 'google' namespace. Do not include tests,\n# benchmarks, etc.\npackages = [\n package\n for package in setuptools.PEP420PackageFinder.find()\n if package.startswith(\"google\")\n]\n\n# Determine which namespaces are needed.\nnamespaces = [\"google\"]\nif \"google.cloud\" in packages:\n namespaces.append(\"google.cloud\")\n\n\nsetuptools.setup(\n name=name,\n version=version,\n description=description,\n long_description=readme,\n author=\"Google LLC\",\n author_email=\"[email protected]\",\n license=\"Apache 2.0\",\n url=\"https://github.com/googleapis/python-bigquery\",\n classifiers=[\n release_status,\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet\",\n ],\n platforms=\"Posix; MacOS X; Windows\",\n packages=packages,\n namespace_packages=namespaces,\n install_requires=dependencies,\n extras_require=extras,\n python_requires=\">=3.6, <3.10\",\n include_package_data=True,\n zip_safe=False,\n)\n", "path": "setup.py"}, {"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom google.api_core import exceptions\nfrom google.api_core import retry\n\n\n_RETRYABLE_REASONS = frozenset(\n [\"rateLimitExceeded\", \"backendError\", \"internalError\", \"badGateway\"]\n)\n\n_UNSTRUCTURED_RETRYABLE_TYPES = (\n 
exceptions.TooManyRequests,\n exceptions.InternalServerError,\n exceptions.BadGateway,\n)\n\n\ndef _should_retry(exc):\n \"\"\"Predicate for determining when to retry.\n\n We retry if and only if the 'reason' is 'backendError'\n or 'rateLimitExceeded'.\n \"\"\"\n if not hasattr(exc, \"errors\"):\n return False\n\n if len(exc.errors) == 0:\n # Check for unstructured error returns, e.g. from GFE\n return isinstance(exc, _UNSTRUCTURED_RETRYABLE_TYPES)\n\n reason = exc.errors[0][\"reason\"]\n return reason in _RETRYABLE_REASONS\n\n\nDEFAULT_RETRY = retry.Retry(predicate=_should_retry)\n\"\"\"The default retry object.\n\nAny method with a ``retry`` parameter will be retried automatically,\nwith reasonable defaults. To disable retry, pass ``retry=None``.\nTo modify the default retry behavior, call a ``with_XXX`` method\non ``DEFAULT_RETRY``. For example, to change the deadline to 30 seconds,\npass ``retry=bigquery.DEFAULT_RETRY.with_deadline(30)``.\n\"\"\"\n", "path": "google/cloud/bigquery/retry.py"}]}
num_tokens_prompt: 3,469
num_tokens_diff: 365

problem_id: gh_patches_debug_17976
source: rasdani/github-patches
task_type: git_diff
in_source_id: projectmesa__mesa-1330
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> random.sample with a set is removed in Python 3.11 Using [`random.sample`](https://docs.python.org/3/library/random.html#random.sample) with a set has been depreciated in Python 3.9 and was removed in Python 3.11. > Changed in version 3.11: The population must be a sequence. Automatic conversion of sets to lists is no longer supported. Two cases of this function being used with a set have been detected by the CI in Mesa, both in example models: 1. https://github.com/projectmesa/mesa/blob/5135be8759d9fdac551c5c6ed13e9ae5205bce3e/examples/boltzmann_wealth_model_network/boltzmann_wealth_model_network/model.py#L28 2. https://github.com/projectmesa/mesa/blob/5135be8759d9fdac551c5c6ed13e9ae5205bce3e/examples/virus_on_network/virus_on_network/model.py#L84 What would be the most efficient way to get `self.G.nodes()` in a sequence form (so probably list or tuple)? I feel like just putting it inside a `list()` isn't the proper way to do it. </issue> <code> [start of examples/virus_on_network/virus_on_network/model.py] 1 import math 2 from enum import Enum 3 import networkx as nx 4 5 from mesa import Agent, Model 6 from mesa.time import RandomActivation 7 from mesa.datacollection import DataCollector 8 from mesa.space import NetworkGrid 9 10 11 class State(Enum): 12 SUSCEPTIBLE = 0 13 INFECTED = 1 14 RESISTANT = 2 15 16 17 def number_state(model, state): 18 return sum(1 for a in model.grid.get_all_cell_contents() if a.state is state) 19 20 21 def number_infected(model): 22 return number_state(model, State.INFECTED) 23 24 25 def number_susceptible(model): 26 return number_state(model, State.SUSCEPTIBLE) 27 28 29 def number_resistant(model): 30 return number_state(model, State.RESISTANT) 31 32 33 class VirusOnNetwork(Model): 34 """A virus model with some number of agents""" 35 36 def __init__( 37 self, 38 num_nodes=10, 39 avg_node_degree=3, 40 initial_outbreak_size=1, 41 virus_spread_chance=0.4, 42 virus_check_frequency=0.4, 43 recovery_chance=0.3, 44 gain_resistance_chance=0.5, 45 ): 46 47 self.num_nodes = num_nodes 48 prob = avg_node_degree / self.num_nodes 49 self.G = nx.erdos_renyi_graph(n=self.num_nodes, p=prob) 50 self.grid = NetworkGrid(self.G) 51 self.schedule = RandomActivation(self) 52 self.initial_outbreak_size = ( 53 initial_outbreak_size if initial_outbreak_size <= num_nodes else num_nodes 54 ) 55 self.virus_spread_chance = virus_spread_chance 56 self.virus_check_frequency = virus_check_frequency 57 self.recovery_chance = recovery_chance 58 self.gain_resistance_chance = gain_resistance_chance 59 60 self.datacollector = DataCollector( 61 { 62 "Infected": number_infected, 63 "Susceptible": number_susceptible, 64 "Resistant": number_resistant, 65 } 66 ) 67 68 # Create agents 69 for i, node in enumerate(self.G.nodes()): 70 a = VirusAgent( 71 i, 72 self, 73 State.SUSCEPTIBLE, 74 self.virus_spread_chance, 75 self.virus_check_frequency, 76 self.recovery_chance, 77 self.gain_resistance_chance, 78 ) 79 self.schedule.add(a) 80 # Add the agent to the node 81 self.grid.place_agent(a, node) 82 83 # Infect some nodes 84 infected_nodes = self.random.sample(self.G.nodes(), self.initial_outbreak_size) 85 for a in self.grid.get_cell_list_contents(infected_nodes): 86 a.state = State.INFECTED 87 88 self.running = True 89 self.datacollector.collect(self) 90 91 def resistant_susceptible_ratio(self): 92 try: 93 return number_state(self, State.RESISTANT) / number_state( 94 self, State.SUSCEPTIBLE 95 ) 96 
except ZeroDivisionError: 97 return math.inf 98 99 def step(self): 100 self.schedule.step() 101 # collect data 102 self.datacollector.collect(self) 103 104 def run_model(self, n): 105 for i in range(n): 106 self.step() 107 108 109 class VirusAgent(Agent): 110 def __init__( 111 self, 112 unique_id, 113 model, 114 initial_state, 115 virus_spread_chance, 116 virus_check_frequency, 117 recovery_chance, 118 gain_resistance_chance, 119 ): 120 super().__init__(unique_id, model) 121 122 self.state = initial_state 123 124 self.virus_spread_chance = virus_spread_chance 125 self.virus_check_frequency = virus_check_frequency 126 self.recovery_chance = recovery_chance 127 self.gain_resistance_chance = gain_resistance_chance 128 129 def try_to_infect_neighbors(self): 130 neighbors_nodes = self.model.grid.get_neighbors(self.pos, include_center=False) 131 susceptible_neighbors = [ 132 agent 133 for agent in self.model.grid.get_cell_list_contents(neighbors_nodes) 134 if agent.state is State.SUSCEPTIBLE 135 ] 136 for a in susceptible_neighbors: 137 if self.random.random() < self.virus_spread_chance: 138 a.state = State.INFECTED 139 140 def try_gain_resistance(self): 141 if self.random.random() < self.gain_resistance_chance: 142 self.state = State.RESISTANT 143 144 def try_remove_infection(self): 145 # Try to remove 146 if self.random.random() < self.recovery_chance: 147 # Success 148 self.state = State.SUSCEPTIBLE 149 self.try_gain_resistance() 150 else: 151 # Failed 152 self.state = State.INFECTED 153 154 def try_check_situation(self): 155 if self.random.random() < self.virus_check_frequency: 156 # Checking... 157 if self.state is State.INFECTED: 158 self.try_remove_infection() 159 160 def step(self): 161 if self.state is State.INFECTED: 162 self.try_to_infect_neighbors() 163 self.try_check_situation() 164 [end of examples/virus_on_network/virus_on_network/model.py] [start of examples/boltzmann_wealth_model_network/boltzmann_wealth_model_network/model.py] 1 import mesa 2 import networkx as nx 3 4 5 def compute_gini(model): 6 agent_wealths = [agent.wealth for agent in model.schedule.agents] 7 x = sorted(agent_wealths) 8 N = model.num_agents 9 B = sum(xi * (N - i) for i, xi in enumerate(x)) / (N * sum(x)) 10 return 1 + (1 / N) - 2 * B 11 12 13 class BoltzmannWealthModelNetwork(mesa.Model): 14 """A model with some number of agents.""" 15 16 def __init__(self, num_agents=7, num_nodes=10): 17 18 self.num_agents = num_agents 19 self.num_nodes = num_nodes if num_nodes >= self.num_agents else self.num_agents 20 self.G = nx.erdos_renyi_graph(n=self.num_nodes, p=0.5) 21 self.grid = mesa.space.NetworkGrid(self.G) 22 self.schedule = mesa.time.RandomActivation(self) 23 self.datacollector = mesa.DataCollector( 24 model_reporters={"Gini": compute_gini}, 25 agent_reporters={"Wealth": lambda _: _.wealth}, 26 ) 27 28 list_of_random_nodes = self.random.sample(self.G.nodes(), self.num_agents) 29 30 # Create agents 31 for i in range(self.num_agents): 32 a = MoneyAgent(i, self) 33 self.schedule.add(a) 34 # Add the agent to a random node 35 self.grid.place_agent(a, list_of_random_nodes[i]) 36 37 self.running = True 38 self.datacollector.collect(self) 39 40 def step(self): 41 self.schedule.step() 42 # collect data 43 self.datacollector.collect(self) 44 45 def run_model(self, n): 46 for i in range(n): 47 self.step() 48 49 50 class MoneyAgent(mesa.Agent): 51 """An agent with fixed initial wealth.""" 52 53 def __init__(self, unique_id, model): 54 super().__init__(unique_id, model) 55 self.wealth = 1 56 57 def move(self): 58 
possible_steps = [ 59 node 60 for node in self.model.grid.get_neighbors(self.pos, include_center=False) 61 if self.model.grid.is_cell_empty(node) 62 ] 63 if len(possible_steps) > 0: 64 new_position = self.random.choice(possible_steps) 65 self.model.grid.move_agent(self, new_position) 66 67 def give_money(self): 68 69 neighbors_nodes = self.model.grid.get_neighbors(self.pos, include_center=False) 70 neighbors = self.model.grid.get_cell_list_contents(neighbors_nodes) 71 if len(neighbors) > 0: 72 other = self.random.choice(neighbors) 73 other.wealth += 1 74 self.wealth -= 1 75 76 def step(self): 77 self.move() 78 if self.wealth > 0: 79 self.give_money() 80 [end of examples/boltzmann_wealth_model_network/boltzmann_wealth_model_network/model.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/examples/boltzmann_wealth_model_network/boltzmann_wealth_model_network/model.py b/examples/boltzmann_wealth_model_network/boltzmann_wealth_model_network/model.py --- a/examples/boltzmann_wealth_model_network/boltzmann_wealth_model_network/model.py +++ b/examples/boltzmann_wealth_model_network/boltzmann_wealth_model_network/model.py @@ -25,7 +25,7 @@ agent_reporters={"Wealth": lambda _: _.wealth}, ) - list_of_random_nodes = self.random.sample(self.G.nodes(), self.num_agents) + list_of_random_nodes = self.random.sample(list(self.G), self.num_agents) # Create agents for i in range(self.num_agents): diff --git a/examples/virus_on_network/virus_on_network/model.py b/examples/virus_on_network/virus_on_network/model.py --- a/examples/virus_on_network/virus_on_network/model.py +++ b/examples/virus_on_network/virus_on_network/model.py @@ -81,7 +81,7 @@ self.grid.place_agent(a, node) # Infect some nodes - infected_nodes = self.random.sample(self.G.nodes(), self.initial_outbreak_size) + infected_nodes = self.random.sample(list(self.G), self.initial_outbreak_size) for a in self.grid.get_cell_list_contents(infected_nodes): a.state = State.INFECTED
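This one-line change works because `list(G)` materializes the graph's node labels as a real sequence, which is what `random.sample` requires from Python 3.11 onward. A small self-contained illustration, assuming `networkx` is installed; the graph size, seed, and sample size are invented for the example:

```python
# Sketch: the behavior difference the patch is working around.
import random

import networkx as nx

G = nx.erdos_renyi_graph(n=10, p=0.5, seed=1)
rng = random.Random(42)

# rng.sample(G.nodes(), 3)       # TypeError on Python 3.11+: population must be a sequence
picked = rng.sample(list(G), 3)  # list(G) yields the node labels as a plain list
print(picked)
```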
{"golden_diff": "diff --git a/examples/boltzmann_wealth_model_network/boltzmann_wealth_model_network/model.py b/examples/boltzmann_wealth_model_network/boltzmann_wealth_model_network/model.py\n--- a/examples/boltzmann_wealth_model_network/boltzmann_wealth_model_network/model.py\n+++ b/examples/boltzmann_wealth_model_network/boltzmann_wealth_model_network/model.py\n@@ -25,7 +25,7 @@\n agent_reporters={\"Wealth\": lambda _: _.wealth},\n )\n \n- list_of_random_nodes = self.random.sample(self.G.nodes(), self.num_agents)\n+ list_of_random_nodes = self.random.sample(list(self.G), self.num_agents)\n \n # Create agents\n for i in range(self.num_agents):\ndiff --git a/examples/virus_on_network/virus_on_network/model.py b/examples/virus_on_network/virus_on_network/model.py\n--- a/examples/virus_on_network/virus_on_network/model.py\n+++ b/examples/virus_on_network/virus_on_network/model.py\n@@ -81,7 +81,7 @@\n self.grid.place_agent(a, node)\n \n # Infect some nodes\n- infected_nodes = self.random.sample(self.G.nodes(), self.initial_outbreak_size)\n+ infected_nodes = self.random.sample(list(self.G), self.initial_outbreak_size)\n for a in self.grid.get_cell_list_contents(infected_nodes):\n a.state = State.INFECTED\n", "issue": "random.sample with a set is removed in Python 3.11\nUsing [`random.sample`](https://docs.python.org/3/library/random.html#random.sample) with a set has been depreciated in Python 3.9 and was removed in Python 3.11.\r\n\r\n> Changed in version 3.11: The population must be a sequence. Automatic conversion of sets to lists is no longer supported.\r\n\r\nTwo cases of this function being used with a set have been detected by the CI in Mesa, both in example models:\r\n\r\n1. https://github.com/projectmesa/mesa/blob/5135be8759d9fdac551c5c6ed13e9ae5205bce3e/examples/boltzmann_wealth_model_network/boltzmann_wealth_model_network/model.py#L28\r\n2. https://github.com/projectmesa/mesa/blob/5135be8759d9fdac551c5c6ed13e9ae5205bce3e/examples/virus_on_network/virus_on_network/model.py#L84\r\n\r\nWhat would be the most efficient way to get `self.G.nodes()` in a sequence form (so probably list or tuple)? 
I feel like just putting it inside a `list()` isn't the proper way to do it.\n", "before_files": [{"content": "import math\nfrom enum import Enum\nimport networkx as nx\n\nfrom mesa import Agent, Model\nfrom mesa.time import RandomActivation\nfrom mesa.datacollection import DataCollector\nfrom mesa.space import NetworkGrid\n\n\nclass State(Enum):\n SUSCEPTIBLE = 0\n INFECTED = 1\n RESISTANT = 2\n\n\ndef number_state(model, state):\n return sum(1 for a in model.grid.get_all_cell_contents() if a.state is state)\n\n\ndef number_infected(model):\n return number_state(model, State.INFECTED)\n\n\ndef number_susceptible(model):\n return number_state(model, State.SUSCEPTIBLE)\n\n\ndef number_resistant(model):\n return number_state(model, State.RESISTANT)\n\n\nclass VirusOnNetwork(Model):\n \"\"\"A virus model with some number of agents\"\"\"\n\n def __init__(\n self,\n num_nodes=10,\n avg_node_degree=3,\n initial_outbreak_size=1,\n virus_spread_chance=0.4,\n virus_check_frequency=0.4,\n recovery_chance=0.3,\n gain_resistance_chance=0.5,\n ):\n\n self.num_nodes = num_nodes\n prob = avg_node_degree / self.num_nodes\n self.G = nx.erdos_renyi_graph(n=self.num_nodes, p=prob)\n self.grid = NetworkGrid(self.G)\n self.schedule = RandomActivation(self)\n self.initial_outbreak_size = (\n initial_outbreak_size if initial_outbreak_size <= num_nodes else num_nodes\n )\n self.virus_spread_chance = virus_spread_chance\n self.virus_check_frequency = virus_check_frequency\n self.recovery_chance = recovery_chance\n self.gain_resistance_chance = gain_resistance_chance\n\n self.datacollector = DataCollector(\n {\n \"Infected\": number_infected,\n \"Susceptible\": number_susceptible,\n \"Resistant\": number_resistant,\n }\n )\n\n # Create agents\n for i, node in enumerate(self.G.nodes()):\n a = VirusAgent(\n i,\n self,\n State.SUSCEPTIBLE,\n self.virus_spread_chance,\n self.virus_check_frequency,\n self.recovery_chance,\n self.gain_resistance_chance,\n )\n self.schedule.add(a)\n # Add the agent to the node\n self.grid.place_agent(a, node)\n\n # Infect some nodes\n infected_nodes = self.random.sample(self.G.nodes(), self.initial_outbreak_size)\n for a in self.grid.get_cell_list_contents(infected_nodes):\n a.state = State.INFECTED\n\n self.running = True\n self.datacollector.collect(self)\n\n def resistant_susceptible_ratio(self):\n try:\n return number_state(self, State.RESISTANT) / number_state(\n self, State.SUSCEPTIBLE\n )\n except ZeroDivisionError:\n return math.inf\n\n def step(self):\n self.schedule.step()\n # collect data\n self.datacollector.collect(self)\n\n def run_model(self, n):\n for i in range(n):\n self.step()\n\n\nclass VirusAgent(Agent):\n def __init__(\n self,\n unique_id,\n model,\n initial_state,\n virus_spread_chance,\n virus_check_frequency,\n recovery_chance,\n gain_resistance_chance,\n ):\n super().__init__(unique_id, model)\n\n self.state = initial_state\n\n self.virus_spread_chance = virus_spread_chance\n self.virus_check_frequency = virus_check_frequency\n self.recovery_chance = recovery_chance\n self.gain_resistance_chance = gain_resistance_chance\n\n def try_to_infect_neighbors(self):\n neighbors_nodes = self.model.grid.get_neighbors(self.pos, include_center=False)\n susceptible_neighbors = [\n agent\n for agent in self.model.grid.get_cell_list_contents(neighbors_nodes)\n if agent.state is State.SUSCEPTIBLE\n ]\n for a in susceptible_neighbors:\n if self.random.random() < self.virus_spread_chance:\n a.state = State.INFECTED\n\n def try_gain_resistance(self):\n if self.random.random() < 
self.gain_resistance_chance:\n self.state = State.RESISTANT\n\n def try_remove_infection(self):\n # Try to remove\n if self.random.random() < self.recovery_chance:\n # Success\n self.state = State.SUSCEPTIBLE\n self.try_gain_resistance()\n else:\n # Failed\n self.state = State.INFECTED\n\n def try_check_situation(self):\n if self.random.random() < self.virus_check_frequency:\n # Checking...\n if self.state is State.INFECTED:\n self.try_remove_infection()\n\n def step(self):\n if self.state is State.INFECTED:\n self.try_to_infect_neighbors()\n self.try_check_situation()\n", "path": "examples/virus_on_network/virus_on_network/model.py"}, {"content": "import mesa\nimport networkx as nx\n\n\ndef compute_gini(model):\n agent_wealths = [agent.wealth for agent in model.schedule.agents]\n x = sorted(agent_wealths)\n N = model.num_agents\n B = sum(xi * (N - i) for i, xi in enumerate(x)) / (N * sum(x))\n return 1 + (1 / N) - 2 * B\n\n\nclass BoltzmannWealthModelNetwork(mesa.Model):\n \"\"\"A model with some number of agents.\"\"\"\n\n def __init__(self, num_agents=7, num_nodes=10):\n\n self.num_agents = num_agents\n self.num_nodes = num_nodes if num_nodes >= self.num_agents else self.num_agents\n self.G = nx.erdos_renyi_graph(n=self.num_nodes, p=0.5)\n self.grid = mesa.space.NetworkGrid(self.G)\n self.schedule = mesa.time.RandomActivation(self)\n self.datacollector = mesa.DataCollector(\n model_reporters={\"Gini\": compute_gini},\n agent_reporters={\"Wealth\": lambda _: _.wealth},\n )\n\n list_of_random_nodes = self.random.sample(self.G.nodes(), self.num_agents)\n\n # Create agents\n for i in range(self.num_agents):\n a = MoneyAgent(i, self)\n self.schedule.add(a)\n # Add the agent to a random node\n self.grid.place_agent(a, list_of_random_nodes[i])\n\n self.running = True\n self.datacollector.collect(self)\n\n def step(self):\n self.schedule.step()\n # collect data\n self.datacollector.collect(self)\n\n def run_model(self, n):\n for i in range(n):\n self.step()\n\n\nclass MoneyAgent(mesa.Agent):\n \"\"\"An agent with fixed initial wealth.\"\"\"\n\n def __init__(self, unique_id, model):\n super().__init__(unique_id, model)\n self.wealth = 1\n\n def move(self):\n possible_steps = [\n node\n for node in self.model.grid.get_neighbors(self.pos, include_center=False)\n if self.model.grid.is_cell_empty(node)\n ]\n if len(possible_steps) > 0:\n new_position = self.random.choice(possible_steps)\n self.model.grid.move_agent(self, new_position)\n\n def give_money(self):\n\n neighbors_nodes = self.model.grid.get_neighbors(self.pos, include_center=False)\n neighbors = self.model.grid.get_cell_list_contents(neighbors_nodes)\n if len(neighbors) > 0:\n other = self.random.choice(neighbors)\n other.wealth += 1\n self.wealth -= 1\n\n def step(self):\n self.move()\n if self.wealth > 0:\n self.give_money()\n", "path": "examples/boltzmann_wealth_model_network/boltzmann_wealth_model_network/model.py"}]}
num_tokens_prompt: 3,071
num_tokens_diff: 312

problem_id: gh_patches_debug_25919
source: rasdani/github-patches
task_type: git_diff
in_source_id: archlinux__archinstall-823
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> mkinitcpio.conf generated incorrectly for AMDGPU. As the archwiki installation guide states [https://wiki.archlinux.org/title/AMDGPU#Specify_the_correct_module_order](https://wiki.archlinux.org/title/AMDGPU#Specify_the_correct_module_order), you must ensure that the amdgpu module is loaded before the radeon one: `MODULES=(amdgpu radeon)` Otherwise the DM will fail to start at boot. </issue> <code> [start of profiles/xorg.py] 1 # A system with "xorg" installed 2 3 import archinstall 4 import logging 5 6 is_top_level_profile = True 7 8 __description__ = 'Installs a minimal system as well as xorg and graphics drivers.' 9 10 __packages__ = [ 11 'dkms', 12 'xorg-server', 13 'xorg-xinit', 14 'nvidia-dkms', 15 *archinstall.lib.hardware.__packages__, 16 ] 17 18 19 def _prep_function(*args, **kwargs): 20 """ 21 Magic function called by the importing installer 22 before continuing any further. It also avoids executing any 23 other code in this stage. So it's a safe way to ask the user 24 for more input before any other installer steps start. 25 """ 26 27 archinstall.storage["gfx_driver_packages"] = archinstall.select_driver() 28 29 # TODO: Add language section and/or merge it with the locale selected 30 # earlier in for instance guided.py installer. 31 32 return True 33 34 35 # Ensures that this code only gets executed if executed 36 # through importlib.util.spec_from_file_location("xorg", "/somewhere/xorg.py") 37 # or through conventional import xorg 38 if __name__ == 'xorg': 39 try: 40 if "nvidia" in archinstall.storage.get("gfx_driver_packages", []): 41 if "linux-zen" in archinstall.storage['installation_session'].base_packages or "linux-lts" in archinstall.storage['installation_session'].base_packages: 42 for kernel in archinstall.storage['installation_session'].kernels: 43 archinstall.storage['installation_session'].add_additional_packages(f"{kernel}-headers") # Fixes https://github.com/archlinux/archinstall/issues/585 44 archinstall.storage['installation_session'].add_additional_packages("dkms") # I've had kernel regen fail if it wasn't installed before nvidia-dkms 45 archinstall.storage['installation_session'].add_additional_packages("xorg-server xorg-xinit nvidia-dkms") 46 else: 47 archinstall.storage['installation_session'].add_additional_packages(f"xorg-server xorg-xinit {' '.join(archinstall.storage.get('gfx_driver_packages', []))}") 48 else: 49 archinstall.storage['installation_session'].add_additional_packages(f"xorg-server xorg-xinit {' '.join(archinstall.storage.get('gfx_driver_packages', []))}") 50 except Exception as err: 51 archinstall.log(f"Could not handle nvidia and linuz-zen specific situations during xorg installation: {err}", level=logging.WARNING, fg="yellow") 52 archinstall.storage['installation_session'].add_additional_packages("xorg-server xorg-xinit") # Prep didn't run, so there's no driver to install 53 [end of profiles/xorg.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/profiles/xorg.py b/profiles/xorg.py --- a/profiles/xorg.py +++ b/profiles/xorg.py @@ -45,6 +45,17 @@ archinstall.storage['installation_session'].add_additional_packages("xorg-server xorg-xinit nvidia-dkms") else: archinstall.storage['installation_session'].add_additional_packages(f"xorg-server xorg-xinit {' '.join(archinstall.storage.get('gfx_driver_packages', []))}") + elif 'amdgpu' in archinstall.storage.get("gfx_driver_packages", []): + # The order of these two are important if amdgpu is installed #808 + if 'amdgpu' in archinstall.storage['installation_session'].MODULES: + archinstall.storage['installation_session'].MODULES.remove('amdgpu') + archinstall.storage['installation_session'].MODULES.append('amdgpu') + + if 'radeon' in archinstall.storage['installation_session'].MODULES: + archinstall.storage['installation_session'].MODULES.remove('radeon') + archinstall.storage['installation_session'].MODULES.append('radeon') + + archinstall.storage['installation_session'].add_additional_packages(f"xorg-server xorg-xinit {' '.join(archinstall.storage.get('gfx_driver_packages', []))}") else: archinstall.storage['installation_session'].add_additional_packages(f"xorg-server xorg-xinit {' '.join(archinstall.storage.get('gfx_driver_packages', []))}") except Exception as err:
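The remove-then-append idiom in the diff guarantees that `amdgpu` ends up ahead of `radeon` in `MODULES`, matching the wiki's required ordering, regardless of what the session already contained. A tiny sketch of the same idiom on a plain list; the starting contents are invented:

```python
# Sketch: remove/append keeps amdgpu before radeon whatever was there before.
MODULES = ["radeon", "crc32c-intel"]  # hypothetical pre-existing state

for module in ("amdgpu", "radeon"):
    if module in MODULES:
        MODULES.remove(module)
    MODULES.append(module)

print(MODULES)  # ['crc32c-intel', 'amdgpu', 'radeon']
```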
{"golden_diff": "diff --git a/profiles/xorg.py b/profiles/xorg.py\n--- a/profiles/xorg.py\n+++ b/profiles/xorg.py\n@@ -45,6 +45,17 @@\n \t\t\t\tarchinstall.storage['installation_session'].add_additional_packages(\"xorg-server xorg-xinit nvidia-dkms\")\n \t\t\telse:\n \t\t\t\tarchinstall.storage['installation_session'].add_additional_packages(f\"xorg-server xorg-xinit {' '.join(archinstall.storage.get('gfx_driver_packages', []))}\")\n+\t\telif 'amdgpu' in archinstall.storage.get(\"gfx_driver_packages\", []):\n+\t\t\t# The order of these two are important if amdgpu is installed #808\n+\t\t\tif 'amdgpu' in archinstall.storage['installation_session'].MODULES:\n+\t\t\t\tarchinstall.storage['installation_session'].MODULES.remove('amdgpu')\n+\t\t\tarchinstall.storage['installation_session'].MODULES.append('amdgpu')\n+\n+\t\t\tif 'radeon' in archinstall.storage['installation_session'].MODULES:\n+\t\t\t\tarchinstall.storage['installation_session'].MODULES.remove('radeon')\n+\t\t\tarchinstall.storage['installation_session'].MODULES.append('radeon')\n+\n+\t\t\tarchinstall.storage['installation_session'].add_additional_packages(f\"xorg-server xorg-xinit {' '.join(archinstall.storage.get('gfx_driver_packages', []))}\")\n \t\telse:\n \t\t\tarchinstall.storage['installation_session'].add_additional_packages(f\"xorg-server xorg-xinit {' '.join(archinstall.storage.get('gfx_driver_packages', []))}\")\n \texcept Exception as err:\n", "issue": "mkinitcpio.conf generated incorrectly for AMDGPU.\nAs the archwiki installation guide states [https://wiki.archlinux.org/title/AMDGPU#Specify_the_correct_module_order](https://wiki.archlinux.org/title/AMDGPU#Specify_the_correct_module_order), you must ensure that the amdgpu module is loaded before the radeon one: `MODULES=(amdgpu radeon)`\r\nOtherwise the DM will fail to start at boot.\n", "before_files": [{"content": "# A system with \"xorg\" installed\n\nimport archinstall\nimport logging\n\nis_top_level_profile = True\n\n__description__ = 'Installs a minimal system as well as xorg and graphics drivers.'\n\n__packages__ = [\n\t'dkms',\n\t'xorg-server',\n\t'xorg-xinit',\n\t'nvidia-dkms',\n\t*archinstall.lib.hardware.__packages__,\n]\n\n\ndef _prep_function(*args, **kwargs):\n\t\"\"\"\n\tMagic function called by the importing installer\n\tbefore continuing any further. It also avoids executing any\n\tother code in this stage. 
So it's a safe way to ask the user\n\tfor more input before any other installer steps start.\n\t\"\"\"\n\n\tarchinstall.storage[\"gfx_driver_packages\"] = archinstall.select_driver()\n\n\t# TODO: Add language section and/or merge it with the locale selected\n\t# earlier in for instance guided.py installer.\n\n\treturn True\n\n\n# Ensures that this code only gets executed if executed\n# through importlib.util.spec_from_file_location(\"xorg\", \"/somewhere/xorg.py\")\n# or through conventional import xorg\nif __name__ == 'xorg':\n\ttry:\n\t\tif \"nvidia\" in archinstall.storage.get(\"gfx_driver_packages\", []):\n\t\t\tif \"linux-zen\" in archinstall.storage['installation_session'].base_packages or \"linux-lts\" in archinstall.storage['installation_session'].base_packages:\n\t\t\t\tfor kernel in archinstall.storage['installation_session'].kernels:\n\t\t\t\t\tarchinstall.storage['installation_session'].add_additional_packages(f\"{kernel}-headers\") # Fixes https://github.com/archlinux/archinstall/issues/585\n\t\t\t\tarchinstall.storage['installation_session'].add_additional_packages(\"dkms\") # I've had kernel regen fail if it wasn't installed before nvidia-dkms\n\t\t\t\tarchinstall.storage['installation_session'].add_additional_packages(\"xorg-server xorg-xinit nvidia-dkms\")\n\t\t\telse:\n\t\t\t\tarchinstall.storage['installation_session'].add_additional_packages(f\"xorg-server xorg-xinit {' '.join(archinstall.storage.get('gfx_driver_packages', []))}\")\n\t\telse:\n\t\t\tarchinstall.storage['installation_session'].add_additional_packages(f\"xorg-server xorg-xinit {' '.join(archinstall.storage.get('gfx_driver_packages', []))}\")\n\texcept Exception as err:\n\t\tarchinstall.log(f\"Could not handle nvidia and linuz-zen specific situations during xorg installation: {err}\", level=logging.WARNING, fg=\"yellow\")\n\t\tarchinstall.storage['installation_session'].add_additional_packages(\"xorg-server xorg-xinit\") # Prep didn't run, so there's no driver to install\n", "path": "profiles/xorg.py"}]}
num_tokens_prompt: 1,307
num_tokens_diff: 343

problem_id: gh_patches_debug_2112
source: rasdani/github-patches
task_type: git_diff
in_source_id: Qiskit__qiskit-1940
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> rzz gate <!-- ⚠️ If you do not respect this template, your issue will be closed --> <!-- ⚠️ Make sure to browse the opened and closed issues --> ### Information - **Qiskit Terra version**: 0.7.2 - **Python version**: 3.6.6 - **Operating system**: Windows 10 ### What is the current behavior? rzz gate appears to give incorrect results ### Steps to reproduce the problem rzz gate rule defined in https://github.com/Qiskit/qiskit-terra/blob/master/qiskit/extensions/standard/rzz.py ``` CnotGate(q[0], q[1]), U1Gate(self.params[0], q[0]), CnotGate(q[0], q[1]) ``` ### What is the expected behavior? I think it should be ``` CnotGate(q[0], q[1]), U1Gate(self.params[0], q[1]), CnotGate(q[0], q[1]) ``` the u1 phase should be on the target instead of control ### Suggested solutions modify rzz gate definition to give the right behavior. </issue> <code> [start of qiskit/extensions/standard/rzz.py] 1 # -*- coding: utf-8 -*- 2 3 # Copyright 2017, IBM. 4 # 5 # This source code is licensed under the Apache License, Version 2.0 found in 6 # the LICENSE.txt file in the root directory of this source tree. 7 8 """ 9 two-qubit ZZ-rotation gate. 10 """ 11 from qiskit.circuit import CompositeGate 12 from qiskit.circuit import Gate 13 from qiskit.circuit import QuantumCircuit 14 from qiskit.circuit import QuantumRegister 15 from qiskit.circuit.decorators import _op_expand 16 from qiskit.dagcircuit import DAGCircuit 17 from qiskit.extensions.standard.u1 import U1Gate 18 from qiskit.extensions.standard.cx import CnotGate 19 20 21 class RZZGate(Gate): 22 """Two-qubit ZZ-rotation gate.""" 23 24 def __init__(self, theta, ctl, tgt, circ=None): 25 """Create new rzz gate.""" 26 super().__init__("rzz", [theta], [ctl, tgt], circ) 27 28 def _define_decompositions(self): 29 """ 30 gate rzz(theta) a, b { cx a, b; u1(theta) b; cx a, b; } 31 """ 32 decomposition = DAGCircuit() 33 q = QuantumRegister(2, "q") 34 decomposition.add_qreg(q) 35 rule = [ 36 CnotGate(q[0], q[1]), 37 U1Gate(self.params[0], q[0]), 38 CnotGate(q[0], q[1]) 39 ] 40 for inst in rule: 41 decomposition.apply_operation_back(inst) 42 self._decompositions = [decomposition] 43 44 def inverse(self): 45 """Invert this gate.""" 46 self.params[0] = -self.params[0] 47 self._decompositions = None 48 return self 49 50 def reapply(self, circ): 51 """Reapply this gate to corresponding qubits in circ.""" 52 self._modifiers(circ.rzz(self.params[0], self.qargs[0], self.qargs[1])) 53 54 55 @_op_expand(2, broadcastable=[False, False]) 56 def rzz(self, theta, qubit1, qubit2): 57 """Apply RZZ to circuit.""" 58 self._check_qubit(qubit1) 59 self._check_qubit(qubit2) 60 self._check_dups([qubit1, qubit2]) 61 return self._attach(RZZGate(theta, qubit1, qubit2, self)) 62 63 64 # Add to QuantumCircuit and CompositeGate classes 65 QuantumCircuit.rzz = rzz 66 CompositeGate.rzz = rzz 67 [end of qiskit/extensions/standard/rzz.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/qiskit/extensions/standard/rzz.py b/qiskit/extensions/standard/rzz.py --- a/qiskit/extensions/standard/rzz.py +++ b/qiskit/extensions/standard/rzz.py @@ -34,7 +34,7 @@ decomposition.add_qreg(q) rule = [ CnotGate(q[0], q[1]), - U1Gate(self.params[0], q[0]), + U1Gate(self.params[0], q[1]), CnotGate(q[0], q[1]) ] for inst in rule:
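Moving the `U1` onto the target qubit is what makes the three-gate sequence a genuine ZZ interaction: the phase is acquired exactly when the two qubits differ, whereas with the phase on the control it collapses to a single-qubit rotation. A dependency-free bookkeeping sketch that only tracks which basis states pick up the `exp(i*theta)` phase, deliberately avoiding any qubit-ordering convention:

```python
# Sketch: which computational basis states pick up the u1 phase.
from itertools import product

for q0, q1 in product((0, 1), repeat=2):
    target_after_cx = q1 ^ q0
    fixed = bool(target_after_cx)  # u1 on q[1]: phase iff q0 != q1 -> ZZ-type coupling
    buggy = bool(q0)               # u1 on q[0]: phase iff q0 == 1 -> single-qubit phase only
    print((q0, q1), fixed, buggy)
```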
{"golden_diff": "diff --git a/qiskit/extensions/standard/rzz.py b/qiskit/extensions/standard/rzz.py\n--- a/qiskit/extensions/standard/rzz.py\n+++ b/qiskit/extensions/standard/rzz.py\n@@ -34,7 +34,7 @@\n decomposition.add_qreg(q)\n rule = [\n CnotGate(q[0], q[1]),\n- U1Gate(self.params[0], q[0]),\n+ U1Gate(self.params[0], q[1]),\n CnotGate(q[0], q[1])\n ]\n for inst in rule:\n", "issue": "rzz gate\n<!-- \u26a0\ufe0f If you do not respect this template, your issue will be closed -->\r\n<!-- \u26a0\ufe0f Make sure to browse the opened and closed issues -->\r\n\r\n### Information\r\n\r\n- **Qiskit Terra version**: 0.7.2\r\n- **Python version**: 3.6.6\r\n- **Operating system**: Windows 10\r\n\r\n### What is the current behavior?\r\n\r\nrzz gate appears to give incorrect results\r\n\r\n### Steps to reproduce the problem\r\n\r\nrzz gate rule defined in https://github.com/Qiskit/qiskit-terra/blob/master/qiskit/extensions/standard/rzz.py\r\n\r\n```\r\n CnotGate(q[0], q[1]),\r\n U1Gate(self.params[0], q[0]),\r\n CnotGate(q[0], q[1])\r\n```\r\n\r\n### What is the expected behavior?\r\n\r\nI think it should be\r\n```\r\n CnotGate(q[0], q[1]),\r\n U1Gate(self.params[0], q[1]),\r\n CnotGate(q[0], q[1])\r\n```\r\nthe u1 phase should be on the target instead of control\r\n\r\n### Suggested solutions\r\n\r\nmodify rzz gate definition to give the right behavior.\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright 2017, IBM.\n#\n# This source code is licensed under the Apache License, Version 2.0 found in\n# the LICENSE.txt file in the root directory of this source tree.\n\n\"\"\"\ntwo-qubit ZZ-rotation gate.\n\"\"\"\nfrom qiskit.circuit import CompositeGate\nfrom qiskit.circuit import Gate\nfrom qiskit.circuit import QuantumCircuit\nfrom qiskit.circuit import QuantumRegister\nfrom qiskit.circuit.decorators import _op_expand\nfrom qiskit.dagcircuit import DAGCircuit\nfrom qiskit.extensions.standard.u1 import U1Gate\nfrom qiskit.extensions.standard.cx import CnotGate\n\n\nclass RZZGate(Gate):\n \"\"\"Two-qubit ZZ-rotation gate.\"\"\"\n\n def __init__(self, theta, ctl, tgt, circ=None):\n \"\"\"Create new rzz gate.\"\"\"\n super().__init__(\"rzz\", [theta], [ctl, tgt], circ)\n\n def _define_decompositions(self):\n \"\"\"\n gate rzz(theta) a, b { cx a, b; u1(theta) b; cx a, b; }\n \"\"\"\n decomposition = DAGCircuit()\n q = QuantumRegister(2, \"q\")\n decomposition.add_qreg(q)\n rule = [\n CnotGate(q[0], q[1]),\n U1Gate(self.params[0], q[0]),\n CnotGate(q[0], q[1])\n ]\n for inst in rule:\n decomposition.apply_operation_back(inst)\n self._decompositions = [decomposition]\n\n def inverse(self):\n \"\"\"Invert this gate.\"\"\"\n self.params[0] = -self.params[0]\n self._decompositions = None\n return self\n\n def reapply(self, circ):\n \"\"\"Reapply this gate to corresponding qubits in circ.\"\"\"\n self._modifiers(circ.rzz(self.params[0], self.qargs[0], self.qargs[1]))\n\n\n@_op_expand(2, broadcastable=[False, False])\ndef rzz(self, theta, qubit1, qubit2):\n \"\"\"Apply RZZ to circuit.\"\"\"\n self._check_qubit(qubit1)\n self._check_qubit(qubit2)\n self._check_dups([qubit1, qubit2])\n return self._attach(RZZGate(theta, qubit1, qubit2, self))\n\n\n# Add to QuantumCircuit and CompositeGate classes\nQuantumCircuit.rzz = rzz\nCompositeGate.rzz = rzz\n", "path": "qiskit/extensions/standard/rzz.py"}]}
num_tokens_prompt: 1,489
num_tokens_diff: 131

problem_id: gh_patches_debug_10367
source: rasdani/github-patches
task_type: git_diff
in_source_id: saleor__saleor-705
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> are product class labels switched? Hello, I'm looking at the new product class feature. About the labels ... should not be the other way around? https://github.com/mirumee/saleor/blob/master/saleor/dashboard/product/forms.py#L53-L59 ![schermata del 2016-12-29 17-05-55](https://cloud.githubusercontent.com/assets/650691/21547927/6e6023b4-cde9-11e6-92a3-b1b16fa41ab1.png) ![schermata del 2016-12-29 17-06-31](https://cloud.githubusercontent.com/assets/650691/21547913/592f5636-cde9-11e6-83b3-40160b51b2b5.png) </issue> <code> [start of saleor/dashboard/product/forms.py] 1 from __future__ import unicode_literals 2 3 from django import forms 4 from django.db import transaction 5 from django.db.models import Count 6 from django.forms.models import ModelChoiceIterator, inlineformset_factory 7 from django.utils.encoding import smart_text 8 from django.utils.translation import pgettext_lazy 9 10 from ...product.models import (AttributeChoiceValue, Product, ProductAttribute, 11 ProductClass, ProductImage, ProductVariant, 12 Stock, StockLocation, VariantImage) 13 from .widgets import ImagePreviewWidget 14 from ...search import index as search_index 15 16 17 class ProductClassSelectorForm(forms.Form): 18 MAX_RADIO_SELECT_ITEMS = 5 19 20 def __init__(self, *args, **kwargs): 21 product_classes = kwargs.pop('product_classes', []) 22 super(ProductClassSelectorForm, self).__init__(*args, **kwargs) 23 choices = [(obj.pk, obj.name) for obj in product_classes] 24 if len(product_classes) > self.MAX_RADIO_SELECT_ITEMS: 25 widget = forms.Select 26 else: 27 widget = forms.RadioSelect 28 self.fields['product_cls'] = forms.ChoiceField( 29 label=pgettext_lazy('Product class form label', 'Product type'), 30 choices=choices, widget=widget) 31 32 33 class StockForm(forms.ModelForm): 34 class Meta: 35 model = Stock 36 exclude = ['quantity_allocated'] 37 38 def __init__(self, *args, **kwargs): 39 product = kwargs.pop('product') 40 super(StockForm, self).__init__(*args, **kwargs) 41 if not product.product_class.has_variants: 42 initial = product.variants.first() 43 else: 44 initial = None 45 self.fields['variant'] = forms.ModelChoiceField( 46 queryset=product.variants, initial=initial) 47 48 49 class ProductClassForm(forms.ModelForm): 50 class Meta: 51 model = ProductClass 52 exclude = [] 53 labels = { 54 'product_attributes': pgettext_lazy( 55 'Product class form label', 56 'Attributes specific to each variant'), 57 'variant_attributes': pgettext_lazy( 58 'Product class form label', 59 'Attributes common to all variants')} 60 61 def clean(self): 62 data = super(ProductClassForm, self).clean() 63 has_variants = self.cleaned_data['has_variants'] 64 product_attr = set(self.cleaned_data['product_attributes']) 65 variant_attr = set(self.cleaned_data['variant_attributes']) 66 if not has_variants and len(variant_attr) > 0: 67 msg = pgettext_lazy( 68 'Product Class Errors', 69 'Product variants are disabled.') 70 self.add_error('variant_attributes', msg) 71 if len(product_attr & variant_attr) > 0: 72 msg = pgettext_lazy( 73 'Product Class Errors', 74 'A single attribute can\'t belong to both a product ' 75 'and its variant.') 76 self.add_error('variant_attributes', msg) 77 78 if self.instance.pk: 79 variants_changed = not (self.fields['has_variants'].initial == 80 has_variants) 81 if variants_changed: 82 query = self.instance.products.all() 83 query = query.annotate(variants_counter=Count('variants')) 84 query = 
query.filter(variants_counter__gt=1) 85 if query.exists(): 86 msg = pgettext_lazy( 87 'Product Class Errors', 88 'Some products of this type have more than ' 89 'one variant.') 90 self.add_error('has_variants', msg) 91 return data 92 93 94 class ProductForm(forms.ModelForm): 95 96 class Meta: 97 model = Product 98 exclude = ['attributes', 'product_class'] 99 100 def __init__(self, *args, **kwargs): 101 self.product_attributes = [] 102 super(ProductForm, self).__init__(*args, **kwargs) 103 field = self.fields['name'] 104 field.widget.attrs['placeholder'] = pgettext_lazy( 105 'Product form labels', 'Give your awesome product a name') 106 field = self.fields['categories'] 107 field.widget.attrs['data-placeholder'] = pgettext_lazy( 108 'Product form labels', 'Search') 109 product_class = self.instance.product_class 110 self.product_attributes = product_class.product_attributes.all() 111 self.product_attributes = self.product_attributes.prefetch_related( 112 'values') 113 self.prepare_fields_for_attributes() 114 115 def prepare_fields_for_attributes(self): 116 for attribute in self.product_attributes: 117 field_defaults = { 118 'label': attribute.display, 119 'required': False, 120 'initial': self.instance.get_attribute(attribute.pk)} 121 if attribute.has_values(): 122 field = CachingModelChoiceField( 123 queryset=attribute.values.all(), **field_defaults) 124 else: 125 field = forms.CharField(**field_defaults) 126 self.fields[attribute.get_formfield_name()] = field 127 128 def iter_attribute_fields(self): 129 for attr in self.product_attributes: 130 yield self[attr.get_formfield_name()] 131 132 def save(self, commit=True): 133 attributes = {} 134 for attr in self.product_attributes: 135 value = self.cleaned_data.pop(attr.get_formfield_name()) 136 if isinstance(value, AttributeChoiceValue): 137 attributes[smart_text(attr.pk)] = smart_text(value.pk) 138 else: 139 attributes[smart_text(attr.pk)] = value 140 self.instance.attributes = attributes 141 instance = super(ProductForm, self).save(commit=commit) 142 search_index.insert_or_update_object(instance) 143 return instance 144 145 146 class ProductVariantForm(forms.ModelForm): 147 class Meta: 148 model = ProductVariant 149 exclude = ['attributes', 'product', 'images'] 150 151 def __init__(self, *args, **kwargs): 152 super(ProductVariantForm, self).__init__(*args, **kwargs) 153 if self.instance.product.pk: 154 self.fields['price_override'].widget.attrs[ 155 'placeholder'] = self.instance.product.price.gross 156 self.fields['weight_override'].widget.attrs[ 157 'placeholder'] = self.instance.product.weight 158 159 160 class CachingModelChoiceIterator(ModelChoiceIterator): 161 def __iter__(self): 162 if self.field.empty_label is not None: 163 yield ('', self.field.empty_label) 164 for obj in self.queryset: 165 yield self.choice(obj) 166 167 168 class CachingModelChoiceField(forms.ModelChoiceField): 169 def _get_choices(self): 170 if hasattr(self, '_choices'): 171 return self._choices 172 return CachingModelChoiceIterator(self) 173 choices = property(_get_choices, forms.ChoiceField._set_choices) 174 175 176 class VariantAttributeForm(forms.ModelForm): 177 class Meta: 178 model = ProductVariant 179 fields = [] 180 181 def __init__(self, *args, **kwargs): 182 super(VariantAttributeForm, self).__init__(*args, **kwargs) 183 attrs = self.instance.product.product_class.variant_attributes.all() 184 self.available_attrs = attrs.prefetch_related('values') 185 for attr in self.available_attrs: 186 field_defaults = {'label': attr.display, 187 'required': True, 
188 'initial': self.instance.get_attribute(attr.pk)} 189 if attr.has_values(): 190 field = CachingModelChoiceField( 191 queryset=attr.values.all(), **field_defaults) 192 else: 193 field = forms.CharField(**field_defaults) 194 self.fields[attr.get_formfield_name()] = field 195 196 def save(self, commit=True): 197 attributes = {} 198 for attr in self.available_attrs: 199 value = self.cleaned_data.pop(attr.get_formfield_name()) 200 if isinstance(value, AttributeChoiceValue): 201 attributes[smart_text(attr.pk)] = smart_text(value.pk) 202 else: 203 attributes[smart_text(attr.pk)] = value 204 self.instance.attributes = attributes 205 return super(VariantAttributeForm, self).save(commit=commit) 206 207 208 class VariantBulkDeleteForm(forms.Form): 209 items = forms.ModelMultipleChoiceField(queryset=ProductVariant.objects) 210 211 def delete(self): 212 items = ProductVariant.objects.filter( 213 pk__in=self.cleaned_data['items']) 214 items.delete() 215 216 217 class StockBulkDeleteForm(forms.Form): 218 items = forms.ModelMultipleChoiceField(queryset=Stock.objects) 219 220 def delete(self): 221 items = Stock.objects.filter(pk__in=self.cleaned_data['items']) 222 items.delete() 223 224 225 class ProductImageForm(forms.ModelForm): 226 variants = forms.ModelMultipleChoiceField( 227 queryset=ProductVariant.objects.none(), 228 widget=forms.CheckboxSelectMultiple, required=False) 229 230 class Meta: 231 model = ProductImage 232 exclude = ('product', 'order') 233 234 def __init__(self, *args, **kwargs): 235 super(ProductImageForm, self).__init__(*args, **kwargs) 236 show_variants = self.instance.product.product_class.has_variants 237 if self.instance.product and show_variants: 238 variants = self.fields['variants'] 239 variants.queryset = self.instance.product.variants.all() 240 variants.initial = self.instance.variant_images.values_list( 241 'variant', flat=True) 242 if self.instance.image: 243 self.fields['image'].widget = ImagePreviewWidget() 244 245 @transaction.atomic 246 def save_variant_images(self, instance): 247 variant_images = [] 248 # Clean up old mapping 249 instance.variant_images.all().delete() 250 for variant in self.cleaned_data['variants']: 251 variant_images.append( 252 VariantImage(variant=variant, image=instance)) 253 VariantImage.objects.bulk_create(variant_images) 254 255 def save(self, commit=True): 256 instance = super(ProductImageForm, self).save(commit=commit) 257 self.save_variant_images(instance) 258 return instance 259 260 261 class ProductAttributeForm(forms.ModelForm): 262 class Meta: 263 model = ProductAttribute 264 exclude = [] 265 266 267 class StockLocationForm(forms.ModelForm): 268 class Meta: 269 model = StockLocation 270 exclude = [] 271 272 273 AttributeChoiceValueFormset = inlineformset_factory( 274 ProductAttribute, AttributeChoiceValue, exclude=(), extra=1) 275 [end of saleor/dashboard/product/forms.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/saleor/dashboard/product/forms.py b/saleor/dashboard/product/forms.py --- a/saleor/dashboard/product/forms.py +++ b/saleor/dashboard/product/forms.py @@ -51,10 +51,10 @@ model = ProductClass exclude = [] labels = { - 'product_attributes': pgettext_lazy( + 'variant_attributes': pgettext_lazy( 'Product class form label', 'Attributes specific to each variant'), - 'variant_attributes': pgettext_lazy( + 'product_attributes': pgettext_lazy( 'Product class form label', 'Attributes common to all variants')}
{"golden_diff": "diff --git a/saleor/dashboard/product/forms.py b/saleor/dashboard/product/forms.py\n--- a/saleor/dashboard/product/forms.py\n+++ b/saleor/dashboard/product/forms.py\n@@ -51,10 +51,10 @@\n model = ProductClass\n exclude = []\n labels = {\n- 'product_attributes': pgettext_lazy(\n+ 'variant_attributes': pgettext_lazy(\n 'Product class form label',\n 'Attributes specific to each variant'),\n- 'variant_attributes': pgettext_lazy(\n+ 'product_attributes': pgettext_lazy(\n 'Product class form label',\n 'Attributes common to all variants')}\n", "issue": "are product class labels switched?\nHello,\r\nI'm looking at the new product class feature.\r\n\r\nAbout the labels ... should not be the other way around?\r\nhttps://github.com/mirumee/saleor/blob/master/saleor/dashboard/product/forms.py#L53-L59\r\n\r\n![schermata del 2016-12-29 17-05-55](https://cloud.githubusercontent.com/assets/650691/21547927/6e6023b4-cde9-11e6-92a3-b1b16fa41ab1.png)\r\n\r\n![schermata del 2016-12-29 17-06-31](https://cloud.githubusercontent.com/assets/650691/21547913/592f5636-cde9-11e6-83b3-40160b51b2b5.png)\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nfrom django import forms\nfrom django.db import transaction\nfrom django.db.models import Count\nfrom django.forms.models import ModelChoiceIterator, inlineformset_factory\nfrom django.utils.encoding import smart_text\nfrom django.utils.translation import pgettext_lazy\n\nfrom ...product.models import (AttributeChoiceValue, Product, ProductAttribute,\n ProductClass, ProductImage, ProductVariant,\n Stock, StockLocation, VariantImage)\nfrom .widgets import ImagePreviewWidget\nfrom ...search import index as search_index\n\n\nclass ProductClassSelectorForm(forms.Form):\n MAX_RADIO_SELECT_ITEMS = 5\n\n def __init__(self, *args, **kwargs):\n product_classes = kwargs.pop('product_classes', [])\n super(ProductClassSelectorForm, self).__init__(*args, **kwargs)\n choices = [(obj.pk, obj.name) for obj in product_classes]\n if len(product_classes) > self.MAX_RADIO_SELECT_ITEMS:\n widget = forms.Select\n else:\n widget = forms.RadioSelect\n self.fields['product_cls'] = forms.ChoiceField(\n label=pgettext_lazy('Product class form label', 'Product type'),\n choices=choices, widget=widget)\n\n\nclass StockForm(forms.ModelForm):\n class Meta:\n model = Stock\n exclude = ['quantity_allocated']\n\n def __init__(self, *args, **kwargs):\n product = kwargs.pop('product')\n super(StockForm, self).__init__(*args, **kwargs)\n if not product.product_class.has_variants:\n initial = product.variants.first()\n else:\n initial = None\n self.fields['variant'] = forms.ModelChoiceField(\n queryset=product.variants, initial=initial)\n\n\nclass ProductClassForm(forms.ModelForm):\n class Meta:\n model = ProductClass\n exclude = []\n labels = {\n 'product_attributes': pgettext_lazy(\n 'Product class form label',\n 'Attributes specific to each variant'),\n 'variant_attributes': pgettext_lazy(\n 'Product class form label',\n 'Attributes common to all variants')}\n\n def clean(self):\n data = super(ProductClassForm, self).clean()\n has_variants = self.cleaned_data['has_variants']\n product_attr = set(self.cleaned_data['product_attributes'])\n variant_attr = set(self.cleaned_data['variant_attributes'])\n if not has_variants and len(variant_attr) > 0:\n msg = pgettext_lazy(\n 'Product Class Errors',\n 'Product variants are disabled.')\n self.add_error('variant_attributes', msg)\n if len(product_attr & variant_attr) > 0:\n msg = pgettext_lazy(\n 'Product 
Class Errors',\n 'A single attribute can\\'t belong to both a product '\n 'and its variant.')\n self.add_error('variant_attributes', msg)\n\n if self.instance.pk:\n variants_changed = not (self.fields['has_variants'].initial ==\n has_variants)\n if variants_changed:\n query = self.instance.products.all()\n query = query.annotate(variants_counter=Count('variants'))\n query = query.filter(variants_counter__gt=1)\n if query.exists():\n msg = pgettext_lazy(\n 'Product Class Errors',\n 'Some products of this type have more than '\n 'one variant.')\n self.add_error('has_variants', msg)\n return data\n\n\nclass ProductForm(forms.ModelForm):\n\n class Meta:\n model = Product\n exclude = ['attributes', 'product_class']\n\n def __init__(self, *args, **kwargs):\n self.product_attributes = []\n super(ProductForm, self).__init__(*args, **kwargs)\n field = self.fields['name']\n field.widget.attrs['placeholder'] = pgettext_lazy(\n 'Product form labels', 'Give your awesome product a name')\n field = self.fields['categories']\n field.widget.attrs['data-placeholder'] = pgettext_lazy(\n 'Product form labels', 'Search')\n product_class = self.instance.product_class\n self.product_attributes = product_class.product_attributes.all()\n self.product_attributes = self.product_attributes.prefetch_related(\n 'values')\n self.prepare_fields_for_attributes()\n\n def prepare_fields_for_attributes(self):\n for attribute in self.product_attributes:\n field_defaults = {\n 'label': attribute.display,\n 'required': False,\n 'initial': self.instance.get_attribute(attribute.pk)}\n if attribute.has_values():\n field = CachingModelChoiceField(\n queryset=attribute.values.all(), **field_defaults)\n else:\n field = forms.CharField(**field_defaults)\n self.fields[attribute.get_formfield_name()] = field\n\n def iter_attribute_fields(self):\n for attr in self.product_attributes:\n yield self[attr.get_formfield_name()]\n\n def save(self, commit=True):\n attributes = {}\n for attr in self.product_attributes:\n value = self.cleaned_data.pop(attr.get_formfield_name())\n if isinstance(value, AttributeChoiceValue):\n attributes[smart_text(attr.pk)] = smart_text(value.pk)\n else:\n attributes[smart_text(attr.pk)] = value\n self.instance.attributes = attributes\n instance = super(ProductForm, self).save(commit=commit)\n search_index.insert_or_update_object(instance)\n return instance\n\n\nclass ProductVariantForm(forms.ModelForm):\n class Meta:\n model = ProductVariant\n exclude = ['attributes', 'product', 'images']\n\n def __init__(self, *args, **kwargs):\n super(ProductVariantForm, self).__init__(*args, **kwargs)\n if self.instance.product.pk:\n self.fields['price_override'].widget.attrs[\n 'placeholder'] = self.instance.product.price.gross\n self.fields['weight_override'].widget.attrs[\n 'placeholder'] = self.instance.product.weight\n\n\nclass CachingModelChoiceIterator(ModelChoiceIterator):\n def __iter__(self):\n if self.field.empty_label is not None:\n yield ('', self.field.empty_label)\n for obj in self.queryset:\n yield self.choice(obj)\n\n\nclass CachingModelChoiceField(forms.ModelChoiceField):\n def _get_choices(self):\n if hasattr(self, '_choices'):\n return self._choices\n return CachingModelChoiceIterator(self)\n choices = property(_get_choices, forms.ChoiceField._set_choices)\n\n\nclass VariantAttributeForm(forms.ModelForm):\n class Meta:\n model = ProductVariant\n fields = []\n\n def __init__(self, *args, **kwargs):\n super(VariantAttributeForm, self).__init__(*args, **kwargs)\n attrs = 
self.instance.product.product_class.variant_attributes.all()\n self.available_attrs = attrs.prefetch_related('values')\n for attr in self.available_attrs:\n field_defaults = {'label': attr.display,\n 'required': True,\n 'initial': self.instance.get_attribute(attr.pk)}\n if attr.has_values():\n field = CachingModelChoiceField(\n queryset=attr.values.all(), **field_defaults)\n else:\n field = forms.CharField(**field_defaults)\n self.fields[attr.get_formfield_name()] = field\n\n def save(self, commit=True):\n attributes = {}\n for attr in self.available_attrs:\n value = self.cleaned_data.pop(attr.get_formfield_name())\n if isinstance(value, AttributeChoiceValue):\n attributes[smart_text(attr.pk)] = smart_text(value.pk)\n else:\n attributes[smart_text(attr.pk)] = value\n self.instance.attributes = attributes\n return super(VariantAttributeForm, self).save(commit=commit)\n\n\nclass VariantBulkDeleteForm(forms.Form):\n items = forms.ModelMultipleChoiceField(queryset=ProductVariant.objects)\n\n def delete(self):\n items = ProductVariant.objects.filter(\n pk__in=self.cleaned_data['items'])\n items.delete()\n\n\nclass StockBulkDeleteForm(forms.Form):\n items = forms.ModelMultipleChoiceField(queryset=Stock.objects)\n\n def delete(self):\n items = Stock.objects.filter(pk__in=self.cleaned_data['items'])\n items.delete()\n\n\nclass ProductImageForm(forms.ModelForm):\n variants = forms.ModelMultipleChoiceField(\n queryset=ProductVariant.objects.none(),\n widget=forms.CheckboxSelectMultiple, required=False)\n\n class Meta:\n model = ProductImage\n exclude = ('product', 'order')\n\n def __init__(self, *args, **kwargs):\n super(ProductImageForm, self).__init__(*args, **kwargs)\n show_variants = self.instance.product.product_class.has_variants\n if self.instance.product and show_variants:\n variants = self.fields['variants']\n variants.queryset = self.instance.product.variants.all()\n variants.initial = self.instance.variant_images.values_list(\n 'variant', flat=True)\n if self.instance.image:\n self.fields['image'].widget = ImagePreviewWidget()\n\n @transaction.atomic\n def save_variant_images(self, instance):\n variant_images = []\n # Clean up old mapping\n instance.variant_images.all().delete()\n for variant in self.cleaned_data['variants']:\n variant_images.append(\n VariantImage(variant=variant, image=instance))\n VariantImage.objects.bulk_create(variant_images)\n\n def save(self, commit=True):\n instance = super(ProductImageForm, self).save(commit=commit)\n self.save_variant_images(instance)\n return instance\n\n\nclass ProductAttributeForm(forms.ModelForm):\n class Meta:\n model = ProductAttribute\n exclude = []\n\n\nclass StockLocationForm(forms.ModelForm):\n class Meta:\n model = StockLocation\n exclude = []\n\n\nAttributeChoiceValueFormset = inlineformset_factory(\n ProductAttribute, AttributeChoiceValue, exclude=(), extra=1)\n", "path": "saleor/dashboard/product/forms.py"}]}
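The golden diff above is a pure label swap in `ProductClassForm.Meta.labels`. Below is a minimal regression check one might pair with it — the test name is hypothetical, and it assumes Django is fully set up (settings configured and apps loaded, e.g. via pytest-django) so the dashboard forms module can be imported.

```python
# Hypothetical regression test for the swapped labels; requires a configured
# Django environment before the import below will work.
from saleor.dashboard.product.forms import ProductClassForm


def test_product_class_labels_match_field_semantics():
    labels = ProductClassForm.Meta.labels
    # After the patch the wording lines up with what each field controls.
    assert "common to all variants" in str(labels["product_attributes"])
    assert "specific to each variant" in str(labels["variant_attributes"])
```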
num_tokens_prompt: 3,511
num_tokens_diff: 138

problem_id: gh_patches_debug_35923
source: rasdani/github-patches
task_type: git_diff
in_source_id: weecology__retriever-933
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Some new datasets failing on 2.0.0 Two or three of the new datasets that got added today (hooray!) appearing to having issues on `2.0.0`. ``` ethan@croryx:~$ pip install retriever --upgrade ... ethan@croryx:~$ retriever reset all ... ethan@croryx:~$ retriever update Downloading scripts... Download Progress: [##############################] 100.00% The retriever is up-to-date Failed to load script: socean_diet_data (/home/ethan/.retriever/scripts/) Exception: 'main' object has no attribute 'keywords' Failed to load script: flensburg_food_web (/home/ethan/.retriever/scripts/) Exception: 'main' object has no attribute 'keywords' ethan@croryx:~$ retriever ls Failed to load script: socean_diet_data (/home/ethan/.retriever/scripts/) Exception: 'main' object has no attribute 'keywords' Failed to load script: flensburg_food_web (/home/ethan/.retriever/scripts/) Exception: 'main' object has no attribute 'keywords' Failed to load script: socean_diet_data (/home/ethan/.retriever/scripts/) Exception: 'main' object has no attribute 'keywords' Failed to load script: flensburg_food_web (/home/ethan/.retriever/scripts/) Exception: 'main' object has no attribute 'keywords' Available datasets : 78 abalone-age breast-cancer-wi fish-parasite-hosts home-ranges mammal-life-hist mediter-basin-plant-traits plant-life-hist-eu prism-climate vertnet-amphibians amniote-life-hist breed-bird-survey forest-biomass-china intertidal-abund-me mammal-masses mt-st-helens-veg plant-occur-oosting species-exctinction-rates vertnet-birds antarctic-breed-bird breed-bird-survey-50stop forest-fires-portugal iris mammal-metabolic-rate nematode-traits plant-taxonomy-us streamflow-conditions vertnet-fishes aquatic-animal-excretion butterfly-population-network forest-inventory-analysis la-selva-trees mammal-super-tree ngreatplains-flowering-dates poker-hands tree-canopy-geometries vertnet-mammals bioclim car-eval forest-plots-michigan leaf-herbivory mapped-plant-quads-co NPN portal tree-demog-wghats vertnet-reptiles biodiversity-response chytr-disease-distr forest-plots-wghats macroalgal_communities mapped-plant-quads-id nyc-tree-count portal-dev turtle-offspring-nesting wine-composition biomass-allometry-db community-abundance-misc fray-jorge-ecology macrocystis-variation mapped-plant-quads-ks pantheria predator-prey-body-ratio veg-plots-sdl wine-quality bird-migration-data dicerandra-frutescens gentry-forest-transects mammal-community-db mapped-plant-quads-mt phytoplankton-size predator-prey-size-marine vertnet wood-density bird-size elton-traits great-basin-mammal-abundance mammal-diet marine-recruitment-data plant-comp-ok ``` @henrykironde suspects that these datasets only work due to improvements in `master` that were made to get this whole set of datasets running, so we may just need to update their `retriever_minimum_version` values to `2.1.dev0`. </issue> <code> [start of scripts/flensburg_food_web.py] 1 # -*- coding: latin-1 -*- 2 #retriever 3 from retriever.lib.templates import Script 4 from retriever.lib.models import Table, Cleanup, correct_invalid_value 5 from retriever import VERSION 6 from pkg_resources import parse_version 7 8 class main(Script): 9 def __init__(self, **kwargs): 10 Script.__init__(self, **kwargs) 11 self.title="Food web including metazoan parasites for a brackish shallow water ecosystem in Germany and Denmark" 12 self.citation="C. Dieter Zander, Neri Josten, Kim C. Detloff, Robert Poulin, John P. 
McLaughlin, and David W. Thieltges. 2011. Food web including metazoan parasites for a brackish shallow water ecosystem in Germany and Denmark. Ecology 92:2007." 13 self.name="flensburg-food-web" 14 self.shortname="flensburg-food-web" 15 self.ref="https://figshare.com/articles/Full_Archive/3552066" 16 self.description="This data is of a food web for the Flensburg Fjord, a brackish shallow water inlet on the Baltic Sea, between Germany and Denmark." 17 self.retriever_minimum_version='2.0.dev' 18 self.version='1.0.0' 19 self.urls={"zip": "https://ndownloader.figshare.com/files/5620326"} 20 self.cleanup_func_table = Cleanup(correct_invalid_value, missing_values=['']) 21 22 if parse_version(VERSION) <= parse_version("2.0.0"): 23 self.shortname = self.name 24 self.name = self.title 25 self.tags = self.keywords 26 self.cleanup_func_table = Cleanup(correct_invalid_value, nulls=['', 'unknown']) 27 28 def download(self, engine=None, debug=False): 29 Script.download(self, engine, debug) 30 engine = self.engine 31 file_names = [ ('Flensburg_Data_Links.csv','links'), 32 ('Flensburg_Data_Nodes.csv','nodes') 33 ] 34 35 engine.download_files_from_archive(self.urls["zip"], [i[0] for i in file_names], filetype="zip", archivename="ECOL_92_174") 36 37 for(filename,tablename) in file_names: 38 data_path = self.engine.format_filename(filename) 39 self.engine.auto_create_table(Table(str(tablename), cleanup=self.cleanup_func_table),filename=filename) 40 self.engine.insert_data_from_file(data_path) 41 42 SCRIPT = main() [end of scripts/flensburg_food_web.py] [start of scripts/socean_diet_data.py] 1 # -*- coding: latin-1 -*- 2 #retriever 3 from retriever.lib.templates import Script 4 from retriever.lib.models import Table, Cleanup, correct_invalid_value 5 from retriever import VERSION 6 from pkg_resources import parse_version 7 8 class main(Script): 9 def __init__(self, **kwargs): 10 Script.__init__(self, **kwargs) 11 self.title="A Southern Ocean dietary database" 12 self.citation="Ben Raymond, Michelle Marshall, Gabrielle Nevitt, Chris L. Gillies, John van den Hoff, Jonathan S. Stark, Marcel Losekoot, Eric J. Woehler, and Andrew J. Constable. 2011. A Southern Ocean dietary database. Ecology 92:1188." 
13 self.name="socean-diet-data" 14 self.shortname="socean-diet-data" 15 self.ref="https://figshare.com/articles/Full_Archive/3551304" 16 self.description="Diet-related data from published and unpublished data sets and studies" 17 self.retriever_minimum_version='2.0.dev' 18 self.version='1.0.0' 19 self.urls={"zip": "https://ndownloader.figshare.com/files/5618823"} 20 self.cleanup_func_table = Cleanup(correct_invalid_value, missing_values=['', 'unknown']) 21 22 if parse_version(VERSION) <= parse_version("2.0.0"): 23 self.shortname = self.name 24 self.name = self.title 25 self.tags = self.keywords 26 self.cleanup_func_table = Cleanup(correct_invalid_value, nulls=['', 'unknown']) 27 28 def download(self, engine=None, debug=False): 29 Script.download(self, engine, debug) 30 engine = self.engine 31 file_names = [ ('isotopes.csv','isotopes'), 32 ('sources.csv','sources'), 33 ('diet.csv', 'diet') 34 ] 35 36 engine.download_files_from_archive(self.urls["zip"], [i[0] for i in file_names], filetype="zip", archivename="ECOL_92_97") 37 38 for(filename,tablename) in file_names: 39 data_path = self.engine.format_filename(filename) 40 self.engine.auto_create_table(Table(str(tablename), cleanup=self.cleanup_func_table),filename=filename) 41 self.engine.insert_data_from_file(data_path) 42 43 SCRIPT = main() [end of scripts/socean_diet_data.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/scripts/flensburg_food_web.py b/scripts/flensburg_food_web.py --- a/scripts/flensburg_food_web.py +++ b/scripts/flensburg_food_web.py @@ -14,8 +14,9 @@ self.shortname="flensburg-food-web" self.ref="https://figshare.com/articles/Full_Archive/3552066" self.description="This data is of a food web for the Flensburg Fjord, a brackish shallow water inlet on the Baltic Sea, between Germany and Denmark." + self.keywords = [] self.retriever_minimum_version='2.0.dev' - self.version='1.0.0' + self.version='1.0.1' self.urls={"zip": "https://ndownloader.figshare.com/files/5620326"} self.cleanup_func_table = Cleanup(correct_invalid_value, missing_values=['']) @@ -39,4 +40,4 @@ self.engine.auto_create_table(Table(str(tablename), cleanup=self.cleanup_func_table),filename=filename) self.engine.insert_data_from_file(data_path) -SCRIPT = main() \ No newline at end of file +SCRIPT = main() diff --git a/scripts/socean_diet_data.py b/scripts/socean_diet_data.py --- a/scripts/socean_diet_data.py +++ b/scripts/socean_diet_data.py @@ -14,8 +14,9 @@ self.shortname="socean-diet-data" self.ref="https://figshare.com/articles/Full_Archive/3551304" self.description="Diet-related data from published and unpublished data sets and studies" + self.keywords = [] self.retriever_minimum_version='2.0.dev' - self.version='1.0.0' + self.version='1.0.1' self.urls={"zip": "https://ndownloader.figshare.com/files/5618823"} self.cleanup_func_table = Cleanup(correct_invalid_value, missing_values=['', 'unknown']) @@ -40,4 +41,4 @@ self.engine.auto_create_table(Table(str(tablename), cleanup=self.cleanup_func_table),filename=filename) self.engine.insert_data_from_file(data_path) -SCRIPT = main() \ No newline at end of file +SCRIPT = main()
{"golden_diff": "diff --git a/scripts/flensburg_food_web.py b/scripts/flensburg_food_web.py\n--- a/scripts/flensburg_food_web.py\n+++ b/scripts/flensburg_food_web.py\n@@ -14,8 +14,9 @@\n self.shortname=\"flensburg-food-web\"\n self.ref=\"https://figshare.com/articles/Full_Archive/3552066\"\n self.description=\"This data is of a food web for the Flensburg Fjord, a brackish shallow water inlet on the Baltic Sea, between Germany and Denmark.\"\n+ self.keywords = []\n self.retriever_minimum_version='2.0.dev'\n- self.version='1.0.0'\n+ self.version='1.0.1'\n self.urls={\"zip\": \"https://ndownloader.figshare.com/files/5620326\"}\n self.cleanup_func_table = Cleanup(correct_invalid_value, missing_values=[''])\n \n@@ -39,4 +40,4 @@\n self.engine.auto_create_table(Table(str(tablename), cleanup=self.cleanup_func_table),filename=filename)\n self.engine.insert_data_from_file(data_path)\n \n-SCRIPT = main()\n\\ No newline at end of file\n+SCRIPT = main()\ndiff --git a/scripts/socean_diet_data.py b/scripts/socean_diet_data.py\n--- a/scripts/socean_diet_data.py\n+++ b/scripts/socean_diet_data.py\n@@ -14,8 +14,9 @@\n self.shortname=\"socean-diet-data\"\n self.ref=\"https://figshare.com/articles/Full_Archive/3551304\"\n self.description=\"Diet-related data from published and unpublished data sets and studies\"\n+ self.keywords = []\n self.retriever_minimum_version='2.0.dev'\n- self.version='1.0.0'\n+ self.version='1.0.1'\n self.urls={\"zip\": \"https://ndownloader.figshare.com/files/5618823\"}\n self.cleanup_func_table = Cleanup(correct_invalid_value, missing_values=['', 'unknown'])\n \n@@ -40,4 +41,4 @@\n self.engine.auto_create_table(Table(str(tablename), cleanup=self.cleanup_func_table),filename=filename)\n self.engine.insert_data_from_file(data_path)\n \n-SCRIPT = main()\n\\ No newline at end of file\n+SCRIPT = main()\n", "issue": "Some new datasets failing on 2.0.0\nTwo or three of the new datasets that got added today (hooray!) 
appearing to having issues on `2.0.0`.\r\n\r\n```\r\nethan@croryx:~$ pip install retriever --upgrade\r\n...\r\nethan@croryx:~$ retriever reset all\r\n...\r\nethan@croryx:~$ retriever update\r\nDownloading scripts...\r\nDownload Progress: [##############################] 100.00%\r\nThe retriever is up-to-date\r\nFailed to load script: socean_diet_data (/home/ethan/.retriever/scripts/)\r\nException: 'main' object has no attribute 'keywords' \r\nFailed to load script: flensburg_food_web (/home/ethan/.retriever/scripts/)\r\nException: 'main' object has no attribute 'keywords' \r\n\r\nethan@croryx:~$ retriever ls\r\nFailed to load script: socean_diet_data (/home/ethan/.retriever/scripts/)\r\nException: 'main' object has no attribute 'keywords' \r\nFailed to load script: flensburg_food_web (/home/ethan/.retriever/scripts/)\r\nException: 'main' object has no attribute 'keywords' \r\nFailed to load script: socean_diet_data (/home/ethan/.retriever/scripts/)\r\nException: 'main' object has no attribute 'keywords' \r\nFailed to load script: flensburg_food_web (/home/ethan/.retriever/scripts/)\r\nException: 'main' object has no attribute 'keywords' \r\nAvailable datasets : 78\r\n\r\nabalone-age breast-cancer-wi fish-parasite-hosts home-ranges mammal-life-hist mediter-basin-plant-traits plant-life-hist-eu prism-climate vertnet-amphibians \r\namniote-life-hist breed-bird-survey forest-biomass-china intertidal-abund-me mammal-masses mt-st-helens-veg plant-occur-oosting species-exctinction-rates vertnet-birds \r\nantarctic-breed-bird breed-bird-survey-50stop forest-fires-portugal iris mammal-metabolic-rate nematode-traits plant-taxonomy-us streamflow-conditions vertnet-fishes \r\naquatic-animal-excretion butterfly-population-network forest-inventory-analysis la-selva-trees mammal-super-tree ngreatplains-flowering-dates poker-hands tree-canopy-geometries vertnet-mammals \r\nbioclim car-eval forest-plots-michigan leaf-herbivory mapped-plant-quads-co NPN portal tree-demog-wghats vertnet-reptiles \r\nbiodiversity-response chytr-disease-distr forest-plots-wghats macroalgal_communities mapped-plant-quads-id nyc-tree-count portal-dev turtle-offspring-nesting wine-composition \r\nbiomass-allometry-db community-abundance-misc fray-jorge-ecology macrocystis-variation mapped-plant-quads-ks pantheria predator-prey-body-ratio veg-plots-sdl wine-quality \r\nbird-migration-data dicerandra-frutescens gentry-forest-transects mammal-community-db mapped-plant-quads-mt phytoplankton-size predator-prey-size-marine vertnet wood-density \r\nbird-size elton-traits great-basin-mammal-abundance mammal-diet marine-recruitment-data plant-comp-ok\r\n```\r\n\r\n@henrykironde suspects that these datasets only work due to improvements in `master` that were made to get this whole set of datasets running, so we may just need to update their `retriever_minimum_version` values to `2.1.dev0`.\n", "before_files": [{"content": "# -*- coding: latin-1 -*-\n#retriever\nfrom retriever.lib.templates import Script\nfrom retriever.lib.models import Table, Cleanup, correct_invalid_value\nfrom retriever import VERSION\nfrom pkg_resources import parse_version\n\nclass main(Script):\n def __init__(self, **kwargs):\n Script.__init__(self, **kwargs)\n self.title=\"Food web including metazoan parasites for a brackish shallow water ecosystem in Germany and Denmark\"\n self.citation=\"C. Dieter Zander, Neri Josten, Kim C. Detloff, Robert Poulin, John P. McLaughlin, and David W. Thieltges. 2011. 
Food web including metazoan parasites for a brackish shallow water ecosystem in Germany and Denmark. Ecology 92:2007.\"\n self.name=\"flensburg-food-web\"\n self.shortname=\"flensburg-food-web\"\n self.ref=\"https://figshare.com/articles/Full_Archive/3552066\"\n self.description=\"This data is of a food web for the Flensburg Fjord, a brackish shallow water inlet on the Baltic Sea, between Germany and Denmark.\"\n self.retriever_minimum_version='2.0.dev'\n self.version='1.0.0'\n self.urls={\"zip\": \"https://ndownloader.figshare.com/files/5620326\"}\n self.cleanup_func_table = Cleanup(correct_invalid_value, missing_values=[''])\n\n if parse_version(VERSION) <= parse_version(\"2.0.0\"):\n self.shortname = self.name\n self.name = self.title\n self.tags = self.keywords\n self.cleanup_func_table = Cleanup(correct_invalid_value, nulls=['', 'unknown'])\n\n def download(self, engine=None, debug=False):\n Script.download(self, engine, debug)\n engine = self.engine\n file_names = [ ('Flensburg_Data_Links.csv','links'),\n ('Flensburg_Data_Nodes.csv','nodes')\n ]\n\n engine.download_files_from_archive(self.urls[\"zip\"], [i[0] for i in file_names], filetype=\"zip\", archivename=\"ECOL_92_174\")\n \n for(filename,tablename) in file_names:\n data_path = self.engine.format_filename(filename)\n self.engine.auto_create_table(Table(str(tablename), cleanup=self.cleanup_func_table),filename=filename)\n self.engine.insert_data_from_file(data_path)\n\nSCRIPT = main()", "path": "scripts/flensburg_food_web.py"}, {"content": "# -*- coding: latin-1 -*-\n#retriever\nfrom retriever.lib.templates import Script\nfrom retriever.lib.models import Table, Cleanup, correct_invalid_value\nfrom retriever import VERSION\nfrom pkg_resources import parse_version\n\nclass main(Script):\n def __init__(self, **kwargs):\n Script.__init__(self, **kwargs)\n self.title=\"A Southern Ocean dietary database\"\n self.citation=\"Ben Raymond, Michelle Marshall, Gabrielle Nevitt, Chris L. Gillies, John van den Hoff, Jonathan S. Stark, Marcel Losekoot, Eric J. Woehler, and Andrew J. Constable. 2011. A Southern Ocean dietary database. Ecology 92:1188.\"\n self.name=\"socean-diet-data\"\n self.shortname=\"socean-diet-data\"\n self.ref=\"https://figshare.com/articles/Full_Archive/3551304\"\n self.description=\"Diet-related data from published and unpublished data sets and studies\"\n self.retriever_minimum_version='2.0.dev'\n self.version='1.0.0'\n self.urls={\"zip\": \"https://ndownloader.figshare.com/files/5618823\"}\n self.cleanup_func_table = Cleanup(correct_invalid_value, missing_values=['', 'unknown'])\n\n if parse_version(VERSION) <= parse_version(\"2.0.0\"):\n self.shortname = self.name\n self.name = self.title\n self.tags = self.keywords\n self.cleanup_func_table = Cleanup(correct_invalid_value, nulls=['', 'unknown'])\n\n def download(self, engine=None, debug=False):\n Script.download(self, engine, debug)\n engine = self.engine\n file_names = [ ('isotopes.csv','isotopes'),\n ('sources.csv','sources'), \n ('diet.csv', 'diet')\n ]\n\n engine.download_files_from_archive(self.urls[\"zip\"], [i[0] for i in file_names], filetype=\"zip\", archivename=\"ECOL_92_97\")\n \n for(filename,tablename) in file_names:\n data_path = self.engine.format_filename(filename)\n self.engine.auto_create_table(Table(str(tablename), cleanup=self.cleanup_func_table),filename=filename)\n self.engine.insert_data_from_file(data_path)\n\nSCRIPT = main()", "path": "scripts/socean_diet_data.py"}]}
num_tokens_prompt: 2,597
num_tokens_diff: 515

problem_id: gh_patches_debug_31020
source: rasdani/github-patches
task_type: git_diff
in_source_id: OpenMined__PySyft-3150
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Remove ZSTD **Is your feature request related to a problem? Please describe.** ZSTD is used for compression in our serde process. However we don't need extra compression as we move to Protobuf. ZSTD is usually a source of problems when installing PySyft with different hacks to solve it. **Describe the solution you'd like** Remove ZSTD dependency. This will require removing the tests and its use in serde. **Describe alternatives you've considered** Protobuf covers compression. **Additional context** </issue> <code> [start of syft/serde/compression.py] 1 """ 2 This file exists to provide one common place for all compression methods used in 3 simplifying and serializing PySyft objects. 4 """ 5 6 import lz4 7 from lz4 import ( # noqa: F401 8 frame, 9 ) # needed as otherwise we will get: module 'lz4' has no attribute 'frame' 10 import zstd 11 12 from syft.exceptions import CompressionNotFoundException 13 14 # COMPRESSION SCHEME INT CODES 15 NO_COMPRESSION = 40 16 LZ4 = 41 17 ZSTD = 42 18 scheme_to_bytes = { 19 NO_COMPRESSION: NO_COMPRESSION.to_bytes(1, byteorder="big"), 20 LZ4: LZ4.to_bytes(1, byteorder="big"), 21 ZSTD: ZSTD.to_bytes(1, byteorder="big"), 22 } 23 24 ## SECTION: chosen Compression Algorithm 25 26 27 def _apply_compress_scheme(decompressed_input_bin) -> tuple: 28 """ 29 Apply the selected compression scheme. 30 By default is used LZ4 31 32 Args: 33 decompressed_input_bin: the binary to be compressed 34 """ 35 return apply_lz4_compression(decompressed_input_bin) 36 37 38 def apply_lz4_compression(decompressed_input_bin) -> tuple: 39 """ 40 Apply LZ4 compression to the input 41 42 Args: 43 decompressed_input_bin: the binary to be compressed 44 45 Returns: 46 a tuple (compressed_result, LZ4) 47 """ 48 return lz4.frame.compress(decompressed_input_bin), LZ4 49 50 51 def apply_zstd_compression(decompressed_input_bin) -> tuple: 52 """ 53 Apply ZSTD compression to the input 54 55 Args: 56 decompressed_input_bin: the binary to be compressed 57 58 Returns: 59 a tuple (compressed_result, ZSTD) 60 """ 61 62 return zstd.compress(decompressed_input_bin), ZSTD 63 64 65 def apply_no_compression(decompressed_input_bin) -> tuple: 66 """ 67 No compression is applied to the input 68 69 Args: 70 decompressed_input_bin: the binary 71 72 Returns: 73 a tuple (the binary, LZ4) 74 """ 75 76 return decompressed_input_bin, NO_COMPRESSION 77 78 79 def _compress(decompressed_input_bin: bin) -> bin: 80 """ 81 This function compresses a binary using the function _apply_compress_scheme 82 if the input has been already compressed in some step, it will return it as it is 83 84 Args: 85 decompressed_input_bin (bin): binary to be compressed 86 87 Returns: 88 bin: a compressed binary 89 90 """ 91 compress_stream, compress_scheme = _apply_compress_scheme(decompressed_input_bin) 92 try: 93 z = scheme_to_bytes[compress_scheme] + compress_stream 94 return z 95 except KeyError: 96 raise CompressionNotFoundException( 97 f"Compression scheme not found for compression code: {str(compress_scheme)}" 98 ) 99 100 101 def _decompress(binary: bin) -> bin: 102 """ 103 This function decompresses a binary using the scheme defined in the first byte of the input 104 105 Args: 106 binary (bin): a compressed binary 107 108 Returns: 109 bin: decompressed binary 110 111 """ 112 113 # check the 1-byte header to check the compression scheme used 114 compress_scheme = binary[0] 115 116 # remove the 1-byte header from the input stream 
117 binary = binary[1:] 118 # 1) Decompress or return the original stream 119 if compress_scheme == LZ4: 120 return lz4.frame.decompress(binary) 121 elif compress_scheme == ZSTD: 122 return zstd.decompress(binary) 123 elif compress_scheme == NO_COMPRESSION: 124 return binary 125 else: 126 raise CompressionNotFoundException( 127 f"Compression scheme not found for compression code: {str(compress_scheme)}" 128 ) 129 [end of syft/serde/compression.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/syft/serde/compression.py b/syft/serde/compression.py --- a/syft/serde/compression.py +++ b/syft/serde/compression.py @@ -7,18 +7,15 @@ from lz4 import ( # noqa: F401 frame, ) # needed as otherwise we will get: module 'lz4' has no attribute 'frame' -import zstd from syft.exceptions import CompressionNotFoundException # COMPRESSION SCHEME INT CODES NO_COMPRESSION = 40 LZ4 = 41 -ZSTD = 42 scheme_to_bytes = { NO_COMPRESSION: NO_COMPRESSION.to_bytes(1, byteorder="big"), LZ4: LZ4.to_bytes(1, byteorder="big"), - ZSTD: ZSTD.to_bytes(1, byteorder="big"), } ## SECTION: chosen Compression Algorithm @@ -48,20 +45,6 @@ return lz4.frame.compress(decompressed_input_bin), LZ4 -def apply_zstd_compression(decompressed_input_bin) -> tuple: - """ - Apply ZSTD compression to the input - - Args: - decompressed_input_bin: the binary to be compressed - - Returns: - a tuple (compressed_result, ZSTD) - """ - - return zstd.compress(decompressed_input_bin), ZSTD - - def apply_no_compression(decompressed_input_bin) -> tuple: """ No compression is applied to the input @@ -118,8 +101,6 @@ # 1) Decompress or return the original stream if compress_scheme == LZ4: return lz4.frame.decompress(binary) - elif compress_scheme == ZSTD: - return zstd.decompress(binary) elif compress_scheme == NO_COMPRESSION: return binary else:
{"golden_diff": "diff --git a/syft/serde/compression.py b/syft/serde/compression.py\n--- a/syft/serde/compression.py\n+++ b/syft/serde/compression.py\n@@ -7,18 +7,15 @@\n from lz4 import ( # noqa: F401\n frame,\n ) # needed as otherwise we will get: module 'lz4' has no attribute 'frame'\n-import zstd\n \n from syft.exceptions import CompressionNotFoundException\n \n # COMPRESSION SCHEME INT CODES\n NO_COMPRESSION = 40\n LZ4 = 41\n-ZSTD = 42\n scheme_to_bytes = {\n NO_COMPRESSION: NO_COMPRESSION.to_bytes(1, byteorder=\"big\"),\n LZ4: LZ4.to_bytes(1, byteorder=\"big\"),\n- ZSTD: ZSTD.to_bytes(1, byteorder=\"big\"),\n }\n \n ## SECTION: chosen Compression Algorithm\n@@ -48,20 +45,6 @@\n return lz4.frame.compress(decompressed_input_bin), LZ4\n \n \n-def apply_zstd_compression(decompressed_input_bin) -> tuple:\n- \"\"\"\n- Apply ZSTD compression to the input\n-\n- Args:\n- decompressed_input_bin: the binary to be compressed\n-\n- Returns:\n- a tuple (compressed_result, ZSTD)\n- \"\"\"\n-\n- return zstd.compress(decompressed_input_bin), ZSTD\n-\n-\n def apply_no_compression(decompressed_input_bin) -> tuple:\n \"\"\"\n No compression is applied to the input\n@@ -118,8 +101,6 @@\n # 1) Decompress or return the original stream\n if compress_scheme == LZ4:\n return lz4.frame.decompress(binary)\n- elif compress_scheme == ZSTD:\n- return zstd.decompress(binary)\n elif compress_scheme == NO_COMPRESSION:\n return binary\n else:\n", "issue": "Remove ZSTD\n**Is your feature request related to a problem? Please describe.**\r\nZSTD is used for compression in our serde process. However we don't need extra compression as we move to Protobuf.\r\nZSTD is usually a source of problems when installing PySyft with different hacks to solve it.\r\n\r\n**Describe the solution you'd like**\r\nRemove ZSTD dependency.\r\nThis will require removing the tests and its use in serde.\r\n\r\n**Describe alternatives you've considered**\r\nProtobuf covers compression.\r\n\r\n**Additional context**\r\n\n", "before_files": [{"content": "\"\"\"\nThis file exists to provide one common place for all compression methods used in\nsimplifying and serializing PySyft objects.\n\"\"\"\n\nimport lz4\nfrom lz4 import ( # noqa: F401\n frame,\n) # needed as otherwise we will get: module 'lz4' has no attribute 'frame'\nimport zstd\n\nfrom syft.exceptions import CompressionNotFoundException\n\n# COMPRESSION SCHEME INT CODES\nNO_COMPRESSION = 40\nLZ4 = 41\nZSTD = 42\nscheme_to_bytes = {\n NO_COMPRESSION: NO_COMPRESSION.to_bytes(1, byteorder=\"big\"),\n LZ4: LZ4.to_bytes(1, byteorder=\"big\"),\n ZSTD: ZSTD.to_bytes(1, byteorder=\"big\"),\n}\n\n## SECTION: chosen Compression Algorithm\n\n\ndef _apply_compress_scheme(decompressed_input_bin) -> tuple:\n \"\"\"\n Apply the selected compression scheme.\n By default is used LZ4\n\n Args:\n decompressed_input_bin: the binary to be compressed\n \"\"\"\n return apply_lz4_compression(decompressed_input_bin)\n\n\ndef apply_lz4_compression(decompressed_input_bin) -> tuple:\n \"\"\"\n Apply LZ4 compression to the input\n\n Args:\n decompressed_input_bin: the binary to be compressed\n\n Returns:\n a tuple (compressed_result, LZ4)\n \"\"\"\n return lz4.frame.compress(decompressed_input_bin), LZ4\n\n\ndef apply_zstd_compression(decompressed_input_bin) -> tuple:\n \"\"\"\n Apply ZSTD compression to the input\n\n Args:\n decompressed_input_bin: the binary to be compressed\n\n Returns:\n a tuple (compressed_result, ZSTD)\n \"\"\"\n\n return zstd.compress(decompressed_input_bin), ZSTD\n\n\ndef 
apply_no_compression(decompressed_input_bin) -> tuple:\n \"\"\"\n No compression is applied to the input\n\n Args:\n decompressed_input_bin: the binary\n\n Returns:\n a tuple (the binary, LZ4)\n \"\"\"\n\n return decompressed_input_bin, NO_COMPRESSION\n\n\ndef _compress(decompressed_input_bin: bin) -> bin:\n \"\"\"\n This function compresses a binary using the function _apply_compress_scheme\n if the input has been already compressed in some step, it will return it as it is\n\n Args:\n decompressed_input_bin (bin): binary to be compressed\n\n Returns:\n bin: a compressed binary\n\n \"\"\"\n compress_stream, compress_scheme = _apply_compress_scheme(decompressed_input_bin)\n try:\n z = scheme_to_bytes[compress_scheme] + compress_stream\n return z\n except KeyError:\n raise CompressionNotFoundException(\n f\"Compression scheme not found for compression code: {str(compress_scheme)}\"\n )\n\n\ndef _decompress(binary: bin) -> bin:\n \"\"\"\n This function decompresses a binary using the scheme defined in the first byte of the input\n\n Args:\n binary (bin): a compressed binary\n\n Returns:\n bin: decompressed binary\n\n \"\"\"\n\n # check the 1-byte header to check the compression scheme used\n compress_scheme = binary[0]\n\n # remove the 1-byte header from the input stream\n binary = binary[1:]\n # 1) Decompress or return the original stream\n if compress_scheme == LZ4:\n return lz4.frame.decompress(binary)\n elif compress_scheme == ZSTD:\n return zstd.decompress(binary)\n elif compress_scheme == NO_COMPRESSION:\n return binary\n else:\n raise CompressionNotFoundException(\n f\"Compression scheme not found for compression code: {str(compress_scheme)}\"\n )\n", "path": "syft/serde/compression.py"}]}
1,722
414
gh_patches_debug_17202
rasdani/github-patches
git_diff
e2nIEE__pandapower-1007
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> bug in WLSZeroInjectionConstraintsAlgorithm code in line 135 of the file, https://github.com/e2nIEE/pandapower/blob/develop/pandapower/estimation/algorithm/base.py bugs when zero_injection_bus is empty. </issue> <code> [start of pandapower/estimation/algorithm/base.py] 1 # -*- coding: utf-8 -*- 2 3 # Copyright (c) 2016-2020 by University of Kassel and Fraunhofer Institute for Energy Economics 4 # and Energy System Technology (IEE), Kassel. All rights reserved. 5 6 import numpy as np 7 from scipy.sparse import csr_matrix, vstack, hstack 8 from scipy.sparse.linalg import spsolve 9 10 from pandapower.estimation.algorithm.estimator import BaseEstimatorIRWLS, get_estimator 11 from pandapower.estimation.algorithm.matrix_base import BaseAlgebra, \ 12 BaseAlgebraZeroInjConstraints 13 from pandapower.estimation.idx_bus import ZERO_INJ_FLAG, P, P_STD, Q, Q_STD 14 from pandapower.estimation.ppc_conversion import ExtendedPPCI 15 from pandapower.pypower.idx_bus import bus_cols 16 17 try: 18 import pplog as logging 19 except ImportError: 20 import logging 21 std_logger = logging.getLogger(__name__) 22 23 __all__ = ["WLSAlgorithm", "WLSZeroInjectionConstraintsAlgorithm", "IRWLSAlgorithm"] 24 25 26 class BaseAlgorithm: 27 def __init__(self, tolerance, maximum_iterations, logger=std_logger): 28 self.tolerance = tolerance 29 self.max_iterations = maximum_iterations 30 self.logger = logger 31 self.successful = False 32 self.iterations = None 33 34 # Parameters for estimate 35 self.eppci = None 36 self.pp_meas_indices = None 37 38 def check_observability(self, eppci: ExtendedPPCI, z): 39 # Check if observability criterion is fulfilled and the state estimation is possible 40 if len(z) < 2 * eppci["bus"].shape[0] - 1: 41 self.logger.error("System is not observable (cancelling)") 42 self.logger.error("Measurements available: %d. Measurements required: %d" % 43 (len(z), 2 * eppci["bus"].shape[0] - 1)) 44 raise UserWarning("Measurements available: %d. Measurements required: %d" % 45 (len(z), 2 * eppci["bus"].shape[0] - 1)) 46 47 def check_result(self, current_error, cur_it): 48 # print output for results 49 if current_error <= self.tolerance: 50 self.successful = True 51 self.logger.debug("State Estimation successful ({:d} iterations)".format(cur_it)) 52 else: 53 self.successful = False 54 self.logger.debug("State Estimation not successful ({:d}/{:d} iterations)".format(cur_it, 55 self.max_iterations)) 56 57 def initialize(self, eppci: ExtendedPPCI): 58 # Check observability 59 self.eppci = eppci 60 self.pp_meas_indices = eppci.pp_meas_indices 61 self.check_observability(eppci, eppci.z) 62 63 def estimate(self, ppci: ExtendedPPCI, **kwargs): 64 # Must be implemented individually!! 
65 pass 66 67 68 class WLSAlgorithm(BaseAlgorithm): 69 def __init__(self, tolerance, maximum_iterations, logger=std_logger): 70 super(WLSAlgorithm, self).__init__(tolerance, maximum_iterations, logger) 71 72 # Parameters for Bad data detection 73 self.R_inv = None 74 self.Gm = None 75 self.r = None 76 self.H = None 77 self.hx = None 78 79 def estimate(self, eppci, **kwargs): 80 self.initialize(eppci) 81 # matrix calculation object 82 sem = BaseAlgebra(eppci) 83 84 current_error, cur_it = 100., 0 85 # invert covariance matrix 86 r_inv = csr_matrix(np.diagflat(1 / eppci.r_cov ** 2)) 87 E = eppci.E 88 while current_error > self.tolerance and cur_it < self.max_iterations: 89 self.logger.debug("Starting iteration {:d}".format(1 + cur_it)) 90 try: 91 # residual r 92 r = csr_matrix(sem.create_rx(E)).T 93 94 # jacobian matrix H 95 H = csr_matrix(sem.create_hx_jacobian(E)) 96 97 # gain matrix G_m 98 # G_m = H^t * R^-1 * H 99 G_m = H.T * (r_inv * H) 100 101 # state vector difference d_E 102 # d_E = G_m^-1 * (H' * R^-1 * r) 103 d_E = spsolve(G_m, H.T * (r_inv * r)) 104 105 # Update E with d_E 106 E += d_E.ravel() 107 eppci.update_E(E) 108 109 # prepare next iteration 110 cur_it += 1 111 current_error = np.max(np.abs(d_E)) 112 self.logger.debug("Current error: {:.7f}".format(current_error)) 113 except np.linalg.linalg.LinAlgError: 114 self.logger.error("A problem appeared while using the linear algebra methods." 115 "Check and change the measurement set.") 116 return False 117 118 # check if the estimation is successfull 119 self.check_result(current_error, cur_it) 120 if self.successful: 121 # store variables required for chi^2 and r_N_max test: 122 self.R_inv = r_inv.toarray() 123 self.Gm = G_m.toarray() 124 self.r = r.toarray() 125 self.H = H.toarray() 126 # create h(x) for the current iteration 127 self.hx = sem.create_hx(eppci.E) 128 return eppci 129 130 131 class WLSZeroInjectionConstraintsAlgorithm(BaseAlgorithm): 132 def estimate(self, eppci, **kwargs): 133 # state vector built from delta, |V| and zero injections 134 # Find pq bus with zero p,q and shunt admittance 135 zero_injection_bus = np.argwhere(eppci["bus"][:, bus_cols + ZERO_INJ_FLAG] == True).ravel() 136 eppci["bus"][zero_injection_bus, [bus_cols + P, bus_cols + P_STD, bus_cols + Q, bus_cols + Q_STD]] = np.NaN 137 # Withn pq buses with zero injection identify those who have also no p or q measurement 138 p_zero_injections = zero_injection_bus 139 q_zero_injections = zero_injection_bus 140 new_states = np.zeros(len(p_zero_injections) + len(q_zero_injections)) 141 142 num_bus = eppci["bus"].shape[0] 143 144 # matrix calculation object 145 sem = BaseAlgebraZeroInjConstraints(eppci) 146 147 current_error, cur_it = 100., 0 148 r_inv = csr_matrix((np.diagflat(1 / eppci.r_cov) ** 2)) 149 E = eppci.E 150 # update the E matrix 151 E_ext = np.r_[eppci.E, new_states] 152 153 while current_error > self.tolerance and cur_it < self.max_iterations: 154 self.logger.debug("Starting iteration {:d}".format(1 + cur_it)) 155 try: 156 c_x = sem.create_cx(E, p_zero_injections, q_zero_injections) 157 158 # residual r 159 r = csr_matrix(sem.create_rx(E)).T 160 c_rxh = csr_matrix(c_x).T 161 162 # jacobian matrix H 163 H_temp = sem.create_hx_jacobian(E) 164 C_temp = sem.create_cx_jacobian(E, p_zero_injections, q_zero_injections) 165 H, C = csr_matrix(H_temp), csr_matrix(C_temp) 166 167 # gain matrix G_m 168 # G_m = H^t * R^-1 * H 169 G_m = H.T * (r_inv * H) 170 171 # building a new gain matrix for new constraints. 
172 A_1 = vstack([G_m, C]) 173 c_ax = hstack([C, np.zeros((C.shape[0], C.shape[0]))]) 174 c_xT = c_ax.T 175 M_tx = csr_matrix(hstack((A_1, c_xT))) # again adding to the new gain matrix 176 rhs = H.T * (r_inv * r) # original right hand side 177 C_rhs = vstack((rhs, -c_rxh)) # creating the righ hand side with new constraints 178 179 # state vector difference d_E and update E 180 d_E_ext = spsolve(M_tx, C_rhs) 181 E_ext += d_E_ext.ravel() 182 E = E_ext[:E.shape[0]] 183 eppci.update_E(E) 184 185 # prepare next iteration 186 cur_it += 1 187 current_error = np.max(np.abs(d_E_ext[:len(eppci.non_slack_buses) + num_bus])) 188 self.logger.debug("Current error: {:.7f}".format(current_error)) 189 except np.linalg.linalg.LinAlgError: 190 self.logger.error("A problem appeared while using the linear algebra methods." 191 "Check and change the measurement set.") 192 return False 193 194 # check if the estimation is successfull 195 self.check_result(current_error, cur_it) 196 return eppci 197 198 199 class IRWLSAlgorithm(BaseAlgorithm): 200 def estimate(self, eppci, estimator="wls", **kwargs): 201 self.initialize(eppci) 202 203 # matrix calculation object 204 sem = get_estimator(BaseEstimatorIRWLS, estimator)(eppci, **kwargs) 205 206 current_error, cur_it = 100., 0 207 E = eppci.E 208 while current_error > self.tolerance and cur_it < self.max_iterations: 209 self.logger.debug("Starting iteration {:d}".format(1 + cur_it)) 210 try: 211 # residual r 212 r = csr_matrix(sem.create_rx(E)).T 213 214 # jacobian matrix H 215 H = csr_matrix(sem.create_hx_jacobian(E)) 216 217 # gain matrix G_m 218 # G_m = H^t * Phi * H 219 phi = csr_matrix(sem.create_phi(E)) 220 G_m = H.T * (phi * H) 221 222 # state vector difference d_E and update E 223 d_E = spsolve(G_m, H.T * (phi * r)) 224 E += d_E.ravel() 225 eppci.update_E(E) 226 227 # prepare next iteration 228 cur_it += 1 229 current_error = np.max(np.abs(d_E)) 230 self.logger.debug("Current error: {:.7f}".format(current_error)) 231 except np.linalg.linalg.LinAlgError: 232 self.logger.error("A problem appeared while using the linear algebra methods." 233 "Check and change the measurement set.") 234 return False 235 236 # check if the estimation is successfull 237 self.check_result(current_error, cur_it) 238 # update V/delta 239 return eppci 240 [end of pandapower/estimation/algorithm/base.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pandapower/estimation/algorithm/base.py b/pandapower/estimation/algorithm/base.py --- a/pandapower/estimation/algorithm/base.py +++ b/pandapower/estimation/algorithm/base.py @@ -132,6 +132,8 @@ def estimate(self, eppci, **kwargs): # state vector built from delta, |V| and zero injections # Find pq bus with zero p,q and shunt admittance + if not np.any(eppci["bus"][:, bus_cols + ZERO_INJ_FLAG]): + raise UserWarning("Network has no bus with zero injections! Please use WLS instead!") zero_injection_bus = np.argwhere(eppci["bus"][:, bus_cols + ZERO_INJ_FLAG] == True).ravel() eppci["bus"][zero_injection_bus, [bus_cols + P, bus_cols + P_STD, bus_cols + Q, bus_cols + Q_STD]] = np.NaN # Withn pq buses with zero injection identify those who have also no p or q measurement
{"golden_diff": "diff --git a/pandapower/estimation/algorithm/base.py b/pandapower/estimation/algorithm/base.py\n--- a/pandapower/estimation/algorithm/base.py\n+++ b/pandapower/estimation/algorithm/base.py\n@@ -132,6 +132,8 @@\n def estimate(self, eppci, **kwargs):\n # state vector built from delta, |V| and zero injections\n # Find pq bus with zero p,q and shunt admittance\n+ if not np.any(eppci[\"bus\"][:, bus_cols + ZERO_INJ_FLAG]):\n+ raise UserWarning(\"Network has no bus with zero injections! Please use WLS instead!\")\n zero_injection_bus = np.argwhere(eppci[\"bus\"][:, bus_cols + ZERO_INJ_FLAG] == True).ravel()\n eppci[\"bus\"][zero_injection_bus, [bus_cols + P, bus_cols + P_STD, bus_cols + Q, bus_cols + Q_STD]] = np.NaN\n # Withn pq buses with zero injection identify those who have also no p or q measurement\n", "issue": "bug in WLSZeroInjectionConstraintsAlgorithm\ncode in line 135 of the file, https://github.com/e2nIEE/pandapower/blob/develop/pandapower/estimation/algorithm/base.py\r\n\r\nbugs when zero_injection_bus is empty.\r\n\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright (c) 2016-2020 by University of Kassel and Fraunhofer Institute for Energy Economics\n# and Energy System Technology (IEE), Kassel. All rights reserved.\n\nimport numpy as np\nfrom scipy.sparse import csr_matrix, vstack, hstack\nfrom scipy.sparse.linalg import spsolve\n\nfrom pandapower.estimation.algorithm.estimator import BaseEstimatorIRWLS, get_estimator\nfrom pandapower.estimation.algorithm.matrix_base import BaseAlgebra, \\\n BaseAlgebraZeroInjConstraints\nfrom pandapower.estimation.idx_bus import ZERO_INJ_FLAG, P, P_STD, Q, Q_STD\nfrom pandapower.estimation.ppc_conversion import ExtendedPPCI\nfrom pandapower.pypower.idx_bus import bus_cols\n\ntry:\n import pplog as logging\nexcept ImportError:\n import logging\nstd_logger = logging.getLogger(__name__)\n\n__all__ = [\"WLSAlgorithm\", \"WLSZeroInjectionConstraintsAlgorithm\", \"IRWLSAlgorithm\"]\n\n\nclass BaseAlgorithm:\n def __init__(self, tolerance, maximum_iterations, logger=std_logger):\n self.tolerance = tolerance\n self.max_iterations = maximum_iterations\n self.logger = logger\n self.successful = False\n self.iterations = None\n\n # Parameters for estimate\n self.eppci = None\n self.pp_meas_indices = None\n\n def check_observability(self, eppci: ExtendedPPCI, z):\n # Check if observability criterion is fulfilled and the state estimation is possible\n if len(z) < 2 * eppci[\"bus\"].shape[0] - 1:\n self.logger.error(\"System is not observable (cancelling)\")\n self.logger.error(\"Measurements available: %d. Measurements required: %d\" %\n (len(z), 2 * eppci[\"bus\"].shape[0] - 1))\n raise UserWarning(\"Measurements available: %d. 
Measurements required: %d\" %\n (len(z), 2 * eppci[\"bus\"].shape[0] - 1))\n\n def check_result(self, current_error, cur_it):\n # print output for results\n if current_error <= self.tolerance:\n self.successful = True\n self.logger.debug(\"State Estimation successful ({:d} iterations)\".format(cur_it))\n else:\n self.successful = False\n self.logger.debug(\"State Estimation not successful ({:d}/{:d} iterations)\".format(cur_it,\n self.max_iterations))\n\n def initialize(self, eppci: ExtendedPPCI):\n # Check observability\n self.eppci = eppci\n self.pp_meas_indices = eppci.pp_meas_indices\n self.check_observability(eppci, eppci.z)\n\n def estimate(self, ppci: ExtendedPPCI, **kwargs):\n # Must be implemented individually!!\n pass\n\n\nclass WLSAlgorithm(BaseAlgorithm):\n def __init__(self, tolerance, maximum_iterations, logger=std_logger):\n super(WLSAlgorithm, self).__init__(tolerance, maximum_iterations, logger)\n\n # Parameters for Bad data detection\n self.R_inv = None\n self.Gm = None\n self.r = None\n self.H = None\n self.hx = None\n\n def estimate(self, eppci, **kwargs):\n self.initialize(eppci)\n # matrix calculation object\n sem = BaseAlgebra(eppci)\n\n current_error, cur_it = 100., 0\n # invert covariance matrix\n r_inv = csr_matrix(np.diagflat(1 / eppci.r_cov ** 2))\n E = eppci.E\n while current_error > self.tolerance and cur_it < self.max_iterations:\n self.logger.debug(\"Starting iteration {:d}\".format(1 + cur_it))\n try:\n # residual r\n r = csr_matrix(sem.create_rx(E)).T\n\n # jacobian matrix H\n H = csr_matrix(sem.create_hx_jacobian(E))\n\n # gain matrix G_m\n # G_m = H^t * R^-1 * H\n G_m = H.T * (r_inv * H)\n\n # state vector difference d_E\n # d_E = G_m^-1 * (H' * R^-1 * r)\n d_E = spsolve(G_m, H.T * (r_inv * r))\n\n # Update E with d_E\n E += d_E.ravel()\n eppci.update_E(E)\n\n # prepare next iteration\n cur_it += 1\n current_error = np.max(np.abs(d_E))\n self.logger.debug(\"Current error: {:.7f}\".format(current_error))\n except np.linalg.linalg.LinAlgError:\n self.logger.error(\"A problem appeared while using the linear algebra methods.\"\n \"Check and change the measurement set.\")\n return False\n\n # check if the estimation is successfull\n self.check_result(current_error, cur_it)\n if self.successful:\n # store variables required for chi^2 and r_N_max test:\n self.R_inv = r_inv.toarray()\n self.Gm = G_m.toarray()\n self.r = r.toarray()\n self.H = H.toarray()\n # create h(x) for the current iteration\n self.hx = sem.create_hx(eppci.E)\n return eppci\n\n\nclass WLSZeroInjectionConstraintsAlgorithm(BaseAlgorithm):\n def estimate(self, eppci, **kwargs):\n # state vector built from delta, |V| and zero injections\n # Find pq bus with zero p,q and shunt admittance\n zero_injection_bus = np.argwhere(eppci[\"bus\"][:, bus_cols + ZERO_INJ_FLAG] == True).ravel()\n eppci[\"bus\"][zero_injection_bus, [bus_cols + P, bus_cols + P_STD, bus_cols + Q, bus_cols + Q_STD]] = np.NaN\n # Withn pq buses with zero injection identify those who have also no p or q measurement\n p_zero_injections = zero_injection_bus\n q_zero_injections = zero_injection_bus\n new_states = np.zeros(len(p_zero_injections) + len(q_zero_injections))\n\n num_bus = eppci[\"bus\"].shape[0]\n\n # matrix calculation object\n sem = BaseAlgebraZeroInjConstraints(eppci)\n\n current_error, cur_it = 100., 0\n r_inv = csr_matrix((np.diagflat(1 / eppci.r_cov) ** 2))\n E = eppci.E\n # update the E matrix\n E_ext = np.r_[eppci.E, new_states]\n\n while current_error > self.tolerance and cur_it < self.max_iterations:\n 
self.logger.debug(\"Starting iteration {:d}\".format(1 + cur_it))\n try:\n c_x = sem.create_cx(E, p_zero_injections, q_zero_injections)\n\n # residual r\n r = csr_matrix(sem.create_rx(E)).T\n c_rxh = csr_matrix(c_x).T\n\n # jacobian matrix H\n H_temp = sem.create_hx_jacobian(E)\n C_temp = sem.create_cx_jacobian(E, p_zero_injections, q_zero_injections)\n H, C = csr_matrix(H_temp), csr_matrix(C_temp)\n\n # gain matrix G_m\n # G_m = H^t * R^-1 * H\n G_m = H.T * (r_inv * H)\n\n # building a new gain matrix for new constraints.\n A_1 = vstack([G_m, C])\n c_ax = hstack([C, np.zeros((C.shape[0], C.shape[0]))])\n c_xT = c_ax.T\n M_tx = csr_matrix(hstack((A_1, c_xT))) # again adding to the new gain matrix\n rhs = H.T * (r_inv * r) # original right hand side\n C_rhs = vstack((rhs, -c_rxh)) # creating the righ hand side with new constraints\n\n # state vector difference d_E and update E\n d_E_ext = spsolve(M_tx, C_rhs)\n E_ext += d_E_ext.ravel()\n E = E_ext[:E.shape[0]]\n eppci.update_E(E)\n\n # prepare next iteration\n cur_it += 1\n current_error = np.max(np.abs(d_E_ext[:len(eppci.non_slack_buses) + num_bus]))\n self.logger.debug(\"Current error: {:.7f}\".format(current_error))\n except np.linalg.linalg.LinAlgError:\n self.logger.error(\"A problem appeared while using the linear algebra methods.\"\n \"Check and change the measurement set.\")\n return False\n\n # check if the estimation is successfull\n self.check_result(current_error, cur_it)\n return eppci\n\n\nclass IRWLSAlgorithm(BaseAlgorithm):\n def estimate(self, eppci, estimator=\"wls\", **kwargs):\n self.initialize(eppci)\n\n # matrix calculation object\n sem = get_estimator(BaseEstimatorIRWLS, estimator)(eppci, **kwargs)\n\n current_error, cur_it = 100., 0\n E = eppci.E\n while current_error > self.tolerance and cur_it < self.max_iterations:\n self.logger.debug(\"Starting iteration {:d}\".format(1 + cur_it))\n try:\n # residual r\n r = csr_matrix(sem.create_rx(E)).T\n\n # jacobian matrix H\n H = csr_matrix(sem.create_hx_jacobian(E))\n\n # gain matrix G_m\n # G_m = H^t * Phi * H\n phi = csr_matrix(sem.create_phi(E))\n G_m = H.T * (phi * H)\n\n # state vector difference d_E and update E\n d_E = spsolve(G_m, H.T * (phi * r))\n E += d_E.ravel()\n eppci.update_E(E)\n\n # prepare next iteration\n cur_it += 1\n current_error = np.max(np.abs(d_E))\n self.logger.debug(\"Current error: {:.7f}\".format(current_error))\n except np.linalg.linalg.LinAlgError:\n self.logger.error(\"A problem appeared while using the linear algebra methods.\"\n \"Check and change the measurement set.\")\n return False\n\n # check if the estimation is successfull\n self.check_result(current_error, cur_it)\n # update V/delta\n return eppci\n", "path": "pandapower/estimation/algorithm/base.py"}]}
3,531
242
gh_patches_debug_10830
rasdani/github-patches
git_diff
Mailu__Mailu-2177
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Manage user authentication and permissions Currently no authentication is implemented. Multiple issues will have to be tackled: - complete permission scheme or simple admin role plus admins per domain? - how to store user passwords (shared format between Flask-admin and dovecot)? - how should the initial use be created? </issue> <code> [start of core/admin/start.py] 1 #!/usr/bin/python3 2 3 import os 4 import logging as log 5 import sys 6 7 log.basicConfig(stream=sys.stderr, level=os.environ.get("LOG_LEVEL", "INFO")) 8 9 os.system("flask mailu advertise") 10 os.system("flask db upgrade") 11 12 account = os.environ.get("INITIAL_ADMIN_ACCOUNT") 13 domain = os.environ.get("INITIAL_ADMIN_DOMAIN") 14 password = os.environ.get("INITIAL_ADMIN_PW") 15 16 if account is not None and domain is not None and password is not None: 17 mode = os.environ.get("INITIAL_ADMIN_MODE", default="ifmissing") 18 log.info("Creating initial admin accout %s@%s with mode %s",account,domain,mode) 19 os.system("flask mailu admin %s %s '%s' --mode %s" % (account, domain, password, mode)) 20 21 def test_DNS(): 22 import dns.resolver 23 import dns.exception 24 import dns.flags 25 import dns.rdtypes 26 import dns.rdatatype 27 import dns.rdataclass 28 import time 29 # DNS stub configured to do DNSSEC enabled queries 30 resolver = dns.resolver.Resolver() 31 resolver.use_edns(0, 0, 1232) 32 resolver.flags = dns.flags.AD | dns.flags.RD 33 nameservers = resolver.nameservers 34 for ns in nameservers: 35 resolver.nameservers=[ns] 36 while True: 37 try: 38 result = resolver.query('example.org', dns.rdatatype.A, dns.rdataclass.IN, lifetime=10) 39 except Exception as e: 40 log.critical("Your DNS resolver at %s is not working (%s). Please use another resolver or enable unbound via https://setup.mailu.io.", ns, e); 41 else: 42 if result.response.flags & dns.flags.AD: 43 break 44 log.critical("Your DNS resolver at %s isn't doing DNSSEC validation; Please use another resolver or enable unbound via https://setup.mailu.io.", ns) 45 time.sleep(5) 46 47 test_DNS() 48 49 start_command="".join([ 50 "gunicorn --threads ", str(os.cpu_count()), 51 " -b :80 ", 52 "--access-logfile - " if (log.root.level<=log.INFO) else "", 53 "--error-logfile - ", 54 "--preload ", 55 "'mailu:create_app()'"]) 56 57 os.system(start_command) 58 [end of core/admin/start.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/core/admin/start.py b/core/admin/start.py --- a/core/admin/start.py +++ b/core/admin/start.py @@ -35,7 +35,7 @@ resolver.nameservers=[ns] while True: try: - result = resolver.query('example.org', dns.rdatatype.A, dns.rdataclass.IN, lifetime=10) + result = resolver.resolve('example.org', dns.rdatatype.A, dns.rdataclass.IN, lifetime=10) except Exception as e: log.critical("Your DNS resolver at %s is not working (%s). Please use another resolver or enable unbound via https://setup.mailu.io.", ns, e); else:
{"golden_diff": "diff --git a/core/admin/start.py b/core/admin/start.py\n--- a/core/admin/start.py\n+++ b/core/admin/start.py\n@@ -35,7 +35,7 @@\n resolver.nameservers=[ns]\n while True:\n try:\n- result = resolver.query('example.org', dns.rdatatype.A, dns.rdataclass.IN, lifetime=10)\n+ result = resolver.resolve('example.org', dns.rdatatype.A, dns.rdataclass.IN, lifetime=10)\n except Exception as e:\n log.critical(\"Your DNS resolver at %s is not working (%s). Please use another resolver or enable unbound via https://setup.mailu.io.\", ns, e);\n else:\n", "issue": "Manage user authentication and permissions\nCurrently no authentication is implemented. Multiple issues will have to be tackled:\n- complete permission scheme or simple admin role plus admins per domain?\n- how to store user passwords (shared format between Flask-admin and dovecot)?\n- how should the initial use be created?\n\n", "before_files": [{"content": "#!/usr/bin/python3\n\nimport os\nimport logging as log\nimport sys\n\nlog.basicConfig(stream=sys.stderr, level=os.environ.get(\"LOG_LEVEL\", \"INFO\"))\n\nos.system(\"flask mailu advertise\")\nos.system(\"flask db upgrade\")\n\naccount = os.environ.get(\"INITIAL_ADMIN_ACCOUNT\")\ndomain = os.environ.get(\"INITIAL_ADMIN_DOMAIN\")\npassword = os.environ.get(\"INITIAL_ADMIN_PW\")\n\nif account is not None and domain is not None and password is not None:\n mode = os.environ.get(\"INITIAL_ADMIN_MODE\", default=\"ifmissing\")\n log.info(\"Creating initial admin accout %s@%s with mode %s\",account,domain,mode)\n os.system(\"flask mailu admin %s %s '%s' --mode %s\" % (account, domain, password, mode))\n\ndef test_DNS():\n import dns.resolver\n import dns.exception\n import dns.flags\n import dns.rdtypes\n import dns.rdatatype\n import dns.rdataclass\n import time\n # DNS stub configured to do DNSSEC enabled queries\n resolver = dns.resolver.Resolver()\n resolver.use_edns(0, 0, 1232)\n resolver.flags = dns.flags.AD | dns.flags.RD\n nameservers = resolver.nameservers\n for ns in nameservers:\n resolver.nameservers=[ns]\n while True:\n try:\n result = resolver.query('example.org', dns.rdatatype.A, dns.rdataclass.IN, lifetime=10)\n except Exception as e:\n log.critical(\"Your DNS resolver at %s is not working (%s). Please use another resolver or enable unbound via https://setup.mailu.io.\", ns, e);\n else:\n if result.response.flags & dns.flags.AD:\n break\n log.critical(\"Your DNS resolver at %s isn't doing DNSSEC validation; Please use another resolver or enable unbound via https://setup.mailu.io.\", ns)\n time.sleep(5)\n\ntest_DNS()\n\nstart_command=\"\".join([\n \"gunicorn --threads \", str(os.cpu_count()),\n \" -b :80 \",\n \"--access-logfile - \" if (log.root.level<=log.INFO) else \"\",\n \"--error-logfile - \",\n \"--preload \",\n \"'mailu:create_app()'\"])\n\nos.system(start_command)\n", "path": "core/admin/start.py"}]}
1,205
152
gh_patches_debug_1727
rasdani/github-patches
git_diff
mitmproxy__mitmproxy-1904
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> ValueError: data must be bytes, but is str Hi , When i use 'e' to edit form , sometimes i get this . ``` ➜ ~ mitmproxy -b 192.168.1.2 -p 8080 Traceback (most recent call last): File "mitmproxy/tools/console/master.py", line 292, in run File "site-packages/urwid/main_loop.py", line 278, in run File "site-packages/urwid/main_loop.py", line 376, in _run File "site-packages/urwid/main_loop.py", line 682, in run File "site-packages/urwid/main_loop.py", line 719, in _loop File "site-packages/urwid/raw_display.py", line 393, in <lambda> File "site-packages/urwid/raw_display.py", line 493, in parse_input File "site-packages/urwid/main_loop.py", line 403, in _update File "site-packages/urwid/main_loop.py", line 503, in process_input File "mitmproxy/tools/console/window.py", line 84, in keypress File "site-packages/urwid/container.py", line 1116, in keypress File "mitmproxy/tools/console/statusbar.py", line 155, in keypress File "mitmproxy/tools/console/statusbar.py", line 108, in keypress File "mitmproxy/tools/console/statusbar.py", line 133, in prompt_execute File "mitmproxy/tools/console/statusbar.py", line 31, in __call__ File "mitmproxy/tools/console/flowview.py", line 415, in edit File "mitmproxy/tools/console/flowview.py", line 351, in edit_form File "mitmproxy/tools/console/master.py", line 352, in view_grideditor File "site-packages/blinker/base.py", line 267, in send File "site-packages/blinker/base.py", line 267, in <listcomp> File "mitmproxy/tools/console/master.py", line 144, in sig_push_view_state File "site-packages/urwid/main_loop.py", line 578, in draw_screen File "site-packages/urwid/widget.py", line 141, in cached_render File "site-packages/urwid/container.py", line 1083, in render File "site-packages/urwid/widget.py", line 141, in cached_render File "site-packages/urwid/decoration.py", line 225, in render File "site-packages/urwid/widget.py", line 141, in cached_render File "site-packages/urwid/widget.py", line 1750, in render File "site-packages/urwid/widget.py", line 141, in cached_render File "site-packages/urwid/container.py", line 1083, in render File "site-packages/urwid/widget.py", line 141, in cached_render File "site-packages/urwid/listbox.py", line 455, in render File "site-packages/urwid/listbox.py", line 337, in calculate_visible File "site-packages/urwid/listbox.py", line 702, in _set_focus_complete File "site-packages/urwid/listbox.py", line 672, in _set_focus_first_selectable File "site-packages/urwid/listbox.py", line 340, in calculate_visible File "mitmproxy/tools/console/grideditor/base.py", line 223, in get_focus File "mitmproxy/tools/console/grideditor/base.py", line 77, in __init__ File "mitmproxy/tools/console/grideditor/col_bytes.py", line 33, in Display File "mitmproxy/tools/console/grideditor/col_bytes.py", line 73, in __init__ File "mitmproxy/utils/strutils.py", line 72, in bytes_to_escaped_str ValueError: data must be bytes, but is str mitmproxy has crashed! Please lodge a bug report at: https://github.com/mitmproxy/mitmproxy Shutting down... 
``` systeminfo: Mitmproxy version: 1.0.0 Python version: 3.5.2 Platform: Darwin-15.6.0-x86_64-i386-64bit SSL version: OpenSSL 1.0.2j 26 Sep 2016 Mac version: 10.11.6 ('', '', '') x86_64 </issue> <code> [start of mitmproxy/tools/console/grideditor/editors.py] 1 import re 2 import urwid 3 from mitmproxy import exceptions 4 from mitmproxy import flowfilter 5 from mitmproxy.addons import script 6 from mitmproxy.tools.console import common 7 from mitmproxy.tools.console.grideditor import base 8 from mitmproxy.tools.console.grideditor import col_text 9 from mitmproxy.tools.console.grideditor import col_bytes 10 from mitmproxy.tools.console.grideditor import col_subgrid 11 from mitmproxy.tools.console import signals 12 from mitmproxy.net.http import user_agents 13 14 15 class QueryEditor(base.GridEditor): 16 title = "Editing query" 17 columns = [ 18 col_text.Column("Key"), 19 col_text.Column("Value") 20 ] 21 22 23 class HeaderEditor(base.GridEditor): 24 title = "Editing headers" 25 columns = [ 26 col_bytes.Column("Key"), 27 col_bytes.Column("Value") 28 ] 29 30 def make_help(self): 31 h = super().make_help() 32 text = [ 33 urwid.Text([("text", "Special keys:\n")]) 34 ] 35 keys = [ 36 ("U", "add User-Agent header"), 37 ] 38 text.extend( 39 common.format_keyvals(keys, key="key", val="text", indent=4) 40 ) 41 text.append(urwid.Text([("text", "\n")])) 42 text.extend(h) 43 return text 44 45 def set_user_agent(self, k): 46 ua = user_agents.get_by_shortcut(k) 47 if ua: 48 self.walker.add_value( 49 [ 50 b"User-Agent", 51 ua[2].encode() 52 ] 53 ) 54 55 def handle_key(self, key): 56 if key == "U": 57 signals.status_prompt_onekey.send( 58 prompt="Add User-Agent header:", 59 keys=[(i[0], i[1]) for i in user_agents.UASTRINGS], 60 callback=self.set_user_agent, 61 ) 62 return True 63 64 65 class URLEncodedFormEditor(base.GridEditor): 66 title = "Editing URL-encoded form" 67 columns = [ 68 col_bytes.Column("Key"), 69 col_bytes.Column("Value") 70 ] 71 72 73 class ReplaceEditor(base.GridEditor): 74 title = "Editing replacement patterns" 75 columns = [ 76 col_text.Column("Filter"), 77 col_text.Column("Regex"), 78 col_text.Column("Replacement"), 79 ] 80 81 def is_error(self, col, val): 82 if col == 0: 83 if not flowfilter.parse(val): 84 return "Invalid filter specification." 85 elif col == 1: 86 try: 87 re.compile(val) 88 except re.error: 89 return "Invalid regular expression." 
90 return False 91 92 93 class SetHeadersEditor(base.GridEditor): 94 title = "Editing header set patterns" 95 columns = [ 96 col_text.Column("Filter"), 97 col_text.Column("Header"), 98 col_text.Column("Value"), 99 ] 100 101 def is_error(self, col, val): 102 if col == 0: 103 if not flowfilter.parse(val): 104 return "Invalid filter specification" 105 return False 106 107 def make_help(self): 108 h = super().make_help() 109 text = [ 110 urwid.Text([("text", "Special keys:\n")]) 111 ] 112 keys = [ 113 ("U", "add User-Agent header"), 114 ] 115 text.extend( 116 common.format_keyvals(keys, key="key", val="text", indent=4) 117 ) 118 text.append(urwid.Text([("text", "\n")])) 119 text.extend(h) 120 return text 121 122 def set_user_agent(self, k): 123 ua = user_agents.get_by_shortcut(k) 124 if ua: 125 self.walker.add_value( 126 [ 127 ".*", 128 b"User-Agent", 129 ua[2].encode() 130 ] 131 ) 132 133 def handle_key(self, key): 134 if key == "U": 135 signals.status_prompt_onekey.send( 136 prompt="Add User-Agent header:", 137 keys=[(i[0], i[1]) for i in user_agents.UASTRINGS], 138 callback=self.set_user_agent, 139 ) 140 return True 141 142 143 class PathEditor(base.GridEditor): 144 # TODO: Next row on enter? 145 146 title = "Editing URL path components" 147 columns = [ 148 col_text.Column("Component"), 149 ] 150 151 def data_in(self, data): 152 return [[i] for i in data] 153 154 def data_out(self, data): 155 return [i[0] for i in data] 156 157 158 class ScriptEditor(base.GridEditor): 159 title = "Editing scripts" 160 columns = [ 161 col_text.Column("Command"), 162 ] 163 164 def is_error(self, col, val): 165 try: 166 script.parse_command(val) 167 except exceptions.OptionsError as e: 168 return str(e) 169 170 171 class HostPatternEditor(base.GridEditor): 172 title = "Editing host patterns" 173 columns = [ 174 col_text.Column("Regex (matched on hostname:port / ip:port)") 175 ] 176 177 def is_error(self, col, val): 178 try: 179 re.compile(val, re.IGNORECASE) 180 except re.error as e: 181 return "Invalid regex: %s" % str(e) 182 183 def data_in(self, data): 184 return [[i] for i in data] 185 186 def data_out(self, data): 187 return [i[0] for i in data] 188 189 190 class CookieEditor(base.GridEditor): 191 title = "Editing request Cookie header" 192 columns = [ 193 col_text.Column("Name"), 194 col_text.Column("Value"), 195 ] 196 197 198 class CookieAttributeEditor(base.GridEditor): 199 title = "Editing Set-Cookie attributes" 200 columns = [ 201 col_text.Column("Name"), 202 col_text.Column("Value"), 203 ] 204 205 def data_in(self, data): 206 return [(k, v or "") for k, v in data] 207 208 def data_out(self, data): 209 ret = [] 210 for i in data: 211 if not i[1]: 212 ret.append([i[0], None]) 213 else: 214 ret.append(i) 215 return ret 216 217 218 class SetCookieEditor(base.GridEditor): 219 title = "Editing response SetCookie header" 220 columns = [ 221 col_text.Column("Name"), 222 col_text.Column("Value"), 223 col_subgrid.Column("Attributes", CookieAttributeEditor), 224 ] 225 226 def data_in(self, data): 227 flattened = [] 228 for key, (value, attrs) in data: 229 flattened.append([key, value, attrs.items(multi=True)]) 230 return flattened 231 232 def data_out(self, data): 233 vals = [] 234 for key, value, attrs in data: 235 vals.append( 236 [ 237 key, 238 (value, attrs) 239 ] 240 ) 241 return vals 242 [end of mitmproxy/tools/console/grideditor/editors.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. 
Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mitmproxy/tools/console/grideditor/editors.py b/mitmproxy/tools/console/grideditor/editors.py --- a/mitmproxy/tools/console/grideditor/editors.py +++ b/mitmproxy/tools/console/grideditor/editors.py @@ -65,8 +65,8 @@ class URLEncodedFormEditor(base.GridEditor): title = "Editing URL-encoded form" columns = [ - col_bytes.Column("Key"), - col_bytes.Column("Value") + col_text.Column("Key"), + col_text.Column("Value") ]
{"golden_diff": "diff --git a/mitmproxy/tools/console/grideditor/editors.py b/mitmproxy/tools/console/grideditor/editors.py\n--- a/mitmproxy/tools/console/grideditor/editors.py\n+++ b/mitmproxy/tools/console/grideditor/editors.py\n@@ -65,8 +65,8 @@\n class URLEncodedFormEditor(base.GridEditor):\n title = \"Editing URL-encoded form\"\n columns = [\n- col_bytes.Column(\"Key\"),\n- col_bytes.Column(\"Value\")\n+ col_text.Column(\"Key\"),\n+ col_text.Column(\"Value\")\n ]\n", "issue": "ValueError: data must be bytes, but is str\nHi , \r\n When i use 'e' to edit form , sometimes i get this .\r\n\r\n```\r\n\u279c ~ mitmproxy -b 192.168.1.2 -p 8080\r\nTraceback (most recent call last):\r\n File \"mitmproxy/tools/console/master.py\", line 292, in run\r\n File \"site-packages/urwid/main_loop.py\", line 278, in run\r\n File \"site-packages/urwid/main_loop.py\", line 376, in _run\r\n File \"site-packages/urwid/main_loop.py\", line 682, in run\r\n File \"site-packages/urwid/main_loop.py\", line 719, in _loop\r\n File \"site-packages/urwid/raw_display.py\", line 393, in <lambda>\r\n File \"site-packages/urwid/raw_display.py\", line 493, in parse_input\r\n File \"site-packages/urwid/main_loop.py\", line 403, in _update\r\n File \"site-packages/urwid/main_loop.py\", line 503, in process_input\r\n File \"mitmproxy/tools/console/window.py\", line 84, in keypress\r\n File \"site-packages/urwid/container.py\", line 1116, in keypress\r\n File \"mitmproxy/tools/console/statusbar.py\", line 155, in keypress\r\n File \"mitmproxy/tools/console/statusbar.py\", line 108, in keypress\r\n File \"mitmproxy/tools/console/statusbar.py\", line 133, in prompt_execute\r\n File \"mitmproxy/tools/console/statusbar.py\", line 31, in __call__\r\n File \"mitmproxy/tools/console/flowview.py\", line 415, in edit\r\n File \"mitmproxy/tools/console/flowview.py\", line 351, in edit_form\r\n File \"mitmproxy/tools/console/master.py\", line 352, in view_grideditor\r\n File \"site-packages/blinker/base.py\", line 267, in send\r\n File \"site-packages/blinker/base.py\", line 267, in <listcomp>\r\n File \"mitmproxy/tools/console/master.py\", line 144, in sig_push_view_state\r\n File \"site-packages/urwid/main_loop.py\", line 578, in draw_screen\r\n File \"site-packages/urwid/widget.py\", line 141, in cached_render\r\n File \"site-packages/urwid/container.py\", line 1083, in render\r\n File \"site-packages/urwid/widget.py\", line 141, in cached_render\r\n File \"site-packages/urwid/decoration.py\", line 225, in render\r\n File \"site-packages/urwid/widget.py\", line 141, in cached_render\r\n File \"site-packages/urwid/widget.py\", line 1750, in render\r\n File \"site-packages/urwid/widget.py\", line 141, in cached_render\r\n File \"site-packages/urwid/container.py\", line 1083, in render\r\n File \"site-packages/urwid/widget.py\", line 141, in cached_render\r\n File \"site-packages/urwid/listbox.py\", line 455, in render\r\n File \"site-packages/urwid/listbox.py\", line 337, in calculate_visible\r\n File \"site-packages/urwid/listbox.py\", line 702, in _set_focus_complete\r\n File \"site-packages/urwid/listbox.py\", line 672, in _set_focus_first_selectable\r\n File \"site-packages/urwid/listbox.py\", line 340, in calculate_visible\r\n File \"mitmproxy/tools/console/grideditor/base.py\", line 223, in get_focus\r\n File \"mitmproxy/tools/console/grideditor/base.py\", line 77, in __init__\r\n File \"mitmproxy/tools/console/grideditor/col_bytes.py\", line 33, in Display\r\n File \"mitmproxy/tools/console/grideditor/col_bytes.py\", line 73, in 
__init__\r\n File \"mitmproxy/utils/strutils.py\", line 72, in bytes_to_escaped_str\r\nValueError: data must be bytes, but is str\r\n\r\nmitmproxy has crashed!\r\nPlease lodge a bug report at:\r\n\thttps://github.com/mitmproxy/mitmproxy\r\nShutting down...\r\n\r\n```\r\nsysteminfo:\r\nMitmproxy version: 1.0.0\r\nPython version: 3.5.2\r\nPlatform: Darwin-15.6.0-x86_64-i386-64bit\r\nSSL version: OpenSSL 1.0.2j 26 Sep 2016\r\nMac version: 10.11.6 ('', '', '') x86_64\r\n\n", "before_files": [{"content": "import re\nimport urwid\nfrom mitmproxy import exceptions\nfrom mitmproxy import flowfilter\nfrom mitmproxy.addons import script\nfrom mitmproxy.tools.console import common\nfrom mitmproxy.tools.console.grideditor import base\nfrom mitmproxy.tools.console.grideditor import col_text\nfrom mitmproxy.tools.console.grideditor import col_bytes\nfrom mitmproxy.tools.console.grideditor import col_subgrid\nfrom mitmproxy.tools.console import signals\nfrom mitmproxy.net.http import user_agents\n\n\nclass QueryEditor(base.GridEditor):\n title = \"Editing query\"\n columns = [\n col_text.Column(\"Key\"),\n col_text.Column(\"Value\")\n ]\n\n\nclass HeaderEditor(base.GridEditor):\n title = \"Editing headers\"\n columns = [\n col_bytes.Column(\"Key\"),\n col_bytes.Column(\"Value\")\n ]\n\n def make_help(self):\n h = super().make_help()\n text = [\n urwid.Text([(\"text\", \"Special keys:\\n\")])\n ]\n keys = [\n (\"U\", \"add User-Agent header\"),\n ]\n text.extend(\n common.format_keyvals(keys, key=\"key\", val=\"text\", indent=4)\n )\n text.append(urwid.Text([(\"text\", \"\\n\")]))\n text.extend(h)\n return text\n\n def set_user_agent(self, k):\n ua = user_agents.get_by_shortcut(k)\n if ua:\n self.walker.add_value(\n [\n b\"User-Agent\",\n ua[2].encode()\n ]\n )\n\n def handle_key(self, key):\n if key == \"U\":\n signals.status_prompt_onekey.send(\n prompt=\"Add User-Agent header:\",\n keys=[(i[0], i[1]) for i in user_agents.UASTRINGS],\n callback=self.set_user_agent,\n )\n return True\n\n\nclass URLEncodedFormEditor(base.GridEditor):\n title = \"Editing URL-encoded form\"\n columns = [\n col_bytes.Column(\"Key\"),\n col_bytes.Column(\"Value\")\n ]\n\n\nclass ReplaceEditor(base.GridEditor):\n title = \"Editing replacement patterns\"\n columns = [\n col_text.Column(\"Filter\"),\n col_text.Column(\"Regex\"),\n col_text.Column(\"Replacement\"),\n ]\n\n def is_error(self, col, val):\n if col == 0:\n if not flowfilter.parse(val):\n return \"Invalid filter specification.\"\n elif col == 1:\n try:\n re.compile(val)\n except re.error:\n return \"Invalid regular expression.\"\n return False\n\n\nclass SetHeadersEditor(base.GridEditor):\n title = \"Editing header set patterns\"\n columns = [\n col_text.Column(\"Filter\"),\n col_text.Column(\"Header\"),\n col_text.Column(\"Value\"),\n ]\n\n def is_error(self, col, val):\n if col == 0:\n if not flowfilter.parse(val):\n return \"Invalid filter specification\"\n return False\n\n def make_help(self):\n h = super().make_help()\n text = [\n urwid.Text([(\"text\", \"Special keys:\\n\")])\n ]\n keys = [\n (\"U\", \"add User-Agent header\"),\n ]\n text.extend(\n common.format_keyvals(keys, key=\"key\", val=\"text\", indent=4)\n )\n text.append(urwid.Text([(\"text\", \"\\n\")]))\n text.extend(h)\n return text\n\n def set_user_agent(self, k):\n ua = user_agents.get_by_shortcut(k)\n if ua:\n self.walker.add_value(\n [\n \".*\",\n b\"User-Agent\",\n ua[2].encode()\n ]\n )\n\n def handle_key(self, key):\n if key == \"U\":\n signals.status_prompt_onekey.send(\n prompt=\"Add 
User-Agent header:\",\n keys=[(i[0], i[1]) for i in user_agents.UASTRINGS],\n callback=self.set_user_agent,\n )\n return True\n\n\nclass PathEditor(base.GridEditor):\n # TODO: Next row on enter?\n\n title = \"Editing URL path components\"\n columns = [\n col_text.Column(\"Component\"),\n ]\n\n def data_in(self, data):\n return [[i] for i in data]\n\n def data_out(self, data):\n return [i[0] for i in data]\n\n\nclass ScriptEditor(base.GridEditor):\n title = \"Editing scripts\"\n columns = [\n col_text.Column(\"Command\"),\n ]\n\n def is_error(self, col, val):\n try:\n script.parse_command(val)\n except exceptions.OptionsError as e:\n return str(e)\n\n\nclass HostPatternEditor(base.GridEditor):\n title = \"Editing host patterns\"\n columns = [\n col_text.Column(\"Regex (matched on hostname:port / ip:port)\")\n ]\n\n def is_error(self, col, val):\n try:\n re.compile(val, re.IGNORECASE)\n except re.error as e:\n return \"Invalid regex: %s\" % str(e)\n\n def data_in(self, data):\n return [[i] for i in data]\n\n def data_out(self, data):\n return [i[0] for i in data]\n\n\nclass CookieEditor(base.GridEditor):\n title = \"Editing request Cookie header\"\n columns = [\n col_text.Column(\"Name\"),\n col_text.Column(\"Value\"),\n ]\n\n\nclass CookieAttributeEditor(base.GridEditor):\n title = \"Editing Set-Cookie attributes\"\n columns = [\n col_text.Column(\"Name\"),\n col_text.Column(\"Value\"),\n ]\n\n def data_in(self, data):\n return [(k, v or \"\") for k, v in data]\n\n def data_out(self, data):\n ret = []\n for i in data:\n if not i[1]:\n ret.append([i[0], None])\n else:\n ret.append(i)\n return ret\n\n\nclass SetCookieEditor(base.GridEditor):\n title = \"Editing response SetCookie header\"\n columns = [\n col_text.Column(\"Name\"),\n col_text.Column(\"Value\"),\n col_subgrid.Column(\"Attributes\", CookieAttributeEditor),\n ]\n\n def data_in(self, data):\n flattened = []\n for key, (value, attrs) in data:\n flattened.append([key, value, attrs.items(multi=True)])\n return flattened\n\n def data_out(self, data):\n vals = []\n for key, value, attrs in data:\n vals.append(\n [\n key,\n (value, attrs)\n ]\n )\n return vals\n", "path": "mitmproxy/tools/console/grideditor/editors.py"}]}
3,678
122
gh_patches_debug_40592
rasdani/github-patches
git_diff
hpcaitech__ColossalAI-5731
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [tensor] fix some unittests [tensor] fix some unittests [tensor] fix some unittests </issue> <code> [start of colossalai/zero/gemini/placement_policy.py] 1 import functools 2 import warnings 3 from abc import ABC, abstractmethod 4 from time import time 5 from typing import Dict, List, Optional, Tuple, Type 6 7 import torch 8 9 from colossalai.accelerator import get_accelerator 10 from colossalai.legacy.utils.memory import colo_device_memory_capacity 11 from colossalai.zero.gemini.chunk import Chunk 12 13 from .chunk import Chunk, ChunkManager 14 from .memory_tracer import ChunkMemStatsCollector 15 16 17 class PlacementPolicy(ABC): 18 need_mem_stats: bool = False 19 20 def __init__( 21 self, 22 gemini_manager: "GeminiManager", 23 chunk_manager: ChunkManager, 24 mem_stats_collector: Optional[ChunkMemStatsCollector] = None, 25 max_prefetch: int = 0, 26 **kwargs, 27 ) -> None: 28 self.gemini_manager = gemini_manager 29 self.chunk_manager = chunk_manager 30 self.mem_stats_collector: Optional[ChunkMemStatsCollector] = mem_stats_collector 31 self.max_prefetch = max_prefetch 32 33 @abstractmethod 34 def evict_tensors(self, can_evict_chunks: List[Chunk], **kwargs) -> Tuple[int, float]: 35 raise NotImplementedError 36 37 @abstractmethod 38 def setup_grads_device( 39 self, params: List[torch.Tensor], grads_device_map: Dict[torch.Tensor, torch.device] 40 ) -> None: 41 raise NotImplementedError 42 43 @abstractmethod 44 def get_prefetch_chunks(self) -> List[Chunk]: 45 raise NotImplementedError 46 47 48 class StaticPlacementPolicy(PlacementPolicy): 49 def __init__( 50 self, 51 gemini_manager: "GeminiManager", 52 chunk_manager: ChunkManager, 53 mem_stats_collector: Optional[ChunkMemStatsCollector] = None, 54 max_prefetch: int = 0, 55 shard_param_frac: float = 1.0, 56 offload_optim_frac: float = 0.0, 57 offload_param_frac: float = 0.0, 58 **kwargs, 59 ) -> None: 60 super().__init__( 61 gemini_manager, chunk_manager, mem_stats_collector=mem_stats_collector, max_prefetch=max_prefetch 62 ) 63 if offload_param_frac > 0.0 and (shard_param_frac != 1.0 or offload_optim_frac != 1.0): 64 warnings.warn("offload_param_frac is ignored when shard_param_frac != 1.0 or offload_optim_frac != 1.0") 65 offload_param_frac = 0.0 66 self.shard_param_frac = shard_param_frac 67 self.offload_optim_frac = offload_optim_frac 68 self.offload_param_frac = offload_param_frac 69 # these should be initialized in setup_grads_device 70 self.keep_gathered_chunk_mem = 0.0 71 self.keep_cuda_chunk_mem = 0.0 72 73 def evict_tensors(self, can_evict_chunks: List[Chunk], **kwargs) -> Tuple[int, float]: 74 can_shard_chunk_mem = sum(chunk.chunk_mem for chunk in can_evict_chunks) 75 can_offload_chunk_mem = can_shard_chunk_mem 76 for chunk in can_evict_chunks: 77 if can_shard_chunk_mem <= self.keep_gathered_chunk_mem: 78 break 79 self.chunk_manager.release_chunk(chunk) 80 # real saved mem is chunk_mem - shard_mem, for simplicity we use chunk_mem 81 can_shard_chunk_mem -= chunk.chunk_mem 82 for chunk in can_evict_chunks: 83 if can_offload_chunk_mem <= self.keep_cuda_chunk_mem: 84 break 85 self.chunk_manager.move_chunk(chunk, torch.device("cpu")) 86 # real saved mem is shard_mem, for simplicity we use chunk_mem 87 can_offload_chunk_mem -= chunk.chunk_mem 88 return 0, 0.0 89 90 def setup_grads_device( 91 self, params: List[torch.Tensor], grads_device_map: Dict[torch.Tensor, torch.device] 92 ) -> None: 93 total_chunk_mem = 
sum(self.chunk_manager.get_chunk(p).chunk_mem for p in params) 94 95 offload_optim_chunk_mem = total_chunk_mem * self.offload_optim_frac 96 offloaded_optim_chunk_mem = 0 97 chunks = set(self.chunk_manager.get_chunk(p) for p in params) 98 for chunk in chunks: 99 params = chunk.get_tensors() 100 # init offload optim settings 101 # keep gathered chunks are in CUDA 102 if chunk.keep_gathered or offloaded_optim_chunk_mem >= offload_optim_chunk_mem: 103 device = get_accelerator().get_current_device() 104 else: 105 device = torch.device("cpu") 106 # real offloaded mem is chunk.shard_mem, for simplicity we use chunk mem here 107 offloaded_optim_chunk_mem += chunk.chunk_mem 108 for p in params: 109 grads_device_map[p] = device 110 self.keep_gathered_chunk_mem = total_chunk_mem * (1 - self.shard_param_frac) 111 self.keep_cuda_chunk_mem = total_chunk_mem * (1 - self.offload_param_frac) 112 113 def get_prefetch_chunks(self) -> List[Chunk]: 114 if self.gemini_manager.is_warmup(): # no prefetch during warmup since we need compute_list 115 return [] 116 can_prefetch = self.max_prefetch - len(self.gemini_manager._async_works) 117 prefetch = [] 118 for i in range(self.gemini_manager.compute_idx + 1, len(self.gemini_manager.compute_list)): 119 for chunk in self.gemini_manager.compute_list[i]: 120 if len(prefetch) >= can_prefetch: 121 break 122 if chunk not in prefetch and chunk not in self.chunk_manager.accessed_chunks: 123 prefetch.append(chunk) 124 if len(prefetch) >= can_prefetch: 125 break 126 return prefetch 127 128 129 class AutoPlacementPolicy(PlacementPolicy): 130 need_mem_stats: bool = True 131 132 def __init__( 133 self, 134 gemini_manager: "GeminiManager", 135 chunk_manager: ChunkManager, 136 mem_stats_collector: Optional[ChunkMemStatsCollector] = None, 137 max_prefetch: int = 0, 138 warmup_non_model_data_ratio: float = 0.8, 139 steady_cuda_cap_ratio: float = 0.9, 140 **kwargs, 141 ) -> None: 142 super().__init__( 143 gemini_manager, chunk_manager, mem_stats_collector=mem_stats_collector, max_prefetch=max_prefetch 144 ) 145 # model data will use 1-_warmup_non_model_data_ratio CUDA memory in warmup phase 146 # you can set them by AutoPlacementPolicy.set_warmup_non_model_data_ratio() 147 # and AutoPlacementPolicy.set_steady_cuda_cap_ratio() 148 self._warmup_non_model_data_ratio = warmup_non_model_data_ratio 149 self._steady_cuda_cap_ratio = steady_cuda_cap_ratio 150 151 def evict_tensors( 152 self, 153 can_evict_chunks: List[Chunk], 154 cuda_demand: int = 0, 155 warmup: bool = True, 156 compute_list: Optional[List[Tuple[Chunk, ...]]] = None, 157 compute_idx: int = 0, 158 **kwargs, 159 ) -> Tuple[int, float]: 160 """ 161 Evict tensors from CUDA device. 162 163 Args: 164 can_evict_chunks (List[StatefulTensor]): the list of tensors that can be evicted. 165 cuda_demand (int, optional): the volume of data needed on cuda device. Defaults to 0. 166 warmup (bool, optional): a flag indicates whether in the phase of warmup. Defaults to True. 167 compute_list (List[StatefulTensor], optional): TODO. Defaults to []. 168 compute_idx (int, optional): the idx of computing device. Defaults to 0. 169 170 Raises: 171 RuntimeError: 172 173 Returns: 174 int: the volume of memory that is evicted 175 """ 176 start = time() 177 cuda_capacity = colo_device_memory_capacity(get_accelerator().get_current_device()) 178 used_cuda_model_data = self.chunk_manager.total_mem["cuda"] 179 if warmup: 180 # We designate a part of CUDA memory for model data in warmup iterations. 
181 max_cuda_non_model_data_per_period = cuda_capacity * self._warmup_non_model_data_ratio 182 else: 183 # max non-model-data cuda memory consumption of this sampling moment and the next sampling moment. 184 max_cuda_non_model_data_per_period = self.mem_stats_collector.next_period_non_model_data_usage("cuda") 185 cuda_capacity *= self._steady_cuda_cap_ratio 186 total_cuda_model_data = cuda_capacity - max_cuda_non_model_data_per_period 187 avail_cuda_model_data = total_cuda_model_data - used_cuda_model_data 188 freed_cuda_model_data = 0 189 190 if avail_cuda_model_data < cuda_demand: 191 # Move cuda_demand - avail_cuda_model_data volume of tensors 192 # to_free_cuda_model_data = cuda_demand - avail_cuda_model_data 193 to_free_cuda_model_data = cuda_demand - avail_cuda_model_data 194 to_free_chunks = can_evict_chunks 195 if not warmup: 196 to_free_chunks = self._sort_can_evict_chunks(tuple(to_free_chunks), compute_idx, tuple(compute_list)) 197 # print(self._sort_can_evict_chunks.cache_info()) 198 for chunk in to_free_chunks: 199 if freed_cuda_model_data >= to_free_cuda_model_data: 200 break 201 202 self.chunk_manager.release_chunk(chunk) 203 self.chunk_manager.move_chunk(chunk, torch.device("cpu")) 204 freed_cuda_model_data += chunk.chunk_mem 205 if freed_cuda_model_data < to_free_cuda_model_data: 206 raise RuntimeError( 207 f"Adjust layout failed! No enough CUDA memory! " 208 f"Need {to_free_cuda_model_data}, freed {freed_cuda_model_data}" 209 ) 210 return freed_cuda_model_data, time() - start 211 212 @staticmethod 213 @functools.lru_cache(maxsize=None) 214 def _sort_can_evict_chunks(can_evict_chunks: tuple, compute_idx: int, compute_list: tuple) -> list: 215 next_compute_idx = {chunk: len(compute_list) for chunk in can_evict_chunks} 216 for i in range(len(compute_list) - 1, compute_idx, -1): 217 for chunk in compute_list[i]: 218 if chunk in next_compute_idx: 219 next_compute_idx[chunk] = i 220 next_compute_idx = sorted(next_compute_idx.items(), key=lambda pair: pair[1], reverse=True) 221 return [t for (t, idx) in next_compute_idx] 222 223 def setup_grads_device( 224 self, params: List[torch.Tensor], grads_device_map: Dict[torch.Tensor, torch.device] 225 ) -> None: 226 for p in params: 227 chunk = self.chunk_manager.get_chunk(p) 228 # init offload optim settings 229 # keep gathered chunks are in CUDA 230 if chunk.keep_gathered: 231 grads_device_map[p] = get_accelerator().get_current_device() 232 else: 233 grads_device_map[p] = torch.device("cpu") 234 235 def get_prefetch_chunks(self, max_prefetch: int) -> List[Chunk]: 236 return [] # TODO @botbw: implement prefetching for auto 237 238 239 class PlacementPolicyFactory: 240 policies: Dict[str, Type[PlacementPolicy]] = { 241 "auto": AutoPlacementPolicy, 242 "static": StaticPlacementPolicy, 243 } 244 245 @staticmethod 246 def create(policy_name: str) -> Type[PlacementPolicy]: 247 if policy_name not in PlacementPolicyFactory.policies: 248 raise TypeError(f"Unknown tensor placement policy {policy_name}") 249 return PlacementPolicyFactory.policies[policy_name] 250 251 @staticmethod 252 def get_policy_names(): 253 return tuple(PlacementPolicyFactory.policies.keys()) 254 [end of colossalai/zero/gemini/placement_policy.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/colossalai/zero/gemini/placement_policy.py b/colossalai/zero/gemini/placement_policy.py --- a/colossalai/zero/gemini/placement_policy.py +++ b/colossalai/zero/gemini/placement_policy.py @@ -19,7 +19,7 @@ def __init__( self, - gemini_manager: "GeminiManager", + gemini_manager: "GeminiManager", # TODO @botbw: solve circular import chunk_manager: ChunkManager, mem_stats_collector: Optional[ChunkMemStatsCollector] = None, max_prefetch: int = 0, @@ -40,9 +40,8 @@ ) -> None: raise NotImplementedError - @abstractmethod def get_prefetch_chunks(self) -> List[Chunk]: - raise NotImplementedError + return [] # no prefetch by default class StaticPlacementPolicy(PlacementPolicy): @@ -116,12 +115,14 @@ can_prefetch = self.max_prefetch - len(self.gemini_manager._async_works) prefetch = [] for i in range(self.gemini_manager.compute_idx + 1, len(self.gemini_manager.compute_list)): + break_flag = False for chunk in self.gemini_manager.compute_list[i]: if len(prefetch) >= can_prefetch: + break_flag = True break if chunk not in prefetch and chunk not in self.chunk_manager.accessed_chunks: prefetch.append(chunk) - if len(prefetch) >= can_prefetch: + if break_flag: break return prefetch @@ -232,8 +233,33 @@ else: grads_device_map[p] = torch.device("cpu") - def get_prefetch_chunks(self, max_prefetch: int) -> List[Chunk]: - return [] # TODO @botbw: implement prefetching for auto + def get_prefetch_chunks(self) -> List[Chunk]: + if self.gemini_manager.is_warmup(): # no prefetch during warmup since we need compute_list + return [] + # modified from self.evict_tensors + cuda_capacity = self._steady_cuda_cap_ratio * colo_device_memory_capacity( + get_accelerator().get_current_device() + ) + max_cuda_non_model_data_per_period = self.mem_stats_collector.next_period_non_model_data_usage("cuda") + used_cuda_model_data = self.chunk_manager.total_mem["cuda"] + total_cuda_model_data = cuda_capacity - max_cuda_non_model_data_per_period + avail_cuda_model_data = total_cuda_model_data - used_cuda_model_data + + prefetch_chunk_memory = 0 + can_prefetch = self.max_prefetch - len(self.gemini_manager._async_works) + prefetch = [] + for i in range(self.gemini_manager.compute_idx + 1, len(self.gemini_manager.compute_list)): + break_flag = False + for chunk in self.gemini_manager.compute_list[i]: + chunk: Chunk + if len(prefetch) >= can_prefetch or prefetch_chunk_memory + chunk.chunk_mem > avail_cuda_model_data: + break_flag = True + break + if chunk not in prefetch and chunk not in self.chunk_manager.accessed_chunks: + prefetch.append(chunk) + if break_flag: + break + return prefetch class PlacementPolicyFactory:
{"golden_diff": "diff --git a/colossalai/zero/gemini/placement_policy.py b/colossalai/zero/gemini/placement_policy.py\n--- a/colossalai/zero/gemini/placement_policy.py\n+++ b/colossalai/zero/gemini/placement_policy.py\n@@ -19,7 +19,7 @@\n \n def __init__(\n self,\n- gemini_manager: \"GeminiManager\",\n+ gemini_manager: \"GeminiManager\", # TODO @botbw: solve circular import\n chunk_manager: ChunkManager,\n mem_stats_collector: Optional[ChunkMemStatsCollector] = None,\n max_prefetch: int = 0,\n@@ -40,9 +40,8 @@\n ) -> None:\n raise NotImplementedError\n \n- @abstractmethod\n def get_prefetch_chunks(self) -> List[Chunk]:\n- raise NotImplementedError\n+ return [] # no prefetch by default\n \n \n class StaticPlacementPolicy(PlacementPolicy):\n@@ -116,12 +115,14 @@\n can_prefetch = self.max_prefetch - len(self.gemini_manager._async_works)\n prefetch = []\n for i in range(self.gemini_manager.compute_idx + 1, len(self.gemini_manager.compute_list)):\n+ break_flag = False\n for chunk in self.gemini_manager.compute_list[i]:\n if len(prefetch) >= can_prefetch:\n+ break_flag = True\n break\n if chunk not in prefetch and chunk not in self.chunk_manager.accessed_chunks:\n prefetch.append(chunk)\n- if len(prefetch) >= can_prefetch:\n+ if break_flag:\n break\n return prefetch\n \n@@ -232,8 +233,33 @@\n else:\n grads_device_map[p] = torch.device(\"cpu\")\n \n- def get_prefetch_chunks(self, max_prefetch: int) -> List[Chunk]:\n- return [] # TODO @botbw: implement prefetching for auto\n+ def get_prefetch_chunks(self) -> List[Chunk]:\n+ if self.gemini_manager.is_warmup(): # no prefetch during warmup since we need compute_list\n+ return []\n+ # modified from self.evict_tensors\n+ cuda_capacity = self._steady_cuda_cap_ratio * colo_device_memory_capacity(\n+ get_accelerator().get_current_device()\n+ )\n+ max_cuda_non_model_data_per_period = self.mem_stats_collector.next_period_non_model_data_usage(\"cuda\")\n+ used_cuda_model_data = self.chunk_manager.total_mem[\"cuda\"]\n+ total_cuda_model_data = cuda_capacity - max_cuda_non_model_data_per_period\n+ avail_cuda_model_data = total_cuda_model_data - used_cuda_model_data\n+\n+ prefetch_chunk_memory = 0\n+ can_prefetch = self.max_prefetch - len(self.gemini_manager._async_works)\n+ prefetch = []\n+ for i in range(self.gemini_manager.compute_idx + 1, len(self.gemini_manager.compute_list)):\n+ break_flag = False\n+ for chunk in self.gemini_manager.compute_list[i]:\n+ chunk: Chunk\n+ if len(prefetch) >= can_prefetch or prefetch_chunk_memory + chunk.chunk_mem > avail_cuda_model_data:\n+ break_flag = True\n+ break\n+ if chunk not in prefetch and chunk not in self.chunk_manager.accessed_chunks:\n+ prefetch.append(chunk)\n+ if break_flag:\n+ break\n+ return prefetch\n \n \n class PlacementPolicyFactory:\n", "issue": "[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n", "before_files": [{"content": "import functools\nimport warnings\nfrom abc import ABC, abstractmethod\nfrom time import time\nfrom typing import Dict, List, Optional, Tuple, Type\n\nimport torch\n\nfrom colossalai.accelerator import get_accelerator\nfrom colossalai.legacy.utils.memory import colo_device_memory_capacity\nfrom colossalai.zero.gemini.chunk import Chunk\n\nfrom .chunk import Chunk, ChunkManager\nfrom .memory_tracer import ChunkMemStatsCollector\n\n\nclass PlacementPolicy(ABC):\n need_mem_stats: bool = False\n\n def __init__(\n self,\n gemini_manager: \"GeminiManager\",\n chunk_manager: ChunkManager,\n mem_stats_collector: Optional[ChunkMemStatsCollector] = 
None,\n max_prefetch: int = 0,\n **kwargs,\n ) -> None:\n self.gemini_manager = gemini_manager\n self.chunk_manager = chunk_manager\n self.mem_stats_collector: Optional[ChunkMemStatsCollector] = mem_stats_collector\n self.max_prefetch = max_prefetch\n\n @abstractmethod\n def evict_tensors(self, can_evict_chunks: List[Chunk], **kwargs) -> Tuple[int, float]:\n raise NotImplementedError\n\n @abstractmethod\n def setup_grads_device(\n self, params: List[torch.Tensor], grads_device_map: Dict[torch.Tensor, torch.device]\n ) -> None:\n raise NotImplementedError\n\n @abstractmethod\n def get_prefetch_chunks(self) -> List[Chunk]:\n raise NotImplementedError\n\n\nclass StaticPlacementPolicy(PlacementPolicy):\n def __init__(\n self,\n gemini_manager: \"GeminiManager\",\n chunk_manager: ChunkManager,\n mem_stats_collector: Optional[ChunkMemStatsCollector] = None,\n max_prefetch: int = 0,\n shard_param_frac: float = 1.0,\n offload_optim_frac: float = 0.0,\n offload_param_frac: float = 0.0,\n **kwargs,\n ) -> None:\n super().__init__(\n gemini_manager, chunk_manager, mem_stats_collector=mem_stats_collector, max_prefetch=max_prefetch\n )\n if offload_param_frac > 0.0 and (shard_param_frac != 1.0 or offload_optim_frac != 1.0):\n warnings.warn(\"offload_param_frac is ignored when shard_param_frac != 1.0 or offload_optim_frac != 1.0\")\n offload_param_frac = 0.0\n self.shard_param_frac = shard_param_frac\n self.offload_optim_frac = offload_optim_frac\n self.offload_param_frac = offload_param_frac\n # these should be initialized in setup_grads_device\n self.keep_gathered_chunk_mem = 0.0\n self.keep_cuda_chunk_mem = 0.0\n\n def evict_tensors(self, can_evict_chunks: List[Chunk], **kwargs) -> Tuple[int, float]:\n can_shard_chunk_mem = sum(chunk.chunk_mem for chunk in can_evict_chunks)\n can_offload_chunk_mem = can_shard_chunk_mem\n for chunk in can_evict_chunks:\n if can_shard_chunk_mem <= self.keep_gathered_chunk_mem:\n break\n self.chunk_manager.release_chunk(chunk)\n # real saved mem is chunk_mem - shard_mem, for simplicity we use chunk_mem\n can_shard_chunk_mem -= chunk.chunk_mem\n for chunk in can_evict_chunks:\n if can_offload_chunk_mem <= self.keep_cuda_chunk_mem:\n break\n self.chunk_manager.move_chunk(chunk, torch.device(\"cpu\"))\n # real saved mem is shard_mem, for simplicity we use chunk_mem\n can_offload_chunk_mem -= chunk.chunk_mem\n return 0, 0.0\n\n def setup_grads_device(\n self, params: List[torch.Tensor], grads_device_map: Dict[torch.Tensor, torch.device]\n ) -> None:\n total_chunk_mem = sum(self.chunk_manager.get_chunk(p).chunk_mem for p in params)\n\n offload_optim_chunk_mem = total_chunk_mem * self.offload_optim_frac\n offloaded_optim_chunk_mem = 0\n chunks = set(self.chunk_manager.get_chunk(p) for p in params)\n for chunk in chunks:\n params = chunk.get_tensors()\n # init offload optim settings\n # keep gathered chunks are in CUDA\n if chunk.keep_gathered or offloaded_optim_chunk_mem >= offload_optim_chunk_mem:\n device = get_accelerator().get_current_device()\n else:\n device = torch.device(\"cpu\")\n # real offloaded mem is chunk.shard_mem, for simplicity we use chunk mem here\n offloaded_optim_chunk_mem += chunk.chunk_mem\n for p in params:\n grads_device_map[p] = device\n self.keep_gathered_chunk_mem = total_chunk_mem * (1 - self.shard_param_frac)\n self.keep_cuda_chunk_mem = total_chunk_mem * (1 - self.offload_param_frac)\n\n def get_prefetch_chunks(self) -> List[Chunk]:\n if self.gemini_manager.is_warmup(): # no prefetch during warmup since we need compute_list\n return []\n 
can_prefetch = self.max_prefetch - len(self.gemini_manager._async_works)\n prefetch = []\n for i in range(self.gemini_manager.compute_idx + 1, len(self.gemini_manager.compute_list)):\n for chunk in self.gemini_manager.compute_list[i]:\n if len(prefetch) >= can_prefetch:\n break\n if chunk not in prefetch and chunk not in self.chunk_manager.accessed_chunks:\n prefetch.append(chunk)\n if len(prefetch) >= can_prefetch:\n break\n return prefetch\n\n\nclass AutoPlacementPolicy(PlacementPolicy):\n need_mem_stats: bool = True\n\n def __init__(\n self,\n gemini_manager: \"GeminiManager\",\n chunk_manager: ChunkManager,\n mem_stats_collector: Optional[ChunkMemStatsCollector] = None,\n max_prefetch: int = 0,\n warmup_non_model_data_ratio: float = 0.8,\n steady_cuda_cap_ratio: float = 0.9,\n **kwargs,\n ) -> None:\n super().__init__(\n gemini_manager, chunk_manager, mem_stats_collector=mem_stats_collector, max_prefetch=max_prefetch\n )\n # model data will use 1-_warmup_non_model_data_ratio CUDA memory in warmup phase\n # you can set them by AutoPlacementPolicy.set_warmup_non_model_data_ratio()\n # and AutoPlacementPolicy.set_steady_cuda_cap_ratio()\n self._warmup_non_model_data_ratio = warmup_non_model_data_ratio\n self._steady_cuda_cap_ratio = steady_cuda_cap_ratio\n\n def evict_tensors(\n self,\n can_evict_chunks: List[Chunk],\n cuda_demand: int = 0,\n warmup: bool = True,\n compute_list: Optional[List[Tuple[Chunk, ...]]] = None,\n compute_idx: int = 0,\n **kwargs,\n ) -> Tuple[int, float]:\n \"\"\"\n Evict tensors from CUDA device.\n\n Args:\n can_evict_chunks (List[StatefulTensor]): the list of tensors that can be evicted.\n cuda_demand (int, optional): the volume of data needed on cuda device. Defaults to 0.\n warmup (bool, optional): a flag indicates whether in the phase of warmup. Defaults to True.\n compute_list (List[StatefulTensor], optional): TODO. Defaults to [].\n compute_idx (int, optional): the idx of computing device. 
Defaults to 0.\n\n Raises:\n RuntimeError:\n\n Returns:\n int: the volume of memory that is evicted\n \"\"\"\n start = time()\n cuda_capacity = colo_device_memory_capacity(get_accelerator().get_current_device())\n used_cuda_model_data = self.chunk_manager.total_mem[\"cuda\"]\n if warmup:\n # We designate a part of CUDA memory for model data in warmup iterations.\n max_cuda_non_model_data_per_period = cuda_capacity * self._warmup_non_model_data_ratio\n else:\n # max non-model-data cuda memory consumption of this sampling moment and the next sampling moment.\n max_cuda_non_model_data_per_period = self.mem_stats_collector.next_period_non_model_data_usage(\"cuda\")\n cuda_capacity *= self._steady_cuda_cap_ratio\n total_cuda_model_data = cuda_capacity - max_cuda_non_model_data_per_period\n avail_cuda_model_data = total_cuda_model_data - used_cuda_model_data\n freed_cuda_model_data = 0\n\n if avail_cuda_model_data < cuda_demand:\n # Move cuda_demand - avail_cuda_model_data volume of tensors\n # to_free_cuda_model_data = cuda_demand - avail_cuda_model_data\n to_free_cuda_model_data = cuda_demand - avail_cuda_model_data\n to_free_chunks = can_evict_chunks\n if not warmup:\n to_free_chunks = self._sort_can_evict_chunks(tuple(to_free_chunks), compute_idx, tuple(compute_list))\n # print(self._sort_can_evict_chunks.cache_info())\n for chunk in to_free_chunks:\n if freed_cuda_model_data >= to_free_cuda_model_data:\n break\n\n self.chunk_manager.release_chunk(chunk)\n self.chunk_manager.move_chunk(chunk, torch.device(\"cpu\"))\n freed_cuda_model_data += chunk.chunk_mem\n if freed_cuda_model_data < to_free_cuda_model_data:\n raise RuntimeError(\n f\"Adjust layout failed! No enough CUDA memory! \"\n f\"Need {to_free_cuda_model_data}, freed {freed_cuda_model_data}\"\n )\n return freed_cuda_model_data, time() - start\n\n @staticmethod\n @functools.lru_cache(maxsize=None)\n def _sort_can_evict_chunks(can_evict_chunks: tuple, compute_idx: int, compute_list: tuple) -> list:\n next_compute_idx = {chunk: len(compute_list) for chunk in can_evict_chunks}\n for i in range(len(compute_list) - 1, compute_idx, -1):\n for chunk in compute_list[i]:\n if chunk in next_compute_idx:\n next_compute_idx[chunk] = i\n next_compute_idx = sorted(next_compute_idx.items(), key=lambda pair: pair[1], reverse=True)\n return [t for (t, idx) in next_compute_idx]\n\n def setup_grads_device(\n self, params: List[torch.Tensor], grads_device_map: Dict[torch.Tensor, torch.device]\n ) -> None:\n for p in params:\n chunk = self.chunk_manager.get_chunk(p)\n # init offload optim settings\n # keep gathered chunks are in CUDA\n if chunk.keep_gathered:\n grads_device_map[p] = get_accelerator().get_current_device()\n else:\n grads_device_map[p] = torch.device(\"cpu\")\n\n def get_prefetch_chunks(self, max_prefetch: int) -> List[Chunk]:\n return [] # TODO @botbw: implement prefetching for auto\n\n\nclass PlacementPolicyFactory:\n policies: Dict[str, Type[PlacementPolicy]] = {\n \"auto\": AutoPlacementPolicy,\n \"static\": StaticPlacementPolicy,\n }\n\n @staticmethod\n def create(policy_name: str) -> Type[PlacementPolicy]:\n if policy_name not in PlacementPolicyFactory.policies:\n raise TypeError(f\"Unknown tensor placement policy {policy_name}\")\n return PlacementPolicyFactory.policies[policy_name]\n\n @staticmethod\n def get_policy_names():\n return tuple(PlacementPolicyFactory.policies.keys())\n", "path": "colossalai/zero/gemini/placement_policy.py"}]}
3,715
767
gh_patches_debug_30075
rasdani/github-patches
git_diff
ansible-collections__community.general-7090
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> lookup bitwarden field=notes resulting in KeyError ### Summary When using the community.general.bitwarden lookup there is an error trying to lookup a secure note. Playbook: ```yaml - debug: msg: >- "{{ lookup('community.general.bitwarden', 'secure note 2023', field='notes') }}" ``` Error: ``` fatal: [my.hostname.net]: FAILED! => { "msg": "An unhandled exception occurred while running the lookup plugin 'community.general.bitwarden'. Error was a <class 'KeyError'>, original message: 'fields'. 'fields'" } ``` With debug: ```console exception during Jinja2 execution: Traceback (most recent call last): File "/opt/homebrew/Cellar/ansible/8.0.0/libexec/lib/python3.11/site-packages/ansible/template/__init__.py", line 831, in _lookup ran = instance.run(loop_terms, variables=self._available_variables, **kwargs) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/opt/homebrew/Cellar/ansible/8.0.0/libexec/lib/python3.11/site-packages/ansible_collections/community/general/plugins/lookup/bitwarden.py", line 159, in run return [_bitwarden.get_field(field, term, search_field, collection_id) for term in terms] ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/opt/homebrew/Cellar/ansible/8.0.0/libexec/lib/python3.11/site-packages/ansible_collections/community/general/plugins/lookup/bitwarden.py", line 159, in <listcomp> return [_bitwarden.get_field(field, term, search_field, collection_id) for term in terms] ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/opt/homebrew/Cellar/ansible/8.0.0/libexec/lib/python3.11/site-packages/ansible_collections/community/general/plugins/lookup/bitwarden.py", line 141, in get_field for custom_field in match['fields']: ~~~~~^^^^^^^^^^ KeyError: 'fields' fatal: [my.hostname.net]: FAILED! => { "msg": "An unhandled exception occurred while running the lookup plugin 'community.general.bitwarden'. Error was a <class 'KeyError'>, original message: 'fields'. 
'fields'" } ``` Same thing when trying to do the lookup by id: ```yaml - debug: msg: >- "{{ lookup('community.general.bitwarden', 'ba9f1125-e52a-4d4f-9b2b-af1c43a00bd6', search='id', field='notes') }}" ``` ### Issue Type Bug Report ### Component Name bitwarden ### Ansible Version ```console (paste below) $ ansible --version ansible [core 2.15.0] config file = /Users/sjoerd/.ansible.cfg configured module search path = ['/Users/name/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /opt/homebrew/Cellar/ansible/8.0.0/libexec/lib/python3.11/site-packages/ansible ansible collection location = /Users/name/.ansible/collections:/usr/share/ansible/collections executable location = /opt/homebrew/bin/ansible python version = 3.11.3 (main, Apr 7 2023, 20:13:31) [Clang 14.0.0 (clang-1400.0.29.202)] (/opt/homebrew/Cellar/ansible/8.0.0/libexec/bin/python3.11) jinja version = 3.1.2 libyaml = True ``` ### Community.general Version ```console (paste below) $ ansible-galaxy collection list community.general # /opt/homebrew/Cellar/ansible/8.0.0/libexec/lib/python3.11/site-packages/ansible_collections Collection Version ----------------- ------- community.general 7.0.1 ``` ### Configuration ```console (paste below) $ ansible-config dump --only-changed ``` ### OS / Environment Source: MacOS Target: CentOS Stream 9 ### Steps to Reproduce <!--- Paste example playbooks or commands between quotes below --> ```yaml (paste below) - debug: msg: >- "{{ lookup('community.general.bitwarden', 'secure note 2023', field='notes') }}" ``` ### Expected Results Expected the contents of the secure not to be printed to the terminal ### Actual Results ```console (paste below) fatal: [my.hostname.net]: FAILED! => { "msg": "An unhandled exception occurred while running the lookup plugin 'community.general.bitwarden'. Error was a <class 'KeyError'>, original message: 'fields'. 'fields'" } ``` ### Code of Conduct - [X] I agree to follow the Ansible Code of Conduct </issue> <code> [start of plugins/lookup/bitwarden.py] 1 # -*- coding: utf-8 -*- 2 # Copyright (c) 2022, Jonathan Lung <[email protected]> 3 # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) 4 # SPDX-License-Identifier: GPL-3.0-or-later 5 from __future__ import (absolute_import, division, print_function) 6 __metaclass__ = type 7 8 DOCUMENTATION = """ 9 name: bitwarden 10 author: 11 - Jonathan Lung (@lungj) <[email protected]> 12 requirements: 13 - bw (command line utility) 14 - be logged into bitwarden 15 - bitwarden vault unlocked 16 - E(BW_SESSION) environment variable set 17 short_description: Retrieve secrets from Bitwarden 18 version_added: 5.4.0 19 description: 20 - Retrieve secrets from Bitwarden. 21 options: 22 _terms: 23 description: Key(s) to fetch values for from login info. 24 required: true 25 type: list 26 elements: str 27 search: 28 description: Field to retrieve, for example V(name) or V(id). 29 type: str 30 default: name 31 version_added: 5.7.0 32 field: 33 description: Field to fetch. Leave unset to fetch whole response. 34 type: str 35 collection_id: 36 description: Collection ID to filter results by collection. Leave unset to skip filtering. 
37 type: str 38 version_added: 6.3.0 39 """ 40 41 EXAMPLES = """ 42 - name: "Get 'password' from Bitwarden record named 'a_test'" 43 ansible.builtin.debug: 44 msg: >- 45 {{ lookup('community.general.bitwarden', 'a_test', field='password') }} 46 47 - name: "Get 'password' from Bitwarden record with id 'bafba515-af11-47e6-abe3-af1200cd18b2'" 48 ansible.builtin.debug: 49 msg: >- 50 {{ lookup('community.general.bitwarden', 'bafba515-af11-47e6-abe3-af1200cd18b2', search='id', field='password') }} 51 52 - name: "Get 'password' from Bitwarden record named 'a_test' from collection" 53 ansible.builtin.debug: 54 msg: >- 55 {{ lookup('community.general.bitwarden', 'a_test', field='password', collection_id='bafba515-af11-47e6-abe3-af1200cd18b2') }} 56 57 - name: "Get full Bitwarden record named 'a_test'" 58 ansible.builtin.debug: 59 msg: >- 60 {{ lookup('community.general.bitwarden', 'a_test') }} 61 62 - name: "Get custom field 'api_key' from Bitwarden record named 'a_test'" 63 ansible.builtin.debug: 64 msg: >- 65 {{ lookup('community.general.bitwarden', 'a_test', field='api_key') }} 66 """ 67 68 RETURN = """ 69 _raw: 70 description: List of requested field or JSON object of list of matches. 71 type: list 72 elements: raw 73 """ 74 75 from subprocess import Popen, PIPE 76 77 from ansible.errors import AnsibleError 78 from ansible.module_utils.common.text.converters import to_bytes, to_text 79 from ansible.parsing.ajson import AnsibleJSONDecoder 80 from ansible.plugins.lookup import LookupBase 81 82 83 class BitwardenException(AnsibleError): 84 pass 85 86 87 class Bitwarden(object): 88 89 def __init__(self, path='bw'): 90 self._cli_path = path 91 92 @property 93 def cli_path(self): 94 return self._cli_path 95 96 @property 97 def unlocked(self): 98 out, err = self._run(['status'], stdin="") 99 decoded = AnsibleJSONDecoder().raw_decode(out)[0] 100 return decoded['status'] == 'unlocked' 101 102 def _run(self, args, stdin=None, expected_rc=0): 103 p = Popen([self.cli_path] + args, stdout=PIPE, stderr=PIPE, stdin=PIPE) 104 out, err = p.communicate(to_bytes(stdin)) 105 rc = p.wait() 106 if rc != expected_rc: 107 raise BitwardenException(err) 108 return to_text(out, errors='surrogate_or_strict'), to_text(err, errors='surrogate_or_strict') 109 110 def _get_matches(self, search_value, search_field, collection_id): 111 """Return matching records whose search_field is equal to key. 112 """ 113 114 # Prepare set of params for Bitwarden CLI 115 params = ['list', 'items', '--search', search_value] 116 117 if collection_id: 118 params.extend(['--collectionid', collection_id]) 119 120 out, err = self._run(params) 121 122 # This includes things that matched in different fields. 123 initial_matches = AnsibleJSONDecoder().raw_decode(out)[0] 124 125 # Filter to only include results from the right field. 126 return [item for item in initial_matches if item[search_field] == search_value] 127 128 def get_field(self, field, search_value, search_field="name", collection_id=None): 129 """Return a list of the specified field for records whose search_field match search_value 130 and filtered by collection if collection has been provided. 131 132 If field is None, return the whole record for each match. 
133 """ 134 matches = self._get_matches(search_value, search_field, collection_id) 135 136 if field in ['autofillOnPageLoad', 'password', 'passwordRevisionDate', 'totp', 'uris', 'username']: 137 return [match['login'][field] for match in matches] 138 elif not field: 139 return matches 140 else: 141 custom_field_matches = [] 142 for match in matches: 143 for custom_field in match['fields']: 144 if custom_field['name'] == field: 145 custom_field_matches.append(custom_field['value']) 146 if matches and not custom_field_matches: 147 raise AnsibleError("Custom field {field} does not exist in {search_value}".format(field=field, search_value=search_value)) 148 return custom_field_matches 149 150 151 class LookupModule(LookupBase): 152 153 def run(self, terms, variables=None, **kwargs): 154 self.set_options(var_options=variables, direct=kwargs) 155 field = self.get_option('field') 156 search_field = self.get_option('search') 157 collection_id = self.get_option('collection_id') 158 if not _bitwarden.unlocked: 159 raise AnsibleError("Bitwarden Vault locked. Run 'bw unlock'.") 160 161 return [_bitwarden.get_field(field, term, search_field, collection_id) for term in terms] 162 163 164 _bitwarden = Bitwarden() 165 [end of plugins/lookup/bitwarden.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/plugins/lookup/bitwarden.py b/plugins/lookup/bitwarden.py --- a/plugins/lookup/bitwarden.py +++ b/plugins/lookup/bitwarden.py @@ -132,20 +132,29 @@ If field is None, return the whole record for each match. """ matches = self._get_matches(search_value, search_field, collection_id) - - if field in ['autofillOnPageLoad', 'password', 'passwordRevisionDate', 'totp', 'uris', 'username']: - return [match['login'][field] for match in matches] - elif not field: + if not field: return matches - else: - custom_field_matches = [] - for match in matches: + field_matches = [] + for match in matches: + # if there are no custom fields, then `match` has no key 'fields' + if 'fields' in match: + custom_field_found = False for custom_field in match['fields']: - if custom_field['name'] == field: - custom_field_matches.append(custom_field['value']) - if matches and not custom_field_matches: - raise AnsibleError("Custom field {field} does not exist in {search_value}".format(field=field, search_value=search_value)) - return custom_field_matches + if field == custom_field['name']: + field_matches.append(custom_field['value']) + custom_field_found = True + break + if custom_field_found: + continue + if 'login' in match and field in match['login']: + field_matches.append(match['login'][field]) + continue + if field in match: + field_matches.append(match[field]) + continue + if matches and not field_matches: + raise AnsibleError("field {field} does not exist in {search_value}".format(field=field, search_value=search_value)) + return field_matches class LookupModule(LookupBase):
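The diff above restructures `get_field` so that items without custom fields, such as the Secure Note from the bug report, no longer hit `KeyError: 'fields'`. Below is a minimal, self-contained sketch of the same lookup order (custom fields first, then the `login` sub-object, then top-level keys such as `notes`); the sample item dicts are invented for illustration and this is not the plugin code itself.

```python
# Sketch of the guarded lookup order, assuming `item` is one dict decoded from
# `bw list items` output.
def extract_field(item, field):
    # 1) Custom fields, when the item has any ('fields' is absent on Secure Notes).
    for custom in item.get("fields", []):
        if custom.get("name") == field:
            return custom.get("value")
    # 2) Login sub-object (password, username, totp, ...).
    login = item.get("login", {})
    if field in login:
        return login[field]
    # 3) Top-level keys such as "notes".
    if field in item:
        return item[field]
    raise KeyError(f"field {field!r} not found in item {item.get('name')!r}")

secure_note = {"name": "secure note 2023", "notes": "the note body"}            # no 'fields' key
login_item = {"name": "a_test", "login": {"password": "hunter2"}, "fields": []}
print(extract_field(secure_note, "notes"))     # -> the note body
print(extract_field(login_item, "password"))   # -> hunter2
```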
{"golden_diff": "diff --git a/plugins/lookup/bitwarden.py b/plugins/lookup/bitwarden.py\n--- a/plugins/lookup/bitwarden.py\n+++ b/plugins/lookup/bitwarden.py\n@@ -132,20 +132,29 @@\n If field is None, return the whole record for each match.\n \"\"\"\n matches = self._get_matches(search_value, search_field, collection_id)\n-\n- if field in ['autofillOnPageLoad', 'password', 'passwordRevisionDate', 'totp', 'uris', 'username']:\n- return [match['login'][field] for match in matches]\n- elif not field:\n+ if not field:\n return matches\n- else:\n- custom_field_matches = []\n- for match in matches:\n+ field_matches = []\n+ for match in matches:\n+ # if there are no custom fields, then `match` has no key 'fields'\n+ if 'fields' in match:\n+ custom_field_found = False\n for custom_field in match['fields']:\n- if custom_field['name'] == field:\n- custom_field_matches.append(custom_field['value'])\n- if matches and not custom_field_matches:\n- raise AnsibleError(\"Custom field {field} does not exist in {search_value}\".format(field=field, search_value=search_value))\n- return custom_field_matches\n+ if field == custom_field['name']:\n+ field_matches.append(custom_field['value'])\n+ custom_field_found = True\n+ break\n+ if custom_field_found:\n+ continue\n+ if 'login' in match and field in match['login']:\n+ field_matches.append(match['login'][field])\n+ continue\n+ if field in match:\n+ field_matches.append(match[field])\n+ continue\n+ if matches and not field_matches:\n+ raise AnsibleError(\"field {field} does not exist in {search_value}\".format(field=field, search_value=search_value))\n+ return field_matches\n \n \n class LookupModule(LookupBase):\n", "issue": "lookup bitwarden field=notes resulting in KeyError\n### Summary\n\nWhen using the community.general.bitwarden lookup there is an error trying to lookup a secure note.\r\n\r\nPlaybook:\r\n```yaml\r\n- debug:\r\n msg: >-\r\n \"{{ lookup('community.general.bitwarden', 'secure note 2023', field='notes') }}\"\r\n```\r\n\r\nError:\r\n```\r\nfatal: [my.hostname.net]: FAILED! => {\r\n \"msg\": \"An unhandled exception occurred while running the lookup plugin 'community.general.bitwarden'. Error was a <class 'KeyError'>, original message: 'fields'. 
'fields'\"\r\n}\r\n```\r\nWith debug:\r\n```console\r\nexception during Jinja2 execution: Traceback (most recent call last):\r\n File \"/opt/homebrew/Cellar/ansible/8.0.0/libexec/lib/python3.11/site-packages/ansible/template/__init__.py\", line 831, in _lookup\r\n ran = instance.run(loop_terms, variables=self._available_variables, **kwargs)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/opt/homebrew/Cellar/ansible/8.0.0/libexec/lib/python3.11/site-packages/ansible_collections/community/general/plugins/lookup/bitwarden.py\", line 159, in run\r\n return [_bitwarden.get_field(field, term, search_field, collection_id) for term in terms]\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/opt/homebrew/Cellar/ansible/8.0.0/libexec/lib/python3.11/site-packages/ansible_collections/community/general/plugins/lookup/bitwarden.py\", line 159, in <listcomp>\r\n return [_bitwarden.get_field(field, term, search_field, collection_id) for term in terms]\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/opt/homebrew/Cellar/ansible/8.0.0/libexec/lib/python3.11/site-packages/ansible_collections/community/general/plugins/lookup/bitwarden.py\", line 141, in get_field\r\n for custom_field in match['fields']:\r\n ~~~~~^^^^^^^^^^\r\nKeyError: 'fields'\r\nfatal: [my.hostname.net]: FAILED! => {\r\n \"msg\": \"An unhandled exception occurred while running the lookup plugin 'community.general.bitwarden'. Error was a <class 'KeyError'>, original message: 'fields'. 'fields'\"\r\n}\r\n```\r\n\r\nSame thing when trying to do the lookup by id:\r\n```yaml\r\n- debug:\r\n msg: >-\r\n \"{{ lookup('community.general.bitwarden', 'ba9f1125-e52a-4d4f-9b2b-af1c43a00bd6', search='id', field='notes') }}\"\r\n```\n\n### Issue Type\n\nBug Report\n\n### Component Name\n\nbitwarden\n\n### Ansible Version\n\n```console (paste below)\r\n$ ansible --version\r\nansible [core 2.15.0]\r\n config file = /Users/sjoerd/.ansible.cfg\r\n configured module search path = ['/Users/name/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']\r\n ansible python module location = /opt/homebrew/Cellar/ansible/8.0.0/libexec/lib/python3.11/site-packages/ansible\r\n ansible collection location = /Users/name/.ansible/collections:/usr/share/ansible/collections\r\n executable location = /opt/homebrew/bin/ansible\r\n python version = 3.11.3 (main, Apr 7 2023, 20:13:31) [Clang 14.0.0 (clang-1400.0.29.202)] (/opt/homebrew/Cellar/ansible/8.0.0/libexec/bin/python3.11)\r\n jinja version = 3.1.2\r\n libyaml = True\r\n```\r\n\n\n### Community.general Version\n\n```console (paste below)\r\n$ ansible-galaxy collection list community.general\r\n\r\n# /opt/homebrew/Cellar/ansible/8.0.0/libexec/lib/python3.11/site-packages/ansible_collections\r\nCollection Version\r\n----------------- -------\r\ncommunity.general 7.0.1\r\n```\r\n\n\n### Configuration\n\n```console (paste below)\r\n$ ansible-config dump --only-changed\r\n\r\n```\r\n\n\n### OS / Environment\n\nSource: MacOS\r\nTarget: CentOS Stream 9\r\n\n\n### Steps to Reproduce\n\n<!--- Paste example playbooks or commands between quotes below -->\r\n```yaml (paste below)\r\n- debug:\r\n msg: >-\r\n \"{{ lookup('community.general.bitwarden', 'secure note 2023', field='notes') }}\"\r\n\r\n```\r\n\n\n### Expected Results\n\nExpected the contents of the secure not to be printed to the terminal\n\n### Actual Results\n\n```console (paste below)\r\nfatal: [my.hostname.net]: FAILED! 
=> {\r\n \"msg\": \"An unhandled exception occurred while running the lookup plugin 'community.general.bitwarden'. Error was a <class 'KeyError'>, original message: 'fields'. 'fields'\"\r\n}\r\n```\r\n\n\n### Code of Conduct\n\n- [X] I agree to follow the Ansible Code of Conduct\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright (c) 2022, Jonathan Lung <[email protected]>\n# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)\n# SPDX-License-Identifier: GPL-3.0-or-later\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nDOCUMENTATION = \"\"\"\n name: bitwarden\n author:\n - Jonathan Lung (@lungj) <[email protected]>\n requirements:\n - bw (command line utility)\n - be logged into bitwarden\n - bitwarden vault unlocked\n - E(BW_SESSION) environment variable set\n short_description: Retrieve secrets from Bitwarden\n version_added: 5.4.0\n description:\n - Retrieve secrets from Bitwarden.\n options:\n _terms:\n description: Key(s) to fetch values for from login info.\n required: true\n type: list\n elements: str\n search:\n description: Field to retrieve, for example V(name) or V(id).\n type: str\n default: name\n version_added: 5.7.0\n field:\n description: Field to fetch. Leave unset to fetch whole response.\n type: str\n collection_id:\n description: Collection ID to filter results by collection. Leave unset to skip filtering.\n type: str\n version_added: 6.3.0\n\"\"\"\n\nEXAMPLES = \"\"\"\n- name: \"Get 'password' from Bitwarden record named 'a_test'\"\n ansible.builtin.debug:\n msg: >-\n {{ lookup('community.general.bitwarden', 'a_test', field='password') }}\n\n- name: \"Get 'password' from Bitwarden record with id 'bafba515-af11-47e6-abe3-af1200cd18b2'\"\n ansible.builtin.debug:\n msg: >-\n {{ lookup('community.general.bitwarden', 'bafba515-af11-47e6-abe3-af1200cd18b2', search='id', field='password') }}\n\n- name: \"Get 'password' from Bitwarden record named 'a_test' from collection\"\n ansible.builtin.debug:\n msg: >-\n {{ lookup('community.general.bitwarden', 'a_test', field='password', collection_id='bafba515-af11-47e6-abe3-af1200cd18b2') }}\n\n- name: \"Get full Bitwarden record named 'a_test'\"\n ansible.builtin.debug:\n msg: >-\n {{ lookup('community.general.bitwarden', 'a_test') }}\n\n- name: \"Get custom field 'api_key' from Bitwarden record named 'a_test'\"\n ansible.builtin.debug:\n msg: >-\n {{ lookup('community.general.bitwarden', 'a_test', field='api_key') }}\n\"\"\"\n\nRETURN = \"\"\"\n _raw:\n description: List of requested field or JSON object of list of matches.\n type: list\n elements: raw\n\"\"\"\n\nfrom subprocess import Popen, PIPE\n\nfrom ansible.errors import AnsibleError\nfrom ansible.module_utils.common.text.converters import to_bytes, to_text\nfrom ansible.parsing.ajson import AnsibleJSONDecoder\nfrom ansible.plugins.lookup import LookupBase\n\n\nclass BitwardenException(AnsibleError):\n pass\n\n\nclass Bitwarden(object):\n\n def __init__(self, path='bw'):\n self._cli_path = path\n\n @property\n def cli_path(self):\n return self._cli_path\n\n @property\n def unlocked(self):\n out, err = self._run(['status'], stdin=\"\")\n decoded = AnsibleJSONDecoder().raw_decode(out)[0]\n return decoded['status'] == 'unlocked'\n\n def _run(self, args, stdin=None, expected_rc=0):\n p = Popen([self.cli_path] + args, stdout=PIPE, stderr=PIPE, stdin=PIPE)\n out, err = p.communicate(to_bytes(stdin))\n rc = p.wait()\n if rc != expected_rc:\n raise 
BitwardenException(err)\n return to_text(out, errors='surrogate_or_strict'), to_text(err, errors='surrogate_or_strict')\n\n def _get_matches(self, search_value, search_field, collection_id):\n \"\"\"Return matching records whose search_field is equal to key.\n \"\"\"\n\n # Prepare set of params for Bitwarden CLI\n params = ['list', 'items', '--search', search_value]\n\n if collection_id:\n params.extend(['--collectionid', collection_id])\n\n out, err = self._run(params)\n\n # This includes things that matched in different fields.\n initial_matches = AnsibleJSONDecoder().raw_decode(out)[0]\n\n # Filter to only include results from the right field.\n return [item for item in initial_matches if item[search_field] == search_value]\n\n def get_field(self, field, search_value, search_field=\"name\", collection_id=None):\n \"\"\"Return a list of the specified field for records whose search_field match search_value\n and filtered by collection if collection has been provided.\n\n If field is None, return the whole record for each match.\n \"\"\"\n matches = self._get_matches(search_value, search_field, collection_id)\n\n if field in ['autofillOnPageLoad', 'password', 'passwordRevisionDate', 'totp', 'uris', 'username']:\n return [match['login'][field] for match in matches]\n elif not field:\n return matches\n else:\n custom_field_matches = []\n for match in matches:\n for custom_field in match['fields']:\n if custom_field['name'] == field:\n custom_field_matches.append(custom_field['value'])\n if matches and not custom_field_matches:\n raise AnsibleError(\"Custom field {field} does not exist in {search_value}\".format(field=field, search_value=search_value))\n return custom_field_matches\n\n\nclass LookupModule(LookupBase):\n\n def run(self, terms, variables=None, **kwargs):\n self.set_options(var_options=variables, direct=kwargs)\n field = self.get_option('field')\n search_field = self.get_option('search')\n collection_id = self.get_option('collection_id')\n if not _bitwarden.unlocked:\n raise AnsibleError(\"Bitwarden Vault locked. Run 'bw unlock'.\")\n\n return [_bitwarden.get_field(field, term, search_field, collection_id) for term in terms]\n\n\n_bitwarden = Bitwarden()\n", "path": "plugins/lookup/bitwarden.py"}]}
num_tokens_prompt: 3,572
num_tokens_diff: 443

problem_id: gh_patches_debug_20282
source: rasdani/github-patches
task_type: git_diff
in_source_id: PaddlePaddle__models-449
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Need to set the version of CTC decoders formally </issue> <code> [start of deep_speech_2/decoders/swig/setup.py] 1 """Script to build and install decoder package.""" 2 from __future__ import absolute_import 3 from __future__ import division 4 from __future__ import print_function 5 6 from setuptools import setup, Extension, distutils 7 import glob 8 import platform 9 import os, sys 10 import multiprocessing.pool 11 import argparse 12 13 parser = argparse.ArgumentParser(description=__doc__) 14 parser.add_argument( 15 "--num_processes", 16 default=1, 17 type=int, 18 help="Number of cpu processes to build package. (default: %(default)d)") 19 args = parser.parse_known_args() 20 21 # reconstruct sys.argv to pass to setup below 22 sys.argv = [sys.argv[0]] + args[1] 23 24 25 # monkey-patch for parallel compilation 26 # See: https://stackoverflow.com/a/13176803 27 def parallelCCompile(self, 28 sources, 29 output_dir=None, 30 macros=None, 31 include_dirs=None, 32 debug=0, 33 extra_preargs=None, 34 extra_postargs=None, 35 depends=None): 36 # those lines are copied from distutils.ccompiler.CCompiler directly 37 macros, objects, extra_postargs, pp_opts, build = self._setup_compile( 38 output_dir, macros, include_dirs, sources, depends, extra_postargs) 39 cc_args = self._get_cc_args(pp_opts, debug, extra_preargs) 40 41 # parallel code 42 def _single_compile(obj): 43 try: 44 src, ext = build[obj] 45 except KeyError: 46 return 47 self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts) 48 49 # convert to list, imap is evaluated on-demand 50 thread_pool = multiprocessing.pool.ThreadPool(args[0].num_processes) 51 list(thread_pool.imap(_single_compile, objects)) 52 return objects 53 54 55 def compile_test(header, library): 56 dummy_path = os.path.join(os.path.dirname(__file__), "dummy") 57 command = "bash -c \"g++ -include " + header \ 58 + " -l" + library + " -x c++ - <<<'int main() {}' -o " \ 59 + dummy_path + " >/dev/null 2>/dev/null && rm " \ 60 + dummy_path + " 2>/dev/null\"" 61 return os.system(command) == 0 62 63 64 # hack compile to support parallel compiling 65 distutils.ccompiler.CCompiler.compile = parallelCCompile 66 67 FILES = glob.glob('kenlm/util/*.cc') \ 68 + glob.glob('kenlm/lm/*.cc') \ 69 + glob.glob('kenlm/util/double-conversion/*.cc') 70 71 FILES += glob.glob('openfst-1.6.3/src/lib/*.cc') 72 73 # FILES + glob.glob('glog/src/*.cc') 74 FILES = [ 75 fn for fn in FILES 76 if not (fn.endswith('main.cc') or fn.endswith('test.cc') or fn.endswith( 77 'unittest.cc')) 78 ] 79 80 LIBS = ['stdc++'] 81 if platform.system() != 'Darwin': 82 LIBS.append('rt') 83 84 ARGS = ['-O3', '-DNDEBUG', '-DKENLM_MAX_ORDER=6', '-std=c++11'] 85 86 if compile_test('zlib.h', 'z'): 87 ARGS.append('-DHAVE_ZLIB') 88 LIBS.append('z') 89 90 if compile_test('bzlib.h', 'bz2'): 91 ARGS.append('-DHAVE_BZLIB') 92 LIBS.append('bz2') 93 94 if compile_test('lzma.h', 'lzma'): 95 ARGS.append('-DHAVE_XZLIB') 96 LIBS.append('lzma') 97 98 os.system('swig -python -c++ ./decoders.i') 99 100 decoders_module = [ 101 Extension( 102 name='_swig_decoders', 103 sources=FILES + glob.glob('*.cxx') + glob.glob('*.cpp'), 104 language='c++', 105 include_dirs=[ 106 '.', 107 'kenlm', 108 'openfst-1.6.3/src/include', 109 'ThreadPool', 110 #'glog/src' 111 ], 112 libraries=LIBS, 113 extra_compile_args=ARGS) 114 ] 115 116 setup( 117 name='swig_decoders', 118 version='0.1', 119 description="""CTC decoders""", 120 ext_modules=decoders_module, 
121 py_modules=['swig_decoders'], ) 122 [end of deep_speech_2/decoders/swig/setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/deep_speech_2/decoders/swig/setup.py b/deep_speech_2/decoders/swig/setup.py --- a/deep_speech_2/decoders/swig/setup.py +++ b/deep_speech_2/decoders/swig/setup.py @@ -70,7 +70,6 @@ FILES += glob.glob('openfst-1.6.3/src/lib/*.cc') -# FILES + glob.glob('glog/src/*.cc') FILES = [ fn for fn in FILES if not (fn.endswith('main.cc') or fn.endswith('test.cc') or fn.endswith( @@ -107,7 +106,6 @@ 'kenlm', 'openfst-1.6.3/src/include', 'ThreadPool', - #'glog/src' ], libraries=LIBS, extra_compile_args=ARGS) @@ -115,7 +113,7 @@ setup( name='swig_decoders', - version='0.1', + version='1.0', description="""CTC decoders""", ext_modules=decoders_module, py_modules=['swig_decoders'], )
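The patch above is mainly a cleanup plus a formal version bump for the `swig_decoders` package defined in `setup()`. As a small, hedged check (assuming Python 3.8+ and that the distribution has been installed under that name), the declared version can be read back at runtime with the standard library:

```python
# Sketch: confirm the installed distribution advertises the formal version number.
from importlib.metadata import PackageNotFoundError, version

try:
    print(version("swig_decoders"))   # expected to report the bumped version after the patch
except PackageNotFoundError:
    print("swig_decoders is not installed in this environment")
```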
{"golden_diff": "diff --git a/deep_speech_2/decoders/swig/setup.py b/deep_speech_2/decoders/swig/setup.py\n--- a/deep_speech_2/decoders/swig/setup.py\n+++ b/deep_speech_2/decoders/swig/setup.py\n@@ -70,7 +70,6 @@\n \n FILES += glob.glob('openfst-1.6.3/src/lib/*.cc')\n \n-# FILES + glob.glob('glog/src/*.cc')\n FILES = [\n fn for fn in FILES\n if not (fn.endswith('main.cc') or fn.endswith('test.cc') or fn.endswith(\n@@ -107,7 +106,6 @@\n 'kenlm',\n 'openfst-1.6.3/src/include',\n 'ThreadPool',\n- #'glog/src'\n ],\n libraries=LIBS,\n extra_compile_args=ARGS)\n@@ -115,7 +113,7 @@\n \n setup(\n name='swig_decoders',\n- version='0.1',\n+ version='1.0',\n description=\"\"\"CTC decoders\"\"\",\n ext_modules=decoders_module,\n py_modules=['swig_decoders'], )\n", "issue": "Need to set the version of CTC decoders formally\n\n", "before_files": [{"content": "\"\"\"Script to build and install decoder package.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom setuptools import setup, Extension, distutils\nimport glob\nimport platform\nimport os, sys\nimport multiprocessing.pool\nimport argparse\n\nparser = argparse.ArgumentParser(description=__doc__)\nparser.add_argument(\n \"--num_processes\",\n default=1,\n type=int,\n help=\"Number of cpu processes to build package. (default: %(default)d)\")\nargs = parser.parse_known_args()\n\n# reconstruct sys.argv to pass to setup below\nsys.argv = [sys.argv[0]] + args[1]\n\n\n# monkey-patch for parallel compilation\n# See: https://stackoverflow.com/a/13176803\ndef parallelCCompile(self,\n sources,\n output_dir=None,\n macros=None,\n include_dirs=None,\n debug=0,\n extra_preargs=None,\n extra_postargs=None,\n depends=None):\n # those lines are copied from distutils.ccompiler.CCompiler directly\n macros, objects, extra_postargs, pp_opts, build = self._setup_compile(\n output_dir, macros, include_dirs, sources, depends, extra_postargs)\n cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)\n\n # parallel code\n def _single_compile(obj):\n try:\n src, ext = build[obj]\n except KeyError:\n return\n self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)\n\n # convert to list, imap is evaluated on-demand\n thread_pool = multiprocessing.pool.ThreadPool(args[0].num_processes)\n list(thread_pool.imap(_single_compile, objects))\n return objects\n\n\ndef compile_test(header, library):\n dummy_path = os.path.join(os.path.dirname(__file__), \"dummy\")\n command = \"bash -c \\\"g++ -include \" + header \\\n + \" -l\" + library + \" -x c++ - <<<'int main() {}' -o \" \\\n + dummy_path + \" >/dev/null 2>/dev/null && rm \" \\\n + dummy_path + \" 2>/dev/null\\\"\"\n return os.system(command) == 0\n\n\n# hack compile to support parallel compiling\ndistutils.ccompiler.CCompiler.compile = parallelCCompile\n\nFILES = glob.glob('kenlm/util/*.cc') \\\n + glob.glob('kenlm/lm/*.cc') \\\n + glob.glob('kenlm/util/double-conversion/*.cc')\n\nFILES += glob.glob('openfst-1.6.3/src/lib/*.cc')\n\n# FILES + glob.glob('glog/src/*.cc')\nFILES = [\n fn for fn in FILES\n if not (fn.endswith('main.cc') or fn.endswith('test.cc') or fn.endswith(\n 'unittest.cc'))\n]\n\nLIBS = ['stdc++']\nif platform.system() != 'Darwin':\n LIBS.append('rt')\n\nARGS = ['-O3', '-DNDEBUG', '-DKENLM_MAX_ORDER=6', '-std=c++11']\n\nif compile_test('zlib.h', 'z'):\n ARGS.append('-DHAVE_ZLIB')\n LIBS.append('z')\n\nif compile_test('bzlib.h', 'bz2'):\n ARGS.append('-DHAVE_BZLIB')\n LIBS.append('bz2')\n\nif compile_test('lzma.h', 'lzma'):\n 
ARGS.append('-DHAVE_XZLIB')\n LIBS.append('lzma')\n\nos.system('swig -python -c++ ./decoders.i')\n\ndecoders_module = [\n Extension(\n name='_swig_decoders',\n sources=FILES + glob.glob('*.cxx') + glob.glob('*.cpp'),\n language='c++',\n include_dirs=[\n '.',\n 'kenlm',\n 'openfst-1.6.3/src/include',\n 'ThreadPool',\n #'glog/src'\n ],\n libraries=LIBS,\n extra_compile_args=ARGS)\n]\n\nsetup(\n name='swig_decoders',\n version='0.1',\n description=\"\"\"CTC decoders\"\"\",\n ext_modules=decoders_module,\n py_modules=['swig_decoders'], )\n", "path": "deep_speech_2/decoders/swig/setup.py"}]}
num_tokens_prompt: 1,727
num_tokens_diff: 265

problem_id: gh_patches_debug_23631
source: rasdani/github-patches
task_type: git_diff
in_source_id: e-valuation__EvaP-762
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Test management commands Because in three years, run_tasks will silently fail on the production system and nobody will notice. - [x] **run_tasks** - shouldn't be too hard and is rather important - [x] **anonymize** - might be a bit of work to cover it properly, but should be straightforward. - [x] **refresh_results_cache** - should be easy - [x] **dump_testdata** - don't know how not to overwrite the file during testing, but should be possible the other commands are already tested or rather unsuitable for testing - [x] **merge_users** - already has a test (#703) and is shown to be pretty broken. - [x] **run** - don't know how to test this and there isn't really anything that could break. still, somehow running it to check that it doesn't crash right away on e.g. imports would be cool - [x] **reload_testdata** - don't know whether it's possible at all to test that, i mean it drops the whole database... - [ ] **import_ad** - we never used it and i don't know whether it's feasible to mock ldap use `self.stdout.write` instead of `print` and `call_command("command_name", stdout=StringIO())` to avoid console output during tests. don't know what to do about calls to `input`. </issue> <code> [start of evap/evaluation/management/commands/import_ad.py] 1 import getpass 2 import ldap 3 import sys 4 5 from django.core.management.base import BaseCommand 6 7 from evap.evaluation.models import UserProfile 8 9 10 class Command(BaseCommand): 11 args = '<ldap server> <username>' 12 help = 'Imports user data from Active Directory. The username should be specified with realm.' 13 14 def handle(self, *args, **options): 15 try: 16 # connect 17 l = ldap.initialize(args[0]) 18 19 # bind 20 l.bind_s(args[1], getpass.getpass("AD Password: ")) 21 22 # find all users 23 result = l.search_s("OU=INSTITUT,DC=hpi,DC=uni-potsdam,DC=de", ldap.SCOPE_SUBTREE, filterstr="(&(&(objectClass=user)(!(objectClass=computer)))(givenName=*)(sn=*)(mail=*))") 24 for _, attrs in result: 25 try: 26 user = UserProfile.objects.get(username__iexact=attrs['sAMAccountName'][0]) 27 user.first_name = attrs['givenName'][0] 28 user.last_name = attrs['sn'][0] 29 user.email = attrs['mail'][0] 30 user.save() 31 32 print("Successfully updated: '{0}'".format(user.username)) 33 except UserProfile.DoesNotExist: 34 pass 35 except Exception as e: 36 print(e) 37 38 l.unbind_s() 39 40 except KeyboardInterrupt: 41 sys.stderr.write("\nOperation cancelled.\n") 42 sys.exit(1) 43 [end of evap/evaluation/management/commands/import_ad.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/evap/evaluation/management/commands/import_ad.py b/evap/evaluation/management/commands/import_ad.py deleted file mode 100644 --- a/evap/evaluation/management/commands/import_ad.py +++ /dev/null @@ -1,42 +0,0 @@ -import getpass -import ldap -import sys - -from django.core.management.base import BaseCommand - -from evap.evaluation.models import UserProfile - - -class Command(BaseCommand): - args = '<ldap server> <username>' - help = 'Imports user data from Active Directory. The username should be specified with realm.' - - def handle(self, *args, **options): - try: - # connect - l = ldap.initialize(args[0]) - - # bind - l.bind_s(args[1], getpass.getpass("AD Password: ")) - - # find all users - result = l.search_s("OU=INSTITUT,DC=hpi,DC=uni-potsdam,DC=de", ldap.SCOPE_SUBTREE, filterstr="(&(&(objectClass=user)(!(objectClass=computer)))(givenName=*)(sn=*)(mail=*))") - for _, attrs in result: - try: - user = UserProfile.objects.get(username__iexact=attrs['sAMAccountName'][0]) - user.first_name = attrs['givenName'][0] - user.last_name = attrs['sn'][0] - user.email = attrs['mail'][0] - user.save() - - print("Successfully updated: '{0}'".format(user.username)) - except UserProfile.DoesNotExist: - pass - except Exception as e: - print(e) - - l.unbind_s() - - except KeyboardInterrupt: - sys.stderr.write("\nOperation cancelled.\n") - sys.exit(1)
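The issue in this record already sketches the intended testing approach for the management commands: have the commands write through `self.stdout` and drive them from tests with `call_command(..., stdout=StringIO())` so nothing leaks to the console. A minimal, hedged example of that pattern follows; the command name `refresh_results_cache` is taken from the issue's checklist, and the assertion is purely illustrative rather than EvaP's actual test.

```python
# Sketch of exercising a Django management command from a test without console output.
from io import StringIO

from django.core.management import call_command
from django.test import TestCase


class RefreshResultsCacheCommandTests(TestCase):
    def test_command_runs_quietly(self):
        out = StringIO()
        # Anything the command writes via self.stdout.write is captured in `out`.
        call_command("refresh_results_cache", stdout=out)
        self.assertNotEqual(out.getvalue(), "")  # illustrative assertion only
```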
{"golden_diff": "diff --git a/evap/evaluation/management/commands/import_ad.py b/evap/evaluation/management/commands/import_ad.py\ndeleted file mode 100644\n--- a/evap/evaluation/management/commands/import_ad.py\n+++ /dev/null\n@@ -1,42 +0,0 @@\n-import getpass\n-import ldap\n-import sys\n-\n-from django.core.management.base import BaseCommand\n-\n-from evap.evaluation.models import UserProfile\n-\n-\n-class Command(BaseCommand):\n- args = '<ldap server> <username>'\n- help = 'Imports user data from Active Directory. The username should be specified with realm.'\n-\n- def handle(self, *args, **options):\n- try:\n- # connect\n- l = ldap.initialize(args[0])\n-\n- # bind\n- l.bind_s(args[1], getpass.getpass(\"AD Password: \"))\n-\n- # find all users\n- result = l.search_s(\"OU=INSTITUT,DC=hpi,DC=uni-potsdam,DC=de\", ldap.SCOPE_SUBTREE, filterstr=\"(&(&(objectClass=user)(!(objectClass=computer)))(givenName=*)(sn=*)(mail=*))\")\n- for _, attrs in result:\n- try:\n- user = UserProfile.objects.get(username__iexact=attrs['sAMAccountName'][0])\n- user.first_name = attrs['givenName'][0]\n- user.last_name = attrs['sn'][0]\n- user.email = attrs['mail'][0]\n- user.save()\n-\n- print(\"Successfully updated: '{0}'\".format(user.username))\n- except UserProfile.DoesNotExist:\n- pass\n- except Exception as e:\n- print(e)\n-\n- l.unbind_s()\n-\n- except KeyboardInterrupt:\n- sys.stderr.write(\"\\nOperation cancelled.\\n\")\n- sys.exit(1)\n", "issue": "Test management commands\nBecause in three years, run_tasks will silently fail on the production system and nobody will notice.\n- [x] **run_tasks** - shouldn't be too hard and is rather important\n- [x] **anonymize** - might be a bit of work to cover it properly, but should be straightforward.\n- [x] **refresh_results_cache** - should be easy\n- [x] **dump_testdata** - don't know how not to overwrite the file during testing, but should be possible\n\nthe other commands are already tested or rather unsuitable for testing\n- [x] **merge_users** - already has a test (#703) and is shown to be pretty broken.\n- [x] **run** - don't know how to test this and there isn't really anything that could break. still, somehow running it to check that it doesn't crash right away on e.g. imports would be cool\n- [x] **reload_testdata** - don't know whether it's possible at all to test that, i mean it drops the whole database...\n- [ ] **import_ad** - we never used it and i don't know whether it's feasible to mock ldap\n\nuse `self.stdout.write` instead of `print` and `call_command(\"command_name\", stdout=StringIO())` to avoid console output during tests. don't know what to do about calls to `input`.\n\n", "before_files": [{"content": "import getpass\nimport ldap\nimport sys\n\nfrom django.core.management.base import BaseCommand\n\nfrom evap.evaluation.models import UserProfile\n\n\nclass Command(BaseCommand):\n args = '<ldap server> <username>'\n help = 'Imports user data from Active Directory. 
The username should be specified with realm.'\n\n def handle(self, *args, **options):\n try:\n # connect\n l = ldap.initialize(args[0])\n\n # bind\n l.bind_s(args[1], getpass.getpass(\"AD Password: \"))\n\n # find all users\n result = l.search_s(\"OU=INSTITUT,DC=hpi,DC=uni-potsdam,DC=de\", ldap.SCOPE_SUBTREE, filterstr=\"(&(&(objectClass=user)(!(objectClass=computer)))(givenName=*)(sn=*)(mail=*))\")\n for _, attrs in result:\n try:\n user = UserProfile.objects.get(username__iexact=attrs['sAMAccountName'][0])\n user.first_name = attrs['givenName'][0]\n user.last_name = attrs['sn'][0]\n user.email = attrs['mail'][0]\n user.save()\n\n print(\"Successfully updated: '{0}'\".format(user.username))\n except UserProfile.DoesNotExist:\n pass\n except Exception as e:\n print(e)\n\n l.unbind_s()\n\n except KeyboardInterrupt:\n sys.stderr.write(\"\\nOperation cancelled.\\n\")\n sys.exit(1)\n", "path": "evap/evaluation/management/commands/import_ad.py"}]}
num_tokens_prompt: 1,233
num_tokens_diff: 410

problem_id: gh_patches_debug_11447
source: rasdani/github-patches
task_type: git_diff
in_source_id: aws-cloudformation__cfn-lint-2236
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Support for the docker lambda runtime cfn-lint 0.43.0 This is a feature request for supporting docker lambda sam templates. Please provide as much information as possible: * SAM templates produced by aws-sam-cli with the docker deploy option don't pass validation Running on the template.yaml in the base directory outputs the following: ``` % cfn-lint template.yaml E0001 Error transforming template: Resource with id [HelloWorldFunction] is invalid. 'ImageUri' must be set. template.yaml:1:1 ``` Running on the packaged template at `.aws-sam/build/template.yaml` produces the following: ``` E3002 Invalid Property Resources/HelloWorldFunction/Properties/Code/ImageUri .aws-sam/build/template.yaml:12:3 E3002 Invalid Property Resources/HelloWorldFunction/Properties/PackageType .aws-sam/build/template.yaml:12:3 E3003 Property Handler missing at Resources/HelloWorldFunction/Properties .aws-sam/build/template.yaml:12:3 E3003 Property Runtime missing at Resources/HelloWorldFunction/Properties .aws-sam/build/template.yaml:12:3 ``` </issue> <code> [start of src/cfnlint/transform.py] 1 """ 2 Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 SPDX-License-Identifier: MIT-0 4 """ 5 import os 6 import logging 7 import six 8 import samtranslator 9 from samtranslator.parser import parser 10 from samtranslator.translator.translator import Translator 11 from samtranslator.public.exceptions import InvalidDocumentException 12 13 from cfnlint.helpers import load_resource, convert_dict, format_json_string 14 from cfnlint.data import Serverless 15 from cfnlint.rules import Match, TransformError 16 LOGGER = logging.getLogger('cfnlint') 17 18 samtranslator_logger = logging.getLogger('samtranslator') 19 samtranslator_logger.setLevel(logging.CRITICAL) 20 21 class Transform(object): 22 """ 23 Application Serverless Module tranform Wrapper. 24 Based on code from AWS SAM CLI: 25 https://github.com/awslabs/aws-sam-cli/blob/develop/samcli/commands/validate/lib/sam_template_validator.py 26 """ 27 28 def __init__(self, filename, template, region): 29 """ 30 Initialize Transform class 31 """ 32 self._filename = filename 33 self._template = template 34 self._region = region 35 self._parameters = {} 36 37 self._managed_policy_map = self.load_managed_policies() 38 self._sam_parser = parser.Parser() 39 40 def template(self): 41 """Get the template""" 42 return self._template 43 44 def load_managed_policies(self): 45 """ 46 Load the ManagedPolicies locally, based on the AWS-CLI: 47 https://github.com/awslabs/aws-sam-cli/blob/develop/samcli/lib/samlib/default_managed_policies.json 48 """ 49 return load_resource(Serverless, 'ManagedPolicies.json') 50 51 def _replace_local_codeuri(self): 52 """ 53 Replaces the CodeUri in AWS::Serverless::Function and DefinitionUri in 54 AWS::Serverless::Api to a fake S3 Uri. This is to support running the 55 SAM Translator with valid values for these fields. 
If this is not done, 56 the template is invalid in the eyes of SAM Translator (the translator 57 does not support local paths) 58 """ 59 60 all_resources = self._template.get('Resources', {}) 61 62 template_globals = self._template.get('Globals', {}) 63 auto_publish_alias = template_globals.get('Function', {}).get('AutoPublishAlias') 64 if isinstance(auto_publish_alias, dict): 65 if len(auto_publish_alias) == 1: 66 for k, v in auto_publish_alias.items(): 67 if k == 'Ref': 68 if v in self._template.get('Parameters'): 69 self._parameters[v] = 'Alias' 70 71 72 for _, resource in all_resources.items(): 73 74 resource_type = resource.get('Type') 75 resource_dict = resource.get('Properties') 76 77 if resource_type == 'AWS::Serverless::Function': 78 79 Transform._update_to_s3_uri('CodeUri', resource_dict) 80 auto_publish_alias = resource_dict.get('AutoPublishAlias') 81 if isinstance(auto_publish_alias, dict): 82 if len(auto_publish_alias) == 1: 83 for k, v in auto_publish_alias.items(): 84 if k == 'Ref': 85 if v in self._template.get('Parameters'): 86 self._parameters[v] = 'Alias' 87 if resource_type in ['AWS::Serverless::LayerVersion']: 88 if resource_dict.get('ContentUri'): 89 Transform._update_to_s3_uri('ContentUri', resource_dict) 90 if resource_type == 'AWS::Serverless::Application': 91 if resource_dict.get('Location'): 92 resource_dict['Location'] = '' 93 Transform._update_to_s3_uri('Location', resource_dict) 94 if resource_type == 'AWS::Serverless::Api': 95 if ('DefinitionBody' not in resource_dict and 96 'Auth' not in resource_dict and 'Cors' not in resource_dict): 97 Transform._update_to_s3_uri('DefinitionUri', resource_dict) 98 else: 99 resource_dict['DefinitionBody'] = '' 100 if resource_type == 'AWS::Serverless::StateMachine' and resource_dict.get('DefinitionUri'): 101 Transform._update_to_s3_uri('DefinitionUri', resource_dict) 102 103 def transform_template(self): 104 """ 105 Transform the Template using the Serverless Application Model. 
106 """ 107 matches = [] 108 109 try: 110 # Output the SAM Translator version in debug mode 111 LOGGER.info('SAM Translator: %s', samtranslator.__version__) 112 113 sam_translator = Translator( 114 managed_policy_map=self._managed_policy_map, 115 sam_parser=self._sam_parser) 116 117 self._replace_local_codeuri() 118 119 # Tell SAM to use the region we're linting in, this has to be 120 # controlled using the default AWS mechanisms, see also: 121 # https://github.com/awslabs/serverless-application-model/blob/master/samtranslator/translator/arn_generator.py 122 LOGGER.info('Setting AWS_DEFAULT_REGION to %s', self._region) 123 os.environ['AWS_DEFAULT_REGION'] = self._region 124 125 self._template = convert_dict( 126 sam_translator.translate(sam_template=self._template, 127 parameter_values=self._parameters)) 128 129 LOGGER.info('Transformed template: \n%s', 130 format_json_string(self._template)) 131 except InvalidDocumentException as e: 132 message = 'Error transforming template: {0}' 133 for cause in e.causes: 134 matches.append(Match( 135 1, 1, 136 1, 1, 137 self._filename, 138 TransformError(), message.format(cause.message))) 139 except Exception as e: # pylint: disable=W0703 140 LOGGER.debug('Error transforming template: %s', str(e)) 141 LOGGER.debug('Stack trace: %s', e, exc_info=True) 142 message = 'Error transforming template: {0}' 143 matches.append(Match( 144 1, 1, 145 1, 1, 146 self._filename, 147 TransformError(), message.format(str(e)))) 148 149 return matches 150 151 @staticmethod 152 def is_s3_uri(uri): 153 """ 154 Checks the uri and determines if it is a valid S3 Uri 155 Parameters 156 ---------- 157 uri str, required 158 Uri to check 159 Returns 160 ------- 161 bool 162 Returns True if the uri given is an S3 uri, otherwise False 163 """ 164 return isinstance(uri, six.string_types) and uri.startswith('s3://') 165 166 @staticmethod 167 def _update_to_s3_uri( 168 property_key, resource_property_dict, 169 s3_uri_value='s3://bucket/value'): 170 """ 171 Updates the 'property_key' in the 'resource_property_dict' to the 172 value of 's3_uri_value' 173 Note: The function will mutate the resource_property_dict that is pass 174 in Parameters 175 ---------- 176 property_key str, required 177 Key in the resource_property_dict 178 resource_property_dict dict, required 179 Property dictionary of a Resource in the template to replace 180 s3_uri_value str, optional 181 Value to update the value of the property_key to 182 """ 183 uri_property = resource_property_dict.get(property_key, '.') 184 185 # ignore if dict or already an S3 Uri 186 if isinstance(uri_property, dict): 187 if len(uri_property) == 1: 188 for k in uri_property.keys(): 189 if k == 'Ref': 190 resource_property_dict[property_key] = s3_uri_value 191 return 192 if Transform.is_s3_uri(uri_property): 193 return 194 195 resource_property_dict[property_key] = s3_uri_value 196 [end of src/cfnlint/transform.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/cfnlint/transform.py b/src/cfnlint/transform.py --- a/src/cfnlint/transform.py +++ b/src/cfnlint/transform.py @@ -76,7 +76,10 @@ if resource_type == 'AWS::Serverless::Function': - Transform._update_to_s3_uri('CodeUri', resource_dict) + if resource_dict.get('PackageType') == 'Image': + Transform._update_to_s3_uri('ImageUri', resource_dict) + else: + Transform._update_to_s3_uri('CodeUri', resource_dict) auto_publish_alias = resource_dict.get('AutoPublishAlias') if isinstance(auto_publish_alias, dict): if len(auto_publish_alias) == 1:
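The fix above makes the SAM transform branch on `PackageType`, substituting a placeholder `ImageUri` for image-based functions instead of `CodeUri`. To make that behaviour concrete, here is a stripped-down sketch of the substitution on plain dicts; it folds `_update_to_s3_uri` into one function, skips the `Ref`/dict handling, and is not the cfn-lint implementation.

```python
# Sketch of the placeholder substitution applied to AWS::Serverless::Function properties.
def patch_code_location(resource_dict, s3_uri_value="s3://bucket/value"):
    key = "ImageUri" if resource_dict.get("PackageType") == "Image" else "CodeUri"
    uri = resource_dict.get(key, ".")
    if isinstance(uri, str) and uri.startswith("s3://"):
        return resource_dict                      # already points at a remote artifact
    resource_dict[key] = s3_uri_value             # replace the local path with a fake S3 URI
    return resource_dict

print(patch_code_location({"PackageType": "Image", "ImageUri": "helloworld:latest"}))
print(patch_code_location({"Handler": "app.handler", "CodeUri": "./hello_world"}))
```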
{"golden_diff": "diff --git a/src/cfnlint/transform.py b/src/cfnlint/transform.py\n--- a/src/cfnlint/transform.py\n+++ b/src/cfnlint/transform.py\n@@ -76,7 +76,10 @@\n \n if resource_type == 'AWS::Serverless::Function':\n \n- Transform._update_to_s3_uri('CodeUri', resource_dict)\n+ if resource_dict.get('PackageType') == 'Image':\n+ Transform._update_to_s3_uri('ImageUri', resource_dict)\n+ else:\n+ Transform._update_to_s3_uri('CodeUri', resource_dict)\n auto_publish_alias = resource_dict.get('AutoPublishAlias')\n if isinstance(auto_publish_alias, dict):\n if len(auto_publish_alias) == 1:\n", "issue": "Support for the docker lambda runtime\ncfn-lint 0.43.0\r\n\r\nThis is a feature request for supporting docker lambda sam templates.\r\n\r\nPlease provide as much information as possible:\r\n* SAM templates produced by aws-sam-cli with the docker deploy option don't pass validation\r\n\r\nRunning on the template.yaml in the base directory outputs the following:\r\n```\r\n% cfn-lint template.yaml \r\nE0001 Error transforming template: Resource with id [HelloWorldFunction] is invalid. 'ImageUri' must be set.\r\ntemplate.yaml:1:1\r\n\r\n```\r\n\r\nRunning on the packaged template at `.aws-sam/build/template.yaml` produces the following:\r\n```\r\nE3002 Invalid Property Resources/HelloWorldFunction/Properties/Code/ImageUri\r\n.aws-sam/build/template.yaml:12:3\r\n\r\nE3002 Invalid Property Resources/HelloWorldFunction/Properties/PackageType\r\n.aws-sam/build/template.yaml:12:3\r\n\r\nE3003 Property Handler missing at Resources/HelloWorldFunction/Properties\r\n.aws-sam/build/template.yaml:12:3\r\n\r\nE3003 Property Runtime missing at Resources/HelloWorldFunction/Properties\r\n.aws-sam/build/template.yaml:12:3\r\n\r\n```\n", "before_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nimport os\nimport logging\nimport six\nimport samtranslator\nfrom samtranslator.parser import parser\nfrom samtranslator.translator.translator import Translator\nfrom samtranslator.public.exceptions import InvalidDocumentException\n\nfrom cfnlint.helpers import load_resource, convert_dict, format_json_string\nfrom cfnlint.data import Serverless\nfrom cfnlint.rules import Match, TransformError\nLOGGER = logging.getLogger('cfnlint')\n\nsamtranslator_logger = logging.getLogger('samtranslator')\nsamtranslator_logger.setLevel(logging.CRITICAL)\n\nclass Transform(object):\n \"\"\"\n Application Serverless Module tranform Wrapper.\n Based on code from AWS SAM CLI:\n https://github.com/awslabs/aws-sam-cli/blob/develop/samcli/commands/validate/lib/sam_template_validator.py\n \"\"\"\n\n def __init__(self, filename, template, region):\n \"\"\"\n Initialize Transform class\n \"\"\"\n self._filename = filename\n self._template = template\n self._region = region\n self._parameters = {}\n\n self._managed_policy_map = self.load_managed_policies()\n self._sam_parser = parser.Parser()\n\n def template(self):\n \"\"\"Get the template\"\"\"\n return self._template\n\n def load_managed_policies(self):\n \"\"\"\n Load the ManagedPolicies locally, based on the AWS-CLI:\n https://github.com/awslabs/aws-sam-cli/blob/develop/samcli/lib/samlib/default_managed_policies.json\n \"\"\"\n return load_resource(Serverless, 'ManagedPolicies.json')\n\n def _replace_local_codeuri(self):\n \"\"\"\n Replaces the CodeUri in AWS::Serverless::Function and DefinitionUri in\n AWS::Serverless::Api to a fake S3 Uri. 
This is to support running the\n SAM Translator with valid values for these fields. If this is not done,\n the template is invalid in the eyes of SAM Translator (the translator\n does not support local paths)\n \"\"\"\n\n all_resources = self._template.get('Resources', {})\n\n template_globals = self._template.get('Globals', {})\n auto_publish_alias = template_globals.get('Function', {}).get('AutoPublishAlias')\n if isinstance(auto_publish_alias, dict):\n if len(auto_publish_alias) == 1:\n for k, v in auto_publish_alias.items():\n if k == 'Ref':\n if v in self._template.get('Parameters'):\n self._parameters[v] = 'Alias'\n\n\n for _, resource in all_resources.items():\n\n resource_type = resource.get('Type')\n resource_dict = resource.get('Properties')\n\n if resource_type == 'AWS::Serverless::Function':\n\n Transform._update_to_s3_uri('CodeUri', resource_dict)\n auto_publish_alias = resource_dict.get('AutoPublishAlias')\n if isinstance(auto_publish_alias, dict):\n if len(auto_publish_alias) == 1:\n for k, v in auto_publish_alias.items():\n if k == 'Ref':\n if v in self._template.get('Parameters'):\n self._parameters[v] = 'Alias'\n if resource_type in ['AWS::Serverless::LayerVersion']:\n if resource_dict.get('ContentUri'):\n Transform._update_to_s3_uri('ContentUri', resource_dict)\n if resource_type == 'AWS::Serverless::Application':\n if resource_dict.get('Location'):\n resource_dict['Location'] = ''\n Transform._update_to_s3_uri('Location', resource_dict)\n if resource_type == 'AWS::Serverless::Api':\n if ('DefinitionBody' not in resource_dict and\n 'Auth' not in resource_dict and 'Cors' not in resource_dict):\n Transform._update_to_s3_uri('DefinitionUri', resource_dict)\n else:\n resource_dict['DefinitionBody'] = ''\n if resource_type == 'AWS::Serverless::StateMachine' and resource_dict.get('DefinitionUri'):\n Transform._update_to_s3_uri('DefinitionUri', resource_dict)\n\n def transform_template(self):\n \"\"\"\n Transform the Template using the Serverless Application Model.\n \"\"\"\n matches = []\n\n try:\n # Output the SAM Translator version in debug mode\n LOGGER.info('SAM Translator: %s', samtranslator.__version__)\n\n sam_translator = Translator(\n managed_policy_map=self._managed_policy_map,\n sam_parser=self._sam_parser)\n\n self._replace_local_codeuri()\n\n # Tell SAM to use the region we're linting in, this has to be\n # controlled using the default AWS mechanisms, see also:\n # https://github.com/awslabs/serverless-application-model/blob/master/samtranslator/translator/arn_generator.py\n LOGGER.info('Setting AWS_DEFAULT_REGION to %s', self._region)\n os.environ['AWS_DEFAULT_REGION'] = self._region\n\n self._template = convert_dict(\n sam_translator.translate(sam_template=self._template,\n parameter_values=self._parameters))\n\n LOGGER.info('Transformed template: \\n%s',\n format_json_string(self._template))\n except InvalidDocumentException as e:\n message = 'Error transforming template: {0}'\n for cause in e.causes:\n matches.append(Match(\n 1, 1,\n 1, 1,\n self._filename,\n TransformError(), message.format(cause.message)))\n except Exception as e: # pylint: disable=W0703\n LOGGER.debug('Error transforming template: %s', str(e))\n LOGGER.debug('Stack trace: %s', e, exc_info=True)\n message = 'Error transforming template: {0}'\n matches.append(Match(\n 1, 1,\n 1, 1,\n self._filename,\n TransformError(), message.format(str(e))))\n\n return matches\n\n @staticmethod\n def is_s3_uri(uri):\n \"\"\"\n Checks the uri and determines if it is a valid S3 Uri\n Parameters\n ----------\n 
uri str, required\n Uri to check\n Returns\n -------\n bool\n Returns True if the uri given is an S3 uri, otherwise False\n \"\"\"\n return isinstance(uri, six.string_types) and uri.startswith('s3://')\n\n @staticmethod\n def _update_to_s3_uri(\n property_key, resource_property_dict,\n s3_uri_value='s3://bucket/value'):\n \"\"\"\n Updates the 'property_key' in the 'resource_property_dict' to the\n value of 's3_uri_value'\n Note: The function will mutate the resource_property_dict that is pass\n in Parameters\n ----------\n property_key str, required\n Key in the resource_property_dict\n resource_property_dict dict, required\n Property dictionary of a Resource in the template to replace\n s3_uri_value str, optional\n Value to update the value of the property_key to\n \"\"\"\n uri_property = resource_property_dict.get(property_key, '.')\n\n # ignore if dict or already an S3 Uri\n if isinstance(uri_property, dict):\n if len(uri_property) == 1:\n for k in uri_property.keys():\n if k == 'Ref':\n resource_property_dict[property_key] = s3_uri_value\n return\n if Transform.is_s3_uri(uri_property):\n return\n\n resource_property_dict[property_key] = s3_uri_value\n", "path": "src/cfnlint/transform.py"}]}
2,856
167
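For context on the cfn-lint record that ends here: its `Transform` helper swaps local `CodeUri`, `ContentUri` and `DefinitionUri` values for a placeholder S3 URI so the SAM translator will accept the template. The snippet below is a simplified, standalone sketch of that idea, not the cfn-lint API itself; the real method additionally rewrites single-key `{"Ref": ...}` values.

```python
# Simplified sketch of the "replace local paths with a placeholder S3 URI" idea
# from the cfn-lint Transform record above. Not the cfn-lint API.

def is_s3_uri(uri):
    """Return True if the value already looks like an s3:// URI."""
    return isinstance(uri, str) and uri.startswith("s3://")

def update_to_s3_uri(key, properties, placeholder="s3://bucket/value"):
    """Overwrite a local-path property with a placeholder S3 URI.

    Dict values and values that are already S3 URIs are left alone here;
    the record's real code also rewrites single-key {"Ref": ...} dicts.
    """
    value = properties.get(key, ".")
    if isinstance(value, dict) or is_s3_uri(value):
        return
    properties[key] = placeholder

props = {"CodeUri": "./src", "Handler": "app.handler"}
update_to_s3_uri("CodeUri", props)
print(props)  # {'CodeUri': 's3://bucket/value', 'Handler': 'app.handler'}
```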
gh_patches_debug_19945
rasdani/github-patches
git_diff
onnx__onnx-5736
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Refine docs for check_model Current version: > Check the consistency of a model. An exception is raised if the test fails. It would be good if we document the kind of checks done and the type of exception raised so users know what to catch for; as well as clarify that it also runs shape inference when strict is True. (Right now it says `if True, the function checks shapes can be inferred`) Should we default `strict` to `True`? @jcwchen </issue> <code> [start of onnx/checker.py] 1 # Copyright (c) ONNX Project Contributors 2 # 3 # SPDX-License-Identifier: Apache-2.0 4 """Graph utilities for checking whether an ONNX proto message is legal.""" 5 6 from __future__ import annotations 7 8 __all__ = [ 9 "check_attribute", 10 "check_function", 11 "check_graph", 12 "check_model", 13 "check_node", 14 "check_sparse_tensor", 15 "check_tensor", 16 "check_value_info", 17 "DEFAULT_CONTEXT", 18 "ValidationError", 19 "C", 20 "MAXIMUM_PROTOBUF", 21 ] 22 23 import os 24 import sys 25 from typing import Any, Callable, TypeVar 26 27 from google.protobuf.message import Message 28 29 import onnx.defs 30 import onnx.onnx_cpp2py_export.checker as C # noqa: N812 31 import onnx.shape_inference 32 from onnx import ( 33 IR_VERSION, 34 AttributeProto, 35 FunctionProto, 36 GraphProto, 37 ModelProto, 38 NodeProto, 39 SparseTensorProto, 40 TensorProto, 41 ValueInfoProto, 42 helper, 43 ) 44 45 # Limitation of single protobuf file is 2GB 46 MAXIMUM_PROTOBUF = 2000000000 47 48 # TODO: This thing where we reserialize the protobuf back into the 49 # string, only to deserialize it at the call site, is really goofy. 50 # Stop doing that. 51 52 53 # NB: Please don't edit this context! 54 DEFAULT_CONTEXT = C.CheckerContext() 55 DEFAULT_CONTEXT.ir_version = IR_VERSION 56 # TODO: Maybe ONNX-ML should also be defaulted? 
57 DEFAULT_CONTEXT.opset_imports = {"": onnx.defs.onnx_opset_version()} 58 59 60 FuncType = TypeVar("FuncType", bound=Callable[..., Any]) 61 62 63 def _ensure_proto_type(proto: Message, proto_type: type[Message]) -> None: 64 if not isinstance(proto, proto_type): 65 raise TypeError( 66 f"The proto message needs to be of type '{proto_type.__name__}'" 67 ) 68 69 70 def check_value_info( 71 value_info: ValueInfoProto, ctx: C.CheckerContext = DEFAULT_CONTEXT 72 ) -> None: 73 _ensure_proto_type(value_info, ValueInfoProto) 74 return C.check_value_info(value_info.SerializeToString(), ctx) 75 76 77 def check_tensor(tensor: TensorProto, ctx: C.CheckerContext = DEFAULT_CONTEXT) -> None: 78 _ensure_proto_type(tensor, TensorProto) 79 return C.check_tensor(tensor.SerializeToString(), ctx) 80 81 82 def check_attribute( 83 attr: AttributeProto, ctx: C.CheckerContext = DEFAULT_CONTEXT 84 ) -> None: 85 _ensure_proto_type(attr, AttributeProto) 86 return C.check_attribute(attr.SerializeToString(), ctx) 87 88 89 def check_node(node: NodeProto, ctx: C.CheckerContext = DEFAULT_CONTEXT) -> None: 90 _ensure_proto_type(node, NodeProto) 91 return C.check_node(node.SerializeToString(), ctx) 92 93 94 def check_function( 95 function: FunctionProto, ctx: C.CheckerContext | None = None 96 ) -> None: 97 _ensure_proto_type(function, FunctionProto) 98 if ctx is None: 99 ctx = C.CheckerContext() 100 ctx.ir_version = helper.find_min_ir_version_for( 101 list(function.opset_import), True 102 ) 103 function_opset_dic = {} 104 for domain_version in function.opset_import: 105 function_opset_dic[domain_version.domain] = domain_version.version 106 ctx.opset_imports = function_opset_dic 107 C.check_function(function.SerializeToString(), ctx) 108 109 110 def check_graph(graph: GraphProto, ctx: C.CheckerContext = DEFAULT_CONTEXT) -> None: 111 _ensure_proto_type(graph, GraphProto) 112 return C.check_graph(graph.SerializeToString(), ctx) 113 114 115 def check_sparse_tensor( 116 sparse: SparseTensorProto, ctx: C.CheckerContext = DEFAULT_CONTEXT 117 ) -> None: 118 _ensure_proto_type(sparse, SparseTensorProto) 119 C.check_sparse_tensor(sparse.SerializeToString(), ctx) 120 121 122 def check_model( 123 model: ModelProto | str | bytes | os.PathLike, 124 full_check: bool = False, 125 skip_opset_compatibility_check: bool = False, 126 ) -> None: 127 """Check the consistency of a model. An exception is raised if the test fails. 128 129 Args: 130 model: Model to check. 131 full_check: If True, the function also checks for shapes that can be inferred. 132 skip_opset_compatibility_check: If True, the function skips the check for 133 opset compatibility. 134 """ 135 # If model is a path instead of ModelProto 136 if isinstance(model, (str, os.PathLike)): 137 C.check_model_path(os.fspath(model), full_check, skip_opset_compatibility_check) 138 else: 139 protobuf_string = ( 140 model if isinstance(model, bytes) else model.SerializeToString() 141 ) 142 # If the protobuf is larger than 2GB, 143 # remind users should use the model path to check 144 if sys.getsizeof(protobuf_string) > MAXIMUM_PROTOBUF: 145 raise ValueError( 146 "This protobuf of onnx model is too large (>2GB). Call check_model with model path instead." 147 ) 148 C.check_model(protobuf_string, full_check, skip_opset_compatibility_check) 149 150 151 ValidationError = C.ValidationError 152 [end of onnx/checker.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. 
Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/onnx/checker.py b/onnx/checker.py --- a/onnx/checker.py +++ b/onnx/checker.py @@ -124,11 +124,20 @@ full_check: bool = False, skip_opset_compatibility_check: bool = False, ) -> None: - """Check the consistency of a model. An exception is raised if the test fails. + """Check the consistency of a model. + + An exception will be raised if the model's ir_version is not set + properly or is higher than checker's ir_version, or if the model + has duplicate keys in metadata_props. + + If IR version >= 3, the model must specify opset_import. + If IR version < 3, the model cannot have any opset_import specified. Args: - model: Model to check. - full_check: If True, the function also checks for shapes that can be inferred. + model: Model to check. If model is a path, the function checks model + path first. If the model bytes size is larger than 2GB, function + should be called using model path. + full_check: If True, the function also runs shape inference check. skip_opset_compatibility_check: If True, the function skips the check for opset compatibility. """
{"golden_diff": "diff --git a/onnx/checker.py b/onnx/checker.py\n--- a/onnx/checker.py\n+++ b/onnx/checker.py\n@@ -124,11 +124,20 @@\n full_check: bool = False,\n skip_opset_compatibility_check: bool = False,\n ) -> None:\n- \"\"\"Check the consistency of a model. An exception is raised if the test fails.\n+ \"\"\"Check the consistency of a model.\n+\n+ An exception will be raised if the model's ir_version is not set\n+ properly or is higher than checker's ir_version, or if the model\n+ has duplicate keys in metadata_props.\n+\n+ If IR version >= 3, the model must specify opset_import.\n+ If IR version < 3, the model cannot have any opset_import specified.\n \n Args:\n- model: Model to check.\n- full_check: If True, the function also checks for shapes that can be inferred.\n+ model: Model to check. If model is a path, the function checks model\n+ path first. If the model bytes size is larger than 2GB, function\n+ should be called using model path.\n+ full_check: If True, the function also runs shape inference check.\n skip_opset_compatibility_check: If True, the function skips the check for\n opset compatibility.\n \"\"\"\n", "issue": "Refine docs for check_model\nCurrent version:\r\n\r\n> Check the consistency of a model. An exception is raised if the test fails.\r\n\r\nIt would be good if we document the kind of checks done and the type of exception raised so users know what to catch for; as well as clarify that it also runs shape inference when strict is True. (Right now it says `if True, the function checks shapes can be inferred`\uff09\r\n\r\nShould we default `strict` to `True`? @jcwchen \n", "before_files": [{"content": "# Copyright (c) ONNX Project Contributors\n#\n# SPDX-License-Identifier: Apache-2.0\n\"\"\"Graph utilities for checking whether an ONNX proto message is legal.\"\"\"\n\nfrom __future__ import annotations\n\n__all__ = [\n \"check_attribute\",\n \"check_function\",\n \"check_graph\",\n \"check_model\",\n \"check_node\",\n \"check_sparse_tensor\",\n \"check_tensor\",\n \"check_value_info\",\n \"DEFAULT_CONTEXT\",\n \"ValidationError\",\n \"C\",\n \"MAXIMUM_PROTOBUF\",\n]\n\nimport os\nimport sys\nfrom typing import Any, Callable, TypeVar\n\nfrom google.protobuf.message import Message\n\nimport onnx.defs\nimport onnx.onnx_cpp2py_export.checker as C # noqa: N812\nimport onnx.shape_inference\nfrom onnx import (\n IR_VERSION,\n AttributeProto,\n FunctionProto,\n GraphProto,\n ModelProto,\n NodeProto,\n SparseTensorProto,\n TensorProto,\n ValueInfoProto,\n helper,\n)\n\n# Limitation of single protobuf file is 2GB\nMAXIMUM_PROTOBUF = 2000000000\n\n# TODO: This thing where we reserialize the protobuf back into the\n# string, only to deserialize it at the call site, is really goofy.\n# Stop doing that.\n\n\n# NB: Please don't edit this context!\nDEFAULT_CONTEXT = C.CheckerContext()\nDEFAULT_CONTEXT.ir_version = IR_VERSION\n# TODO: Maybe ONNX-ML should also be defaulted?\nDEFAULT_CONTEXT.opset_imports = {\"\": onnx.defs.onnx_opset_version()}\n\n\nFuncType = TypeVar(\"FuncType\", bound=Callable[..., Any])\n\n\ndef _ensure_proto_type(proto: Message, proto_type: type[Message]) -> None:\n if not isinstance(proto, proto_type):\n raise TypeError(\n f\"The proto message needs to be of type '{proto_type.__name__}'\"\n )\n\n\ndef check_value_info(\n value_info: ValueInfoProto, ctx: C.CheckerContext = DEFAULT_CONTEXT\n) -> None:\n _ensure_proto_type(value_info, ValueInfoProto)\n return C.check_value_info(value_info.SerializeToString(), ctx)\n\n\ndef check_tensor(tensor: TensorProto, ctx: 
C.CheckerContext = DEFAULT_CONTEXT) -> None:\n _ensure_proto_type(tensor, TensorProto)\n return C.check_tensor(tensor.SerializeToString(), ctx)\n\n\ndef check_attribute(\n attr: AttributeProto, ctx: C.CheckerContext = DEFAULT_CONTEXT\n) -> None:\n _ensure_proto_type(attr, AttributeProto)\n return C.check_attribute(attr.SerializeToString(), ctx)\n\n\ndef check_node(node: NodeProto, ctx: C.CheckerContext = DEFAULT_CONTEXT) -> None:\n _ensure_proto_type(node, NodeProto)\n return C.check_node(node.SerializeToString(), ctx)\n\n\ndef check_function(\n function: FunctionProto, ctx: C.CheckerContext | None = None\n) -> None:\n _ensure_proto_type(function, FunctionProto)\n if ctx is None:\n ctx = C.CheckerContext()\n ctx.ir_version = helper.find_min_ir_version_for(\n list(function.opset_import), True\n )\n function_opset_dic = {}\n for domain_version in function.opset_import:\n function_opset_dic[domain_version.domain] = domain_version.version\n ctx.opset_imports = function_opset_dic\n C.check_function(function.SerializeToString(), ctx)\n\n\ndef check_graph(graph: GraphProto, ctx: C.CheckerContext = DEFAULT_CONTEXT) -> None:\n _ensure_proto_type(graph, GraphProto)\n return C.check_graph(graph.SerializeToString(), ctx)\n\n\ndef check_sparse_tensor(\n sparse: SparseTensorProto, ctx: C.CheckerContext = DEFAULT_CONTEXT\n) -> None:\n _ensure_proto_type(sparse, SparseTensorProto)\n C.check_sparse_tensor(sparse.SerializeToString(), ctx)\n\n\ndef check_model(\n model: ModelProto | str | bytes | os.PathLike,\n full_check: bool = False,\n skip_opset_compatibility_check: bool = False,\n) -> None:\n \"\"\"Check the consistency of a model. An exception is raised if the test fails.\n\n Args:\n model: Model to check.\n full_check: If True, the function also checks for shapes that can be inferred.\n skip_opset_compatibility_check: If True, the function skips the check for\n opset compatibility.\n \"\"\"\n # If model is a path instead of ModelProto\n if isinstance(model, (str, os.PathLike)):\n C.check_model_path(os.fspath(model), full_check, skip_opset_compatibility_check)\n else:\n protobuf_string = (\n model if isinstance(model, bytes) else model.SerializeToString()\n )\n # If the protobuf is larger than 2GB,\n # remind users should use the model path to check\n if sys.getsizeof(protobuf_string) > MAXIMUM_PROTOBUF:\n raise ValueError(\n \"This protobuf of onnx model is too large (>2GB). Call check_model with model path instead.\"\n )\n C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)\n\n\nValidationError = C.ValidationError\n", "path": "onnx/checker.py"}]}
2,094
301
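The onnx record above is a documentation-only change: the golden diff rewrites the `check_model` docstring to say that `full_check=True` also runs a shape inference check and that models larger than 2GB should be passed by path. A minimal usage sketch of the API as described in that record (the model file name is illustrative):

```python
# Hypothetical use of onnx.checker.check_model as documented in the record above.
import onnx
from onnx import checker

model = onnx.load("model.onnx")  # assumed to exist on disk
try:
    checker.check_model(model, full_check=True)  # full_check also runs shape inference
except checker.ValidationError as err:
    print("model failed validation:", err)
```

Per the record, the same call also accepts a file path instead of a `ModelProto`, which is the required route for models over 2GB.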
gh_patches_debug_15335
rasdani/github-patches
git_diff
google__osv.dev-84
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Switch ecosystem from "" to "OSS-Fuzz" for oss-fuzz sourced bugs </issue> <code> [start of gcp/api/server.py] 1 # Copyright 2021 Google LLC 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 """API server implementation.""" 15 16 import argparse 17 from concurrent import futures 18 import functools 19 import logging 20 import os 21 import sys 22 import time 23 24 from google.cloud import ndb 25 import grpc 26 27 import osv 28 import osv_service_v1_pb2 29 import osv_service_v1_pb2_grpc 30 31 _PROJECT = 'oss-vdb' 32 _OSS_FUZZ_TRACKER_URL = 'https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=' 33 34 _SHUTDOWN_GRACE_DURATION = 5 35 36 _AUTHORIZATION_HEADER_PREFIX = 'Bearer ' 37 _EXPECTED_AUDIENCE = 'https://db.oss-fuzz.com' 38 39 _ndb_client = ndb.Client() 40 41 42 def ndb_context(func): 43 """Wrapper to create an NDB context.""" 44 45 @functools.wraps(func) 46 def wrapper(*args, **kwargs): 47 with _ndb_client.context(): 48 return func(*args, **kwargs) 49 50 return wrapper 51 52 53 class BaseServicer: 54 """Base servicer.""" 55 56 def is_privileged(self, context): 57 """Check whether if the calling client is privileged.""" 58 for key, _ in context.invocation_metadata(): 59 # If we have this metadata value, it means it passed JWT validation. 60 if key == 'x-endpoint-api-userinfo': 61 return True 62 63 return False 64 65 66 class OSVServicer(osv_service_v1_pb2_grpc.OSVServicer, BaseServicer): 67 """V1 OSV servicer.""" 68 69 @ndb_context 70 def GetVulnById(self, request, context): 71 """Return a `Vulnerability` object for a given OSV ID. 
72 """ 73 bug = osv.Bug.get_by_id(request.id) 74 if not bug or bug.status == osv.BugStatus.UNPROCESSED: 75 context.abort(grpc.StatusCode.NOT_FOUND, 'Bug not found.') 76 return None 77 78 if not bug.public and not self.is_privileged(context): 79 context.abort(grpc.StatusCode.PERMISSION_DENIED, 'Permission denied.') 80 return None 81 82 return bug_to_response(bug) 83 84 @ndb_context 85 def QueryAffected(self, request, context): 86 """Query vulnerabilities for a particular project at a given commit or 87 version.""" 88 privileged = self.is_privileged(context) 89 if request.query.HasField('package'): 90 package_name = request.query.package.name 91 ecosystem = request.query.package.ecosystem 92 else: 93 package_name = '' 94 ecosystem = '' 95 96 if request.query.WhichOneof('param') == 'commit': 97 bugs = query_by_commit( 98 package_name, 99 ecosystem, 100 request.query.commit, 101 privileged, 102 to_response=bug_to_response) 103 elif request.query.WhichOneof('param') == 'version': 104 bugs = query_by_version( 105 package_name, 106 ecosystem, 107 request.query.version, 108 privileged, 109 to_response=bug_to_response) 110 else: 111 context.abort(grpc.StatusCode.INVALID_ARGUMENT, 'Invalid query.') 112 113 return osv_service_v1_pb2.VulnerabilityList(vulns=bugs) 114 115 def GetVulnByIdNew(self, request, context): 116 """Return a `Vulnerability` object for a given OSV ID. 117 """ 118 return self.GetVulnById(request, context) 119 120 def QueryAffectedNew(self, request, context): 121 """Query vulnerabilities for a particular project at a given commit or 122 version.""" 123 return self.QueryAffected(request, context) 124 125 126 def bug_to_response(bug): 127 """Convert a Bug entity to a response object.""" 128 return bug.to_vulnerability() 129 130 131 def _get_bugs(bug_ids, to_response=bug_to_response): 132 """Get bugs from bug ids.""" 133 bugs = ndb.get_multi([ndb.Key(osv.Bug, bug_id) for bug_id in bug_ids]) 134 return [ 135 to_response(bug) 136 for bug in bugs 137 if bug and bug.status == osv.BugStatus.PROCESSED 138 ] 139 140 141 def query_by_commit(project, 142 ecosystem, 143 commit, 144 privileged, 145 to_response=bug_to_response): 146 """Query by commit.""" 147 query = osv.AffectedCommit.query(osv.AffectedCommit.commit == commit) 148 149 if project: 150 query = query.filter(osv.AffectedCommit.project == project) 151 152 if ecosystem: 153 query = query.filter(osv.AffectedCommit.ecosystem == ecosystem) 154 155 if not privileged: 156 query = query.filter(osv.AffectedCommit.public == True) # pylint: disable=singleton-comparison 157 158 bug_ids = [] 159 for affected_commit in query: 160 bug_ids.append(affected_commit.bug_id) 161 162 return _get_bugs(bug_ids, to_response=to_response) 163 164 165 def query_by_tag(project, 166 ecosystem, 167 tag, 168 privileged, 169 to_response=bug_to_response): 170 """Query by tag.""" 171 query = osv.Bug.query(osv.Bug.project == project, 172 osv.Bug.ecosystem == ecosystem, osv.Bug.affected == tag) 173 174 if not privileged: 175 query = query.filter(osv.Bug.public == True) # pylint: disable=singleton-comparison 176 177 bugs = [] 178 for bug in query: 179 bugs.append(bug) 180 181 return [to_response(bug) for bug in bugs] 182 183 184 def query_by_version(project, 185 ecosystem, 186 version, 187 privileged, 188 to_response=bug_to_response): 189 """Query by (fuzzy) version.""" 190 query = osv.Bug.query(osv.Bug.status == osv.BugStatus.PROCESSED, 191 osv.Bug.project == project, 192 osv.Bug.ecosystem == ecosystem, 193 osv.Bug.affected_fuzzy == osv.normalize_tag(version)) 194 195 
if not privileged: 196 query = query.filter(osv.Bug.public == True) # pylint: disable=singleton-comparison 197 198 bugs = [] 199 for bug in query: 200 bugs.append(bug) 201 202 return [to_response(bug) for bug in bugs] 203 204 205 def serve(port): 206 """Configures and runs the bookstore API server.""" 207 server = grpc.server(futures.ThreadPoolExecutor(max_workers=10)) 208 osv_service_v1_pb2_grpc.add_OSVServicer_to_server(OSVServicer(), server) 209 server.add_insecure_port('[::]:{}'.format(port)) 210 server.start() 211 212 print('Listening on port {}'.format(port)) 213 try: 214 while True: 215 time.sleep(3600) 216 except KeyboardInterrupt: 217 server.stop(_SHUTDOWN_GRACE_DURATION) 218 219 220 def main(): 221 """Entrypoint.""" 222 logging.basicConfig(stream=sys.stderr) 223 logging.getLogger().setLevel(logging.INFO) 224 225 parser = argparse.ArgumentParser( 226 formatter_class=argparse.RawDescriptionHelpFormatter) 227 parser.add_argument( 228 '--port', 229 type=int, 230 default=None, 231 help='The port to listen on.' 232 'If arg is not set, will listen on the $PORT env var.' 233 'If env var is empty, defaults to 8000.') 234 235 args = parser.parse_args() 236 port = args.port 237 if not port: 238 port = os.environ.get('PORT') 239 if not port: 240 port = 8000 241 242 serve(port) 243 244 245 if __name__ == '__main__': 246 main() 247 [end of gcp/api/server.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/gcp/api/server.py b/gcp/api/server.py --- a/gcp/api/server.py +++ b/gcp/api/server.py @@ -187,11 +187,14 @@ privileged, to_response=bug_to_response): """Query by (fuzzy) version.""" + query = osv.Bug.query(osv.Bug.status == osv.BugStatus.PROCESSED, osv.Bug.project == project, - osv.Bug.ecosystem == ecosystem, osv.Bug.affected_fuzzy == osv.normalize_tag(version)) + if ecosystem: + query = query.filter(osv.Bug.ecosystem == ecosystem) + if not privileged: query = query.filter(osv.Bug.public == True) # pylint: disable=singleton-comparison
{"golden_diff": "diff --git a/gcp/api/server.py b/gcp/api/server.py\n--- a/gcp/api/server.py\n+++ b/gcp/api/server.py\n@@ -187,11 +187,14 @@\n privileged,\n to_response=bug_to_response):\n \"\"\"Query by (fuzzy) version.\"\"\"\n+\n query = osv.Bug.query(osv.Bug.status == osv.BugStatus.PROCESSED,\n osv.Bug.project == project,\n- osv.Bug.ecosystem == ecosystem,\n osv.Bug.affected_fuzzy == osv.normalize_tag(version))\n \n+ if ecosystem:\n+ query = query.filter(osv.Bug.ecosystem == ecosystem)\n+\n if not privileged:\n query = query.filter(osv.Bug.public == True) # pylint: disable=singleton-comparison\n", "issue": "Switch ecosystem from \"\" to \"OSS-Fuzz\" for oss-fuzz sourced bugs\n\n", "before_files": [{"content": "# Copyright 2021 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"API server implementation.\"\"\"\n\nimport argparse\nfrom concurrent import futures\nimport functools\nimport logging\nimport os\nimport sys\nimport time\n\nfrom google.cloud import ndb\nimport grpc\n\nimport osv\nimport osv_service_v1_pb2\nimport osv_service_v1_pb2_grpc\n\n_PROJECT = 'oss-vdb'\n_OSS_FUZZ_TRACKER_URL = 'https://bugs.chromium.org/p/oss-fuzz/issues/detail?id='\n\n_SHUTDOWN_GRACE_DURATION = 5\n\n_AUTHORIZATION_HEADER_PREFIX = 'Bearer '\n_EXPECTED_AUDIENCE = 'https://db.oss-fuzz.com'\n\n_ndb_client = ndb.Client()\n\n\ndef ndb_context(func):\n \"\"\"Wrapper to create an NDB context.\"\"\"\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n with _ndb_client.context():\n return func(*args, **kwargs)\n\n return wrapper\n\n\nclass BaseServicer:\n \"\"\"Base servicer.\"\"\"\n\n def is_privileged(self, context):\n \"\"\"Check whether if the calling client is privileged.\"\"\"\n for key, _ in context.invocation_metadata():\n # If we have this metadata value, it means it passed JWT validation.\n if key == 'x-endpoint-api-userinfo':\n return True\n\n return False\n\n\nclass OSVServicer(osv_service_v1_pb2_grpc.OSVServicer, BaseServicer):\n \"\"\"V1 OSV servicer.\"\"\"\n\n @ndb_context\n def GetVulnById(self, request, context):\n \"\"\"Return a `Vulnerability` object for a given OSV ID.\n \"\"\"\n bug = osv.Bug.get_by_id(request.id)\n if not bug or bug.status == osv.BugStatus.UNPROCESSED:\n context.abort(grpc.StatusCode.NOT_FOUND, 'Bug not found.')\n return None\n\n if not bug.public and not self.is_privileged(context):\n context.abort(grpc.StatusCode.PERMISSION_DENIED, 'Permission denied.')\n return None\n\n return bug_to_response(bug)\n\n @ndb_context\n def QueryAffected(self, request, context):\n \"\"\"Query vulnerabilities for a particular project at a given commit or\n version.\"\"\"\n privileged = self.is_privileged(context)\n if request.query.HasField('package'):\n package_name = request.query.package.name\n ecosystem = request.query.package.ecosystem\n else:\n package_name = ''\n ecosystem = ''\n\n if request.query.WhichOneof('param') == 'commit':\n bugs = query_by_commit(\n package_name,\n ecosystem,\n request.query.commit,\n privileged,\n to_response=bug_to_response)\n elif 
request.query.WhichOneof('param') == 'version':\n bugs = query_by_version(\n package_name,\n ecosystem,\n request.query.version,\n privileged,\n to_response=bug_to_response)\n else:\n context.abort(grpc.StatusCode.INVALID_ARGUMENT, 'Invalid query.')\n\n return osv_service_v1_pb2.VulnerabilityList(vulns=bugs)\n\n def GetVulnByIdNew(self, request, context):\n \"\"\"Return a `Vulnerability` object for a given OSV ID.\n \"\"\"\n return self.GetVulnById(request, context)\n\n def QueryAffectedNew(self, request, context):\n \"\"\"Query vulnerabilities for a particular project at a given commit or\n version.\"\"\"\n return self.QueryAffected(request, context)\n\n\ndef bug_to_response(bug):\n \"\"\"Convert a Bug entity to a response object.\"\"\"\n return bug.to_vulnerability()\n\n\ndef _get_bugs(bug_ids, to_response=bug_to_response):\n \"\"\"Get bugs from bug ids.\"\"\"\n bugs = ndb.get_multi([ndb.Key(osv.Bug, bug_id) for bug_id in bug_ids])\n return [\n to_response(bug)\n for bug in bugs\n if bug and bug.status == osv.BugStatus.PROCESSED\n ]\n\n\ndef query_by_commit(project,\n ecosystem,\n commit,\n privileged,\n to_response=bug_to_response):\n \"\"\"Query by commit.\"\"\"\n query = osv.AffectedCommit.query(osv.AffectedCommit.commit == commit)\n\n if project:\n query = query.filter(osv.AffectedCommit.project == project)\n\n if ecosystem:\n query = query.filter(osv.AffectedCommit.ecosystem == ecosystem)\n\n if not privileged:\n query = query.filter(osv.AffectedCommit.public == True) # pylint: disable=singleton-comparison\n\n bug_ids = []\n for affected_commit in query:\n bug_ids.append(affected_commit.bug_id)\n\n return _get_bugs(bug_ids, to_response=to_response)\n\n\ndef query_by_tag(project,\n ecosystem,\n tag,\n privileged,\n to_response=bug_to_response):\n \"\"\"Query by tag.\"\"\"\n query = osv.Bug.query(osv.Bug.project == project,\n osv.Bug.ecosystem == ecosystem, osv.Bug.affected == tag)\n\n if not privileged:\n query = query.filter(osv.Bug.public == True) # pylint: disable=singleton-comparison\n\n bugs = []\n for bug in query:\n bugs.append(bug)\n\n return [to_response(bug) for bug in bugs]\n\n\ndef query_by_version(project,\n ecosystem,\n version,\n privileged,\n to_response=bug_to_response):\n \"\"\"Query by (fuzzy) version.\"\"\"\n query = osv.Bug.query(osv.Bug.status == osv.BugStatus.PROCESSED,\n osv.Bug.project == project,\n osv.Bug.ecosystem == ecosystem,\n osv.Bug.affected_fuzzy == osv.normalize_tag(version))\n\n if not privileged:\n query = query.filter(osv.Bug.public == True) # pylint: disable=singleton-comparison\n\n bugs = []\n for bug in query:\n bugs.append(bug)\n\n return [to_response(bug) for bug in bugs]\n\n\ndef serve(port):\n \"\"\"Configures and runs the bookstore API server.\"\"\"\n server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))\n osv_service_v1_pb2_grpc.add_OSVServicer_to_server(OSVServicer(), server)\n server.add_insecure_port('[::]:{}'.format(port))\n server.start()\n\n print('Listening on port {}'.format(port))\n try:\n while True:\n time.sleep(3600)\n except KeyboardInterrupt:\n server.stop(_SHUTDOWN_GRACE_DURATION)\n\n\ndef main():\n \"\"\"Entrypoint.\"\"\"\n logging.basicConfig(stream=sys.stderr)\n logging.getLogger().setLevel(logging.INFO)\n\n parser = argparse.ArgumentParser(\n formatter_class=argparse.RawDescriptionHelpFormatter)\n parser.add_argument(\n '--port',\n type=int,\n default=None,\n help='The port to listen on.'\n 'If arg is not set, will listen on the $PORT env var.'\n 'If env var is empty, defaults to 8000.')\n\n args = 
parser.parse_args()\n port = args.port\n if not port:\n port = os.environ.get('PORT')\n if not port:\n port = 8000\n\n serve(port)\n\n\nif __name__ == '__main__':\n main()\n", "path": "gcp/api/server.py"}]}
2,898
181
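The osv.dev record above fixes `query_by_version` so the ecosystem filter is only applied when an ecosystem is actually supplied, which lets OSS-Fuzz bugs stored with an empty ecosystem still match. A standalone sketch of that conditional-filter pattern (plain dictionaries stand in for the ndb query; names are illustrative):

```python
# Sketch of the conditional-filter pattern from the record's golden diff:
# only constrain by ecosystem when the caller supplied one.
def build_filters(project, ecosystem, version):
    filters = {
        "status": "PROCESSED",
        "project": project,
        "affected_fuzzy": version,
    }
    if ecosystem:  # '' or None means "do not filter on ecosystem"
        filters["ecosystem"] = ecosystem
    return filters

print(build_filters("openssl", "", "1.0.1"))      # no ecosystem constraint
print(build_filters("left-pad", "npm", "1.3.0"))  # ecosystem constraint applied
```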
gh_patches_debug_19401
rasdani/github-patches
git_diff
geopandas__geopandas-643
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> GeoDataFrame.to_file fail on bool column When converting GeoDataFrame with bool column to shp file, got following error ```sh ValueError: 'bool' is not in list ``` </issue> <code> [start of geopandas/io/file.py] 1 import os 2 3 import fiona 4 import numpy as np 5 import six 6 7 from geopandas import GeoDataFrame 8 9 # Adapted from pandas.io.common 10 if six.PY3: 11 from urllib.request import urlopen as _urlopen 12 from urllib.parse import urlparse as parse_url 13 from urllib.parse import uses_relative, uses_netloc, uses_params 14 else: 15 from urllib2 import urlopen as _urlopen 16 from urlparse import urlparse as parse_url 17 from urlparse import uses_relative, uses_netloc, uses_params 18 19 _VALID_URLS = set(uses_relative + uses_netloc + uses_params) 20 _VALID_URLS.discard('') 21 22 23 def _is_url(url): 24 """Check to see if *url* has a valid protocol.""" 25 try: 26 return parse_url(url).scheme in _VALID_URLS 27 except: 28 return False 29 30 31 def read_file(filename, **kwargs): 32 """ 33 Returns a GeoDataFrame from a file or URL. 34 35 Parameters 36 ---------- 37 filename: str 38 Either the absolute or relative path to the file or URL to 39 be opened. 40 **kwargs: 41 Keyword args to be passed to the `open` or `BytesCollection` method 42 in the fiona library when opening the file. For more information on 43 possible keywords, type: 44 ``import fiona; help(fiona.open)`` 45 46 Examples 47 -------- 48 >>> df = geopandas.read_file("nybb.shp") 49 50 Returns 51 ------- 52 geodataframe : GeoDataFrame 53 """ 54 bbox = kwargs.pop('bbox', None) 55 if _is_url(filename): 56 req = _urlopen(filename) 57 path_or_bytes = req.read() 58 reader = fiona.BytesCollection 59 else: 60 path_or_bytes = filename 61 reader = fiona.open 62 with reader(path_or_bytes, **kwargs) as f: 63 crs = f.crs 64 if bbox is not None: 65 assert len(bbox) == 4 66 f_filt = f.filter(bbox=bbox) 67 else: 68 f_filt = f 69 gdf = GeoDataFrame.from_features(f_filt, crs=crs) 70 # re-order with column order from metadata, with geometry last 71 columns = list(f.meta["schema"]["properties"]) + ["geometry"] 72 gdf = gdf[columns] 73 74 return gdf 75 76 77 def to_file(df, filename, driver="ESRI Shapefile", schema=None, 78 **kwargs): 79 """ 80 Write this GeoDataFrame to an OGR data source 81 82 A dictionary of supported OGR providers is available via: 83 >>> import fiona 84 >>> fiona.supported_drivers 85 86 Parameters 87 ---------- 88 df : GeoDataFrame to be written 89 filename : string 90 File path or file handle to write to. 91 driver : string, default 'ESRI Shapefile' 92 The OGR format driver used to write the vector file. 93 schema : dict, default None 94 If specified, the schema dictionary is passed to Fiona to 95 better control how the file is written. If None, GeoPandas 96 will determine the schema based on each column's dtype 97 98 The *kwargs* are passed to fiona.open and can be used to write 99 to multi-layer data, store data within archives (zip files), etc. 
100 """ 101 if schema is None: 102 schema = infer_schema(df) 103 filename = os.path.abspath(os.path.expanduser(filename)) 104 with fiona.drivers(): 105 with fiona.open(filename, 'w', driver=driver, crs=df.crs, 106 schema=schema, **kwargs) as colxn: 107 colxn.writerecords(df.iterfeatures()) 108 109 110 def infer_schema(df): 111 try: 112 from collections import OrderedDict 113 except ImportError: 114 from ordereddict import OrderedDict 115 116 def convert_type(in_type): 117 if in_type == object: 118 return 'str' 119 out_type = type(np.asscalar(np.zeros(1, in_type))).__name__ 120 if out_type == 'long': 121 out_type = 'int' 122 return out_type 123 124 properties = OrderedDict([ 125 (col, convert_type(_type)) for col, _type in 126 zip(df.columns, df.dtypes) if col != df._geometry_column_name 127 ]) 128 129 geom_type = _common_geom_type(df) 130 if not geom_type: 131 raise ValueError("Geometry column cannot contain mutiple " 132 "geometry types when writing to file.") 133 134 schema = {'geometry': geom_type, 'properties': properties} 135 136 return schema 137 138 139 def _common_geom_type(df): 140 # Need to check geom_types before we write to file... 141 # Some (most?) providers expect a single geometry type: 142 # Point, LineString, or Polygon 143 geom_types = df.geometry.geom_type.unique() 144 145 from os.path import commonprefix # To find longest common prefix 146 geom_type = commonprefix([g[::-1] for g in geom_types if g])[::-1] # Reverse 147 if not geom_type: 148 geom_type = None 149 150 return geom_type 151 [end of geopandas/io/file.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/geopandas/io/file.py b/geopandas/io/file.py --- a/geopandas/io/file.py +++ b/geopandas/io/file.py @@ -113,16 +113,20 @@ except ImportError: from ordereddict import OrderedDict - def convert_type(in_type): + def convert_type(column, in_type): if in_type == object: return 'str' out_type = type(np.asscalar(np.zeros(1, in_type))).__name__ if out_type == 'long': out_type = 'int' + if out_type == 'bool': + raise ValueError('column "{}" is boolean type, '.format(column) + + 'which is unsupported in file writing. ' + 'Consider casting the column to int type.') return out_type properties = OrderedDict([ - (col, convert_type(_type)) for col, _type in + (col, convert_type(col, _type)) for col, _type in zip(df.columns, df.dtypes) if col != df._geometry_column_name ])
{"golden_diff": "diff --git a/geopandas/io/file.py b/geopandas/io/file.py\n--- a/geopandas/io/file.py\n+++ b/geopandas/io/file.py\n@@ -113,16 +113,20 @@\n except ImportError:\n from ordereddict import OrderedDict\n \n- def convert_type(in_type):\n+ def convert_type(column, in_type):\n if in_type == object:\n return 'str'\n out_type = type(np.asscalar(np.zeros(1, in_type))).__name__\n if out_type == 'long':\n out_type = 'int'\n+ if out_type == 'bool':\n+ raise ValueError('column \"{}\" is boolean type, '.format(column) +\n+ 'which is unsupported in file writing. '\n+ 'Consider casting the column to int type.')\n return out_type\n \n properties = OrderedDict([\n- (col, convert_type(_type)) for col, _type in\n+ (col, convert_type(col, _type)) for col, _type in\n zip(df.columns, df.dtypes) if col != df._geometry_column_name\n ])\n", "issue": "GeoDataFrame.to_file fail on bool column\nWhen converting GeoDataFrame with bool column to shp file, got following error\r\n```sh\r\nValueError: 'bool' is not in list\r\n```\n", "before_files": [{"content": "import os\n\nimport fiona\nimport numpy as np\nimport six\n\nfrom geopandas import GeoDataFrame\n\n# Adapted from pandas.io.common\nif six.PY3:\n from urllib.request import urlopen as _urlopen\n from urllib.parse import urlparse as parse_url\n from urllib.parse import uses_relative, uses_netloc, uses_params\nelse:\n from urllib2 import urlopen as _urlopen\n from urlparse import urlparse as parse_url\n from urlparse import uses_relative, uses_netloc, uses_params\n\n_VALID_URLS = set(uses_relative + uses_netloc + uses_params)\n_VALID_URLS.discard('')\n\n\ndef _is_url(url):\n \"\"\"Check to see if *url* has a valid protocol.\"\"\"\n try:\n return parse_url(url).scheme in _VALID_URLS\n except:\n return False\n\n\ndef read_file(filename, **kwargs):\n \"\"\"\n Returns a GeoDataFrame from a file or URL.\n\n Parameters\n ----------\n filename: str\n Either the absolute or relative path to the file or URL to\n be opened.\n **kwargs:\n Keyword args to be passed to the `open` or `BytesCollection` method\n in the fiona library when opening the file. For more information on\n possible keywords, type:\n ``import fiona; help(fiona.open)``\n\n Examples\n --------\n >>> df = geopandas.read_file(\"nybb.shp\")\n\n Returns\n -------\n geodataframe : GeoDataFrame\n \"\"\"\n bbox = kwargs.pop('bbox', None)\n if _is_url(filename):\n req = _urlopen(filename)\n path_or_bytes = req.read()\n reader = fiona.BytesCollection\n else:\n path_or_bytes = filename\n reader = fiona.open\n with reader(path_or_bytes, **kwargs) as f:\n crs = f.crs\n if bbox is not None:\n assert len(bbox) == 4\n f_filt = f.filter(bbox=bbox)\n else:\n f_filt = f\n gdf = GeoDataFrame.from_features(f_filt, crs=crs)\n # re-order with column order from metadata, with geometry last\n columns = list(f.meta[\"schema\"][\"properties\"]) + [\"geometry\"]\n gdf = gdf[columns]\n\n return gdf\n\n\ndef to_file(df, filename, driver=\"ESRI Shapefile\", schema=None,\n **kwargs):\n \"\"\"\n Write this GeoDataFrame to an OGR data source\n\n A dictionary of supported OGR providers is available via:\n >>> import fiona\n >>> fiona.supported_drivers\n\n Parameters\n ----------\n df : GeoDataFrame to be written\n filename : string\n File path or file handle to write to.\n driver : string, default 'ESRI Shapefile'\n The OGR format driver used to write the vector file.\n schema : dict, default None\n If specified, the schema dictionary is passed to Fiona to\n better control how the file is written. 
If None, GeoPandas\n will determine the schema based on each column's dtype\n\n The *kwargs* are passed to fiona.open and can be used to write\n to multi-layer data, store data within archives (zip files), etc.\n \"\"\"\n if schema is None:\n schema = infer_schema(df)\n filename = os.path.abspath(os.path.expanduser(filename))\n with fiona.drivers():\n with fiona.open(filename, 'w', driver=driver, crs=df.crs,\n schema=schema, **kwargs) as colxn:\n colxn.writerecords(df.iterfeatures())\n\n\ndef infer_schema(df):\n try:\n from collections import OrderedDict\n except ImportError:\n from ordereddict import OrderedDict\n\n def convert_type(in_type):\n if in_type == object:\n return 'str'\n out_type = type(np.asscalar(np.zeros(1, in_type))).__name__\n if out_type == 'long':\n out_type = 'int'\n return out_type\n\n properties = OrderedDict([\n (col, convert_type(_type)) for col, _type in\n zip(df.columns, df.dtypes) if col != df._geometry_column_name\n ])\n\n geom_type = _common_geom_type(df)\n if not geom_type:\n raise ValueError(\"Geometry column cannot contain mutiple \"\n \"geometry types when writing to file.\")\n\n schema = {'geometry': geom_type, 'properties': properties}\n\n return schema\n\n\ndef _common_geom_type(df):\n # Need to check geom_types before we write to file...\n # Some (most?) providers expect a single geometry type:\n # Point, LineString, or Polygon\n geom_types = df.geometry.geom_type.unique()\n\n from os.path import commonprefix # To find longest common prefix\n geom_type = commonprefix([g[::-1] for g in geom_types if g])[::-1] # Reverse\n if not geom_type:\n geom_type = None\n\n return geom_type\n", "path": "geopandas/io/file.py"}]}
2,009
242
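The geopandas record above makes `infer_schema` raise a clear `ValueError` for boolean columns and tells users to cast them to int before writing. A short sketch of that workaround (the file and column names are made up):

```python
# Workaround suggested by the record's golden diff: cast bool columns to int
# before writing a shapefile. File and column names are illustrative only.
import geopandas as gpd

gdf = gpd.read_file("places.shp")                   # assumed input data
gdf["is_capital"] = gdf["is_capital"].astype(int)   # bool -> 0/1
gdf.to_file("places_out.shp")
```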
gh_patches_debug_31489
rasdani/github-patches
git_diff
tensorlayer__TensorLayer-550
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> install error ### New Issue Checklist - [ ]ImportError: No module named progressbar ### Issue Description when i use the command pip install -e .,error occurs as Complete output from command python setup.py egg_info: Traceback (most recent call last): File "<string>", line 1, in <module> File "/home/dega/Downloads/degawong/tensorflow/tensorlayer/setup.py", line 18, in <module> from tensorlayer import ( File "tensorlayer/__init__.py", line 14, in <module> from . import files File "tensorlayer/files.py", line 12, in <module> import progressbar ImportError: No module named progressbar ---------------------------------------- Command "python setup.py egg_info" failed with error code 1 in /home/.../tensorflow/tensorlayer/ but if i use import progressbar,it works well and get current screen picture someone helps me,thanks ### Reproducible Code - ubuntu 17.10 [INSERT CODE HERE] ```python pip install -e . </issue> <code> [start of setup.py] 1 #!/usr/bin/env python 2 import os 3 import codecs 4 5 try: 6 from setuptools import ( 7 setup, 8 find_packages 9 ) 10 11 except ImportError: 12 from distutils.core import ( 13 setup, 14 find_packages 15 ) 16 17 18 from tensorlayer import ( 19 __contact_emails__, 20 __contact_names__, 21 __description__, 22 __download_url__, 23 __homepage__, 24 __keywords__, 25 __license__, 26 __package_name__, 27 __repository_url__, 28 __version__ 29 ) 30 31 32 # =================== Reading Readme file as TXT files =================== 33 34 if os.path.exists('README.rst'): 35 # codec is used for consistent encoding 36 long_description = codecs.open( 37 os.path.join(os.path.abspath(os.path.dirname(__file__)), 'README.rst'), 38 'r', 'utf-8' 39 ).read() 40 41 else: 42 long_description = 'See ' + __homepage__ 43 44 # ======================= Reading Requirements files as TXT files ======================= 45 46 def req_file(filename): 47 with open(filename) as f: 48 content = f.readlines() 49 # you may also want to remove whitespace characters 50 # Example: `\n` at the end of each line 51 return [x.strip() for x in content] 52 53 # ======================= Defining the requirements var ======================= 54 55 install_requires = req_file("requirements.txt") 56 57 extras_require = { 58 'tf_cpu': ['tensorflow>=1.8.0,<1.9'], 59 'tf_gpu': ['tensorflow-gpu>=1.8.0,<1.9'], 60 'dev': req_file("requirements_dev.txt"), 61 'doc': req_file("docs/requirements.txt"), 62 'test': req_file("tests/requirements.txt") 63 } 64 65 # Readthedocs requires TF 1.5.0 to build properly 66 if os.environ.get('READTHEDOCS', None) == 'True': 67 install_requires.append("tensorflow==1.5.0") 68 69 # ======================= Define the package setup ======================= 70 71 setup( 72 name=__package_name__, 73 74 # Versions should comply with PEP440. For a discussion on single-sourcing 75 # the version across setup.py and the project code, see 76 # https://packaging.python.org/en/latest/single_source_version.html 77 version=__version__, 78 79 description=__description__, 80 long_description=long_description, 81 82 # The project's main homepage. 
83 url=__repository_url__, 84 download_url=__download_url__, 85 86 # Author details 87 author=__contact_names__, 88 author_email=__contact_emails__, 89 90 # maintainer Details 91 maintainer=__contact_names__, 92 maintainer_email=__contact_emails__, 93 94 # The licence under which the project is released 95 license=__license__, 96 97 classifiers=[ 98 # How mature is this project? Common values are 99 # 1 - Planning 100 # 2 - Pre-Alpha 101 # 3 - Alpha 102 # 4 - Beta 103 # 5 - Production/Stable 104 # 6 - Mature 105 # 7 - Inactive 106 'Development Status :: 5 - Production/Stable', 107 108 # Indicate who your project is intended for 109 'Intended Audience :: Developers', 110 'Intended Audience :: Science/Research', 111 'Intended Audience :: Information Technology', 112 113 # Indicate what your project relates to 114 'Topic :: Scientific/Engineering', 115 'Topic :: Scientific/Engineering :: Image Recognition', 116 'Topic :: Scientific/Engineering :: Artificial Intelligence', 117 'Topic :: Software Development :: Libraries', 118 'Topic :: Utilities', 119 120 # Pick your license as you wish (should match "license" above) 121 'License :: OSI Approved :: Apache Software License', 122 123 # Specify the Python versions you support here. In particular, ensure 124 # that you indicate whether you support Python 2, Python 3 or both. 125 'Programming Language :: Python :: 2', 126 'Programming Language :: Python :: 2.7', 127 'Programming Language :: Python :: 3', 128 'Programming Language :: Python :: 3.4', 129 'Programming Language :: Python :: 3.5', 130 'Programming Language :: Python :: 3.6', 131 132 # Additionnal Settings 133 'Environment :: Console', 134 'Natural Language :: English', 135 'Operating System :: OS Independent', 136 ], 137 138 keywords=__keywords__, 139 packages=find_packages(), 140 141 # List run-time dependencies here. These will be installed by pip when 142 # your project is installed. For an analysis of "install_requires" vs pip's 143 # requirements files see: 144 # https://packaging.python.org/en/latest/requirements.html 145 install_requires=install_requires, 146 147 # List additional groups of dependencies here (e.g. development 148 # dependencies). You can install these using the following syntax, 149 # $ pip install -e .[test] 150 extras_require=extras_require, 151 scripts=[ 152 'tl', 153 ], 154 platform=['any'], 155 ) 156 [end of setup.py] [start of tensorlayer/__init__.py] 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 """Deep learning and Reinforcement learning library for Researchers and Engineers""" 4 from __future__ import absolute_import 5 6 import pkg_resources 7 installed_packages = [d for d in pkg_resources.working_set] 8 9 TF_is_installed = False 10 TL_is_installed = False 11 12 for package in installed_packages: 13 if 'tensorflow' in package.project_name: 14 TF_is_installed = True 15 if 'tensorlayer' in package.project_name and 'site-packages' in package.location: 16 TL_is_installed = True 17 18 if TF_is_installed: # The tensorlayer package is installed 19 import tensorflow 20 21 from . import activation 22 from . import cost 23 from . import files 24 from . import iterate 25 from . import layers 26 from . import models 27 from . import utils 28 from . import visualize 29 from . import prepro 30 from . import nlp 31 from . import rein 32 from . 
import distributed 33 34 # alias 35 act = activation 36 vis = visualize 37 38 global_flag = {} 39 global_dict = {} 40 41 elif TL_is_installed: 42 install_instr = "Please make sure you install a recent enough version of TensorFlow." 43 raise ImportError("__init__.py : Could not import TensorFlow. {}".format(install_instr)) 44 45 # Use the following formating: (major, minor, patch, prerelease) 46 VERSION = (1, 8, 5, 'rc2') 47 __shortversion__ = '.'.join(map(str, VERSION[:3])) 48 __version__ = '.'.join(map(str, VERSION[:3])) + "".join(VERSION[3:]) 49 50 __package_name__ = 'tensorlayer' 51 __contact_names__ = 'TensorLayer Contributors' 52 __contact_emails__ = '[email protected]' 53 __homepage__ = 'http://tensorlayer.readthedocs.io/en/latest/' 54 __repository_url__ = 'https://github.com/tensorlayer/tensorlayer' 55 __download_url__ = 'https://github.com/tensorlayer/tensorlayer' 56 __description__ = 'Reinforcement Learning and Deep Learning Library for Researcher and Engineer.' 57 __license__ = 'apache' 58 __keywords__ = 'deep learning, machine learning, computer vision, nlp, supervised learning, unsupervised learning, reinforcement learning, tensorflow' 59 [end of tensorlayer/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -57,6 +57,7 @@ extras_require = { 'tf_cpu': ['tensorflow>=1.8.0,<1.9'], 'tf_gpu': ['tensorflow-gpu>=1.8.0,<1.9'], + 'db': req_file("requirements_db.txt"), 'dev': req_file("requirements_dev.txt"), 'doc': req_file("docs/requirements.txt"), 'test': req_file("tests/requirements.txt") diff --git a/tensorlayer/__init__.py b/tensorlayer/__init__.py --- a/tensorlayer/__init__.py +++ b/tensorlayer/__init__.py @@ -3,19 +3,7 @@ """Deep learning and Reinforcement learning library for Researchers and Engineers""" from __future__ import absolute_import -import pkg_resources -installed_packages = [d for d in pkg_resources.working_set] - -TF_is_installed = False -TL_is_installed = False - -for package in installed_packages: - if 'tensorflow' in package.project_name: - TF_is_installed = True - if 'tensorlayer' in package.project_name and 'site-packages' in package.location: - TL_is_installed = True - -if TF_is_installed: # The tensorlayer package is installed +try: import tensorflow from . import activation @@ -38,9 +26,14 @@ global_flag = {} global_dict = {} -elif TL_is_installed: - install_instr = "Please make sure you install a recent enough version of TensorFlow." - raise ImportError("__init__.py : Could not import TensorFlow. {}".format(install_instr)) +except Exception as e: + + import pkg_resources + installed_packages = [d for d in pkg_resources.working_set] + + for package in installed_packages: + if 'tensorlayer' in package.project_name and 'site-packages' in package.location: + raise ImportError("__init__.py : Could not import TensorLayer.\nError: {}".format(e)) # Use the following formating: (major, minor, patch, prerelease) VERSION = (1, 8, 5, 'rc2')
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -57,6 +57,7 @@\n extras_require = {\n 'tf_cpu': ['tensorflow>=1.8.0,<1.9'],\n 'tf_gpu': ['tensorflow-gpu>=1.8.0,<1.9'],\n+\t'db': req_file(\"requirements_db.txt\"),\n \t'dev': req_file(\"requirements_dev.txt\"),\n \t'doc': req_file(\"docs/requirements.txt\"),\n \t'test': req_file(\"tests/requirements.txt\")\ndiff --git a/tensorlayer/__init__.py b/tensorlayer/__init__.py\n--- a/tensorlayer/__init__.py\n+++ b/tensorlayer/__init__.py\n@@ -3,19 +3,7 @@\n \"\"\"Deep learning and Reinforcement learning library for Researchers and Engineers\"\"\"\n from __future__ import absolute_import\n \n-import pkg_resources\n-installed_packages = [d for d in pkg_resources.working_set]\n-\n-TF_is_installed = False\n-TL_is_installed = False\n-\n-for package in installed_packages:\n- if 'tensorflow' in package.project_name:\n- TF_is_installed = True\n- if 'tensorlayer' in package.project_name and 'site-packages' in package.location:\n- TL_is_installed = True\n-\n-if TF_is_installed: # The tensorlayer package is installed\n+try:\n import tensorflow\n \n from . import activation\n@@ -38,9 +26,14 @@\n global_flag = {}\n global_dict = {}\n \n-elif TL_is_installed:\n- install_instr = \"Please make sure you install a recent enough version of TensorFlow.\"\n- raise ImportError(\"__init__.py : Could not import TensorFlow. {}\".format(install_instr))\n+except Exception as e:\n+\n+ import pkg_resources\n+ installed_packages = [d for d in pkg_resources.working_set]\n+\n+ for package in installed_packages:\n+ if 'tensorlayer' in package.project_name and 'site-packages' in package.location:\n+ raise ImportError(\"__init__.py : Could not import TensorLayer.\\nError: {}\".format(e))\n \n # Use the following formating: (major, minor, patch, prerelease)\n VERSION = (1, 8, 5, 'rc2')\n", "issue": "install error\n### New Issue Checklist\r\n\r\n- [ ]ImportError: No module named progressbar\r\n\r\n### Issue Description\r\n\r\n when i use the command pip install -e .,error occurs as \r\n Complete output from command python setup.py egg_info:\r\n Traceback (most recent call last):\r\n File \"<string>\", line 1, in <module>\r\n File \"/home/dega/Downloads/degawong/tensorflow/tensorlayer/setup.py\", line 18, in <module>\r\n from tensorlayer import (\r\n File \"tensorlayer/__init__.py\", line 14, in <module>\r\n from . 
import files\r\n File \"tensorlayer/files.py\", line 12, in <module>\r\n import progressbar\r\n ImportError: No module named progressbar\r\n \r\n ----------------------------------------\r\nCommand \"python setup.py egg_info\" failed with error code 1 in /home/.../tensorflow/tensorlayer/\r\n\r\nbut if i use import progressbar,it works well and get current screen picture\r\nsomeone helps me,thanks\r\n\r\n### Reproducible Code\r\n\r\n- ubuntu 17.10\r\n\r\n[INSERT CODE HERE]\r\n\r\n```python\r\npip install -e .\r\n\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\nimport os\nimport codecs\n\ntry:\n from setuptools import (\n setup,\n find_packages\n )\n\nexcept ImportError:\n from distutils.core import (\n setup,\n find_packages\n )\n\n\nfrom tensorlayer import (\n __contact_emails__,\n __contact_names__,\n __description__,\n __download_url__,\n __homepage__,\n __keywords__,\n __license__,\n __package_name__,\n __repository_url__,\n __version__\n)\n\n\n# =================== Reading Readme file as TXT files ===================\n\nif os.path.exists('README.rst'):\n # codec is used for consistent encoding\n long_description = codecs.open(\n os.path.join(os.path.abspath(os.path.dirname(__file__)), 'README.rst'),\n 'r', 'utf-8'\n ).read()\n\nelse:\n long_description = 'See ' + __homepage__\n\n# ======================= Reading Requirements files as TXT files =======================\n\ndef req_file(filename):\n with open(filename) as f:\n content = f.readlines()\n # you may also want to remove whitespace characters\n # Example: `\\n` at the end of each line\n return [x.strip() for x in content]\n\n# ======================= Defining the requirements var =======================\n\ninstall_requires = req_file(\"requirements.txt\")\n\nextras_require = {\n 'tf_cpu': ['tensorflow>=1.8.0,<1.9'],\n 'tf_gpu': ['tensorflow-gpu>=1.8.0,<1.9'],\n\t'dev': req_file(\"requirements_dev.txt\"),\n\t'doc': req_file(\"docs/requirements.txt\"),\n\t'test': req_file(\"tests/requirements.txt\")\n}\n\n# Readthedocs requires TF 1.5.0 to build properly\nif os.environ.get('READTHEDOCS', None) == 'True':\n install_requires.append(\"tensorflow==1.5.0\")\n\n# ======================= Define the package setup =======================\n\nsetup(\n name=__package_name__,\n\n # Versions should comply with PEP440. For a discussion on single-sourcing\n # the version across setup.py and the project code, see\n # https://packaging.python.org/en/latest/single_source_version.html\n version=__version__,\n\n description=__description__,\n long_description=long_description,\n\n # The project's main homepage.\n url=__repository_url__,\n download_url=__download_url__,\n\n # Author details\n author=__contact_names__,\n author_email=__contact_emails__,\n\n # maintainer Details\n maintainer=__contact_names__,\n maintainer_email=__contact_emails__,\n\n # The licence under which the project is released\n license=__license__,\n\n classifiers=[\n # How mature is this project? 
Common values are\n # 1 - Planning\n # 2 - Pre-Alpha\n # 3 - Alpha\n # 4 - Beta\n # 5 - Production/Stable\n # 6 - Mature\n # 7 - Inactive\n 'Development Status :: 5 - Production/Stable',\n\n # Indicate who your project is intended for\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'Intended Audience :: Information Technology',\n\n # Indicate what your project relates to\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Image Recognition',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Software Development :: Libraries',\n 'Topic :: Utilities',\n\n # Pick your license as you wish (should match \"license\" above)\n 'License :: OSI Approved :: Apache Software License',\n\n # Specify the Python versions you support here. In particular, ensure\n # that you indicate whether you support Python 2, Python 3 or both.\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n\n # Additionnal Settings\n 'Environment :: Console',\n 'Natural Language :: English',\n 'Operating System :: OS Independent',\n ],\n\n keywords=__keywords__,\n packages=find_packages(),\n\n # List run-time dependencies here. These will be installed by pip when\n # your project is installed. For an analysis of \"install_requires\" vs pip's\n # requirements files see:\n # https://packaging.python.org/en/latest/requirements.html\n install_requires=install_requires,\n\n # List additional groups of dependencies here (e.g. development\n # dependencies). You can install these using the following syntax,\n # $ pip install -e .[test]\n extras_require=extras_require,\n scripts=[\n 'tl',\n ],\n platform=['any'],\n)\n", "path": "setup.py"}, {"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"Deep learning and Reinforcement learning library for Researchers and Engineers\"\"\"\nfrom __future__ import absolute_import\n\nimport pkg_resources\ninstalled_packages = [d for d in pkg_resources.working_set]\n\nTF_is_installed = False\nTL_is_installed = False\n\nfor package in installed_packages:\n if 'tensorflow' in package.project_name:\n TF_is_installed = True\n if 'tensorlayer' in package.project_name and 'site-packages' in package.location:\n TL_is_installed = True\n\nif TF_is_installed: # The tensorlayer package is installed\n import tensorflow\n\n from . import activation\n from . import cost\n from . import files\n from . import iterate\n from . import layers\n from . import models\n from . import utils\n from . import visualize\n from . import prepro\n from . import nlp\n from . import rein\n from . import distributed\n\n # alias\n act = activation\n vis = visualize\n\n global_flag = {}\n global_dict = {}\n\nelif TL_is_installed:\n install_instr = \"Please make sure you install a recent enough version of TensorFlow.\"\n raise ImportError(\"__init__.py : Could not import TensorFlow. 
{}\".format(install_instr))\n\n# Use the following formating: (major, minor, patch, prerelease)\nVERSION = (1, 8, 5, 'rc2')\n__shortversion__ = '.'.join(map(str, VERSION[:3]))\n__version__ = '.'.join(map(str, VERSION[:3])) + \"\".join(VERSION[3:])\n\n__package_name__ = 'tensorlayer'\n__contact_names__ = 'TensorLayer Contributors'\n__contact_emails__ = '[email protected]'\n__homepage__ = 'http://tensorlayer.readthedocs.io/en/latest/'\n__repository_url__ = 'https://github.com/tensorlayer/tensorlayer'\n__download_url__ = 'https://github.com/tensorlayer/tensorlayer'\n__description__ = 'Reinforcement Learning and Deep Learning Library for Researcher and Engineer.'\n__license__ = 'apache'\n__keywords__ = 'deep learning, machine learning, computer vision, nlp, supervised learning, unsupervised learning, reinforcement learning, tensorflow'\n", "path": "tensorlayer/__init__.py"}]}
2,813
493
gh_patches_debug_10215
rasdani/github-patches
git_diff
pfnet__pytorch-pfn-extras-150
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add test for `LogWriterSaveFunc` Add tests check if the output of `LogWriterSaveFunc` follows the specified format. </issue> <code> [start of pytorch_pfn_extras/training/extensions/log_report.py] 1 import json 2 3 from pytorch_pfn_extras import reporting 4 from pytorch_pfn_extras.training import extension 5 from pytorch_pfn_extras.training import trigger as trigger_module 6 7 try: 8 import pandas 9 10 _pandas_available = True 11 except ImportError: 12 _pandas_available = False 13 14 15 class LogWriterSaveFunc: 16 17 def __init__(self, format, append): 18 self._format = format 19 self._append = append 20 21 def __call__(self, target, file_o): 22 if self._format == 'json': 23 if self._append: 24 raise ValueError( 25 'LogReport does not support json format with append mode.') 26 log = json.dumps(target, indent=4) 27 elif self._format == 'json-lines': 28 if self._append: 29 target = target[-1] 30 log = '\n'.join([json.dumps(x) for x in target]) 31 elif self._format == 'yaml': 32 if self._append: 33 target = [target[-1]] 34 import yaml 35 log = yaml.dump(target) 36 else: 37 raise ValueError('Unknown format: {}'.format(self._format)) 38 file_o.write(bytes(log.encode('ascii'))) 39 40 41 class LogReport(extension.Extension): 42 43 """__init__(\ 44 keys=None, trigger=(1, 'epoch'), postprocess=None, filename='log', writer=None) 45 46 An extension to output the accumulated results to a log file. 47 48 This extension accumulates the observations of the manager to 49 :class:`~pytorch_pfn_extras.DictSummary` at a regular interval specified 50 by a supplied trigger, and writes them into a log file in JSON format. 51 52 There are two triggers to handle this extension. One is the trigger to 53 invoke this extension, which is used to handle the timing of accumulating 54 the results. It is set to ``1, 'iteration'`` by default. The other is the 55 trigger to determine when to emit the result. When this trigger returns 56 True, this extension appends the summary of accumulated values to the list 57 of past summaries, and writes the list to the log file. Then, this 58 extension makes a new fresh summary object which is used until the next 59 time that the trigger fires. 60 61 It also adds some entries to each result dictionary. 62 63 - ``'epoch'`` and ``'iteration'`` are the epoch and iteration counts at the 64 output, respectively. 65 - ``'elapsed_time'`` is the elapsed time in seconds since the training 66 begins. The value is taken from :attr:`ExtensionsManager.elapsed_time`. 67 68 Args: 69 keys (iterable of strs): Keys of values to accumulate. If this is None, 70 all the values are accumulated and output to the log file. 71 trigger: Trigger that decides when to aggregate the result and output 72 the values. This is distinct from the trigger of this extension 73 itself. If it is a tuple in the form ``<int>, 'epoch'`` or 74 ``<int>, 'iteration'``, it is passed to :class:`IntervalTrigger`. 75 postprocess: Callback to postprocess the result dictionaries. Each 76 result dictionary is passed to this callback on the output. This 77 callback can modify the result dictionaries, which are used to 78 output to the log file. 79 filename (str): Name of the log file under the output directory. It can 80 be a format string: the last result dictionary is passed for the 81 formatting. For example, users can use '{iteration}' to separate 82 the log files for different iterations. 
If the log name is None, it 83 does not output the log to any file. 84 For historical reasons ``log_name`` is also accepted as an alias 85 of this argument. 86 writer (writer object, optional): must be callable. 87 object to dump the log to. If specified, it needs to have a correct 88 `savefun` defined. The writer can override the save location in 89 the :class:`pytorch_pfn_extras.training.ExtensionsManager` object 90 91 """ 92 93 def __init__(self, keys=None, trigger=(1, 'epoch'), postprocess=None, 94 filename=None, append=False, format=None, **kwargs): 95 self._keys = keys 96 self._trigger = trigger_module.get_trigger(trigger) 97 self._postprocess = postprocess 98 self._log = [] 99 # When using a writer, it needs to have a savefun defined 100 # to deal with a string. 101 self._writer = kwargs.get('writer', None) 102 103 log_name = kwargs.get('log_name', 'log') 104 if filename is None: 105 filename = log_name 106 del log_name # avoid accidental use 107 self._log_name = filename 108 109 if format is None and filename is not None: 110 if filename.endswith('.jsonl'): 111 format = 'json-lines' 112 elif filename.endswith('.yaml'): 113 format = 'yaml' 114 else: 115 format = 'json' 116 117 self._append = append 118 self._format = format 119 self._init_summary() 120 121 def __call__(self, manager): 122 # accumulate the observations 123 keys = self._keys 124 observation = manager.observation 125 summary = self._summary 126 127 if keys is None: 128 summary.add(observation) 129 else: 130 summary.add({k: observation[k] for k in keys if k in observation}) 131 132 writer = manager.writer if self._writer is None else self._writer 133 134 if manager.is_before_training or self._trigger(manager): 135 # output the result 136 stats = self._summary.compute_mean() 137 stats_cpu = {} 138 for name, value in stats.items(): 139 stats_cpu[name] = float(value) # copy to CPU 140 141 stats_cpu['epoch'] = manager.epoch 142 stats_cpu['iteration'] = manager.iteration 143 stats_cpu['elapsed_time'] = manager.elapsed_time 144 145 if self._postprocess is not None: 146 self._postprocess(stats_cpu) 147 148 self._log.append(stats_cpu) 149 150 # write to the log file 151 if self._log_name is not None: 152 log_name = self._log_name.format(**stats_cpu) 153 out = manager.out 154 savefun = LogWriterSaveFunc(self._format, self._append) 155 writer(log_name, out, self._log, 156 savefun=savefun, append=self._append) 157 158 # reset the summary for the next output 159 self._init_summary() 160 161 @property 162 def log(self): 163 """The current list of observation dictionaries.""" 164 return self._log 165 166 def state_dict(self): 167 state = {} 168 if hasattr(self._trigger, 'state_dict'): 169 state['_trigger'] = self._trigger.state_dict() 170 171 try: 172 state['_summary'] = self._summary.state_dict() 173 except KeyError: 174 pass 175 state['_log'] = json.dumps(self._log) 176 return state 177 178 def load_state_dict(self, to_load): 179 if hasattr(self._trigger, 'load_state_dict'): 180 self._trigger.load_state_dict(to_load['_trigger']) 181 self._summary.load_state_dict(to_load['_summary']) 182 self._log = json.loads(to_load['_log']) 183 184 def _init_summary(self): 185 self._summary = reporting.DictSummary() 186 187 def to_dataframe(self): 188 if not _pandas_available: 189 raise ImportError( 190 "Need to install pandas to use `to_dataframe` method." 
191 ) 192 return pandas.DataFrame(self._log) 193 [end of pytorch_pfn_extras/training/extensions/log_report.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pytorch_pfn_extras/training/extensions/log_report.py b/pytorch_pfn_extras/training/extensions/log_report.py --- a/pytorch_pfn_extras/training/extensions/log_report.py +++ b/pytorch_pfn_extras/training/extensions/log_report.py @@ -26,8 +26,9 @@ log = json.dumps(target, indent=4) elif self._format == 'json-lines': if self._append: - target = target[-1] - log = '\n'.join([json.dumps(x) for x in target]) + target = [target[-1]] + # Add a new line at the end for subsequent appends + log = '\n'.join([json.dumps(x) for x in target]) + '\n' elif self._format == 'yaml': if self._append: target = [target[-1]]
{"golden_diff": "diff --git a/pytorch_pfn_extras/training/extensions/log_report.py b/pytorch_pfn_extras/training/extensions/log_report.py\n--- a/pytorch_pfn_extras/training/extensions/log_report.py\n+++ b/pytorch_pfn_extras/training/extensions/log_report.py\n@@ -26,8 +26,9 @@\n log = json.dumps(target, indent=4)\n elif self._format == 'json-lines':\n if self._append:\n- target = target[-1]\n- log = '\\n'.join([json.dumps(x) for x in target])\n+ target = [target[-1]]\n+ # Add a new line at the end for subsequent appends\n+ log = '\\n'.join([json.dumps(x) for x in target]) + '\\n'\n elif self._format == 'yaml':\n if self._append:\n target = [target[-1]]\n", "issue": "Add test for `LogWriterSaveFunc`\nAdd tests check if the output of `LogWriterSaveFunc` follows the specified format.\n", "before_files": [{"content": "import json\n\nfrom pytorch_pfn_extras import reporting\nfrom pytorch_pfn_extras.training import extension\nfrom pytorch_pfn_extras.training import trigger as trigger_module\n\ntry:\n import pandas\n\n _pandas_available = True\nexcept ImportError:\n _pandas_available = False\n\n\nclass LogWriterSaveFunc:\n\n def __init__(self, format, append):\n self._format = format\n self._append = append\n\n def __call__(self, target, file_o):\n if self._format == 'json':\n if self._append:\n raise ValueError(\n 'LogReport does not support json format with append mode.')\n log = json.dumps(target, indent=4)\n elif self._format == 'json-lines':\n if self._append:\n target = target[-1]\n log = '\\n'.join([json.dumps(x) for x in target])\n elif self._format == 'yaml':\n if self._append:\n target = [target[-1]]\n import yaml\n log = yaml.dump(target)\n else:\n raise ValueError('Unknown format: {}'.format(self._format))\n file_o.write(bytes(log.encode('ascii')))\n\n\nclass LogReport(extension.Extension):\n\n \"\"\"__init__(\\\nkeys=None, trigger=(1, 'epoch'), postprocess=None, filename='log', writer=None)\n\n An extension to output the accumulated results to a log file.\n\n This extension accumulates the observations of the manager to\n :class:`~pytorch_pfn_extras.DictSummary` at a regular interval specified\n by a supplied trigger, and writes them into a log file in JSON format.\n\n There are two triggers to handle this extension. One is the trigger to\n invoke this extension, which is used to handle the timing of accumulating\n the results. It is set to ``1, 'iteration'`` by default. The other is the\n trigger to determine when to emit the result. When this trigger returns\n True, this extension appends the summary of accumulated values to the list\n of past summaries, and writes the list to the log file. Then, this\n extension makes a new fresh summary object which is used until the next\n time that the trigger fires.\n\n It also adds some entries to each result dictionary.\n\n - ``'epoch'`` and ``'iteration'`` are the epoch and iteration counts at the\n output, respectively.\n - ``'elapsed_time'`` is the elapsed time in seconds since the training\n begins. The value is taken from :attr:`ExtensionsManager.elapsed_time`.\n\n Args:\n keys (iterable of strs): Keys of values to accumulate. If this is None,\n all the values are accumulated and output to the log file.\n trigger: Trigger that decides when to aggregate the result and output\n the values. This is distinct from the trigger of this extension\n itself. If it is a tuple in the form ``<int>, 'epoch'`` or\n ``<int>, 'iteration'``, it is passed to :class:`IntervalTrigger`.\n postprocess: Callback to postprocess the result dictionaries. 
Each\n result dictionary is passed to this callback on the output. This\n callback can modify the result dictionaries, which are used to\n output to the log file.\n filename (str): Name of the log file under the output directory. It can\n be a format string: the last result dictionary is passed for the\n formatting. For example, users can use '{iteration}' to separate\n the log files for different iterations. If the log name is None, it\n does not output the log to any file.\n For historical reasons ``log_name`` is also accepted as an alias\n of this argument.\n writer (writer object, optional): must be callable.\n object to dump the log to. If specified, it needs to have a correct\n `savefun` defined. The writer can override the save location in\n the :class:`pytorch_pfn_extras.training.ExtensionsManager` object\n\n \"\"\"\n\n def __init__(self, keys=None, trigger=(1, 'epoch'), postprocess=None,\n filename=None, append=False, format=None, **kwargs):\n self._keys = keys\n self._trigger = trigger_module.get_trigger(trigger)\n self._postprocess = postprocess\n self._log = []\n # When using a writer, it needs to have a savefun defined\n # to deal with a string.\n self._writer = kwargs.get('writer', None)\n\n log_name = kwargs.get('log_name', 'log')\n if filename is None:\n filename = log_name\n del log_name # avoid accidental use\n self._log_name = filename\n\n if format is None and filename is not None:\n if filename.endswith('.jsonl'):\n format = 'json-lines'\n elif filename.endswith('.yaml'):\n format = 'yaml'\n else:\n format = 'json'\n\n self._append = append\n self._format = format\n self._init_summary()\n\n def __call__(self, manager):\n # accumulate the observations\n keys = self._keys\n observation = manager.observation\n summary = self._summary\n\n if keys is None:\n summary.add(observation)\n else:\n summary.add({k: observation[k] for k in keys if k in observation})\n\n writer = manager.writer if self._writer is None else self._writer\n\n if manager.is_before_training or self._trigger(manager):\n # output the result\n stats = self._summary.compute_mean()\n stats_cpu = {}\n for name, value in stats.items():\n stats_cpu[name] = float(value) # copy to CPU\n\n stats_cpu['epoch'] = manager.epoch\n stats_cpu['iteration'] = manager.iteration\n stats_cpu['elapsed_time'] = manager.elapsed_time\n\n if self._postprocess is not None:\n self._postprocess(stats_cpu)\n\n self._log.append(stats_cpu)\n\n # write to the log file\n if self._log_name is not None:\n log_name = self._log_name.format(**stats_cpu)\n out = manager.out\n savefun = LogWriterSaveFunc(self._format, self._append)\n writer(log_name, out, self._log,\n savefun=savefun, append=self._append)\n\n # reset the summary for the next output\n self._init_summary()\n\n @property\n def log(self):\n \"\"\"The current list of observation dictionaries.\"\"\"\n return self._log\n\n def state_dict(self):\n state = {}\n if hasattr(self._trigger, 'state_dict'):\n state['_trigger'] = self._trigger.state_dict()\n\n try:\n state['_summary'] = self._summary.state_dict()\n except KeyError:\n pass\n state['_log'] = json.dumps(self._log)\n return state\n\n def load_state_dict(self, to_load):\n if hasattr(self._trigger, 'load_state_dict'):\n self._trigger.load_state_dict(to_load['_trigger'])\n self._summary.load_state_dict(to_load['_summary'])\n self._log = json.loads(to_load['_log'])\n\n def _init_summary(self):\n self._summary = reporting.DictSummary()\n\n def to_dataframe(self):\n if not _pandas_available:\n raise ImportError(\n \"Need to install pandas 
to use `to_dataframe` method.\"\n )\n return pandas.DataFrame(self._log)\n", "path": "pytorch_pfn_extras/training/extensions/log_report.py"}]}
2,651
194
gh_patches_debug_39731
rasdani/github-patches
git_diff
OCHA-DAP__hdx-ckan-1835
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Redirect a non-new user to Newsfeed instead of My Organisations </issue> <code> [start of ckanext-hdx_users/ckanext/hdx_users/controllers/login_controller.py] 1 import datetime 2 import dateutil 3 4 import ckan.controllers.user as ckan_user 5 import ckan.lib.helpers as h 6 import ckan.lib.base as base 7 from ckan.common import _, c, g, request 8 import ckan.logic as logic 9 from pylons import config 10 11 get_action = logic.get_action 12 13 class LoginController(ckan_user.UserController): 14 def logged_in(self): 15 # redirect if needed 16 came_from = request.params.get('came_from', '') 17 if self._sane_came_from(came_from): 18 return h.redirect_to(str(came_from)) 19 20 if c.user: 21 context = None 22 data_dict = {'id': c.user} 23 24 user_dict = get_action('user_show')(context, data_dict) 25 26 if 'created' in user_dict: 27 time_passed = datetime.datetime.now() - dateutil.parser.parse( user_dict['created'] ) 28 else: 29 time_passed = None 30 31 if not user_dict['activity'] and time_passed and time_passed.days < 3: 32 #/dataset/new 33 contribute_url = h.url_for(controller='package', action='new') 34 # message = ''' Now that you've registered an account , you can <a href="%s">start adding datasets</a>. 35 # If you want to associate this dataset with an organization, either click on "My Organizations" below 36 # to create a new organization or ask the admin of an existing organization to add you as a member.''' % contribute_url 37 #h.flash_success(_(message), True) 38 else: 39 h.flash_success(_("%s is now logged in") % 40 user_dict['display_name']) 41 #return self.me() 42 # Instead redirect to My orgs page 43 return h.redirect_to(controller='user', 44 action='dashboard_organizations') 45 else: 46 err = _('Login failed. Bad username or password.') 47 if g.openid_enabled: 48 err += _(' (Or if using OpenID, it hasn\'t been associated ' 49 'with a user account.)') 50 if h.asbool(config.get('ckan.legacy_templates', 'false')): 51 h.flash_error(err) 52 h.redirect_to(controller='user', 53 action='login', came_from=came_from) 54 else: 55 return self.login(error=err) 56 57 def contribute(self, error=None): 58 self.login(error) 59 vars = {'contribute':True} 60 return base.render('user/login.html', extra_vars=vars) [end of ckanext-hdx_users/ckanext/hdx_users/controllers/login_controller.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ckanext-hdx_users/ckanext/hdx_users/controllers/login_controller.py b/ckanext-hdx_users/ckanext/hdx_users/controllers/login_controller.py --- a/ckanext-hdx_users/ckanext/hdx_users/controllers/login_controller.py +++ b/ckanext-hdx_users/ckanext/hdx_users/controllers/login_controller.py @@ -10,7 +10,9 @@ get_action = logic.get_action + class LoginController(ckan_user.UserController): + def logged_in(self): # redirect if needed came_from = request.params.get('came_from', '') @@ -24,24 +26,22 @@ user_dict = get_action('user_show')(context, data_dict) if 'created' in user_dict: - time_passed = datetime.datetime.now() - dateutil.parser.parse( user_dict['created'] ) + time_passed = datetime.datetime.now( + ) - dateutil.parser.parse(user_dict['created']) else: - time_passed = None - + time_passed = None if not user_dict['activity'] and time_passed and time_passed.days < 3: - #/dataset/new - contribute_url = h.url_for(controller='package', action='new') - # message = ''' Now that you've registered an account , you can <a href="%s">start adding datasets</a>. - # If you want to associate this dataset with an organization, either click on "My Organizations" below + #/dataset/new + contribute_url = h.url_for(controller='package', action='new') + # message = ''' Now that you've registered an account , you can <a href="%s">start adding datasets</a>. + # If you want to associate this dataset with an organization, either click on "My Organizations" below # to create a new organization or ask the admin of an existing organization to add you as a member.''' % contribute_url #h.flash_success(_(message), True) + return h.redirect_to(controller='user', action='dashboard_organizations') else: h.flash_success(_("%s is now logged in") % - user_dict['display_name']) - #return self.me() - # Instead redirect to My orgs page - return h.redirect_to(controller='user', - action='dashboard_organizations') + user_dict['display_name']) + return self.me() else: err = _('Login failed. Bad username or password.') if g.openid_enabled: @@ -53,8 +53,8 @@ action='login', came_from=came_from) else: return self.login(error=err) - + def contribute(self, error=None): self.login(error) - vars = {'contribute':True} - return base.render('user/login.html', extra_vars=vars) \ No newline at end of file + vars = {'contribute': True} + return base.render('user/login.html', extra_vars=vars)
{"golden_diff": "diff --git a/ckanext-hdx_users/ckanext/hdx_users/controllers/login_controller.py b/ckanext-hdx_users/ckanext/hdx_users/controllers/login_controller.py\n--- a/ckanext-hdx_users/ckanext/hdx_users/controllers/login_controller.py\n+++ b/ckanext-hdx_users/ckanext/hdx_users/controllers/login_controller.py\n@@ -10,7 +10,9 @@\n \n get_action = logic.get_action\n \n+\n class LoginController(ckan_user.UserController):\n+\n def logged_in(self):\n # redirect if needed\n came_from = request.params.get('came_from', '')\n@@ -24,24 +26,22 @@\n user_dict = get_action('user_show')(context, data_dict)\n \n if 'created' in user_dict:\n- time_passed = datetime.datetime.now() - dateutil.parser.parse( user_dict['created'] )\n+ time_passed = datetime.datetime.now(\n+ ) - dateutil.parser.parse(user_dict['created'])\n else:\n- time_passed = None \n- \n+ time_passed = None\n if not user_dict['activity'] and time_passed and time_passed.days < 3:\n- #/dataset/new \n- contribute_url = h.url_for(controller='package', action='new')\n- # message = ''' Now that you've registered an account , you can <a href=\"%s\">start adding datasets</a>. \n- # If you want to associate this dataset with an organization, either click on \"My Organizations\" below \n+ #/dataset/new\n+ contribute_url = h.url_for(controller='package', action='new')\n+ # message = ''' Now that you've registered an account , you can <a href=\"%s\">start adding datasets</a>.\n+ # If you want to associate this dataset with an organization, either click on \"My Organizations\" below\n # to create a new organization or ask the admin of an existing organization to add you as a member.''' % contribute_url\n #h.flash_success(_(message), True)\n+ return h.redirect_to(controller='user', action='dashboard_organizations')\n else:\n h.flash_success(_(\"%s is now logged in\") %\n- user_dict['display_name'])\n- #return self.me()\n- # Instead redirect to My orgs page\n- return h.redirect_to(controller='user',\n- action='dashboard_organizations')\n+ user_dict['display_name'])\n+ return self.me()\n else:\n err = _('Login failed. 
Bad username or password.')\n if g.openid_enabled:\n@@ -53,8 +53,8 @@\n action='login', came_from=came_from)\n else:\n return self.login(error=err)\n- \n+\n def contribute(self, error=None):\n self.login(error)\n- vars = {'contribute':True}\n- return base.render('user/login.html', extra_vars=vars)\n\\ No newline at end of file\n+ vars = {'contribute': True}\n+ return base.render('user/login.html', extra_vars=vars)\n", "issue": "Redirect a non-new user to Newsfeed instead of My Organisations\n\n", "before_files": [{"content": "import datetime\nimport dateutil\n\nimport ckan.controllers.user as ckan_user\nimport ckan.lib.helpers as h\nimport ckan.lib.base as base\nfrom ckan.common import _, c, g, request\nimport ckan.logic as logic\nfrom pylons import config\n\nget_action = logic.get_action\n\nclass LoginController(ckan_user.UserController):\n def logged_in(self):\n # redirect if needed\n came_from = request.params.get('came_from', '')\n if self._sane_came_from(came_from):\n return h.redirect_to(str(came_from))\n\n if c.user:\n context = None\n data_dict = {'id': c.user}\n\n user_dict = get_action('user_show')(context, data_dict)\n\n if 'created' in user_dict:\n time_passed = datetime.datetime.now() - dateutil.parser.parse( user_dict['created'] )\n else:\n time_passed = None \n \n if not user_dict['activity'] and time_passed and time_passed.days < 3:\n #/dataset/new \n contribute_url = h.url_for(controller='package', action='new')\n # message = ''' Now that you've registered an account , you can <a href=\"%s\">start adding datasets</a>. \n # If you want to associate this dataset with an organization, either click on \"My Organizations\" below \n # to create a new organization or ask the admin of an existing organization to add you as a member.''' % contribute_url\n #h.flash_success(_(message), True)\n else:\n h.flash_success(_(\"%s is now logged in\") %\n user_dict['display_name'])\n #return self.me()\n # Instead redirect to My orgs page\n return h.redirect_to(controller='user',\n action='dashboard_organizations')\n else:\n err = _('Login failed. Bad username or password.')\n if g.openid_enabled:\n err += _(' (Or if using OpenID, it hasn\\'t been associated '\n 'with a user account.)')\n if h.asbool(config.get('ckan.legacy_templates', 'false')):\n h.flash_error(err)\n h.redirect_to(controller='user',\n action='login', came_from=came_from)\n else:\n return self.login(error=err)\n \n def contribute(self, error=None):\n self.login(error)\n vars = {'contribute':True}\n return base.render('user/login.html', extra_vars=vars)", "path": "ckanext-hdx_users/ckanext/hdx_users/controllers/login_controller.py"}]}
1,209
671
gh_patches_debug_53600
rasdani/github-patches
git_diff
aws__aws-cli-577
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> typo in s3api list-objects documentation The documentation for the s3api list-objects --max-items parameter says that a `NextMarker` will be provided, while the --starting-token parameter refers to this as `NextToken` which is the actual name of the returned token in JSON. So in short I think that the `NextMarker` should really say `NextToken` to prevent any confusion. </issue> <code> [start of awscli/customizations/paginate.py] 1 # Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"). You 4 # may not use this file except in compliance with the License. A copy of 5 # the License is located at 6 # 7 # http://aws.amazon.com/apache2.0/ 8 # 9 # or in the "license" file accompanying this file. This file is 10 # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF 11 # ANY KIND, either express or implied. See the License for the specific 12 # language governing permissions and limitations under the License. 13 """This module has customizations to unify paging paramters. 14 15 For any operation that can be paginated, we will: 16 17 * Remove the service specific pagination params. This can vary across 18 services and we're going to replace them with a consistent set of 19 arguments. 20 * Add a ``--starting-token`` and a ``--max-items`` argument. 21 22 """ 23 import logging 24 25 from awscli.arguments import BaseCLIArgument 26 from botocore.parameters import StringParameter 27 28 logger = logging.getLogger(__name__) 29 30 31 STARTING_TOKEN_HELP = """ 32 <p>A token to specify where to start paginating. This is the 33 <code>NextToken</code> from a previously truncated response.</p> 34 """ 35 36 MAX_ITEMS_HELP = """ 37 <p>The total number of items to return. If the total number 38 of items available is more than the value specified in 39 max-items then a <code>NextMarker</code> will 40 be provided in the output that you can use to resume pagination. 41 """ 42 43 44 def unify_paging_params(argument_table, operation, **kwargs): 45 if not operation.can_paginate: 46 # We only apply these customizations to paginated responses. 
47 return 48 logger.debug("Modifying paging parameters for operation: %s", operation) 49 _remove_existing_paging_arguments(argument_table, operation) 50 argument_table['starting-token'] = PageArgument('starting-token', 51 STARTING_TOKEN_HELP, 52 operation, 53 parse_type='string') 54 argument_table['max-items'] = PageArgument('max-items', MAX_ITEMS_HELP, 55 operation, parse_type='integer') 56 57 58 def _remove_existing_paging_arguments(argument_table, operation): 59 tokens = _get_input_tokens(operation) 60 for token_name in tokens: 61 cli_name = _get_cli_name(operation.params, token_name) 62 del argument_table[cli_name] 63 if 'limit_key' in operation.pagination: 64 key_name = operation.pagination['limit_key'] 65 cli_name = _get_cli_name(operation.params, key_name) 66 del argument_table[cli_name] 67 68 69 def _get_input_tokens(operation): 70 config = operation.pagination 71 tokens = config['input_token'] 72 if not isinstance(tokens, list): 73 return [tokens] 74 return tokens 75 76 77 def _get_cli_name(param_objects, token_name): 78 for param in param_objects: 79 if param.name == token_name: 80 return param.cli_name.lstrip('-') 81 82 83 class PageArgument(BaseCLIArgument): 84 type_map = { 85 'string': str, 86 'integer': int, 87 } 88 89 def __init__(self, name, documentation, operation, parse_type): 90 param = StringParameter(operation, name=name, type=parse_type) 91 self._name = name 92 self.argument_object = param 93 self._name = name 94 self._documentation = documentation 95 self._parse_type = parse_type 96 97 @property 98 def cli_name(self): 99 return '--' + self._name 100 101 @property 102 def cli_type_name(self): 103 return self._parse_type 104 105 @property 106 def required(self): 107 return False 108 109 @property 110 def documentation(self): 111 return self._documentation 112 113 def add_to_parser(self, parser): 114 parser.add_argument(self.cli_name, dest=self.py_name, 115 type=self.type_map[self._parse_type]) 116 117 def add_to_params(self, parameters, value): 118 if value is not None: 119 parameters[self.py_name] = value 120 [end of awscli/customizations/paginate.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/awscli/customizations/paginate.py b/awscli/customizations/paginate.py --- a/awscli/customizations/paginate.py +++ b/awscli/customizations/paginate.py @@ -36,7 +36,7 @@ MAX_ITEMS_HELP = """ <p>The total number of items to return. If the total number of items available is more than the value specified in -max-items then a <code>NextMarker</code> will +max-items then a <code>NextToken</code> will be provided in the output that you can use to resume pagination. """
{"golden_diff": "diff --git a/awscli/customizations/paginate.py b/awscli/customizations/paginate.py\n--- a/awscli/customizations/paginate.py\n+++ b/awscli/customizations/paginate.py\n@@ -36,7 +36,7 @@\n MAX_ITEMS_HELP = \"\"\"\n <p>The total number of items to return. If the total number\n of items available is more than the value specified in\n-max-items then a <code>NextMarker</code> will\n+max-items then a <code>NextToken</code> will\n be provided in the output that you can use to resume pagination.\n \"\"\"\n", "issue": "typo in s3api list-objects documentation\nThe documentation for the s3api list-objects --max-items parameter says that a `NextMarker` will be provided, while the --starting-token parameter refers to this as `NextToken` which is the actual name of the returned token in JSON.\n\nSo in short I think that the `NextMarker` should really say `NextToken` to prevent any confusion.\n\n", "before_files": [{"content": "# Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\n\"\"\"This module has customizations to unify paging paramters.\n\nFor any operation that can be paginated, we will:\n\n * Remove the service specific pagination params. This can vary across\n services and we're going to replace them with a consistent set of\n arguments.\n * Add a ``--starting-token`` and a ``--max-items`` argument.\n\n\"\"\"\nimport logging\n\nfrom awscli.arguments import BaseCLIArgument\nfrom botocore.parameters import StringParameter\n\nlogger = logging.getLogger(__name__)\n\n\nSTARTING_TOKEN_HELP = \"\"\"\n<p>A token to specify where to start paginating. This is the\n<code>NextToken</code> from a previously truncated response.</p>\n\"\"\"\n\nMAX_ITEMS_HELP = \"\"\"\n<p>The total number of items to return. 
If the total number\nof items available is more than the value specified in\nmax-items then a <code>NextMarker</code> will\nbe provided in the output that you can use to resume pagination.\n\"\"\"\n\n\ndef unify_paging_params(argument_table, operation, **kwargs):\n if not operation.can_paginate:\n # We only apply these customizations to paginated responses.\n return\n logger.debug(\"Modifying paging parameters for operation: %s\", operation)\n _remove_existing_paging_arguments(argument_table, operation)\n argument_table['starting-token'] = PageArgument('starting-token',\n STARTING_TOKEN_HELP,\n operation,\n parse_type='string')\n argument_table['max-items'] = PageArgument('max-items', MAX_ITEMS_HELP,\n operation, parse_type='integer')\n\n\ndef _remove_existing_paging_arguments(argument_table, operation):\n tokens = _get_input_tokens(operation)\n for token_name in tokens:\n cli_name = _get_cli_name(operation.params, token_name)\n del argument_table[cli_name]\n if 'limit_key' in operation.pagination:\n key_name = operation.pagination['limit_key']\n cli_name = _get_cli_name(operation.params, key_name)\n del argument_table[cli_name]\n\n\ndef _get_input_tokens(operation):\n config = operation.pagination\n tokens = config['input_token']\n if not isinstance(tokens, list):\n return [tokens]\n return tokens\n\n\ndef _get_cli_name(param_objects, token_name):\n for param in param_objects:\n if param.name == token_name:\n return param.cli_name.lstrip('-')\n\n\nclass PageArgument(BaseCLIArgument):\n type_map = {\n 'string': str,\n 'integer': int,\n }\n\n def __init__(self, name, documentation, operation, parse_type):\n param = StringParameter(operation, name=name, type=parse_type)\n self._name = name\n self.argument_object = param\n self._name = name\n self._documentation = documentation\n self._parse_type = parse_type\n\n @property\n def cli_name(self):\n return '--' + self._name\n\n @property\n def cli_type_name(self):\n return self._parse_type\n\n @property\n def required(self):\n return False\n\n @property\n def documentation(self):\n return self._documentation\n\n def add_to_parser(self, parser):\n parser.add_argument(self.cli_name, dest=self.py_name,\n type=self.type_map[self._parse_type])\n\n def add_to_params(self, parameters, value):\n if value is not None:\n parameters[self.py_name] = value\n", "path": "awscli/customizations/paginate.py"}]}
1,746
131
gh_patches_debug_12275
rasdani/github-patches
git_diff
conan-io__conan-5052
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> MSBuild should allow "do not adjust PlatformToolset" with an argument Hey, In https://github.com/conan-io/conan/pull/4250 the default behavior of VisualStudio's toolset selection was changed to compile the whole solution using the same toolset (the `/p:PlatformToolset` is always set - see discussion here https://github.com/conan-io/conan/pull/4250#pullrequestreview-193994771). This change prevents using two different toolsets in the same solution. In my case building a driver using the `WindowsKernelModeDriver10.0` (which is not even a valid option right now) toolset and user mode library using the `v141` toolset. I will be happy to write the code if this behavior change is accepted. </issue> <code> [start of conans/client/build/msbuild.py] 1 import copy 2 import os 3 import re 4 import subprocess 5 6 from conans.client import tools 7 from conans.client.build.visual_environment import (VisualStudioBuildEnvironment, 8 vs_build_type_flags, vs_std_cpp) 9 from conans.client.tools.oss import cpu_count 10 from conans.client.tools.win import vcvars_command 11 from conans.errors import ConanException 12 from conans.model.conan_file import ConanFile 13 from conans.model.version import Version 14 from conans.tools import vcvars_command as tools_vcvars_command 15 from conans.util.env_reader import get_env 16 from conans.util.files import decode_text, save 17 18 19 class MSBuild(object): 20 21 def __init__(self, conanfile): 22 if isinstance(conanfile, ConanFile): 23 self._conanfile = conanfile 24 self._settings = self._conanfile.settings 25 self._output = self._conanfile.output 26 self.build_env = VisualStudioBuildEnvironment(self._conanfile, 27 with_build_type_flags=False) 28 else: # backwards compatible with build_sln_command 29 self._settings = conanfile 30 self.build_env = None 31 32 def build(self, project_file, targets=None, upgrade_project=True, build_type=None, arch=None, 33 parallel=True, force_vcvars=False, toolset=None, platforms=None, use_env=True, 34 vcvars_ver=None, winsdk_version=None, properties=None, output_binary_log=None, 35 property_file_name=None, verbosity=None, definitions=None): 36 """ 37 :param project_file: Path to the .sln file. 38 :param targets: List of targets to build. 39 :param upgrade_project: Will call devenv to upgrade the solution to your current Visual Studio. 40 :param build_type: Use a custom build type name instead of the default settings.build_type one. 41 :param arch: Use a custom architecture name instead of the settings.arch one. 42 It will be used to build the /p:Configuration= parameter of MSBuild. 43 It can be used as the key of the platforms parameter. E.g. arch="x86", platforms={"x86": "i386"} 44 :param parallel: Will use the configured number of cores in the conan.conf file or tools.cpu_count(): 45 In the solution: Building the solution with the projects in parallel. (/m: parameter). 46 CL compiler: Building the sources in parallel. (/MP: compiler flag) 47 :param force_vcvars: Will ignore if the environment is already set for a different Visual Studio version. 48 :param toolset: Specify a toolset. Will append a /p:PlatformToolset option. 49 :param platforms: Dictionary with the mapping of archs/platforms from Conan naming to another one. 50 It is useful for Visual Studio solutions that have a different naming in architectures. 51 Example: platforms={"x86":"Win32"} (Visual solution uses "Win32" instead of "x86"). 
52 This dictionary will update the default one: 53 msvc_arch = {'x86': 'x86', 'x86_64': 'x64', 'armv7': 'ARM', 'armv8': 'ARM64'} 54 :param use_env: Applies the argument /p:UseEnv=true to the MSBuild call. 55 :param vcvars_ver: Specifies the Visual Studio compiler toolset to use. 56 :param winsdk_version: Specifies the version of the Windows SDK to use. 57 :param properties: Dictionary with new properties, for each element in the dictionary {name: value} 58 it will append a /p:name="value" option. 59 :param output_binary_log: If set to True then MSBuild will output a binary log file called msbuild.binlog in 60 the working directory. It can also be used to set the name of log file like this 61 output_binary_log="my_log.binlog". 62 This parameter is only supported starting from MSBuild version 15.3 and onwards. 63 :param property_file_name: When None it will generate a file named conan_build.props. 64 You can specify a different name for the generated properties file. 65 :param verbosity: Specifies verbosity level (/verbosity: parameter) 66 :param definitions: Dictionary with additional compiler definitions to be applied during the build. 67 Use value of None to set compiler definition with no value. 68 :return: status code of the MSBuild command invocation 69 """ 70 71 property_file_name = property_file_name or "conan_build.props" 72 self.build_env.parallel = parallel 73 74 with tools.environment_append(self.build_env.vars): 75 # Path for custom properties file 76 props_file_contents = self._get_props_file_contents(definitions) 77 property_file_name = os.path.abspath(property_file_name) 78 save(property_file_name, props_file_contents) 79 vcvars = vcvars_command(self._conanfile.settings, force=force_vcvars, 80 vcvars_ver=vcvars_ver, winsdk_version=winsdk_version, 81 output=self._output) 82 command = self.get_command(project_file, property_file_name, 83 targets=targets, upgrade_project=upgrade_project, 84 build_type=build_type, arch=arch, parallel=parallel, 85 toolset=toolset, platforms=platforms, 86 use_env=use_env, properties=properties, 87 output_binary_log=output_binary_log, 88 verbosity=verbosity) 89 command = "%s && %s" % (vcvars, command) 90 return self._conanfile.run(command) 91 92 def get_command(self, project_file, props_file_path=None, targets=None, upgrade_project=True, 93 build_type=None, arch=None, parallel=True, toolset=None, platforms=None, 94 use_env=False, properties=None, output_binary_log=None, verbosity=None): 95 96 targets = targets or [] 97 properties = properties or {} 98 command = [] 99 100 if upgrade_project and not get_env("CONAN_SKIP_VS_PROJECTS_UPGRADE", False): 101 command.append('devenv "%s" /upgrade &&' % project_file) 102 else: 103 self._output.info("Skipped sln project upgrade") 104 105 build_type = build_type or self._settings.get_safe("build_type") 106 arch = arch or self._settings.get_safe("arch") 107 toolset = toolset or tools.msvs_toolset(self._settings) 108 verbosity = os.getenv("CONAN_MSBUILD_VERBOSITY") or verbosity or "minimal" 109 if not build_type: 110 raise ConanException("Cannot build_sln_command, build_type not defined") 111 if not arch: 112 raise ConanException("Cannot build_sln_command, arch not defined") 113 114 command.append('msbuild "%s" /p:Configuration="%s"' % (project_file, build_type)) 115 msvc_arch = {'x86': 'x86', 116 'x86_64': 'x64', 117 'armv7': 'ARM', 118 'armv8': 'ARM64'} 119 if platforms: 120 msvc_arch.update(platforms) 121 msvc_arch = msvc_arch.get(str(arch)) 122 if self._settings.get_safe("os") == "WindowsCE": 123 msvc_arch = 
self._settings.get_safe("os.platform") 124 try: 125 sln = tools.load(project_file) 126 pattern = re.compile(r"GlobalSection\(SolutionConfigurationPlatforms\)" 127 r"(.*?)EndGlobalSection", re.DOTALL) 128 solution_global = pattern.search(sln).group(1) 129 lines = solution_global.splitlines() 130 lines = [s.split("=")[0].strip() for s in lines] 131 except Exception: 132 pass # TODO: !!! what are we catching here? tools.load? .group(1)? .splitlines? 133 else: 134 config = "%s|%s" % (build_type, msvc_arch) 135 if config not in "".join(lines): 136 self._output.warn("***** The configuration %s does not exist in this solution *****" 137 % config) 138 self._output.warn("Use 'platforms' argument to define your architectures") 139 140 if output_binary_log: 141 msbuild_version = MSBuild.get_version(self._settings) 142 if msbuild_version >= "15.3": # http://msbuildlog.com/ 143 command.append('/bl' if isinstance(output_binary_log, bool) 144 else '/bl:"%s"' % output_binary_log) 145 else: 146 raise ConanException("MSBuild version detected (%s) does not support " 147 "'output_binary_log' ('/bl')" % msbuild_version) 148 149 if use_env: 150 command.append('/p:UseEnv=true') 151 152 if msvc_arch: 153 command.append('/p:Platform="%s"' % msvc_arch) 154 155 if parallel: 156 command.append('/m:%s' % cpu_count(output=self._output)) 157 158 if targets: 159 command.append("/target:%s" % ";".join(targets)) 160 161 if toolset: 162 command.append('/p:PlatformToolset="%s"' % toolset) 163 164 if verbosity: 165 command.append('/verbosity:%s' % verbosity) 166 167 if props_file_path: 168 command.append('/p:ForceImportBeforeCppTargets="%s"' 169 % os.path.abspath(props_file_path)) 170 171 for name, value in properties.items(): 172 command.append('/p:%s="%s"' % (name, value)) 173 174 return " ".join(command) 175 176 def _get_props_file_contents(self, definitions=None): 177 178 def format_macro(name, value): 179 return "%s=%s" % (name, value) if value else name 180 181 # how to specify runtime in command line: 182 # https://stackoverflow.com/questions/38840332/msbuild-overrides-properties-while-building-vc-project 183 runtime_library = {"MT": "MultiThreaded", 184 "MTd": "MultiThreadedDebug", 185 "MD": "MultiThreadedDLL", 186 "MDd": "MultiThreadedDebugDLL"}.get( 187 self._settings.get_safe("compiler.runtime"), "") 188 189 if self.build_env: 190 # Take the flags from the build env, the user was able to alter them if needed 191 flags = copy.copy(self.build_env.flags) 192 flags.append(self.build_env.std) 193 else: # To be removed when build_sln_command is deprecated 194 flags = vs_build_type_flags(self._settings, with_flags=False) 195 flags.append(vs_std_cpp(self._settings)) 196 197 if definitions: 198 definitions = ";".join([format_macro(name, definitions[name]) for name in definitions]) 199 200 flags_str = " ".join(list(filter(None, flags))) # Removes empty and None elements 201 additional_node = "<AdditionalOptions>" \ 202 "{} %(AdditionalOptions)" \ 203 "</AdditionalOptions>".format(flags_str) if flags_str else "" 204 runtime_node = "<RuntimeLibrary>" \ 205 "{}" \ 206 "</RuntimeLibrary>".format(runtime_library) if runtime_library else "" 207 definitions_node = "<PreprocessorDefinitions>" \ 208 "{};%(PreprocessorDefinitions)" \ 209 "</PreprocessorDefinitions>".format(definitions) if definitions else "" 210 template = """<?xml version="1.0" encoding="utf-8"?> 211 <Project xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> 212 <ItemDefinitionGroup> 213 <ClCompile> 214 {runtime_node} 215 {additional_node} 216 
{definitions_node} 217 </ClCompile> 218 </ItemDefinitionGroup> 219 </Project>""".format(**{"runtime_node": runtime_node, 220 "additional_node": additional_node, 221 "definitions_node": definitions_node}) 222 return template 223 224 @staticmethod 225 def get_version(settings): 226 msbuild_cmd = "msbuild -version" 227 vcvars = tools_vcvars_command(settings) 228 command = "%s && %s" % (vcvars, msbuild_cmd) 229 try: 230 out, _ = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True).communicate() 231 version_line = decode_text(out).split("\n")[-1] 232 prog = re.compile("(\d+\.){2,3}\d+") 233 result = prog.match(version_line).group() 234 return Version(result) 235 except Exception as e: 236 raise ConanException("Error retrieving MSBuild version: '{}'".format(e)) 237 [end of conans/client/build/msbuild.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/conans/client/build/msbuild.py b/conans/client/build/msbuild.py --- a/conans/client/build/msbuild.py +++ b/conans/client/build/msbuild.py @@ -104,7 +104,8 @@ build_type = build_type or self._settings.get_safe("build_type") arch = arch or self._settings.get_safe("arch") - toolset = toolset or tools.msvs_toolset(self._settings) + if toolset is None: # False value to skip adjusting + toolset = tools.msvs_toolset(self._settings) verbosity = os.getenv("CONAN_MSBUILD_VERBOSITY") or verbosity or "minimal" if not build_type: raise ConanException("Cannot build_sln_command, build_type not defined")
{"golden_diff": "diff --git a/conans/client/build/msbuild.py b/conans/client/build/msbuild.py\n--- a/conans/client/build/msbuild.py\n+++ b/conans/client/build/msbuild.py\n@@ -104,7 +104,8 @@\n \n build_type = build_type or self._settings.get_safe(\"build_type\")\n arch = arch or self._settings.get_safe(\"arch\")\n- toolset = toolset or tools.msvs_toolset(self._settings)\n+ if toolset is None: # False value to skip adjusting\n+ toolset = tools.msvs_toolset(self._settings)\n verbosity = os.getenv(\"CONAN_MSBUILD_VERBOSITY\") or verbosity or \"minimal\"\n if not build_type:\n raise ConanException(\"Cannot build_sln_command, build_type not defined\")\n", "issue": "MSBuild should allow \"do not adjust PlatformToolset\" with an argument\nHey,\r\n\r\nIn https://github.com/conan-io/conan/pull/4250 the default behavior of VisualStudio's toolset selection was changed to compile the whole solution using the same toolset (the `/p:PlatformToolset` is always set - see discussion here https://github.com/conan-io/conan/pull/4250#pullrequestreview-193994771).\r\n\r\nThis change prevents using two different toolsets in the same solution. In my case building a driver using the `WindowsKernelModeDriver10.0` (which is not even a valid option right now) toolset and user mode library using the `v141` toolset.\r\n\r\nI will be happy to write the code if this behavior change is accepted.\n", "before_files": [{"content": "import copy\nimport os\nimport re\nimport subprocess\n\nfrom conans.client import tools\nfrom conans.client.build.visual_environment import (VisualStudioBuildEnvironment,\n vs_build_type_flags, vs_std_cpp)\nfrom conans.client.tools.oss import cpu_count\nfrom conans.client.tools.win import vcvars_command\nfrom conans.errors import ConanException\nfrom conans.model.conan_file import ConanFile\nfrom conans.model.version import Version\nfrom conans.tools import vcvars_command as tools_vcvars_command\nfrom conans.util.env_reader import get_env\nfrom conans.util.files import decode_text, save\n\n\nclass MSBuild(object):\n\n def __init__(self, conanfile):\n if isinstance(conanfile, ConanFile):\n self._conanfile = conanfile\n self._settings = self._conanfile.settings\n self._output = self._conanfile.output\n self.build_env = VisualStudioBuildEnvironment(self._conanfile,\n with_build_type_flags=False)\n else: # backwards compatible with build_sln_command\n self._settings = conanfile\n self.build_env = None\n\n def build(self, project_file, targets=None, upgrade_project=True, build_type=None, arch=None,\n parallel=True, force_vcvars=False, toolset=None, platforms=None, use_env=True,\n vcvars_ver=None, winsdk_version=None, properties=None, output_binary_log=None,\n property_file_name=None, verbosity=None, definitions=None):\n \"\"\"\n :param project_file: Path to the .sln file.\n :param targets: List of targets to build.\n :param upgrade_project: Will call devenv to upgrade the solution to your current Visual Studio.\n :param build_type: Use a custom build type name instead of the default settings.build_type one.\n :param arch: Use a custom architecture name instead of the settings.arch one.\n It will be used to build the /p:Configuration= parameter of MSBuild.\n It can be used as the key of the platforms parameter. E.g. arch=\"x86\", platforms={\"x86\": \"i386\"}\n :param parallel: Will use the configured number of cores in the conan.conf file or tools.cpu_count():\n In the solution: Building the solution with the projects in parallel. (/m: parameter).\n CL compiler: Building the sources in parallel. 
(/MP: compiler flag)\n :param force_vcvars: Will ignore if the environment is already set for a different Visual Studio version.\n :param toolset: Specify a toolset. Will append a /p:PlatformToolset option.\n :param platforms: Dictionary with the mapping of archs/platforms from Conan naming to another one.\n It is useful for Visual Studio solutions that have a different naming in architectures.\n Example: platforms={\"x86\":\"Win32\"} (Visual solution uses \"Win32\" instead of \"x86\").\n This dictionary will update the default one:\n msvc_arch = {'x86': 'x86', 'x86_64': 'x64', 'armv7': 'ARM', 'armv8': 'ARM64'}\n :param use_env: Applies the argument /p:UseEnv=true to the MSBuild call.\n :param vcvars_ver: Specifies the Visual Studio compiler toolset to use.\n :param winsdk_version: Specifies the version of the Windows SDK to use.\n :param properties: Dictionary with new properties, for each element in the dictionary {name: value}\n it will append a /p:name=\"value\" option.\n :param output_binary_log: If set to True then MSBuild will output a binary log file called msbuild.binlog in\n the working directory. It can also be used to set the name of log file like this\n output_binary_log=\"my_log.binlog\".\n This parameter is only supported starting from MSBuild version 15.3 and onwards.\n :param property_file_name: When None it will generate a file named conan_build.props.\n You can specify a different name for the generated properties file.\n :param verbosity: Specifies verbosity level (/verbosity: parameter)\n :param definitions: Dictionary with additional compiler definitions to be applied during the build.\n Use value of None to set compiler definition with no value.\n :return: status code of the MSBuild command invocation\n \"\"\"\n\n property_file_name = property_file_name or \"conan_build.props\"\n self.build_env.parallel = parallel\n\n with tools.environment_append(self.build_env.vars):\n # Path for custom properties file\n props_file_contents = self._get_props_file_contents(definitions)\n property_file_name = os.path.abspath(property_file_name)\n save(property_file_name, props_file_contents)\n vcvars = vcvars_command(self._conanfile.settings, force=force_vcvars,\n vcvars_ver=vcvars_ver, winsdk_version=winsdk_version,\n output=self._output)\n command = self.get_command(project_file, property_file_name,\n targets=targets, upgrade_project=upgrade_project,\n build_type=build_type, arch=arch, parallel=parallel,\n toolset=toolset, platforms=platforms,\n use_env=use_env, properties=properties,\n output_binary_log=output_binary_log,\n verbosity=verbosity)\n command = \"%s && %s\" % (vcvars, command)\n return self._conanfile.run(command)\n\n def get_command(self, project_file, props_file_path=None, targets=None, upgrade_project=True,\n build_type=None, arch=None, parallel=True, toolset=None, platforms=None,\n use_env=False, properties=None, output_binary_log=None, verbosity=None):\n\n targets = targets or []\n properties = properties or {}\n command = []\n\n if upgrade_project and not get_env(\"CONAN_SKIP_VS_PROJECTS_UPGRADE\", False):\n command.append('devenv \"%s\" /upgrade &&' % project_file)\n else:\n self._output.info(\"Skipped sln project upgrade\")\n\n build_type = build_type or self._settings.get_safe(\"build_type\")\n arch = arch or self._settings.get_safe(\"arch\")\n toolset = toolset or tools.msvs_toolset(self._settings)\n verbosity = os.getenv(\"CONAN_MSBUILD_VERBOSITY\") or verbosity or \"minimal\"\n if not build_type:\n raise ConanException(\"Cannot build_sln_command, build_type 
not defined\")\n if not arch:\n raise ConanException(\"Cannot build_sln_command, arch not defined\")\n\n command.append('msbuild \"%s\" /p:Configuration=\"%s\"' % (project_file, build_type))\n msvc_arch = {'x86': 'x86',\n 'x86_64': 'x64',\n 'armv7': 'ARM',\n 'armv8': 'ARM64'}\n if platforms:\n msvc_arch.update(platforms)\n msvc_arch = msvc_arch.get(str(arch))\n if self._settings.get_safe(\"os\") == \"WindowsCE\":\n msvc_arch = self._settings.get_safe(\"os.platform\")\n try:\n sln = tools.load(project_file)\n pattern = re.compile(r\"GlobalSection\\(SolutionConfigurationPlatforms\\)\"\n r\"(.*?)EndGlobalSection\", re.DOTALL)\n solution_global = pattern.search(sln).group(1)\n lines = solution_global.splitlines()\n lines = [s.split(\"=\")[0].strip() for s in lines]\n except Exception:\n pass # TODO: !!! what are we catching here? tools.load? .group(1)? .splitlines?\n else:\n config = \"%s|%s\" % (build_type, msvc_arch)\n if config not in \"\".join(lines):\n self._output.warn(\"***** The configuration %s does not exist in this solution *****\"\n % config)\n self._output.warn(\"Use 'platforms' argument to define your architectures\")\n\n if output_binary_log:\n msbuild_version = MSBuild.get_version(self._settings)\n if msbuild_version >= \"15.3\": # http://msbuildlog.com/\n command.append('/bl' if isinstance(output_binary_log, bool)\n else '/bl:\"%s\"' % output_binary_log)\n else:\n raise ConanException(\"MSBuild version detected (%s) does not support \"\n \"'output_binary_log' ('/bl')\" % msbuild_version)\n\n if use_env:\n command.append('/p:UseEnv=true')\n\n if msvc_arch:\n command.append('/p:Platform=\"%s\"' % msvc_arch)\n\n if parallel:\n command.append('/m:%s' % cpu_count(output=self._output))\n\n if targets:\n command.append(\"/target:%s\" % \";\".join(targets))\n\n if toolset:\n command.append('/p:PlatformToolset=\"%s\"' % toolset)\n\n if verbosity:\n command.append('/verbosity:%s' % verbosity)\n\n if props_file_path:\n command.append('/p:ForceImportBeforeCppTargets=\"%s\"'\n % os.path.abspath(props_file_path))\n\n for name, value in properties.items():\n command.append('/p:%s=\"%s\"' % (name, value))\n\n return \" \".join(command)\n\n def _get_props_file_contents(self, definitions=None):\n\n def format_macro(name, value):\n return \"%s=%s\" % (name, value) if value else name\n\n # how to specify runtime in command line:\n # https://stackoverflow.com/questions/38840332/msbuild-overrides-properties-while-building-vc-project\n runtime_library = {\"MT\": \"MultiThreaded\",\n \"MTd\": \"MultiThreadedDebug\",\n \"MD\": \"MultiThreadedDLL\",\n \"MDd\": \"MultiThreadedDebugDLL\"}.get(\n self._settings.get_safe(\"compiler.runtime\"), \"\")\n\n if self.build_env:\n # Take the flags from the build env, the user was able to alter them if needed\n flags = copy.copy(self.build_env.flags)\n flags.append(self.build_env.std)\n else: # To be removed when build_sln_command is deprecated\n flags = vs_build_type_flags(self._settings, with_flags=False)\n flags.append(vs_std_cpp(self._settings))\n\n if definitions:\n definitions = \";\".join([format_macro(name, definitions[name]) for name in definitions])\n\n flags_str = \" \".join(list(filter(None, flags))) # Removes empty and None elements\n additional_node = \"<AdditionalOptions>\" \\\n \"{} %(AdditionalOptions)\" \\\n \"</AdditionalOptions>\".format(flags_str) if flags_str else \"\"\n runtime_node = \"<RuntimeLibrary>\" \\\n \"{}\" \\\n \"</RuntimeLibrary>\".format(runtime_library) if runtime_library else \"\"\n definitions_node = 
\"<PreprocessorDefinitions>\" \\\n \"{};%(PreprocessorDefinitions)\" \\\n \"</PreprocessorDefinitions>\".format(definitions) if definitions else \"\"\n template = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<Project xmlns=\"http://schemas.microsoft.com/developer/msbuild/2003\">\n <ItemDefinitionGroup>\n <ClCompile>\n {runtime_node}\n {additional_node}\n {definitions_node}\n </ClCompile>\n </ItemDefinitionGroup>\n</Project>\"\"\".format(**{\"runtime_node\": runtime_node,\n \"additional_node\": additional_node,\n \"definitions_node\": definitions_node})\n return template\n\n @staticmethod\n def get_version(settings):\n msbuild_cmd = \"msbuild -version\"\n vcvars = tools_vcvars_command(settings)\n command = \"%s && %s\" % (vcvars, msbuild_cmd)\n try:\n out, _ = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True).communicate()\n version_line = decode_text(out).split(\"\\n\")[-1]\n prog = re.compile(\"(\\d+\\.){2,3}\\d+\")\n result = prog.match(version_line).group()\n return Version(result)\n except Exception as e:\n raise ConanException(\"Error retrieving MSBuild version: '{}'\".format(e))\n", "path": "conans/client/build/msbuild.py"}]}
3,912
176
gh_patches_debug_7438
rasdani/github-patches
git_diff
ranaroussi__yfinance-1237
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> fix(sec): upgrade lxml to 4.9.1 ### What happened? There are 1 security vulnerabilities found in lxml 4.5.1 - [CVE-2022-2309](https://www.oscs1024.com/hd/CVE-2022-2309) ### What did I do? Upgrade lxml from 4.5.1 to 4.9.1 for vulnerability fix ### What did you expect to happen? Ideally, no insecure libs should be used. ### The specification of the pull request [PR Specification](https://www.oscs1024.com/docs/pr-specification/) from OSCS </issue> <code> [start of setup.py] 1 #!/usr/bin/env python 2 # -*- coding: UTF-8 -*- 3 # 4 # yfinance - market data downloader 5 # https://github.com/ranaroussi/yfinance 6 7 """yfinance - market data downloader""" 8 9 from setuptools import setup, find_packages 10 # from codecs import open 11 import io 12 from os import path 13 14 # --- get version --- 15 version = "unknown" 16 with open("yfinance/version.py") as f: 17 line = f.read().strip() 18 version = line.replace("version = ", "").replace('"', '') 19 # --- /get version --- 20 21 22 here = path.abspath(path.dirname(__file__)) 23 24 # Get the long description from the README file 25 with io.open(path.join(here, 'README.md'), encoding='utf-8') as f: 26 long_description = f.read() 27 28 setup( 29 name='yfinance', 30 version=version, 31 description='Download market data from Yahoo! Finance API', 32 long_description=long_description, 33 long_description_content_type='text/markdown', 34 url='https://github.com/ranaroussi/yfinance', 35 author='Ran Aroussi', 36 author_email='[email protected]', 37 license='Apache', 38 classifiers=[ 39 'License :: OSI Approved :: Apache Software License', 40 # 'Development Status :: 3 - Alpha', 41 # 'Development Status :: 4 - Beta', 42 'Development Status :: 5 - Production/Stable', 43 44 45 'Operating System :: OS Independent', 46 'Intended Audience :: Developers', 47 'Topic :: Office/Business :: Financial', 48 'Topic :: Office/Business :: Financial :: Investment', 49 'Topic :: Scientific/Engineering :: Interface Engine/Protocol Translator', 50 'Topic :: Software Development :: Libraries', 51 'Topic :: Software Development :: Libraries :: Python Modules', 52 53 'Programming Language :: Python :: 2.7', 54 'Programming Language :: Python :: 3.4', 55 'Programming Language :: Python :: 3.5', 56 # 'Programming Language :: Python :: 3.6', 57 'Programming Language :: Python :: 3.7', 58 'Programming Language :: Python :: 3.8', 59 'Programming Language :: Python :: 3.9', 60 ], 61 platforms=['any'], 62 keywords='pandas, yahoo finance, pandas datareader', 63 packages=find_packages(exclude=['contrib', 'docs', 'tests', 'examples']), 64 install_requires=['pandas>=1.3.0', 'numpy>=1.16.5', 65 'requests>=2.26', 'multitasking>=0.0.7', 66 'appdirs>=1.4.4'], 67 entry_points={ 68 'console_scripts': [ 69 'sample=sample:main', 70 ], 71 }, 72 ) 73 74 print(""" 75 NOTE: yfinance is not affiliated, endorsed, or vetted by Yahoo, Inc. 76 77 You should refer to Yahoo!'s terms of use for details on your rights 78 to use the actual data downloaded.""") 79 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -63,7 +63,7 @@ packages=find_packages(exclude=['contrib', 'docs', 'tests', 'examples']), install_requires=['pandas>=1.3.0', 'numpy>=1.16.5', 'requests>=2.26', 'multitasking>=0.0.7', - 'appdirs>=1.4.4'], + 'lxml>=4.9.1', 'appdirs>=1.4.4'], entry_points={ 'console_scripts': [ 'sample=sample:main',
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -63,7 +63,7 @@\n packages=find_packages(exclude=['contrib', 'docs', 'tests', 'examples']),\n install_requires=['pandas>=1.3.0', 'numpy>=1.16.5',\n 'requests>=2.26', 'multitasking>=0.0.7',\n- 'appdirs>=1.4.4'],\n+ 'lxml>=4.9.1', 'appdirs>=1.4.4'],\n entry_points={\n 'console_scripts': [\n 'sample=sample:main',\n", "issue": "fix(sec): upgrade lxml to 4.9.1\n### What happened\uff1f\nThere are 1 security vulnerabilities found in lxml 4.5.1\n- [CVE-2022-2309](https://www.oscs1024.com/hd/CVE-2022-2309)\n\n\n### What did I do\uff1f\nUpgrade lxml from 4.5.1 to 4.9.1 for vulnerability fix\n\n### What did you expect to happen\uff1f\nIdeally, no insecure libs should be used.\n\n### The specification of the pull request\n[PR Specification](https://www.oscs1024.com/docs/pr-specification/) from OSCS\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n#\n# yfinance - market data downloader\n# https://github.com/ranaroussi/yfinance\n\n\"\"\"yfinance - market data downloader\"\"\"\n\nfrom setuptools import setup, find_packages\n# from codecs import open\nimport io\nfrom os import path\n\n# --- get version ---\nversion = \"unknown\"\nwith open(\"yfinance/version.py\") as f:\n line = f.read().strip()\n version = line.replace(\"version = \", \"\").replace('\"', '')\n# --- /get version ---\n\n\nhere = path.abspath(path.dirname(__file__))\n\n# Get the long description from the README file\nwith io.open(path.join(here, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\n\nsetup(\n name='yfinance',\n version=version,\n description='Download market data from Yahoo! Finance API',\n long_description=long_description,\n long_description_content_type='text/markdown',\n url='https://github.com/ranaroussi/yfinance',\n author='Ran Aroussi',\n author_email='[email protected]',\n license='Apache',\n classifiers=[\n 'License :: OSI Approved :: Apache Software License',\n # 'Development Status :: 3 - Alpha',\n # 'Development Status :: 4 - Beta',\n 'Development Status :: 5 - Production/Stable',\n\n\n 'Operating System :: OS Independent',\n 'Intended Audience :: Developers',\n 'Topic :: Office/Business :: Financial',\n 'Topic :: Office/Business :: Financial :: Investment',\n 'Topic :: Scientific/Engineering :: Interface Engine/Protocol Translator',\n 'Topic :: Software Development :: Libraries',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n # 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n ],\n platforms=['any'],\n keywords='pandas, yahoo finance, pandas datareader',\n packages=find_packages(exclude=['contrib', 'docs', 'tests', 'examples']),\n install_requires=['pandas>=1.3.0', 'numpy>=1.16.5',\n 'requests>=2.26', 'multitasking>=0.0.7',\n 'appdirs>=1.4.4'],\n entry_points={\n 'console_scripts': [\n 'sample=sample:main',\n ],\n },\n)\n\nprint(\"\"\"\nNOTE: yfinance is not affiliated, endorsed, or vetted by Yahoo, Inc.\n\nYou should refer to Yahoo!'s terms of use for details on your rights\nto use the actual data downloaded.\"\"\")\n", "path": "setup.py"}]}
1,447
145
gh_patches_debug_12394
rasdani/github-patches
git_diff
aws__aws-cli-341
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> argparse dependency is only needed for Python 2.6 We currently have a dependency on argparse because it's not in stdlib for Python 2.6. We should make this dependency specific to 2.6 and not install it for other Python versions. </issue> <code> [start of setup.py] 1 #!/usr/bin/env python 2 import os 3 import sys 4 5 from setuptools import setup, find_packages 6 7 import awscli 8 9 10 requires = ['botocore>=0.16.0,<0.17.0', 11 'bcdoc>=0.9.0,<0.10.0', 12 'six>=1.1.0', 13 'colorama==0.2.5', 14 'argparse>=1.1', 15 'docutils>=0.10', 16 'rsa==3.1.1'] 17 18 19 setup_options = dict( 20 name='awscli', 21 version=awscli.__version__, 22 description='Universal Command Line Environment for AWS.', 23 long_description=open('README.rst').read(), 24 author='Mitch Garnaat', 25 author_email='[email protected]', 26 url='http://aws.amazon.com/cli/', 27 scripts=['bin/aws', 'bin/aws.cmd', 28 'bin/aws_completer', 'bin/aws_zsh_completer.sh'], 29 packages=find_packages('.', exclude=['tests*']), 30 package_dir={'awscli': 'awscli'}, 31 package_data={'awscli': ['data/*.json', 'examples/*/*']}, 32 install_requires=requires, 33 license=open("LICENSE.txt").read(), 34 classifiers=( 35 'Development Status :: 5 - Production/Stable', 36 'Intended Audience :: Developers', 37 'Intended Audience :: System Administrators', 38 'Natural Language :: English', 39 'License :: OSI Approved :: Apache Software License', 40 'Programming Language :: Python', 41 'Programming Language :: Python :: 2.6', 42 'Programming Language :: Python :: 2.7', 43 'Programming Language :: Python :: 3', 44 'Programming Language :: Python :: 3.3', 45 ), 46 ) 47 48 if 'py2exe' in sys.argv: 49 # This will actually give us a py2exe command. 50 import py2exe 51 # And we have some py2exe specific options. 52 setup_options['options'] = { 53 'py2exe': { 54 'optimize': 0, 55 'skip_archive': True, 56 'includes': ['ConfigParser', 'urllib', 'httplib', 57 'docutils.readers.standalone', 58 'docutils.parsers.rst', 59 'docutils.languages.en', 60 'xml.etree.ElementTree', 'HTMLParser', 61 'awscli.handlers'], 62 } 63 } 64 setup_options['console'] = ['bin/aws'] 65 66 67 setup(**setup_options) 68 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -1,5 +1,4 @@ #!/usr/bin/env python -import os import sys from setuptools import setup, find_packages @@ -11,10 +10,14 @@ 'bcdoc>=0.9.0,<0.10.0', 'six>=1.1.0', 'colorama==0.2.5', - 'argparse>=1.1', 'docutils>=0.10', 'rsa==3.1.1'] +if sys.version_info[:2] == (2, 6): + # For python2.6 we have to require argparse since it + # was not in stdlib until 2.7. + requires.append('argparse>=1.1') + setup_options = dict( name='awscli',
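The golden diff expresses the constraint imperatively with a `sys.version_info` check at setup time. As an aside, on toolchains new enough to understand PEP 508 environment markers the same intent can be declared in the requirement string itself; this is a hypothetical alternative shown only for contrast, not what the accepted patch does:

```python
# Hypothetical marker-based variant (requires a setuptools/pip that supports
# PEP 508 environment markers); shown only to contrast with the runtime check.
requires = [
    'botocore>=0.16.0,<0.17.0',
    'six>=1.1.0',
    'argparse>=1.1; python_version == "2.6"',  # pulled in only on Python 2.6
]
```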
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,5 +1,4 @@\n #!/usr/bin/env python\n-import os\n import sys\n \n from setuptools import setup, find_packages\n@@ -11,10 +10,14 @@\n 'bcdoc>=0.9.0,<0.10.0',\n 'six>=1.1.0',\n 'colorama==0.2.5',\n- 'argparse>=1.1',\n 'docutils>=0.10',\n 'rsa==3.1.1']\n \n+if sys.version_info[:2] == (2, 6):\n+ # For python2.6 we have to require argparse since it\n+ # was not in stdlib until 2.7.\n+ requires.append('argparse>=1.1')\n+\n \n setup_options = dict(\n name='awscli',\n", "issue": "argparse dependency is only needed for Python 2.6\nWe currently have a dependency on argparse because it's not in stdlib for Python 2.6. We should make this dependency specific to 2.6 and not install it for other Python versions.\n\n", "before_files": [{"content": "#!/usr/bin/env python\nimport os\nimport sys\n\nfrom setuptools import setup, find_packages\n\nimport awscli\n\n\nrequires = ['botocore>=0.16.0,<0.17.0',\n 'bcdoc>=0.9.0,<0.10.0',\n 'six>=1.1.0',\n 'colorama==0.2.5',\n 'argparse>=1.1',\n 'docutils>=0.10',\n 'rsa==3.1.1']\n\n\nsetup_options = dict(\n name='awscli',\n version=awscli.__version__,\n description='Universal Command Line Environment for AWS.',\n long_description=open('README.rst').read(),\n author='Mitch Garnaat',\n author_email='[email protected]',\n url='http://aws.amazon.com/cli/',\n scripts=['bin/aws', 'bin/aws.cmd',\n 'bin/aws_completer', 'bin/aws_zsh_completer.sh'],\n packages=find_packages('.', exclude=['tests*']),\n package_dir={'awscli': 'awscli'},\n package_data={'awscli': ['data/*.json', 'examples/*/*']},\n install_requires=requires,\n license=open(\"LICENSE.txt\").read(),\n classifiers=(\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Natural Language :: English',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n ),\n)\n\nif 'py2exe' in sys.argv:\n # This will actually give us a py2exe command.\n import py2exe\n # And we have some py2exe specific options.\n setup_options['options'] = {\n 'py2exe': {\n 'optimize': 0,\n 'skip_archive': True,\n 'includes': ['ConfigParser', 'urllib', 'httplib',\n 'docutils.readers.standalone',\n 'docutils.parsers.rst',\n 'docutils.languages.en',\n 'xml.etree.ElementTree', 'HTMLParser',\n 'awscli.handlers'],\n }\n }\n setup_options['console'] = ['bin/aws']\n\n\nsetup(**setup_options)\n", "path": "setup.py"}]}
1,239
205
gh_patches_debug_31754
rasdani/github-patches
git_diff
dask__distributed-4530
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Accessing `ipython` on workers through `client.start_ipython_workers()` I am trying to see if I can run ipython on workers interactively. The first example in the documentation (https://distributed.dask.org/en/latest/ipython.html) works, while the second one is not working for me. Here's the minimal code to reproduce: ```python from dask.distributed import Client client = Client() info = client.start_ipython_workers(magic_names="w*") %w0 1+1 # this gives TimeoutError: Timeout waiting for IPython output ``` This is more of a curiosity for me, just trying to explore what is possible. I posted it also as a question on StackOverflow: https://stackoverflow.com/questions/66167619/how-to-use-client-start-ipython-workers-in-dask-distributed **Environment**: - Dask version: 2021.02.0 - Python version: Python 3.7.9 - Operating System: MacOS Catalina - Install method (conda, pip, source): conda </issue> <code> [start of distributed/_ipython_utils.py] 1 """Utilities for integrating with IPython 2 3 These functions should probably reside in Jupyter and IPython repositories, 4 after which we can import them instead of having our own definitions. 5 """ 6 7 import atexit 8 import os 9 10 try: 11 import queue 12 except ImportError: 13 # Python 2 14 import Queue as queue 15 from subprocess import Popen 16 import sys 17 from threading import Thread 18 from uuid import uuid4 19 20 from tornado.gen import TimeoutError 21 from tornado.ioloop import IOLoop 22 from threading import Event 23 24 from IPython import get_ipython 25 from jupyter_client import BlockingKernelClient, write_connection_file 26 from jupyter_core.paths import jupyter_runtime_dir 27 28 29 OUTPUT_TIMEOUT = 10 30 31 32 def run_cell_remote(ip, kc, cell): 33 """Run a cell on a KernelClient 34 35 Any output from the cell will be redisplayed in the local session. 36 """ 37 msg_id = kc.execute(cell) 38 39 in_kernel = getattr(ip, "kernel", False) 40 if in_kernel: 41 socket = ip.display_pub.pub_socket 42 session = ip.display_pub.session 43 parent_header = ip.display_pub.parent_header 44 45 while True: 46 try: 47 msg = kc.get_iopub_msg(timeout=OUTPUT_TIMEOUT) 48 except queue.Empty: 49 raise TimeoutError("Timeout waiting for IPython output") 50 51 if msg["parent_header"].get("msg_id") != msg_id: 52 continue 53 msg_type = msg["header"]["msg_type"] 54 content = msg["content"] 55 if msg_type == "status": 56 if content["execution_state"] == "idle": 57 # idle means output is done 58 break 59 elif msg_type == "stream": 60 stream = getattr(sys, content["name"]) 61 stream.write(content["text"]) 62 elif msg_type in ("display_data", "execute_result", "error"): 63 if in_kernel: 64 session.send(socket, msg_type, content, parent=parent_header) 65 else: 66 if msg_type == "error": 67 print("\n".join(content["traceback"]), file=sys.stderr) 68 else: 69 sys.stdout.write(content["data"].get("text/plain", "")) 70 else: 71 pass 72 73 74 def register_worker_magic(connection_info, magic_name="worker"): 75 """Register a %worker magic, given connection_info. 76 77 Both a line and cell magic are registered, 78 which run the given cell in a remote kernel. 
79 """ 80 ip = get_ipython() 81 info = dict(connection_info) # copy 82 key = info.pop("key") 83 kc = BlockingKernelClient(**connection_info) 84 kc.session.key = key 85 kc.start_channels() 86 87 def remote(line, cell=None): 88 """Run the current cell on a remote IPython kernel""" 89 if cell is None: 90 # both line and cell magic 91 cell = line 92 run_cell_remote(ip, kc, cell) 93 94 remote.client = kc # preserve reference on kc, largely for mocking 95 ip.register_magic_function(remote, magic_kind="line", magic_name=magic_name) 96 ip.register_magic_function(remote, magic_kind="cell", magic_name=magic_name) 97 98 99 def remote_magic(line, cell=None): 100 """A magic for running code on a specified remote worker 101 102 The connection_info dict of the worker will be looked up 103 as the first positional arg to the magic. 104 The rest of the line (or the entire cell for a %%cell magic) 105 will be passed to the remote kernel. 106 107 Usage: 108 109 info = e.start_ipython(worker)[worker] 110 %remote info print(worker.data) 111 """ 112 # get connection info from IPython's user namespace 113 ip = get_ipython() 114 split_line = line.split(None, 1) 115 info_name = split_line[0] 116 if info_name not in ip.user_ns: 117 raise NameError(info_name) 118 connection_info = dict(ip.user_ns[info_name]) 119 120 if not cell: # line magic, use the rest of the line 121 if len(split_line) == 1: 122 raise ValueError("I need some code to run!") 123 cell = split_line[1] 124 125 # turn info dict to hashable str for use as lookup key in _clients cache 126 key = ",".join(map(str, sorted(connection_info.items()))) 127 session_key = connection_info.pop("key") 128 129 if key in remote_magic._clients: 130 kc = remote_magic._clients[key] 131 else: 132 kc = BlockingKernelClient(**connection_info) 133 kc.session.key = session_key 134 kc.start_channels() 135 kc.wait_for_ready(timeout=10) 136 remote_magic._clients[key] = kc 137 138 # actually run the code 139 run_cell_remote(ip, kc, cell) 140 141 142 # cache clients for re-use in remote magic 143 remote_magic._clients = {} 144 145 146 def register_remote_magic(magic_name="remote"): 147 """Define the parameterized %remote magic 148 149 See remote_magic above for details. 150 """ 151 ip = get_ipython() 152 if ip is None: 153 return # do nothing if IPython's not running 154 ip.register_magic_function(remote_magic, magic_kind="line", magic_name=magic_name) 155 ip.register_magic_function(remote_magic, magic_kind="cell", magic_name=magic_name) 156 157 158 def connect_qtconsole(connection_info, name=None, extra_args=None): 159 """Open a QtConsole connected to a worker who has the given future 160 161 - identify worker with who_has 162 - start IPython kernel on the worker 163 - start qtconsole connected to the kernel 164 """ 165 runtime_dir = jupyter_runtime_dir() 166 if name is None: 167 name = uuid4().hex 168 169 path = os.path.join(runtime_dir, name + ".json") 170 write_connection_file(path, **connection_info) 171 cmd = ["jupyter", "qtconsole", "--existing", path] 172 if extra_args: 173 cmd.extend(extra_args) 174 Popen(cmd) 175 176 @atexit.register 177 def _cleanup_connection_file(): 178 """Cleanup our connection file when we exit.""" 179 try: 180 os.remove(path) 181 except OSError: 182 pass 183 184 185 def start_ipython(ip=None, ns=None, log=None): 186 """Start an IPython kernel in a thread 187 188 Parameters 189 ---------- 190 ip : str 191 The IP address to listen on (likely the parent object's ip). 192 ns : dict 193 Any names that should be injected into the IPython namespace. 
194 log : logger instance 195 Hook up IPython's logging to an existing logger instead of the default. 196 """ 197 from IPython import get_ipython 198 199 if get_ipython() is not None: 200 raise RuntimeError("Cannot start IPython, it's already running.") 201 202 from zmq.eventloop.ioloop import ZMQIOLoop 203 from ipykernel.kernelapp import IPKernelApp 204 205 # save the global IOLoop instance 206 # since IPython relies on it, but we are going to put it in a thread. 207 save_inst = IOLoop.instance() 208 IOLoop.clear_instance() 209 zmq_loop = ZMQIOLoop() 210 zmq_loop.install() 211 212 # start IPython, disabling its signal handlers that won't work due to running in a thread: 213 app = IPKernelApp.instance(log=log) 214 # Don't connect to the history database 215 app.config.HistoryManager.hist_file = ":memory:" 216 # listen on all interfaces, so remote clients can connect: 217 if ip: 218 app.ip = ip 219 # disable some signal handling, logging 220 221 def noop(): 222 return None 223 224 app.init_signal = noop 225 app.log_connection_info = noop 226 227 # start IPython in a thread 228 # initialization happens in the thread to avoid threading problems 229 # with the sqlite history 230 evt = Event() 231 232 def _start(): 233 app.initialize([]) 234 app.kernel.pre_handler_hook = noop 235 app.kernel.post_handler_hook = noop 236 app.kernel.start() 237 app.kernel.loop = IOLoop.instance() 238 # save self in the IPython namespace as 'worker' 239 # inject things into the IPython namespace 240 if ns: 241 app.kernel.shell.user_ns.update(ns) 242 evt.set() 243 zmq_loop.start() 244 245 zmq_loop_thread = Thread(target=_start) 246 zmq_loop_thread.daemon = True 247 zmq_loop_thread.start() 248 assert evt.wait(timeout=5), "IPython didn't start in a reasonable amount of time." 249 250 # put the global IOLoop instance back: 251 IOLoop.clear_instance() 252 save_inst.install() 253 return app 254 [end of distributed/_ipython_utils.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/distributed/_ipython_utils.py b/distributed/_ipython_utils.py --- a/distributed/_ipython_utils.py +++ b/distributed/_ipython_utils.py @@ -199,16 +199,8 @@ if get_ipython() is not None: raise RuntimeError("Cannot start IPython, it's already running.") - from zmq.eventloop.ioloop import ZMQIOLoop from ipykernel.kernelapp import IPKernelApp - # save the global IOLoop instance - # since IPython relies on it, but we are going to put it in a thread. - save_inst = IOLoop.instance() - IOLoop.clear_instance() - zmq_loop = ZMQIOLoop() - zmq_loop.install() - # start IPython, disabling its signal handlers that won't work due to running in a thread: app = IPKernelApp.instance(log=log) # Don't connect to the history database @@ -234,20 +226,17 @@ app.kernel.pre_handler_hook = noop app.kernel.post_handler_hook = noop app.kernel.start() - app.kernel.loop = IOLoop.instance() # save self in the IPython namespace as 'worker' # inject things into the IPython namespace if ns: app.kernel.shell.user_ns.update(ns) evt.set() - zmq_loop.start() + # start the app's IOLoop in its thread + IOLoop.current().start() zmq_loop_thread = Thread(target=_start) zmq_loop_thread.daemon = True zmq_loop_thread.start() assert evt.wait(timeout=5), "IPython didn't start in a reasonable amount of time." - # put the global IOLoop instance back: - IOLoop.clear_instance() - save_inst.install() return app
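Stripped of the IPython and ZMQ specifics, the structure the patched `start_ipython` keeps is: do blocking setup in a daemon thread, signal readiness with an `Event`, then let the thread sit in its event loop. A stdlib-only sketch of that pattern, with a trivial stand-in for the kernel's loop:

```python
# Generic "initialize in a background thread, signal readiness" sketch;
# uses only the standard library, no IPython or Tornado required.
import threading

def start_service(init, run_forever, timeout=5):
    ready = threading.Event()

    def _target():
        init()          # e.g. app.initialize([]) in the real code
        ready.set()     # unblock the caller once setup is done
        run_forever()   # e.g. IOLoop.current().start(); blocks until stopped

    thread = threading.Thread(target=_target, daemon=True)
    thread.start()
    if not ready.wait(timeout=timeout):
        raise RuntimeError("service did not start within %ss" % timeout)
    return thread

# Usage with a dummy "service" whose loop is just waiting on an Event:
stop = threading.Event()
worker = start_service(init=lambda: None, run_forever=stop.wait)
stop.set()            # shut the dummy loop down
worker.join(timeout=1)
```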
{"golden_diff": "diff --git a/distributed/_ipython_utils.py b/distributed/_ipython_utils.py\n--- a/distributed/_ipython_utils.py\n+++ b/distributed/_ipython_utils.py\n@@ -199,16 +199,8 @@\n if get_ipython() is not None:\n raise RuntimeError(\"Cannot start IPython, it's already running.\")\n \n- from zmq.eventloop.ioloop import ZMQIOLoop\n from ipykernel.kernelapp import IPKernelApp\n \n- # save the global IOLoop instance\n- # since IPython relies on it, but we are going to put it in a thread.\n- save_inst = IOLoop.instance()\n- IOLoop.clear_instance()\n- zmq_loop = ZMQIOLoop()\n- zmq_loop.install()\n-\n # start IPython, disabling its signal handlers that won't work due to running in a thread:\n app = IPKernelApp.instance(log=log)\n # Don't connect to the history database\n@@ -234,20 +226,17 @@\n app.kernel.pre_handler_hook = noop\n app.kernel.post_handler_hook = noop\n app.kernel.start()\n- app.kernel.loop = IOLoop.instance()\n # save self in the IPython namespace as 'worker'\n # inject things into the IPython namespace\n if ns:\n app.kernel.shell.user_ns.update(ns)\n evt.set()\n- zmq_loop.start()\n+ # start the app's IOLoop in its thread\n+ IOLoop.current().start()\n \n zmq_loop_thread = Thread(target=_start)\n zmq_loop_thread.daemon = True\n zmq_loop_thread.start()\n assert evt.wait(timeout=5), \"IPython didn't start in a reasonable amount of time.\"\n \n- # put the global IOLoop instance back:\n- IOLoop.clear_instance()\n- save_inst.install()\n return app\n", "issue": "Accessing `ipython` on workers through `client.start_ipython_workers()`\nI am trying to see if I can run ipython on workers interactively. The first example in the documentation (https://distributed.dask.org/en/latest/ipython.html) works, while the second one is not working for me. Here's the minimal code to reproduce:\r\n\r\n```python\r\nfrom dask.distributed import Client\r\nclient = Client()\r\n\r\ninfo = client.start_ipython_workers(magic_names=\"w*\")\r\n%w0 1+1\r\n# this gives TimeoutError: Timeout waiting for IPython output\r\n```\r\n\r\nThis is more of a curiosity for me, just trying to explore what is possible. 
I posted it also as a question on StackOverflow: https://stackoverflow.com/questions/66167619/how-to-use-client-start-ipython-workers-in-dask-distributed\r\n\r\n**Environment**:\r\n\r\n- Dask version: 2021.02.0\r\n- Python version: Python 3.7.9\r\n- Operating System: MacOS Catalina\r\n- Install method (conda, pip, source): conda\r\n\n", "before_files": [{"content": "\"\"\"Utilities for integrating with IPython\n\nThese functions should probably reside in Jupyter and IPython repositories,\nafter which we can import them instead of having our own definitions.\n\"\"\"\n\nimport atexit\nimport os\n\ntry:\n import queue\nexcept ImportError:\n # Python 2\n import Queue as queue\nfrom subprocess import Popen\nimport sys\nfrom threading import Thread\nfrom uuid import uuid4\n\nfrom tornado.gen import TimeoutError\nfrom tornado.ioloop import IOLoop\nfrom threading import Event\n\nfrom IPython import get_ipython\nfrom jupyter_client import BlockingKernelClient, write_connection_file\nfrom jupyter_core.paths import jupyter_runtime_dir\n\n\nOUTPUT_TIMEOUT = 10\n\n\ndef run_cell_remote(ip, kc, cell):\n \"\"\"Run a cell on a KernelClient\n\n Any output from the cell will be redisplayed in the local session.\n \"\"\"\n msg_id = kc.execute(cell)\n\n in_kernel = getattr(ip, \"kernel\", False)\n if in_kernel:\n socket = ip.display_pub.pub_socket\n session = ip.display_pub.session\n parent_header = ip.display_pub.parent_header\n\n while True:\n try:\n msg = kc.get_iopub_msg(timeout=OUTPUT_TIMEOUT)\n except queue.Empty:\n raise TimeoutError(\"Timeout waiting for IPython output\")\n\n if msg[\"parent_header\"].get(\"msg_id\") != msg_id:\n continue\n msg_type = msg[\"header\"][\"msg_type\"]\n content = msg[\"content\"]\n if msg_type == \"status\":\n if content[\"execution_state\"] == \"idle\":\n # idle means output is done\n break\n elif msg_type == \"stream\":\n stream = getattr(sys, content[\"name\"])\n stream.write(content[\"text\"])\n elif msg_type in (\"display_data\", \"execute_result\", \"error\"):\n if in_kernel:\n session.send(socket, msg_type, content, parent=parent_header)\n else:\n if msg_type == \"error\":\n print(\"\\n\".join(content[\"traceback\"]), file=sys.stderr)\n else:\n sys.stdout.write(content[\"data\"].get(\"text/plain\", \"\"))\n else:\n pass\n\n\ndef register_worker_magic(connection_info, magic_name=\"worker\"):\n \"\"\"Register a %worker magic, given connection_info.\n\n Both a line and cell magic are registered,\n which run the given cell in a remote kernel.\n \"\"\"\n ip = get_ipython()\n info = dict(connection_info) # copy\n key = info.pop(\"key\")\n kc = BlockingKernelClient(**connection_info)\n kc.session.key = key\n kc.start_channels()\n\n def remote(line, cell=None):\n \"\"\"Run the current cell on a remote IPython kernel\"\"\"\n if cell is None:\n # both line and cell magic\n cell = line\n run_cell_remote(ip, kc, cell)\n\n remote.client = kc # preserve reference on kc, largely for mocking\n ip.register_magic_function(remote, magic_kind=\"line\", magic_name=magic_name)\n ip.register_magic_function(remote, magic_kind=\"cell\", magic_name=magic_name)\n\n\ndef remote_magic(line, cell=None):\n \"\"\"A magic for running code on a specified remote worker\n\n The connection_info dict of the worker will be looked up\n as the first positional arg to the magic.\n The rest of the line (or the entire cell for a %%cell magic)\n will be passed to the remote kernel.\n\n Usage:\n\n info = e.start_ipython(worker)[worker]\n %remote info print(worker.data)\n \"\"\"\n # get connection info from 
IPython's user namespace\n ip = get_ipython()\n split_line = line.split(None, 1)\n info_name = split_line[0]\n if info_name not in ip.user_ns:\n raise NameError(info_name)\n connection_info = dict(ip.user_ns[info_name])\n\n if not cell: # line magic, use the rest of the line\n if len(split_line) == 1:\n raise ValueError(\"I need some code to run!\")\n cell = split_line[1]\n\n # turn info dict to hashable str for use as lookup key in _clients cache\n key = \",\".join(map(str, sorted(connection_info.items())))\n session_key = connection_info.pop(\"key\")\n\n if key in remote_magic._clients:\n kc = remote_magic._clients[key]\n else:\n kc = BlockingKernelClient(**connection_info)\n kc.session.key = session_key\n kc.start_channels()\n kc.wait_for_ready(timeout=10)\n remote_magic._clients[key] = kc\n\n # actually run the code\n run_cell_remote(ip, kc, cell)\n\n\n# cache clients for re-use in remote magic\nremote_magic._clients = {}\n\n\ndef register_remote_magic(magic_name=\"remote\"):\n \"\"\"Define the parameterized %remote magic\n\n See remote_magic above for details.\n \"\"\"\n ip = get_ipython()\n if ip is None:\n return # do nothing if IPython's not running\n ip.register_magic_function(remote_magic, magic_kind=\"line\", magic_name=magic_name)\n ip.register_magic_function(remote_magic, magic_kind=\"cell\", magic_name=magic_name)\n\n\ndef connect_qtconsole(connection_info, name=None, extra_args=None):\n \"\"\"Open a QtConsole connected to a worker who has the given future\n\n - identify worker with who_has\n - start IPython kernel on the worker\n - start qtconsole connected to the kernel\n \"\"\"\n runtime_dir = jupyter_runtime_dir()\n if name is None:\n name = uuid4().hex\n\n path = os.path.join(runtime_dir, name + \".json\")\n write_connection_file(path, **connection_info)\n cmd = [\"jupyter\", \"qtconsole\", \"--existing\", path]\n if extra_args:\n cmd.extend(extra_args)\n Popen(cmd)\n\n @atexit.register\n def _cleanup_connection_file():\n \"\"\"Cleanup our connection file when we exit.\"\"\"\n try:\n os.remove(path)\n except OSError:\n pass\n\n\ndef start_ipython(ip=None, ns=None, log=None):\n \"\"\"Start an IPython kernel in a thread\n\n Parameters\n ----------\n ip : str\n The IP address to listen on (likely the parent object's ip).\n ns : dict\n Any names that should be injected into the IPython namespace.\n log : logger instance\n Hook up IPython's logging to an existing logger instead of the default.\n \"\"\"\n from IPython import get_ipython\n\n if get_ipython() is not None:\n raise RuntimeError(\"Cannot start IPython, it's already running.\")\n\n from zmq.eventloop.ioloop import ZMQIOLoop\n from ipykernel.kernelapp import IPKernelApp\n\n # save the global IOLoop instance\n # since IPython relies on it, but we are going to put it in a thread.\n save_inst = IOLoop.instance()\n IOLoop.clear_instance()\n zmq_loop = ZMQIOLoop()\n zmq_loop.install()\n\n # start IPython, disabling its signal handlers that won't work due to running in a thread:\n app = IPKernelApp.instance(log=log)\n # Don't connect to the history database\n app.config.HistoryManager.hist_file = \":memory:\"\n # listen on all interfaces, so remote clients can connect:\n if ip:\n app.ip = ip\n # disable some signal handling, logging\n\n def noop():\n return None\n\n app.init_signal = noop\n app.log_connection_info = noop\n\n # start IPython in a thread\n # initialization happens in the thread to avoid threading problems\n # with the sqlite history\n evt = Event()\n\n def _start():\n app.initialize([])\n 
app.kernel.pre_handler_hook = noop\n app.kernel.post_handler_hook = noop\n app.kernel.start()\n app.kernel.loop = IOLoop.instance()\n # save self in the IPython namespace as 'worker'\n # inject things into the IPython namespace\n if ns:\n app.kernel.shell.user_ns.update(ns)\n evt.set()\n zmq_loop.start()\n\n zmq_loop_thread = Thread(target=_start)\n zmq_loop_thread.daemon = True\n zmq_loop_thread.start()\n assert evt.wait(timeout=5), \"IPython didn't start in a reasonable amount of time.\"\n\n # put the global IOLoop instance back:\n IOLoop.clear_instance()\n save_inst.install()\n return app\n", "path": "distributed/_ipython_utils.py"}]}
3,256
414
gh_patches_debug_11235
rasdani/github-patches
git_diff
saleor__saleor-5311
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Broken multiple interface notation in schema ### What I'm trying to achieve To use Apollo tooling to generate TS types for the application queries. However, it fails because Saleor's schema uses comma as a separator instead of ampersand. More: https://github.com/apollographql/apollo-tooling/issues/434 ### Steps to reproduce the problem 1. Go to mirumee/saleor-dashboard repository and clone it 2. Copy schema from core to dashboard 3. `npm run build-types` 4. Notice that it fails at multiple interface implementation. </issue> <code> [start of saleor/graphql/management/commands/get_graphql_schema.py] 1 from django.core.management.base import BaseCommand 2 from graphql import print_schema 3 4 from ...api import schema 5 6 7 class Command(BaseCommand): 8 help = "Writes SDL for GraphQL API schema to stdout" 9 10 def handle(self, *args, **options): 11 self.stdout.write(print_schema(schema)) 12 [end of saleor/graphql/management/commands/get_graphql_schema.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/saleor/graphql/management/commands/get_graphql_schema.py b/saleor/graphql/management/commands/get_graphql_schema.py --- a/saleor/graphql/management/commands/get_graphql_schema.py +++ b/saleor/graphql/management/commands/get_graphql_schema.py @@ -8,4 +8,14 @@ help = "Writes SDL for GraphQL API schema to stdout" def handle(self, *args, **options): - self.stdout.write(print_schema(schema)) + """Support multiple interface notation in schema for Apollo tooling. + + In `graphql-core` V2 separator for interaces is `,`. + Apollo tooling to generate TypeScript types using `&` as interfaces separator. + https://github.com/graphql-python/graphql-core/pull/258 + """ + printed_schema = print_schema(schema) + for line in printed_schema.splitlines(): + if "implements" in line: + line = line.replace(",", " &") + self.stdout.write(f"{line}\n")
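Concretely, the loop in the patch rewrites each `implements` line of the printed SDL; a small illustration with a representative line (the type below is hypothetical, not copied from Saleor's schema):

```python
# graphql-core v2 prints multiple interfaces separated by commas; Apollo
# tooling expects ampersands, so the patch rewrites those lines.
line = "type Product implements Node, ObjectWithMetadata {"

if "implements" in line:
    line = line.replace(",", " &")

print(line)
# type Product implements Node & ObjectWithMetadata {
```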
{"golden_diff": "diff --git a/saleor/graphql/management/commands/get_graphql_schema.py b/saleor/graphql/management/commands/get_graphql_schema.py\n--- a/saleor/graphql/management/commands/get_graphql_schema.py\n+++ b/saleor/graphql/management/commands/get_graphql_schema.py\n@@ -8,4 +8,14 @@\n help = \"Writes SDL for GraphQL API schema to stdout\"\n \n def handle(self, *args, **options):\n- self.stdout.write(print_schema(schema))\n+ \"\"\"Support multiple interface notation in schema for Apollo tooling.\n+\n+ In `graphql-core` V2 separator for interaces is `,`.\n+ Apollo tooling to generate TypeScript types using `&` as interfaces separator.\n+ https://github.com/graphql-python/graphql-core/pull/258\n+ \"\"\"\n+ printed_schema = print_schema(schema)\n+ for line in printed_schema.splitlines():\n+ if \"implements\" in line:\n+ line = line.replace(\",\", \" &\")\n+ self.stdout.write(f\"{line}\\n\")\n", "issue": "Broken multiple interface notation in schema\n### What I'm trying to achieve\r\nTo use Apollo tooling to generate TS types for the application queries. However, it fails because Saleor's schema uses comma as a separator instead of ampersand. More: https://github.com/apollographql/apollo-tooling/issues/434 \r\n\r\n### Steps to reproduce the problem\r\n1. Go to mirumee/saleor-dashboard repository and clone it\r\n2. Copy schema from core to dashboard\r\n3. `npm run build-types`\r\n4. Notice that it fails at multiple interface implementation.\n", "before_files": [{"content": "from django.core.management.base import BaseCommand\nfrom graphql import print_schema\n\nfrom ...api import schema\n\n\nclass Command(BaseCommand):\n help = \"Writes SDL for GraphQL API schema to stdout\"\n\n def handle(self, *args, **options):\n self.stdout.write(print_schema(schema))\n", "path": "saleor/graphql/management/commands/get_graphql_schema.py"}]}
747
226
gh_patches_debug_10332
rasdani/github-patches
git_diff
ipython__ipython-1882
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> cython_magic uses importlib, which doesn't ship with py2.6 Sorry, I don't have time to fix this right now, but wanted to leave a report. `importlib` was not in the standard library for python2.6, though [it has been backported](http://pypi.python.org/pypi/importlib/1.0.1). Trying to run `%load_ext cythonmagic` results in this traceback: ``` /ipython/IPython/extensions/cythonmagic.py in <module>() 18 import io 19 import os, sys ---> 20 from importlib import import_module 21 import imp 22 ImportError: No module named importlib ``` not sure if we should make people install it themselves, or if it's better to just put a copy in `IPython.external` </issue> <code> [start of IPython/extensions/cythonmagic.py] 1 # -*- coding: utf-8 -*- 2 """ 3 Cython related magics. 4 5 Author: 6 * Brian Granger 7 8 Parts of this code were taken from Cython.inline. 9 """ 10 #----------------------------------------------------------------------------- 11 # Copyright (C) 2010-2011, IPython Development Team. 12 # 13 # Distributed under the terms of the Modified BSD License. 14 # 15 # The full license is in the file COPYING.txt, distributed with this software. 16 #----------------------------------------------------------------------------- 17 18 import io 19 import os, sys 20 from importlib import import_module 21 import imp 22 23 try: 24 import hashlib 25 except ImportError: 26 import md5 as hashlib 27 28 from distutils.core import Distribution, Extension 29 from distutils.command.build_ext import build_ext 30 31 from IPython.core.magic import Magics, magics_class, cell_magic 32 from IPython.testing.skipdoctest import skip_doctest 33 from IPython.core.magic_arguments import ( 34 argument, magic_arguments, parse_argstring 35 ) 36 from IPython.utils import py3compat 37 38 import Cython 39 from Cython.Compiler.Errors import CompileError 40 from Cython.Compiler.Main import Context, default_options 41 from Cython.Build.Dependencies import cythonize 42 43 44 @magics_class 45 class CythonMagics(Magics): 46 47 def __init__(self, shell): 48 super(CythonMagics,self).__init__(shell) 49 self._reloads = {} 50 self._code_cache = {} 51 52 def _import_all(self, module): 53 for k,v in module.__dict__.items(): 54 if not k.startswith('__'): 55 self.shell.push({k:v}) 56 57 @cell_magic 58 def cython_inline(self, line, cell): 59 """Compile and run a Cython code cell using Cython.inline. 60 61 This magic simply passes the body of the cell to Cython.inline 62 and returns the result. If the variables `a` and `b` are defined 63 in the user's namespace, here is a simple example that returns 64 their sum:: 65 66 %%cython_inline 67 return a+b 68 69 For most purposes, we recommend the usage of the `%%cython` magic. 70 """ 71 locs = self.shell.user_global_ns 72 globs = self.shell.user_ns 73 return Cython.inline(cell, locals=locs, globals=globs) 74 75 @cell_magic 76 def cython_pyximport(self, line, cell): 77 """Compile and import a Cython code cell using pyximport. 78 79 The contents of the cell are written to a `.pyx` file in the current 80 working directory, which is then imported using `pyximport`. This 81 magic requires a module name to be passed:: 82 83 %%cython_pyximport modulename 84 def f(x): 85 return 2.0*x 86 87 The compiled module is then imported and all of its symbols are injected into 88 the user's namespace. For most purposes, we recommend the usage of the 89 `%%cython` magic. 
90 """ 91 module_name = line.strip() 92 if not module_name: 93 raise ValueError('module name must be given') 94 fname = module_name + '.pyx' 95 with io.open(fname, 'w', encoding='utf-8') as f: 96 f.write(cell) 97 if 'pyximport' not in sys.modules: 98 import pyximport 99 pyximport.install(reload_support=True) 100 if module_name in self._reloads: 101 module = self._reloads[module_name] 102 reload(module) 103 else: 104 module = import_module(module_name) 105 self._reloads[module_name] = module 106 self._import_all(module) 107 108 @magic_arguments() 109 @argument( 110 '-f', '--force', action='store_true', default=False, 111 help="Force the compilation of the pyx module even if it hasn't changed" 112 ) 113 @cell_magic 114 def cython(self, line, cell): 115 """Compile and import everything from a Cython code cell. 116 117 The contents of the cell are written to a `.pyx` file in the 118 directory `IPYTHONDIR/cython` using a filename with the hash of the code. 119 This file is then cythonized and compiled. The resulting module 120 is imported and all of its symbols are injected into the user's 121 namespace. The usage is similar to that of `%%cython_pyximport` but 122 you don't have to pass a module name:: 123 124 %%cython 125 def f(x): 126 return 2.0*x 127 """ 128 args = parse_argstring(self.cython, line) 129 code = cell if cell.endswith('\n') else cell+'\n' 130 lib_dir=os.path.join(self.shell.ipython_dir, 'cython') 131 cython_include_dirs=['.'] 132 force=args.force 133 quiet=True 134 ctx = Context(cython_include_dirs, default_options) 135 key = code, sys.version_info, sys.executable, Cython.__version__ 136 module_name = "_cython_magic_" + hashlib.md5(str(key).encode('utf-8')).hexdigest() 137 so_ext = [ ext for ext,_,mod_type in imp.get_suffixes() if mod_type == imp.C_EXTENSION ][0] 138 module_path = os.path.join(lib_dir, module_name+so_ext) 139 140 if not os.path.exists(lib_dir): 141 os.makedirs(lib_dir) 142 143 if force or not os.path.isfile(module_path): 144 cflags = [] 145 c_include_dirs = [] 146 if 'numpy' in code: 147 import numpy 148 c_include_dirs.append(numpy.get_include()) 149 pyx_file = os.path.join(lib_dir, module_name + '.pyx') 150 pyx_file = py3compat.cast_bytes_py2(pyx_file, encoding=sys.getfilesystemencoding()) 151 with io.open(pyx_file, 'w', encoding='utf-8') as f: 152 f.write(code) 153 extension = Extension( 154 name = module_name, 155 sources = [pyx_file], 156 include_dirs = c_include_dirs, 157 extra_compile_args = cflags 158 ) 159 dist = Distribution() 160 config_files = dist.find_config_files() 161 try: 162 config_files.remove('setup.cfg') 163 except ValueError: 164 pass 165 dist.parse_config_files(config_files) 166 build_extension = build_ext(dist) 167 build_extension.finalize_options() 168 try: 169 build_extension.extensions = cythonize([extension], ctx=ctx, quiet=quiet) 170 except CompileError: 171 return 172 build_extension.build_temp = os.path.dirname(pyx_file) 173 build_extension.build_lib = lib_dir 174 build_extension.run() 175 self._code_cache[key] = module_name 176 177 module = imp.load_dynamic(module_name, module_path) 178 self._import_all(module) 179 180 181 _loaded = False 182 183 def load_ipython_extension(ip): 184 """Load the extension in IPython.""" 185 global _loaded 186 if not _loaded: 187 ip.register_magics(CythonMagics) 188 _loaded = True 189 [end of IPython/extensions/cythonmagic.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. 
Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/IPython/extensions/cythonmagic.py b/IPython/extensions/cythonmagic.py --- a/IPython/extensions/cythonmagic.py +++ b/IPython/extensions/cythonmagic.py @@ -17,7 +17,6 @@ import io import os, sys -from importlib import import_module import imp try: @@ -101,7 +100,8 @@ module = self._reloads[module_name] reload(module) else: - module = import_module(module_name) + __import__(module_name) + module = sys.modules[module_name] self._reloads[module_name] = module self._import_all(module)
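The diff above swaps `import_module(module_name)` for `__import__` plus a `sys.modules` lookup. The lookup matters because `__import__('pkg.mod')` returns the top-level package rather than the submodule; a minimal standalone illustration of the replacement:

```python
import sys

def import_by_name(name):
    # __import__ returns the top-level package for dotted names, so the actual
    # submodule has to be fetched from sys.modules afterwards.
    __import__(name)
    return sys.modules[name]

print(import_by_name("os.path").__name__)  # 'posixpath' (or 'ntpath' on Windows)
```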
{"golden_diff": "diff --git a/IPython/extensions/cythonmagic.py b/IPython/extensions/cythonmagic.py\n--- a/IPython/extensions/cythonmagic.py\n+++ b/IPython/extensions/cythonmagic.py\n@@ -17,7 +17,6 @@\n \n import io\n import os, sys\n-from importlib import import_module\n import imp\n \n try:\n@@ -101,7 +100,8 @@\n module = self._reloads[module_name]\n reload(module)\n else:\n- module = import_module(module_name)\n+ __import__(module_name)\n+ module = sys.modules[module_name]\n self._reloads[module_name] = module\n self._import_all(module)\n", "issue": "cython_magic uses importlib, which doesn't ship with py2.6\nSorry, I don't have time to fix this right now, but wanted to leave a report.\n\n`importlib` was not in the standard library for python2.6, though [it has been backported](http://pypi.python.org/pypi/importlib/1.0.1).\n\nTrying to run `%load_ext cythonmagic` results in this traceback:\n\n```\n/ipython/IPython/extensions/cythonmagic.py in <module>()\n 18 import io\n 19 import os, sys\n---> 20 from importlib import import_module\n 21 import imp\n 22 \n\nImportError: No module named importlib\n```\n\nnot sure if we should make people install it themselves, or if it's better to just put a copy in `IPython.external`\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nCython related magics.\n\nAuthor:\n* Brian Granger\n\nParts of this code were taken from Cython.inline.\n\"\"\"\n#-----------------------------------------------------------------------------\n# Copyright (C) 2010-2011, IPython Development Team.\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\nimport io\nimport os, sys\nfrom importlib import import_module\nimport imp\n\ntry:\n import hashlib\nexcept ImportError:\n import md5 as hashlib\n\nfrom distutils.core import Distribution, Extension\nfrom distutils.command.build_ext import build_ext\n\nfrom IPython.core.magic import Magics, magics_class, cell_magic\nfrom IPython.testing.skipdoctest import skip_doctest\nfrom IPython.core.magic_arguments import (\n argument, magic_arguments, parse_argstring\n)\nfrom IPython.utils import py3compat\n\nimport Cython\nfrom Cython.Compiler.Errors import CompileError\nfrom Cython.Compiler.Main import Context, default_options\nfrom Cython.Build.Dependencies import cythonize\n\n\n@magics_class\nclass CythonMagics(Magics):\n\n def __init__(self, shell):\n super(CythonMagics,self).__init__(shell)\n self._reloads = {}\n self._code_cache = {}\n\n def _import_all(self, module):\n for k,v in module.__dict__.items():\n if not k.startswith('__'):\n self.shell.push({k:v})\n\n @cell_magic\n def cython_inline(self, line, cell):\n \"\"\"Compile and run a Cython code cell using Cython.inline.\n\n This magic simply passes the body of the cell to Cython.inline\n and returns the result. 
If the variables `a` and `b` are defined\n in the user's namespace, here is a simple example that returns\n their sum::\n \n %%cython_inline\n return a+b\n\n For most purposes, we recommend the usage of the `%%cython` magic.\n \"\"\"\n locs = self.shell.user_global_ns\n globs = self.shell.user_ns\n return Cython.inline(cell, locals=locs, globals=globs)\n\n @cell_magic\n def cython_pyximport(self, line, cell):\n \"\"\"Compile and import a Cython code cell using pyximport.\n\n The contents of the cell are written to a `.pyx` file in the current\n working directory, which is then imported using `pyximport`. This\n magic requires a module name to be passed::\n \n %%cython_pyximport modulename\n def f(x):\n return 2.0*x\n\n The compiled module is then imported and all of its symbols are injected into\n the user's namespace. For most purposes, we recommend the usage of the\n `%%cython` magic.\n \"\"\"\n module_name = line.strip()\n if not module_name:\n raise ValueError('module name must be given')\n fname = module_name + '.pyx'\n with io.open(fname, 'w', encoding='utf-8') as f:\n f.write(cell)\n if 'pyximport' not in sys.modules:\n import pyximport\n pyximport.install(reload_support=True)\n if module_name in self._reloads:\n module = self._reloads[module_name]\n reload(module)\n else:\n module = import_module(module_name)\n self._reloads[module_name] = module\n self._import_all(module)\n\n @magic_arguments()\n @argument(\n '-f', '--force', action='store_true', default=False,\n help=\"Force the compilation of the pyx module even if it hasn't changed\"\n )\n @cell_magic\n def cython(self, line, cell):\n \"\"\"Compile and import everything from a Cython code cell.\n\n The contents of the cell are written to a `.pyx` file in the\n directory `IPYTHONDIR/cython` using a filename with the hash of the code.\n This file is then cythonized and compiled. The resulting module\n is imported and all of its symbols are injected into the user's\n namespace. 
The usage is similar to that of `%%cython_pyximport` but\n you don't have to pass a module name::\n\n %%cython\n def f(x):\n return 2.0*x\n \"\"\"\n args = parse_argstring(self.cython, line)\n code = cell if cell.endswith('\\n') else cell+'\\n'\n lib_dir=os.path.join(self.shell.ipython_dir, 'cython')\n cython_include_dirs=['.']\n force=args.force\n quiet=True\n ctx = Context(cython_include_dirs, default_options)\n key = code, sys.version_info, sys.executable, Cython.__version__\n module_name = \"_cython_magic_\" + hashlib.md5(str(key).encode('utf-8')).hexdigest()\n so_ext = [ ext for ext,_,mod_type in imp.get_suffixes() if mod_type == imp.C_EXTENSION ][0]\n module_path = os.path.join(lib_dir, module_name+so_ext)\n\n if not os.path.exists(lib_dir):\n os.makedirs(lib_dir)\n\n if force or not os.path.isfile(module_path):\n cflags = []\n c_include_dirs = []\n if 'numpy' in code:\n import numpy\n c_include_dirs.append(numpy.get_include())\n pyx_file = os.path.join(lib_dir, module_name + '.pyx')\n pyx_file = py3compat.cast_bytes_py2(pyx_file, encoding=sys.getfilesystemencoding())\n with io.open(pyx_file, 'w', encoding='utf-8') as f:\n f.write(code)\n extension = Extension(\n name = module_name,\n sources = [pyx_file],\n include_dirs = c_include_dirs,\n extra_compile_args = cflags\n )\n dist = Distribution()\n config_files = dist.find_config_files()\n try: \n config_files.remove('setup.cfg')\n except ValueError:\n pass\n dist.parse_config_files(config_files)\n build_extension = build_ext(dist)\n build_extension.finalize_options()\n try:\n build_extension.extensions = cythonize([extension], ctx=ctx, quiet=quiet)\n except CompileError:\n return\n build_extension.build_temp = os.path.dirname(pyx_file)\n build_extension.build_lib = lib_dir\n build_extension.run()\n self._code_cache[key] = module_name\n\n module = imp.load_dynamic(module_name, module_path)\n self._import_all(module)\n\n\n_loaded = False\n\ndef load_ipython_extension(ip):\n \"\"\"Load the extension in IPython.\"\"\"\n global _loaded\n if not _loaded:\n ip.register_magics(CythonMagics)\n _loaded = True\n", "path": "IPython/extensions/cythonmagic.py"}]}
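Each row's `verification_info` is a JSON string bundling the golden diff, the issue text, and the pre-patch file contents. A consumer of this dump might unpack it along these lines (a sketch; the key names are taken from the rows shown here):

```python
import json

def unpack_verification_info(raw):
    info = json.loads(raw)
    files = {f["path"]: f["content"] for f in info["before_files"]}
    return info["golden_diff"], info["issue"], files
```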
2,661
151
gh_patches_debug_6839
rasdani/github-patches
git_diff
sunpy__sunpy-3255
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> test_mapsequence_solar_derotate fails under 32bit with Astropy 3.2 When `test_mapsequence_solar_derotate` is run with the `clip` keyword (second stage of that test). It fails with this output: ```python # Test that the shape of data is correct when clipped clipped_shape = (25, 18) for m in tmc: > assert(m.data.shape == clipped_shape) E assert (25, 19) == (25, 18) E At index 1 diff: 19 != 18 E Use -v to get the full diff ``` Turns out that the return for where to clip is different. 32bit ```python [_lower_clip(x.value), _upper_clip(x.value)] * u.pix <Quantity [0., 2.] pix> ``` 64bit ```python [_lower_clip(x.value), _upper_clip(x.value)] * u.pix <Quantity [1., 2.] pix> ``` The `x.value` is not the same. 32bit ``` array([-2.96311832e-15, 5.46585361e-01, 1.09445035e+00]) ``` 64bit ``` array([2.96311832e-15, 5.46585361e-01, 1.09445035e+00]) ``` So it comes from `calculate_solar_rotate_shift` 32bit ``` {'x': <Quantity [-5.68434189e-14, -1.04855012e+01, -2.09955502e+01] arcsec>, 'y': <Quantity [5.68434189e-14, 2.68536149e-01, 5.23060756e-01] arcsec>} ``` 64bit ``` {'x': <Quantity [ 5.68434189e-14, -1.04855012e+01, -2.09955502e+01] arcsec>, 'y': <Quantity [1.13686838e-13, 2.68536149e-01, 5.23060756e-01] arcsec>} ``` It would seem that the sign is the issue. Doing a git bisect on astropy got me to this PR that "broke" it. https://github.com/astropy/astropy/pull/8594 For now, the test is skipped on 32bit. However it seems that https://github.com/sunpy/sunpy/pull/3223 fixed it?! </issue> <code> [start of sunpy/physics/solar_rotation.py] 1 """ 2 This module provides routines for applying solar rotation functions to 3 map sequences. 4 """ 5 6 import numpy as np 7 8 import astropy.units as u 9 10 from sunpy.physics.differential_rotation import solar_rotate_coordinate 11 from sunpy.image.coalignment import apply_shifts 12 13 __author__ = 'J. Ireland' 14 15 __all__ = ['calculate_solar_rotate_shift', 'mapsequence_solar_derotate'] 16 17 18 def calculate_solar_rotate_shift(mc, layer_index=0, **kwargs): 19 """ 20 Calculate the shift that must be applied to each map contained in a mapsequence 21 in order to compensate for solar rotation. 22 23 The center of the map is used to calculate the position of each mapsequence 24 layer. Shifts are calculated relative to a specified layer in the mapsequence. 25 When using this functionality, it is a good idea to check that the shifts 26 that were applied to were reasonable and expected. One way of checking this 27 is to animate the original mapsequence, animate the derotated mapsequence, and 28 compare the differences you see to the calculated shifts. An example use is 29 as follows. If you select data from the SDO cutout service, it is common to 30 not use the solar tracking implemented by this service. This is because (at 31 time of writing) the solar tracking implemented by that service moves the 32 image by single pixels at a time. This is not optimal for many use cases, 33 as it introduces artificial jumps in the data. So with solar tracking not 34 chosen, the selected area is like a window through which you can see the 35 Sun rotating underneath. 36 37 Parameters 38 ---------- 39 mc : `sunpy.map.MapSequence` 40 The input mapsequence. 41 layer_index : int 42 The index layer. Shifts are calculated relative to the time of 43 this layer. 
44 ``**kwargs`` 45 These keywords are passed to the function 46 `sunpy.physics.differential_rotation.solar_rotate_coordinate`. 47 Returns 48 ------- 49 x, y : `~astropy.units.Quantity`, ~astropy.units.Quantity` 50 The shifts relative to the index layer that can be applied 51 to the input mapsequence in order to compensate for solar rotation. 52 The shifts are given in arcseconds as understood in helioprojective 53 coordinates systems. 54 """ 55 # Size of the data 56 nt = len(mc.maps) 57 58 # Storage for the shifts in arcseconds 59 xshift_arcseconds = np.zeros(nt) * u.arcsec 60 yshift_arcseconds = np.zeros_like(xshift_arcseconds) 61 62 # Layer that 63 rotate_to_this_layer = mc.maps[layer_index] 64 65 # Calculate the rotations and the shifts 66 for i, m in enumerate(mc): 67 # Calculate the rotation of the center of the map 'm' at its 68 # observation time to the observation time of the reference layer 69 # indicated by "layer_index". 70 new_coordinate = solar_rotate_coordinate(m.center, 71 observer=rotate_to_this_layer.observer_coordinate, 72 **kwargs) 73 74 # Calculate the shift in arcseconds 75 xshift_arcseconds[i] = new_coordinate.Tx - rotate_to_this_layer.center.Tx 76 yshift_arcseconds[i] = new_coordinate.Ty - rotate_to_this_layer.center.Ty 77 78 return {"x": xshift_arcseconds, "y": yshift_arcseconds} 79 80 81 def mapsequence_solar_derotate(mc, layer_index=0, clip=True, shift=None, **kwargs): 82 """ 83 Move the layers in a mapsequence according to the input shifts. 84 If an input shift is not given, the shifts due to 85 solar rotation relative to an index layer is calculated and 86 applied. When using this functionality, it is a good idea to check 87 that the shifts that were applied to were reasonable and expected. 88 One way of checking this is to animate the original mapsequence, animate 89 the derotated mapsequence, and compare the differences you see to the 90 calculated shifts. 91 92 Parameters 93 ---------- 94 mc : `sunpy.map.MapSequence` 95 A mapsequence of shape (ny, nx, nt), where nt is the number of layers in 96 the mapsequence. 97 layer_index : int 98 Solar derotation shifts of all maps in the mapsequence are assumed 99 to be relative to the layer in the mapsequence indexed by layer_index. 100 clip : bool 101 If True, then clip off x, y edges in the datasequence that are potentially 102 affected by edges effects. 103 ``**kwargs`` 104 These keywords are passed to the function 105 `sunpy.physics.solar_rotation.calculate_solar_rotate_shift`. 106 107 Returns 108 ------- 109 output : `sunpy.map.MapSequence` 110 The results of the shifts applied to the input mapsequence. 
111 112 Examples 113 -------- 114 115 >>> import sunpy.data.sample # doctest: +REMOTE_DATA 116 >>> from sunpy.physics.solar_rotation import mapsequence_solar_derotate 117 >>> map1 = sunpy.map.Map(sunpy.data.sample.AIA_171_IMAGE) # doctest: +REMOTE_DATA 118 >>> map2 = sunpy.map.Map(sunpy.data.sample.EIT_195_IMAGE) # doctest: +REMOTE_DATA 119 >>> mc = sunpy.map.Map([map1, map2], sequence=True) # doctest: +REMOTE_DATA 120 >>> derotated_mc = mapsequence_solar_derotate(mc) # doctest: +REMOTE_DATA 121 >>> derotated_mc = mapsequence_solar_derotate(mc, layer_index=-1) # doctest: +REMOTE_DATA 122 >>> derotated_mc = mapsequence_solar_derotate(mc, clip=False) # doctest: +REMOTE_DATA 123 """ 124 125 # Size of the data 126 nt = len(mc.maps) 127 128 # Storage for the pixel shifts and the shifts in arcseconds 129 xshift_keep = np.zeros(nt) * u.pix 130 yshift_keep = np.zeros_like(xshift_keep) 131 132 # If no shifts are passed in, calculate them. Otherwise, 133 # use the shifts passed in. 134 if shift is None: 135 shift = calculate_solar_rotate_shift(mc, layer_index=layer_index, **kwargs) 136 xshift_arcseconds = shift['x'] 137 yshift_arcseconds = shift['y'] 138 139 # Calculate the pixel shifts 140 for i, m in enumerate(mc): 141 xshift_keep[i] = xshift_arcseconds[i] / m.scale[0] 142 yshift_keep[i] = yshift_arcseconds[i] / m.scale[1] 143 144 # Apply the pixel shifts and return the mapsequence 145 return apply_shifts(mc, yshift_keep, xshift_keep, clip=clip) 146 [end of sunpy/physics/solar_rotation.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
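The numbers quoted in this issue show why the test breaks: the reference layer's shift is roughly 1e-15 arcsec of round-off, and its sign differs between 32-bit and 64-bit builds, which is enough to flip an integer clip amount by one pixel. A toy clip function consistent with the values in the issue (not sunpy's actual helper) makes the mechanism visible; the golden diff that follows sidesteps it by never computing a shift for the reference layer:

```python
import numpy as np

def toy_lower_clip(shifts_pix):
    # Illustrative only: when every shift is strictly positive, one extra column gets clipped.
    return float(np.ceil(np.min(shifts_pix))) if np.all(shifts_pix > 0) else 0.0

print(toy_lower_clip(np.array([-2.96e-15, 0.5466, 1.0945])))  # 0.0 (32-bit signs)
print(toy_lower_clip(np.array([ 2.96e-15, 0.5466, 1.0945])))  # 1.0 (64-bit signs)
```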
diff --git a/sunpy/physics/solar_rotation.py b/sunpy/physics/solar_rotation.py --- a/sunpy/physics/solar_rotation.py +++ b/sunpy/physics/solar_rotation.py @@ -64,6 +64,10 @@ # Calculate the rotations and the shifts for i, m in enumerate(mc): + # Skip the reference layer + if i == layer_index: + continue + # Calculate the rotation of the center of the map 'm' at its # observation time to the observation time of the reference layer # indicated by "layer_index".
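A standalone rendering of the idea behind the patch: by skipping the reference layer, its shift stays exactly zero instead of inheriting platform-dependent round-off (a sketch, not the sunpy code):

```python
import numpy as np

def shifts_relative_to(centers, layer_index=0):
    shifts = np.zeros_like(centers, dtype=float)
    for i, c in enumerate(centers):
        if i == layer_index:
            continue  # reference layer: shift is zero by definition
        shifts[i] = c - centers[layer_index]
    return shifts

print(shifts_relative_to(np.array([10.0, 20.5, 31.0])))  # -> [0., 10.5, 21.]
```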
{"golden_diff": "diff --git a/sunpy/physics/solar_rotation.py b/sunpy/physics/solar_rotation.py\n--- a/sunpy/physics/solar_rotation.py\n+++ b/sunpy/physics/solar_rotation.py\n@@ -64,6 +64,10 @@\n \n # Calculate the rotations and the shifts\n for i, m in enumerate(mc):\n+ # Skip the reference layer\n+ if i == layer_index:\n+ continue\n+\n # Calculate the rotation of the center of the map 'm' at its\n # observation time to the observation time of the reference layer\n # indicated by \"layer_index\".\n", "issue": "test_mapsequence_solar_derotate fails under 32bit with Astropy 3.2\nWhen `test_mapsequence_solar_derotate` is run with the `clip` keyword (second stage of that test).\r\n\r\nIt fails with this output:\r\n\r\n```python\r\n # Test that the shape of data is correct when clipped\r\n clipped_shape = (25, 18)\r\n for m in tmc:\r\n> assert(m.data.shape == clipped_shape)\r\nE assert (25, 19) == (25, 18)\r\nE At index 1 diff: 19 != 18\r\nE Use -v to get the full diff\r\n```\r\n\r\nTurns out that the return for where to clip is different.\r\n \r\n32bit\r\n```python\r\n[_lower_clip(x.value), _upper_clip(x.value)] * u.pix\r\n<Quantity [0., 2.] pix>\r\n```\r\n\r\n64bit\r\n```python\r\n[_lower_clip(x.value), _upper_clip(x.value)] * u.pix\r\n<Quantity [1., 2.] pix>\r\n```\r\n\r\nThe `x.value` is not the same.\r\n\r\n32bit\r\n```\r\narray([-2.96311832e-15, 5.46585361e-01, 1.09445035e+00])\r\n```\r\n\r\n64bit\r\n```\r\narray([2.96311832e-15, 5.46585361e-01, 1.09445035e+00])\r\n```\r\n\r\nSo it comes from `calculate_solar_rotate_shift`\r\n\r\n32bit\r\n```\r\n{'x': <Quantity [-5.68434189e-14, -1.04855012e+01, -2.09955502e+01] arcsec>, 'y': <Quantity [5.68434189e-14, 2.68536149e-01, 5.23060756e-01] arcsec>}\r\n```\r\n\r\n64bit\r\n```\r\n{'x': <Quantity [ 5.68434189e-14, -1.04855012e+01, -2.09955502e+01] arcsec>, 'y': <Quantity [1.13686838e-13, 2.68536149e-01, 5.23060756e-01] arcsec>}\r\n```\r\n\r\nIt would seem that the sign is the issue. \r\n\r\nDoing a git bisect on astropy got me to this PR that \"broke\" it.\r\n\r\nhttps://github.com/astropy/astropy/pull/8594\r\n\r\nFor now, the test is skipped on 32bit. \r\nHowever it seems that https://github.com/sunpy/sunpy/pull/3223 fixed it?!\n", "before_files": [{"content": "\"\"\"\nThis module provides routines for applying solar rotation functions to\nmap sequences.\n\"\"\"\n\nimport numpy as np\n\nimport astropy.units as u\n\nfrom sunpy.physics.differential_rotation import solar_rotate_coordinate\nfrom sunpy.image.coalignment import apply_shifts\n\n__author__ = 'J. Ireland'\n\n__all__ = ['calculate_solar_rotate_shift', 'mapsequence_solar_derotate']\n\n\ndef calculate_solar_rotate_shift(mc, layer_index=0, **kwargs):\n \"\"\"\n Calculate the shift that must be applied to each map contained in a mapsequence\n in order to compensate for solar rotation.\n\n The center of the map is used to calculate the position of each mapsequence\n layer. Shifts are calculated relative to a specified layer in the mapsequence.\n When using this functionality, it is a good idea to check that the shifts\n that were applied to were reasonable and expected. One way of checking this\n is to animate the original mapsequence, animate the derotated mapsequence, and\n compare the differences you see to the calculated shifts. An example use is\n as follows. If you select data from the SDO cutout service, it is common to\n not use the solar tracking implemented by this service. 
This is because (at\n time of writing) the solar tracking implemented by that service moves the\n image by single pixels at a time. This is not optimal for many use cases,\n as it introduces artificial jumps in the data. So with solar tracking not\n chosen, the selected area is like a window through which you can see the\n Sun rotating underneath.\n\n Parameters\n ----------\n mc : `sunpy.map.MapSequence`\n The input mapsequence.\n layer_index : int\n The index layer. Shifts are calculated relative to the time of\n this layer.\n ``**kwargs``\n These keywords are passed to the function\n `sunpy.physics.differential_rotation.solar_rotate_coordinate`.\n Returns\n -------\n x, y : `~astropy.units.Quantity`, ~astropy.units.Quantity`\n The shifts relative to the index layer that can be applied\n to the input mapsequence in order to compensate for solar rotation.\n The shifts are given in arcseconds as understood in helioprojective\n coordinates systems.\n \"\"\"\n # Size of the data\n nt = len(mc.maps)\n\n # Storage for the shifts in arcseconds\n xshift_arcseconds = np.zeros(nt) * u.arcsec\n yshift_arcseconds = np.zeros_like(xshift_arcseconds)\n\n # Layer that\n rotate_to_this_layer = mc.maps[layer_index]\n\n # Calculate the rotations and the shifts\n for i, m in enumerate(mc):\n # Calculate the rotation of the center of the map 'm' at its\n # observation time to the observation time of the reference layer\n # indicated by \"layer_index\".\n new_coordinate = solar_rotate_coordinate(m.center,\n observer=rotate_to_this_layer.observer_coordinate,\n **kwargs)\n\n # Calculate the shift in arcseconds\n xshift_arcseconds[i] = new_coordinate.Tx - rotate_to_this_layer.center.Tx\n yshift_arcseconds[i] = new_coordinate.Ty - rotate_to_this_layer.center.Ty\n\n return {\"x\": xshift_arcseconds, \"y\": yshift_arcseconds}\n\n\ndef mapsequence_solar_derotate(mc, layer_index=0, clip=True, shift=None, **kwargs):\n \"\"\"\n Move the layers in a mapsequence according to the input shifts.\n If an input shift is not given, the shifts due to\n solar rotation relative to an index layer is calculated and\n applied. 
When using this functionality, it is a good idea to check\n that the shifts that were applied to were reasonable and expected.\n One way of checking this is to animate the original mapsequence, animate\n the derotated mapsequence, and compare the differences you see to the\n calculated shifts.\n\n Parameters\n ----------\n mc : `sunpy.map.MapSequence`\n A mapsequence of shape (ny, nx, nt), where nt is the number of layers in\n the mapsequence.\n layer_index : int\n Solar derotation shifts of all maps in the mapsequence are assumed\n to be relative to the layer in the mapsequence indexed by layer_index.\n clip : bool\n If True, then clip off x, y edges in the datasequence that are potentially\n affected by edges effects.\n ``**kwargs``\n These keywords are passed to the function\n `sunpy.physics.solar_rotation.calculate_solar_rotate_shift`.\n\n Returns\n -------\n output : `sunpy.map.MapSequence`\n The results of the shifts applied to the input mapsequence.\n\n Examples\n --------\n\n >>> import sunpy.data.sample # doctest: +REMOTE_DATA\n >>> from sunpy.physics.solar_rotation import mapsequence_solar_derotate\n >>> map1 = sunpy.map.Map(sunpy.data.sample.AIA_171_IMAGE) # doctest: +REMOTE_DATA\n >>> map2 = sunpy.map.Map(sunpy.data.sample.EIT_195_IMAGE) # doctest: +REMOTE_DATA\n >>> mc = sunpy.map.Map([map1, map2], sequence=True) # doctest: +REMOTE_DATA\n >>> derotated_mc = mapsequence_solar_derotate(mc) # doctest: +REMOTE_DATA\n >>> derotated_mc = mapsequence_solar_derotate(mc, layer_index=-1) # doctest: +REMOTE_DATA\n >>> derotated_mc = mapsequence_solar_derotate(mc, clip=False) # doctest: +REMOTE_DATA\n \"\"\"\n\n # Size of the data\n nt = len(mc.maps)\n\n # Storage for the pixel shifts and the shifts in arcseconds\n xshift_keep = np.zeros(nt) * u.pix\n yshift_keep = np.zeros_like(xshift_keep)\n\n # If no shifts are passed in, calculate them. Otherwise,\n # use the shifts passed in.\n if shift is None:\n shift = calculate_solar_rotate_shift(mc, layer_index=layer_index, **kwargs)\n xshift_arcseconds = shift['x']\n yshift_arcseconds = shift['y']\n\n # Calculate the pixel shifts\n for i, m in enumerate(mc):\n xshift_keep[i] = xshift_arcseconds[i] / m.scale[0]\n yshift_keep[i] = yshift_arcseconds[i] / m.scale[1]\n\n # Apply the pixel shifts and return the mapsequence\n return apply_shifts(mc, yshift_keep, xshift_keep, clip=clip)\n", "path": "sunpy/physics/solar_rotation.py"}]}
3,005
139
gh_patches_debug_7107
rasdani/github-patches
git_diff
pytorch__text-65
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Possible bug in LanguageModelingDataset In the code for [`LanguageModelingDataset`](https://github.com/pytorch/text/blob/master/torchtext/datasets/language_modeling.py), the original text seems to be pre-processed twice, viz.: - `text += text_field.preprocess(line)` [at line 22](https://github.com/pytorch/text/blob/master/torchtext/datasets/language_modeling.py#L22) - `examples = [data.Example.fromlist([text], fields)]` [at line 26](https://github.com/pytorch/text/blob/master/torchtext/datasets/language_modeling.py#L26), which in turn calls `setattr(ex, name, field.preprocess(val))` [at line 44 of example.py](https://github.com/pytorch/text/blob/master/torchtext/data/example.py#L44) In fact, if I try to create a simple LanguageModelingDataset, I am getting an error as follows: ``` Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/home/riddasgu/.local/lib/python2.7/site-packages/torchtext/datasets/language_modeling.py", line 28, in __init__ examples = [data.Example.fromlist([text], fields)] File "/home/riddasgu/.local/lib/python2.7/site-packages/torchtext/data/example.py", line 44, in fromlist setattr(ex, name, field.preprocess(val)) File "/home/riddasgu/.local/lib/python2.7/site-packages/torchtext/data/field.py", line 91, in preprocess x = self.tokenize(x) File "/home/riddasgu/.local/lib/python2.7/site-packages/torchtext/data/field.py", line 63, in <lambda> tokenize=(lambda s: s.split()), include_lengths=False, AttributeError: 'list' object has no attribute 'split' ``` </issue> <code> [start of torchtext/data/field.py] 1 from collections import Counter, OrderedDict 2 import six 3 import torch 4 from torch.autograd import Variable 5 6 from .dataset import Dataset 7 from .pipeline import Pipeline 8 from .utils import get_tokenizer 9 from ..vocab import Vocab 10 11 12 class Field(object): 13 """Defines a datatype together with instructions for converting to Tensor. 14 15 Every dataset consists of one or more types of data. For instance, a text 16 classification dataset contains sentences and their classes, while a 17 machine translation dataset contains paired examples of text in two 18 languages. Each of these types of data is represented by a Field object, 19 which holds a Vocab object that defines the set of possible values for 20 elements of the field and their corresponding numerical representations. 21 The Field object also holds other parameters relating to how a datatype 22 should be numericalized, such as a tokenization method and the kind of 23 Tensor that should be produced. 24 25 If a Field is shared between two columns in a dataset (e.g., question and 26 answer in a QA dataset), then they will have a shared vocabulary. 27 28 Attributes: 29 sequential: Whether the datatype represents sequential data. If False, 30 no tokenization is applied. Default: True. 31 use_vocab: Whether to use a Vocab object. If False, the data in this 32 field should already be numerical. Default: True. 33 init_token: A token that will be prepended to every example using this 34 field, or None for no initial token. Default: None. 35 eos_token: A token that will be appended to every example using this 36 field, or None for no end-of-sentence token. Default: None. 37 fix_length: A fixed length that all examples using this field will be 38 padded to, or None for flexible sequence lengths. Default: None. 
39 tensor_type: The torch.Tensor class that represents a batch of examples 40 of this kind of data. Default: torch.LongTensor. 41 preprocessing: The Pipeline that will be applied to examples 42 using this field after tokenizing but before numericalizing. Many 43 Datasets replace this attribute with a custom preprocessor. 44 Default: the identity Pipeline. 45 postprocessing: A Pipeline that will be applied to examples using 46 this field after numericalizing but before the numbers are turned 47 into a Tensor. Default: the identity Pipeline. 48 lower: Whether to lowercase the text in this field. Default: False. 49 tokenize: The function used to tokenize strings using this field into 50 sequential examples. Default: str.split. 51 include_lengths: Whether to return a tuple of a padded minibatch and 52 a list containing the lengths of each examples, or just a padded 53 minibatch. Default: False. 54 batch_first: Whether to produce tensors with the batch dimension first. 55 Default: False. 56 pad_token: The string token used as padding. Default: "<pad>". 57 """ 58 59 def __init__( 60 self, sequential=True, use_vocab=True, init_token=None, 61 eos_token=None, fix_length=None, tensor_type=torch.LongTensor, 62 preprocessing=None, postprocessing=None, lower=False, 63 tokenize=(lambda s: s.split()), include_lengths=False, 64 batch_first=False, pad_token="<pad>"): 65 self.sequential = sequential 66 self.use_vocab = use_vocab 67 self.init_token = init_token 68 self.eos_token = eos_token 69 self.fix_length = fix_length 70 self.tensor_type = tensor_type 71 self.preprocessing = (Pipeline() if preprocessing 72 is None else preprocessing) 73 self.postprocessing = (Pipeline() if postprocessing 74 is None else postprocessing) 75 self.lower = lower 76 self.tokenize = get_tokenizer(tokenize) 77 self.include_lengths = include_lengths 78 self.batch_first = batch_first 79 self.pad_token = pad_token if self.sequential else None 80 81 def preprocess(self, x): 82 """Load a single example using this field, tokenizing if necessary. 83 84 If the input is a Python 2 `str`, it will be converted to Unicode 85 first. If `sequential=True`, it will be tokenized. Then the input 86 will be optionally lowercased and passed to the user-provided 87 `preprocessing` Pipeline.""" 88 if six.PY2 and isinstance(x, six.string_types): 89 x = Pipeline(lambda s: unicode(s, encoding='utf-8'))(x) 90 if self.sequential: 91 x = self.tokenize(x) 92 if self.lower: 93 x = Pipeline(six.text_type.lower)(x) 94 return self.preprocessing(x) 95 96 def pad(self, minibatch): 97 """Pad a batch of examples using this field. 98 99 Pads to self.fix_length if provided, otherwise pads to the length of 100 the longest example in the batch. Prepends self.init_token and appends 101 self.eos_token if those attributes are not None. Returns a tuple of the 102 padded list and a list containing lengths of each example if 103 `self.include_lengths` is `True`, else just returns the padded list. 
104 """ 105 minibatch = list(minibatch) 106 if not self.sequential: 107 return minibatch 108 if self.fix_length is None: 109 max_len = max(len(x) for x in minibatch) 110 else: 111 max_len = self.fix_length + ( 112 self.init_token, self.eos_token).count(None) - 2 113 padded, lengths = [], [] 114 for x in minibatch: 115 padded.append( 116 ([] if self.init_token is None else [self.init_token]) + 117 list(x[:max_len]) + 118 ([] if self.eos_token is None else [self.eos_token]) + 119 [self.pad_token] * max(0, max_len - len(x))) 120 lengths.append(len(padded[-1]) - max(0, max_len - len(x))) 121 if self.include_lengths: 122 return (padded, lengths) 123 return padded 124 125 def build_vocab(self, *args, **kwargs): 126 """Construct the Vocab object for this field from one or more datasets. 127 128 Arguments: 129 Positional arguments: Dataset objects or other iterable data 130 sources from which to construct the Vocab object that 131 represents the set of possible values for this field. If 132 a Dataset object is provided, all columns corresponding 133 to this field are used; individual columns can also be 134 provided directly. 135 Remaining keyword arguments: Passed to the constructor of Vocab. 136 """ 137 counter = Counter() 138 sources = [] 139 for arg in args: 140 if isinstance(arg, Dataset): 141 sources += [getattr(arg, name) for name, field in 142 arg.fields.items() if field is self] 143 else: 144 sources.append(arg) 145 for data in sources: 146 for x in data: 147 if not self.sequential: 148 x = [x] 149 counter.update(x) 150 specials = list(OrderedDict.fromkeys( 151 tok for tok in [self.pad_token, self.init_token, self.eos_token] 152 if tok is not None)) 153 self.vocab = Vocab(counter, specials=specials, **kwargs) 154 155 def numericalize(self, arr, device=None, train=True): 156 """Turn a batch of examples that use this field into a Variable. 157 158 If the field has include_lengths=True, a tensor of lengths will be 159 included in the return value. 160 161 Arguments: 162 arr: List of tokenized and padded examples, or tuple of a padded 163 list and a list of lengths if self.include_lengths is True. 164 device: Device to create the Variable's Tensor on. Use -1 for 165 CPU and None for the currently active GPU device. Default: 166 None. 167 train: Whether the batch is for a training set. If False, the 168 Variable will be created with volatile=True. Default: True. 169 """ 170 if isinstance(arr, tuple): 171 arr, lengths = arr 172 if self.use_vocab: 173 if self.sequential: 174 arr = [[self.vocab.stoi[x] for x in ex] for ex in arr] 175 else: 176 arr = [self.vocab.stoi[x] for x in arr] 177 arr = self.postprocessing(arr, self.vocab, train) 178 else: 179 arr = self.postprocessing(arr, train) 180 arr = self.tensor_type(arr) 181 if self.include_lengths: 182 lengths = torch.LongTensor(lengths) 183 if self.sequential and not self.batch_first: 184 arr.t_() 185 if device == -1: 186 if self.sequential: 187 arr = arr.contiguous() 188 else: 189 arr = arr.cuda(device) 190 if self.include_lengths: 191 lengths = lengths.cuda(device) 192 if self.include_lengths: 193 return Variable(arr, volatile=not train), lengths 194 return Variable(arr, volatile=not train) 195 [end of torchtext/data/field.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
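The traceback in this issue reduces to calling `str.split` on a value that has already been tokenized once; a two-line reproduction condensed from it:

```python
tokens = "the quick brown fox".split()   # first pass: text_field.preprocess(line)
try:
    tokens.split()                       # second pass via Example.fromlist -> field.preprocess
except AttributeError as exc:
    print(exc)                           # 'list' object has no attribute 'split'
```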
diff --git a/torchtext/data/field.py b/torchtext/data/field.py --- a/torchtext/data/field.py +++ b/torchtext/data/field.py @@ -87,7 +87,7 @@ `preprocessing` Pipeline.""" if six.PY2 and isinstance(x, six.string_types): x = Pipeline(lambda s: unicode(s, encoding='utf-8'))(x) - if self.sequential: + if self.sequential and isinstance(x, six.text_type): x = self.tokenize(x) if self.lower: x = Pipeline(six.text_type.lower)(x)
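Rendered as a standalone function, the patched guard only tokenizes while the value is still a string, so pre-tokenized lists pass through untouched (a simplified sketch of `Field.preprocess`, not the full implementation):

```python
import six

def preprocess(x, tokenize=lambda s: s.split(), sequential=True):
    if sequential and isinstance(x, six.text_type):  # the isinstance check added by the diff
        x = tokenize(x)
    return x

print(preprocess("the quick brown fox"))      # ['the', 'quick', 'brown', 'fox']
print(preprocess(["already", "tokenized"]))   # returned unchanged
```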
{"golden_diff": "diff --git a/torchtext/data/field.py b/torchtext/data/field.py\n--- a/torchtext/data/field.py\n+++ b/torchtext/data/field.py\n@@ -87,7 +87,7 @@\n `preprocessing` Pipeline.\"\"\"\n if six.PY2 and isinstance(x, six.string_types):\n x = Pipeline(lambda s: unicode(s, encoding='utf-8'))(x)\n- if self.sequential:\n+ if self.sequential and isinstance(x, six.text_type):\n x = self.tokenize(x)\n if self.lower:\n x = Pipeline(six.text_type.lower)(x)\n", "issue": "Possible bug in LanguageModelingDataset\nIn the code for [`LanguageModelingDataset`](https://github.com/pytorch/text/blob/master/torchtext/datasets/language_modeling.py), the original text seems to be pre-processed twice, viz.:\r\n\r\n- `text += text_field.preprocess(line)` [at line 22](https://github.com/pytorch/text/blob/master/torchtext/datasets/language_modeling.py#L22)\r\n- `examples = [data.Example.fromlist([text], fields)]` [at line 26](https://github.com/pytorch/text/blob/master/torchtext/datasets/language_modeling.py#L26), which in turn calls \r\n`setattr(ex, name, field.preprocess(val))` [at line 44 of example.py](https://github.com/pytorch/text/blob/master/torchtext/data/example.py#L44)\r\n\r\nIn fact, if I try to create a simple LanguageModelingDataset, I am getting an error as follows:\r\n```\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/home/riddasgu/.local/lib/python2.7/site-packages/torchtext/datasets/language_modeling.py\", line 28, in __init__\r\n examples = [data.Example.fromlist([text], fields)]\r\n File \"/home/riddasgu/.local/lib/python2.7/site-packages/torchtext/data/example.py\", line 44, in fromlist\r\n setattr(ex, name, field.preprocess(val))\r\n File \"/home/riddasgu/.local/lib/python2.7/site-packages/torchtext/data/field.py\", line 91, in preprocess\r\n x = self.tokenize(x)\r\n File \"/home/riddasgu/.local/lib/python2.7/site-packages/torchtext/data/field.py\", line 63, in <lambda>\r\n tokenize=(lambda s: s.split()), include_lengths=False,\r\nAttributeError: 'list' object has no attribute 'split'\r\n```\n", "before_files": [{"content": "from collections import Counter, OrderedDict\nimport six\nimport torch\nfrom torch.autograd import Variable\n\nfrom .dataset import Dataset\nfrom .pipeline import Pipeline\nfrom .utils import get_tokenizer\nfrom ..vocab import Vocab\n\n\nclass Field(object):\n \"\"\"Defines a datatype together with instructions for converting to Tensor.\n\n Every dataset consists of one or more types of data. For instance, a text\n classification dataset contains sentences and their classes, while a\n machine translation dataset contains paired examples of text in two\n languages. Each of these types of data is represented by a Field object,\n which holds a Vocab object that defines the set of possible values for\n elements of the field and their corresponding numerical representations.\n The Field object also holds other parameters relating to how a datatype\n should be numericalized, such as a tokenization method and the kind of\n Tensor that should be produced.\n\n If a Field is shared between two columns in a dataset (e.g., question and\n answer in a QA dataset), then they will have a shared vocabulary.\n\n Attributes:\n sequential: Whether the datatype represents sequential data. If False,\n no tokenization is applied. Default: True.\n use_vocab: Whether to use a Vocab object. If False, the data in this\n field should already be numerical. 
Default: True.\n init_token: A token that will be prepended to every example using this\n field, or None for no initial token. Default: None.\n eos_token: A token that will be appended to every example using this\n field, or None for no end-of-sentence token. Default: None.\n fix_length: A fixed length that all examples using this field will be\n padded to, or None for flexible sequence lengths. Default: None.\n tensor_type: The torch.Tensor class that represents a batch of examples\n of this kind of data. Default: torch.LongTensor.\n preprocessing: The Pipeline that will be applied to examples\n using this field after tokenizing but before numericalizing. Many\n Datasets replace this attribute with a custom preprocessor.\n Default: the identity Pipeline.\n postprocessing: A Pipeline that will be applied to examples using\n this field after numericalizing but before the numbers are turned\n into a Tensor. Default: the identity Pipeline.\n lower: Whether to lowercase the text in this field. Default: False.\n tokenize: The function used to tokenize strings using this field into\n sequential examples. Default: str.split.\n include_lengths: Whether to return a tuple of a padded minibatch and\n a list containing the lengths of each examples, or just a padded\n minibatch. Default: False.\n batch_first: Whether to produce tensors with the batch dimension first.\n Default: False.\n pad_token: The string token used as padding. Default: \"<pad>\".\n \"\"\"\n\n def __init__(\n self, sequential=True, use_vocab=True, init_token=None,\n eos_token=None, fix_length=None, tensor_type=torch.LongTensor,\n preprocessing=None, postprocessing=None, lower=False,\n tokenize=(lambda s: s.split()), include_lengths=False,\n batch_first=False, pad_token=\"<pad>\"):\n self.sequential = sequential\n self.use_vocab = use_vocab\n self.init_token = init_token\n self.eos_token = eos_token\n self.fix_length = fix_length\n self.tensor_type = tensor_type\n self.preprocessing = (Pipeline() if preprocessing\n is None else preprocessing)\n self.postprocessing = (Pipeline() if postprocessing\n is None else postprocessing)\n self.lower = lower\n self.tokenize = get_tokenizer(tokenize)\n self.include_lengths = include_lengths\n self.batch_first = batch_first\n self.pad_token = pad_token if self.sequential else None\n\n def preprocess(self, x):\n \"\"\"Load a single example using this field, tokenizing if necessary.\n\n If the input is a Python 2 `str`, it will be converted to Unicode\n first. If `sequential=True`, it will be tokenized. Then the input\n will be optionally lowercased and passed to the user-provided\n `preprocessing` Pipeline.\"\"\"\n if six.PY2 and isinstance(x, six.string_types):\n x = Pipeline(lambda s: unicode(s, encoding='utf-8'))(x)\n if self.sequential:\n x = self.tokenize(x)\n if self.lower:\n x = Pipeline(six.text_type.lower)(x)\n return self.preprocessing(x)\n\n def pad(self, minibatch):\n \"\"\"Pad a batch of examples using this field.\n\n Pads to self.fix_length if provided, otherwise pads to the length of\n the longest example in the batch. Prepends self.init_token and appends\n self.eos_token if those attributes are not None. 
Returns a tuple of the\n padded list and a list containing lengths of each example if\n `self.include_lengths` is `True`, else just returns the padded list.\n \"\"\"\n minibatch = list(minibatch)\n if not self.sequential:\n return minibatch\n if self.fix_length is None:\n max_len = max(len(x) for x in minibatch)\n else:\n max_len = self.fix_length + (\n self.init_token, self.eos_token).count(None) - 2\n padded, lengths = [], []\n for x in minibatch:\n padded.append(\n ([] if self.init_token is None else [self.init_token]) +\n list(x[:max_len]) +\n ([] if self.eos_token is None else [self.eos_token]) +\n [self.pad_token] * max(0, max_len - len(x)))\n lengths.append(len(padded[-1]) - max(0, max_len - len(x)))\n if self.include_lengths:\n return (padded, lengths)\n return padded\n\n def build_vocab(self, *args, **kwargs):\n \"\"\"Construct the Vocab object for this field from one or more datasets.\n\n Arguments:\n Positional arguments: Dataset objects or other iterable data\n sources from which to construct the Vocab object that\n represents the set of possible values for this field. If\n a Dataset object is provided, all columns corresponding\n to this field are used; individual columns can also be\n provided directly.\n Remaining keyword arguments: Passed to the constructor of Vocab.\n \"\"\"\n counter = Counter()\n sources = []\n for arg in args:\n if isinstance(arg, Dataset):\n sources += [getattr(arg, name) for name, field in\n arg.fields.items() if field is self]\n else:\n sources.append(arg)\n for data in sources:\n for x in data:\n if not self.sequential:\n x = [x]\n counter.update(x)\n specials = list(OrderedDict.fromkeys(\n tok for tok in [self.pad_token, self.init_token, self.eos_token]\n if tok is not None))\n self.vocab = Vocab(counter, specials=specials, **kwargs)\n\n def numericalize(self, arr, device=None, train=True):\n \"\"\"Turn a batch of examples that use this field into a Variable.\n\n If the field has include_lengths=True, a tensor of lengths will be\n included in the return value.\n\n Arguments:\n arr: List of tokenized and padded examples, or tuple of a padded\n list and a list of lengths if self.include_lengths is True.\n device: Device to create the Variable's Tensor on. Use -1 for\n CPU and None for the currently active GPU device. Default:\n None.\n train: Whether the batch is for a training set. If False, the\n Variable will be created with volatile=True. Default: True.\n \"\"\"\n if isinstance(arr, tuple):\n arr, lengths = arr\n if self.use_vocab:\n if self.sequential:\n arr = [[self.vocab.stoi[x] for x in ex] for ex in arr]\n else:\n arr = [self.vocab.stoi[x] for x in arr]\n arr = self.postprocessing(arr, self.vocab, train)\n else:\n arr = self.postprocessing(arr, train)\n arr = self.tensor_type(arr)\n if self.include_lengths:\n lengths = torch.LongTensor(lengths)\n if self.sequential and not self.batch_first:\n arr.t_()\n if device == -1:\n if self.sequential:\n arr = arr.contiguous()\n else:\n arr = arr.cuda(device)\n if self.include_lengths:\n lengths = lengths.cuda(device)\n if self.include_lengths:\n return Variable(arr, volatile=not train), lengths\n return Variable(arr, volatile=not train)\n", "path": "torchtext/data/field.py"}]}
3,290
137
gh_patches_debug_17697
rasdani/github-patches
git_diff
Cog-Creators__Red-DiscordBot-1277
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [V3] Instance deletion requires making a backup Please be sure to read through other issues as well to make sure what you are suggesting/reporting has not already been suggested/reported ### Type: - [ ] Suggestion - [x] Bug ### Brief description of the problem Instance deletion doesn't work if you don't want to make a backup ### Expected behavior Should just remove the instance if the user opts not to take a backup ### Actual behavior It just exits ### Steps to reproduce 1. Run `redbot-setup --delete` 2. Select an instance 3. Answer `n` to the question about making a backup ### Temporary workaround Have it make a backup and just delete the backup manually if you don't want it </issue> <code> [start of redbot/setup.py] 1 #!/usr/bin/env python 2 3 import argparse 4 import os 5 import shutil 6 import sys 7 import tarfile 8 from copy import deepcopy 9 from datetime import datetime as dt 10 from pathlib import Path 11 12 import appdirs 13 from redbot.core.cli import confirm 14 from redbot.core.data_manager import basic_config_default 15 from redbot.core.json_io import JsonIO 16 17 config_dir = None 18 appdir = appdirs.AppDirs("Red-DiscordBot") 19 if sys.platform == 'linux': 20 if 0 < os.getuid() < 1000: 21 config_dir = Path(appdir.site_data_dir) 22 if not config_dir: 23 config_dir = Path(appdir.user_config_dir) 24 try: 25 config_dir.mkdir(parents=True, exist_ok=True) 26 except PermissionError: 27 print( 28 "You don't have permission to write to " 29 "'{}'\nExiting...".format(config_dir)) 30 sys.exit(1) 31 config_file = config_dir / 'config.json' 32 33 34 def parse_cli_args(): 35 parser = argparse.ArgumentParser( 36 description="Red - Discord Bot's instance manager (V3)" 37 ) 38 parser.add_argument( 39 "--delete", "-d", 40 help="Interactively delete an instance", 41 action="store_true" 42 ) 43 return parser.parse_known_args() 44 45 46 def load_existing_config(): 47 if not config_file.exists(): 48 return {} 49 50 return JsonIO(config_file)._load_json() 51 52 53 def save_config(name, data, remove=False): 54 config = load_existing_config() 55 if remove and name in config: 56 config.pop(name) 57 else: 58 config[name] = data 59 JsonIO(config_file)._save_json(config) 60 61 62 def basic_setup(): 63 """ 64 Creates the data storage folder. 65 :return: 66 """ 67 68 default_data_dir = Path(appdir.user_data_dir) 69 70 print("Hello! Before we begin the full configuration process we need to" 71 " gather some initial information about where you'd like us" 72 " to store your bot's data. We've attempted to figure out a" 73 " sane default data location which is printed below. If you don't" 74 " want to change this default please press [ENTER], otherwise" 75 " input your desired data location.") 76 print() 77 print("Default: {}".format(default_data_dir)) 78 79 new_path = input('> ') 80 81 if new_path != '': 82 new_path = Path(new_path) 83 default_data_dir = new_path 84 85 if not default_data_dir.exists(): 86 try: 87 default_data_dir.mkdir(parents=True, exist_ok=True) 88 except OSError: 89 print("We were unable to create your chosen directory." 90 " You may need to restart this process with admin" 91 " privileges.") 92 sys.exit(1) 93 94 print("You have chosen {} to be your data directory." 
95 "".format(default_data_dir)) 96 if not confirm("Please confirm (y/n):"): 97 print("Please start the process over.") 98 sys.exit(0) 99 100 default_dirs = deepcopy(basic_config_default) 101 default_dirs['DATA_PATH'] = str(default_data_dir.resolve()) 102 103 storage_dict = { 104 1: "JSON", 105 2: "MongoDB" 106 } 107 storage = None 108 while storage is None: 109 print() 110 print("Please choose your storage backend (if you're unsure, choose 1).") 111 print("1. JSON (file storage, requires no database).") 112 print("2. MongoDB") 113 storage = input("> ") 114 try: 115 storage = int(storage) 116 except ValueError: 117 storage = None 118 else: 119 if storage not in storage_dict: 120 storage = None 121 122 default_dirs['STORAGE_TYPE'] = storage_dict.get(storage, 1) 123 124 if storage_dict.get(storage, 1) == "MongoDB": 125 from redbot.core.drivers.red_mongo import get_config_details 126 default_dirs['STORAGE_DETAILS'] = get_config_details() 127 else: 128 default_dirs['STORAGE_DETAILS'] = {} 129 130 name = "" 131 while len(name) == 0: 132 print() 133 print("Please enter a name for your instance, this name cannot include spaces" 134 " and it will be used to run your bot from here on out.") 135 name = input("> ") 136 if " " in name: 137 name = "" 138 139 save_config(name, default_dirs) 140 141 print() 142 print("Your basic configuration has been saved. Please run `redbot <name>` to" 143 " continue your setup process and to run the bot.") 144 145 146 def remove_instance(): 147 instance_list = load_existing_config() 148 if not instance_list: 149 print("No instances have been set up!") 150 return 151 152 print( 153 "You have chosen to remove an instance. The following " 154 "is a list of instances that currently exist:\n" 155 ) 156 for instance in instance_list.keys(): 157 print("{}\n".format(instance)) 158 print("Please select one of the above by entering its name") 159 selected = input("> ") 160 161 if selected not in instance_list.keys(): 162 print("That isn't a valid instance!") 163 return 164 instance_data = instance_list[selected] 165 print( 166 "Would you like to make a backup of " 167 "the data for this instance (y/n)?" 168 ) 169 yesno = input("> ") 170 if yesno.lower() == "y": 171 if instance_data["STORAGE_TYPE"] == "MongoDB": 172 raise NotImplementedError( 173 "Support for removing instances with MongoDB as the storage " 174 "is not implemented at this time due to backup support." 175 ) 176 else: 177 print("Backing up the instance's data...") 178 backup_filename = "redv3-{}-{}.tar.gz".format( 179 selected, dt.utcnow().strftime("%Y-%m-%d %H:%M:%S") 180 ) 181 pth = Path(instance_data["DATA_PATH"]) 182 home = pth.home() 183 backup_file = home / backup_filename 184 os.chdir(str(pth.parent)) # str is used here because 3.5 support 185 with tarfile.open(str(backup_file), "w:gz") as tar: 186 tar.add(pth.stem) # add all files in that directory 187 print( 188 "A backup of {} has been made. 
It is at {}".format( 189 selected, backup_file 190 ) 191 ) 192 print("Removing the instance...") 193 shutil.rmtree(str(pth)) 194 save_config(selected, {}, remove=True) 195 print("The instance has been removed") 196 return 197 elif yesno.lower() == "n": 198 print("Ok then") 199 return 200 else: 201 print("That's not a valid option!") 202 return 203 204 205 def main(): 206 if args.delete: 207 try: 208 remove_instance() 209 except NotImplementedError as e: 210 print(str(e)) 211 else: 212 basic_setup() 213 214 args, _ = parse_cli_args() 215 216 if __name__ == "__main__": 217 try: 218 main() 219 except KeyboardInterrupt: 220 print("Exiting...") 221 else: 222 print("Exiting...") 223 [end of redbot/setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
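Condensed from `remove_instance()` in the prompt, the backup step tars the instance's data directory into the user's home before anything is deleted (a sketch; the timestamp format and the `str()` calls are copied from the source):

```python
import os
import tarfile
from datetime import datetime as dt
from pathlib import Path

def backup_instance(data_path, name):
    pth = Path(data_path)
    backup_file = pth.home() / "redv3-{}-{}.tar.gz".format(
        name, dt.utcnow().strftime("%Y-%m-%d %H:%M:%S"))
    os.chdir(str(pth.parent))          # str() kept for Python 3.5 support, as in the source
    with tarfile.open(str(backup_file), "w:gz") as tar:
        tar.add(pth.stem)              # archive the whole data directory
    return backup_file
```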
diff --git a/redbot/setup.py b/redbot/setup.py --- a/redbot/setup.py +++ b/redbot/setup.py @@ -190,12 +190,22 @@ ) ) print("Removing the instance...") - shutil.rmtree(str(pth)) + try: + shutil.rmtree(str(pth)) + except FileNotFoundError: + pass # data dir was removed manually save_config(selected, {}, remove=True) print("The instance has been removed") return elif yesno.lower() == "n": - print("Ok then") + pth = Path(instance_data["DATA_PATH"]) + print("Removing the instance...") + try: + shutil.rmtree(str(pth)) + except FileNotFoundError: + pass # data dir was removed manually + save_config(selected, {}, remove=True) + print("The instance has been removed") return else: print("That's not a valid option!")
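After the patch, both answers lead to the instance being removed; only the backup step differs. A sketch of the resulting control flow (helper and parameter names here are mine, not the project's):

```python
import shutil
from pathlib import Path

def delete_instance(instance_data, instances, name, make_backup):
    if make_backup:
        pass  # create the tar.gz backup first, as in the prompt's remove_instance()
    try:
        shutil.rmtree(str(Path(instance_data["DATA_PATH"])))
    except FileNotFoundError:
        pass  # data dir was already removed manually
    instances.pop(name, None)  # equivalent of save_config(selected, {}, remove=True)
    return instances
```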
{"golden_diff": "diff --git a/redbot/setup.py b/redbot/setup.py\n--- a/redbot/setup.py\n+++ b/redbot/setup.py\n@@ -190,12 +190,22 @@\n )\n )\n print(\"Removing the instance...\")\n- shutil.rmtree(str(pth))\n+ try:\n+ shutil.rmtree(str(pth))\n+ except FileNotFoundError:\n+ pass # data dir was removed manually\n save_config(selected, {}, remove=True)\n print(\"The instance has been removed\")\n return\n elif yesno.lower() == \"n\":\n- print(\"Ok then\")\n+ pth = Path(instance_data[\"DATA_PATH\"])\n+ print(\"Removing the instance...\")\n+ try:\n+ shutil.rmtree(str(pth))\n+ except FileNotFoundError:\n+ pass # data dir was removed manually\n+ save_config(selected, {}, remove=True)\n+ print(\"The instance has been removed\")\n return\n else:\n print(\"That's not a valid option!\")\n", "issue": "[V3] Instance deletion requires making a backup\nPlease be sure to read through other issues as well to make sure what you are suggesting/reporting has not already\r\nbeen suggested/reported\r\n\r\n### Type:\r\n\r\n- [ ] Suggestion\r\n- [x] Bug\r\n\r\n### Brief description of the problem\r\nInstance deletion doesn't work if you don't want to make a backup\r\n### Expected behavior\r\nShould just remove the instance if the user opts not to take a backup\r\n### Actual behavior\r\nIt just exits\r\n### Steps to reproduce\r\n\r\n1. Run `redbot-setup --delete`\r\n2. Select an instance\r\n3. Answer `n` to the question about making a backup\r\n\r\n### Temporary workaround\r\nHave it make a backup and just delete the backup manually if you don't want it\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\nimport argparse\nimport os\nimport shutil\nimport sys\nimport tarfile\nfrom copy import deepcopy\nfrom datetime import datetime as dt\nfrom pathlib import Path\n\nimport appdirs\nfrom redbot.core.cli import confirm\nfrom redbot.core.data_manager import basic_config_default\nfrom redbot.core.json_io import JsonIO\n\nconfig_dir = None\nappdir = appdirs.AppDirs(\"Red-DiscordBot\")\nif sys.platform == 'linux':\n if 0 < os.getuid() < 1000:\n config_dir = Path(appdir.site_data_dir)\nif not config_dir:\n config_dir = Path(appdir.user_config_dir)\ntry:\n config_dir.mkdir(parents=True, exist_ok=True)\nexcept PermissionError:\n print(\n \"You don't have permission to write to \"\n \"'{}'\\nExiting...\".format(config_dir))\n sys.exit(1)\nconfig_file = config_dir / 'config.json'\n\n\ndef parse_cli_args():\n parser = argparse.ArgumentParser(\n description=\"Red - Discord Bot's instance manager (V3)\"\n )\n parser.add_argument(\n \"--delete\", \"-d\",\n help=\"Interactively delete an instance\",\n action=\"store_true\"\n )\n return parser.parse_known_args()\n\n\ndef load_existing_config():\n if not config_file.exists():\n return {}\n\n return JsonIO(config_file)._load_json()\n\n\ndef save_config(name, data, remove=False):\n config = load_existing_config()\n if remove and name in config:\n config.pop(name)\n else:\n config[name] = data\n JsonIO(config_file)._save_json(config)\n\n\ndef basic_setup():\n \"\"\"\n Creates the data storage folder.\n :return:\n \"\"\"\n\n default_data_dir = Path(appdir.user_data_dir)\n\n print(\"Hello! Before we begin the full configuration process we need to\"\n \" gather some initial information about where you'd like us\"\n \" to store your bot's data. We've attempted to figure out a\"\n \" sane default data location which is printed below. 
If you don't\"\n \" want to change this default please press [ENTER], otherwise\"\n \" input your desired data location.\")\n print()\n print(\"Default: {}\".format(default_data_dir))\n\n new_path = input('> ')\n\n if new_path != '':\n new_path = Path(new_path)\n default_data_dir = new_path\n\n if not default_data_dir.exists():\n try:\n default_data_dir.mkdir(parents=True, exist_ok=True)\n except OSError:\n print(\"We were unable to create your chosen directory.\"\n \" You may need to restart this process with admin\"\n \" privileges.\")\n sys.exit(1)\n\n print(\"You have chosen {} to be your data directory.\"\n \"\".format(default_data_dir))\n if not confirm(\"Please confirm (y/n):\"):\n print(\"Please start the process over.\")\n sys.exit(0)\n\n default_dirs = deepcopy(basic_config_default)\n default_dirs['DATA_PATH'] = str(default_data_dir.resolve())\n\n storage_dict = {\n 1: \"JSON\",\n 2: \"MongoDB\"\n }\n storage = None\n while storage is None:\n print()\n print(\"Please choose your storage backend (if you're unsure, choose 1).\")\n print(\"1. JSON (file storage, requires no database).\")\n print(\"2. MongoDB\")\n storage = input(\"> \")\n try:\n storage = int(storage)\n except ValueError:\n storage = None\n else:\n if storage not in storage_dict:\n storage = None\n\n default_dirs['STORAGE_TYPE'] = storage_dict.get(storage, 1)\n\n if storage_dict.get(storage, 1) == \"MongoDB\":\n from redbot.core.drivers.red_mongo import get_config_details\n default_dirs['STORAGE_DETAILS'] = get_config_details()\n else:\n default_dirs['STORAGE_DETAILS'] = {}\n\n name = \"\"\n while len(name) == 0:\n print()\n print(\"Please enter a name for your instance, this name cannot include spaces\"\n \" and it will be used to run your bot from here on out.\")\n name = input(\"> \")\n if \" \" in name:\n name = \"\"\n\n save_config(name, default_dirs)\n\n print()\n print(\"Your basic configuration has been saved. Please run `redbot <name>` to\"\n \" continue your setup process and to run the bot.\")\n\n\ndef remove_instance():\n instance_list = load_existing_config()\n if not instance_list:\n print(\"No instances have been set up!\")\n return\n\n print(\n \"You have chosen to remove an instance. The following \"\n \"is a list of instances that currently exist:\\n\"\n )\n for instance in instance_list.keys():\n print(\"{}\\n\".format(instance))\n print(\"Please select one of the above by entering its name\")\n selected = input(\"> \")\n \n if selected not in instance_list.keys():\n print(\"That isn't a valid instance!\")\n return\n instance_data = instance_list[selected]\n print(\n \"Would you like to make a backup of \"\n \"the data for this instance (y/n)?\"\n )\n yesno = input(\"> \")\n if yesno.lower() == \"y\":\n if instance_data[\"STORAGE_TYPE\"] == \"MongoDB\":\n raise NotImplementedError(\n \"Support for removing instances with MongoDB as the storage \"\n \"is not implemented at this time due to backup support.\"\n )\n else:\n print(\"Backing up the instance's data...\")\n backup_filename = \"redv3-{}-{}.tar.gz\".format(\n selected, dt.utcnow().strftime(\"%Y-%m-%d %H:%M:%S\")\n )\n pth = Path(instance_data[\"DATA_PATH\"])\n home = pth.home()\n backup_file = home / backup_filename\n os.chdir(str(pth.parent)) # str is used here because 3.5 support\n with tarfile.open(str(backup_file), \"w:gz\") as tar:\n tar.add(pth.stem) # add all files in that directory\n print(\n \"A backup of {} has been made. 
It is at {}\".format(\n selected, backup_file\n )\n )\n print(\"Removing the instance...\")\n shutil.rmtree(str(pth))\n save_config(selected, {}, remove=True)\n print(\"The instance has been removed\")\n return\n elif yesno.lower() == \"n\":\n print(\"Ok then\")\n return\n else:\n print(\"That's not a valid option!\")\n return\n\n\ndef main():\n if args.delete:\n try:\n remove_instance()\n except NotImplementedError as e:\n print(str(e))\n else:\n basic_setup()\n\nargs, _ = parse_cli_args()\n\nif __name__ == \"__main__\":\n try:\n main()\n except KeyboardInterrupt:\n print(\"Exiting...\")\n else:\n print(\"Exiting...\")\n", "path": "redbot/setup.py"}]}
2,747
213
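Note on the record above: its golden diff tolerates a data directory that was already deleted by hand and removes the instance from the config whether or not a backup was requested. A minimal illustrative sketch of that pattern follows; the helper name and the injected `backup_fn` callable are assumptions for the example, not part of the original Red-DiscordBot code.

```python
import shutil
from pathlib import Path


def remove_instance_data(data_path, backup_fn=None):
    """Delete an instance's data directory, tolerating one removed manually."""
    pth = Path(data_path)
    if backup_fn is not None:
        backup_fn(pth)  # hypothetical backup callable supplied by the caller
    try:
        shutil.rmtree(str(pth))
    except FileNotFoundError:
        pass  # data dir was removed by hand; nothing left to delete
```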
gh_patches_debug_11846
rasdani/github-patches
git_diff
DataDog__dd-trace-py-2715
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> mypy stub files mypy throws an error when ddtrace is in use: ![image](https://user-images.githubusercontent.com/284476/58525411-a7ff0c00-8199-11e9-8bcb-770aeeeda3fb.png) It would be nice if this library could ship with stub files or be put on https://github.com/python/typeshed. I'm not sure how to do that yet but maybe someone here is. More info from mypy side here https://mypy.readthedocs.io/en/latest/running_mypy.html#missing-imports </issue> <code> [start of setup.py] 1 import os 2 import platform 3 import sys 4 5 from setuptools import setup, find_packages, Extension 6 from setuptools.command.test import test as TestCommand 7 8 # ORDER MATTERS 9 # Import this after setuptools or it will fail 10 from Cython.Build import cythonize # noqa: I100 11 import Cython.Distutils 12 13 14 HERE = os.path.dirname(os.path.abspath(__file__)) 15 16 17 def load_module_from_project_file(mod_name, fname): 18 """ 19 Helper used to load a module from a file in this project 20 21 DEV: Loading this way will by-pass loading all parent modules 22 e.g. importing `ddtrace.vendor.psutil.setup` will load `ddtrace/__init__.py` 23 which has side effects like loading the tracer 24 """ 25 fpath = os.path.join(HERE, fname) 26 27 if sys.version_info >= (3, 5): 28 import importlib.util 29 30 spec = importlib.util.spec_from_file_location(mod_name, fpath) 31 mod = importlib.util.module_from_spec(spec) 32 spec.loader.exec_module(mod) 33 return mod 34 elif sys.version_info >= (3, 3): 35 from importlib.machinery import SourceFileLoader 36 37 return SourceFileLoader(mod_name, fpath).load_module() 38 else: 39 import imp 40 41 return imp.load_source(mod_name, fpath) 42 43 44 class Tox(TestCommand): 45 46 user_options = [("tox-args=", "a", "Arguments to pass to tox")] 47 48 def initialize_options(self): 49 TestCommand.initialize_options(self) 50 self.tox_args = None 51 52 def finalize_options(self): 53 TestCommand.finalize_options(self) 54 self.test_args = [] 55 self.test_suite = True 56 57 def run_tests(self): 58 # import here, cause outside the eggs aren't loaded 59 import tox 60 import shlex 61 62 args = self.tox_args 63 if args: 64 args = shlex.split(self.tox_args) 65 errno = tox.cmdline(args=args) 66 sys.exit(errno) 67 68 69 long_description = """ 70 # dd-trace-py 71 72 `ddtrace` is Datadog's tracing library for Python. It is used to trace requests 73 as they flow across web servers, databases and microservices so that developers 74 have great visibility into bottlenecks and troublesome requests. 75 76 ## Getting Started 77 78 For a basic product overview, installation and quick start, check out our 79 [setup documentation][setup docs]. 80 81 For more advanced usage and configuration, check out our [API 82 documentation][api docs]. 83 84 For descriptions of terminology used in APM, take a look at the [official 85 documentation][visualization docs]. 
86 87 [setup docs]: https://docs.datadoghq.com/tracing/setup/python/ 88 [api docs]: https://ddtrace.readthedocs.io/ 89 [visualization docs]: https://docs.datadoghq.com/tracing/visualization/ 90 """ 91 92 93 def get_exts_for(name): 94 try: 95 mod = load_module_from_project_file( 96 "ddtrace.vendor.{}.setup".format(name), "ddtrace/vendor/{}/setup.py".format(name) 97 ) 98 return mod.get_extensions() 99 except Exception as e: 100 print("WARNING: Failed to load %s extensions, skipping: %s" % (name, e)) 101 return [] 102 103 104 if sys.byteorder == "big": 105 encoding_macros = [("__BIG_ENDIAN__", "1")] 106 else: 107 encoding_macros = [("__LITTLE_ENDIAN__", "1")] 108 109 110 if platform.system() == "Windows": 111 encoding_libraries = ["ws2_32"] 112 extra_compile_args = [] 113 debug_compile_args = [] 114 else: 115 encoding_libraries = [] 116 extra_compile_args = ["-DPy_BUILD_CORE"] 117 if "DD_COMPILE_DEBUG" in os.environ: 118 if platform.system() == "Linux": 119 debug_compile_args = ["-g", "-O0", "-Werror", "-Wall", "-Wextra", "-Wpedantic", "-fanalyzer"] 120 else: 121 debug_compile_args = [ 122 "-g", 123 "-O0", 124 "-Werror", 125 "-Wall", 126 "-Wextra", 127 "-Wpedantic", 128 # Cython is not deprecation-proof 129 "-Wno-deprecated-declarations", 130 ] 131 else: 132 debug_compile_args = [] 133 134 135 if sys.version_info[:2] >= (3, 4): 136 ext_modules = [ 137 Extension( 138 "ddtrace.profiling.collector._memalloc", 139 sources=[ 140 "ddtrace/profiling/collector/_memalloc.c", 141 "ddtrace/profiling/collector/_memalloc_tb.c", 142 "ddtrace/profiling/collector/_memalloc_heap.c", 143 ], 144 extra_compile_args=debug_compile_args, 145 ), 146 ] 147 else: 148 ext_modules = [] 149 150 setup( 151 name="ddtrace", 152 description="Datadog tracing code", 153 url="https://github.com/DataDog/dd-trace-py", 154 author="Datadog, Inc.", 155 author_email="[email protected]", 156 long_description=long_description, 157 long_description_content_type="text/markdown", 158 license="BSD", 159 packages=find_packages(exclude=["tests*"]), 160 py_modules=["ddtrace_gevent_check"], 161 python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*", 162 # enum34 is an enum backport for earlier versions of python 163 # funcsigs backport required for vendored debtcollector 164 install_requires=[ 165 "enum34; python_version<'3.4'", 166 "funcsigs>=1.0.0; python_version=='2.7'", 167 "typing; python_version<'3.5'", 168 "packaging>=17.1", 169 "protobuf>=3", 170 "tenacity>=5", 171 "attrs>=19.2.0", 172 "six>=1.12.0", 173 "pep562; python_version<'3.7'", 174 ], 175 extras_require={ 176 # users can include opentracing by having: 177 # install_requires=['ddtrace[opentracing]', ...] 
178 "opentracing": ["opentracing>=2.0.0"], 179 }, 180 # plugin tox 181 tests_require=["tox", "flake8"], 182 cmdclass={"test": Tox}, 183 entry_points={ 184 "console_scripts": [ 185 "ddtrace-run = ddtrace.commands.ddtrace_run:main", 186 ], 187 "pytest11": ["ddtrace = ddtrace.contrib.pytest.plugin"], 188 "gevent.plugins.monkey.did_patch_all": [ 189 "ddtrace_gevent_check = ddtrace_gevent_check:gevent_patch_all", 190 ], 191 }, 192 classifiers=[ 193 "Programming Language :: Python", 194 "Programming Language :: Python :: 2.7", 195 "Programming Language :: Python :: 3.5", 196 "Programming Language :: Python :: 3.6", 197 "Programming Language :: Python :: 3.7", 198 "Programming Language :: Python :: 3.8", 199 "Programming Language :: Python :: 3.9", 200 ], 201 use_scm_version=True, 202 setup_requires=["setuptools_scm[toml]>=4", "cython"], 203 ext_modules=ext_modules 204 + cythonize( 205 [ 206 Cython.Distutils.Extension( 207 "ddtrace.internal._rand", 208 sources=["ddtrace/internal/_rand.pyx"], 209 language="c", 210 ), 211 Extension( 212 "ddtrace.internal._encoding", 213 ["ddtrace/internal/_encoding.pyx"], 214 include_dirs=["."], 215 libraries=encoding_libraries, 216 define_macros=encoding_macros, 217 ), 218 Cython.Distutils.Extension( 219 "ddtrace.profiling.collector.stack", 220 sources=["ddtrace/profiling/collector/stack.pyx"], 221 language="c", 222 extra_compile_args=extra_compile_args, 223 ), 224 Cython.Distutils.Extension( 225 "ddtrace.profiling.collector._traceback", 226 sources=["ddtrace/profiling/collector/_traceback.pyx"], 227 language="c", 228 ), 229 Cython.Distutils.Extension( 230 "ddtrace.profiling.collector._threading", 231 sources=["ddtrace/profiling/collector/_threading.pyx"], 232 language="c", 233 ), 234 Cython.Distutils.Extension( 235 "ddtrace.profiling.exporter.pprof", 236 sources=["ddtrace/profiling/exporter/pprof.pyx"], 237 language="c", 238 ), 239 Cython.Distutils.Extension( 240 "ddtrace.profiling._build", 241 sources=["ddtrace/profiling/_build.pyx"], 242 language="c", 243 ), 244 ], 245 compile_time_env={ 246 "PY_MAJOR_VERSION": sys.version_info.major, 247 "PY_MINOR_VERSION": sys.version_info.minor, 248 "PY_MICRO_VERSION": sys.version_info.micro, 249 }, 250 force=True, 251 ) 252 + get_exts_for("wrapt") 253 + get_exts_for("psutil"), 254 ) 255 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -157,8 +157,10 @@ long_description_content_type="text/markdown", license="BSD", packages=find_packages(exclude=["tests*"]), + package_data={"ddtrace": ["py.typed"]}, py_modules=["ddtrace_gevent_check"], python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*", + zip_safe=False, # enum34 is an enum backport for earlier versions of python # funcsigs backport required for vendored debtcollector install_requires=[
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -157,8 +157,10 @@\n long_description_content_type=\"text/markdown\",\n license=\"BSD\",\n packages=find_packages(exclude=[\"tests*\"]),\n+ package_data={\"ddtrace\": [\"py.typed\"]},\n py_modules=[\"ddtrace_gevent_check\"],\n python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*\",\n+ zip_safe=False,\n # enum34 is an enum backport for earlier versions of python\n # funcsigs backport required for vendored debtcollector\n install_requires=[\n", "issue": "mypy stub files\nmypy throws an error when ddtrace is in use:\r\n\r\n![image](https://user-images.githubusercontent.com/284476/58525411-a7ff0c00-8199-11e9-8bcb-770aeeeda3fb.png)\r\n\r\n\r\nIt would be nice if this library could ship with stub files or be put on https://github.com/python/typeshed. I'm not sure how to do that yet but maybe someone here is.\r\n\r\nMore info from mypy side here https://mypy.readthedocs.io/en/latest/running_mypy.html#missing-imports\n", "before_files": [{"content": "import os\nimport platform\nimport sys\n\nfrom setuptools import setup, find_packages, Extension\nfrom setuptools.command.test import test as TestCommand\n\n# ORDER MATTERS\n# Import this after setuptools or it will fail\nfrom Cython.Build import cythonize # noqa: I100\nimport Cython.Distutils\n\n\nHERE = os.path.dirname(os.path.abspath(__file__))\n\n\ndef load_module_from_project_file(mod_name, fname):\n \"\"\"\n Helper used to load a module from a file in this project\n\n DEV: Loading this way will by-pass loading all parent modules\n e.g. importing `ddtrace.vendor.psutil.setup` will load `ddtrace/__init__.py`\n which has side effects like loading the tracer\n \"\"\"\n fpath = os.path.join(HERE, fname)\n\n if sys.version_info >= (3, 5):\n import importlib.util\n\n spec = importlib.util.spec_from_file_location(mod_name, fpath)\n mod = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(mod)\n return mod\n elif sys.version_info >= (3, 3):\n from importlib.machinery import SourceFileLoader\n\n return SourceFileLoader(mod_name, fpath).load_module()\n else:\n import imp\n\n return imp.load_source(mod_name, fpath)\n\n\nclass Tox(TestCommand):\n\n user_options = [(\"tox-args=\", \"a\", \"Arguments to pass to tox\")]\n\n def initialize_options(self):\n TestCommand.initialize_options(self)\n self.tox_args = None\n\n def finalize_options(self):\n TestCommand.finalize_options(self)\n self.test_args = []\n self.test_suite = True\n\n def run_tests(self):\n # import here, cause outside the eggs aren't loaded\n import tox\n import shlex\n\n args = self.tox_args\n if args:\n args = shlex.split(self.tox_args)\n errno = tox.cmdline(args=args)\n sys.exit(errno)\n\n\nlong_description = \"\"\"\n# dd-trace-py\n\n`ddtrace` is Datadog's tracing library for Python. 
It is used to trace requests\nas they flow across web servers, databases and microservices so that developers\nhave great visibility into bottlenecks and troublesome requests.\n\n## Getting Started\n\nFor a basic product overview, installation and quick start, check out our\n[setup documentation][setup docs].\n\nFor more advanced usage and configuration, check out our [API\ndocumentation][api docs].\n\nFor descriptions of terminology used in APM, take a look at the [official\ndocumentation][visualization docs].\n\n[setup docs]: https://docs.datadoghq.com/tracing/setup/python/\n[api docs]: https://ddtrace.readthedocs.io/\n[visualization docs]: https://docs.datadoghq.com/tracing/visualization/\n\"\"\"\n\n\ndef get_exts_for(name):\n try:\n mod = load_module_from_project_file(\n \"ddtrace.vendor.{}.setup\".format(name), \"ddtrace/vendor/{}/setup.py\".format(name)\n )\n return mod.get_extensions()\n except Exception as e:\n print(\"WARNING: Failed to load %s extensions, skipping: %s\" % (name, e))\n return []\n\n\nif sys.byteorder == \"big\":\n encoding_macros = [(\"__BIG_ENDIAN__\", \"1\")]\nelse:\n encoding_macros = [(\"__LITTLE_ENDIAN__\", \"1\")]\n\n\nif platform.system() == \"Windows\":\n encoding_libraries = [\"ws2_32\"]\n extra_compile_args = []\n debug_compile_args = []\nelse:\n encoding_libraries = []\n extra_compile_args = [\"-DPy_BUILD_CORE\"]\n if \"DD_COMPILE_DEBUG\" in os.environ:\n if platform.system() == \"Linux\":\n debug_compile_args = [\"-g\", \"-O0\", \"-Werror\", \"-Wall\", \"-Wextra\", \"-Wpedantic\", \"-fanalyzer\"]\n else:\n debug_compile_args = [\n \"-g\",\n \"-O0\",\n \"-Werror\",\n \"-Wall\",\n \"-Wextra\",\n \"-Wpedantic\",\n # Cython is not deprecation-proof\n \"-Wno-deprecated-declarations\",\n ]\n else:\n debug_compile_args = []\n\n\nif sys.version_info[:2] >= (3, 4):\n ext_modules = [\n Extension(\n \"ddtrace.profiling.collector._memalloc\",\n sources=[\n \"ddtrace/profiling/collector/_memalloc.c\",\n \"ddtrace/profiling/collector/_memalloc_tb.c\",\n \"ddtrace/profiling/collector/_memalloc_heap.c\",\n ],\n extra_compile_args=debug_compile_args,\n ),\n ]\nelse:\n ext_modules = []\n\nsetup(\n name=\"ddtrace\",\n description=\"Datadog tracing code\",\n url=\"https://github.com/DataDog/dd-trace-py\",\n author=\"Datadog, Inc.\",\n author_email=\"[email protected]\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n license=\"BSD\",\n packages=find_packages(exclude=[\"tests*\"]),\n py_modules=[\"ddtrace_gevent_check\"],\n python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*\",\n # enum34 is an enum backport for earlier versions of python\n # funcsigs backport required for vendored debtcollector\n install_requires=[\n \"enum34; python_version<'3.4'\",\n \"funcsigs>=1.0.0; python_version=='2.7'\",\n \"typing; python_version<'3.5'\",\n \"packaging>=17.1\",\n \"protobuf>=3\",\n \"tenacity>=5\",\n \"attrs>=19.2.0\",\n \"six>=1.12.0\",\n \"pep562; python_version<'3.7'\",\n ],\n extras_require={\n # users can include opentracing by having:\n # install_requires=['ddtrace[opentracing]', ...]\n \"opentracing\": [\"opentracing>=2.0.0\"],\n },\n # plugin tox\n tests_require=[\"tox\", \"flake8\"],\n cmdclass={\"test\": Tox},\n entry_points={\n \"console_scripts\": [\n \"ddtrace-run = ddtrace.commands.ddtrace_run:main\",\n ],\n \"pytest11\": [\"ddtrace = ddtrace.contrib.pytest.plugin\"],\n \"gevent.plugins.monkey.did_patch_all\": [\n \"ddtrace_gevent_check = ddtrace_gevent_check:gevent_patch_all\",\n ],\n },\n classifiers=[\n 
\"Programming Language :: Python\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n ],\n use_scm_version=True,\n setup_requires=[\"setuptools_scm[toml]>=4\", \"cython\"],\n ext_modules=ext_modules\n + cythonize(\n [\n Cython.Distutils.Extension(\n \"ddtrace.internal._rand\",\n sources=[\"ddtrace/internal/_rand.pyx\"],\n language=\"c\",\n ),\n Extension(\n \"ddtrace.internal._encoding\",\n [\"ddtrace/internal/_encoding.pyx\"],\n include_dirs=[\".\"],\n libraries=encoding_libraries,\n define_macros=encoding_macros,\n ),\n Cython.Distutils.Extension(\n \"ddtrace.profiling.collector.stack\",\n sources=[\"ddtrace/profiling/collector/stack.pyx\"],\n language=\"c\",\n extra_compile_args=extra_compile_args,\n ),\n Cython.Distutils.Extension(\n \"ddtrace.profiling.collector._traceback\",\n sources=[\"ddtrace/profiling/collector/_traceback.pyx\"],\n language=\"c\",\n ),\n Cython.Distutils.Extension(\n \"ddtrace.profiling.collector._threading\",\n sources=[\"ddtrace/profiling/collector/_threading.pyx\"],\n language=\"c\",\n ),\n Cython.Distutils.Extension(\n \"ddtrace.profiling.exporter.pprof\",\n sources=[\"ddtrace/profiling/exporter/pprof.pyx\"],\n language=\"c\",\n ),\n Cython.Distutils.Extension(\n \"ddtrace.profiling._build\",\n sources=[\"ddtrace/profiling/_build.pyx\"],\n language=\"c\",\n ),\n ],\n compile_time_env={\n \"PY_MAJOR_VERSION\": sys.version_info.major,\n \"PY_MINOR_VERSION\": sys.version_info.minor,\n \"PY_MICRO_VERSION\": sys.version_info.micro,\n },\n force=True,\n )\n + get_exts_for(\"wrapt\")\n + get_exts_for(\"psutil\"),\n)\n", "path": "setup.py"}]}
3,250
158
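Note on the record above: the golden diff wires up PEP 561 support by shipping a `py.typed` marker in `package_data` and setting `zip_safe=False` so mypy can read the package's inline annotations. A stripped-down sketch of that wiring, assuming a hypothetical project name (`example-package` / `example_package` are placeholders, not the real ddtrace values):

```python
from setuptools import find_packages, setup

setup(
    name="example-package",  # illustrative project name
    packages=find_packages(exclude=["tests*"]),
    # PEP 561: ship the py.typed marker with the package so type checkers
    # such as mypy pick up the inline annotations instead of reporting
    # "missing imports".
    package_data={"example_package": ["py.typed"]},
    zip_safe=False,  # a zipped egg would hide py.typed from mypy
)
```

The `py.typed` file itself is just an empty marker placed next to the package's `__init__.py`.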
gh_patches_debug_23060
rasdani/github-patches
git_diff
deis__deis-661
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> nodes:scale fails on Vagrant Looks like a path issue when we shifted around the project in #641. The problem that exists here though is that the `contrib` directory no longer exists in the `deis/controller` image, since it only adds the `controller/` path as per https://github.com/opdemand/deis/blob/master/controller/Dockerfile#L19. Should we move `contrib` to `controller/contrib`, so that the directory is present in the docker image? ``` ><> deis nodes:scale dev runtime=1 Scaling nodes... but first, coffee! 400 BAD REQUEST [Errno 2] No such file or directory: u'/app/deis/contrib/vagrant/util/nodes_vagrantfile_template.rb' ``` Note that the source code now exists at `/app` in the container, so there will have to be another PR to change the path hardcoded at https://github.com/opdemand/deis/blob/master/controller/provider/vagrant.py#L82. </issue> <code> [start of controller/provider/vagrant.py] 1 """ 2 Deis cloud provider implementation for local vagrant setups. 3 """ 4 5 from __future__ import unicode_literals 6 7 from api.ssh import exec_ssh, connect_ssh 8 9 import json 10 import logging 11 import string 12 import subprocess 13 import uuid 14 15 from api.models import Layer 16 from api.models import Node 17 18 logger = logging.getLogger(__name__) 19 20 CONTRIB_PATH = '/app/deis/contrib/vagrant' 21 22 # Collect details for connecting to the host machine 23 try: 24 HOST_NODES_DIR = open('{}/.host_nodes_dir'.format(CONTRIB_PATH)).read().strip() 25 PKEY = open('{}/util/ssh_keys/id_rsa_vagrant-deis-controller'.format(CONTRIB_PATH)).read() 26 except IOError as err: 27 logger.warn(err) 28 29 30 def seed_flavors(): 31 """Seed the database with default flavors for vagrant. 32 33 :rtype: list of dicts containing flavor data 34 """ 35 flavors = [] 36 for m in ['512', '1024', '2048']: 37 flavors.append({ 38 'id': "vagrant-{}".format(m), 39 'provider': 'vagrant', 40 'params': json.dumps({ 41 'memory': m 42 }) 43 }) 44 return flavors 45 46 47 def build_layer(layer): 48 """ 49 Build a layer. 50 51 :param layer: a dict containing formation, id, params, and creds info 52 """ 53 54 # This can also be done with `deis layers:update` now. 55 layer_ = Layer.objects.get(id=layer['id'], formation__id=layer['formation']) 56 layer_.ssh_username = 'vagrant' 57 layer_.save() 58 59 60 def destroy_layer(layer): 61 """ 62 Destroy a layer. 63 64 :param layer: a dict containing formation, id, params, and creds info 65 """ 66 pass 67 68 69 def build_node(node): 70 """ 71 Build a node. 72 73 :param node: a dict containing formation, layer, params, and creds info. 74 :rtype: a tuple of (provider_id, fully_qualified_domain_name, metadata) 75 """ 76 77 # Can't use the vagrant UUID because it's not booted yet 78 uid = str(uuid.uuid1()) 79 80 # Create a new Vagrantfile from a template 81 node['params'].setdefault('memory', '512') 82 template = open('/app/deis/contrib/vagrant/util/nodes_vagrantfile_template.rb') 83 raw = string.Template(template.read()) 84 ip_addr = '192.168.61.' + str(Node.objects.all().count() + 100) 85 result = raw.substitute({ 86 'id': uid, 87 'ipaddress': ip_addr, 88 'memory': node['params']['memory'] 89 }) 90 91 # Make a folder for the VM with its own Vagrantfile. Vagrant will then create a .vagrant folder 92 # there too when it first gets booted. 
93 node_dir = HOST_NODES_DIR + '/' + uid 94 mkdir = 'mkdir -p "{}"'.format(node_dir) 95 cp_tpl = 'echo "' + result.replace('"', '\\"') + '" > "{}/Vagrantfile"'.format(node_dir) 96 _host_ssh(commands=[mkdir, cp_tpl], creds=node['creds']) 97 98 # Boot the VM 99 _run_vagrant_command(uid, args=['up'], creds=node['creds']) 100 101 # Copy the layer's public SSH key to the VM so that the Controller can access it. 102 _run_vagrant_command( 103 uid, 104 args=[ 105 'ssh', 106 '-c', 107 '"echo \\"' + node['ssh_public_key'] + '\\" >> /home/vagrant/.ssh/authorized_keys"' 108 ], 109 creds=node['creds'], 110 ) 111 112 provider_id = uid 113 metadata = { 114 'id': uid, 115 'fqdn': ip_addr, 116 'flavor': node['params']['memory'] 117 } 118 return provider_id, ip_addr, metadata 119 120 121 def destroy_node(node): 122 """ 123 Destroy a node. 124 125 :param node: a dict containing a node's provider_id, params, and creds 126 """ 127 128 # This is useful if node creation failed. So that there's a record in the DB, but it has no 129 # ID associated with it. 130 if node['provider_id'] is None: 131 return 132 133 # Shut the VM down and destroy it 134 try: 135 _run_vagrant_command(node['provider_id'], args=['destroy', '--force'], creds=node['creds']) 136 node_dir = HOST_NODES_DIR + '/' + node['provider_id'] 137 138 # Sanity check before `rm -rf` 139 if 'contrib/vagrant' not in node_dir: 140 raise RuntimeError( 141 "Aborted node destruction: attempting to 'rm -rf' unexpected directory") 142 143 # Completely remove the folder that contained the VM 144 rm_vagrantfile = 'rm "{}/Vagrantfile"'.format(node_dir) 145 rm_node_dir = 'rm -rf "{}"'.format(node_dir) 146 _host_ssh(commands=[rm_vagrantfile, rm_node_dir], creds=node['creds']) 147 except RuntimeError as err: 148 # If we couldn't cd to the node dir, just log that as a warning 149 if 'no such file or directory' in str(err).lower(): 150 logger.warn(err) 151 else: 152 raise 153 154 155 def _run_vagrant_command(node_id, args=[], creds={}): 156 """ 157 args: A tuple of arguments to a vagrant command line. 158 e.g. ['up', 'my_vm_name', '--no-provision'] 159 """ 160 161 cd = 'cd "{}/{}"'.format(HOST_NODES_DIR, node_id) 162 command = ['vagrant'] + [arg for arg in args if arg is not None] 163 return _host_ssh(commands=[cd, ' '.join(command)], creds=creds) 164 165 166 def _host_ssh(creds={}, commands=[]): 167 """ 168 Connect to the host machine. Namely the user's local machine. 169 """ 170 if creds == {}: 171 raise RuntimeError("No credentials provided to _host_ssh()") 172 command = ' && '.join(commands) 173 174 # First check if we can access the host machine. It's likely that their 175 # IP address changes every time they request a DHCP lease. 176 # TODO: Find a way of passing this error onto the CLI client. 177 try: 178 subprocess.check_call([ 179 'nc', '-z', '-w2', creds['host'], '22' 180 ], stderr=subprocess.PIPE) 181 except subprocess.CalledProcessError: 182 raise RuntimeError("Couldn't ping port 22 at host with IP " + creds['host']) 183 184 ssh = connect_ssh(creds['user'], creds['host'], 22, PKEY, timeout=120) 185 result, status = exec_ssh(ssh, command) 186 if status > 0: 187 raise RuntimeError( 188 'SSH to Vagrant host error: ' + result.decode('utf-8') + 189 'Command: ' + command.decode('utf-8')) 190 return result 191 [end of controller/provider/vagrant.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/controller/provider/vagrant.py b/controller/provider/vagrant.py --- a/controller/provider/vagrant.py +++ b/controller/provider/vagrant.py @@ -17,12 +17,12 @@ logger = logging.getLogger(__name__) -CONTRIB_PATH = '/app/deis/contrib/vagrant' +VAGRANT_UTIL_PATH = '/app/provider/vagrant-util' # Collect details for connecting to the host machine try: - HOST_NODES_DIR = open('{}/.host_nodes_dir'.format(CONTRIB_PATH)).read().strip() - PKEY = open('{}/util/ssh_keys/id_rsa_vagrant-deis-controller'.format(CONTRIB_PATH)).read() + HOST_NODES_DIR = open('{}/.host_nodes_dir'.format(VAGRANT_UTIL_PATH)).read().strip() + PKEY = open('{}/ssh_keys/id_rsa_vagrant-deis-controller'.format(VAGRANT_UTIL_PATH)).read() except IOError as err: logger.warn(err) @@ -79,7 +79,7 @@ # Create a new Vagrantfile from a template node['params'].setdefault('memory', '512') - template = open('/app/deis/contrib/vagrant/util/nodes_vagrantfile_template.rb') + template = open('{}/nodes_vagrantfile_template.rb'.format(VAGRANT_UTIL_PATH)) raw = string.Template(template.read()) ip_addr = '192.168.61.' + str(Node.objects.all().count() + 100) result = raw.substitute({
{"golden_diff": "diff --git a/controller/provider/vagrant.py b/controller/provider/vagrant.py\n--- a/controller/provider/vagrant.py\n+++ b/controller/provider/vagrant.py\n@@ -17,12 +17,12 @@\n \n logger = logging.getLogger(__name__)\n \n-CONTRIB_PATH = '/app/deis/contrib/vagrant'\n+VAGRANT_UTIL_PATH = '/app/provider/vagrant-util'\n \n # Collect details for connecting to the host machine\n try:\n- HOST_NODES_DIR = open('{}/.host_nodes_dir'.format(CONTRIB_PATH)).read().strip()\n- PKEY = open('{}/util/ssh_keys/id_rsa_vagrant-deis-controller'.format(CONTRIB_PATH)).read()\n+ HOST_NODES_DIR = open('{}/.host_nodes_dir'.format(VAGRANT_UTIL_PATH)).read().strip()\n+ PKEY = open('{}/ssh_keys/id_rsa_vagrant-deis-controller'.format(VAGRANT_UTIL_PATH)).read()\n except IOError as err:\n logger.warn(err)\n \n@@ -79,7 +79,7 @@\n \n # Create a new Vagrantfile from a template\n node['params'].setdefault('memory', '512')\n- template = open('/app/deis/contrib/vagrant/util/nodes_vagrantfile_template.rb')\n+ template = open('{}/nodes_vagrantfile_template.rb'.format(VAGRANT_UTIL_PATH))\n raw = string.Template(template.read())\n ip_addr = '192.168.61.' + str(Node.objects.all().count() + 100)\n result = raw.substitute({\n", "issue": "nodes:scale fails on Vagrant\nLooks like a path issue when we shifted around the project in #641. The problem that exists here though is that the `contrib` directory no longer exists in the `deis/controller` image, since it only adds the `controller/` path as per https://github.com/opdemand/deis/blob/master/controller/Dockerfile#L19. Should we move `contrib` to `controller/contrib`, so that the directory is present in the docker image?\n\n```\n><> deis nodes:scale dev runtime=1\nScaling nodes... but first, coffee!\n400 BAD REQUEST\n[Errno 2] No such file or directory: u'/app/deis/contrib/vagrant/util/nodes_vagrantfile_template.rb'\n```\n\nNote that the source code now exists at `/app` in the container, so there will have to be another PR to change the path hardcoded at https://github.com/opdemand/deis/blob/master/controller/provider/vagrant.py#L82.\n\n", "before_files": [{"content": "\"\"\"\nDeis cloud provider implementation for local vagrant setups.\n\"\"\"\n\nfrom __future__ import unicode_literals\n\nfrom api.ssh import exec_ssh, connect_ssh\n\nimport json\nimport logging\nimport string\nimport subprocess\nimport uuid\n\nfrom api.models import Layer\nfrom api.models import Node\n\nlogger = logging.getLogger(__name__)\n\nCONTRIB_PATH = '/app/deis/contrib/vagrant'\n\n# Collect details for connecting to the host machine\ntry:\n HOST_NODES_DIR = open('{}/.host_nodes_dir'.format(CONTRIB_PATH)).read().strip()\n PKEY = open('{}/util/ssh_keys/id_rsa_vagrant-deis-controller'.format(CONTRIB_PATH)).read()\nexcept IOError as err:\n logger.warn(err)\n\n\ndef seed_flavors():\n \"\"\"Seed the database with default flavors for vagrant.\n\n :rtype: list of dicts containing flavor data\n \"\"\"\n flavors = []\n for m in ['512', '1024', '2048']:\n flavors.append({\n 'id': \"vagrant-{}\".format(m),\n 'provider': 'vagrant',\n 'params': json.dumps({\n 'memory': m\n })\n })\n return flavors\n\n\ndef build_layer(layer):\n \"\"\"\n Build a layer.\n\n :param layer: a dict containing formation, id, params, and creds info\n \"\"\"\n\n # This can also be done with `deis layers:update` now.\n layer_ = Layer.objects.get(id=layer['id'], formation__id=layer['formation'])\n layer_.ssh_username = 'vagrant'\n layer_.save()\n\n\ndef destroy_layer(layer):\n \"\"\"\n Destroy a layer.\n\n :param layer: a dict containing 
formation, id, params, and creds info\n \"\"\"\n pass\n\n\ndef build_node(node):\n \"\"\"\n Build a node.\n\n :param node: a dict containing formation, layer, params, and creds info.\n :rtype: a tuple of (provider_id, fully_qualified_domain_name, metadata)\n \"\"\"\n\n # Can't use the vagrant UUID because it's not booted yet\n uid = str(uuid.uuid1())\n\n # Create a new Vagrantfile from a template\n node['params'].setdefault('memory', '512')\n template = open('/app/deis/contrib/vagrant/util/nodes_vagrantfile_template.rb')\n raw = string.Template(template.read())\n ip_addr = '192.168.61.' + str(Node.objects.all().count() + 100)\n result = raw.substitute({\n 'id': uid,\n 'ipaddress': ip_addr,\n 'memory': node['params']['memory']\n })\n\n # Make a folder for the VM with its own Vagrantfile. Vagrant will then create a .vagrant folder\n # there too when it first gets booted.\n node_dir = HOST_NODES_DIR + '/' + uid\n mkdir = 'mkdir -p \"{}\"'.format(node_dir)\n cp_tpl = 'echo \"' + result.replace('\"', '\\\\\"') + '\" > \"{}/Vagrantfile\"'.format(node_dir)\n _host_ssh(commands=[mkdir, cp_tpl], creds=node['creds'])\n\n # Boot the VM\n _run_vagrant_command(uid, args=['up'], creds=node['creds'])\n\n # Copy the layer's public SSH key to the VM so that the Controller can access it.\n _run_vagrant_command(\n uid,\n args=[\n 'ssh',\n '-c',\n '\"echo \\\\\"' + node['ssh_public_key'] + '\\\\\" >> /home/vagrant/.ssh/authorized_keys\"'\n ],\n creds=node['creds'],\n )\n\n provider_id = uid\n metadata = {\n 'id': uid,\n 'fqdn': ip_addr,\n 'flavor': node['params']['memory']\n }\n return provider_id, ip_addr, metadata\n\n\ndef destroy_node(node):\n \"\"\"\n Destroy a node.\n\n :param node: a dict containing a node's provider_id, params, and creds\n \"\"\"\n\n # This is useful if node creation failed. So that there's a record in the DB, but it has no\n # ID associated with it.\n if node['provider_id'] is None:\n return\n\n # Shut the VM down and destroy it\n try:\n _run_vagrant_command(node['provider_id'], args=['destroy', '--force'], creds=node['creds'])\n node_dir = HOST_NODES_DIR + '/' + node['provider_id']\n\n # Sanity check before `rm -rf`\n if 'contrib/vagrant' not in node_dir:\n raise RuntimeError(\n \"Aborted node destruction: attempting to 'rm -rf' unexpected directory\")\n\n # Completely remove the folder that contained the VM\n rm_vagrantfile = 'rm \"{}/Vagrantfile\"'.format(node_dir)\n rm_node_dir = 'rm -rf \"{}\"'.format(node_dir)\n _host_ssh(commands=[rm_vagrantfile, rm_node_dir], creds=node['creds'])\n except RuntimeError as err:\n # If we couldn't cd to the node dir, just log that as a warning\n if 'no such file or directory' in str(err).lower():\n logger.warn(err)\n else:\n raise\n\n\ndef _run_vagrant_command(node_id, args=[], creds={}):\n \"\"\"\n args: A tuple of arguments to a vagrant command line.\n e.g. ['up', 'my_vm_name', '--no-provision']\n \"\"\"\n\n cd = 'cd \"{}/{}\"'.format(HOST_NODES_DIR, node_id)\n command = ['vagrant'] + [arg for arg in args if arg is not None]\n return _host_ssh(commands=[cd, ' '.join(command)], creds=creds)\n\n\ndef _host_ssh(creds={}, commands=[]):\n \"\"\"\n Connect to the host machine. Namely the user's local machine.\n \"\"\"\n if creds == {}:\n raise RuntimeError(\"No credentials provided to _host_ssh()\")\n command = ' && '.join(commands)\n\n # First check if we can access the host machine. 
It's likely that their\n # IP address changes every time they request a DHCP lease.\n # TODO: Find a way of passing this error onto the CLI client.\n try:\n subprocess.check_call([\n 'nc', '-z', '-w2', creds['host'], '22'\n ], stderr=subprocess.PIPE)\n except subprocess.CalledProcessError:\n raise RuntimeError(\"Couldn't ping port 22 at host with IP \" + creds['host'])\n\n ssh = connect_ssh(creds['user'], creds['host'], 22, PKEY, timeout=120)\n result, status = exec_ssh(ssh, command)\n if status > 0:\n raise RuntimeError(\n 'SSH to Vagrant host error: ' + result.decode('utf-8') +\n 'Command: ' + command.decode('utf-8'))\n return result\n", "path": "controller/provider/vagrant.py"}]}
2,720
340
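Note on the record above: the golden diff replaces the stale `contrib/` paths with a single base-path constant so every bundled Vagrant asset resolves relative to one location inside the image. A small sketch of that module-level setup, following the paths used in the diff (the exact directory must match wherever the Dockerfile copies the files):

```python
import logging

logger = logging.getLogger(__name__)

# One base-path constant for every bundled Vagrant asset; changing the image
# layout then only requires touching this line.
VAGRANT_UTIL_PATH = "/app/provider/vagrant-util"

try:
    HOST_NODES_DIR = open("{}/.host_nodes_dir".format(VAGRANT_UTIL_PATH)).read().strip()
    PKEY = open("{}/ssh_keys/id_rsa_vagrant-deis-controller".format(VAGRANT_UTIL_PATH)).read()
except IOError as err:
    logger.warning(err)  # missing files are logged, not fatal, at import time
```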
gh_patches_debug_22720
rasdani/github-patches
git_diff
privacyidea__privacyidea-3675
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Error 904 is still shown with policy Action login_mode: privacyIDEA privacyidea 3.8 ubuntu 20 Since 3.7 the error message :´ERR904: The user can not be found in any resolver in this realm!¨ is deactivated when you enter a wrong username. But this is still true if you have a policy : Scope webui Action login_mode: privacyIDEA if you have a policy: Scope webui login_mode: userstore Then the error (ERR904: The user can not be found in any resolver in this realm!) Is deactivated. Kind regards Sebastien </issue> <code> [start of privacyidea/lib/auth.py] 1 # -*- coding: utf-8 -*- 2 # 3 # 2015-11-03 Cornelius Kölbel <[email protected]> 4 # Add check if an admin user exists 5 # 2014-12-15 Cornelius Kölbel, [email protected] 6 # Initial creation 7 # 8 # (c) Cornelius Kölbel 9 # Info: http://www.privacyidea.org 10 # 11 # This code is free software; you can redistribute it and/or 12 # modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE 13 # License as published by the Free Software Foundation; either 14 # version 3 of the License, or any later version. 15 # 16 # This code is distributed in the hope that it will be useful, 17 # but WITHOUT ANY WARRANTY; without even the implied warranty of 18 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 19 # GNU AFFERO GENERAL PUBLIC LICENSE for more details. 20 # 21 # You should have received a copy of the GNU Affero General Public 22 # License along with this program. If not, see <http://www.gnu.org/licenses/>. 23 # 24 from privacyidea.models import Admin 25 from privacyidea.lib.token import check_user_pass 26 from privacyidea.lib.policydecorators import libpolicy, login_mode 27 from privacyidea.lib.crypto import hash_with_pepper, verify_with_pepper 28 from privacyidea.lib.utils import fetch_one_resource 29 30 31 class ROLE(object): 32 ADMIN = "admin" 33 USER = "user" 34 VALIDATE = "validate" 35 36 37 def verify_db_admin(username, password): 38 """ 39 This function is used to verify the username and the password against the 40 database table "Admin". 
41 :param username: The administrator username 42 :param password: The password 43 :return: True if password is correct for the admin 44 :rtype: bool 45 """ 46 success = False 47 qa = Admin.query.filter(Admin.username == username).first() 48 if qa: 49 success = verify_with_pepper(qa.password, password) 50 51 return success 52 53 54 def db_admin_exist(username): 55 """ 56 Checks if a local admin in the database exists 57 58 :param username: The username of the admin 59 :return: True, if exist 60 """ 61 return bool(get_db_admin(username)) 62 63 64 def create_db_admin(app, username, email=None, password=None): 65 pw_dig = None 66 if password: 67 pw_dig = hash_with_pepper(password) 68 user = Admin(email=email, username=username, password=pw_dig) 69 user.save() 70 71 72 def list_db_admin(): 73 admins = Admin.query.all() 74 print("Name \t email") 75 print(30*"=") 76 for admin in admins: 77 print("{0!s} \t {1!s}".format(admin.username, admin.email)) 78 79 80 def get_db_admins(): 81 admins = Admin.query.all() 82 return admins 83 84 85 def get_db_admin(username): 86 return Admin.query.filter(Admin.username == username).first() 87 88 89 def delete_db_admin(username): 90 print("Deleting admin {0!s}".format(username)) 91 fetch_one_resource(Admin, username=username).delete() 92 93 94 @libpolicy(login_mode) 95 def check_webui_user(user_obj, 96 password, 97 options=None, 98 superuser_realms=None, 99 check_otp=False): 100 """ 101 This function is used to authenticate the user at the web ui. 102 It checks against the userstore or against OTP/privacyidea (check_otp). 103 It returns a tuple of 104 105 * true/false if the user authenticated successfully 106 * the role of the user 107 * the "detail" dictionary of the response 108 109 :param user_obj: The user who tries to authenticate 110 :type user_obj: User Object 111 :param password: Password, static and or OTP 112 :param options: additional options like g and clientip 113 :type options: dict 114 :param superuser_realms: list of realms, that contain admins 115 :type superuser_realms: list 116 :param check_otp: If set, the user is not authenticated against the 117 userstore but against privacyidea 118 :return: tuple of bool, string and dict/None 119 """ 120 options = options or {} 121 superuser_realms = superuser_realms or [] 122 user_auth = False 123 role = ROLE.USER 124 details = None 125 126 if check_otp: 127 # check if the given password matches an OTP token 128 check, details = check_user_pass(user_obj, password, options=options) 129 details["loginmode"] = "privacyIDEA" 130 if check: 131 user_auth = True 132 else: 133 # check the password of the user against the userstore 134 if user_obj.check_password(password): 135 user_auth = True 136 137 # If the realm is in the SUPERUSER_REALM then the authorization role 138 # is risen to "admin". 139 if user_obj.realm in superuser_realms: 140 role = ROLE.ADMIN 141 142 return user_auth, role, details 143 [end of privacyidea/lib/auth.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/privacyidea/lib/auth.py b/privacyidea/lib/auth.py --- a/privacyidea/lib/auth.py +++ b/privacyidea/lib/auth.py @@ -26,6 +26,10 @@ from privacyidea.lib.policydecorators import libpolicy, login_mode from privacyidea.lib.crypto import hash_with_pepper, verify_with_pepper from privacyidea.lib.utils import fetch_one_resource +import logging + +log = logging.getLogger(__name__) + class ROLE(object): @@ -125,10 +129,13 @@ if check_otp: # check if the given password matches an OTP token - check, details = check_user_pass(user_obj, password, options=options) - details["loginmode"] = "privacyIDEA" - if check: - user_auth = True + try: + check, details = check_user_pass(user_obj, password, options=options) + details["loginmode"] = "privacyIDEA" + if check: + user_auth = True + except Exception as e: + log.debug("Error authenticating user against privacyIDEA: {0!r}".format(e)) else: # check the password of the user against the userstore if user_obj.check_password(password):
{"golden_diff": "diff --git a/privacyidea/lib/auth.py b/privacyidea/lib/auth.py\n--- a/privacyidea/lib/auth.py\n+++ b/privacyidea/lib/auth.py\n@@ -26,6 +26,10 @@\n from privacyidea.lib.policydecorators import libpolicy, login_mode\n from privacyidea.lib.crypto import hash_with_pepper, verify_with_pepper\n from privacyidea.lib.utils import fetch_one_resource\n+import logging\n+\n+log = logging.getLogger(__name__)\n+\n \n \n class ROLE(object):\n@@ -125,10 +129,13 @@\n \n if check_otp:\n # check if the given password matches an OTP token\n- check, details = check_user_pass(user_obj, password, options=options)\n- details[\"loginmode\"] = \"privacyIDEA\"\n- if check:\n- user_auth = True\n+ try:\n+ check, details = check_user_pass(user_obj, password, options=options)\n+ details[\"loginmode\"] = \"privacyIDEA\"\n+ if check:\n+ user_auth = True\n+ except Exception as e:\n+ log.debug(\"Error authenticating user against privacyIDEA: {0!r}\".format(e))\n else:\n # check the password of the user against the userstore\n if user_obj.check_password(password):\n", "issue": "Error 904 is still shown with policy Action login_mode: privacyIDEA \n\r\nprivacyidea 3.8\r\nubuntu 20\r\n\r\nSince 3.7 the error message :\u00b4ERR904: The user can not be found in any resolver in this realm!\u00a8 is deactivated when you enter a wrong username.\r\n\r\nBut this is still true if you have a policy :\r\nScope webui\r\nAction login_mode: privacyIDEA \r\n\r\nif you have a policy:\r\nScope webui\r\nlogin_mode: userstore \r\nThen the error (ERR904: The user can not be found in any resolver in this realm!) Is deactivated.\r\n\r\nKind regards\r\nSebastien\r\n\r\n\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# 2015-11-03 Cornelius K\u00f6lbel <[email protected]>\n# Add check if an admin user exists\n# 2014-12-15 Cornelius K\u00f6lbel, [email protected]\n# Initial creation\n#\n# (c) Cornelius K\u00f6lbel\n# Info: http://www.privacyidea.org\n#\n# This code is free software; you can redistribute it and/or\n# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE\n# License as published by the Free Software Foundation; either\n# version 3 of the License, or any later version.\n#\n# This code is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU AFFERO GENERAL PUBLIC LICENSE for more details.\n#\n# You should have received a copy of the GNU Affero General Public\n# License along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n#\nfrom privacyidea.models import Admin\nfrom privacyidea.lib.token import check_user_pass\nfrom privacyidea.lib.policydecorators import libpolicy, login_mode\nfrom privacyidea.lib.crypto import hash_with_pepper, verify_with_pepper\nfrom privacyidea.lib.utils import fetch_one_resource\n\n\nclass ROLE(object):\n ADMIN = \"admin\"\n USER = \"user\"\n VALIDATE = \"validate\"\n\n\ndef verify_db_admin(username, password):\n \"\"\"\n This function is used to verify the username and the password against the\n database table \"Admin\".\n :param username: The administrator username\n :param password: The password\n :return: True if password is correct for the admin\n :rtype: bool\n \"\"\"\n success = False\n qa = Admin.query.filter(Admin.username == username).first()\n if qa:\n success = verify_with_pepper(qa.password, password)\n\n return success\n\n\ndef db_admin_exist(username):\n \"\"\"\n Checks if a local admin in the database exists\n\n :param username: The username of the admin\n :return: True, if exist\n \"\"\"\n return bool(get_db_admin(username))\n\n\ndef create_db_admin(app, username, email=None, password=None):\n pw_dig = None\n if password:\n pw_dig = hash_with_pepper(password)\n user = Admin(email=email, username=username, password=pw_dig)\n user.save()\n\n\ndef list_db_admin():\n admins = Admin.query.all()\n print(\"Name \\t email\")\n print(30*\"=\")\n for admin in admins:\n print(\"{0!s} \\t {1!s}\".format(admin.username, admin.email))\n\n\ndef get_db_admins():\n admins = Admin.query.all()\n return admins\n\n\ndef get_db_admin(username):\n return Admin.query.filter(Admin.username == username).first()\n\n\ndef delete_db_admin(username):\n print(\"Deleting admin {0!s}\".format(username))\n fetch_one_resource(Admin, username=username).delete()\n\n\n@libpolicy(login_mode)\ndef check_webui_user(user_obj,\n password,\n options=None,\n superuser_realms=None,\n check_otp=False):\n \"\"\"\n This function is used to authenticate the user at the web ui.\n It checks against the userstore or against OTP/privacyidea (check_otp).\n It returns a tuple of\n\n * true/false if the user authenticated successfully\n * the role of the user\n * the \"detail\" dictionary of the response\n\n :param user_obj: The user who tries to authenticate\n :type user_obj: User Object\n :param password: Password, static and or OTP\n :param options: additional options like g and clientip\n :type options: dict\n :param superuser_realms: list of realms, that contain admins\n :type superuser_realms: list\n :param check_otp: If set, the user is not authenticated against the\n userstore but against privacyidea\n :return: tuple of bool, string and dict/None\n \"\"\"\n options = options or {}\n superuser_realms = superuser_realms or []\n user_auth = False\n role = ROLE.USER\n details = None\n\n if check_otp:\n # check if the given password matches an OTP token\n check, details = check_user_pass(user_obj, password, options=options)\n details[\"loginmode\"] = \"privacyIDEA\"\n if check:\n user_auth = True\n else:\n # check the password of the user against the userstore\n if user_obj.check_password(password):\n user_auth = True\n\n # If the realm is in the SUPERUSER_REALM then the authorization role\n # is risen to \"admin\".\n if user_obj.realm in superuser_realms:\n role = ROLE.ADMIN\n\n return user_auth, role, details\n", "path": "privacyidea/lib/auth.py"}]}
2,077
283
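Note on the record above: the golden diff wraps the OTP check in a try/except so a "user not found in any resolver" error is logged at debug level and treated as a failed login instead of surfacing ERR904 to the browser. A self-contained sketch of that pattern; passing `check_user_pass` in as a parameter is an assumption for the example (the real code imports it from `privacyidea.lib.token`), and the function name is illustrative.

```python
import logging

log = logging.getLogger(__name__)


def check_otp_login(user_obj, password, check_user_pass, options=None):
    """Run the OTP check but swallow resolver errors so they are not leaked."""
    user_auth, details = False, None
    try:
        check, details = check_user_pass(user_obj, password, options=options or {})
        details["loginmode"] = "privacyIDEA"
        user_auth = bool(check)
    except Exception as exc:
        # A nonexistent user raises here; log it at debug level and simply
        # fail the login rather than reporting that the user does not exist.
        log.debug("Error authenticating user against privacyIDEA: %r", exc)
    return user_auth, details
```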
gh_patches_debug_14761
rasdani/github-patches
git_diff
iterative__dvc-7965
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add TOML support for metrics Right now, there is only TOML file support for params files. We need to add TOML support for metrics as well. Here's a [link to the Discord question](https://discord.com/channels/485586884165107732/485596304961962003/865974923079319563) that brought this up. </issue> <code> [start of dvc/repo/metrics/show.py] 1 import logging 2 import os 3 from typing import List 4 5 from scmrepo.exceptions import SCMError 6 7 from dvc.fs.dvc import DvcFileSystem 8 from dvc.output import Output 9 from dvc.repo import locked 10 from dvc.repo.collect import StrPaths, collect 11 from dvc.repo.live import summary_fs_path 12 from dvc.scm import NoSCMError 13 from dvc.utils import error_handler, errored_revisions, onerror_collect 14 from dvc.utils.collections import ensure_list 15 from dvc.utils.serialize import load_yaml 16 17 logger = logging.getLogger(__name__) 18 19 20 def _is_metric(out: Output) -> bool: 21 return bool(out.metric) or bool(out.live) 22 23 24 def _to_fs_paths(metrics: List[Output]) -> StrPaths: 25 result = [] 26 for out in metrics: 27 if out.metric: 28 result.append(out.repo.dvcfs.from_os_path(out.fs_path)) 29 elif out.live: 30 fs_path = summary_fs_path(out) 31 if fs_path: 32 result.append(out.repo.dvcfs.from_os_path(fs_path)) 33 return result 34 35 36 def _collect_metrics(repo, targets, revision, recursive): 37 metrics, fs_paths = collect( 38 repo, 39 targets=targets, 40 output_filter=_is_metric, 41 recursive=recursive, 42 rev=revision, 43 ) 44 return _to_fs_paths(metrics) + list(fs_paths) 45 46 47 def _extract_metrics(metrics, path, rev): 48 if isinstance(metrics, (int, float)): 49 return metrics 50 51 if not isinstance(metrics, dict): 52 return None 53 54 ret = {} 55 for key, val in metrics.items(): 56 m = _extract_metrics(val, path, rev) 57 if m not in (None, {}): 58 ret[key] = m 59 else: 60 logger.debug( 61 "Could not parse '%s' metric from '%s' at '%s' " 62 "due to its unsupported type: '%s'", 63 key, 64 path, 65 rev, 66 type(val).__name__, 67 ) 68 69 return ret 70 71 72 @error_handler 73 def _read_metric(path, fs, rev, **kwargs): 74 val = load_yaml(path, fs=fs) 75 val = _extract_metrics(val, path, rev) 76 return val or {} 77 78 79 def _read_metrics(repo, metrics, rev, onerror=None): 80 fs = DvcFileSystem(repo=repo) 81 82 relpath = "" 83 if repo.root_dir != repo.fs.path.getcwd(): 84 relpath = repo.fs.path.relpath(repo.root_dir, repo.fs.path.getcwd()) 85 86 res = {} 87 for metric in metrics: 88 if not fs.isfile(metric): 89 continue 90 91 res[os.path.join(relpath, *fs.path.parts(metric))] = _read_metric( 92 metric, fs, rev, onerror=onerror 93 ) 94 95 return res 96 97 98 def _gather_metrics(repo, targets, rev, recursive, onerror=None): 99 metrics = _collect_metrics(repo, targets, rev, recursive) 100 return _read_metrics(repo, metrics, rev, onerror=onerror) 101 102 103 @locked 104 def show( 105 repo, 106 targets=None, 107 all_branches=False, 108 all_tags=False, 109 recursive=False, 110 revs=None, 111 all_commits=False, 112 onerror=None, 113 ): 114 if onerror is None: 115 onerror = onerror_collect 116 117 targets = ensure_list(targets) 118 targets = [repo.dvcfs.from_os_path(target) for target in targets] 119 120 res = {} 121 for rev in repo.brancher( 122 revs=revs, 123 all_branches=all_branches, 124 all_tags=all_tags, 125 all_commits=all_commits, 126 ): 127 res[rev] = error_handler(_gather_metrics)( 128 repo, targets, rev, recursive, onerror=onerror 129 ) 
130 131 # Hide workspace metrics if they are the same as in the active branch 132 try: 133 active_branch = repo.scm.active_branch() 134 except (SCMError, NoSCMError): 135 # SCMError - detached head 136 # NoSCMError - no repo case 137 pass 138 else: 139 if res.get("workspace") == res.get(active_branch): 140 res.pop("workspace", None) 141 142 errored = errored_revisions(res) 143 if errored: 144 from dvc.ui import ui 145 146 ui.error_write( 147 "DVC failed to load some metrics for following revisions:" 148 f" '{', '.join(errored)}'." 149 ) 150 151 return res 152 [end of dvc/repo/metrics/show.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/dvc/repo/metrics/show.py b/dvc/repo/metrics/show.py --- a/dvc/repo/metrics/show.py +++ b/dvc/repo/metrics/show.py @@ -12,7 +12,7 @@ from dvc.scm import NoSCMError from dvc.utils import error_handler, errored_revisions, onerror_collect from dvc.utils.collections import ensure_list -from dvc.utils.serialize import load_yaml +from dvc.utils.serialize import LOADERS logger = logging.getLogger(__name__) @@ -71,7 +71,9 @@ @error_handler def _read_metric(path, fs, rev, **kwargs): - val = load_yaml(path, fs=fs) + suffix = fs.path.suffix(path).lower() + loader = LOADERS[suffix] + val = loader(path, fs=fs) val = _extract_metrics(val, path, rev) return val or {}
{"golden_diff": "diff --git a/dvc/repo/metrics/show.py b/dvc/repo/metrics/show.py\n--- a/dvc/repo/metrics/show.py\n+++ b/dvc/repo/metrics/show.py\n@@ -12,7 +12,7 @@\n from dvc.scm import NoSCMError\n from dvc.utils import error_handler, errored_revisions, onerror_collect\n from dvc.utils.collections import ensure_list\n-from dvc.utils.serialize import load_yaml\n+from dvc.utils.serialize import LOADERS\n \n logger = logging.getLogger(__name__)\n \n@@ -71,7 +71,9 @@\n \n @error_handler\n def _read_metric(path, fs, rev, **kwargs):\n- val = load_yaml(path, fs=fs)\n+ suffix = fs.path.suffix(path).lower()\n+ loader = LOADERS[suffix]\n+ val = loader(path, fs=fs)\n val = _extract_metrics(val, path, rev)\n return val or {}\n", "issue": "Add TOML support for metrics\nRight now, there is only TOML file support for params files. We need to add TOML support for metrics as well.\r\n\r\nHere's a [link to the Discord question](https://discord.com/channels/485586884165107732/485596304961962003/865974923079319563) that brought this up.\n", "before_files": [{"content": "import logging\nimport os\nfrom typing import List\n\nfrom scmrepo.exceptions import SCMError\n\nfrom dvc.fs.dvc import DvcFileSystem\nfrom dvc.output import Output\nfrom dvc.repo import locked\nfrom dvc.repo.collect import StrPaths, collect\nfrom dvc.repo.live import summary_fs_path\nfrom dvc.scm import NoSCMError\nfrom dvc.utils import error_handler, errored_revisions, onerror_collect\nfrom dvc.utils.collections import ensure_list\nfrom dvc.utils.serialize import load_yaml\n\nlogger = logging.getLogger(__name__)\n\n\ndef _is_metric(out: Output) -> bool:\n return bool(out.metric) or bool(out.live)\n\n\ndef _to_fs_paths(metrics: List[Output]) -> StrPaths:\n result = []\n for out in metrics:\n if out.metric:\n result.append(out.repo.dvcfs.from_os_path(out.fs_path))\n elif out.live:\n fs_path = summary_fs_path(out)\n if fs_path:\n result.append(out.repo.dvcfs.from_os_path(fs_path))\n return result\n\n\ndef _collect_metrics(repo, targets, revision, recursive):\n metrics, fs_paths = collect(\n repo,\n targets=targets,\n output_filter=_is_metric,\n recursive=recursive,\n rev=revision,\n )\n return _to_fs_paths(metrics) + list(fs_paths)\n\n\ndef _extract_metrics(metrics, path, rev):\n if isinstance(metrics, (int, float)):\n return metrics\n\n if not isinstance(metrics, dict):\n return None\n\n ret = {}\n for key, val in metrics.items():\n m = _extract_metrics(val, path, rev)\n if m not in (None, {}):\n ret[key] = m\n else:\n logger.debug(\n \"Could not parse '%s' metric from '%s' at '%s' \"\n \"due to its unsupported type: '%s'\",\n key,\n path,\n rev,\n type(val).__name__,\n )\n\n return ret\n\n\n@error_handler\ndef _read_metric(path, fs, rev, **kwargs):\n val = load_yaml(path, fs=fs)\n val = _extract_metrics(val, path, rev)\n return val or {}\n\n\ndef _read_metrics(repo, metrics, rev, onerror=None):\n fs = DvcFileSystem(repo=repo)\n\n relpath = \"\"\n if repo.root_dir != repo.fs.path.getcwd():\n relpath = repo.fs.path.relpath(repo.root_dir, repo.fs.path.getcwd())\n\n res = {}\n for metric in metrics:\n if not fs.isfile(metric):\n continue\n\n res[os.path.join(relpath, *fs.path.parts(metric))] = _read_metric(\n metric, fs, rev, onerror=onerror\n )\n\n return res\n\n\ndef _gather_metrics(repo, targets, rev, recursive, onerror=None):\n metrics = _collect_metrics(repo, targets, rev, recursive)\n return _read_metrics(repo, metrics, rev, onerror=onerror)\n\n\n@locked\ndef show(\n repo,\n targets=None,\n all_branches=False,\n all_tags=False,\n 
recursive=False,\n revs=None,\n all_commits=False,\n onerror=None,\n):\n if onerror is None:\n onerror = onerror_collect\n\n targets = ensure_list(targets)\n targets = [repo.dvcfs.from_os_path(target) for target in targets]\n\n res = {}\n for rev in repo.brancher(\n revs=revs,\n all_branches=all_branches,\n all_tags=all_tags,\n all_commits=all_commits,\n ):\n res[rev] = error_handler(_gather_metrics)(\n repo, targets, rev, recursive, onerror=onerror\n )\n\n # Hide workspace metrics if they are the same as in the active branch\n try:\n active_branch = repo.scm.active_branch()\n except (SCMError, NoSCMError):\n # SCMError - detached head\n # NoSCMError - no repo case\n pass\n else:\n if res.get(\"workspace\") == res.get(active_branch):\n res.pop(\"workspace\", None)\n\n errored = errored_revisions(res)\n if errored:\n from dvc.ui import ui\n\n ui.error_write(\n \"DVC failed to load some metrics for following revisions:\"\n f\" '{', '.join(errored)}'.\"\n )\n\n return res\n", "path": "dvc/repo/metrics/show.py"}]}
1,963
211
gh_patches_debug_31073
rasdani/github-patches
git_diff
fossasia__open-event-server-4162
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Ticket-tag: remove GET for /ticket-tags Parent issue #4101. Related issue: #4119. Make `/ticket-tags` POST only. </issue> <code> [start of app/api/ticket_tags.py] 1 from flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship 2 from marshmallow_jsonapi.flask import Schema, Relationship 3 from marshmallow_jsonapi import fields 4 from sqlalchemy.orm.exc import NoResultFound 5 from flask_rest_jsonapi.exceptions import ObjectNotFound 6 7 from app.api.helpers.utilities import dasherize 8 from app.api.helpers.permissions import jwt_required 9 from app.models import db 10 from app.models.ticket import Ticket, TicketTag, ticket_tags_table 11 from app.models.event import Event 12 from app.api.helpers.db import safe_query 13 from app.api.helpers.utilities import require_relationship 14 from app.api.helpers.exceptions import ForbiddenException 15 from app.api.helpers.permission_manager import has_access 16 17 18 class TicketTagSchema(Schema): 19 """ 20 Api schema for TicketTag Model 21 """ 22 23 class Meta: 24 """ 25 Meta class for TicketTag Api Schema 26 """ 27 type_ = 'ticket-tag' 28 self_view = 'v1.ticket_tag_detail' 29 self_view_kwargs = {'id': '<id>'} 30 inflect = dasherize 31 32 id = fields.Str(dump_only=True) 33 name = fields.Str(allow_none=True) 34 tickets = Relationship(attribute='tickets', 35 self_view='v1.ticket_tag_ticket', 36 self_view_kwargs={'id': '<id>'}, 37 related_view='v1.ticket_list', 38 related_view_kwargs={'ticket_tag_id': '<id>'}, 39 schema='TicketSchema', 40 many=True, 41 type_='ticket') 42 event = Relationship(attribute='event', 43 self_view='v1.ticket_tag_event', 44 self_view_kwargs={'id': '<id>'}, 45 related_view='v1.event_detail', 46 related_view_kwargs={'ticket_tag_id': '<id>'}, 47 schema='EventSchema', 48 type_='event') 49 50 51 class TicketTagListPost(ResourceList): 52 """ 53 List and create TicketTag 54 """ 55 def before_post(self, args, kwargs, data): 56 """ 57 before post method for checking required relationship 58 :param args: 59 :param kwargs: 60 :param data: 61 :return: 62 """ 63 require_relationship(['event'], data) 64 65 if not has_access('is_coorganizer', event_id=data['event']): 66 raise ForbiddenException({'source': ''}, 'Co-organizer access is required.') 67 68 def after_create_object(self, obj, data, view_kwargs): 69 """ 70 method to add ticket tags and ticket in association table 71 :param obj: 72 :param data: 73 :param view_kwargs: 74 :return: 75 """ 76 if 'tickets' in data: 77 ticket_ids = data['tickets'] 78 for ticket_id in ticket_ids: 79 try: 80 ticket = Ticket.query.filter_by(id=ticket_id).one() 81 except NoResultFound: 82 raise ObjectNotFound({'parameter': 'ticket_id'}, 83 "Ticket: {} not found".format(ticket_id)) 84 else: 85 ticket.tags.append(obj) 86 self.session.commit() 87 88 schema = TicketTagSchema 89 data_layer = {'session': db.session, 90 'model': TicketTag, 91 'methods': { 92 'after_create_object': after_create_object 93 }} 94 95 96 class TicketTagList(ResourceList): 97 """ 98 List TicketTags based on event_id or ticket_id 99 """ 100 def query(self, view_kwargs): 101 """ 102 method to query Ticket tags based on different params 103 :param view_kwargs: 104 :return: 105 """ 106 query_ = self.session.query(TicketTag) 107 if view_kwargs.get('ticket_id'): 108 ticket = safe_query(self, Ticket, 'id', view_kwargs['ticket_id'], 'ticket_id') 109 query_ = query_.join(ticket_tags_table).filter_by(ticket_id=ticket.id) 110 if 
view_kwargs.get('event_id'): 111 event = safe_query(self, Event, 'id', view_kwargs['event_id'], 'event_id') 112 query_ = query_.join(Event).filter(Event.id == event.id) 113 elif view_kwargs.get('event_identifier'): 114 event = safe_query(self, Event, 'identifier', view_kwargs['event_identifier'], 'event_identifier') 115 query_ = query_.join(Event).filter(Event.id == event.id) 116 return query_ 117 118 view_kwargs = True 119 schema = TicketTagSchema 120 methods = ['GET', ] 121 data_layer = {'session': db.session, 122 'model': TicketTag, 123 'methods': { 124 'query': query 125 }} 126 127 128 class TicketTagDetail(ResourceDetail): 129 """ 130 TicketTag detail by id 131 """ 132 decorators = (jwt_required,) 133 schema = TicketTagSchema 134 data_layer = {'session': db.session, 135 'model': TicketTag} 136 137 138 class TicketTagRelationshipRequired(ResourceRelationship): 139 """ 140 TicketTag Relationship 141 """ 142 decorators = (jwt_required,) 143 methods = ['GET', 'PATCH'] 144 schema = TicketTagSchema 145 data_layer = {'session': db.session, 146 'model': TicketTag} 147 148 149 class TicketTagRelationshipOptional(ResourceRelationship): 150 """ 151 TicketTag Relationship 152 """ 153 decorators = (jwt_required,) 154 schema = TicketTagSchema 155 data_layer = {'session': db.session, 156 'model': TicketTag} 157 [end of app/api/ticket_tags.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/app/api/ticket_tags.py b/app/api/ticket_tags.py --- a/app/api/ticket_tags.py +++ b/app/api/ticket_tags.py @@ -1,8 +1,6 @@ from flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship from marshmallow_jsonapi.flask import Schema, Relationship from marshmallow_jsonapi import fields -from sqlalchemy.orm.exc import NoResultFound -from flask_rest_jsonapi.exceptions import ObjectNotFound from app.api.helpers.utilities import dasherize from app.api.helpers.permissions import jwt_required @@ -65,32 +63,10 @@ if not has_access('is_coorganizer', event_id=data['event']): raise ForbiddenException({'source': ''}, 'Co-organizer access is required.') - def after_create_object(self, obj, data, view_kwargs): - """ - method to add ticket tags and ticket in association table - :param obj: - :param data: - :param view_kwargs: - :return: - """ - if 'tickets' in data: - ticket_ids = data['tickets'] - for ticket_id in ticket_ids: - try: - ticket = Ticket.query.filter_by(id=ticket_id).one() - except NoResultFound: - raise ObjectNotFound({'parameter': 'ticket_id'}, - "Ticket: {} not found".format(ticket_id)) - else: - ticket.tags.append(obj) - self.session.commit() - schema = TicketTagSchema + methods = ['POST', ] data_layer = {'session': db.session, - 'model': TicketTag, - 'methods': { - 'after_create_object': after_create_object - }} + 'model': TicketTag} class TicketTagList(ResourceList):
{"golden_diff": "diff --git a/app/api/ticket_tags.py b/app/api/ticket_tags.py\n--- a/app/api/ticket_tags.py\n+++ b/app/api/ticket_tags.py\n@@ -1,8 +1,6 @@\n from flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship\n from marshmallow_jsonapi.flask import Schema, Relationship\n from marshmallow_jsonapi import fields\n-from sqlalchemy.orm.exc import NoResultFound\n-from flask_rest_jsonapi.exceptions import ObjectNotFound\n \n from app.api.helpers.utilities import dasherize\n from app.api.helpers.permissions import jwt_required\n@@ -65,32 +63,10 @@\n if not has_access('is_coorganizer', event_id=data['event']):\n raise ForbiddenException({'source': ''}, 'Co-organizer access is required.')\n \n- def after_create_object(self, obj, data, view_kwargs):\n- \"\"\"\n- method to add ticket tags and ticket in association table\n- :param obj:\n- :param data:\n- :param view_kwargs:\n- :return:\n- \"\"\"\n- if 'tickets' in data:\n- ticket_ids = data['tickets']\n- for ticket_id in ticket_ids:\n- try:\n- ticket = Ticket.query.filter_by(id=ticket_id).one()\n- except NoResultFound:\n- raise ObjectNotFound({'parameter': 'ticket_id'},\n- \"Ticket: {} not found\".format(ticket_id))\n- else:\n- ticket.tags.append(obj)\n- self.session.commit()\n-\n schema = TicketTagSchema\n+ methods = ['POST', ]\n data_layer = {'session': db.session,\n- 'model': TicketTag,\n- 'methods': {\n- 'after_create_object': after_create_object\n- }}\n+ 'model': TicketTag}\n \n \n class TicketTagList(ResourceList):\n", "issue": "Ticket-tag: remove GET for /ticket-tags \nParent issue #4101.\r\nRelated issue: #4119.\r\n\r\nMake `/ticket-tags` POST only.\n", "before_files": [{"content": "from flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship\nfrom marshmallow_jsonapi.flask import Schema, Relationship\nfrom marshmallow_jsonapi import fields\nfrom sqlalchemy.orm.exc import NoResultFound\nfrom flask_rest_jsonapi.exceptions import ObjectNotFound\n\nfrom app.api.helpers.utilities import dasherize\nfrom app.api.helpers.permissions import jwt_required\nfrom app.models import db\nfrom app.models.ticket import Ticket, TicketTag, ticket_tags_table\nfrom app.models.event import Event\nfrom app.api.helpers.db import safe_query\nfrom app.api.helpers.utilities import require_relationship\nfrom app.api.helpers.exceptions import ForbiddenException\nfrom app.api.helpers.permission_manager import has_access\n\n\nclass TicketTagSchema(Schema):\n \"\"\"\n Api schema for TicketTag Model\n \"\"\"\n\n class Meta:\n \"\"\"\n Meta class for TicketTag Api Schema\n \"\"\"\n type_ = 'ticket-tag'\n self_view = 'v1.ticket_tag_detail'\n self_view_kwargs = {'id': '<id>'}\n inflect = dasherize\n\n id = fields.Str(dump_only=True)\n name = fields.Str(allow_none=True)\n tickets = Relationship(attribute='tickets',\n self_view='v1.ticket_tag_ticket',\n self_view_kwargs={'id': '<id>'},\n related_view='v1.ticket_list',\n related_view_kwargs={'ticket_tag_id': '<id>'},\n schema='TicketSchema',\n many=True,\n type_='ticket')\n event = Relationship(attribute='event',\n self_view='v1.ticket_tag_event',\n self_view_kwargs={'id': '<id>'},\n related_view='v1.event_detail',\n related_view_kwargs={'ticket_tag_id': '<id>'},\n schema='EventSchema',\n type_='event')\n\n\nclass TicketTagListPost(ResourceList):\n \"\"\"\n List and create TicketTag\n \"\"\"\n def before_post(self, args, kwargs, data):\n \"\"\"\n before post method for checking required relationship\n :param args:\n :param kwargs:\n :param data:\n :return:\n \"\"\"\n 
require_relationship(['event'], data)\n\n if not has_access('is_coorganizer', event_id=data['event']):\n raise ForbiddenException({'source': ''}, 'Co-organizer access is required.')\n\n def after_create_object(self, obj, data, view_kwargs):\n \"\"\"\n method to add ticket tags and ticket in association table\n :param obj:\n :param data:\n :param view_kwargs:\n :return:\n \"\"\"\n if 'tickets' in data:\n ticket_ids = data['tickets']\n for ticket_id in ticket_ids:\n try:\n ticket = Ticket.query.filter_by(id=ticket_id).one()\n except NoResultFound:\n raise ObjectNotFound({'parameter': 'ticket_id'},\n \"Ticket: {} not found\".format(ticket_id))\n else:\n ticket.tags.append(obj)\n self.session.commit()\n\n schema = TicketTagSchema\n data_layer = {'session': db.session,\n 'model': TicketTag,\n 'methods': {\n 'after_create_object': after_create_object\n }}\n\n\nclass TicketTagList(ResourceList):\n \"\"\"\n List TicketTags based on event_id or ticket_id\n \"\"\"\n def query(self, view_kwargs):\n \"\"\"\n method to query Ticket tags based on different params\n :param view_kwargs:\n :return:\n \"\"\"\n query_ = self.session.query(TicketTag)\n if view_kwargs.get('ticket_id'):\n ticket = safe_query(self, Ticket, 'id', view_kwargs['ticket_id'], 'ticket_id')\n query_ = query_.join(ticket_tags_table).filter_by(ticket_id=ticket.id)\n if view_kwargs.get('event_id'):\n event = safe_query(self, Event, 'id', view_kwargs['event_id'], 'event_id')\n query_ = query_.join(Event).filter(Event.id == event.id)\n elif view_kwargs.get('event_identifier'):\n event = safe_query(self, Event, 'identifier', view_kwargs['event_identifier'], 'event_identifier')\n query_ = query_.join(Event).filter(Event.id == event.id)\n return query_\n\n view_kwargs = True\n schema = TicketTagSchema\n methods = ['GET', ]\n data_layer = {'session': db.session,\n 'model': TicketTag,\n 'methods': {\n 'query': query\n }}\n\n\nclass TicketTagDetail(ResourceDetail):\n \"\"\"\n TicketTag detail by id\n \"\"\"\n decorators = (jwt_required,)\n schema = TicketTagSchema\n data_layer = {'session': db.session,\n 'model': TicketTag}\n\n\nclass TicketTagRelationshipRequired(ResourceRelationship):\n \"\"\"\n TicketTag Relationship\n \"\"\"\n decorators = (jwt_required,)\n methods = ['GET', 'PATCH']\n schema = TicketTagSchema\n data_layer = {'session': db.session,\n 'model': TicketTag}\n\n\nclass TicketTagRelationshipOptional(ResourceRelationship):\n \"\"\"\n TicketTag Relationship\n \"\"\"\n decorators = (jwt_required,)\n schema = TicketTagSchema\n data_layer = {'session': db.session,\n 'model': TicketTag}\n", "path": "app/api/ticket_tags.py"}]}
2,014
392
gh_patches_debug_27001
rasdani/github-patches
git_diff
Kinto__kinto-1015
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> updated default kinto config breaks docker (kinto server not reacheable from host) In e4e70fb, the default listen ip has been changed to 127.0.0.1. I couldn't manage to export the port using docker, using this command from the [install documentation](http://kinto.readthedocs.io/en/latest/tutorials/install.html#environment-variables): ``` docker run --env-file kinto.env --link kinto_db:db -p 127.0.0.1:8888:8888 --name kinto_web croco/kinto:latest ``` For instance, when I do `curl http://127.0.0.1:8888` from the host I get `Recv failure: Connection reset by peer`. When I reverted kinto.ini to listen on 0.0.0.0, curl from the host succeeds. Maybe there is an environment variable to override the `host` entry in the `server:main` section? Then it would be fine, provided an updated documentation... This is not visible with the published kinto/kinto-server image, since it's still 5.1. I built my own from master because I needed an armhf image. </issue> <code> [start of kinto/config/__init__.py] 1 import os 2 import codecs 3 from time import strftime 4 5 from kinto.core import utils as core_utils 6 7 from kinto import logger 8 from kinto import __version__ 9 10 HERE = os.path.abspath(os.path.dirname(__file__)) 11 12 13 def render_template(template, destination, **kwargs): 14 template = os.path.join(HERE, template) 15 folder = os.path.dirname(destination) 16 17 if folder and not os.path.exists(folder): 18 os.makedirs(folder) 19 20 logger.info("Created config {}".format(os.path.abspath(destination))) 21 22 with codecs.open(template, 'r', encoding='utf-8') as f: 23 raw_template = f.read() 24 rendered = raw_template.format(**kwargs) 25 with codecs.open(destination, 'w+', encoding='utf-8') as output: 26 output.write(rendered) 27 28 29 def init(config_file, backend): 30 values = {} 31 32 values['secret'] = core_utils.random_bytes_hex(32) 33 34 values['kinto_version'] = __version__ 35 values['config_file_timestamp'] = core_utils._encoded( 36 strftime('%a, %d %b %Y %H:%M:%S %z')) 37 38 values['storage_backend'] = "kinto.core.storage.%s" % backend 39 values['cache_backend'] = "kinto.core.cache.%s" % backend 40 values['permission_backend'] = "kinto.core.permission.%s" % backend 41 42 if backend == 'postgresql': 43 postgresql_url = "postgres://postgres:postgres@localhost/postgres" 44 values['storage_url'] = postgresql_url 45 values['cache_url'] = postgresql_url 46 values['permission_url'] = postgresql_url 47 48 elif backend == 'redis': 49 redis_url = "redis://localhost:6379" 50 values['storage_backend'] = "kinto_redis.storage" 51 values['cache_backend'] = "kinto_redis.cache" 52 values['permission_backend'] = "kinto_redis.permission" 53 54 values['storage_url'] = redis_url + "/1" 55 values['cache_url'] = redis_url + "/2" 56 values['permission_url'] = redis_url + "/3" 57 58 else: 59 values['storage_url'] = '' 60 values['cache_url'] = '' 61 values['permission_url'] = '' 62 63 render_template("kinto.tpl", config_file, **values) 64 [end of kinto/config/__init__.py] [start of kinto/__main__.py] 1 from __future__ import print_function 2 import argparse 3 import os 4 import sys 5 import logging 6 import logging.config 7 from six.moves import input 8 9 from kinto.core import scripts 10 from pyramid.scripts import pserve 11 from pyramid.paster import bootstrap 12 from kinto import __version__ 13 from kinto.config import init 14 15 DEFAULT_CONFIG_FILE = 'config/kinto.ini' 16 DEFAULT_PORT = 8888 17 
DEFAULT_LOG_LEVEL = logging.INFO 18 DEFAULT_LOG_FORMAT = "%(levelname)-5.5s %(message)s" 19 20 21 def main(args=None): 22 """The main routine.""" 23 if args is None: 24 args = sys.argv[1:] 25 26 parser = argparse.ArgumentParser(description="Kinto Command-Line " 27 "Interface") 28 # XXX: deprecate this option, unnatural as first argument. 29 parser.add_argument('--ini', 30 help='Application configuration file', 31 dest='ini_file', 32 required=False, 33 default=DEFAULT_CONFIG_FILE) 34 35 parser.add_argument('-q', '--quiet', action='store_const', 36 const=logging.CRITICAL, dest='verbosity', 37 help='Show only critical errors.') 38 39 parser.add_argument('--debug', action='store_const', 40 const=logging.DEBUG, dest='verbosity', 41 help='Show all messages, including debug messages.') 42 43 commands = ('init', 'start', 'migrate', 'delete-collection', 'version') 44 subparsers = parser.add_subparsers(title='subcommands', 45 description='Main Kinto CLI commands', 46 dest='subcommand', 47 help="Choose and run with --help") 48 subparsers.required = True 49 50 for command in commands: 51 subparser = subparsers.add_parser(command) 52 subparser.set_defaults(which=command) 53 54 if command == 'init': 55 subparser.add_argument('--backend', 56 help='{memory,redis,postgresql}', 57 dest='backend', 58 required=False, 59 default=None) 60 elif command == 'migrate': 61 subparser.add_argument('--dry-run', 62 action='store_true', 63 help='Simulate the migration operations ' 64 'and show information', 65 dest='dry_run', 66 required=False, 67 default=False) 68 elif command == 'delete-collection': 69 subparser.add_argument('--bucket', 70 help='The bucket where the collection ' 71 'belongs to.', 72 required=True) 73 subparser.add_argument('--collection', 74 help='The collection to remove.', 75 required=True) 76 77 elif command == 'start': 78 subparser.add_argument('--reload', 79 action='store_true', 80 help='Restart when code or config changes', 81 required=False, 82 default=False) 83 subparser.add_argument('--port', 84 type=int, 85 help='Listening port number', 86 required=False, 87 default=DEFAULT_PORT) 88 89 # Parse command-line arguments 90 parsed_args = vars(parser.parse_args(args)) 91 92 config_file = parsed_args['ini_file'] 93 which_command = parsed_args['which'] 94 95 # Initialize logging from 96 level = parsed_args.get('verbosity') or DEFAULT_LOG_LEVEL 97 logging.basicConfig(level=level, format=DEFAULT_LOG_FORMAT) 98 99 if which_command == 'init': 100 if os.path.exists(config_file): 101 print("%s already exists." 
% config_file, file=sys.stderr) 102 return 1 103 104 backend = parsed_args['backend'] 105 if not backend: 106 while True: 107 prompt = ("Select the backend you would like to use: " 108 "(1 - postgresql, 2 - redis, default - memory) ") 109 answer = input(prompt).strip() 110 try: 111 backends = {"1": "postgresql", "2": "redis", "": "memory"} 112 backend = backends[answer] 113 break 114 except KeyError: 115 pass 116 117 init(config_file, backend) 118 119 # Install postgresql libraries if necessary 120 if backend == "postgresql": 121 try: 122 import psycopg2 # NOQA 123 except ImportError: 124 import pip 125 pip.main(['install', "kinto[postgresql]"]) 126 elif backend == "redis": 127 try: 128 import kinto_redis # NOQA 129 except ImportError: 130 import pip 131 pip.main(['install', "kinto[redis]"]) 132 133 elif which_command == 'migrate': 134 dry_run = parsed_args['dry_run'] 135 env = bootstrap(config_file) 136 scripts.migrate(env, dry_run=dry_run) 137 138 elif which_command == 'delete-collection': 139 env = bootstrap(config_file) 140 return scripts.delete_collection(env, 141 parsed_args['bucket'], 142 parsed_args['collection']) 143 144 elif which_command == 'start': 145 pserve_argv = ['pserve', config_file] 146 if parsed_args['reload']: 147 pserve_argv.append('--reload') 148 pserve_argv.append('http_port=%s' % parsed_args['port']) 149 pserve.main(pserve_argv) 150 151 elif which_command == 'version': 152 print(__version__) 153 154 return 0 155 [end of kinto/__main__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/kinto/__main__.py b/kinto/__main__.py --- a/kinto/__main__.py +++ b/kinto/__main__.py @@ -57,6 +57,11 @@ dest='backend', required=False, default=None) + subparser.add_argument('--host', + help='Host to listen() on.', + dest='host', + required=False, + default='127.0.0.1') elif command == 'migrate': subparser.add_argument('--dry-run', action='store_true', @@ -114,7 +119,7 @@ except KeyError: pass - init(config_file, backend) + init(config_file, backend, parsed_args['host']) # Install postgresql libraries if necessary if backend == "postgresql": diff --git a/kinto/config/__init__.py b/kinto/config/__init__.py --- a/kinto/config/__init__.py +++ b/kinto/config/__init__.py @@ -26,9 +26,10 @@ output.write(rendered) -def init(config_file, backend): +def init(config_file, backend, host='127.0.0.1'): values = {} + values['host'] = host values['secret'] = core_utils.random_bytes_hex(32) values['kinto_version'] = __version__
{"golden_diff": "diff --git a/kinto/__main__.py b/kinto/__main__.py\n--- a/kinto/__main__.py\n+++ b/kinto/__main__.py\n@@ -57,6 +57,11 @@\n dest='backend',\n required=False,\n default=None)\n+ subparser.add_argument('--host',\n+ help='Host to listen() on.',\n+ dest='host',\n+ required=False,\n+ default='127.0.0.1')\n elif command == 'migrate':\n subparser.add_argument('--dry-run',\n action='store_true',\n@@ -114,7 +119,7 @@\n except KeyError:\n pass\n \n- init(config_file, backend)\n+ init(config_file, backend, parsed_args['host'])\n \n # Install postgresql libraries if necessary\n if backend == \"postgresql\":\ndiff --git a/kinto/config/__init__.py b/kinto/config/__init__.py\n--- a/kinto/config/__init__.py\n+++ b/kinto/config/__init__.py\n@@ -26,9 +26,10 @@\n output.write(rendered)\n \n \n-def init(config_file, backend):\n+def init(config_file, backend, host='127.0.0.1'):\n values = {}\n \n+ values['host'] = host\n values['secret'] = core_utils.random_bytes_hex(32)\n \n values['kinto_version'] = __version__\n", "issue": "updated default kinto config breaks docker (kinto server not reacheable from host)\nIn e4e70fb, the default listen ip has been changed to 127.0.0.1.\r\nI couldn't manage to export the port using docker, using this command from the [install documentation](http://kinto.readthedocs.io/en/latest/tutorials/install.html#environment-variables):\r\n```\r\ndocker run --env-file kinto.env --link kinto_db:db -p 127.0.0.1:8888:8888 --name kinto_web croco/kinto:latest\r\n```\r\nFor instance, when I do `curl http://127.0.0.1:8888` from the host I get `Recv failure: Connection reset by peer`.\r\n\r\nWhen I reverted kinto.ini to listen on 0.0.0.0, curl from the host succeeds.\r\n\r\nMaybe there is an environment variable to override the `host` entry in the `server:main` section? Then it would be fine, provided an updated documentation...\r\n\r\nThis is not visible with the published kinto/kinto-server image, since it's still 5.1. 
I built my own from master because I needed an armhf image.\n", "before_files": [{"content": "import os\nimport codecs\nfrom time import strftime\n\nfrom kinto.core import utils as core_utils\n\nfrom kinto import logger\nfrom kinto import __version__\n\nHERE = os.path.abspath(os.path.dirname(__file__))\n\n\ndef render_template(template, destination, **kwargs):\n template = os.path.join(HERE, template)\n folder = os.path.dirname(destination)\n\n if folder and not os.path.exists(folder):\n os.makedirs(folder)\n\n logger.info(\"Created config {}\".format(os.path.abspath(destination)))\n\n with codecs.open(template, 'r', encoding='utf-8') as f:\n raw_template = f.read()\n rendered = raw_template.format(**kwargs)\n with codecs.open(destination, 'w+', encoding='utf-8') as output:\n output.write(rendered)\n\n\ndef init(config_file, backend):\n values = {}\n\n values['secret'] = core_utils.random_bytes_hex(32)\n\n values['kinto_version'] = __version__\n values['config_file_timestamp'] = core_utils._encoded(\n strftime('%a, %d %b %Y %H:%M:%S %z'))\n\n values['storage_backend'] = \"kinto.core.storage.%s\" % backend\n values['cache_backend'] = \"kinto.core.cache.%s\" % backend\n values['permission_backend'] = \"kinto.core.permission.%s\" % backend\n\n if backend == 'postgresql':\n postgresql_url = \"postgres://postgres:postgres@localhost/postgres\"\n values['storage_url'] = postgresql_url\n values['cache_url'] = postgresql_url\n values['permission_url'] = postgresql_url\n\n elif backend == 'redis':\n redis_url = \"redis://localhost:6379\"\n values['storage_backend'] = \"kinto_redis.storage\"\n values['cache_backend'] = \"kinto_redis.cache\"\n values['permission_backend'] = \"kinto_redis.permission\"\n\n values['storage_url'] = redis_url + \"/1\"\n values['cache_url'] = redis_url + \"/2\"\n values['permission_url'] = redis_url + \"/3\"\n\n else:\n values['storage_url'] = ''\n values['cache_url'] = ''\n values['permission_url'] = ''\n\n render_template(\"kinto.tpl\", config_file, **values)\n", "path": "kinto/config/__init__.py"}, {"content": "from __future__ import print_function\nimport argparse\nimport os\nimport sys\nimport logging\nimport logging.config\nfrom six.moves import input\n\nfrom kinto.core import scripts\nfrom pyramid.scripts import pserve\nfrom pyramid.paster import bootstrap\nfrom kinto import __version__\nfrom kinto.config import init\n\nDEFAULT_CONFIG_FILE = 'config/kinto.ini'\nDEFAULT_PORT = 8888\nDEFAULT_LOG_LEVEL = logging.INFO\nDEFAULT_LOG_FORMAT = \"%(levelname)-5.5s %(message)s\"\n\n\ndef main(args=None):\n \"\"\"The main routine.\"\"\"\n if args is None:\n args = sys.argv[1:]\n\n parser = argparse.ArgumentParser(description=\"Kinto Command-Line \"\n \"Interface\")\n # XXX: deprecate this option, unnatural as first argument.\n parser.add_argument('--ini',\n help='Application configuration file',\n dest='ini_file',\n required=False,\n default=DEFAULT_CONFIG_FILE)\n\n parser.add_argument('-q', '--quiet', action='store_const',\n const=logging.CRITICAL, dest='verbosity',\n help='Show only critical errors.')\n\n parser.add_argument('--debug', action='store_const',\n const=logging.DEBUG, dest='verbosity',\n help='Show all messages, including debug messages.')\n\n commands = ('init', 'start', 'migrate', 'delete-collection', 'version')\n subparsers = parser.add_subparsers(title='subcommands',\n description='Main Kinto CLI commands',\n dest='subcommand',\n help=\"Choose and run with --help\")\n subparsers.required = True\n\n for command in commands:\n subparser = 
subparsers.add_parser(command)\n subparser.set_defaults(which=command)\n\n if command == 'init':\n subparser.add_argument('--backend',\n help='{memory,redis,postgresql}',\n dest='backend',\n required=False,\n default=None)\n elif command == 'migrate':\n subparser.add_argument('--dry-run',\n action='store_true',\n help='Simulate the migration operations '\n 'and show information',\n dest='dry_run',\n required=False,\n default=False)\n elif command == 'delete-collection':\n subparser.add_argument('--bucket',\n help='The bucket where the collection '\n 'belongs to.',\n required=True)\n subparser.add_argument('--collection',\n help='The collection to remove.',\n required=True)\n\n elif command == 'start':\n subparser.add_argument('--reload',\n action='store_true',\n help='Restart when code or config changes',\n required=False,\n default=False)\n subparser.add_argument('--port',\n type=int,\n help='Listening port number',\n required=False,\n default=DEFAULT_PORT)\n\n # Parse command-line arguments\n parsed_args = vars(parser.parse_args(args))\n\n config_file = parsed_args['ini_file']\n which_command = parsed_args['which']\n\n # Initialize logging from\n level = parsed_args.get('verbosity') or DEFAULT_LOG_LEVEL\n logging.basicConfig(level=level, format=DEFAULT_LOG_FORMAT)\n\n if which_command == 'init':\n if os.path.exists(config_file):\n print(\"%s already exists.\" % config_file, file=sys.stderr)\n return 1\n\n backend = parsed_args['backend']\n if not backend:\n while True:\n prompt = (\"Select the backend you would like to use: \"\n \"(1 - postgresql, 2 - redis, default - memory) \")\n answer = input(prompt).strip()\n try:\n backends = {\"1\": \"postgresql\", \"2\": \"redis\", \"\": \"memory\"}\n backend = backends[answer]\n break\n except KeyError:\n pass\n\n init(config_file, backend)\n\n # Install postgresql libraries if necessary\n if backend == \"postgresql\":\n try:\n import psycopg2 # NOQA\n except ImportError:\n import pip\n pip.main(['install', \"kinto[postgresql]\"])\n elif backend == \"redis\":\n try:\n import kinto_redis # NOQA\n except ImportError:\n import pip\n pip.main(['install', \"kinto[redis]\"])\n\n elif which_command == 'migrate':\n dry_run = parsed_args['dry_run']\n env = bootstrap(config_file)\n scripts.migrate(env, dry_run=dry_run)\n\n elif which_command == 'delete-collection':\n env = bootstrap(config_file)\n return scripts.delete_collection(env,\n parsed_args['bucket'],\n parsed_args['collection'])\n\n elif which_command == 'start':\n pserve_argv = ['pserve', config_file]\n if parsed_args['reload']:\n pserve_argv.append('--reload')\n pserve_argv.append('http_port=%s' % parsed_args['port'])\n pserve.main(pserve_argv)\n\n elif which_command == 'version':\n print(__version__)\n\n return 0\n", "path": "kinto/__main__.py"}]}
2,840
316
gh_patches_debug_3046
rasdani/github-patches
git_diff
certbot__certbot-6134
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Reuse ACMEv1 accounts for ACMEv2 production The lines removed following the discussion at https://github.com/certbot/certbot/pull/5902#discussion_r192532446 need to be added back. </issue> <code> [start of certbot/constants.py] 1 """Certbot constants.""" 2 import logging 3 import os 4 import pkg_resources 5 6 from acme import challenges 7 8 9 SETUPTOOLS_PLUGINS_ENTRY_POINT = "certbot.plugins" 10 """Setuptools entry point group name for plugins.""" 11 12 OLD_SETUPTOOLS_PLUGINS_ENTRY_POINT = "letsencrypt.plugins" 13 """Plugins Setuptools entry point before rename.""" 14 15 CLI_DEFAULTS = dict( 16 config_files=[ 17 "/etc/letsencrypt/cli.ini", 18 # http://freedesktop.org/wiki/Software/xdg-user-dirs/ 19 os.path.join(os.environ.get("XDG_CONFIG_HOME", "~/.config"), 20 "letsencrypt", "cli.ini"), 21 ], 22 23 # Main parser 24 verbose_count=-int(logging.INFO / 10), 25 text_mode=False, 26 max_log_backups=1000, 27 noninteractive_mode=False, 28 force_interactive=False, 29 domains=[], 30 certname=None, 31 dry_run=False, 32 register_unsafely_without_email=False, 33 update_registration=False, 34 email=None, 35 eff_email=None, 36 reinstall=False, 37 expand=False, 38 renew_by_default=False, 39 renew_with_new_domains=False, 40 autorenew=True, 41 allow_subset_of_names=False, 42 tos=False, 43 account=None, 44 duplicate=False, 45 os_packages_only=False, 46 no_self_upgrade=False, 47 no_bootstrap=False, 48 quiet=False, 49 staging=False, 50 debug=False, 51 debug_challenges=False, 52 no_verify_ssl=False, 53 tls_sni_01_port=challenges.TLSSNI01Response.PORT, 54 tls_sni_01_address="", 55 http01_port=challenges.HTTP01Response.PORT, 56 http01_address="", 57 break_my_certs=False, 58 rsa_key_size=2048, 59 must_staple=False, 60 redirect=None, 61 hsts=None, 62 uir=None, 63 staple=None, 64 strict_permissions=False, 65 pref_challs=[], 66 validate_hooks=True, 67 directory_hooks=True, 68 reuse_key=False, 69 disable_renew_updates=False, 70 71 # Subparsers 72 num=None, 73 user_agent=None, 74 user_agent_comment=None, 75 csr=None, 76 reason=0, 77 delete_after_revoke=None, 78 rollback_checkpoints=1, 79 init=False, 80 prepare=False, 81 ifaces=None, 82 83 # Path parsers 84 auth_cert_path="./cert.pem", 85 auth_chain_path="./chain.pem", 86 key_path=None, 87 config_dir="/etc/letsencrypt", 88 work_dir="/var/lib/letsencrypt", 89 logs_dir="/var/log/letsencrypt", 90 server="https://acme-v01.api.letsencrypt.org/directory", 91 92 # Plugins parsers 93 configurator=None, 94 authenticator=None, 95 installer=None, 96 apache=False, 97 nginx=False, 98 standalone=False, 99 manual=False, 100 webroot=False, 101 dns_cloudflare=False, 102 dns_cloudxns=False, 103 dns_digitalocean=False, 104 dns_dnsimple=False, 105 dns_dnsmadeeasy=False, 106 dns_google=False, 107 dns_luadns=False, 108 dns_nsone=False, 109 dns_rfc2136=False, 110 dns_route53=False 111 112 ) 113 STAGING_URI = "https://acme-staging-v02.api.letsencrypt.org/directory" 114 115 # The set of reasons for revoking a certificate is defined in RFC 5280 in 116 # section 5.3.1. The reasons that users are allowed to submit are restricted to 117 # those accepted by the ACME server implementation. They are listed in 118 # `letsencrypt.boulder.revocation.reasons.go`. 
119 REVOCATION_REASONS = { 120 "unspecified": 0, 121 "keycompromise": 1, 122 "affiliationchanged": 3, 123 "superseded": 4, 124 "cessationofoperation": 5} 125 126 """Defaults for CLI flags and `.IConfig` attributes.""" 127 128 QUIET_LOGGING_LEVEL = logging.WARNING 129 """Logging level to use in quiet mode.""" 130 131 RENEWER_DEFAULTS = dict( 132 renewer_enabled="yes", 133 renew_before_expiry="30 days", 134 # This value should ensure that there is never a deployment delay by 135 # default. 136 deploy_before_expiry="99 years", 137 ) 138 """Defaults for renewer script.""" 139 140 141 ENHANCEMENTS = ["redirect", "ensure-http-header", "ocsp-stapling", "spdy"] 142 """List of possible :class:`certbot.interfaces.IInstaller` 143 enhancements. 144 145 List of expected options parameters: 146 - redirect: None 147 - ensure-http-header: name of header (i.e. Strict-Transport-Security) 148 - ocsp-stapling: certificate chain file path 149 - spdy: TODO 150 151 """ 152 153 ARCHIVE_DIR = "archive" 154 """Archive directory, relative to `IConfig.config_dir`.""" 155 156 CONFIG_DIRS_MODE = 0o755 157 """Directory mode for ``.IConfig.config_dir`` et al.""" 158 159 ACCOUNTS_DIR = "accounts" 160 """Directory where all accounts are saved.""" 161 162 LE_REUSE_SERVERS = { 163 'acme-staging-v02.api.letsencrypt.org/directory': 164 'acme-staging.api.letsencrypt.org/directory' 165 } 166 """Servers that can reuse accounts from other servers.""" 167 168 BACKUP_DIR = "backups" 169 """Directory (relative to `IConfig.work_dir`) where backups are kept.""" 170 171 CSR_DIR = "csr" 172 """See `.IConfig.csr_dir`.""" 173 174 IN_PROGRESS_DIR = "IN_PROGRESS" 175 """Directory used before a permanent checkpoint is finalized (relative to 176 `IConfig.work_dir`).""" 177 178 KEY_DIR = "keys" 179 """Directory (relative to `IConfig.config_dir`) where keys are saved.""" 180 181 LIVE_DIR = "live" 182 """Live directory, relative to `IConfig.config_dir`.""" 183 184 TEMP_CHECKPOINT_DIR = "temp_checkpoint" 185 """Temporary checkpoint directory (relative to `IConfig.work_dir`).""" 186 187 RENEWAL_CONFIGS_DIR = "renewal" 188 """Renewal configs directory, relative to `IConfig.config_dir`.""" 189 190 RENEWAL_HOOKS_DIR = "renewal-hooks" 191 """Basename of directory containing hooks to run with the renew command.""" 192 193 RENEWAL_PRE_HOOKS_DIR = "pre" 194 """Basename of directory containing pre-hooks to run with the renew command.""" 195 196 RENEWAL_DEPLOY_HOOKS_DIR = "deploy" 197 """Basename of directory containing deploy-hooks to run with the renew command.""" 198 199 RENEWAL_POST_HOOKS_DIR = "post" 200 """Basename of directory containing post-hooks to run with the renew command.""" 201 202 FORCE_INTERACTIVE_FLAG = "--force-interactive" 203 """Flag to disable TTY checking in IDisplay.""" 204 205 EFF_SUBSCRIBE_URI = "https://supporters.eff.org/subscribe/certbot" 206 """EFF URI used to submit the e-mail address of users who opt-in.""" 207 208 SSL_DHPARAMS_DEST = "ssl-dhparams.pem" 209 """Name of the ssl_dhparams file as saved in `IConfig.config_dir`.""" 210 211 SSL_DHPARAMS_SRC = pkg_resources.resource_filename( 212 "certbot", "ssl-dhparams.pem") 213 """Path to the nginx ssl_dhparams file found in the Certbot distribution.""" 214 215 UPDATED_SSL_DHPARAMS_DIGEST = ".updated-ssl-dhparams-pem-digest.txt" 216 """Name of the hash of the updated or informed ssl_dhparams as saved in `IConfig.config_dir`.""" 217 218 ALL_SSL_DHPARAMS_HASHES = [ 219 '9ba6429597aeed2d8617a7705b56e96d044f64b07971659382e426675105654b', 220 ] 221 """SHA256 hashes of the contents of 
all versions of SSL_DHPARAMS_SRC""" 222 [end of certbot/constants.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/certbot/constants.py b/certbot/constants.py --- a/certbot/constants.py +++ b/certbot/constants.py @@ -160,6 +160,7 @@ """Directory where all accounts are saved.""" LE_REUSE_SERVERS = { + 'acme-v02.api.letsencrypt.org/directory': 'acme-v01.api.letsencrypt.org/directory', 'acme-staging-v02.api.letsencrypt.org/directory': 'acme-staging.api.letsencrypt.org/directory' }
{"golden_diff": "diff --git a/certbot/constants.py b/certbot/constants.py\n--- a/certbot/constants.py\n+++ b/certbot/constants.py\n@@ -160,6 +160,7 @@\n \"\"\"Directory where all accounts are saved.\"\"\"\n \n LE_REUSE_SERVERS = {\n+ 'acme-v02.api.letsencrypt.org/directory': 'acme-v01.api.letsencrypt.org/directory',\n 'acme-staging-v02.api.letsencrypt.org/directory':\n 'acme-staging.api.letsencrypt.org/directory'\n }\n", "issue": "Reuse ACMEv1 accounts for ACMEv2 production\nThe lines removed following the discussion at https://github.com/certbot/certbot/pull/5902#discussion_r192532446 need to be added back.\n", "before_files": [{"content": "\"\"\"Certbot constants.\"\"\"\nimport logging\nimport os\nimport pkg_resources\n\nfrom acme import challenges\n\n\nSETUPTOOLS_PLUGINS_ENTRY_POINT = \"certbot.plugins\"\n\"\"\"Setuptools entry point group name for plugins.\"\"\"\n\nOLD_SETUPTOOLS_PLUGINS_ENTRY_POINT = \"letsencrypt.plugins\"\n\"\"\"Plugins Setuptools entry point before rename.\"\"\"\n\nCLI_DEFAULTS = dict(\n config_files=[\n \"/etc/letsencrypt/cli.ini\",\n # http://freedesktop.org/wiki/Software/xdg-user-dirs/\n os.path.join(os.environ.get(\"XDG_CONFIG_HOME\", \"~/.config\"),\n \"letsencrypt\", \"cli.ini\"),\n ],\n\n # Main parser\n verbose_count=-int(logging.INFO / 10),\n text_mode=False,\n max_log_backups=1000,\n noninteractive_mode=False,\n force_interactive=False,\n domains=[],\n certname=None,\n dry_run=False,\n register_unsafely_without_email=False,\n update_registration=False,\n email=None,\n eff_email=None,\n reinstall=False,\n expand=False,\n renew_by_default=False,\n renew_with_new_domains=False,\n autorenew=True,\n allow_subset_of_names=False,\n tos=False,\n account=None,\n duplicate=False,\n os_packages_only=False,\n no_self_upgrade=False,\n no_bootstrap=False,\n quiet=False,\n staging=False,\n debug=False,\n debug_challenges=False,\n no_verify_ssl=False,\n tls_sni_01_port=challenges.TLSSNI01Response.PORT,\n tls_sni_01_address=\"\",\n http01_port=challenges.HTTP01Response.PORT,\n http01_address=\"\",\n break_my_certs=False,\n rsa_key_size=2048,\n must_staple=False,\n redirect=None,\n hsts=None,\n uir=None,\n staple=None,\n strict_permissions=False,\n pref_challs=[],\n validate_hooks=True,\n directory_hooks=True,\n reuse_key=False,\n disable_renew_updates=False,\n\n # Subparsers\n num=None,\n user_agent=None,\n user_agent_comment=None,\n csr=None,\n reason=0,\n delete_after_revoke=None,\n rollback_checkpoints=1,\n init=False,\n prepare=False,\n ifaces=None,\n\n # Path parsers\n auth_cert_path=\"./cert.pem\",\n auth_chain_path=\"./chain.pem\",\n key_path=None,\n config_dir=\"/etc/letsencrypt\",\n work_dir=\"/var/lib/letsencrypt\",\n logs_dir=\"/var/log/letsencrypt\",\n server=\"https://acme-v01.api.letsencrypt.org/directory\",\n\n # Plugins parsers\n configurator=None,\n authenticator=None,\n installer=None,\n apache=False,\n nginx=False,\n standalone=False,\n manual=False,\n webroot=False,\n dns_cloudflare=False,\n dns_cloudxns=False,\n dns_digitalocean=False,\n dns_dnsimple=False,\n dns_dnsmadeeasy=False,\n dns_google=False,\n dns_luadns=False,\n dns_nsone=False,\n dns_rfc2136=False,\n dns_route53=False\n\n)\nSTAGING_URI = \"https://acme-staging-v02.api.letsencrypt.org/directory\"\n\n# The set of reasons for revoking a certificate is defined in RFC 5280 in\n# section 5.3.1. The reasons that users are allowed to submit are restricted to\n# those accepted by the ACME server implementation. 
They are listed in\n# `letsencrypt.boulder.revocation.reasons.go`.\nREVOCATION_REASONS = {\n \"unspecified\": 0,\n \"keycompromise\": 1,\n \"affiliationchanged\": 3,\n \"superseded\": 4,\n \"cessationofoperation\": 5}\n\n\"\"\"Defaults for CLI flags and `.IConfig` attributes.\"\"\"\n\nQUIET_LOGGING_LEVEL = logging.WARNING\n\"\"\"Logging level to use in quiet mode.\"\"\"\n\nRENEWER_DEFAULTS = dict(\n renewer_enabled=\"yes\",\n renew_before_expiry=\"30 days\",\n # This value should ensure that there is never a deployment delay by\n # default.\n deploy_before_expiry=\"99 years\",\n)\n\"\"\"Defaults for renewer script.\"\"\"\n\n\nENHANCEMENTS = [\"redirect\", \"ensure-http-header\", \"ocsp-stapling\", \"spdy\"]\n\"\"\"List of possible :class:`certbot.interfaces.IInstaller`\nenhancements.\n\nList of expected options parameters:\n- redirect: None\n- ensure-http-header: name of header (i.e. Strict-Transport-Security)\n- ocsp-stapling: certificate chain file path\n- spdy: TODO\n\n\"\"\"\n\nARCHIVE_DIR = \"archive\"\n\"\"\"Archive directory, relative to `IConfig.config_dir`.\"\"\"\n\nCONFIG_DIRS_MODE = 0o755\n\"\"\"Directory mode for ``.IConfig.config_dir`` et al.\"\"\"\n\nACCOUNTS_DIR = \"accounts\"\n\"\"\"Directory where all accounts are saved.\"\"\"\n\nLE_REUSE_SERVERS = {\n 'acme-staging-v02.api.letsencrypt.org/directory':\n 'acme-staging.api.letsencrypt.org/directory'\n}\n\"\"\"Servers that can reuse accounts from other servers.\"\"\"\n\nBACKUP_DIR = \"backups\"\n\"\"\"Directory (relative to `IConfig.work_dir`) where backups are kept.\"\"\"\n\nCSR_DIR = \"csr\"\n\"\"\"See `.IConfig.csr_dir`.\"\"\"\n\nIN_PROGRESS_DIR = \"IN_PROGRESS\"\n\"\"\"Directory used before a permanent checkpoint is finalized (relative to\n`IConfig.work_dir`).\"\"\"\n\nKEY_DIR = \"keys\"\n\"\"\"Directory (relative to `IConfig.config_dir`) where keys are saved.\"\"\"\n\nLIVE_DIR = \"live\"\n\"\"\"Live directory, relative to `IConfig.config_dir`.\"\"\"\n\nTEMP_CHECKPOINT_DIR = \"temp_checkpoint\"\n\"\"\"Temporary checkpoint directory (relative to `IConfig.work_dir`).\"\"\"\n\nRENEWAL_CONFIGS_DIR = \"renewal\"\n\"\"\"Renewal configs directory, relative to `IConfig.config_dir`.\"\"\"\n\nRENEWAL_HOOKS_DIR = \"renewal-hooks\"\n\"\"\"Basename of directory containing hooks to run with the renew command.\"\"\"\n\nRENEWAL_PRE_HOOKS_DIR = \"pre\"\n\"\"\"Basename of directory containing pre-hooks to run with the renew command.\"\"\"\n\nRENEWAL_DEPLOY_HOOKS_DIR = \"deploy\"\n\"\"\"Basename of directory containing deploy-hooks to run with the renew command.\"\"\"\n\nRENEWAL_POST_HOOKS_DIR = \"post\"\n\"\"\"Basename of directory containing post-hooks to run with the renew command.\"\"\"\n\nFORCE_INTERACTIVE_FLAG = \"--force-interactive\"\n\"\"\"Flag to disable TTY checking in IDisplay.\"\"\"\n\nEFF_SUBSCRIBE_URI = \"https://supporters.eff.org/subscribe/certbot\"\n\"\"\"EFF URI used to submit the e-mail address of users who opt-in.\"\"\"\n\nSSL_DHPARAMS_DEST = \"ssl-dhparams.pem\"\n\"\"\"Name of the ssl_dhparams file as saved in `IConfig.config_dir`.\"\"\"\n\nSSL_DHPARAMS_SRC = pkg_resources.resource_filename(\n \"certbot\", \"ssl-dhparams.pem\")\n\"\"\"Path to the nginx ssl_dhparams file found in the Certbot distribution.\"\"\"\n\nUPDATED_SSL_DHPARAMS_DIGEST = \".updated-ssl-dhparams-pem-digest.txt\"\n\"\"\"Name of the hash of the updated or informed ssl_dhparams as saved in `IConfig.config_dir`.\"\"\"\n\nALL_SSL_DHPARAMS_HASHES = [\n '9ba6429597aeed2d8617a7705b56e96d044f64b07971659382e426675105654b',\n]\n\"\"\"SHA256 hashes of the 
contents of all versions of SSL_DHPARAMS_SRC\"\"\"\n", "path": "certbot/constants.py"}]}
2,829
123
gh_patches_debug_43065
rasdani/github-patches
git_diff
numba__numba-1692
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Cannot use cffi.from_buffer when explicitly passing signature to JIT I was experimenting with passing arrays to CFFI functions (see https://github.com/numba/numba/pull/1464) but found that it only works when not specifying the signature. This change makes the test fail in 0.23.1: ``` diff diff --git a/numba/tests/test_cffi.py b/numba/tests/test_cffi.py index ca304f9..fa1752d 100644 --- a/numba/tests/test_cffi.py +++ b/numba/tests/test_cffi.py @@ -82,7 +82,9 @@ class TestCFFI(TestCase): np.testing.assert_equal(pyfunc(x), cfunc(x)) def test_pass_numpy_array_float32(self): - self._test_pass_numpy_array(vector_sin_float32, np.float32) + x = np.arange(10).astype(np.float32) + cfunc = jit('float32[:](float32[:])', nopython=True)(vector_sin_float32) + np.testing.assert_equal(vector_sin_float32(x), cfunc(x)) def test_pass_numpy_array_float64(self): self._test_pass_numpy_array(vector_sin_float64, np.float64) ``` I was not able to try it with latest numba since it requires llvmlite 0.9.0 and it's not available in conda yet. The workaround for me has been to provide a separate function without signature, but it's a pity because it is preventing me to provide different specializations for CFFI functions (see https://github.com/Pybonacci/cffi_test/). </issue> <code> [start of numba/typing/cffi_utils.py] 1 # -*- coding: utf-8 -*- 2 """ 3 Support for CFFI. Allows checking whether objects are CFFI functions and 4 obtaining the pointer and numba signature. 5 """ 6 from __future__ import print_function, division, absolute_import 7 8 from types import BuiltinFunctionType 9 import ctypes 10 11 from numba import types 12 from . import templates 13 14 try: 15 import cffi 16 ffi = cffi.FFI() 17 except ImportError: 18 ffi = None 19 20 SUPPORTED = ffi is not None 21 _ool_func_types = {} 22 _ool_func_ptr = {} 23 _ffi_instances = set() 24 25 26 def is_ffi_instance(obj): 27 # Compiled FFI modules have a member, ffi, which is an instance of 28 # CompiledFFI, which behaves similarly to an instance of cffi.FFI. In 29 # order to simplify handling a CompiledFFI object, we treat them as 30 # if they're cffi.FFI instances for typing and lowering purposes. 31 try: 32 return obj in _ffi_instances or isinstance(obj, cffi.FFI) 33 except TypeError: # Unhashable type possible 34 return False 35 36 def is_cffi_func(obj): 37 """Check whether the obj is a CFFI function""" 38 try: 39 return ffi.typeof(obj).kind == 'function' 40 except TypeError: 41 try: 42 return obj in _ool_func_types 43 except: 44 return False 45 46 def get_pointer(cffi_func): 47 """ 48 Get a pointer to the underlying function for a CFFI function as an 49 integer. 50 """ 51 if cffi_func in _ool_func_ptr: 52 return _ool_func_ptr[cffi_func] 53 return int(ffi.cast("uintptr_t", cffi_func)) 54 55 56 _cached_type_map = None 57 58 def _type_map(): 59 """ 60 Lazily compute type map, as calling ffi.typeof() involves costly 61 parsing of C code... 
62 """ 63 global _cached_type_map 64 if _cached_type_map is None: 65 _cached_type_map = { 66 ffi.typeof('char') : types.int8, 67 ffi.typeof('short') : types.short, 68 ffi.typeof('int') : types.intc, 69 ffi.typeof('long') : types.long_, 70 ffi.typeof('long long') : types.longlong, 71 ffi.typeof('unsigned char') : types.uchar, 72 ffi.typeof('unsigned short') : types.ushort, 73 ffi.typeof('unsigned int') : types.uintc, 74 ffi.typeof('unsigned long') : types.ulong, 75 ffi.typeof('unsigned long long') : types.ulonglong, 76 ffi.typeof('int8_t') : types.char, 77 ffi.typeof('uint8_t') : types.uchar, 78 ffi.typeof('int16_t') : types.short, 79 ffi.typeof('uint16_t') : types.ushort, 80 ffi.typeof('int32_t') : types.intc, 81 ffi.typeof('uint32_t') : types.uintc, 82 ffi.typeof('int64_t') : types.longlong, 83 ffi.typeof('uint64_t') : types.ulonglong, 84 ffi.typeof('float') : types.float_, 85 ffi.typeof('double') : types.double, 86 ffi.typeof('char *') : types.voidptr, 87 ffi.typeof('void *') : types.voidptr, 88 ffi.typeof('uint8_t *') : types.CPointer(types.uint8), 89 ffi.typeof('float *') : types.CPointer(types.float32), 90 ffi.typeof('double *') : types.CPointer(types.float64), 91 ffi.typeof('ssize_t') : types.intp, 92 ffi.typeof('size_t') : types.uintp, 93 ffi.typeof('void') : types.void, 94 } 95 return _cached_type_map 96 97 98 def map_type(cffi_type): 99 """ 100 Map CFFI type to numba type. 101 """ 102 kind = getattr(cffi_type, 'kind', '') 103 if kind in ('struct', 'union'): 104 raise TypeError("No support for struct or union") 105 elif kind == 'function': 106 if cffi_type.ellipsis: 107 raise TypeError("vararg function is not supported") 108 restype = map_type(cffi_type.result) 109 argtypes = [map_type(arg) for arg in cffi_type.args] 110 return templates.signature(restype, *argtypes) 111 else: 112 result = _type_map().get(cffi_type) 113 114 if result is None: 115 raise TypeError(cffi_type) 116 117 return result 118 119 120 def make_function_type(cffi_func): 121 """ 122 Return a Numba type for the given CFFI function pointer. 123 """ 124 cffi_type = _ool_func_types.get(cffi_func) or ffi.typeof(cffi_func) 125 sig = map_type(cffi_type) 126 return types.ExternalFunctionPointer(sig, get_pointer=get_pointer) 127 128 129 class ExternCFunction(types.ExternalFunction): 130 # XXX unused? 
131 132 def __init__(self, symbol, cstring): 133 """Parse C function declaration/signature""" 134 parser = cffi.cparser.Parser() 135 rft = parser.parse_type(cstring) # "RawFunctionType" 136 type_map = _type_map() 137 self.restype = type_map[rft.result.build_backend_type(ffi, None)] 138 self.argtypes = [type_map[arg.build_backend_type(ffi, None)] for arg in rft.args] 139 signature = templates.signature(self.restype, *self.argtypes) 140 super(ExternCFunction, self).__init__(symbol, signature) 141 142 143 registry = templates.Registry() 144 145 @registry.register 146 class FFI_from_buffer(templates.AbstractTemplate): 147 key = 'ffi.from_buffer' 148 149 def generic(self, args, kws): 150 if kws or (len(args) != 1): 151 return 152 [ary] = args 153 if not (isinstance(ary, types.Array) and ary.layout in ('C', 'F')): 154 return 155 ptr = types.CPointer(ary.dtype) 156 return templates.signature(ptr, ary) 157 158 @registry.register_attr 159 class FFIAttribute(templates.AttributeTemplate): 160 key = types.ffi 161 162 def resolve_from_buffer(self, ffi): 163 return types.BoundFunction(FFI_from_buffer, types.ffi) 164 165 166 def register_module(mod): 167 """ 168 Add typing for all functions in an out-of-line CFFI module to the typemap 169 """ 170 for f in dir(mod.lib): 171 f = getattr(mod.lib, f) 172 if isinstance(f, BuiltinFunctionType): 173 _ool_func_types[f] = mod.ffi.typeof(f) 174 addr = mod.ffi.addressof(mod.lib, f.__name__) 175 _ool_func_ptr[f] = int(mod.ffi.cast("uintptr_t", addr)) 176 _ffi_instances.add(mod.ffi) 177 [end of numba/typing/cffi_utils.py] [start of numba/targets/cffiimpl.py] 1 """ 2 Implementation of some CFFI functions 3 """ 4 5 from __future__ import print_function, absolute_import, division 6 7 from numba.targets.imputils import Registry 8 from numba import types 9 from . import arrayobj 10 11 registry = Registry() 12 13 @registry.lower('ffi.from_buffer', types.Array) 14 def from_buffer(context, builder, sig, args): 15 assert len(sig.args) == 1 16 assert len(args) == 1 17 [fromty] = sig.args 18 [val] = args 19 # Type inference should have prevented passing a buffer from an 20 # array to a pointer of the wrong type 21 assert fromty.dtype == sig.return_type.dtype 22 ary = arrayobj.make_array(fromty)(context, builder, val) 23 return ary.data 24 [end of numba/targets/cffiimpl.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/numba/targets/cffiimpl.py b/numba/targets/cffiimpl.py --- a/numba/targets/cffiimpl.py +++ b/numba/targets/cffiimpl.py @@ -10,7 +10,7 @@ registry = Registry() [email protected]('ffi.from_buffer', types.Array) [email protected]('ffi.from_buffer', types.Buffer) def from_buffer(context, builder, sig, args): assert len(sig.args) == 1 assert len(args) == 1 diff --git a/numba/typing/cffi_utils.py b/numba/typing/cffi_utils.py --- a/numba/typing/cffi_utils.py +++ b/numba/typing/cffi_utils.py @@ -9,6 +9,7 @@ import ctypes from numba import types +from numba.errors import TypingError from . import templates try: @@ -63,7 +64,7 @@ global _cached_type_map if _cached_type_map is None: _cached_type_map = { - ffi.typeof('char') : types.int8, + ffi.typeof('char') : types.char, ffi.typeof('short') : types.short, ffi.typeof('int') : types.intc, ffi.typeof('long') : types.long_, @@ -83,11 +84,6 @@ ffi.typeof('uint64_t') : types.ulonglong, ffi.typeof('float') : types.float_, ffi.typeof('double') : types.double, - ffi.typeof('char *') : types.voidptr, - ffi.typeof('void *') : types.voidptr, - ffi.typeof('uint8_t *') : types.CPointer(types.uint8), - ffi.typeof('float *') : types.CPointer(types.float32), - ffi.typeof('double *') : types.CPointer(types.float64), ffi.typeof('ssize_t') : types.intp, ffi.typeof('size_t') : types.uintp, ffi.typeof('void') : types.void, @@ -101,20 +97,24 @@ """ kind = getattr(cffi_type, 'kind', '') if kind in ('struct', 'union'): - raise TypeError("No support for struct or union") + raise TypeError("No support for CFFI %s" % (kind,)) elif kind == 'function': if cffi_type.ellipsis: raise TypeError("vararg function is not supported") restype = map_type(cffi_type.result) argtypes = [map_type(arg) for arg in cffi_type.args] return templates.signature(restype, *argtypes) + elif kind == 'pointer': + pointee = cffi_type.item + if pointee.kind == 'void': + return types.voidptr + else: + return types.CPointer(map_type(pointee)) else: result = _type_map().get(cffi_type) - - if result is None: - raise TypeError(cffi_type) - - return result + if result is None: + raise TypeError(cffi_type) + return result def make_function_type(cffi_func): @@ -147,11 +147,15 @@ key = 'ffi.from_buffer' def generic(self, args, kws): - if kws or (len(args) != 1): + if kws or len(args) != 1: return [ary] = args - if not (isinstance(ary, types.Array) and ary.layout in ('C', 'F')): - return + if not isinstance(ary, types.Buffer): + raise TypingError("from_buffer() expected a buffer object, got %s" + % (ary,)) + if ary.layout not in ('C', 'F'): + raise TypingError("from_buffer() unsupported on non-contiguous buffers (got %s)" + % (ary,)) ptr = types.CPointer(ary.dtype) return templates.signature(ptr, ary)
{"golden_diff": "diff --git a/numba/targets/cffiimpl.py b/numba/targets/cffiimpl.py\n--- a/numba/targets/cffiimpl.py\n+++ b/numba/targets/cffiimpl.py\n@@ -10,7 +10,7 @@\n \n registry = Registry()\n \[email protected]('ffi.from_buffer', types.Array)\[email protected]('ffi.from_buffer', types.Buffer)\n def from_buffer(context, builder, sig, args):\n assert len(sig.args) == 1\n assert len(args) == 1\ndiff --git a/numba/typing/cffi_utils.py b/numba/typing/cffi_utils.py\n--- a/numba/typing/cffi_utils.py\n+++ b/numba/typing/cffi_utils.py\n@@ -9,6 +9,7 @@\n import ctypes\n \n from numba import types\n+from numba.errors import TypingError\n from . import templates\n \n try:\n@@ -63,7 +64,7 @@\n global _cached_type_map\n if _cached_type_map is None:\n _cached_type_map = {\n- ffi.typeof('char') : types.int8,\n+ ffi.typeof('char') : types.char,\n ffi.typeof('short') : types.short,\n ffi.typeof('int') : types.intc,\n ffi.typeof('long') : types.long_,\n@@ -83,11 +84,6 @@\n ffi.typeof('uint64_t') : types.ulonglong,\n ffi.typeof('float') : types.float_,\n ffi.typeof('double') : types.double,\n- ffi.typeof('char *') : types.voidptr,\n- ffi.typeof('void *') : types.voidptr,\n- ffi.typeof('uint8_t *') : types.CPointer(types.uint8),\n- ffi.typeof('float *') : types.CPointer(types.float32),\n- ffi.typeof('double *') : types.CPointer(types.float64),\n ffi.typeof('ssize_t') : types.intp,\n ffi.typeof('size_t') : types.uintp,\n ffi.typeof('void') : types.void,\n@@ -101,20 +97,24 @@\n \"\"\"\n kind = getattr(cffi_type, 'kind', '')\n if kind in ('struct', 'union'):\n- raise TypeError(\"No support for struct or union\")\n+ raise TypeError(\"No support for CFFI %s\" % (kind,))\n elif kind == 'function':\n if cffi_type.ellipsis:\n raise TypeError(\"vararg function is not supported\")\n restype = map_type(cffi_type.result)\n argtypes = [map_type(arg) for arg in cffi_type.args]\n return templates.signature(restype, *argtypes)\n+ elif kind == 'pointer':\n+ pointee = cffi_type.item\n+ if pointee.kind == 'void':\n+ return types.voidptr\n+ else:\n+ return types.CPointer(map_type(pointee))\n else:\n result = _type_map().get(cffi_type)\n-\n- if result is None:\n- raise TypeError(cffi_type)\n-\n- return result\n+ if result is None:\n+ raise TypeError(cffi_type)\n+ return result\n \n \n def make_function_type(cffi_func):\n@@ -147,11 +147,15 @@\n key = 'ffi.from_buffer'\n \n def generic(self, args, kws):\n- if kws or (len(args) != 1):\n+ if kws or len(args) != 1:\n return\n [ary] = args\n- if not (isinstance(ary, types.Array) and ary.layout in ('C', 'F')):\n- return\n+ if not isinstance(ary, types.Buffer):\n+ raise TypingError(\"from_buffer() expected a buffer object, got %s\"\n+ % (ary,))\n+ if ary.layout not in ('C', 'F'):\n+ raise TypingError(\"from_buffer() unsupported on non-contiguous buffers (got %s)\"\n+ % (ary,))\n ptr = types.CPointer(ary.dtype)\n return templates.signature(ptr, ary)\n", "issue": "Cannot use cffi.from_buffer when explicitly passing signature to JIT\nI was experimenting with passing arrays to CFFI functions (see https://github.com/numba/numba/pull/1464) but found that it only works when not specifying the signature. 
This change makes the test fail in 0.23.1:\n\n``` diff\ndiff --git a/numba/tests/test_cffi.py b/numba/tests/test_cffi.py\nindex ca304f9..fa1752d 100644\n--- a/numba/tests/test_cffi.py\n+++ b/numba/tests/test_cffi.py\n@@ -82,7 +82,9 @@ class TestCFFI(TestCase):\n np.testing.assert_equal(pyfunc(x), cfunc(x))\n\n def test_pass_numpy_array_float32(self):\n- self._test_pass_numpy_array(vector_sin_float32, np.float32)\n+ x = np.arange(10).astype(np.float32)\n+ cfunc = jit('float32[:](float32[:])', nopython=True)(vector_sin_float32)\n+ np.testing.assert_equal(vector_sin_float32(x), cfunc(x))\n\n def test_pass_numpy_array_float64(self):\n self._test_pass_numpy_array(vector_sin_float64, np.float64)\n```\n\nI was not able to try it with latest numba since it requires llvmlite 0.9.0 and it's not available in conda yet.\n\nThe workaround for me has been to provide a separate function without signature, but it's a pity because it is preventing me to provide different specializations for CFFI functions (see https://github.com/Pybonacci/cffi_test/).\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nSupport for CFFI. Allows checking whether objects are CFFI functions and\nobtaining the pointer and numba signature.\n\"\"\"\nfrom __future__ import print_function, division, absolute_import\n\nfrom types import BuiltinFunctionType\nimport ctypes\n\nfrom numba import types\nfrom . import templates\n\ntry:\n import cffi\n ffi = cffi.FFI()\nexcept ImportError:\n ffi = None\n\nSUPPORTED = ffi is not None\n_ool_func_types = {}\n_ool_func_ptr = {}\n_ffi_instances = set()\n\n\ndef is_ffi_instance(obj):\n # Compiled FFI modules have a member, ffi, which is an instance of\n # CompiledFFI, which behaves similarly to an instance of cffi.FFI. In\n # order to simplify handling a CompiledFFI object, we treat them as\n # if they're cffi.FFI instances for typing and lowering purposes.\n try:\n return obj in _ffi_instances or isinstance(obj, cffi.FFI)\n except TypeError: # Unhashable type possible\n return False\n\ndef is_cffi_func(obj):\n \"\"\"Check whether the obj is a CFFI function\"\"\"\n try:\n return ffi.typeof(obj).kind == 'function'\n except TypeError:\n try:\n return obj in _ool_func_types\n except:\n return False\n\ndef get_pointer(cffi_func):\n \"\"\"\n Get a pointer to the underlying function for a CFFI function as an\n integer.\n \"\"\"\n if cffi_func in _ool_func_ptr:\n return _ool_func_ptr[cffi_func]\n return int(ffi.cast(\"uintptr_t\", cffi_func))\n\n\n_cached_type_map = None\n\ndef _type_map():\n \"\"\"\n Lazily compute type map, as calling ffi.typeof() involves costly\n parsing of C code...\n \"\"\"\n global _cached_type_map\n if _cached_type_map is None:\n _cached_type_map = {\n ffi.typeof('char') : types.int8,\n ffi.typeof('short') : types.short,\n ffi.typeof('int') : types.intc,\n ffi.typeof('long') : types.long_,\n ffi.typeof('long long') : types.longlong,\n ffi.typeof('unsigned char') : types.uchar,\n ffi.typeof('unsigned short') : types.ushort,\n ffi.typeof('unsigned int') : types.uintc,\n ffi.typeof('unsigned long') : types.ulong,\n ffi.typeof('unsigned long long') : types.ulonglong,\n ffi.typeof('int8_t') : types.char,\n ffi.typeof('uint8_t') : types.uchar,\n ffi.typeof('int16_t') : types.short,\n ffi.typeof('uint16_t') : types.ushort,\n ffi.typeof('int32_t') : types.intc,\n ffi.typeof('uint32_t') : types.uintc,\n ffi.typeof('int64_t') : types.longlong,\n ffi.typeof('uint64_t') : types.ulonglong,\n ffi.typeof('float') : types.float_,\n ffi.typeof('double') : types.double,\n 
ffi.typeof('char *') : types.voidptr,\n ffi.typeof('void *') : types.voidptr,\n ffi.typeof('uint8_t *') : types.CPointer(types.uint8),\n ffi.typeof('float *') : types.CPointer(types.float32),\n ffi.typeof('double *') : types.CPointer(types.float64),\n ffi.typeof('ssize_t') : types.intp,\n ffi.typeof('size_t') : types.uintp,\n ffi.typeof('void') : types.void,\n }\n return _cached_type_map\n\n\ndef map_type(cffi_type):\n \"\"\"\n Map CFFI type to numba type.\n \"\"\"\n kind = getattr(cffi_type, 'kind', '')\n if kind in ('struct', 'union'):\n raise TypeError(\"No support for struct or union\")\n elif kind == 'function':\n if cffi_type.ellipsis:\n raise TypeError(\"vararg function is not supported\")\n restype = map_type(cffi_type.result)\n argtypes = [map_type(arg) for arg in cffi_type.args]\n return templates.signature(restype, *argtypes)\n else:\n result = _type_map().get(cffi_type)\n\n if result is None:\n raise TypeError(cffi_type)\n\n return result\n\n\ndef make_function_type(cffi_func):\n \"\"\"\n Return a Numba type for the given CFFI function pointer.\n \"\"\"\n cffi_type = _ool_func_types.get(cffi_func) or ffi.typeof(cffi_func)\n sig = map_type(cffi_type)\n return types.ExternalFunctionPointer(sig, get_pointer=get_pointer)\n\n\nclass ExternCFunction(types.ExternalFunction):\n # XXX unused?\n\n def __init__(self, symbol, cstring):\n \"\"\"Parse C function declaration/signature\"\"\"\n parser = cffi.cparser.Parser()\n rft = parser.parse_type(cstring) # \"RawFunctionType\"\n type_map = _type_map()\n self.restype = type_map[rft.result.build_backend_type(ffi, None)]\n self.argtypes = [type_map[arg.build_backend_type(ffi, None)] for arg in rft.args]\n signature = templates.signature(self.restype, *self.argtypes)\n super(ExternCFunction, self).__init__(symbol, signature)\n\n\nregistry = templates.Registry()\n\[email protected]\nclass FFI_from_buffer(templates.AbstractTemplate):\n key = 'ffi.from_buffer'\n\n def generic(self, args, kws):\n if kws or (len(args) != 1):\n return\n [ary] = args\n if not (isinstance(ary, types.Array) and ary.layout in ('C', 'F')):\n return\n ptr = types.CPointer(ary.dtype)\n return templates.signature(ptr, ary)\n\[email protected]_attr\nclass FFIAttribute(templates.AttributeTemplate):\n key = types.ffi\n\n def resolve_from_buffer(self, ffi):\n return types.BoundFunction(FFI_from_buffer, types.ffi)\n\n\ndef register_module(mod):\n \"\"\"\n Add typing for all functions in an out-of-line CFFI module to the typemap\n \"\"\"\n for f in dir(mod.lib):\n f = getattr(mod.lib, f)\n if isinstance(f, BuiltinFunctionType):\n _ool_func_types[f] = mod.ffi.typeof(f)\n addr = mod.ffi.addressof(mod.lib, f.__name__)\n _ool_func_ptr[f] = int(mod.ffi.cast(\"uintptr_t\", addr))\n _ffi_instances.add(mod.ffi)\n", "path": "numba/typing/cffi_utils.py"}, {"content": "\"\"\"\nImplementation of some CFFI functions\n\"\"\"\n\nfrom __future__ import print_function, absolute_import, division\n\nfrom numba.targets.imputils import Registry\nfrom numba import types\nfrom . import arrayobj\n\nregistry = Registry()\n\[email protected]('ffi.from_buffer', types.Array)\ndef from_buffer(context, builder, sig, args):\n assert len(sig.args) == 1\n assert len(args) == 1\n [fromty] = sig.args\n [val] = args\n # Type inference should have prevented passing a buffer from an\n # array to a pointer of the wrong type\n assert fromty.dtype == sig.return_type.dtype\n ary = arrayobj.make_array(fromty)(context, builder, val)\n return ary.data\n", "path": "numba/targets/cffiimpl.py"}]}
3,061
932
gh_patches_debug_63274
rasdani/github-patches
git_diff
Mailu__Mailu-2603
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Maximum number of connections from user+IP exceeded Hi, we have a problem... :-) We have changed the original value of "AUTH_RATELIMIT" to "AUTH_RATELIMIT=100/minute;6000/hour", but logs continue to say " Maximum number of connections from user+IP exceeded (mail_max_userip_connections=20)" while reading response from upstream..." We have made docker-compose dow and docker-compose up -d, but without result. How can we change the default limit set during the installation? Thanks in advance. </issue> <code> [start of core/admin/mailu/internal/views/dovecot.py] 1 from mailu import models 2 from mailu.internal import internal 3 from flask import current_app as app 4 5 import flask 6 import socket 7 import os 8 import sqlalchemy.exc 9 10 @internal.route("/dovecot/passdb/<path:user_email>") 11 def dovecot_passdb_dict(user_email): 12 user = models.User.query.get(user_email) or flask.abort(404) 13 allow_nets = [] 14 allow_nets.append(app.config["SUBNET"]) 15 if app.config["SUBNET6"]: 16 allow_nets.append(app.config["SUBNET6"]) 17 return flask.jsonify({ 18 "password": None, 19 "nopassword": "Y", 20 "allow_nets": ",".join(allow_nets) 21 }) 22 23 @internal.route("/dovecot/userdb/") 24 def dovecot_userdb_dict_list(): 25 return flask.jsonify([ 26 user[0] for user in models.User.query.filter(models.User.enabled.is_(True)).with_entities(models.User.email).all() 27 ]) 28 29 @internal.route("/dovecot/userdb/<path:user_email>") 30 def dovecot_userdb_dict(user_email): 31 try: 32 quota = models.User.query.filter(models.User.email==user_email).with_entities(models.User.quota_bytes).one_or_none() or flask.abort(404) 33 except sqlalchemy.exc.StatementError as exc: 34 flask.abort(404) 35 return flask.jsonify({ 36 "quota_rule": f"*:bytes={quota[0]}" 37 }) 38 39 40 @internal.route("/dovecot/quota/<ns>/<path:user_email>", methods=["POST"]) 41 def dovecot_quota(ns, user_email): 42 user = models.User.query.get(user_email) or flask.abort(404) 43 if ns == "storage": 44 user.quota_bytes_used = flask.request.get_json() 45 user.dont_change_updated_at() 46 models.db.session.commit() 47 return flask.jsonify(None) 48 49 50 @internal.route("/dovecot/sieve/name/<script>/<path:user_email>") 51 def dovecot_sieve_name(script, user_email): 52 return flask.jsonify(script) 53 54 55 @internal.route("/dovecot/sieve/data/default/<path:user_email>") 56 def dovecot_sieve_data(user_email): 57 user = models.User.query.get(user_email) or flask.abort(404) 58 return flask.jsonify(flask.render_template("default.sieve", user=user)) 59 [end of core/admin/mailu/internal/views/dovecot.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/core/admin/mailu/internal/views/dovecot.py b/core/admin/mailu/internal/views/dovecot.py --- a/core/admin/mailu/internal/views/dovecot.py +++ b/core/admin/mailu/internal/views/dovecot.py @@ -17,7 +17,7 @@ return flask.jsonify({ "password": None, "nopassword": "Y", - "allow_nets": ",".join(allow_nets) + "allow_real_nets": ",".join(allow_nets) }) @internal.route("/dovecot/userdb/")
{"golden_diff": "diff --git a/core/admin/mailu/internal/views/dovecot.py b/core/admin/mailu/internal/views/dovecot.py\n--- a/core/admin/mailu/internal/views/dovecot.py\n+++ b/core/admin/mailu/internal/views/dovecot.py\n@@ -17,7 +17,7 @@\n return flask.jsonify({\n \"password\": None,\n \"nopassword\": \"Y\",\n- \"allow_nets\": \",\".join(allow_nets)\n+ \"allow_real_nets\": \",\".join(allow_nets)\n })\n \n @internal.route(\"/dovecot/userdb/\")\n", "issue": "Maximum number of connections from user+IP exceeded \nHi, we have a problem... :-)\r\nWe have changed the original value of \"AUTH_RATELIMIT\" to \"AUTH_RATELIMIT=100/minute;6000/hour\", but logs continue to say \" Maximum number of connections from user+IP exceeded (mail_max_userip_connections=20)\" while reading response from upstream...\"\r\nWe have made docker-compose dow and docker-compose up -d, but without result.\r\nHow can we change the default limit set during the installation?\r\nThanks in advance.\n", "before_files": [{"content": "from mailu import models\nfrom mailu.internal import internal\nfrom flask import current_app as app\n\nimport flask\nimport socket\nimport os\nimport sqlalchemy.exc\n\[email protected](\"/dovecot/passdb/<path:user_email>\")\ndef dovecot_passdb_dict(user_email):\n user = models.User.query.get(user_email) or flask.abort(404)\n allow_nets = []\n allow_nets.append(app.config[\"SUBNET\"])\n if app.config[\"SUBNET6\"]:\n allow_nets.append(app.config[\"SUBNET6\"])\n return flask.jsonify({\n \"password\": None,\n \"nopassword\": \"Y\",\n \"allow_nets\": \",\".join(allow_nets)\n })\n\[email protected](\"/dovecot/userdb/\")\ndef dovecot_userdb_dict_list():\n return flask.jsonify([\n user[0] for user in models.User.query.filter(models.User.enabled.is_(True)).with_entities(models.User.email).all()\n ])\n\[email protected](\"/dovecot/userdb/<path:user_email>\")\ndef dovecot_userdb_dict(user_email):\n try:\n quota = models.User.query.filter(models.User.email==user_email).with_entities(models.User.quota_bytes).one_or_none() or flask.abort(404)\n except sqlalchemy.exc.StatementError as exc:\n flask.abort(404)\n return flask.jsonify({\n \"quota_rule\": f\"*:bytes={quota[0]}\"\n })\n\n\[email protected](\"/dovecot/quota/<ns>/<path:user_email>\", methods=[\"POST\"])\ndef dovecot_quota(ns, user_email):\n user = models.User.query.get(user_email) or flask.abort(404)\n if ns == \"storage\":\n user.quota_bytes_used = flask.request.get_json()\n user.dont_change_updated_at()\n models.db.session.commit()\n return flask.jsonify(None)\n\n\[email protected](\"/dovecot/sieve/name/<script>/<path:user_email>\")\ndef dovecot_sieve_name(script, user_email):\n return flask.jsonify(script)\n\n\[email protected](\"/dovecot/sieve/data/default/<path:user_email>\")\ndef dovecot_sieve_data(user_email):\n user = models.User.query.get(user_email) or flask.abort(404)\n return flask.jsonify(flask.render_template(\"default.sieve\", user=user))\n", "path": "core/admin/mailu/internal/views/dovecot.py"}]}
1,268
128
gh_patches_debug_20106
rasdani/github-patches
git_diff
microsoft__torchgeo-93
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Jupyter Notebook tutorials We need to figure out how to render Jupyter Notebooks in our documentation so that we can provide easy-to-use tutorials for new users. This should work similarly to https://pytorch.org/tutorials/. Ideally I would like to be able to test these tutorials so that they stay up-to-date. </issue> <code> [start of docs/conf.py] 1 # Configuration file for the Sphinx documentation builder. 2 # 3 # This file only contains a selection of the most common options. For a full 4 # list see the documentation: 5 # https://www.sphinx-doc.org/en/master/usage/configuration.html 6 7 # -- Path setup -------------------------------------------------------------- 8 9 import os 10 import sys 11 12 import pytorch_sphinx_theme 13 14 # If extensions (or modules to document with autodoc) are in another directory, 15 # add these directories to sys.path here. If the directory is relative to the 16 # documentation root, use os.path.abspath to make it absolute, like shown here. 17 sys.path.insert(0, os.path.abspath("..")) 18 19 import torchgeo # noqa: E402 20 21 # -- Project information ----------------------------------------------------- 22 23 project = "torchgeo" 24 copyright = "2021, Microsoft Corporation" 25 author = "Adam J. Stewart" 26 version = ".".join(torchgeo.__version__.split(".")[:2]) 27 release = torchgeo.__version__ 28 29 30 # -- General configuration --------------------------------------------------- 31 32 # Add any Sphinx extension module names here, as strings. They can be 33 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 34 # ones. 35 extensions = [ 36 "sphinx.ext.autodoc", 37 "sphinx.ext.autosectionlabel", 38 "sphinx.ext.intersphinx", 39 "sphinx.ext.napoleon", 40 "sphinx.ext.todo", 41 "sphinx.ext.viewcode", 42 ] 43 44 # List of patterns, relative to source directory, that match files and 45 # directories to ignore when looking for source files. 46 # This pattern also affects html_static_path and html_extra_path. 47 exclude_patterns = ["_build"] 48 49 # Sphinx 3.0+ required for: 50 # autodoc_typehints = "description" 51 needs_sphinx = "3.0" 52 53 nitpicky = True 54 nitpick_ignore = [ 55 # https://github.com/sphinx-doc/sphinx/issues/8127 56 ("py:class", ".."), 57 # TODO: can't figure out why this isn't found 58 ("py:class", "LightningDataModule"), 59 ] 60 61 62 # -- Options for HTML output ------------------------------------------------- 63 64 # The theme to use for HTML and HTML Help pages. See the documentation for 65 # a list of builtin themes. 66 html_theme = "pytorch_sphinx_theme" 67 html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()] 68 69 # Theme options are theme-specific and customize the look and feel of a theme 70 # further. For a list of options available for each theme, see the 71 # documentation. 
72 html_theme_options = { 73 "collapse_navigation": False, 74 "display_version": True, 75 "logo_only": True, 76 "pytorch_project": "docs", 77 "navigation_with_keys": True, 78 "analytics_id": "UA-117752657-2", 79 } 80 81 # -- Extension configuration ------------------------------------------------- 82 83 # sphinx.ext.autodoc 84 autodoc_default_options = { 85 "members": True, 86 "special-members": True, 87 "show-inheritance": True, 88 } 89 autodoc_member_order = "bysource" 90 autodoc_typehints = "description" 91 92 # sphinx.ext.intersphinx 93 intersphinx_mapping = { 94 "python": ("https://docs.python.org/3", None), 95 "pytorch-lightning": ("https://pytorch-lightning.readthedocs.io/en/latest/", None), 96 "rasterio": ("https://rasterio.readthedocs.io/en/latest/", None), 97 "rtree": ("https://rtree.readthedocs.io/en/latest/", None), 98 "torch": ("https://pytorch.org/docs/stable", None), 99 } 100 [end of docs/conf.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/docs/conf.py b/docs/conf.py --- a/docs/conf.py +++ b/docs/conf.py @@ -34,11 +34,11 @@ # ones. extensions = [ "sphinx.ext.autodoc", - "sphinx.ext.autosectionlabel", "sphinx.ext.intersphinx", "sphinx.ext.napoleon", "sphinx.ext.todo", "sphinx.ext.viewcode", + "nbsphinx", ] # List of patterns, relative to source directory, that match files and @@ -97,3 +97,17 @@ "rtree": ("https://rtree.readthedocs.io/en/latest/", None), "torch": ("https://pytorch.org/docs/stable", None), } + +# nbsphinx +nbsphinx_execute = "never" +# TODO: branch/tag should change depending on which version of docs you look at +# TODO: :width: may be broken +nbsphinx_prolog = """ +{% set colab = "https://colab.research.google.com" %} +{% set repo = "microsoft/torchgeo" %} +{% set branch = "main" %} + +.. image:: {{ colab }}/assets/colab-badge.svg + :alt: Open in Colab + :target: {{ colab }}/github/{{ repo }}/blob/{{ branch }}/docs/{{ env.docname }} +"""
{"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -34,11 +34,11 @@\n # ones.\n extensions = [\n \"sphinx.ext.autodoc\",\n- \"sphinx.ext.autosectionlabel\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.napoleon\",\n \"sphinx.ext.todo\",\n \"sphinx.ext.viewcode\",\n+ \"nbsphinx\",\n ]\n \n # List of patterns, relative to source directory, that match files and\n@@ -97,3 +97,17 @@\n \"rtree\": (\"https://rtree.readthedocs.io/en/latest/\", None),\n \"torch\": (\"https://pytorch.org/docs/stable\", None),\n }\n+\n+# nbsphinx\n+nbsphinx_execute = \"never\"\n+# TODO: branch/tag should change depending on which version of docs you look at\n+# TODO: :width: may be broken\n+nbsphinx_prolog = \"\"\"\n+{% set colab = \"https://colab.research.google.com\" %}\n+{% set repo = \"microsoft/torchgeo\" %}\n+{% set branch = \"main\" %}\n+\n+.. image:: {{ colab }}/assets/colab-badge.svg\n+ :alt: Open in Colab\n+ :target: {{ colab }}/github/{{ repo }}/blob/{{ branch }}/docs/{{ env.docname }}\n+\"\"\"\n", "issue": "Jupyter Notebook tutorials\nWe need to figure out how to render Jupyter Notebooks in our documentation so that we can provide easy-to-use tutorials for new users. This should work similarly to https://pytorch.org/tutorials/.\r\n\r\nIdeally I would like to be able to test these tutorials so that they stay up-to-date.\n", "before_files": [{"content": "# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\nimport os\nimport sys\n\nimport pytorch_sphinx_theme\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\nsys.path.insert(0, os.path.abspath(\"..\"))\n\nimport torchgeo # noqa: E402\n\n# -- Project information -----------------------------------------------------\n\nproject = \"torchgeo\"\ncopyright = \"2021, Microsoft Corporation\"\nauthor = \"Adam J. Stewart\"\nversion = \".\".join(torchgeo.__version__.split(\".\")[:2])\nrelease = torchgeo.__version__\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.autosectionlabel\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.napoleon\",\n \"sphinx.ext.todo\",\n \"sphinx.ext.viewcode\",\n]\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = [\"_build\"]\n\n# Sphinx 3.0+ required for:\n# autodoc_typehints = \"description\"\nneeds_sphinx = \"3.0\"\n\nnitpicky = True\nnitpick_ignore = [\n # https://github.com/sphinx-doc/sphinx/issues/8127\n (\"py:class\", \"..\"),\n # TODO: can't figure out why this isn't found\n (\"py:class\", \"LightningDataModule\"),\n]\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. 
See the documentation for\n# a list of builtin themes.\nhtml_theme = \"pytorch_sphinx_theme\"\nhtml_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\nhtml_theme_options = {\n \"collapse_navigation\": False,\n \"display_version\": True,\n \"logo_only\": True,\n \"pytorch_project\": \"docs\",\n \"navigation_with_keys\": True,\n \"analytics_id\": \"UA-117752657-2\",\n}\n\n# -- Extension configuration -------------------------------------------------\n\n# sphinx.ext.autodoc\nautodoc_default_options = {\n \"members\": True,\n \"special-members\": True,\n \"show-inheritance\": True,\n}\nautodoc_member_order = \"bysource\"\nautodoc_typehints = \"description\"\n\n# sphinx.ext.intersphinx\nintersphinx_mapping = {\n \"python\": (\"https://docs.python.org/3\", None),\n \"pytorch-lightning\": (\"https://pytorch-lightning.readthedocs.io/en/latest/\", None),\n \"rasterio\": (\"https://rasterio.readthedocs.io/en/latest/\", None),\n \"rtree\": (\"https://rtree.readthedocs.io/en/latest/\", None),\n \"torch\": (\"https://pytorch.org/docs/stable\", None),\n}\n", "path": "docs/conf.py"}]}
1,568
308
gh_patches_debug_32695
rasdani/github-patches
git_diff
conan-io__conan-center-index-3023
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [package] jbig/20160605: Fails to build on iOS <!-- Please don't forget to update the issue title. Include all applicable information to help us reproduce your problem. --> ### Package and Environment Details (include every applicable attribute) * Package Name/Version: **jbig/20160605** * Operating System+version: **iOS 11.0** * Compiler+version: **apple-clang 11.0** * Conan version: **conan 1.29.2** * Python version: **Python 3.8.5** ### Conan profile ``` [settings] arch=x86_64 arch_build=x86_64 build_type=Debug compiler=apple-clang compiler.cppstd=17 compiler.libcxx=libc++ compiler.version=11.0 os=iOS os.version=11.0 os_build=Macos [options] [build_requires] *: darwin-toolchain/1.0.8@theodelrieu/stable [env] ``` ### Steps to reproduce (Include if Applicable) `conan install jbig/20160605@ --profile ios --build=missing` ### Logs (Include/Attach if Applicable) <details><summary>Click to expand log</summary> ``` CMake Error at CMakeLists.txt:31 (install): install TARGETS given no BUNDLE DESTINATION for MACOSX_BUNDLE executable target "jbgtopbm". ``` </details> I would suggest adding an option that disables the `pbmtojbg` and `jbgtopbm` targets from being generated. The recipe could define individual `build_` options for each, which other packages do, or go with a more generically named option that enables/disables both. For reference, `sqlite3`, `bzip2`, and `spirv-cross` have a `build_executable` option, while `glslang` has a `build_executables` option. </issue> <code> [start of recipes/jbig/all/conanfile.py] 1 import os 2 import glob 3 from conans import ConanFile, CMake, tools 4 5 6 class ConanJBig(ConanFile): 7 name = "jbig" 8 url = "https://github.com/conan-io/conan-center-index" 9 homepage = "https://github.com/ImageMagick/jbig" 10 description = "jbig for the Windows build of ImageMagick" 11 topics = ("conan", "jbig", "imagemagick", "window", "graphic") 12 license = "GPL-2.0" 13 exports_sources = ['CMakeLists.txt', "*.patch"] 14 generators = 'cmake' 15 settings = "os", "arch", "compiler", "build_type" 16 options = {"shared": [True, False], "fPIC": [True, False]} 17 default_options = {"shared": False, "fPIC": True} 18 19 @property 20 def _source_subfolder(self): 21 return "source_subfolder" 22 23 @property 24 def _build_subfolder(self): 25 return "build_subfolder" 26 27 def source(self): 28 tools.get(**self.conan_data["sources"][self.version]) 29 for extracted_dir in glob.glob("jbig-*"): 30 os.rename(extracted_dir, self._source_subfolder) 31 32 def config_options(self): 33 if self.settings.os == "Windows": 34 del self.options.fPIC 35 36 def configure(self): 37 del self.settings.compiler.libcxx 38 del self.settings.compiler.cppstd 39 40 def _configure_cmake(self): 41 cmake = CMake(self) 42 cmake.configure(build_folder=self._build_subfolder) 43 return cmake 44 45 def build(self): 46 for patch in self.conan_data["patches"][self.version]: 47 tools.patch(**patch) 48 cmake = self._configure_cmake() 49 cmake.build() 50 51 def package(self): 52 self.copy(pattern="COPYING", dst="licenses", src=self._source_subfolder) 53 cmake = self._configure_cmake() 54 cmake.install() 55 56 def package_info(self): 57 bindir = os.path.join(self.package_folder, "bin") 58 self.output.info("Appending PATH environment variable: {}".format(bindir)) 59 self.env_info.PATH.append(bindir) 60 self.cpp_info.libs = [self.name] 61 if self.options.shared and self.settings.compiler == "Visual Studio": 62 
self.cpp_info.defines = ["_JBIGDLL_"] 63 [end of recipes/jbig/all/conanfile.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/recipes/jbig/all/conanfile.py b/recipes/jbig/all/conanfile.py --- a/recipes/jbig/all/conanfile.py +++ b/recipes/jbig/all/conanfile.py @@ -13,8 +13,18 @@ exports_sources = ['CMakeLists.txt', "*.patch"] generators = 'cmake' settings = "os", "arch", "compiler", "build_type" - options = {"shared": [True, False], "fPIC": [True, False]} - default_options = {"shared": False, "fPIC": True} + options = { + "shared": [True, False], + "fPIC": [True, False], + "build_executables": [True, False] + } + default_options = { + "shared": False, + "fPIC": True, + "build_executables": True + } + + _cmake = None @property def _source_subfolder(self): @@ -38,9 +48,13 @@ del self.settings.compiler.cppstd def _configure_cmake(self): - cmake = CMake(self) - cmake.configure(build_folder=self._build_subfolder) - return cmake + if self._cmake: + return self._cmake + + self._cmake = CMake(self) + self._cmake.definitions["BUILD_EXECUTABLES"] = self.options.build_executables + self._cmake.configure(build_folder=self._build_subfolder) + return self._cmake def build(self): for patch in self.conan_data["patches"][self.version]: @@ -54,9 +68,11 @@ cmake.install() def package_info(self): - bindir = os.path.join(self.package_folder, "bin") - self.output.info("Appending PATH environment variable: {}".format(bindir)) - self.env_info.PATH.append(bindir) self.cpp_info.libs = [self.name] if self.options.shared and self.settings.compiler == "Visual Studio": self.cpp_info.defines = ["_JBIGDLL_"] + + if self.options.build_executables: + bin_path = os.path.join(self.package_folder, "bin") + self.output.info("Appending PATH environment variable: {}".format(bin_path)) + self.env_info.PATH.append(bin_path)
{"golden_diff": "diff --git a/recipes/jbig/all/conanfile.py b/recipes/jbig/all/conanfile.py\n--- a/recipes/jbig/all/conanfile.py\n+++ b/recipes/jbig/all/conanfile.py\n@@ -13,8 +13,18 @@\n exports_sources = ['CMakeLists.txt', \"*.patch\"]\n generators = 'cmake'\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n- options = {\"shared\": [True, False], \"fPIC\": [True, False]}\n- default_options = {\"shared\": False, \"fPIC\": True}\n+ options = {\n+ \"shared\": [True, False],\n+ \"fPIC\": [True, False],\n+ \"build_executables\": [True, False]\n+ }\n+ default_options = {\n+ \"shared\": False,\n+ \"fPIC\": True,\n+ \"build_executables\": True\n+ }\n+\n+ _cmake = None\n \n @property\n def _source_subfolder(self):\n@@ -38,9 +48,13 @@\n del self.settings.compiler.cppstd\n \n def _configure_cmake(self):\n- cmake = CMake(self)\n- cmake.configure(build_folder=self._build_subfolder)\n- return cmake\n+ if self._cmake:\n+ return self._cmake\n+\n+ self._cmake = CMake(self)\n+ self._cmake.definitions[\"BUILD_EXECUTABLES\"] = self.options.build_executables\n+ self._cmake.configure(build_folder=self._build_subfolder)\n+ return self._cmake\n \n def build(self):\n for patch in self.conan_data[\"patches\"][self.version]:\n@@ -54,9 +68,11 @@\n cmake.install()\n \n def package_info(self):\n- bindir = os.path.join(self.package_folder, \"bin\")\n- self.output.info(\"Appending PATH environment variable: {}\".format(bindir))\n- self.env_info.PATH.append(bindir)\n self.cpp_info.libs = [self.name]\n if self.options.shared and self.settings.compiler == \"Visual Studio\":\n self.cpp_info.defines = [\"_JBIGDLL_\"]\n+\n+ if self.options.build_executables:\n+ bin_path = os.path.join(self.package_folder, \"bin\")\n+ self.output.info(\"Appending PATH environment variable: {}\".format(bin_path))\n+ self.env_info.PATH.append(bin_path)\n", "issue": "[package] jbig/20160605: Fails to build on iOS\n<!-- \r\n Please don't forget to update the issue title.\r\n Include all applicable information to help us reproduce your problem.\r\n-->\r\n\r\n### Package and Environment Details (include every applicable attribute)\r\n * Package Name/Version: **jbig/20160605**\r\n * Operating System+version: **iOS 11.0**\r\n * Compiler+version: **apple-clang 11.0**\r\n * Conan version: **conan 1.29.2**\r\n * Python version: **Python 3.8.5**\r\n\r\n### Conan profile\r\n```\r\n[settings]\r\narch=x86_64\r\narch_build=x86_64\r\nbuild_type=Debug\r\ncompiler=apple-clang\r\ncompiler.cppstd=17\r\ncompiler.libcxx=libc++\r\ncompiler.version=11.0\r\nos=iOS\r\nos.version=11.0\r\nos_build=Macos\r\n[options]\r\n[build_requires]\r\n*: darwin-toolchain/1.0.8@theodelrieu/stable\r\n[env]\r\n```\r\n\r\n\r\n### Steps to reproduce (Include if Applicable)\r\n\r\n`conan install jbig/20160605@ --profile ios --build=missing`\r\n\r\n### Logs (Include/Attach if Applicable)\r\n<details><summary>Click to expand log</summary>\r\n\r\n```\r\nCMake Error at CMakeLists.txt:31 (install):\r\n install TARGETS given no BUNDLE DESTINATION for MACOSX_BUNDLE executable\r\n target \"jbgtopbm\".\r\n```\r\n\r\n</details>\r\n\r\nI would suggest adding an option that disables the `pbmtojbg` and `jbgtopbm` targets from being generated. The recipe could define individual `build_` options for each, which other packages do, or go with a more generically named option that enables/disables both. For reference, `sqlite3`, `bzip2`, and `spirv-cross` have a `build_executable` option, while `glslang` has a `build_executables` option. 
\n", "before_files": [{"content": "import os\nimport glob\nfrom conans import ConanFile, CMake, tools\n\n\nclass ConanJBig(ConanFile):\n name = \"jbig\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://github.com/ImageMagick/jbig\"\n description = \"jbig for the Windows build of ImageMagick\"\n topics = (\"conan\", \"jbig\", \"imagemagick\", \"window\", \"graphic\")\n license = \"GPL-2.0\"\n exports_sources = ['CMakeLists.txt', \"*.patch\"]\n generators = 'cmake'\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\"shared\": [True, False], \"fPIC\": [True, False]}\n default_options = {\"shared\": False, \"fPIC\": True}\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n @property\n def _build_subfolder(self):\n return \"build_subfolder\"\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n for extracted_dir in glob.glob(\"jbig-*\"):\n os.rename(extracted_dir, self._source_subfolder)\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n del self.settings.compiler.libcxx\n del self.settings.compiler.cppstd\n\n def _configure_cmake(self):\n cmake = CMake(self)\n cmake.configure(build_folder=self._build_subfolder)\n return cmake\n\n def build(self):\n for patch in self.conan_data[\"patches\"][self.version]:\n tools.patch(**patch)\n cmake = self._configure_cmake()\n cmake.build()\n\n def package(self):\n self.copy(pattern=\"COPYING\", dst=\"licenses\", src=self._source_subfolder)\n cmake = self._configure_cmake()\n cmake.install()\n\n def package_info(self):\n bindir = os.path.join(self.package_folder, \"bin\")\n self.output.info(\"Appending PATH environment variable: {}\".format(bindir))\n self.env_info.PATH.append(bindir)\n self.cpp_info.libs = [self.name]\n if self.options.shared and self.settings.compiler == \"Visual Studio\":\n self.cpp_info.defines = [\"_JBIGDLL_\"]\n", "path": "recipes/jbig/all/conanfile.py"}]}
1,624
535
gh_patches_debug_33865
rasdani/github-patches
git_diff
cowrie__cowrie-1022
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Cowrie not set up for py.test framework So I tried running the test in both python2 and python3. For python2 all the tests were passing but for python3 there was some error. ``` py.test --cov=cowrie ===================================================================================== test session starts ===================================================================================== platform linux -- Python 3.7.2, pytest-4.2.0, py-1.7.0, pluggy-0.8.1 rootdir: /home/mzfr/dev/cowrie, inifile: plugins: cov-2.6.1 collected 3 items / 3 errors =========================================================================================== ERRORS ============================================================================================ ___________________________________________________________________ ERROR collecting src/cowrie/test/test_base_commands.py ____________________________________________________________________ ../shell/fs.py:26: in <module> PICKLE = pickle.load(open(CONFIG.get('shell', 'filesystem'), 'rb')) ../core/config.py:29: in get return super(EnvironmentConfigParser, self).get(section, option, **kwargs) /usr/lib/python3.7/configparser.py:780: in get d = self._unify_values(section, vars) /usr/lib/python3.7/configparser.py:1146: in _unify_values raise NoSectionError(section) from None E configparser.NoSectionError: No section: 'shell' During handling of the above exception, another exception occurred: test_base_commands.py:12: in <module> from cowrie.shell import protocol ../shell/protocol.py:21: in <module> from cowrie.shell import command ../shell/command.py:20: in <module> from cowrie.shell import fs ../shell/fs.py:29: in <module> exit(2) /usr/lib/python3.7/_sitebuiltins.py:26: in __call__ raise SystemExit(code) E SystemExit: 2 --------------------------------------------------------------------------------------- Captured stdout --------------------------------------------------------------------------------------- ERROR: Config file not found: etc/cowrie.cfg.dist ________________________________________________________________________ ERROR collecting src/cowrie/test/test_echo.py ________________________________________________________________________ ../shell/fs.py:26: in <module> PICKLE = pickle.load(open(CONFIG.get('shell', 'filesystem'), 'rb')) ../core/config.py:29: in get return super(EnvironmentConfigParser, self).get(section, option, **kwargs) /usr/lib/python3.7/configparser.py:780: in get d = self._unify_values(section, vars) /usr/lib/python3.7/configparser.py:1146: in _unify_values raise NoSectionError(section) from None E configparser.NoSectionError: No section: 'shell' During handling of the above exception, another exception occurred: test_echo.py:16: in <module> from cowrie.shell import protocol ../shell/protocol.py:21: in <module> from cowrie.shell import command ../shell/command.py:20: in <module> from cowrie.shell import fs ../shell/fs.py:29: in <module> exit(2) /usr/lib/python3.7/_sitebuiltins.py:26: in __call__ raise SystemExit(code) E SystemExit: 2 --------------------------------------------------------------------------------------- Captured stdout --------------------------------------------------------------------------------------- ERROR: Config file not found: etc/cowrie.cfg.dist ________________________________________________________________________ ERROR collecting src/cowrie/test/test_tftp.py 
________________________________________________________________________ ../shell/fs.py:26: in <module> PICKLE = pickle.load(open(CONFIG.get('shell', 'filesystem'), 'rb')) ../core/config.py:29: in get return super(EnvironmentConfigParser, self).get(section, option, **kwargs) /usr/lib/python3.7/configparser.py:780: in get d = self._unify_values(section, vars) /usr/lib/python3.7/configparser.py:1146: in _unify_values raise NoSectionError(section) from None E configparser.NoSectionError: No section: 'shell' During handling of the above exception, another exception occurred: test_tftp.py:16: in <module> from cowrie.shell import protocol ../shell/protocol.py:21: in <module> from cowrie.shell import command ../shell/command.py:20: in <module> from cowrie.shell import fs ../shell/fs.py:29: in <module> exit(2) /usr/lib/python3.7/_sitebuiltins.py:26: in __call__ raise SystemExit(code) E SystemExit: 2 --------------------------------------------------------------------------------------- Captured stdout --------------------------------------------------------------------------------------- ERROR: Config file not found: etc/cowrie.cfg.dist ``` </issue> <code> [start of src/cowrie/core/config.py] 1 # Copyright (c) 2009-2014 Upi Tamminen <[email protected]> 2 # See the COPYRIGHT file for more information 3 4 """ 5 This module contains ... 6 """ 7 8 from __future__ import absolute_import, division 9 10 import configparser 11 import os 12 13 14 def to_environ_key(key): 15 return key.upper() 16 17 18 class EnvironmentConfigParser(configparser.ConfigParser): 19 20 def has_option(self, section, option): 21 if to_environ_key('_'.join((section, option))) in os.environ: 22 return True 23 return super(EnvironmentConfigParser, self).has_option(section, option) 24 25 def get(self, section, option, **kwargs): 26 key = to_environ_key('_'.join((section, option))) 27 if key in os.environ: 28 return os.environ[key] 29 return super(EnvironmentConfigParser, self).get(section, option, **kwargs) 30 31 32 def readConfigFile(cfgfile): 33 """ 34 Read config files and return ConfigParser object 35 36 @param cfgfile: filename or array of filenames 37 @return: ConfigParser object 38 """ 39 parser = EnvironmentConfigParser(interpolation=configparser.ExtendedInterpolation()) 40 parser.read(cfgfile) 41 return parser 42 43 44 CONFIG = readConfigFile(("etc/cowrie.cfg.dist", "/etc/cowrie/cowrie.cfg", "etc/cowrie.cfg", "cowrie.cfg")) 45 [end of src/cowrie/core/config.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/cowrie/core/config.py b/src/cowrie/core/config.py --- a/src/cowrie/core/config.py +++ b/src/cowrie/core/config.py @@ -8,7 +8,8 @@ from __future__ import absolute_import, division import configparser -import os +from os import environ +from os.path import abspath, dirname, exists, join def to_environ_key(key): @@ -18,15 +19,16 @@ class EnvironmentConfigParser(configparser.ConfigParser): def has_option(self, section, option): - if to_environ_key('_'.join((section, option))) in os.environ: + if to_environ_key('_'.join((section, option))) in environ: return True return super(EnvironmentConfigParser, self).has_option(section, option) def get(self, section, option, **kwargs): key = to_environ_key('_'.join((section, option))) - if key in os.environ: - return os.environ[key] - return super(EnvironmentConfigParser, self).get(section, option, **kwargs) + if key in environ: + return environ[key] + return super(EnvironmentConfigParser, self).get( + section, option, **kwargs) def readConfigFile(cfgfile): @@ -36,9 +38,26 @@ @param cfgfile: filename or array of filenames @return: ConfigParser object """ - parser = EnvironmentConfigParser(interpolation=configparser.ExtendedInterpolation()) + parser = EnvironmentConfigParser( + interpolation=configparser.ExtendedInterpolation()) parser.read(cfgfile) return parser -CONFIG = readConfigFile(("etc/cowrie.cfg.dist", "/etc/cowrie/cowrie.cfg", "etc/cowrie.cfg", "cowrie.cfg")) +def get_config_path(): + """Get absolute path to the config file + """ + config_files = ["etc/cowrie/cowrie.cfg", "etc/cowrie.cfg", + "cowrie.cfg", "etc/cowrie.cfg.dist"] + current_path = abspath(dirname(__file__)) + root = "/".join(current_path.split("/")[:-3]) + + for file in config_files: + absolute_path = join(root, file) + if exists(absolute_path): + return absolute_path + + print("Config file not found") + + +CONFIG = readConfigFile(get_config_path())
{"golden_diff": "diff --git a/src/cowrie/core/config.py b/src/cowrie/core/config.py\n--- a/src/cowrie/core/config.py\n+++ b/src/cowrie/core/config.py\n@@ -8,7 +8,8 @@\n from __future__ import absolute_import, division\n \n import configparser\n-import os\n+from os import environ\n+from os.path import abspath, dirname, exists, join\n \n \n def to_environ_key(key):\n@@ -18,15 +19,16 @@\n class EnvironmentConfigParser(configparser.ConfigParser):\n \n def has_option(self, section, option):\n- if to_environ_key('_'.join((section, option))) in os.environ:\n+ if to_environ_key('_'.join((section, option))) in environ:\n return True\n return super(EnvironmentConfigParser, self).has_option(section, option)\n \n def get(self, section, option, **kwargs):\n key = to_environ_key('_'.join((section, option)))\n- if key in os.environ:\n- return os.environ[key]\n- return super(EnvironmentConfigParser, self).get(section, option, **kwargs)\n+ if key in environ:\n+ return environ[key]\n+ return super(EnvironmentConfigParser, self).get(\n+ section, option, **kwargs)\n \n \n def readConfigFile(cfgfile):\n@@ -36,9 +38,26 @@\n @param cfgfile: filename or array of filenames\n @return: ConfigParser object\n \"\"\"\n- parser = EnvironmentConfigParser(interpolation=configparser.ExtendedInterpolation())\n+ parser = EnvironmentConfigParser(\n+ interpolation=configparser.ExtendedInterpolation())\n parser.read(cfgfile)\n return parser\n \n \n-CONFIG = readConfigFile((\"etc/cowrie.cfg.dist\", \"/etc/cowrie/cowrie.cfg\", \"etc/cowrie.cfg\", \"cowrie.cfg\"))\n+def get_config_path():\n+ \"\"\"Get absolute path to the config file\n+ \"\"\"\n+ config_files = [\"etc/cowrie/cowrie.cfg\", \"etc/cowrie.cfg\",\n+ \"cowrie.cfg\", \"etc/cowrie.cfg.dist\"]\n+ current_path = abspath(dirname(__file__))\n+ root = \"/\".join(current_path.split(\"/\")[:-3])\n+\n+ for file in config_files:\n+ absolute_path = join(root, file)\n+ if exists(absolute_path):\n+ return absolute_path\n+\n+ print(\"Config file not found\")\n+\n+\n+CONFIG = readConfigFile(get_config_path())\n", "issue": "Cowrie not set up for py.test framework\nSo I tried running the test in both python2 and python3. 
For python2 all the tests were passing but for python3 there was some error.\r\n\r\n```\r\n py.test --cov=cowrie \r\n===================================================================================== test session starts =====================================================================================\r\nplatform linux -- Python 3.7.2, pytest-4.2.0, py-1.7.0, pluggy-0.8.1\r\nrootdir: /home/mzfr/dev/cowrie, inifile:\r\nplugins: cov-2.6.1\r\ncollected 3 items / 3 errors \r\n\r\n=========================================================================================== ERRORS ============================================================================================\r\n___________________________________________________________________ ERROR collecting src/cowrie/test/test_base_commands.py ____________________________________________________________________\r\n../shell/fs.py:26: in <module>\r\n PICKLE = pickle.load(open(CONFIG.get('shell', 'filesystem'), 'rb'))\r\n../core/config.py:29: in get\r\n return super(EnvironmentConfigParser, self).get(section, option, **kwargs)\r\n/usr/lib/python3.7/configparser.py:780: in get\r\n d = self._unify_values(section, vars)\r\n/usr/lib/python3.7/configparser.py:1146: in _unify_values\r\n raise NoSectionError(section) from None\r\nE configparser.NoSectionError: No section: 'shell'\r\n\r\nDuring handling of the above exception, another exception occurred:\r\ntest_base_commands.py:12: in <module>\r\n from cowrie.shell import protocol\r\n../shell/protocol.py:21: in <module>\r\n from cowrie.shell import command\r\n../shell/command.py:20: in <module>\r\n from cowrie.shell import fs\r\n../shell/fs.py:29: in <module>\r\n exit(2)\r\n/usr/lib/python3.7/_sitebuiltins.py:26: in __call__\r\n raise SystemExit(code)\r\nE SystemExit: 2\r\n--------------------------------------------------------------------------------------- Captured stdout ---------------------------------------------------------------------------------------\r\nERROR: Config file not found: etc/cowrie.cfg.dist\r\n________________________________________________________________________ ERROR collecting src/cowrie/test/test_echo.py ________________________________________________________________________\r\n../shell/fs.py:26: in <module>\r\n PICKLE = pickle.load(open(CONFIG.get('shell', 'filesystem'), 'rb'))\r\n../core/config.py:29: in get\r\n return super(EnvironmentConfigParser, self).get(section, option, **kwargs)\r\n/usr/lib/python3.7/configparser.py:780: in get\r\n d = self._unify_values(section, vars)\r\n/usr/lib/python3.7/configparser.py:1146: in _unify_values\r\n raise NoSectionError(section) from None\r\nE configparser.NoSectionError: No section: 'shell'\r\n\r\nDuring handling of the above exception, another exception occurred:\r\ntest_echo.py:16: in <module>\r\n from cowrie.shell import protocol\r\n../shell/protocol.py:21: in <module>\r\n from cowrie.shell import command\r\n../shell/command.py:20: in <module>\r\n from cowrie.shell import fs\r\n../shell/fs.py:29: in <module>\r\n exit(2)\r\n/usr/lib/python3.7/_sitebuiltins.py:26: in __call__\r\n raise SystemExit(code)\r\nE SystemExit: 2\r\n--------------------------------------------------------------------------------------- Captured stdout ---------------------------------------------------------------------------------------\r\nERROR: Config file not found: etc/cowrie.cfg.dist\r\n________________________________________________________________________ ERROR collecting src/cowrie/test/test_tftp.py 
________________________________________________________________________\r\n../shell/fs.py:26: in <module>\r\n PICKLE = pickle.load(open(CONFIG.get('shell', 'filesystem'), 'rb'))\r\n../core/config.py:29: in get\r\n return super(EnvironmentConfigParser, self).get(section, option, **kwargs)\r\n/usr/lib/python3.7/configparser.py:780: in get\r\n d = self._unify_values(section, vars)\r\n/usr/lib/python3.7/configparser.py:1146: in _unify_values\r\n raise NoSectionError(section) from None\r\nE configparser.NoSectionError: No section: 'shell'\r\n\r\nDuring handling of the above exception, another exception occurred:\r\ntest_tftp.py:16: in <module>\r\n from cowrie.shell import protocol\r\n../shell/protocol.py:21: in <module>\r\n from cowrie.shell import command\r\n../shell/command.py:20: in <module>\r\n from cowrie.shell import fs\r\n../shell/fs.py:29: in <module>\r\n exit(2)\r\n/usr/lib/python3.7/_sitebuiltins.py:26: in __call__\r\n raise SystemExit(code)\r\nE SystemExit: 2\r\n--------------------------------------------------------------------------------------- Captured stdout ---------------------------------------------------------------------------------------\r\nERROR: Config file not found: etc/cowrie.cfg.dist\r\n```\n", "before_files": [{"content": "# Copyright (c) 2009-2014 Upi Tamminen <[email protected]>\n# See the COPYRIGHT file for more information\n\n\"\"\"\nThis module contains ...\n\"\"\"\n\nfrom __future__ import absolute_import, division\n\nimport configparser\nimport os\n\n\ndef to_environ_key(key):\n return key.upper()\n\n\nclass EnvironmentConfigParser(configparser.ConfigParser):\n\n def has_option(self, section, option):\n if to_environ_key('_'.join((section, option))) in os.environ:\n return True\n return super(EnvironmentConfigParser, self).has_option(section, option)\n\n def get(self, section, option, **kwargs):\n key = to_environ_key('_'.join((section, option)))\n if key in os.environ:\n return os.environ[key]\n return super(EnvironmentConfigParser, self).get(section, option, **kwargs)\n\n\ndef readConfigFile(cfgfile):\n \"\"\"\n Read config files and return ConfigParser object\n\n @param cfgfile: filename or array of filenames\n @return: ConfigParser object\n \"\"\"\n parser = EnvironmentConfigParser(interpolation=configparser.ExtendedInterpolation())\n parser.read(cfgfile)\n return parser\n\n\nCONFIG = readConfigFile((\"etc/cowrie.cfg.dist\", \"/etc/cowrie/cowrie.cfg\", \"etc/cowrie.cfg\", \"cowrie.cfg\"))\n", "path": "src/cowrie/core/config.py"}]}
1,934
540
gh_patches_debug_15272
rasdani/github-patches
git_diff
chainer__chainer-1539
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Invalid CuPy cache problem with different version of CUDAs When a user update CUDA, caches of CuPy for old CUDA is sometimes incompatible with new one. We need to check CUDA version and to store kernel cache with its information. @cosmo__ reported this problem on Twitter. Thank you! </issue> <code> [start of cupy/cuda/compiler.py] 1 import hashlib 2 import os 3 import re 4 import subprocess 5 import sys 6 import tempfile 7 8 import filelock 9 import six 10 11 from cupy.cuda import device 12 from cupy.cuda import function 13 14 15 def _get_arch(): 16 cc = device.Device().compute_capability 17 return 'sm_%s' % cc 18 19 20 class TemporaryDirectory(object): 21 22 def __enter__(self): 23 self.path = tempfile.mkdtemp() 24 return self.path 25 26 def __exit__(self, exc_type, exc_value, traceback): 27 if exc_value is not None: 28 return 29 30 for name in os.listdir(self.path): 31 os.unlink(os.path.join(self.path, name)) 32 os.rmdir(self.path) 33 34 35 def _run_nvcc(cmd, cwd): 36 try: 37 return subprocess.check_output(cmd, cwd=cwd, stderr=subprocess.STDOUT) 38 except subprocess.CalledProcessError as e: 39 msg = ('`nvcc` command returns non-zero exit status. \n' 40 'command: {0}\n' 41 'return-code: {1}\n' 42 'stdout/stderr: \n' 43 '{2}'.format(e.cmd, e.returncode, e.output)) 44 raise RuntimeError(msg) 45 except OSError as e: 46 msg = 'Failed to run `nvcc` command. ' \ 47 'Check PATH environment variable: ' \ 48 + str(e) 49 raise OSError(msg) 50 51 52 def nvcc(source, options=(), arch=None): 53 if not arch: 54 arch = _get_arch() 55 cmd = ['nvcc', '--cubin', '-arch', arch] + list(options) 56 57 with TemporaryDirectory() as root_dir: 58 path = os.path.join(root_dir, 'kern') 59 cu_path = '%s.cu' % path 60 cubin_path = '%s.cubin' % path 61 62 with open(cu_path, 'w') as cu_file: 63 cu_file.write(source) 64 65 cmd.append(cu_path) 66 _run_nvcc(cmd, root_dir) 67 68 with open(cubin_path, 'rb') as bin_file: 69 return bin_file.read() 70 71 72 def preprocess(source, options=()): 73 cmd = ['nvcc', '--preprocess'] + list(options) 74 with TemporaryDirectory() as root_dir: 75 path = os.path.join(root_dir, 'kern') 76 cu_path = '%s.cu' % path 77 78 with open(cu_path, 'w') as cu_file: 79 cu_file.write(source) 80 81 cmd.append(cu_path) 82 pp_src = _run_nvcc(cmd, root_dir) 83 84 if isinstance(pp_src, six.binary_type): 85 pp_src = pp_src.decode('utf-8') 86 return re.sub('(?m)^#.*$', '', pp_src) 87 88 89 _default_cache_dir = os.path.expanduser('~/.cupy/kernel_cache') 90 91 92 def get_cache_dir(): 93 return os.environ.get('CUPY_CACHE_DIR', _default_cache_dir) 94 95 96 _empty_file_preprocess_cache = {} 97 98 99 def compile_with_cache(source, options=(), arch=None, cache_dir=None): 100 global _empty_file_preprocess_cache 101 if cache_dir is None: 102 cache_dir = get_cache_dir() 103 if arch is None: 104 arch = _get_arch() 105 106 if 'win32' == sys.platform: 107 options += ('-Xcompiler', '/wd 4819') 108 if sys.maxsize == 9223372036854775807: 109 options += '-m64', 110 elif sys.maxsize == 2147483647: 111 options += '-m32', 112 113 env = (arch, options) 114 if '#include' in source: 115 pp_src = '%s %s' % (env, preprocess(source, options)) 116 else: 117 base = _empty_file_preprocess_cache.get(env, None) 118 if base is None: 119 base = _empty_file_preprocess_cache[env] = preprocess('', options) 120 pp_src = '%s %s %s' % (env, base, source) 121 122 if isinstance(pp_src, six.text_type): 123 pp_src = pp_src.encode('utf-8') 124 
name = '%s.cubin' % hashlib.md5(pp_src).hexdigest() 125 126 mod = function.Module() 127 128 if not os.path.exists(cache_dir): 129 os.makedirs(cache_dir) 130 131 lock_path = os.path.join(cache_dir, 'lock_file.lock') 132 133 path = os.path.join(cache_dir, name) 134 with filelock.FileLock(lock_path) as lock: 135 if os.path.exists(path): 136 with open(path, 'rb') as file: 137 cubin = file.read() 138 mod.load(cubin) 139 else: 140 lock.release() 141 cubin = nvcc(source, options, arch) 142 mod.load(cubin) 143 lock.acquire() 144 with open(path, 'wb') as cubin_file: 145 cubin_file.write(cubin) 146 147 return mod 148 [end of cupy/cuda/compiler.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/cupy/cuda/compiler.py b/cupy/cuda/compiler.py --- a/cupy/cuda/compiler.py +++ b/cupy/cuda/compiler.py @@ -12,6 +12,18 @@ from cupy.cuda import function +_nvcc_version = None + + +def _get_nvcc_version(): + global _nvcc_version + if _nvcc_version is None: + cmd = ['nvcc', '--version'] + _nvcc_version = _run_nvcc(cmd, '.') + + return _nvcc_version + + def _get_arch(): cc = device.Device().compute_capability return 'sm_%s' % cc @@ -110,7 +122,7 @@ elif sys.maxsize == 2147483647: options += '-m32', - env = (arch, options) + env = (arch, options, _get_nvcc_version()) if '#include' in source: pp_src = '%s %s' % (env, preprocess(source, options)) else:
{"golden_diff": "diff --git a/cupy/cuda/compiler.py b/cupy/cuda/compiler.py\n--- a/cupy/cuda/compiler.py\n+++ b/cupy/cuda/compiler.py\n@@ -12,6 +12,18 @@\n from cupy.cuda import function\n \n \n+_nvcc_version = None\n+\n+\n+def _get_nvcc_version():\n+ global _nvcc_version\n+ if _nvcc_version is None:\n+ cmd = ['nvcc', '--version']\n+ _nvcc_version = _run_nvcc(cmd, '.')\n+\n+ return _nvcc_version\n+\n+\n def _get_arch():\n cc = device.Device().compute_capability\n return 'sm_%s' % cc\n@@ -110,7 +122,7 @@\n elif sys.maxsize == 2147483647:\n options += '-m32',\n \n- env = (arch, options)\n+ env = (arch, options, _get_nvcc_version())\n if '#include' in source:\n pp_src = '%s %s' % (env, preprocess(source, options))\n else:\n", "issue": "Invalid CuPy cache problem with different version of CUDAs\nWhen a user update CUDA, caches of CuPy for old CUDA is sometimes incompatible with new one. We need to check CUDA version and to store kernel cache with its information.\n\n@cosmo__ reported this problem on Twitter. Thank you!\n\n", "before_files": [{"content": "import hashlib\nimport os\nimport re\nimport subprocess\nimport sys\nimport tempfile\n\nimport filelock\nimport six\n\nfrom cupy.cuda import device\nfrom cupy.cuda import function\n\n\ndef _get_arch():\n cc = device.Device().compute_capability\n return 'sm_%s' % cc\n\n\nclass TemporaryDirectory(object):\n\n def __enter__(self):\n self.path = tempfile.mkdtemp()\n return self.path\n\n def __exit__(self, exc_type, exc_value, traceback):\n if exc_value is not None:\n return\n\n for name in os.listdir(self.path):\n os.unlink(os.path.join(self.path, name))\n os.rmdir(self.path)\n\n\ndef _run_nvcc(cmd, cwd):\n try:\n return subprocess.check_output(cmd, cwd=cwd, stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as e:\n msg = ('`nvcc` command returns non-zero exit status. \\n'\n 'command: {0}\\n'\n 'return-code: {1}\\n'\n 'stdout/stderr: \\n'\n '{2}'.format(e.cmd, e.returncode, e.output))\n raise RuntimeError(msg)\n except OSError as e:\n msg = 'Failed to run `nvcc` command. 
' \\\n 'Check PATH environment variable: ' \\\n + str(e)\n raise OSError(msg)\n\n\ndef nvcc(source, options=(), arch=None):\n if not arch:\n arch = _get_arch()\n cmd = ['nvcc', '--cubin', '-arch', arch] + list(options)\n\n with TemporaryDirectory() as root_dir:\n path = os.path.join(root_dir, 'kern')\n cu_path = '%s.cu' % path\n cubin_path = '%s.cubin' % path\n\n with open(cu_path, 'w') as cu_file:\n cu_file.write(source)\n\n cmd.append(cu_path)\n _run_nvcc(cmd, root_dir)\n\n with open(cubin_path, 'rb') as bin_file:\n return bin_file.read()\n\n\ndef preprocess(source, options=()):\n cmd = ['nvcc', '--preprocess'] + list(options)\n with TemporaryDirectory() as root_dir:\n path = os.path.join(root_dir, 'kern')\n cu_path = '%s.cu' % path\n\n with open(cu_path, 'w') as cu_file:\n cu_file.write(source)\n\n cmd.append(cu_path)\n pp_src = _run_nvcc(cmd, root_dir)\n\n if isinstance(pp_src, six.binary_type):\n pp_src = pp_src.decode('utf-8')\n return re.sub('(?m)^#.*$', '', pp_src)\n\n\n_default_cache_dir = os.path.expanduser('~/.cupy/kernel_cache')\n\n\ndef get_cache_dir():\n return os.environ.get('CUPY_CACHE_DIR', _default_cache_dir)\n\n\n_empty_file_preprocess_cache = {}\n\n\ndef compile_with_cache(source, options=(), arch=None, cache_dir=None):\n global _empty_file_preprocess_cache\n if cache_dir is None:\n cache_dir = get_cache_dir()\n if arch is None:\n arch = _get_arch()\n\n if 'win32' == sys.platform:\n options += ('-Xcompiler', '/wd 4819')\n if sys.maxsize == 9223372036854775807:\n options += '-m64',\n elif sys.maxsize == 2147483647:\n options += '-m32',\n\n env = (arch, options)\n if '#include' in source:\n pp_src = '%s %s' % (env, preprocess(source, options))\n else:\n base = _empty_file_preprocess_cache.get(env, None)\n if base is None:\n base = _empty_file_preprocess_cache[env] = preprocess('', options)\n pp_src = '%s %s %s' % (env, base, source)\n\n if isinstance(pp_src, six.text_type):\n pp_src = pp_src.encode('utf-8')\n name = '%s.cubin' % hashlib.md5(pp_src).hexdigest()\n\n mod = function.Module()\n\n if not os.path.exists(cache_dir):\n os.makedirs(cache_dir)\n\n lock_path = os.path.join(cache_dir, 'lock_file.lock')\n\n path = os.path.join(cache_dir, name)\n with filelock.FileLock(lock_path) as lock:\n if os.path.exists(path):\n with open(path, 'rb') as file:\n cubin = file.read()\n mod.load(cubin)\n else:\n lock.release()\n cubin = nvcc(source, options, arch)\n mod.load(cubin)\n lock.acquire()\n with open(path, 'wb') as cubin_file:\n cubin_file.write(cubin)\n\n return mod\n", "path": "cupy/cuda/compiler.py"}]}
2,009
247
gh_patches_debug_63916
rasdani/github-patches
git_diff
tensorflow__addons-897
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Nightly build break **System information** - TensorFlow version and how it was installed (source or binary): tf-nightly-**2.2.0.dev20200115** - TensorFlow-Addons version and how it was installed (source or binary): tfa-nightly-**0.8.0.dev20200115** **Describe the bug** Hi, it looks like [this commit](https://github.com/tensorflow/addons/commit/3aae7732998cb233234a2948010b9aaafc24e920) causes the latest nightly build to fail on import ``` ----> 1 import tensorflow_addons /usr/local/lib/python3.6/dist-packages/tensorflow_addons/__init__.py in <module>() 30 31 # Cleanup symbols to avoid polluting namespace. ---> 32 del absolute_import 33 del division 34 del print_function NameError: name 'absolute_import' is not defined ``` @seanpmorgan **Code to reproduce the issue** [colab](https://colab.research.google.com/drive/1fxRshVv0FPJNHdOqWC4GySjPJ_TdJTJU#scrollTo=TTC3gzRLRAvY) </issue> <code> [start of tensorflow_addons/__init__.py] 1 # Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 # ============================================================================== 15 """Useful extra functionality for TensorFlow maintained by SIG-addons.""" 16 17 # Local project imports 18 from tensorflow_addons import activations 19 from tensorflow_addons import callbacks 20 from tensorflow_addons import image 21 from tensorflow_addons import layers 22 from tensorflow_addons import losses 23 from tensorflow_addons import metrics 24 from tensorflow_addons import optimizers 25 from tensorflow_addons import rnn 26 from tensorflow_addons import seq2seq 27 from tensorflow_addons import text 28 29 from tensorflow_addons.version import __version__ 30 31 # Cleanup symbols to avoid polluting namespace. 32 del absolute_import 33 del division 34 del print_function 35 [end of tensorflow_addons/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/tensorflow_addons/__init__.py b/tensorflow_addons/__init__.py --- a/tensorflow_addons/__init__.py +++ b/tensorflow_addons/__init__.py @@ -27,8 +27,3 @@ from tensorflow_addons import text from tensorflow_addons.version import __version__ - -# Cleanup symbols to avoid polluting namespace. -del absolute_import -del division -del print_function
{"golden_diff": "diff --git a/tensorflow_addons/__init__.py b/tensorflow_addons/__init__.py\n--- a/tensorflow_addons/__init__.py\n+++ b/tensorflow_addons/__init__.py\n@@ -27,8 +27,3 @@\n from tensorflow_addons import text\n \n from tensorflow_addons.version import __version__\n-\n-# Cleanup symbols to avoid polluting namespace.\n-del absolute_import\n-del division\n-del print_function\n", "issue": "Nightly build break\n**System information**\r\n- TensorFlow version and how it was installed (source or binary): tf-nightly-**2.2.0.dev20200115** \r\n- TensorFlow-Addons version and how it was installed (source or binary): tfa-nightly-**0.8.0.dev20200115**\r\n\r\n**Describe the bug**\r\nHi, it looks like [this commit](https://github.com/tensorflow/addons/commit/3aae7732998cb233234a2948010b9aaafc24e920) causes the latest nightly build to fail on import\r\n\r\n```\r\n----> 1 import tensorflow_addons\r\n\r\n/usr/local/lib/python3.6/dist-packages/tensorflow_addons/__init__.py in <module>()\r\n 30 \r\n 31 # Cleanup symbols to avoid polluting namespace.\r\n---> 32 del absolute_import\r\n 33 del division\r\n 34 del print_function\r\n\r\nNameError: name 'absolute_import' is not defined\r\n```\r\n@seanpmorgan \r\n\r\n**Code to reproduce the issue**\r\n[colab](https://colab.research.google.com/drive/1fxRshVv0FPJNHdOqWC4GySjPJ_TdJTJU#scrollTo=TTC3gzRLRAvY)\r\n\n", "before_files": [{"content": "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Useful extra functionality for TensorFlow maintained by SIG-addons.\"\"\"\n\n# Local project imports\nfrom tensorflow_addons import activations\nfrom tensorflow_addons import callbacks\nfrom tensorflow_addons import image\nfrom tensorflow_addons import layers\nfrom tensorflow_addons import losses\nfrom tensorflow_addons import metrics\nfrom tensorflow_addons import optimizers\nfrom tensorflow_addons import rnn\nfrom tensorflow_addons import seq2seq\nfrom tensorflow_addons import text\n\nfrom tensorflow_addons.version import __version__\n\n# Cleanup symbols to avoid polluting namespace.\ndel absolute_import\ndel division\ndel print_function\n", "path": "tensorflow_addons/__init__.py"}]}
1,169
98
gh_patches_debug_13053
rasdani/github-patches
git_diff
pymodbus-dev__pymodbus-921
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> AttributeError: module 'asyncio' has no attribute 'exceptions' ``` Traceback (most recent call last): File "/home//.local/bin/pymodbus.server", line 6, in <module> from pymodbus.repl.server.main import server File "/home//.local/lib/python3.7/site-packages/pymodbus/repl/server/main.py", line 16, in <module> CANCELLED_ERROR = asyncio.exceptions.CancelledError AttributeError: module 'asyncio' has no attribute 'exceptions' ``` in the code ``` if IS_PYTHON3 and PYTHON_VERSION > (3, 7): CANCELLED_ERROR = asyncio.exceptions.CancelledError else: CANCELLED_ERROR = asyncio.CancelledError ``` python 3.7 at least 3.7.3 doesn't have `asyncio.exceptions` Maybe it was supposed to be >= 3.8. </issue> <code> [start of pymodbus/repl/server/main.py] 1 """Repl server main. 2 3 Copyright (c) 2020 by RiptideIO 4 All rights reserved. 5 """ 6 import sys 7 import logging 8 import asyncio 9 import json 10 import click 11 from pymodbus.framer.socket_framer import ModbusSocketFramer 12 from pymodbus.server.reactive.main import ( 13 ReactiveServer, 14 DEFAULT_FRAMER, 15 DEFUALT_HANDLERS, 16 ) 17 from pymodbus.server.reactive.default_config import DEFUALT_CONFIG 18 from pymodbus.repl.server.cli import run_repl 19 20 if sys.version_info > (3, 7): 21 CANCELLED_ERROR = asyncio.exceptions.CancelledError 22 else: 23 CANCELLED_ERROR = asyncio.CancelledError # pylint: disable=invalid-name 24 25 26 @click.group("ReactiveModbusServer") 27 @click.option("--host", default="localhost", help="Host address") 28 @click.option("--web-port", default=8080, help="Web app port") 29 @click.option( 30 "--broadcast-support", 31 is_flag=True, 32 default=False, 33 help="Support broadcast messages", 34 ) 35 @click.option( 36 "--repl/--no-repl", 37 is_flag=True, 38 default=True, 39 help="Enable/Disable repl for server", 40 ) 41 @click.option( 42 "--verbose", is_flag=True, help="Run with debug logs enabled for pymodbus" 43 ) 44 @click.pass_context 45 def server(ctx, host, web_port, broadcast_support, repl, verbose): 46 """Run server code.""" 47 FORMAT = ( # pylint: disable=invalid-name 48 "%(asctime)-15s %(threadName)-15s" 49 " %(levelname)-8s %(module)-15s:%(lineno)-8s %(message)s" 50 ) 51 pymodbus_logger = logging.getLogger("pymodbus") 52 logging.basicConfig(format=FORMAT) # NOSONAR 53 if verbose: 54 pymodbus_logger.setLevel(logging.DEBUG) 55 else: 56 pymodbus_logger.setLevel(logging.ERROR) 57 58 ctx.obj = { 59 "repl": repl, 60 "host": host, 61 "web_port": web_port, 62 "broadcast": broadcast_support, 63 } 64 65 66 @server.command("run") 67 @click.option( 68 "--modbus-server", 69 default="tcp", 70 type=click.Choice(["tcp", "serial", "tls", "udp"], case_sensitive=False), 71 help="Modbus server", 72 ) 73 @click.option( 74 "--modbus-framer", 75 default="socket", 76 type=click.Choice( 77 ["socket", "rtu", "tls", "ascii", "binary"], case_sensitive=False 78 ), 79 help="Modbus framer to use", 80 ) 81 @click.option("--modbus-port", default="5020", help="Modbus port") 82 @click.option( 83 "--modbus-unit-id", default=[1], type=int, multiple=True, help="Modbus unit id" 84 ) 85 @click.option( 86 "--modbus-config", 87 type=click.Path(exists=True), 88 help="Path to additional modbus server config", 89 ) 90 @click.option( 91 "-r", 92 "--randomize", 93 default=0, 94 help="Randomize every `r` reads." 95 " 0=never, 1=always, " 96 "2=every-second-read, " 97 "and so on. 
" 98 "Applicable IR and DI.", 99 ) 100 @click.pass_context 101 def run( 102 ctx, 103 modbus_server, 104 modbus_framer, 105 modbus_port, 106 modbus_unit_id, 107 modbus_config, 108 randomize, 109 ): 110 """Run Reactive Modbus server. 111 112 Exposing REST endpoint for response manipulation. 113 """ 114 repl = ctx.obj.pop("repl") 115 web_app_config = ctx.obj 116 loop = asyncio.get_event_loop() 117 framer = DEFAULT_FRAMER.get(modbus_framer, ModbusSocketFramer) 118 if modbus_config: 119 with open(modbus_config) as my_file: # pylint: disable=unspecified-encoding 120 modbus_config = json.load(my_file) 121 else: 122 modbus_config = DEFUALT_CONFIG 123 modbus_config = modbus_config.get(modbus_server, {}) 124 if modbus_server != "serial": 125 modbus_port = int(modbus_port) 126 handler = modbus_config.pop("handler", "ModbusConnectedRequestHandler") 127 else: 128 handler = modbus_config.pop("handler", "ModbusSingleRequestHandler") 129 handler = DEFUALT_HANDLERS.get(handler.strip()) 130 131 modbus_config["handler"] = handler 132 modbus_config["randomize"] = randomize 133 app = ReactiveServer.factory( 134 modbus_server, 135 framer, 136 modbus_port=modbus_port, 137 unit=modbus_unit_id, 138 loop=loop, 139 **web_app_config, 140 **modbus_config 141 ) 142 try: 143 if repl: 144 loop.run_until_complete(app.run_async()) 145 146 loop.run_until_complete(run_repl(app)) 147 loop.run_forever() 148 else: 149 app.run() 150 151 except CANCELLED_ERROR: 152 print("Done!!!!!") 153 154 155 if __name__ == "__main__": 156 server() # pylint: disable=no-value-for-parameter 157 [end of pymodbus/repl/server/main.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pymodbus/repl/server/main.py b/pymodbus/repl/server/main.py --- a/pymodbus/repl/server/main.py +++ b/pymodbus/repl/server/main.py @@ -3,7 +3,6 @@ Copyright (c) 2020 by RiptideIO All rights reserved. """ -import sys import logging import asyncio import json @@ -17,10 +16,7 @@ from pymodbus.server.reactive.default_config import DEFUALT_CONFIG from pymodbus.repl.server.cli import run_repl -if sys.version_info > (3, 7): - CANCELLED_ERROR = asyncio.exceptions.CancelledError -else: - CANCELLED_ERROR = asyncio.CancelledError # pylint: disable=invalid-name +CANCELLED_ERROR = asyncio.exceptions.CancelledError @click.group("ReactiveModbusServer")
{"golden_diff": "diff --git a/pymodbus/repl/server/main.py b/pymodbus/repl/server/main.py\n--- a/pymodbus/repl/server/main.py\n+++ b/pymodbus/repl/server/main.py\n@@ -3,7 +3,6 @@\n Copyright (c) 2020 by RiptideIO\n All rights reserved.\n \"\"\"\n-import sys\n import logging\n import asyncio\n import json\n@@ -17,10 +16,7 @@\n from pymodbus.server.reactive.default_config import DEFUALT_CONFIG\n from pymodbus.repl.server.cli import run_repl\n \n-if sys.version_info > (3, 7):\n- CANCELLED_ERROR = asyncio.exceptions.CancelledError\n-else:\n- CANCELLED_ERROR = asyncio.CancelledError # pylint: disable=invalid-name\n+CANCELLED_ERROR = asyncio.exceptions.CancelledError\n \n \n @click.group(\"ReactiveModbusServer\")\n", "issue": "AttributeError: module 'asyncio' has no attribute 'exceptions'\n```\r\nTraceback (most recent call last):\r\n File \"/home//.local/bin/pymodbus.server\", line 6, in <module>\r\n from pymodbus.repl.server.main import server\r\n File \"/home//.local/lib/python3.7/site-packages/pymodbus/repl/server/main.py\", line 16, in <module>\r\n CANCELLED_ERROR = asyncio.exceptions.CancelledError\r\nAttributeError: module 'asyncio' has no attribute 'exceptions'\r\n```\r\n\r\nin the code \r\n\r\n```\r\nif IS_PYTHON3 and PYTHON_VERSION > (3, 7):\r\n CANCELLED_ERROR = asyncio.exceptions.CancelledError\r\nelse:\r\n CANCELLED_ERROR = asyncio.CancelledError\r\n\r\n```\r\npython 3.7 at least 3.7.3 doesn't have `asyncio.exceptions`\r\nMaybe it was supposed to be >= 3.8.\r\n\n", "before_files": [{"content": "\"\"\"Repl server main.\n\nCopyright (c) 2020 by RiptideIO\nAll rights reserved.\n\"\"\"\nimport sys\nimport logging\nimport asyncio\nimport json\nimport click\nfrom pymodbus.framer.socket_framer import ModbusSocketFramer\nfrom pymodbus.server.reactive.main import (\n ReactiveServer,\n DEFAULT_FRAMER,\n DEFUALT_HANDLERS,\n)\nfrom pymodbus.server.reactive.default_config import DEFUALT_CONFIG\nfrom pymodbus.repl.server.cli import run_repl\n\nif sys.version_info > (3, 7):\n CANCELLED_ERROR = asyncio.exceptions.CancelledError\nelse:\n CANCELLED_ERROR = asyncio.CancelledError # pylint: disable=invalid-name\n\n\[email protected](\"ReactiveModbusServer\")\[email protected](\"--host\", default=\"localhost\", help=\"Host address\")\[email protected](\"--web-port\", default=8080, help=\"Web app port\")\[email protected](\n \"--broadcast-support\",\n is_flag=True,\n default=False,\n help=\"Support broadcast messages\",\n)\[email protected](\n \"--repl/--no-repl\",\n is_flag=True,\n default=True,\n help=\"Enable/Disable repl for server\",\n)\[email protected](\n \"--verbose\", is_flag=True, help=\"Run with debug logs enabled for pymodbus\"\n)\[email protected]_context\ndef server(ctx, host, web_port, broadcast_support, repl, verbose):\n \"\"\"Run server code.\"\"\"\n FORMAT = ( # pylint: disable=invalid-name\n \"%(asctime)-15s %(threadName)-15s\"\n \" %(levelname)-8s %(module)-15s:%(lineno)-8s %(message)s\"\n )\n pymodbus_logger = logging.getLogger(\"pymodbus\")\n logging.basicConfig(format=FORMAT) # NOSONAR\n if verbose:\n pymodbus_logger.setLevel(logging.DEBUG)\n else:\n pymodbus_logger.setLevel(logging.ERROR)\n\n ctx.obj = {\n \"repl\": repl,\n \"host\": host,\n \"web_port\": web_port,\n \"broadcast\": broadcast_support,\n }\n\n\[email protected](\"run\")\[email protected](\n \"--modbus-server\",\n default=\"tcp\",\n type=click.Choice([\"tcp\", \"serial\", \"tls\", \"udp\"], case_sensitive=False),\n help=\"Modbus server\",\n)\[email protected](\n \"--modbus-framer\",\n default=\"socket\",\n 
type=click.Choice(\n [\"socket\", \"rtu\", \"tls\", \"ascii\", \"binary\"], case_sensitive=False\n ),\n help=\"Modbus framer to use\",\n)\[email protected](\"--modbus-port\", default=\"5020\", help=\"Modbus port\")\[email protected](\n \"--modbus-unit-id\", default=[1], type=int, multiple=True, help=\"Modbus unit id\"\n)\[email protected](\n \"--modbus-config\",\n type=click.Path(exists=True),\n help=\"Path to additional modbus server config\",\n)\[email protected](\n \"-r\",\n \"--randomize\",\n default=0,\n help=\"Randomize every `r` reads.\"\n \" 0=never, 1=always, \"\n \"2=every-second-read, \"\n \"and so on. \"\n \"Applicable IR and DI.\",\n)\[email protected]_context\ndef run(\n ctx,\n modbus_server,\n modbus_framer,\n modbus_port,\n modbus_unit_id,\n modbus_config,\n randomize,\n):\n \"\"\"Run Reactive Modbus server.\n\n Exposing REST endpoint for response manipulation.\n \"\"\"\n repl = ctx.obj.pop(\"repl\")\n web_app_config = ctx.obj\n loop = asyncio.get_event_loop()\n framer = DEFAULT_FRAMER.get(modbus_framer, ModbusSocketFramer)\n if modbus_config:\n with open(modbus_config) as my_file: # pylint: disable=unspecified-encoding\n modbus_config = json.load(my_file)\n else:\n modbus_config = DEFUALT_CONFIG\n modbus_config = modbus_config.get(modbus_server, {})\n if modbus_server != \"serial\":\n modbus_port = int(modbus_port)\n handler = modbus_config.pop(\"handler\", \"ModbusConnectedRequestHandler\")\n else:\n handler = modbus_config.pop(\"handler\", \"ModbusSingleRequestHandler\")\n handler = DEFUALT_HANDLERS.get(handler.strip())\n\n modbus_config[\"handler\"] = handler\n modbus_config[\"randomize\"] = randomize\n app = ReactiveServer.factory(\n modbus_server,\n framer,\n modbus_port=modbus_port,\n unit=modbus_unit_id,\n loop=loop,\n **web_app_config,\n **modbus_config\n )\n try:\n if repl:\n loop.run_until_complete(app.run_async())\n\n loop.run_until_complete(run_repl(app))\n loop.run_forever()\n else:\n app.run()\n\n except CANCELLED_ERROR:\n print(\"Done!!!!!\")\n\n\nif __name__ == \"__main__\":\n server() # pylint: disable=no-value-for-parameter\n", "path": "pymodbus/repl/server/main.py"}]}
2,219
200
gh_patches_debug_989
rasdani/github-patches
git_diff
hydroshare__hydroshare-5098
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Haystack rest endpoint response serializer does not include short_id **Description of the bug** The Haystack REST endpoint for complex solr searches does not include the short_id into the response serializer. This is a critical piece of information for users of this endpoint. Steps to reproduce the bug: https://github.com/hydroshare/hydroshare/blob/d3bd1737a0179eac74cd68926b3b79b80894410e/hs_rest_api/discovery.py#L12 **Expected behavior** I expect resource ids to be included with search results so I can retrieve resources. </issue> <code> [start of hs_rest_api/discovery.py] 1 from drf_haystack.serializers import HaystackSerializer 2 from drf_haystack.viewsets import HaystackViewSet 3 from hs_core.search_indexes import BaseResourceIndex 4 from hs_core.models import BaseResource 5 from drf_haystack.fields import HaystackCharField, HaystackDateField, HaystackMultiValueField, \ 6 HaystackFloatField 7 from drf_yasg.utils import swagger_auto_schema 8 from rest_framework.decorators import action 9 from rest_framework import serializers 10 11 12 class DiscoveryResourceSerializer(HaystackSerializer): 13 class Meta: 14 index_classes = [BaseResourceIndex] 15 fields = [ 16 "title", 17 "author", 18 "contributor", 19 "subject", 20 "abstract", 21 "resource_type", 22 "content_type", 23 "coverage_type", 24 "availability", 25 "created", 26 "modified", 27 "start_date", 28 "end_date", 29 "east", 30 "north", 31 "eastlimit", 32 "westlimit", 33 "northlimit", 34 "southlimit" 35 ] 36 37 38 class DiscoverResourceValidator(serializers.Serializer): 39 text = HaystackCharField(required=False, 40 help_text='Search across all Resource Fields') 41 author = HaystackCharField(required=False, 42 help_text='Search by author') 43 contributor = HaystackMultiValueField(required=False, 44 help_text='Search by contributor') 45 subject = HaystackMultiValueField(required=False, 46 help_text='Search within subject keywords') 47 abstract = HaystackCharField(required=False, 48 help_text='Search within the abstract') 49 resource_type = HaystackCharField(required=False, 50 help_text='Search by resource type') 51 content_type = HaystackMultiValueField(required=False, 52 help_text='Search by content type') 53 coverage_type = HaystackMultiValueField(required=False, 54 help_text='Search by coverage type ' 55 '(point, box, period)') 56 availability = HaystackMultiValueField(required=False, 57 help_text='Search by availability ' 58 '(discoverable, public, published)') 59 created = HaystackDateField(required=False, 60 help_text='Search by created date') 61 modified = HaystackDateField(required=False, 62 help_text='Search by modified date') 63 start_date = HaystackDateField(required=False, 64 help_text='Search by start date') 65 end_date = HaystackDateField(required=False, 66 help_text='Search by end date') 67 east = HaystackFloatField(required=False, 68 help_text='Search by location or box center east longitude') 69 north = HaystackFloatField(required=False, 70 help_text='Search by location or box center north latitude') 71 eastlimit = HaystackFloatField(required=False, 72 help_text='Search by east limit longitude') 73 westlimit = HaystackFloatField(required=False, 74 help_text='Search by west limit longitude') 75 northlimit = HaystackFloatField(required=False, 76 help_text='Search by north limit latitude') 77 southlimit = HaystackFloatField(required=False, 78 help_text='Search by south limit latitude') 79 80 81 class 
DiscoverSearchView(HaystackViewSet): 82 index_models = [BaseResource] 83 serializer_class = DiscoveryResourceSerializer 84 85 @action(detail=True, methods=['get']) 86 @swagger_auto_schema(operation_description="Search HydroShare Resources using solr conventions." 87 "We use haystack for queries so you can use all of " 88 "the parameters described here in combination with " 89 "field lookups " 90 "https://django-haystack.readthedocs.io/en/latest/" 91 "searchqueryset_api.html?highlight=lookups#id1", 92 query_serializer=DiscoverResourceValidator) 93 def list(self, request): 94 return super(DiscoverSearchView, self).list(request) 95 [end of hs_rest_api/discovery.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/hs_rest_api/discovery.py b/hs_rest_api/discovery.py --- a/hs_rest_api/discovery.py +++ b/hs_rest_api/discovery.py @@ -13,6 +13,7 @@ class Meta: index_classes = [BaseResourceIndex] fields = [ + "short_id", "title", "author", "contributor",
{"golden_diff": "diff --git a/hs_rest_api/discovery.py b/hs_rest_api/discovery.py\n--- a/hs_rest_api/discovery.py\n+++ b/hs_rest_api/discovery.py\n@@ -13,6 +13,7 @@\n class Meta:\n index_classes = [BaseResourceIndex]\n fields = [\n+ \"short_id\",\n \"title\",\n \"author\",\n \"contributor\",\n", "issue": "Haystack rest endpoint response serializer does not include short_id\n**Description of the bug**\r\nThe Haystack REST endpoint for complex solr searches does not include the short_id into the response serializer. This is a critical piece of information for users of this endpoint. \r\n\r\nSteps to reproduce the bug:\r\nhttps://github.com/hydroshare/hydroshare/blob/d3bd1737a0179eac74cd68926b3b79b80894410e/hs_rest_api/discovery.py#L12\r\n\r\n**Expected behavior**\r\nI expect resource ids to be included with search results so I can retrieve resources.\r\n\n", "before_files": [{"content": "from drf_haystack.serializers import HaystackSerializer\nfrom drf_haystack.viewsets import HaystackViewSet\nfrom hs_core.search_indexes import BaseResourceIndex\nfrom hs_core.models import BaseResource\nfrom drf_haystack.fields import HaystackCharField, HaystackDateField, HaystackMultiValueField, \\\n HaystackFloatField\nfrom drf_yasg.utils import swagger_auto_schema\nfrom rest_framework.decorators import action\nfrom rest_framework import serializers\n\n\nclass DiscoveryResourceSerializer(HaystackSerializer):\n class Meta:\n index_classes = [BaseResourceIndex]\n fields = [\n \"title\",\n \"author\",\n \"contributor\",\n \"subject\",\n \"abstract\",\n \"resource_type\",\n \"content_type\",\n \"coverage_type\",\n \"availability\",\n \"created\",\n \"modified\",\n \"start_date\",\n \"end_date\",\n \"east\",\n \"north\",\n \"eastlimit\",\n \"westlimit\",\n \"northlimit\",\n \"southlimit\"\n ]\n\n\nclass DiscoverResourceValidator(serializers.Serializer):\n text = HaystackCharField(required=False,\n help_text='Search across all Resource Fields')\n author = HaystackCharField(required=False,\n help_text='Search by author')\n contributor = HaystackMultiValueField(required=False,\n help_text='Search by contributor')\n subject = HaystackMultiValueField(required=False,\n help_text='Search within subject keywords')\n abstract = HaystackCharField(required=False,\n help_text='Search within the abstract')\n resource_type = HaystackCharField(required=False,\n help_text='Search by resource type')\n content_type = HaystackMultiValueField(required=False,\n help_text='Search by content type')\n coverage_type = HaystackMultiValueField(required=False,\n help_text='Search by coverage type '\n '(point, box, period)')\n availability = HaystackMultiValueField(required=False,\n help_text='Search by availability '\n '(discoverable, public, published)')\n created = HaystackDateField(required=False,\n help_text='Search by created date')\n modified = HaystackDateField(required=False,\n help_text='Search by modified date')\n start_date = HaystackDateField(required=False,\n help_text='Search by start date')\n end_date = HaystackDateField(required=False,\n help_text='Search by end date')\n east = HaystackFloatField(required=False,\n help_text='Search by location or box center east longitude')\n north = HaystackFloatField(required=False,\n help_text='Search by location or box center north latitude')\n eastlimit = HaystackFloatField(required=False,\n help_text='Search by east limit longitude')\n westlimit = HaystackFloatField(required=False,\n help_text='Search by west limit longitude')\n northlimit = HaystackFloatField(required=False,\n 
help_text='Search by north limit latitude')\n southlimit = HaystackFloatField(required=False,\n help_text='Search by south limit latitude')\n\n\nclass DiscoverSearchView(HaystackViewSet):\n index_models = [BaseResource]\n serializer_class = DiscoveryResourceSerializer\n\n @action(detail=True, methods=['get'])\n @swagger_auto_schema(operation_description=\"Search HydroShare Resources using solr conventions.\"\n \"We use haystack for queries so you can use all of \"\n \"the parameters described here in combination with \"\n \"field lookups \"\n \"https://django-haystack.readthedocs.io/en/latest/\"\n \"searchqueryset_api.html?highlight=lookups#id1\",\n query_serializer=DiscoverResourceValidator)\n def list(self, request):\n return super(DiscoverSearchView, self).list(request)\n", "path": "hs_rest_api/discovery.py"}]}
1,631
88
gh_patches_debug_20873
rasdani/github-patches
git_diff
GeotrekCE__Geotrek-admin-2223
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Fix translations in package The compilemessages step for geotrek and mapentity is missing somewhere </issue> <code> [start of setup.py] 1 #!/usr/bin/python3 2 import os 3 import distutils.command.build 4 from setuptools import setup, find_packages 5 6 here = os.path.abspath(os.path.dirname(__file__)) 7 8 9 class BuildCommand(distutils.command.build.build): 10 def run(self): 11 print("before") 12 distutils.command.build.build.run(self) 13 print("after") 14 from django.core.management import call_command 15 curdir = os.getcwd() 16 os.chdir(os.path.join(curdir, 'geotrek')) 17 call_command('compilemessages') 18 os.chdir(os.path.join(curdir, 'mapentity')) 19 call_command('compilemessages') 20 os.chdir(curdir) 21 22 23 setup( 24 name='geotrek', 25 version=open(os.path.join(here, 'VERSION')).read().strip(), 26 author='Makina Corpus', 27 author_email='[email protected]', 28 url='http://makina-corpus.com', 29 description="Geotrek", 30 long_description=(open(os.path.join(here, 'README.rst')).read() + '\n\n' 31 + open(os.path.join(here, 'docs', 'changelog.rst')).read()), 32 scripts=['manage.py'], 33 install_requires=[ 34 # pinned by requirements.txt 35 'psycopg2', 36 'docutils', 37 'GDAL', 38 'Pillow', 39 'easy-thumbnails', 40 'simplekml', 41 'pygal', 42 'django-extended-choices', 43 'django-multiselectfield', 44 'geojson', 45 'tif2geojson', 46 'pytz', 47 'djangorestframework-gis', 48 'drf-dynamic-fields', 49 'django-rest-swagger', 50 'django-embed-video', 51 'xlrd', 52 'landez', 53 'redis', 54 'celery', 55 'django-celery-results', 56 'requests[security]', 57 'drf-extensions', 58 'django-colorfield', 59 'factory_boy', 60 ], 61 cmdclass={"build": BuildCommand}, 62 include_package_data=True, 63 license='BSD, see LICENSE file.', 64 packages=find_packages(), 65 classifiers=['Natural Language :: English', 66 'Environment :: Web Environment', 67 'Framework :: Django', 68 'Development Status :: 5 - Production/Stable', 69 'Programming Language :: Python :: 2.7'], 70 ) 71 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -1,23 +1,24 @@ #!/usr/bin/python3 import os import distutils.command.build +from pathlib import Path from setuptools import setup, find_packages +from shutil import copy here = os.path.abspath(os.path.dirname(__file__)) class BuildCommand(distutils.command.build.build): def run(self): - print("before") distutils.command.build.build.run(self) - print("after") from django.core.management import call_command curdir = os.getcwd() - os.chdir(os.path.join(curdir, 'geotrek')) - call_command('compilemessages') - os.chdir(os.path.join(curdir, 'mapentity')) - call_command('compilemessages') - os.chdir(curdir) + for subdir in ('geotrek', 'mapentity'): + os.chdir(subdir) + call_command('compilemessages') + for path in Path('.').rglob('*.mo'): + copy(path, os.path.join(curdir, self.build_lib, subdir, path)) + os.chdir(curdir) setup(
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,23 +1,24 @@\n #!/usr/bin/python3\n import os\n import distutils.command.build\n+from pathlib import Path\n from setuptools import setup, find_packages\n+from shutil import copy\n \n here = os.path.abspath(os.path.dirname(__file__))\n \n \n class BuildCommand(distutils.command.build.build):\n def run(self):\n- print(\"before\")\n distutils.command.build.build.run(self)\n- print(\"after\")\n from django.core.management import call_command\n curdir = os.getcwd()\n- os.chdir(os.path.join(curdir, 'geotrek'))\n- call_command('compilemessages')\n- os.chdir(os.path.join(curdir, 'mapentity'))\n- call_command('compilemessages')\n- os.chdir(curdir)\n+ for subdir in ('geotrek', 'mapentity'):\n+ os.chdir(subdir)\n+ call_command('compilemessages')\n+ for path in Path('.').rglob('*.mo'):\n+ copy(path, os.path.join(curdir, self.build_lib, subdir, path))\n+ os.chdir(curdir)\n \n \n setup(\n", "issue": "Fix translations in package\nThe compilemessages step for geotrek and mapentity is missing somewhere\n", "before_files": [{"content": "#!/usr/bin/python3\nimport os\nimport distutils.command.build\nfrom setuptools import setup, find_packages\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\nclass BuildCommand(distutils.command.build.build):\n def run(self):\n print(\"before\")\n distutils.command.build.build.run(self)\n print(\"after\")\n from django.core.management import call_command\n curdir = os.getcwd()\n os.chdir(os.path.join(curdir, 'geotrek'))\n call_command('compilemessages')\n os.chdir(os.path.join(curdir, 'mapentity'))\n call_command('compilemessages')\n os.chdir(curdir)\n\n\nsetup(\n name='geotrek',\n version=open(os.path.join(here, 'VERSION')).read().strip(),\n author='Makina Corpus',\n author_email='[email protected]',\n url='http://makina-corpus.com',\n description=\"Geotrek\",\n long_description=(open(os.path.join(here, 'README.rst')).read() + '\\n\\n'\n + open(os.path.join(here, 'docs', 'changelog.rst')).read()),\n scripts=['manage.py'],\n install_requires=[\n # pinned by requirements.txt\n 'psycopg2',\n 'docutils',\n 'GDAL',\n 'Pillow',\n 'easy-thumbnails',\n 'simplekml',\n 'pygal',\n 'django-extended-choices',\n 'django-multiselectfield',\n 'geojson',\n 'tif2geojson',\n 'pytz',\n 'djangorestframework-gis',\n 'drf-dynamic-fields',\n 'django-rest-swagger',\n 'django-embed-video',\n 'xlrd',\n 'landez',\n 'redis',\n 'celery',\n 'django-celery-results',\n 'requests[security]',\n 'drf-extensions',\n 'django-colorfield',\n 'factory_boy',\n ],\n cmdclass={\"build\": BuildCommand},\n include_package_data=True,\n license='BSD, see LICENSE file.',\n packages=find_packages(),\n classifiers=['Natural Language :: English',\n 'Environment :: Web Environment',\n 'Framework :: Django',\n 'Development Status :: 5 - Production/Stable',\n 'Programming Language :: Python :: 2.7'],\n)\n", "path": "setup.py"}]}
1,171
255
gh_patches_debug_26330
rasdani/github-patches
git_diff
streamlink__streamlink-1583
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Vaughnlive changed IP's to break Streamlink This will be a very brief bug report... As of tonight the head vaughnlive.py references IPs which were disconnected by vaughn to thwart streamlinking. I've observed vaughn serving video now from "66.90.93.44","66.90.93.35" and have personally gotten it to work overwriting the IP's in rtmp_server_map with those two alternating. I would submit the commit but I think some more testing is needed as I only use streamlink with one occasional stream and don't know how far those IPs will get more frequent SL users. #1187 contains lengthy discussion on the history of the war vaughn has waged against streamlink, this is probably not the last time the IPs will change. </issue> <code> [start of src/streamlink/plugins/vaughnlive.py] 1 import random 2 import re 3 import itertools 4 import ssl 5 import websocket 6 7 from streamlink.plugin import Plugin 8 from streamlink.plugin.api import useragents, http 9 from streamlink.stream import RTMPStream 10 11 _url_re = re.compile(r""" 12 http(s)?://(\w+\.)? 13 (?P<domain>vaughnlive|breakers|instagib|vapers|pearltime).tv 14 (/embed/video)? 15 /(?P<channel>[^/&?]+) 16 """, re.VERBOSE) 17 18 19 class VLWebSocket(websocket.WebSocket): 20 def __init__(self, **_): 21 self.session = _.pop("session") 22 self.logger = self.session.logger.new_module("plugins.vaughnlive.websocket") 23 sslopt = _.pop("sslopt", {}) 24 sslopt["cert_reqs"] = ssl.CERT_NONE 25 super(VLWebSocket, self).__init__(sslopt=sslopt, **_) 26 27 def send(self, payload, opcode=websocket.ABNF.OPCODE_TEXT): 28 self.logger.debug("Sending message: {0}", payload) 29 return super(VLWebSocket, self).send(payload + "\n\x00", opcode) 30 31 def recv(self): 32 d = super(VLWebSocket, self).recv().replace("\n", "").replace("\x00", "") 33 return d.split(" ", 1) 34 35 36 class VaughnLive(Plugin): 37 servers = ["wss://sapi-ws-{0}x{1:02}.vaughnlive.tv".format(x, y) for x, y in itertools.product(range(1, 3), 38 range(1, 6))] 39 origin = "https://vaughnlive.tv" 40 rtmp_server_map = { 41 "594140c69edad": "66.90.93.42", 42 "585c4cab1bef1": "66.90.93.34", 43 "5940d648b3929": "66.90.93.42", 44 "5941854b39bc4": "198.255.0.10" 45 } 46 name_remap = {"#vl": "live", "#btv": "btv", "#pt": "pt", "#igb": "instagib", "#vtv": "vtv"} 47 domain_map = {"vaughnlive": "#vl", "breakers": "#btv", "instagib": "#igb", "vapers": "#vtv", "pearltime": "#pt"} 48 49 @classmethod 50 def can_handle_url(cls, url): 51 return _url_re.match(url) 52 53 def api_url(self): 54 return random.choice(self.servers) 55 56 def parse_ack(self, action, message): 57 if action.endswith("3"): 58 channel, _, viewers, token, server, choked, is_live, chls, trns, ingest = message.split(";") 59 is_live = is_live == "1" 60 viewers = int(viewers) 61 self.logger.debug("Viewers: {0}, isLive={1}", viewers, is_live) 62 domain, channel = channel.split("-", 1) 63 return is_live, server, domain, channel, token, ingest 64 else: 65 self.logger.error("Unhandled action format: {0}", action) 66 67 def _get_info(self, stream_name): 68 server = self.api_url() 69 self.logger.debug("Connecting to API: {0}", server) 70 ws = websocket.create_connection(server, 71 header=["User-Agent: {0}".format(useragents.CHROME)], 72 origin=self.origin, 73 class_=VLWebSocket, 74 session=self.session) 75 ws.send("MVN LOAD3 {0}".format(stream_name)) 76 action, message = ws.recv() 77 return self.parse_ack(action, message) 78 79 def _get_rtmp_streams(self, server, 
domain, channel, token): 80 rtmp_server = self.rtmp_server_map.get(server, server) 81 82 url = "rtmp://{0}/live?{1}".format(rtmp_server, token) 83 84 yield "live", RTMPStream(self.session, params={ 85 "rtmp": url, 86 "pageUrl": self.url, 87 "playpath": "{0}_{1}".format(self.name_remap.get(domain, "live"), channel), 88 "live": True 89 }) 90 91 def _get_streams(self): 92 m = _url_re.match(self.url) 93 if m: 94 stream_name = "{0}-{1}".format(self.domain_map[(m.group("domain").lower())], 95 m.group("channel")) 96 97 is_live, server, domain, channel, token, ingest = self._get_info(stream_name) 98 99 if not is_live: 100 self.logger.info("Stream is currently off air") 101 else: 102 for s in self._get_rtmp_streams(server, domain, channel, token): 103 yield s 104 105 106 __plugin__ = VaughnLive 107 [end of src/streamlink/plugins/vaughnlive.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/streamlink/plugins/vaughnlive.py b/src/streamlink/plugins/vaughnlive.py --- a/src/streamlink/plugins/vaughnlive.py +++ b/src/streamlink/plugins/vaughnlive.py @@ -38,10 +38,10 @@ range(1, 6))] origin = "https://vaughnlive.tv" rtmp_server_map = { - "594140c69edad": "66.90.93.42", - "585c4cab1bef1": "66.90.93.34", - "5940d648b3929": "66.90.93.42", - "5941854b39bc4": "198.255.0.10" + "594140c69edad": "192.240.105.171:1935", + "585c4cab1bef1": "192.240.105.171:1935", + "5940d648b3929": "192.240.105.171:1935", + "5941854b39bc4": "192.240.105.171:1935" } name_remap = {"#vl": "live", "#btv": "btv", "#pt": "pt", "#igb": "instagib", "#vtv": "vtv"} domain_map = {"vaughnlive": "#vl", "breakers": "#btv", "instagib": "#igb", "vapers": "#vtv", "pearltime": "#pt"} @@ -99,6 +99,7 @@ if not is_live: self.logger.info("Stream is currently off air") else: + self.logger.info("Stream powered by VaughnSoft - remember to support them.") for s in self._get_rtmp_streams(server, domain, channel, token): yield s
{"golden_diff": "diff --git a/src/streamlink/plugins/vaughnlive.py b/src/streamlink/plugins/vaughnlive.py\n--- a/src/streamlink/plugins/vaughnlive.py\n+++ b/src/streamlink/plugins/vaughnlive.py\n@@ -38,10 +38,10 @@\n range(1, 6))]\n origin = \"https://vaughnlive.tv\"\n rtmp_server_map = {\n- \"594140c69edad\": \"66.90.93.42\",\n- \"585c4cab1bef1\": \"66.90.93.34\",\n- \"5940d648b3929\": \"66.90.93.42\",\n- \"5941854b39bc4\": \"198.255.0.10\"\n+ \"594140c69edad\": \"192.240.105.171:1935\",\n+ \"585c4cab1bef1\": \"192.240.105.171:1935\",\n+ \"5940d648b3929\": \"192.240.105.171:1935\",\n+ \"5941854b39bc4\": \"192.240.105.171:1935\"\n }\n name_remap = {\"#vl\": \"live\", \"#btv\": \"btv\", \"#pt\": \"pt\", \"#igb\": \"instagib\", \"#vtv\": \"vtv\"}\n domain_map = {\"vaughnlive\": \"#vl\", \"breakers\": \"#btv\", \"instagib\": \"#igb\", \"vapers\": \"#vtv\", \"pearltime\": \"#pt\"}\n@@ -99,6 +99,7 @@\n if not is_live:\n self.logger.info(\"Stream is currently off air\")\n else:\n+ self.logger.info(\"Stream powered by VaughnSoft - remember to support them.\")\n for s in self._get_rtmp_streams(server, domain, channel, token):\n yield s\n", "issue": "Vaughnlive changed IP's to break Streamlink\nThis will be a very brief bug report... As of tonight the head vaughnlive.py references IPs which were disconnected by vaughn to thwart streamlinking. I've observed vaughn serving video now from \"66.90.93.44\",\"66.90.93.35\" and have personally gotten it to work overwriting the IP's in rtmp_server_map with those two alternating. I would submit the commit but I think some more testing is needed as I only use streamlink with one occasional stream and don't know how far those IPs will get more frequent SL users.\r\n\r\n #1187 contains lengthy discussion on the history of the war vaughn has waged against streamlink, this is probably not the last time the IPs will change.\n", "before_files": [{"content": "import random\nimport re\nimport itertools\nimport ssl\nimport websocket\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import useragents, http\nfrom streamlink.stream import RTMPStream\n\n_url_re = re.compile(r\"\"\"\n http(s)?://(\\w+\\.)?\n (?P<domain>vaughnlive|breakers|instagib|vapers|pearltime).tv\n (/embed/video)?\n /(?P<channel>[^/&?]+)\n\"\"\", re.VERBOSE)\n\n\nclass VLWebSocket(websocket.WebSocket):\n def __init__(self, **_):\n self.session = _.pop(\"session\")\n self.logger = self.session.logger.new_module(\"plugins.vaughnlive.websocket\")\n sslopt = _.pop(\"sslopt\", {})\n sslopt[\"cert_reqs\"] = ssl.CERT_NONE\n super(VLWebSocket, self).__init__(sslopt=sslopt, **_)\n\n def send(self, payload, opcode=websocket.ABNF.OPCODE_TEXT):\n self.logger.debug(\"Sending message: {0}\", payload)\n return super(VLWebSocket, self).send(payload + \"\\n\\x00\", opcode)\n\n def recv(self):\n d = super(VLWebSocket, self).recv().replace(\"\\n\", \"\").replace(\"\\x00\", \"\")\n return d.split(\" \", 1)\n\n\nclass VaughnLive(Plugin):\n servers = [\"wss://sapi-ws-{0}x{1:02}.vaughnlive.tv\".format(x, y) for x, y in itertools.product(range(1, 3),\n range(1, 6))]\n origin = \"https://vaughnlive.tv\"\n rtmp_server_map = {\n \"594140c69edad\": \"66.90.93.42\",\n \"585c4cab1bef1\": \"66.90.93.34\",\n \"5940d648b3929\": \"66.90.93.42\",\n \"5941854b39bc4\": \"198.255.0.10\"\n }\n name_remap = {\"#vl\": \"live\", \"#btv\": \"btv\", \"#pt\": \"pt\", \"#igb\": \"instagib\", \"#vtv\": \"vtv\"}\n domain_map = {\"vaughnlive\": \"#vl\", \"breakers\": \"#btv\", \"instagib\": \"#igb\", \"vapers\": \"#vtv\", 
\"pearltime\": \"#pt\"}\n\n @classmethod\n def can_handle_url(cls, url):\n return _url_re.match(url)\n\n def api_url(self):\n return random.choice(self.servers)\n\n def parse_ack(self, action, message):\n if action.endswith(\"3\"):\n channel, _, viewers, token, server, choked, is_live, chls, trns, ingest = message.split(\";\")\n is_live = is_live == \"1\"\n viewers = int(viewers)\n self.logger.debug(\"Viewers: {0}, isLive={1}\", viewers, is_live)\n domain, channel = channel.split(\"-\", 1)\n return is_live, server, domain, channel, token, ingest\n else:\n self.logger.error(\"Unhandled action format: {0}\", action)\n\n def _get_info(self, stream_name):\n server = self.api_url()\n self.logger.debug(\"Connecting to API: {0}\", server)\n ws = websocket.create_connection(server,\n header=[\"User-Agent: {0}\".format(useragents.CHROME)],\n origin=self.origin,\n class_=VLWebSocket,\n session=self.session)\n ws.send(\"MVN LOAD3 {0}\".format(stream_name))\n action, message = ws.recv()\n return self.parse_ack(action, message)\n\n def _get_rtmp_streams(self, server, domain, channel, token):\n rtmp_server = self.rtmp_server_map.get(server, server)\n\n url = \"rtmp://{0}/live?{1}\".format(rtmp_server, token)\n\n yield \"live\", RTMPStream(self.session, params={\n \"rtmp\": url,\n \"pageUrl\": self.url,\n \"playpath\": \"{0}_{1}\".format(self.name_remap.get(domain, \"live\"), channel),\n \"live\": True\n })\n\n def _get_streams(self):\n m = _url_re.match(self.url)\n if m:\n stream_name = \"{0}-{1}\".format(self.domain_map[(m.group(\"domain\").lower())],\n m.group(\"channel\"))\n\n is_live, server, domain, channel, token, ingest = self._get_info(stream_name)\n\n if not is_live:\n self.logger.info(\"Stream is currently off air\")\n else:\n for s in self._get_rtmp_streams(server, domain, channel, token):\n yield s\n\n\n__plugin__ = VaughnLive\n", "path": "src/streamlink/plugins/vaughnlive.py"}]}
num_tokens_prompt: 2,018
num_tokens_diff: 518
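The golden diff for this record boils down to two changes: every key in `rtmp_server_map` is repointed at a single RTMP ingest host (`192.240.105.171:1935`), and an attribution log line is added before the streams are yielded. Below is a minimal sketch of the lookup that map drives, reusing the `.get(server, server)` fallback and URL format visible in the plugin code above; the constant and function names are made up for the sketch, and the host value may well change again:

```python
# Sketch, not the plugin itself: resolve an RTMP URL from the API's server id.
# The host mirrors the golden diff; VaughnLive has rotated these before.
RTMP_SERVER_MAP = {
    "594140c69edad": "192.240.105.171:1935",
    "585c4cab1bef1": "192.240.105.171:1935",
    "5940d648b3929": "192.240.105.171:1935",
    "5941854b39bc4": "192.240.105.171:1935",
}

def resolve_rtmp_url(server_id, token):
    host = RTMP_SERVER_MAP.get(server_id, server_id)  # fall back to the raw id
    return "rtmp://{0}/live?{1}".format(host, token)
```

Keeping the hosts in one module-level map is what lets each IP rotation stay a few-line diff, which matters given the issue's note that this is a recurring cat-and-mouse game.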
problem_id: gh_patches_debug_26363
source: rasdani/github-patches
task_type: git_diff
in_source_id: mathesar-foundation__mathesar-786
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Implement showing and changing a column's type ## Problem <!-- Please provide a clear and concise description of the problem that this feature request is designed to solve.--> Users might want to change the data type of an existing column on their table. ## Proposed solution <!-- A clear and concise description of your proposed solution or feature. --> The ["Working with Columns" design spec](https://wiki.mathesar.org/en/design/specs/working-with-columns) has a solution for showing and changing column types, which we need to implement on the frontend. Please note that we're only implementing changing the Mathesar data type in this milestone. Options specific to individual data types will be implemented in the next milestone. Number data types should save as `NUMERIC`. Text data types should save as `VARCHAR`. Date/time data types can be disabled for now since they're not fully implemented on the backend. ## Additional context <!-- Add any other context or screenshots about the feature request here.--> - Backend work: - #532 to get the list of types - #199 to get valid target types and change types - Design issue: #324 - Design discussion: #436 - #269 </issue> <code> [start of mathesar/views.py] 1 from django.shortcuts import render, redirect, get_object_or_404 2 3 from mathesar.models import Database, Schema, Table 4 from mathesar.api.serializers.databases import DatabaseSerializer 5 from mathesar.api.serializers.schemas import SchemaSerializer 6 from mathesar.api.serializers.tables import TableSerializer 7 8 9 def get_schema_list(request, database): 10 schema_serializer = SchemaSerializer( 11 Schema.objects.filter(database=database), 12 many=True, 13 context={'request': request} 14 ) 15 return schema_serializer.data 16 17 18 def get_database_list(request): 19 database_serializer = DatabaseSerializer( 20 Database.objects.all(), 21 many=True, 22 context={'request': request} 23 ) 24 return database_serializer.data 25 26 27 def get_table_list(request, schema): 28 if schema is None: 29 return [] 30 table_serializer = TableSerializer( 31 Table.objects.filter(schema=schema), 32 many=True, 33 context={'request': request} 34 ) 35 return table_serializer.data 36 37 38 def get_common_data(request, database, schema=None): 39 return { 40 'current_db': database.name if database else None, 41 'current_schema': schema.id if schema else None, 42 'schemas': get_schema_list(request, database), 43 'databases': get_database_list(request), 44 'tables': get_table_list(request, schema) 45 } 46 47 48 def get_current_database(request, db_name): 49 # if there's a DB name passed in, try to retrieve the database, or return a 404 error. 50 if db_name is not None: 51 return get_object_or_404(Database, name=db_name) 52 else: 53 try: 54 # Try to get the first database available 55 return Database.objects.order_by('id').first() 56 except Database.DoesNotExist: 57 return None 58 59 60 def get_current_schema(request, schema_id, database): 61 # if there's a schema ID passed in, try to retrieve the schema, or return a 404 error. 62 if schema_id is not None: 63 return get_object_or_404(Schema, id=schema_id) 64 else: 65 try: 66 # Try to get the first schema in the DB 67 return Schema.objects.filter(database=database).order_by('id').first() 68 except Schema.DoesNotExist: 69 return None 70 71 72 def render_schema(request, database, schema): 73 # if there's no schema available, redirect to the schemas page. 
74 if not schema: 75 return redirect('schemas', db_name=database.name) 76 else: 77 # We are redirecting so that the correct URL is passed to the frontend. 78 return redirect('schema_home', db_name=database.name, schema_id=schema.id) 79 80 81 def home(request): 82 database = get_current_database(request, None) 83 schema = get_current_schema(request, None, database) 84 return render_schema(request, database, schema) 85 86 87 def db_home(request, db_name): 88 database = get_current_database(request, db_name) 89 schema = get_current_schema(request, None, database) 90 return render_schema(request, database, schema) 91 92 93 def schema_home(request, db_name, schema_id): 94 database = get_current_database(request, db_name) 95 schema = get_current_schema(request, schema_id, database) 96 return render(request, 'mathesar/index.html', { 97 'common_data': get_common_data(request, database, schema) 98 }) 99 100 101 def schemas(request, db_name): 102 database = get_current_database(request, db_name) 103 schema = get_current_schema(request, None, database) 104 return render(request, 'mathesar/index.html', { 105 'common_data': get_common_data(request, database, schema) 106 }) 107 [end of mathesar/views.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mathesar/views.py b/mathesar/views.py --- a/mathesar/views.py +++ b/mathesar/views.py @@ -1,7 +1,7 @@ from django.shortcuts import render, redirect, get_object_or_404 from mathesar.models import Database, Schema, Table -from mathesar.api.serializers.databases import DatabaseSerializer +from mathesar.api.serializers.databases import DatabaseSerializer, TypeSerializer from mathesar.api.serializers.schemas import SchemaSerializer from mathesar.api.serializers.tables import TableSerializer @@ -35,13 +35,25 @@ return table_serializer.data +def get_type_list(request, database): + if database is None: + return [] + type_serializer = TypeSerializer( + database.supported_types, + many=True, + context={'request': request} + ) + return type_serializer.data + + def get_common_data(request, database, schema=None): return { 'current_db': database.name if database else None, 'current_schema': schema.id if schema else None, 'schemas': get_schema_list(request, database), 'databases': get_database_list(request), - 'tables': get_table_list(request, schema) + 'tables': get_table_list(request, schema), + 'abstract_types': get_type_list(request, database) }
{"golden_diff": "diff --git a/mathesar/views.py b/mathesar/views.py\n--- a/mathesar/views.py\n+++ b/mathesar/views.py\n@@ -1,7 +1,7 @@\n from django.shortcuts import render, redirect, get_object_or_404\n \n from mathesar.models import Database, Schema, Table\n-from mathesar.api.serializers.databases import DatabaseSerializer\n+from mathesar.api.serializers.databases import DatabaseSerializer, TypeSerializer\n from mathesar.api.serializers.schemas import SchemaSerializer\n from mathesar.api.serializers.tables import TableSerializer\n \n@@ -35,13 +35,25 @@\n return table_serializer.data\n \n \n+def get_type_list(request, database):\n+ if database is None:\n+ return []\n+ type_serializer = TypeSerializer(\n+ database.supported_types,\n+ many=True,\n+ context={'request': request}\n+ )\n+ return type_serializer.data\n+\n+\n def get_common_data(request, database, schema=None):\n return {\n 'current_db': database.name if database else None,\n 'current_schema': schema.id if schema else None,\n 'schemas': get_schema_list(request, database),\n 'databases': get_database_list(request),\n- 'tables': get_table_list(request, schema)\n+ 'tables': get_table_list(request, schema),\n+ 'abstract_types': get_type_list(request, database)\n }\n", "issue": "Implement showing and changing a column's type\n## Problem\r\n<!-- Please provide a clear and concise description of the problem that this feature request is designed to solve.-->\r\nUsers might want to change the data type of an existing column on their table.\r\n\r\n## Proposed solution\r\n<!-- A clear and concise description of your proposed solution or feature. -->\r\nThe [\"Working with Columns\" design spec](https://wiki.mathesar.org/en/design/specs/working-with-columns) has a solution for showing and changing column types, which we need to implement on the frontend.\r\n\r\nPlease note that we're only implementing changing the Mathesar data type in this milestone. 
Options specific to individual data types will be implemented in the next milestone.\r\n\r\nNumber data types should save as `NUMERIC`.\r\nText data types should save as `VARCHAR`.\r\nDate/time data types can be disabled for now since they're not fully implemented on the backend.\r\n\r\n## Additional context\r\n<!-- Add any other context or screenshots about the feature request here.-->\r\n- Backend work:\r\n - #532 to get the list of types \r\n - #199 to get valid target types and change types \r\n- Design issue: #324 \r\n- Design discussion: #436\r\n- #269 \n", "before_files": [{"content": "from django.shortcuts import render, redirect, get_object_or_404\n\nfrom mathesar.models import Database, Schema, Table\nfrom mathesar.api.serializers.databases import DatabaseSerializer\nfrom mathesar.api.serializers.schemas import SchemaSerializer\nfrom mathesar.api.serializers.tables import TableSerializer\n\n\ndef get_schema_list(request, database):\n schema_serializer = SchemaSerializer(\n Schema.objects.filter(database=database),\n many=True,\n context={'request': request}\n )\n return schema_serializer.data\n\n\ndef get_database_list(request):\n database_serializer = DatabaseSerializer(\n Database.objects.all(),\n many=True,\n context={'request': request}\n )\n return database_serializer.data\n\n\ndef get_table_list(request, schema):\n if schema is None:\n return []\n table_serializer = TableSerializer(\n Table.objects.filter(schema=schema),\n many=True,\n context={'request': request}\n )\n return table_serializer.data\n\n\ndef get_common_data(request, database, schema=None):\n return {\n 'current_db': database.name if database else None,\n 'current_schema': schema.id if schema else None,\n 'schemas': get_schema_list(request, database),\n 'databases': get_database_list(request),\n 'tables': get_table_list(request, schema)\n }\n\n\ndef get_current_database(request, db_name):\n # if there's a DB name passed in, try to retrieve the database, or return a 404 error.\n if db_name is not None:\n return get_object_or_404(Database, name=db_name)\n else:\n try:\n # Try to get the first database available\n return Database.objects.order_by('id').first()\n except Database.DoesNotExist:\n return None\n\n\ndef get_current_schema(request, schema_id, database):\n # if there's a schema ID passed in, try to retrieve the schema, or return a 404 error.\n if schema_id is not None:\n return get_object_or_404(Schema, id=schema_id)\n else:\n try:\n # Try to get the first schema in the DB\n return Schema.objects.filter(database=database).order_by('id').first()\n except Schema.DoesNotExist:\n return None\n\n\ndef render_schema(request, database, schema):\n # if there's no schema available, redirect to the schemas page.\n if not schema:\n return redirect('schemas', db_name=database.name)\n else:\n # We are redirecting so that the correct URL is passed to the frontend.\n return redirect('schema_home', db_name=database.name, schema_id=schema.id)\n\n\ndef home(request):\n database = get_current_database(request, None)\n schema = get_current_schema(request, None, database)\n return render_schema(request, database, schema)\n\n\ndef db_home(request, db_name):\n database = get_current_database(request, db_name)\n schema = get_current_schema(request, None, database)\n return render_schema(request, database, schema)\n\n\ndef schema_home(request, db_name, schema_id):\n database = get_current_database(request, db_name)\n schema = get_current_schema(request, schema_id, database)\n return render(request, 'mathesar/index.html', {\n 
'common_data': get_common_data(request, database, schema)\n })\n\n\ndef schemas(request, db_name):\n database = get_current_database(request, db_name)\n schema = get_current_schema(request, None, database)\n return render(request, 'mathesar/index.html', {\n 'common_data': get_common_data(request, database, schema)\n })\n", "path": "mathesar/views.py"}]}
num_tokens_prompt: 1,765
num_tokens_diff: 295
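The golden diff here follows the pattern of the existing helpers: a new `get_type_list(request, database)` wraps `database.supported_types` in the newly imported `TypeSerializer`, and the result is exposed to the frontend as an `abstract_types` key in `get_common_data`. The saving rules stated in the issue can be summarised as a small mapping; the dict below is a hypothetical illustration of that requirement, with names invented for the sketch rather than taken from either side of the patch:

```python
# Hypothetical frontend-side mapping implied by the issue text for this
# milestone only; keys and values come from the issue, not the codebase.
ABSTRACT_TO_DB_TYPE = {
    "Number": "NUMERIC",   # all number types save as NUMERIC for now
    "Text": "VARCHAR",     # all text types save as VARCHAR for now
    # Date/time types stay disabled until the backend fully supports them.
}
```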
problem_id: gh_patches_debug_34062
source: rasdani/github-patches
task_type: git_diff
in_source_id: mampfes__hacs_waste_collection_schedule-1871
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [Bug]: Chichester District Council is not working ### I Have A Problem With: A specific source ### What's Your Problem The source has stopped working since Tuesday 13th February 2024. All the collection days no longer show on the calendar at all. The Chichester District Council website still shows me the days. ### Source (if relevant) chichester_gov_uk ### Logs ```Shell This error originated from a custom integration. Logger: waste_collection_schedule.source_shell Source: custom_components/waste_collection_schedule/waste_collection_schedule/source_shell.py:136 Integration: waste_collection_schedule (documentation) First occurred: 11:36:47 (1 occurrences) Last logged: 11:36:47 fetch failed for source Chichester District Council: Traceback (most recent call last): File "/config/custom_components/waste_collection_schedule/waste_collection_schedule/source_shell.py", line 134, in fetch entries = self._source.fetch() ^^^^^^^^^^^^^^^^^^^^ File "/config/custom_components/waste_collection_schedule/waste_collection_schedule/source/chichester_gov_uk.py", line 37, in fetch form_url = form["action"] ~~~~^^^^^^^^^^ TypeError: 'NoneType' object is not subscriptable ``` ### Relevant Configuration ```YAML waste_collection_schedule: sources: - name: chichester_gov_uk args: uprn: 10002466648 ``` ### Checklist Source Error - [X] Use the example parameters for your source (often available in the documentation) (don't forget to restart Home Assistant after changing the configuration) - [X] Checked that the website of your service provider is still working - [X] Tested my attributes on the service provider website (if possible) - [X] I have tested with the latest version of the integration (master) (for HACS in the 3 dot menu of the integration click on "Redownload" and choose master as version) ### Checklist Sensor Error - [X] Checked in the Home Assistant Calendar tab if the event names match the types names (if types argument is used) ### Required - [X] I have searched past (closed AND opened) issues to see if this bug has already been reported, and it hasn't been. - [X] I understand that people give their precious time for free, and thus I've done my very best to make this problem as easy as possible to investigate. 
</issue> <code> [start of custom_components/waste_collection_schedule/waste_collection_schedule/source/chichester_gov_uk.py] 1 from datetime import datetime 2 3 import requests 4 from bs4 import BeautifulSoup 5 from waste_collection_schedule import Collection 6 7 TITLE = "Chichester District Council" 8 DESCRIPTION = "Source for chichester.gov.uk services for Chichester" 9 URL = "chichester.gov.uk" 10 11 TEST_CASES = { 12 "Test_001": {"uprn": "010002476348"}, 13 "Test_002": {"uprn": "100062612654"}, 14 "Test_003": {"uprn": "100061745708"}, 15 } 16 17 ICON_MAP = { 18 "General Waste": "mdi:trash-can", 19 "Recycling": "mdi:recycle", 20 "Garden Recycling": "mdi:leaf", 21 } 22 23 24 class Source: 25 def __init__(self, uprn): 26 self._uprn = uprn 27 28 def fetch(self): 29 session = requests.Session() 30 # Start a session 31 r = session.get("https://www.chichester.gov.uk/checkyourbinday") 32 r.raise_for_status() 33 soup = BeautifulSoup(r.text, features="html.parser") 34 35 # Extract form submission url 36 form = soup.find("form", attrs={"id": "WASTECOLLECTIONCALENDARV2_FORM"}) 37 form_url = form["action"] 38 39 # Submit form 40 form_data = { 41 "WASTECOLLECTIONCALENDARV2_FORMACTION_NEXT": "Submit", 42 "WASTECOLLECTIONCALENDARV2_CALENDAR_UPRN": self._uprn, 43 } 44 r = session.post(form_url, data=form_data) 45 r.raise_for_status() 46 47 # Extract collection dates 48 soup = BeautifulSoup(r.text, features="html.parser") 49 entries = [] 50 data = soup.find_all("div", attrs={"class": "bin-days"}) 51 for bin in data: 52 if "print-only" in bin["class"]: 53 continue 54 55 type = bin.find("span").contents[0].replace("bin", "").strip().title() 56 list_items = bin.find_all("li") 57 if list_items: 58 for item in list_items: 59 date = datetime.strptime(item.text, "%d %B %Y").date() 60 entries.append( 61 Collection( 62 date=date, 63 t=type, 64 icon=ICON_MAP.get(type), 65 ) 66 ) 67 68 return entries 69 [end of custom_components/waste_collection_schedule/waste_collection_schedule/source/chichester_gov_uk.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/chichester_gov_uk.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/chichester_gov_uk.py --- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/chichester_gov_uk.py +++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/chichester_gov_uk.py @@ -33,13 +33,13 @@ soup = BeautifulSoup(r.text, features="html.parser") # Extract form submission url - form = soup.find("form", attrs={"id": "WASTECOLLECTIONCALENDARV2_FORM"}) + form = soup.find("form", attrs={"id": "WASTECOLLECTIONCALENDARV5_FORM"}) form_url = form["action"] # Submit form form_data = { - "WASTECOLLECTIONCALENDARV2_FORMACTION_NEXT": "Submit", - "WASTECOLLECTIONCALENDARV2_CALENDAR_UPRN": self._uprn, + "WASTECOLLECTIONCALENDARV5_FORMACTION_NEXT": "Submit", + "WASTECOLLECTIONCALENDARV5_CALENDAR_UPRN": self._uprn, } r = session.post(form_url, data=form_data) r.raise_for_status() @@ -47,16 +47,18 @@ # Extract collection dates soup = BeautifulSoup(r.text, features="html.parser") entries = [] - data = soup.find_all("div", attrs={"class": "bin-days"}) - for bin in data: - if "print-only" in bin["class"]: - continue - - type = bin.find("span").contents[0].replace("bin", "").strip().title() - list_items = bin.find_all("li") - if list_items: - for item in list_items: - date = datetime.strptime(item.text, "%d %B %Y").date() + tables = soup.find_all("table", attrs={"class": "bin-collection-dates"}) + # Data is presented in two tables side-by-side + for table in tables: + # Each collection is a table row + data = table.find_all("tr") + for bin in data: + cells = bin.find_all("td") + # Ignore the header row + if len(cells) == 2: + date = datetime.strptime(cells[0].text, "%d %B %Y").date() + # Maintain backwards compatibility - it used to be General Waste and now it is General waste + type = cells[1].text.title() entries.append( Collection( date=date,
{"golden_diff": "diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/chichester_gov_uk.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/chichester_gov_uk.py\n--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/chichester_gov_uk.py\n+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/chichester_gov_uk.py\n@@ -33,13 +33,13 @@\n soup = BeautifulSoup(r.text, features=\"html.parser\")\n \n # Extract form submission url\n- form = soup.find(\"form\", attrs={\"id\": \"WASTECOLLECTIONCALENDARV2_FORM\"})\n+ form = soup.find(\"form\", attrs={\"id\": \"WASTECOLLECTIONCALENDARV5_FORM\"})\n form_url = form[\"action\"]\n \n # Submit form\n form_data = {\n- \"WASTECOLLECTIONCALENDARV2_FORMACTION_NEXT\": \"Submit\",\n- \"WASTECOLLECTIONCALENDARV2_CALENDAR_UPRN\": self._uprn,\n+ \"WASTECOLLECTIONCALENDARV5_FORMACTION_NEXT\": \"Submit\",\n+ \"WASTECOLLECTIONCALENDARV5_CALENDAR_UPRN\": self._uprn,\n }\n r = session.post(form_url, data=form_data)\n r.raise_for_status()\n@@ -47,16 +47,18 @@\n # Extract collection dates\n soup = BeautifulSoup(r.text, features=\"html.parser\")\n entries = []\n- data = soup.find_all(\"div\", attrs={\"class\": \"bin-days\"})\n- for bin in data:\n- if \"print-only\" in bin[\"class\"]:\n- continue\n-\n- type = bin.find(\"span\").contents[0].replace(\"bin\", \"\").strip().title()\n- list_items = bin.find_all(\"li\")\n- if list_items:\n- for item in list_items:\n- date = datetime.strptime(item.text, \"%d %B %Y\").date()\n+ tables = soup.find_all(\"table\", attrs={\"class\": \"bin-collection-dates\"})\n+ # Data is presented in two tables side-by-side\n+ for table in tables:\n+ # Each collection is a table row\n+ data = table.find_all(\"tr\")\n+ for bin in data:\n+ cells = bin.find_all(\"td\")\n+ # Ignore the header row\n+ if len(cells) == 2:\n+ date = datetime.strptime(cells[0].text, \"%d %B %Y\").date()\n+ # Maintain backwards compatibility - it used to be General Waste and now it is General waste\n+ type = cells[1].text.title()\n entries.append(\n Collection(\n date=date,\n", "issue": "[Bug]: Chichester District Council is not working\n### I Have A Problem With:\n\nA specific source\n\n### What's Your Problem\n\nThe source has stopped working since Tuesday 13th February 2024. All the collection days no longer show on the calendar at all. 
The Chichester District Council website still shows me the days.\n\n### Source (if relevant)\n\nchichester_gov_uk\n\n### Logs\n\n```Shell\nThis error originated from a custom integration.\r\n\r\nLogger: waste_collection_schedule.source_shell\r\nSource: custom_components/waste_collection_schedule/waste_collection_schedule/source_shell.py:136\r\nIntegration: waste_collection_schedule (documentation)\r\nFirst occurred: 11:36:47 (1 occurrences)\r\nLast logged: 11:36:47\r\n\r\nfetch failed for source Chichester District Council: Traceback (most recent call last): File \"/config/custom_components/waste_collection_schedule/waste_collection_schedule/source_shell.py\", line 134, in fetch entries = self._source.fetch() ^^^^^^^^^^^^^^^^^^^^ File \"/config/custom_components/waste_collection_schedule/waste_collection_schedule/source/chichester_gov_uk.py\", line 37, in fetch form_url = form[\"action\"] ~~~~^^^^^^^^^^ TypeError: 'NoneType' object is not subscriptable\n```\n\n\n### Relevant Configuration\n\n```YAML\nwaste_collection_schedule:\r\n sources:\r\n - name: chichester_gov_uk\r\n args:\r\n uprn: 10002466648\n```\n\n\n### Checklist Source Error\n\n- [X] Use the example parameters for your source (often available in the documentation) (don't forget to restart Home Assistant after changing the configuration)\n- [X] Checked that the website of your service provider is still working\n- [X] Tested my attributes on the service provider website (if possible)\n- [X] I have tested with the latest version of the integration (master) (for HACS in the 3 dot menu of the integration click on \"Redownload\" and choose master as version)\n\n### Checklist Sensor Error\n\n- [X] Checked in the Home Assistant Calendar tab if the event names match the types names (if types argument is used)\n\n### Required\n\n- [X] I have searched past (closed AND opened) issues to see if this bug has already been reported, and it hasn't been.\n- [X] I understand that people give their precious time for free, and thus I've done my very best to make this problem as easy as possible to investigate.\n", "before_files": [{"content": "from datetime import datetime\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom waste_collection_schedule import Collection\n\nTITLE = \"Chichester District Council\"\nDESCRIPTION = \"Source for chichester.gov.uk services for Chichester\"\nURL = \"chichester.gov.uk\"\n\nTEST_CASES = {\n \"Test_001\": {\"uprn\": \"010002476348\"},\n \"Test_002\": {\"uprn\": \"100062612654\"},\n \"Test_003\": {\"uprn\": \"100061745708\"},\n}\n\nICON_MAP = {\n \"General Waste\": \"mdi:trash-can\",\n \"Recycling\": \"mdi:recycle\",\n \"Garden Recycling\": \"mdi:leaf\",\n}\n\n\nclass Source:\n def __init__(self, uprn):\n self._uprn = uprn\n\n def fetch(self):\n session = requests.Session()\n # Start a session\n r = session.get(\"https://www.chichester.gov.uk/checkyourbinday\")\n r.raise_for_status()\n soup = BeautifulSoup(r.text, features=\"html.parser\")\n\n # Extract form submission url\n form = soup.find(\"form\", attrs={\"id\": \"WASTECOLLECTIONCALENDARV2_FORM\"})\n form_url = form[\"action\"]\n\n # Submit form\n form_data = {\n \"WASTECOLLECTIONCALENDARV2_FORMACTION_NEXT\": \"Submit\",\n \"WASTECOLLECTIONCALENDARV2_CALENDAR_UPRN\": self._uprn,\n }\n r = session.post(form_url, data=form_data)\n r.raise_for_status()\n\n # Extract collection dates\n soup = BeautifulSoup(r.text, features=\"html.parser\")\n entries = []\n data = soup.find_all(\"div\", attrs={\"class\": \"bin-days\"})\n for bin in data:\n if \"print-only\" in 
bin[\"class\"]:\n continue\n\n type = bin.find(\"span\").contents[0].replace(\"bin\", \"\").strip().title()\n list_items = bin.find_all(\"li\")\n if list_items:\n for item in list_items:\n date = datetime.strptime(item.text, \"%d %B %Y\").date()\n entries.append(\n Collection(\n date=date,\n t=type,\n icon=ICON_MAP.get(type),\n )\n )\n\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/chichester_gov_uk.py"}]}
num_tokens_prompt: 1,755
num_tokens_diff: 596
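Two things broke at once for this source: the council renamed the form id from `WASTECOLLECTIONCALENDARV2_FORM` to `WASTECOLLECTIONCALENDARV5_FORM`, so `soup.find(...)` returned `None` and produced the `'NoneType' object is not subscriptable` traceback, and the results page moved from `bin-days` divs to `bin-collection-dates` tables, which the golden diff re-parses row by row. One way to soften the next rename is a version-agnostic lookup; the helper below is a sketch of that idea with an invented name, not part of the patch:

```python
# Sketch only: match the form id by prefix instead of hard-coding the version
# suffix, so a future V5 -> V6 rename does not silently return None again.
import re

from bs4 import BeautifulSoup


def find_calendar_form(html):
    soup = BeautifulSoup(html, features="html.parser")
    return soup.find(
        "form", attrs={"id": re.compile(r"^WASTECOLLECTIONCALENDARV\d+_FORM$")}
    )
```

The submitted field names (`..._FORMACTION_NEXT`, `..._CALENDAR_UPRN`) carry the same version suffix, so complete future-proofing would also derive those from the matched id rather than hard-coding them.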
problem_id: gh_patches_debug_899
source: rasdani/github-patches
task_type: git_diff
in_source_id: python-telegram-bot__python-telegram-bot-1063
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> User.full_name doesn't handle non-ASCII (in Python 2?) ### Steps to reproduce ```python updater = ext.Updater(token=settings.telegram_token()) def F(bot, update): user = update.effective_user print repr(user.first_name), repr(user.last_name) print '%s %s' % (user.first_name, user.last_name) print user.full_name updater.dispatcher.add_handler(ext.MessageHandler(0, F)) updater.start_polling() updater.idle() ``` ### Expected behaviour ``` u'Dan\u2022iel' u'Reed' Dan•iel Reed Dan•iel Reed ``` ### Actual behaviour ``` u'Dan\u2022iel' u'Reed' Dan•iel Reed ERROR dispatcher.py:301] An uncaught error was raised while processing the update Traceback (most recent call last): File "local/lib/python2.7/site-packages/telegram/ext/dispatcher.py", line 279, in process_update handler.handle_update(update, self) File "local/lib/python2.7/site-packages/telegram/ext/messagehandler.py", line 169, in handle_update return self.callback(dispatcher.bot, update, **optional_args) File "<stdin>", line 5, in F File "local/lib/python2.7/site-packages/telegram/user.py", line 91, in full_name return '{} {}'.format(self.first_name, self.last_name) UnicodeEncodeError: 'ascii' codec can't encode character u'\u2022' in position 3: ordinal not in range(128) ``` ### Configuration **Operating System:** **Version of Python, python-telegram-bot & dependencies:** ``` python-telegram-bot 10.0.1 certifi 2018.01.18 future 0.16.0 Python 2.7.14 (default, Sep 23 2017, 22:06:14) [GCC 7.2.0] ``` I'm a little rushed, but this is works for me: ```python @property def full_name(self): """ :obj:`str`: Convenience property. The user's :attr:`first_name`, followed by (if available) :attr:`last_name`. """ if self.last_name: ! return u'{} {}'.format(self.first_name, self.last_name) return self.first_name ``` </issue> <code> [start of telegram/user.py] 1 #!/usr/bin/env python 2 # pylint: disable=C0103,W0622 3 # 4 # A library that provides a Python interface to the Telegram Bot API 5 # Copyright (C) 2015-2018 6 # Leandro Toledo de Souza <[email protected]> 7 # 8 # This program is free software: you can redistribute it and/or modify 9 # it under the terms of the GNU Lesser Public License as published by 10 # the Free Software Foundation, either version 3 of the License, or 11 # (at your option) any later version. 12 # 13 # This program is distributed in the hope that it will be useful, 14 # but WITHOUT ANY WARRANTY; without even the implied warranty of 15 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 # GNU Lesser Public License for more details. 17 # 18 # You should have received a copy of the GNU Lesser Public License 19 # along with this program. If not, see [http://www.gnu.org/licenses/]. 20 """This module contains an object that represents a Telegram User.""" 21 22 from telegram import TelegramObject 23 from telegram.utils.helpers import mention_html as util_mention_html 24 from telegram.utils.helpers import mention_markdown as util_mention_markdown 25 26 27 class User(TelegramObject): 28 """This object represents a Telegram user or bot. 29 30 Attributes: 31 id (:obj:`int`): Unique identifier for this user or bot. 32 is_bot (:obj:`bool`): True, if this user is a bot 33 first_name (:obj:`str`): User's or bot's first name. 34 last_name (:obj:`str`): Optional. User's or bot's last name. 35 username (:obj:`str`): Optional. User's or bot's username. 36 language_code (:obj:`str`): Optional. 
IETF language tag of the user's language. 37 bot (:class:`telegram.Bot`): Optional. The Bot to use for instance methods. 38 39 Args: 40 id (:obj:`int`): Unique identifier for this user or bot. 41 is_bot (:obj:`bool`): True, if this user is a bot 42 first_name (:obj:`str`): User's or bot's first name. 43 last_name (:obj:`str`, optional): User's or bot's last name. 44 username (:obj:`str`, optional): User's or bot's username. 45 language_code (:obj:`str`, optional): IETF language tag of the user's language. 46 bot (:class:`telegram.Bot`, optional): The Bot to use for instance methods. 47 48 """ 49 50 def __init__(self, 51 id, 52 first_name, 53 is_bot, 54 last_name=None, 55 username=None, 56 language_code=None, 57 bot=None, 58 **kwargs): 59 # Required 60 self.id = int(id) 61 self.first_name = first_name 62 self.is_bot = is_bot 63 # Optionals 64 self.last_name = last_name 65 self.username = username 66 self.language_code = language_code 67 68 self.bot = bot 69 70 self._id_attrs = (self.id,) 71 72 @property 73 def name(self): 74 """ 75 :obj:`str`: Convenience property. If available, returns the user's :attr:`username` 76 prefixed with "@". If :attr:`username` is not available, returns :attr:`full_name`. 77 78 """ 79 if self.username: 80 return '@{}'.format(self.username) 81 return self.full_name 82 83 @property 84 def full_name(self): 85 """ 86 :obj:`str`: Convenience property. The user's :attr:`first_name`, followed by (if available) 87 :attr:`last_name`. 88 89 """ 90 if self.last_name: 91 return '{} {}'.format(self.first_name, self.last_name) 92 return self.first_name 93 94 @classmethod 95 def de_json(cls, data, bot): 96 if not data: 97 return None 98 99 data = super(User, cls).de_json(data, bot) 100 101 return cls(bot=bot, **data) 102 103 def get_profile_photos(self, *args, **kwargs): 104 """ 105 Shortcut for:: 106 107 bot.get_user_profile_photos(update.message.from_user.id, *args, **kwargs) 108 109 """ 110 111 return self.bot.get_user_profile_photos(self.id, *args, **kwargs) 112 113 @classmethod 114 def de_list(cls, data, bot): 115 if not data: 116 return [] 117 118 users = list() 119 for user in data: 120 users.append(cls.de_json(user, bot)) 121 122 return users 123 124 def mention_markdown(self, name=None): 125 """ 126 Args: 127 name (:obj:`str`): If provided, will overwrite the user's name. 128 129 Returns: 130 :obj:`str`: The inline mention for the user as markdown. 131 """ 132 if not name: 133 return util_mention_markdown(self.id, self.name) 134 else: 135 return util_mention_markdown(self.id, name) 136 137 def mention_html(self, name=None): 138 """ 139 Args: 140 name (:obj:`str`): If provided, will overwrite the user's name. 141 142 Returns: 143 :obj:`str`: The inline mention for the user as HTML. 144 """ 145 if not name: 146 return util_mention_html(self.id, self.name) 147 else: 148 return util_mention_html(self.id, name) 149 150 def send_message(self, *args, **kwargs): 151 """Shortcut for:: 152 153 bot.send_message(User.chat_id, *args, **kwargs) 154 155 Where User is the current instance. 156 157 Returns: 158 :class:`telegram.Message`: On success, instance representing the message posted. 159 160 """ 161 return self.bot.send_message(self.id, *args, **kwargs) 162 163 def send_photo(self, *args, **kwargs): 164 """Shortcut for:: 165 166 bot.send_photo(User.chat_id, *args, **kwargs) 167 168 Where User is the current instance. 169 170 Returns: 171 :class:`telegram.Message`: On success, instance representing the message posted. 
172 173 """ 174 return self.bot.send_photo(self.id, *args, **kwargs) 175 176 def send_audio(self, *args, **kwargs): 177 """Shortcut for:: 178 179 bot.send_audio(User.chat_id, *args, **kwargs) 180 181 Where User is the current instance. 182 183 Returns: 184 :class:`telegram.Message`: On success, instance representing the message posted. 185 186 """ 187 return self.bot.send_audio(self.id, *args, **kwargs) 188 189 def send_document(self, *args, **kwargs): 190 """Shortcut for:: 191 192 bot.send_document(User.chat_id, *args, **kwargs) 193 194 Where User is the current instance. 195 196 Returns: 197 :class:`telegram.Message`: On success, instance representing the message posted. 198 199 """ 200 return self.bot.send_document(self.id, *args, **kwargs) 201 202 def send_sticker(self, *args, **kwargs): 203 """Shortcut for:: 204 205 bot.send_sticker(User.chat_id, *args, **kwargs) 206 207 Where User is the current instance. 208 209 Returns: 210 :class:`telegram.Message`: On success, instance representing the message posted. 211 212 """ 213 return self.bot.send_sticker(self.id, *args, **kwargs) 214 215 def send_video(self, *args, **kwargs): 216 """Shortcut for:: 217 218 bot.send_video(User.chat_id, *args, **kwargs) 219 220 Where User is the current instance. 221 222 Returns: 223 :class:`telegram.Message`: On success, instance representing the message posted. 224 225 """ 226 return self.bot.send_video(self.id, *args, **kwargs) 227 228 def send_video_note(self, *args, **kwargs): 229 """Shortcut for:: 230 231 bot.send_video_note(User.chat_id, *args, **kwargs) 232 233 Where User is the current instance. 234 235 Returns: 236 :class:`telegram.Message`: On success, instance representing the message posted. 237 238 """ 239 return self.bot.send_video_note(self.id, *args, **kwargs) 240 241 def send_voice(self, *args, **kwargs): 242 """Shortcut for:: 243 244 bot.send_voice(User.chat_id, *args, **kwargs) 245 246 Where User is the current instance. 247 248 Returns: 249 :class:`telegram.Message`: On success, instance representing the message posted. 250 251 """ 252 return self.bot.send_voice(self.id, *args, **kwargs) 253 [end of telegram/user.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/telegram/user.py b/telegram/user.py --- a/telegram/user.py +++ b/telegram/user.py @@ -88,7 +88,7 @@ """ if self.last_name: - return '{} {}'.format(self.first_name, self.last_name) + return u'{} {}'.format(self.first_name, self.last_name) return self.first_name @classmethod
{"golden_diff": "diff --git a/telegram/user.py b/telegram/user.py\n--- a/telegram/user.py\n+++ b/telegram/user.py\n@@ -88,7 +88,7 @@\n \n \"\"\"\n if self.last_name:\n- return '{} {}'.format(self.first_name, self.last_name)\n+ return u'{} {}'.format(self.first_name, self.last_name)\n return self.first_name\n \n @classmethod\n", "issue": "User.full_name doesn't handle non-ASCII (in Python 2?)\n### Steps to reproduce\r\n```python\r\nupdater = ext.Updater(token=settings.telegram_token())\r\ndef F(bot, update):\r\n user = update.effective_user\r\n print repr(user.first_name), repr(user.last_name)\r\n print '%s %s' % (user.first_name, user.last_name)\r\n print user.full_name\r\n\r\nupdater.dispatcher.add_handler(ext.MessageHandler(0, F))\r\nupdater.start_polling()\r\nupdater.idle()\r\n```\r\n\r\n### Expected behaviour\r\n```\r\nu'Dan\\u2022iel' u'Reed'\r\nDan\u2022iel Reed\r\nDan\u2022iel Reed\r\n```\r\n\r\n### Actual behaviour\r\n```\r\nu'Dan\\u2022iel' u'Reed'\r\nDan\u2022iel Reed\r\nERROR dispatcher.py:301] An uncaught error was raised while processing the update\r\nTraceback (most recent call last):\r\n File \"local/lib/python2.7/site-packages/telegram/ext/dispatcher.py\", line 279, in process_update\r\n handler.handle_update(update, self)\r\n File \"local/lib/python2.7/site-packages/telegram/ext/messagehandler.py\", line 169, in handle_update\r\n return self.callback(dispatcher.bot, update, **optional_args)\r\n File \"<stdin>\", line 5, in F\r\n File \"local/lib/python2.7/site-packages/telegram/user.py\", line 91, in full_name\r\n return '{} {}'.format(self.first_name, self.last_name)\r\nUnicodeEncodeError: 'ascii' codec can't encode character u'\\u2022' in position 3: ordinal not in range(128)\r\n```\r\n\r\n### Configuration\r\n**Operating System:**\r\n\r\n\r\n**Version of Python, python-telegram-bot & dependencies:**\r\n\r\n```\r\npython-telegram-bot 10.0.1\r\ncertifi 2018.01.18\r\nfuture 0.16.0\r\nPython 2.7.14 (default, Sep 23 2017, 22:06:14) [GCC 7.2.0]\r\n```\r\n\r\nI'm a little rushed, but this is works for me:\r\n\r\n```python\r\n @property\r\n def full_name(self):\r\n \"\"\"\r\n :obj:`str`: Convenience property. The user's :attr:`first_name`, followed by (if available)\r\n :attr:`last_name`.\r\n\r\n \"\"\"\r\n if self.last_name:\r\n! return u'{} {}'.format(self.first_name, self.last_name)\r\n return self.first_name\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python\n# pylint: disable=C0103,W0622\n#\n# A library that provides a Python interface to the Telegram Bot API\n# Copyright (C) 2015-2018\n# Leandro Toledo de Souza <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser Public License for more details.\n#\n# You should have received a copy of the GNU Lesser Public License\n# along with this program. 
If not, see [http://www.gnu.org/licenses/].\n\"\"\"This module contains an object that represents a Telegram User.\"\"\"\n\nfrom telegram import TelegramObject\nfrom telegram.utils.helpers import mention_html as util_mention_html\nfrom telegram.utils.helpers import mention_markdown as util_mention_markdown\n\n\nclass User(TelegramObject):\n \"\"\"This object represents a Telegram user or bot.\n\n Attributes:\n id (:obj:`int`): Unique identifier for this user or bot.\n is_bot (:obj:`bool`): True, if this user is a bot\n first_name (:obj:`str`): User's or bot's first name.\n last_name (:obj:`str`): Optional. User's or bot's last name.\n username (:obj:`str`): Optional. User's or bot's username.\n language_code (:obj:`str`): Optional. IETF language tag of the user's language.\n bot (:class:`telegram.Bot`): Optional. The Bot to use for instance methods.\n\n Args:\n id (:obj:`int`): Unique identifier for this user or bot.\n is_bot (:obj:`bool`): True, if this user is a bot\n first_name (:obj:`str`): User's or bot's first name.\n last_name (:obj:`str`, optional): User's or bot's last name.\n username (:obj:`str`, optional): User's or bot's username.\n language_code (:obj:`str`, optional): IETF language tag of the user's language.\n bot (:class:`telegram.Bot`, optional): The Bot to use for instance methods.\n\n \"\"\"\n\n def __init__(self,\n id,\n first_name,\n is_bot,\n last_name=None,\n username=None,\n language_code=None,\n bot=None,\n **kwargs):\n # Required\n self.id = int(id)\n self.first_name = first_name\n self.is_bot = is_bot\n # Optionals\n self.last_name = last_name\n self.username = username\n self.language_code = language_code\n\n self.bot = bot\n\n self._id_attrs = (self.id,)\n\n @property\n def name(self):\n \"\"\"\n :obj:`str`: Convenience property. If available, returns the user's :attr:`username`\n prefixed with \"@\". If :attr:`username` is not available, returns :attr:`full_name`.\n\n \"\"\"\n if self.username:\n return '@{}'.format(self.username)\n return self.full_name\n\n @property\n def full_name(self):\n \"\"\"\n :obj:`str`: Convenience property. 
The user's :attr:`first_name`, followed by (if available)\n :attr:`last_name`.\n\n \"\"\"\n if self.last_name:\n return '{} {}'.format(self.first_name, self.last_name)\n return self.first_name\n\n @classmethod\n def de_json(cls, data, bot):\n if not data:\n return None\n\n data = super(User, cls).de_json(data, bot)\n\n return cls(bot=bot, **data)\n\n def get_profile_photos(self, *args, **kwargs):\n \"\"\"\n Shortcut for::\n\n bot.get_user_profile_photos(update.message.from_user.id, *args, **kwargs)\n\n \"\"\"\n\n return self.bot.get_user_profile_photos(self.id, *args, **kwargs)\n\n @classmethod\n def de_list(cls, data, bot):\n if not data:\n return []\n\n users = list()\n for user in data:\n users.append(cls.de_json(user, bot))\n\n return users\n\n def mention_markdown(self, name=None):\n \"\"\"\n Args:\n name (:obj:`str`): If provided, will overwrite the user's name.\n\n Returns:\n :obj:`str`: The inline mention for the user as markdown.\n \"\"\"\n if not name:\n return util_mention_markdown(self.id, self.name)\n else:\n return util_mention_markdown(self.id, name)\n\n def mention_html(self, name=None):\n \"\"\"\n Args:\n name (:obj:`str`): If provided, will overwrite the user's name.\n\n Returns:\n :obj:`str`: The inline mention for the user as HTML.\n \"\"\"\n if not name:\n return util_mention_html(self.id, self.name)\n else:\n return util_mention_html(self.id, name)\n\n def send_message(self, *args, **kwargs):\n \"\"\"Shortcut for::\n\n bot.send_message(User.chat_id, *args, **kwargs)\n\n Where User is the current instance.\n\n Returns:\n :class:`telegram.Message`: On success, instance representing the message posted.\n\n \"\"\"\n return self.bot.send_message(self.id, *args, **kwargs)\n\n def send_photo(self, *args, **kwargs):\n \"\"\"Shortcut for::\n\n bot.send_photo(User.chat_id, *args, **kwargs)\n\n Where User is the current instance.\n\n Returns:\n :class:`telegram.Message`: On success, instance representing the message posted.\n\n \"\"\"\n return self.bot.send_photo(self.id, *args, **kwargs)\n\n def send_audio(self, *args, **kwargs):\n \"\"\"Shortcut for::\n\n bot.send_audio(User.chat_id, *args, **kwargs)\n\n Where User is the current instance.\n\n Returns:\n :class:`telegram.Message`: On success, instance representing the message posted.\n\n \"\"\"\n return self.bot.send_audio(self.id, *args, **kwargs)\n\n def send_document(self, *args, **kwargs):\n \"\"\"Shortcut for::\n\n bot.send_document(User.chat_id, *args, **kwargs)\n\n Where User is the current instance.\n\n Returns:\n :class:`telegram.Message`: On success, instance representing the message posted.\n\n \"\"\"\n return self.bot.send_document(self.id, *args, **kwargs)\n\n def send_sticker(self, *args, **kwargs):\n \"\"\"Shortcut for::\n\n bot.send_sticker(User.chat_id, *args, **kwargs)\n\n Where User is the current instance.\n\n Returns:\n :class:`telegram.Message`: On success, instance representing the message posted.\n\n \"\"\"\n return self.bot.send_sticker(self.id, *args, **kwargs)\n\n def send_video(self, *args, **kwargs):\n \"\"\"Shortcut for::\n\n bot.send_video(User.chat_id, *args, **kwargs)\n\n Where User is the current instance.\n\n Returns:\n :class:`telegram.Message`: On success, instance representing the message posted.\n\n \"\"\"\n return self.bot.send_video(self.id, *args, **kwargs)\n\n def send_video_note(self, *args, **kwargs):\n \"\"\"Shortcut for::\n\n bot.send_video_note(User.chat_id, *args, **kwargs)\n\n Where User is the current instance.\n\n Returns:\n :class:`telegram.Message`: On success, instance 
representing the message posted.\n\n \"\"\"\n return self.bot.send_video_note(self.id, *args, **kwargs)\n\n def send_voice(self, *args, **kwargs):\n \"\"\"Shortcut for::\n\n bot.send_voice(User.chat_id, *args, **kwargs)\n\n Where User is the current instance.\n\n Returns:\n :class:`telegram.Message`: On success, instance representing the message posted.\n\n \"\"\"\n return self.bot.send_voice(self.id, *args, **kwargs)\n", "path": "telegram/user.py"}]}
num_tokens_prompt: 3,514
num_tokens_diff: 89
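The one-character golden diff (`'{} {}'` to `u'{} {}'`) is exactly the fix the reporter sketched in the issue. The traceback explains why it is enough: under Python 2, formatting a unicode value containing non-ASCII characters into a byte-string template forces an implicit ASCII encode, which fails on `u'\u2022'`. A minimal Python 2 reproduction using the name from the report:

```python
# -*- coding: utf-8 -*-
# Python 2 reproduction sketch of the full_name bug the golden diff fixes.
first_name, last_name = u"Dan\u2022iel", u"Reed"

ok = u"{} {}".format(first_name, last_name)   # u'Dan\u2022iel Reed'
bad = "{} {}".format(first_name, last_name)   # UnicodeEncodeError: 'ascii' codec ...
```

Python 3 does not distinguish the two templates, which is why the bug only shows up for Python 2 users of the library.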
problem_id: gh_patches_debug_2252
source: rasdani/github-patches
task_type: git_diff
in_source_id: fonttools__fonttools-337
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> I find the font's line height is bigger than original font. I have tried pyftsubset with command line option --no-recalc-bounds , but the generated subfont's line height is still bigger than original font. I used html font-face render font. @font-face { font-family: 'freetype'; src: url('font.ttf') format('truetype'); } the font file is microsoft chinese liti.ttf. </issue> <code> [start of Lib/fontTools/ttLib/tables/_v_h_e_a.py] 1 from __future__ import print_function, division, absolute_import 2 from fontTools.misc.py23 import * 3 from fontTools.misc import sstruct 4 from fontTools.misc.textTools import safeEval 5 from . import DefaultTable 6 7 vheaFormat = """ 8 > # big endian 9 tableVersion: 16.16F 10 ascent: h 11 descent: h 12 lineGap: h 13 advanceHeightMax: H 14 minTopSideBearing: h 15 minBottomSideBearing: h 16 yMaxExtent: h 17 caretSlopeRise: h 18 caretSlopeRun: h 19 reserved0: h 20 reserved1: h 21 reserved2: h 22 reserved3: h 23 reserved4: h 24 metricDataFormat: h 25 numberOfVMetrics: H 26 """ 27 28 class table__v_h_e_a(DefaultTable.DefaultTable): 29 30 # Note: Keep in sync with table__h_h_e_a 31 32 dependencies = ['vmtx', 'glyf'] 33 34 def decompile(self, data, ttFont): 35 sstruct.unpack(vheaFormat, data, self) 36 37 def compile(self, ttFont): 38 self.recalc(ttFont) 39 return sstruct.pack(vheaFormat, self) 40 41 def recalc(self, ttFont): 42 vtmxTable = ttFont['vmtx'] 43 if 'glyf' in ttFont: 44 glyfTable = ttFont['glyf'] 45 INFINITY = 100000 46 advanceHeightMax = 0 47 minTopSideBearing = +INFINITY # arbitrary big number 48 minBottomSideBearing = +INFINITY # arbitrary big number 49 yMaxExtent = -INFINITY # arbitrary big negative number 50 51 for name in ttFont.getGlyphOrder(): 52 height, tsb = vtmxTable[name] 53 advanceHeightMax = max(advanceHeightMax, height) 54 g = glyfTable[name] 55 if g.numberOfContours == 0: 56 continue 57 if g.numberOfContours < 0 and not hasattr(g, "yMax"): 58 # Composite glyph without extents set. 59 # Calculate those. 60 g.recalcBounds(glyfTable) 61 minTopSideBearing = min(minTopSideBearing, tsb) 62 bsb = height - tsb - (g.yMax - g.yMin) 63 minBottomSideBearing = min(minBottomSideBearing, bsb) 64 extent = tsb + (g.yMax - g.yMin) 65 yMaxExtent = max(yMaxExtent, extent) 66 67 if yMaxExtent == -INFINITY: 68 # No glyph has outlines. 69 minTopSideBearing = 0 70 minBottomSideBearing = 0 71 yMaxExtent = 0 72 73 self.advanceHeightMax = advanceHeightMax 74 self.minTopSideBearing = minTopSideBearing 75 self.minBottomSideBearing = minBottomSideBearing 76 self.yMaxExtent = yMaxExtent 77 else: 78 # XXX CFF recalc... 79 pass 80 81 def toXML(self, writer, ttFont): 82 formatstring, names, fixes = sstruct.getformat(vheaFormat) 83 for name in names: 84 value = getattr(self, name) 85 writer.simpletag(name, value=value) 86 writer.newline() 87 88 def fromXML(self, name, attrs, content, ttFont): 89 setattr(self, name, safeEval(attrs["value"])) 90 [end of Lib/fontTools/ttLib/tables/_v_h_e_a.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/Lib/fontTools/ttLib/tables/_v_h_e_a.py b/Lib/fontTools/ttLib/tables/_v_h_e_a.py --- a/Lib/fontTools/ttLib/tables/_v_h_e_a.py +++ b/Lib/fontTools/ttLib/tables/_v_h_e_a.py @@ -35,7 +35,8 @@ sstruct.unpack(vheaFormat, data, self) def compile(self, ttFont): - self.recalc(ttFont) + if ttFont.isLoaded('glyf') and ttFont.recalcBBoxes: + self.recalc(ttFont) return sstruct.pack(vheaFormat, self) def recalc(self, ttFont):
{"golden_diff": "diff --git a/Lib/fontTools/ttLib/tables/_v_h_e_a.py b/Lib/fontTools/ttLib/tables/_v_h_e_a.py\n--- a/Lib/fontTools/ttLib/tables/_v_h_e_a.py\n+++ b/Lib/fontTools/ttLib/tables/_v_h_e_a.py\n@@ -35,7 +35,8 @@\n \t\tsstruct.unpack(vheaFormat, data, self)\n \n \tdef compile(self, ttFont):\n-\t\tself.recalc(ttFont)\n+\t\tif ttFont.isLoaded('glyf') and ttFont.recalcBBoxes:\n+\t\t\tself.recalc(ttFont)\n \t\treturn sstruct.pack(vheaFormat, self)\n \n \tdef recalc(self, ttFont):\n", "issue": "I find the font's line height is bigger than original font.\n I have tried pyftsubset with command line option --no-recalc-bounds ,\nbut the generated subfont's line height is still bigger than original font.\n\nI used html font-face render font.\n@font-face {\n font-family: 'freetype';\n src: url('font.ttf') format('truetype');\n }\n\nthe font file is microsoft chinese liti.ttf.\n\n", "before_files": [{"content": "from __future__ import print_function, division, absolute_import\nfrom fontTools.misc.py23 import *\nfrom fontTools.misc import sstruct\nfrom fontTools.misc.textTools import safeEval\nfrom . import DefaultTable\n\nvheaFormat = \"\"\"\n\t\t>\t# big endian\n\t\ttableVersion:\t\t16.16F\n\t\tascent:\t\t\th\n\t\tdescent:\t\th\n\t\tlineGap:\t\th\n\t\tadvanceHeightMax:\tH\n\t\tminTopSideBearing:\th\n\t\tminBottomSideBearing:\th\n\t\tyMaxExtent:\t\th\n\t\tcaretSlopeRise:\t\th\n\t\tcaretSlopeRun:\t\th\n\t\treserved0:\t\th\n\t\treserved1:\t\th\n\t\treserved2:\t\th\n\t\treserved3:\t\th\n\t\treserved4:\t\th\n\t\tmetricDataFormat:\th\n\t\tnumberOfVMetrics:\tH\n\"\"\"\n\nclass table__v_h_e_a(DefaultTable.DefaultTable):\n\n\t# Note: Keep in sync with table__h_h_e_a\n\n\tdependencies = ['vmtx', 'glyf']\n\n\tdef decompile(self, data, ttFont):\n\t\tsstruct.unpack(vheaFormat, data, self)\n\n\tdef compile(self, ttFont):\n\t\tself.recalc(ttFont)\n\t\treturn sstruct.pack(vheaFormat, self)\n\n\tdef recalc(self, ttFont):\n\t\tvtmxTable = ttFont['vmtx']\n\t\tif 'glyf' in ttFont:\n\t\t\tglyfTable = ttFont['glyf']\n\t\t\tINFINITY = 100000\n\t\t\tadvanceHeightMax = 0\n\t\t\tminTopSideBearing = +INFINITY # arbitrary big number\n\t\t\tminBottomSideBearing = +INFINITY # arbitrary big number\n\t\t\tyMaxExtent = -INFINITY # arbitrary big negative number\n\n\t\t\tfor name in ttFont.getGlyphOrder():\n\t\t\t\theight, tsb = vtmxTable[name]\n\t\t\t\tadvanceHeightMax = max(advanceHeightMax, height)\n\t\t\t\tg = glyfTable[name]\n\t\t\t\tif g.numberOfContours == 0:\n\t\t\t\t\tcontinue\n\t\t\t\tif g.numberOfContours < 0 and not hasattr(g, \"yMax\"):\n\t\t\t\t\t# Composite glyph without extents set.\n\t\t\t\t\t# Calculate those.\n\t\t\t\t\tg.recalcBounds(glyfTable)\n\t\t\t\tminTopSideBearing = min(minTopSideBearing, tsb)\n\t\t\t\tbsb = height - tsb - (g.yMax - g.yMin)\n\t\t\t\tminBottomSideBearing = min(minBottomSideBearing, bsb)\n\t\t\t\textent = tsb + (g.yMax - g.yMin)\n\t\t\t\tyMaxExtent = max(yMaxExtent, extent)\n\n\t\t\tif yMaxExtent == -INFINITY:\n\t\t\t\t# No glyph has outlines.\n\t\t\t\tminTopSideBearing = 0\n\t\t\t\tminBottomSideBearing = 0\n\t\t\t\tyMaxExtent = 0\n\n\t\t\tself.advanceHeightMax = advanceHeightMax\n\t\t\tself.minTopSideBearing = minTopSideBearing\n\t\t\tself.minBottomSideBearing = minBottomSideBearing\n\t\t\tself.yMaxExtent = yMaxExtent\n\t\telse:\n\t\t\t# XXX CFF recalc...\n\t\t\tpass\n\n\tdef toXML(self, writer, ttFont):\n\t\tformatstring, names, fixes = sstruct.getformat(vheaFormat)\n\t\tfor name in names:\n\t\t\tvalue = getattr(self, name)\n\t\t\twriter.simpletag(name, 
value=value)\n\t\t\twriter.newline()\n\n\tdef fromXML(self, name, attrs, content, ttFont):\n\t\tsetattr(self, name, safeEval(attrs[\"value\"]))\n", "path": "Lib/fontTools/ttLib/tables/_v_h_e_a.py"}]}
1,601
161
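The fonttools fix above makes the `vhea` table honor the font's `recalcBBoxes` flag: vertical metrics are only recalculated at compile time when the `glyf` table is loaded and recalculation was actually requested. A minimal sketch of relying on that flag when loading and saving a font (the file names are assumptions, not taken from the issue):

```python
from fontTools.ttLib import TTFont

# With recalcBBoxes=False, tables such as vhea/hhea keep the values stored
# in the source font instead of being recomputed from the glyph outlines,
# so the saved font's line height matches the original.
font = TTFont("liti.ttf", recalcBBoxes=False)
font.save("liti-out.ttf")
```

This is the behavior the reporter expected from `pyftsubset --no-recalc-bounds`, which the unconditional `self.recalc(ttFont)` in the old `compile` method was overriding.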
gh_patches_debug_22699
rasdani/github-patches
git_diff
svthalia__concrexit-3592
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Prevent full disk ### Describe the bug Sometimes the server's storage gets full, because for some reason filepond uploads aren't being deleted. Today this caused the server to crash (because the full server disk broke redis). We should prevent this from happening in multiple ways: - Make old uploads be deleted. Would be nice to find out why the uploads aren't being deleted already. But we should also (additionally) periodically remove old files from the media volume. - Maybe limit the volume size such that it getting full does not influence the rest of the server. But docker doesn't really support that nicely. We could make a separate volume for it on the host and bind-mount it I guess. ### How to reproduce <!-- Steps to reproduce the behaviour --> 1. Upload lots of albums to a docker deployment 2. See the media volume get larger. ### Expected behaviour Stuff is cleaned up once it's processed and periodically. </issue> <code> [start of website/photos/tasks.py] 1 from django.db import transaction 2 from django.dispatch import Signal 3 4 from celery import shared_task 5 from django_drf_filepond.models import TemporaryUpload 6 from django_filepond_widget.fields import FilePondFile 7 8 from photos.models import Album 9 10 from .services import extract_archive 11 12 album_uploaded = Signal() 13 14 15 @shared_task 16 def process_album_upload(archive_upload_id: str, album_id: int): 17 try: 18 album = Album.objects.get(id=album_id) 19 except Album.DoesNotExist: 20 return 21 22 archive = TemporaryUpload.objects.get(upload_id=archive_upload_id).file 23 try: 24 with transaction.atomic(): 25 # We make the upload atomic separately, so we can keep using the db if it fails. 26 # See https://docs.djangoproject.com/en/4.2/topics/db/transactions/#handling-exceptions-within-postgresql-transactions. 27 extract_archive(album, archive) 28 album.is_processing = False 29 album.save() 30 31 # Send signal to notify that an album has been uploaded. This is used 32 # by facedetection, and possibly in the future to notify the uploader. 33 album_uploaded.send(sender=None, album=album) 34 finally: 35 if isinstance(archive, FilePondFile): 36 archive.remove() 37 [end of website/photos/tasks.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/website/photos/tasks.py b/website/photos/tasks.py --- a/website/photos/tasks.py +++ b/website/photos/tasks.py @@ -3,7 +3,6 @@ from celery import shared_task from django_drf_filepond.models import TemporaryUpload -from django_filepond_widget.fields import FilePondFile from photos.models import Album @@ -19,7 +18,8 @@ except Album.DoesNotExist: return - archive = TemporaryUpload.objects.get(upload_id=archive_upload_id).file + upload = TemporaryUpload.objects.get(upload_id=archive_upload_id) + archive = upload.file try: with transaction.atomic(): # We make the upload atomic separately, so we can keep using the db if it fails. @@ -32,5 +32,5 @@ # by facedetection, and possibly in the future to notify the uploader. album_uploaded.send(sender=None, album=album) finally: - if isinstance(archive, FilePondFile): - archive.remove() + archive.delete() + upload.delete()
{"golden_diff": "diff --git a/website/photos/tasks.py b/website/photos/tasks.py\n--- a/website/photos/tasks.py\n+++ b/website/photos/tasks.py\n@@ -3,7 +3,6 @@\n \n from celery import shared_task\n from django_drf_filepond.models import TemporaryUpload\n-from django_filepond_widget.fields import FilePondFile\n \n from photos.models import Album\n \n@@ -19,7 +18,8 @@\n except Album.DoesNotExist:\n return\n \n- archive = TemporaryUpload.objects.get(upload_id=archive_upload_id).file\n+ upload = TemporaryUpload.objects.get(upload_id=archive_upload_id)\n+ archive = upload.file\n try:\n with transaction.atomic():\n # We make the upload atomic separately, so we can keep using the db if it fails.\n@@ -32,5 +32,5 @@\n # by facedetection, and possibly in the future to notify the uploader.\n album_uploaded.send(sender=None, album=album)\n finally:\n- if isinstance(archive, FilePondFile):\n- archive.remove()\n+ archive.delete()\n+ upload.delete()\n", "issue": "Prevent full disk\n### Describe the bug\r\nSometimes the server's storage gets full, because for some reason filepond uploads aren't being deleted. Today this caused the server to crash (because the full server disk broke redis). We should prevent this from happening in multiple ways:\r\n\r\n- Make old uploads be deleted. Would be nice to find out why the uploads aren't being deleted already. But we should also (additionally) periodically remove old files from the media volume.\r\n- Maybe limit the volume size such that it getting full does not influence the rest of the server. But docker doesn't really support that nicely. We could make a separate volume for it on the host and bind-mount it I guess.\r\n\r\n### How to reproduce\r\n<!-- Steps to reproduce the behaviour -->\r\n1. Upload lots of albums to a docker deployment\r\n2. See the media volume get larger.\r\n\r\n### Expected behaviour\r\nStuff is cleaned up once it's processed and periodically.\r\n\r\n\r\n\n", "before_files": [{"content": "from django.db import transaction\nfrom django.dispatch import Signal\n\nfrom celery import shared_task\nfrom django_drf_filepond.models import TemporaryUpload\nfrom django_filepond_widget.fields import FilePondFile\n\nfrom photos.models import Album\n\nfrom .services import extract_archive\n\nalbum_uploaded = Signal()\n\n\n@shared_task\ndef process_album_upload(archive_upload_id: str, album_id: int):\n try:\n album = Album.objects.get(id=album_id)\n except Album.DoesNotExist:\n return\n\n archive = TemporaryUpload.objects.get(upload_id=archive_upload_id).file\n try:\n with transaction.atomic():\n # We make the upload atomic separately, so we can keep using the db if it fails.\n # See https://docs.djangoproject.com/en/4.2/topics/db/transactions/#handling-exceptions-within-postgresql-transactions.\n extract_archive(album, archive)\n album.is_processing = False\n album.save()\n\n # Send signal to notify that an album has been uploaded. This is used\n # by facedetection, and possibly in the future to notify the uploader.\n album_uploaded.send(sender=None, album=album)\n finally:\n if isinstance(archive, FilePondFile):\n archive.remove()\n", "path": "website/photos/tasks.py"}]}
1,050
234
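The concrexit patch above deletes both the stored archive file and its `TemporaryUpload` row when the processing task finishes, whether or not extraction succeeded. The issue also asks for periodic cleanup of uploads that never reach that point; a sketch of such a task is below — the `uploaded` timestamp field name and the retention window are assumptions, not part of the patch:

```python
from datetime import timedelta

from celery import shared_task
from django.utils import timezone
from django_drf_filepond.models import TemporaryUpload


@shared_task
def clean_stale_uploads(max_age_hours: int = 24) -> None:
    """Delete filepond uploads older than ``max_age_hours`` so orphaned
    archives cannot slowly fill the media volume."""
    cutoff = timezone.now() - timedelta(hours=max_age_hours)
    for upload in TemporaryUpload.objects.filter(uploaded__lt=cutoff):
        upload.file.delete()  # remove the file from storage
        upload.delete()       # remove the database record
```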
gh_patches_debug_16164
rasdani/github-patches
git_diff
mozilla__bugbug-1631
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Make spawn_pipeline not depend on the order of tasks in the yaml file Currently, if a task is defined in the yaml file before its dependencies, the spawn_pipeline script fails with: ``` Traceback (most recent call last): File "/code/spawn_pipeline.py", line 132, in <module> main() File "/code/spawn_pipeline.py", line 110, in main new_dependencies.append(id_mapping[dependency]) KeyError: 'regressor-finder' ``` So things like https://github.com/mozilla/bugbug/commit/aaa67b3b0a1db7530cbf88df644aff076fcd2e4e are needed. We should make the spawn_pipeline script not depend on the order of definition of tasks in the yaml file. </issue> <code> [start of infra/spawn_pipeline.py] 1 #!/bin/env python 2 # -*- coding: utf-8 -*- 3 # 4 # Copyright 2019 Mozilla 5 # 6 # Licensed under the Apache License, Version 2.0 (the "License"); 7 # you may not use this file except in compliance with the License. 8 # You may obtain a copy of the License at 9 # 10 # http://www.apache.org/licenses/LICENSE-2.0 11 # 12 # Unless required by applicable law or agreed to in writing, software 13 # distributed under the License is distributed on an "AS IS" BASIS, 14 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 # See the License for the specific language governing permissions and 16 # limitations under the License. 17 18 """ 19 This script triggers the data pipeline for the bugbug project 20 """ 21 22 import argparse 23 import os 24 import sys 25 26 import jsone 27 import requests.packages.urllib3 28 import taskcluster 29 import yaml 30 31 requests.packages.urllib3.disable_warnings() 32 33 TASKCLUSTER_DEFAULT_URL = "https://community-tc.services.mozilla.com" 34 35 36 def get_taskcluster_options(): 37 """ 38 Helper to get the Taskcluster setup options 39 according to current environment (local or Taskcluster) 40 """ 41 options = taskcluster.optionsFromEnvironment() 42 proxy_url = os.environ.get("TASKCLUSTER_PROXY_URL") 43 44 if proxy_url is not None: 45 # Always use proxy url when available 46 options["rootUrl"] = proxy_url 47 48 if "rootUrl" not in options: 49 # Always have a value in root url 50 options["rootUrl"] = TASKCLUSTER_DEFAULT_URL 51 52 return options 53 54 55 def main(): 56 parser = argparse.ArgumentParser(description="Spawn tasks for bugbug data pipeline") 57 parser.add_argument("data_pipeline_json") 58 59 args = parser.parse_args() 60 decision_task_id = os.environ.get("TASK_ID") 61 options = get_taskcluster_options() 62 add_self = False 63 if decision_task_id: 64 add_self = True 65 task_group_id = decision_task_id 66 else: 67 task_group_id = taskcluster.utils.slugId() 68 keys = {"taskGroupId": task_group_id} 69 70 id_mapping = {} 71 72 # First pass, do the template rendering and dependencies resolution 73 tasks = [] 74 75 with open(args.data_pipeline_json) as pipeline_file: 76 raw_tasks = yaml.safe_load(pipeline_file.read()) 77 78 version = os.getenv("TAG", "latest") 79 context = {"version": version} 80 rendered = jsone.render(raw_tasks, context) 81 82 for task in rendered["tasks"]: 83 # We need to generate new unique task ids for taskcluster to be happy 84 # but need to identify dependencies across tasks. 
So we create a 85 # mapping between an internal ID and the generate ID 86 87 task_id = taskcluster.utils.slugId() 88 task_internal_id = task.pop("ID") 89 90 if task_internal_id in id_mapping: 91 raise ValueError(f"Conflicting IDs {task_internal_id}") 92 93 id_mapping[task_internal_id] = task_id 94 95 for key, value in keys.items(): 96 task[key] = value 97 98 task_payload = task["payload"] 99 100 if "env" in task_payload and task_payload["env"]: 101 task_payload["env"]["TAG"] = version 102 else: 103 task_payload["env"] = { 104 "TAG": version, 105 } 106 107 # Process the dependencies 108 new_dependencies = [] 109 for dependency in task.get("dependencies", []): 110 new_dependencies.append(id_mapping[dependency]) 111 112 if add_self: 113 new_dependencies.append(decision_task_id) 114 115 task["dependencies"] = new_dependencies 116 117 tasks.append((task_id, task)) 118 119 # Now sends them 120 queue = taskcluster.Queue(options) 121 try: 122 for task_id, task_payload in tasks: 123 queue.createTask(task_id, task_payload) 124 125 print(f"https://community-tc.services.mozilla.com/tasks/groups/{task_group_id}") 126 except taskcluster.exceptions.TaskclusterAuthFailure as e: 127 print(f"TaskclusterAuthFailure: {e.body}", file=sys.stderr) 128 raise 129 130 131 if __name__ == "__main__": 132 main() 133 [end of infra/spawn_pipeline.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/infra/spawn_pipeline.py b/infra/spawn_pipeline.py --- a/infra/spawn_pipeline.py +++ b/infra/spawn_pipeline.py @@ -85,13 +85,19 @@ # mapping between an internal ID and the generate ID task_id = taskcluster.utils.slugId() - task_internal_id = task.pop("ID") + task_internal_id = task["ID"] if task_internal_id in id_mapping: raise ValueError(f"Conflicting IDs {task_internal_id}") + # Store each task ID in the id_mapping dictionary before processing dependencies. + # This way, tasks can be defined in any order. id_mapping[task_internal_id] = task_id + for task in rendered["tasks"]: + task_internal_id = task.pop("ID") + task_id = id_mapping[task_internal_id] + for key, value in keys.items(): task[key] = value
{"golden_diff": "diff --git a/infra/spawn_pipeline.py b/infra/spawn_pipeline.py\n--- a/infra/spawn_pipeline.py\n+++ b/infra/spawn_pipeline.py\n@@ -85,13 +85,19 @@\n # mapping between an internal ID and the generate ID\n \n task_id = taskcluster.utils.slugId()\n- task_internal_id = task.pop(\"ID\")\n+ task_internal_id = task[\"ID\"]\n \n if task_internal_id in id_mapping:\n raise ValueError(f\"Conflicting IDs {task_internal_id}\")\n \n+ # Store each task ID in the id_mapping dictionary before processing dependencies.\n+ # This way, tasks can be defined in any order.\n id_mapping[task_internal_id] = task_id\n \n+ for task in rendered[\"tasks\"]:\n+ task_internal_id = task.pop(\"ID\")\n+ task_id = id_mapping[task_internal_id]\n+\n for key, value in keys.items():\n task[key] = value\n", "issue": "Make spawn_pipeline not depend on the order of tasks in the yaml file\nCurrently, if a task is defined in the yaml file before its dependencies, the spawn_pipeline script fails with:\r\n```\r\nTraceback (most recent call last):\r\n File \"/code/spawn_pipeline.py\", line 132, in <module>\r\n main()\r\n File \"/code/spawn_pipeline.py\", line 110, in main\r\n new_dependencies.append(id_mapping[dependency])\r\nKeyError: 'regressor-finder'\r\n```\r\n\r\nSo things like https://github.com/mozilla/bugbug/commit/aaa67b3b0a1db7530cbf88df644aff076fcd2e4e are needed.\r\n\r\nWe should make the spawn_pipeline script not depend on the order of definition of tasks in the yaml file.\n", "before_files": [{"content": "#!/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright 2019 Mozilla\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThis script triggers the data pipeline for the bugbug project\n\"\"\"\n\nimport argparse\nimport os\nimport sys\n\nimport jsone\nimport requests.packages.urllib3\nimport taskcluster\nimport yaml\n\nrequests.packages.urllib3.disable_warnings()\n\nTASKCLUSTER_DEFAULT_URL = \"https://community-tc.services.mozilla.com\"\n\n\ndef get_taskcluster_options():\n \"\"\"\n Helper to get the Taskcluster setup options\n according to current environment (local or Taskcluster)\n \"\"\"\n options = taskcluster.optionsFromEnvironment()\n proxy_url = os.environ.get(\"TASKCLUSTER_PROXY_URL\")\n\n if proxy_url is not None:\n # Always use proxy url when available\n options[\"rootUrl\"] = proxy_url\n\n if \"rootUrl\" not in options:\n # Always have a value in root url\n options[\"rootUrl\"] = TASKCLUSTER_DEFAULT_URL\n\n return options\n\n\ndef main():\n parser = argparse.ArgumentParser(description=\"Spawn tasks for bugbug data pipeline\")\n parser.add_argument(\"data_pipeline_json\")\n\n args = parser.parse_args()\n decision_task_id = os.environ.get(\"TASK_ID\")\n options = get_taskcluster_options()\n add_self = False\n if decision_task_id:\n add_self = True\n task_group_id = decision_task_id\n else:\n task_group_id = taskcluster.utils.slugId()\n keys = {\"taskGroupId\": task_group_id}\n\n id_mapping = {}\n\n # First pass, do the template rendering and dependencies resolution\n tasks = []\n\n with 
open(args.data_pipeline_json) as pipeline_file:\n raw_tasks = yaml.safe_load(pipeline_file.read())\n\n version = os.getenv(\"TAG\", \"latest\")\n context = {\"version\": version}\n rendered = jsone.render(raw_tasks, context)\n\n for task in rendered[\"tasks\"]:\n # We need to generate new unique task ids for taskcluster to be happy\n # but need to identify dependencies across tasks. So we create a\n # mapping between an internal ID and the generate ID\n\n task_id = taskcluster.utils.slugId()\n task_internal_id = task.pop(\"ID\")\n\n if task_internal_id in id_mapping:\n raise ValueError(f\"Conflicting IDs {task_internal_id}\")\n\n id_mapping[task_internal_id] = task_id\n\n for key, value in keys.items():\n task[key] = value\n\n task_payload = task[\"payload\"]\n\n if \"env\" in task_payload and task_payload[\"env\"]:\n task_payload[\"env\"][\"TAG\"] = version\n else:\n task_payload[\"env\"] = {\n \"TAG\": version,\n }\n\n # Process the dependencies\n new_dependencies = []\n for dependency in task.get(\"dependencies\", []):\n new_dependencies.append(id_mapping[dependency])\n\n if add_self:\n new_dependencies.append(decision_task_id)\n\n task[\"dependencies\"] = new_dependencies\n\n tasks.append((task_id, task))\n\n # Now sends them\n queue = taskcluster.Queue(options)\n try:\n for task_id, task_payload in tasks:\n queue.createTask(task_id, task_payload)\n\n print(f\"https://community-tc.services.mozilla.com/tasks/groups/{task_group_id}\")\n except taskcluster.exceptions.TaskclusterAuthFailure as e:\n print(f\"TaskclusterAuthFailure: {e.body}\", file=sys.stderr)\n raise\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "infra/spawn_pipeline.py"}]}
1,883
208
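The essential change in the bugbug patch above is a two-pass scheme: every task's internal ID is assigned a generated task ID first, and only then are dependencies resolved, so the order of task definitions in the YAML file no longer matters. A stripped-down illustration of that pattern (function and key names are illustrative, not the actual script):

```python
import uuid


def resolve_tasks(tasks):
    # Pass 1: give every task an ID up front, regardless of its position.
    id_mapping = {task["ID"]: str(uuid.uuid4()) for task in tasks}

    # Pass 2: every dependency lookup is now guaranteed to succeed.
    for task in tasks:
        task["taskId"] = id_mapping[task.pop("ID")]
        task["dependencies"] = [
            id_mapping[dep] for dep in task.get("dependencies", [])
        ]
    return tasks


print(resolve_tasks([
    {"ID": "a", "dependencies": ["b"]},  # depends on a task defined later
    {"ID": "b"},
]))
```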
gh_patches_debug_11927
rasdani/github-patches
git_diff
pytorch__text-280
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> TypeError in Python 2.7 https://github.com/pytorch/text/blob/a2795e5731d1b7c0298a1b5087bb8142e1c39d0b/torchtext/datasets/imdb.py#L32 In python 2.7, it will report that `TypeError: 'encoding' is an invalid keyword argument for this function`. I replace `open` with `io.open` to fix it. </issue> <code> [start of torchtext/datasets/imdb.py] 1 import os 2 import glob 3 4 from .. import data 5 6 7 class IMDB(data.Dataset): 8 9 urls = ['http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz'] 10 name = 'imdb' 11 dirname = 'aclImdb' 12 13 @staticmethod 14 def sort_key(ex): 15 return len(ex.text) 16 17 def __init__(self, path, text_field, label_field, **kwargs): 18 """Create an IMDB dataset instance given a path and fields. 19 20 Arguments: 21 path: Path to the dataset's highest level directory 22 text_field: The field that will be used for text data. 23 label_field: The field that will be used for label data. 24 Remaining keyword arguments: Passed to the constructor of 25 data.Dataset. 26 """ 27 fields = [('text', text_field), ('label', label_field)] 28 examples = [] 29 30 for label in ['pos', 'neg']: 31 for fname in glob.iglob(os.path.join(path, label, '*.txt')): 32 with open(fname, 'r', encoding="utf-8") as f: 33 text = f.readline() 34 examples.append(data.Example.fromlist([text, label], fields)) 35 36 super(IMDB, self).__init__(examples, fields, **kwargs) 37 38 @classmethod 39 def splits(cls, text_field, label_field, root='.data', 40 train='train', test='test', **kwargs): 41 """Create dataset objects for splits of the IMDB dataset. 42 43 Arguments: 44 text_field: The field that will be used for the sentence. 45 label_field: The field that will be used for label data. 46 root: Root dataset storage directory. Default is '.data'. 47 train: The directory that contains the training examples 48 test: The directory that contains the test examples 49 Remaining keyword arguments: Passed to the splits method of 50 Dataset. 51 """ 52 return super(IMDB, cls).splits( 53 root=root, text_field=text_field, label_field=label_field, 54 train=train, validation=None, test=test, **kwargs) 55 56 @classmethod 57 def iters(cls, batch_size=32, device=0, root='.data', vectors=None, **kwargs): 58 """Creater iterator objects for splits of the IMDB dataset. 59 60 Arguments: 61 batch_size: Batch_size 62 device: Device to create batches on. Use - 1 for CPU and None for 63 the currently active GPU device. 64 root: The root directory that contains the imdb dataset subdirectory 65 vectors: one of the available pretrained vectors or a list with each 66 element one of the available pretrained vectors (see Vocab.load_vectors) 67 68 Remaining keyword arguments: Passed to the splits method. 69 """ 70 TEXT = data.Field() 71 LABEL = data.Field(sequential=False) 72 73 train, test = cls.splits(TEXT, LABEL, root=root, **kwargs) 74 75 TEXT.build_vocab(train, vectors=vectors) 76 LABEL.build_vocab(train) 77 78 return data.BucketIterator.splits( 79 (train, test), batch_size=batch_size, device=device) 80 [end of torchtext/datasets/imdb.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/torchtext/datasets/imdb.py b/torchtext/datasets/imdb.py --- a/torchtext/datasets/imdb.py +++ b/torchtext/datasets/imdb.py @@ -1,5 +1,6 @@ import os import glob +import io from .. import data @@ -29,7 +30,7 @@ for label in ['pos', 'neg']: for fname in glob.iglob(os.path.join(path, label, '*.txt')): - with open(fname, 'r', encoding="utf-8") as f: + with io.open(fname, 'r', encoding="utf-8") as f: text = f.readline() examples.append(data.Example.fromlist([text, label], fields))
{"golden_diff": "diff --git a/torchtext/datasets/imdb.py b/torchtext/datasets/imdb.py\n--- a/torchtext/datasets/imdb.py\n+++ b/torchtext/datasets/imdb.py\n@@ -1,5 +1,6 @@\n import os\n import glob\n+import io\n \n from .. import data\n \n@@ -29,7 +30,7 @@\n \n for label in ['pos', 'neg']:\n for fname in glob.iglob(os.path.join(path, label, '*.txt')):\n- with open(fname, 'r', encoding=\"utf-8\") as f:\n+ with io.open(fname, 'r', encoding=\"utf-8\") as f:\n text = f.readline()\n examples.append(data.Example.fromlist([text, label], fields))\n", "issue": "TypeError in Python 2.7\nhttps://github.com/pytorch/text/blob/a2795e5731d1b7c0298a1b5087bb8142e1c39d0b/torchtext/datasets/imdb.py#L32\r\n\r\nIn python 2.7, it will report that `TypeError: 'encoding' is an invalid keyword argument for this function`.\r\n\r\nI replace `open` with `io.open` to fix it.\n", "before_files": [{"content": "import os\nimport glob\n\nfrom .. import data\n\n\nclass IMDB(data.Dataset):\n\n urls = ['http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz']\n name = 'imdb'\n dirname = 'aclImdb'\n\n @staticmethod\n def sort_key(ex):\n return len(ex.text)\n\n def __init__(self, path, text_field, label_field, **kwargs):\n \"\"\"Create an IMDB dataset instance given a path and fields.\n\n Arguments:\n path: Path to the dataset's highest level directory\n text_field: The field that will be used for text data.\n label_field: The field that will be used for label data.\n Remaining keyword arguments: Passed to the constructor of\n data.Dataset.\n \"\"\"\n fields = [('text', text_field), ('label', label_field)]\n examples = []\n\n for label in ['pos', 'neg']:\n for fname in glob.iglob(os.path.join(path, label, '*.txt')):\n with open(fname, 'r', encoding=\"utf-8\") as f:\n text = f.readline()\n examples.append(data.Example.fromlist([text, label], fields))\n\n super(IMDB, self).__init__(examples, fields, **kwargs)\n\n @classmethod\n def splits(cls, text_field, label_field, root='.data',\n train='train', test='test', **kwargs):\n \"\"\"Create dataset objects for splits of the IMDB dataset.\n\n Arguments:\n text_field: The field that will be used for the sentence.\n label_field: The field that will be used for label data.\n root: Root dataset storage directory. Default is '.data'.\n train: The directory that contains the training examples\n test: The directory that contains the test examples\n Remaining keyword arguments: Passed to the splits method of\n Dataset.\n \"\"\"\n return super(IMDB, cls).splits(\n root=root, text_field=text_field, label_field=label_field,\n train=train, validation=None, test=test, **kwargs)\n\n @classmethod\n def iters(cls, batch_size=32, device=0, root='.data', vectors=None, **kwargs):\n \"\"\"Creater iterator objects for splits of the IMDB dataset.\n\n Arguments:\n batch_size: Batch_size\n device: Device to create batches on. Use - 1 for CPU and None for\n the currently active GPU device.\n root: The root directory that contains the imdb dataset subdirectory\n vectors: one of the available pretrained vectors or a list with each\n element one of the available pretrained vectors (see Vocab.load_vectors)\n\n Remaining keyword arguments: Passed to the splits method.\n \"\"\"\n TEXT = data.Field()\n LABEL = data.Field(sequential=False)\n\n train, test = cls.splits(TEXT, LABEL, root=root, **kwargs)\n\n TEXT.build_vocab(train, vectors=vectors)\n LABEL.build_vocab(train)\n\n return data.BucketIterator.splits(\n (train, test), batch_size=batch_size, device=device)\n", "path": "torchtext/datasets/imdb.py"}]}
1,457
168
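The torchtext fix works because `io.open` accepts an `encoding` argument on Python 2.7 as well, whereas the Python 2 builtin `open` does not (on Python 3 the two are the same function). A minimal sketch of the portable pattern (the file name is an assumption):

```python
import io

# Valid on both Python 2.7 and Python 3: UTF-8 text, including
# non-ASCII characters, is decoded the same way on either interpreter.
with io.open("review.txt", "r", encoding="utf-8") as f:
    text = f.readline()
print(text)
```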
gh_patches_debug_12119
rasdani/github-patches
git_diff
sanic-org__sanic-647
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> what have done to static.py? On last Friday,everything is ok,my static file test works fine. Today,when I pip install sanic==0.5.1 It raise 404 error. when I pip install sanic==0.5.0 everything is ok again. seems like the code blow has some problem? if not file_path.startswith(root_path): raise FileNotFound('File not found', path=file_or_directory, relative_url=file_uri) </issue> <code> [start of sanic/static.py] 1 from mimetypes import guess_type 2 from os import path 3 from re import sub 4 from time import strftime, gmtime 5 from urllib.parse import unquote 6 7 from aiofiles.os import stat 8 9 from sanic.exceptions import ( 10 ContentRangeError, 11 FileNotFound, 12 HeaderNotFound, 13 InvalidUsage, 14 ) 15 from sanic.handlers import ContentRangeHandler 16 from sanic.response import file, HTTPResponse 17 18 19 def register(app, uri, file_or_directory, pattern, 20 use_modified_since, use_content_range): 21 # TODO: Though sanic is not a file server, I feel like we should at least 22 # make a good effort here. Modified-since is nice, but we could 23 # also look into etags, expires, and caching 24 """ 25 Register a static directory handler with Sanic by adding a route to the 26 router and registering a handler. 27 28 :param app: Sanic 29 :param file_or_directory: File or directory path to serve from 30 :param uri: URL to serve from 31 :param pattern: regular expression used to match files in the URL 32 :param use_modified_since: If true, send file modified time, and return 33 not modified if the browser's matches the 34 server's 35 :param use_content_range: If true, process header for range requests 36 and sends the file part that is requested 37 """ 38 # If we're not trying to match a file directly, 39 # serve from the folder 40 if not path.isfile(file_or_directory): 41 uri += '<file_uri:' + pattern + '>' 42 43 async def _handler(request, file_uri=None): 44 # Using this to determine if the URL is trying to break out of the path 45 # served. 
os.path.realpath seems to be very slow 46 if file_uri and '../' in file_uri: 47 raise InvalidUsage("Invalid URL") 48 # Merge served directory and requested file if provided 49 # Strip all / that in the beginning of the URL to help prevent python 50 # from herping a derp and treating the uri as an absolute path 51 root_path = file_path = file_or_directory 52 if file_uri: 53 file_path = path.join( 54 file_or_directory, sub('^[/]*', '', file_uri)) 55 56 # URL decode the path sent by the browser otherwise we won't be able to 57 # match filenames which got encoded (filenames with spaces etc) 58 file_path = path.abspath(unquote(file_path)) 59 if not file_path.startswith(root_path): 60 raise FileNotFound('File not found', 61 path=file_or_directory, 62 relative_url=file_uri) 63 try: 64 headers = {} 65 # Check if the client has been sent this file before 66 # and it has not been modified since 67 stats = None 68 if use_modified_since: 69 stats = await stat(file_path) 70 modified_since = strftime( 71 '%a, %d %b %Y %H:%M:%S GMT', gmtime(stats.st_mtime)) 72 if request.headers.get('If-Modified-Since') == modified_since: 73 return HTTPResponse(status=304) 74 headers['Last-Modified'] = modified_since 75 _range = None 76 if use_content_range: 77 _range = None 78 if not stats: 79 stats = await stat(file_path) 80 headers['Accept-Ranges'] = 'bytes' 81 headers['Content-Length'] = str(stats.st_size) 82 if request.method != 'HEAD': 83 try: 84 _range = ContentRangeHandler(request, stats) 85 except HeaderNotFound: 86 pass 87 else: 88 del headers['Content-Length'] 89 for key, value in _range.headers.items(): 90 headers[key] = value 91 if request.method == 'HEAD': 92 return HTTPResponse( 93 headers=headers, 94 content_type=guess_type(file_path)[0] or 'text/plain') 95 else: 96 return await file(file_path, headers=headers, _range=_range) 97 except ContentRangeError: 98 raise 99 except Exception: 100 raise FileNotFound('File not found', 101 path=file_or_directory, 102 relative_url=file_uri) 103 104 app.route(uri, methods=['GET', 'HEAD'])(_handler) 105 [end of sanic/static.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/sanic/static.py b/sanic/static.py --- a/sanic/static.py +++ b/sanic/static.py @@ -56,7 +56,7 @@ # URL decode the path sent by the browser otherwise we won't be able to # match filenames which got encoded (filenames with spaces etc) file_path = path.abspath(unquote(file_path)) - if not file_path.startswith(root_path): + if not file_path.startswith(path.abspath(unquote(root_path))): raise FileNotFound('File not found', path=file_or_directory, relative_url=file_uri)
{"golden_diff": "diff --git a/sanic/static.py b/sanic/static.py\n--- a/sanic/static.py\n+++ b/sanic/static.py\n@@ -56,7 +56,7 @@\n # URL decode the path sent by the browser otherwise we won't be able to\n # match filenames which got encoded (filenames with spaces etc)\n file_path = path.abspath(unquote(file_path))\n- if not file_path.startswith(root_path):\n+ if not file_path.startswith(path.abspath(unquote(root_path))):\n raise FileNotFound('File not found',\n path=file_or_directory,\n relative_url=file_uri)\n", "issue": "what have done to static.py?\nOn last Friday,everything is ok,my static file test works fine.\r\n\r\nToday,when I pip install sanic==0.5.1\r\nIt raise 404 error.\r\n\r\nwhen I pip install sanic==0.5.0\r\neverything is ok again.\r\n\r\nseems like the code blow has some problem?\r\nif not file_path.startswith(root_path):\r\n raise FileNotFound('File not found',\r\n path=file_or_directory,\r\n relative_url=file_uri)\n", "before_files": [{"content": "from mimetypes import guess_type\nfrom os import path\nfrom re import sub\nfrom time import strftime, gmtime\nfrom urllib.parse import unquote\n\nfrom aiofiles.os import stat\n\nfrom sanic.exceptions import (\n ContentRangeError,\n FileNotFound,\n HeaderNotFound,\n InvalidUsage,\n)\nfrom sanic.handlers import ContentRangeHandler\nfrom sanic.response import file, HTTPResponse\n\n\ndef register(app, uri, file_or_directory, pattern,\n use_modified_since, use_content_range):\n # TODO: Though sanic is not a file server, I feel like we should at least\n # make a good effort here. Modified-since is nice, but we could\n # also look into etags, expires, and caching\n \"\"\"\n Register a static directory handler with Sanic by adding a route to the\n router and registering a handler.\n\n :param app: Sanic\n :param file_or_directory: File or directory path to serve from\n :param uri: URL to serve from\n :param pattern: regular expression used to match files in the URL\n :param use_modified_since: If true, send file modified time, and return\n not modified if the browser's matches the\n server's\n :param use_content_range: If true, process header for range requests\n and sends the file part that is requested\n \"\"\"\n # If we're not trying to match a file directly,\n # serve from the folder\n if not path.isfile(file_or_directory):\n uri += '<file_uri:' + pattern + '>'\n\n async def _handler(request, file_uri=None):\n # Using this to determine if the URL is trying to break out of the path\n # served. 
os.path.realpath seems to be very slow\n if file_uri and '../' in file_uri:\n raise InvalidUsage(\"Invalid URL\")\n # Merge served directory and requested file if provided\n # Strip all / that in the beginning of the URL to help prevent python\n # from herping a derp and treating the uri as an absolute path\n root_path = file_path = file_or_directory\n if file_uri:\n file_path = path.join(\n file_or_directory, sub('^[/]*', '', file_uri))\n\n # URL decode the path sent by the browser otherwise we won't be able to\n # match filenames which got encoded (filenames with spaces etc)\n file_path = path.abspath(unquote(file_path))\n if not file_path.startswith(root_path):\n raise FileNotFound('File not found',\n path=file_or_directory,\n relative_url=file_uri)\n try:\n headers = {}\n # Check if the client has been sent this file before\n # and it has not been modified since\n stats = None\n if use_modified_since:\n stats = await stat(file_path)\n modified_since = strftime(\n '%a, %d %b %Y %H:%M:%S GMT', gmtime(stats.st_mtime))\n if request.headers.get('If-Modified-Since') == modified_since:\n return HTTPResponse(status=304)\n headers['Last-Modified'] = modified_since\n _range = None\n if use_content_range:\n _range = None\n if not stats:\n stats = await stat(file_path)\n headers['Accept-Ranges'] = 'bytes'\n headers['Content-Length'] = str(stats.st_size)\n if request.method != 'HEAD':\n try:\n _range = ContentRangeHandler(request, stats)\n except HeaderNotFound:\n pass\n else:\n del headers['Content-Length']\n for key, value in _range.headers.items():\n headers[key] = value\n if request.method == 'HEAD':\n return HTTPResponse(\n headers=headers,\n content_type=guess_type(file_path)[0] or 'text/plain')\n else:\n return await file(file_path, headers=headers, _range=_range)\n except ContentRangeError:\n raise\n except Exception:\n raise FileNotFound('File not found',\n path=file_or_directory,\n relative_url=file_uri)\n\n app.route(uri, methods=['GET', 'HEAD'])(_handler)\n", "path": "sanic/static.py"}]}
1,727
128
gh_patches_debug_10725
rasdani/github-patches
git_diff
TheAlgorithms__Python-5811
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [mypy] Need help to fix all `mypy` errors in the codebase # Just one left to fix... https://github.com/TheAlgorithms/Python/blob/master/mypy.ini#L5 * [x] other/least_recently_used.py * [x] other/lfu_cache.py #5755 * [x] other/lru_cache.py #5755 --- __UPDATE:__ Our GitHub Actions now run `mypy --ignore-missing-imports` excluding those directories that fail that test. * https://github.com/TheAlgorithms/Python/blob/master/mypy.ini#L5 Currently, we are not running `mypy` in our regular CI tests as there are a lot of errors in the entire codebase, which needs to be fixed. This won't be a one-person job, so we are asking for help from you. I cannot paste the entire message in here as there are around 600 of them, so here's just a gist of it: ```console $ mypy --ignore-missing-imports . strings/word_occurrence.py:17: error: Need type annotation for 'occurrence' strings/min_cost_string_conversion.py:36: error: No overload variant of "__setitem__" of "list" matches argument types "int", "str" strings/min_cost_string_conversion.py:36: note: Possible overload variants: strings/min_cost_string_conversion.py:36: note: def __setitem__(self, int, int) -> None strings/min_cost_string_conversion.py:36: note: def __setitem__(self, slice, Iterable[int]) -> None strings/min_cost_string_conversion.py:40: error: No overload variant of "__setitem__" of "list" matches argument types "int", "str" strings/min_cost_string_conversion.py:40: note: Possible overload variants: strings/min_cost_string_conversion.py:40: note: def __setitem__(self, int, int) -> None strings/min_cost_string_conversion.py:40: note: def __setitem__(self, slice, Iterable[int]) -> None ... backtracking/n_queens_math.py:109: error: List comprehension has incompatible type List[str]; expected List[int] backtracking/n_queens_math.py:110: error: Argument 1 to "append" of "list" has incompatible type "List[int]"; expected "List[str]" backtracking/n_queens_math.py:149: error: Need type annotation for 'boards' (hint: "boards: List[<type>] = ...") backtracking/minimax.py:15: error: "list" is not subscriptable, use "typing.List" instead backtracking/knight_tour.py:6: error: "tuple" is not subscriptable, use "typing.Tuple" instead backtracking/knight_tour.py:6: error: "list" is not subscriptable, use "typing.List" instead ... ``` # Guidelines to follow: - Please make sure you read the [Contributing Guidelines](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md) first. - Please submit a fix for a maximum of 3 files at a time (1 file is also acceptable). - As we are not running `mypy` in our CI tests, the user who is submitting a pull request should run it on their local machine and ensure there are no errors in their submission. - Please ensure your pull request title contains the word `mypy` in it. If possible use this template for your pull request title: ``` [mypy] Fix type annotations for <filenames> ``` ### Which errors to fix? Please follow the below steps to produce all the errors in this library: - Fork this repository if you haven't already. 
- Clone the forked repository on your local machine using the command: ``` git clone --depth 1 https://github.com/TheAlgorithms/Python.git ``` Then you need to install all the necessary requirements: ``` cd python/ python -m pip install --upgrade pip python -m pip install -r requirements.txt python -m pip install mypy ``` Then run either of the two commands: - `mypy --ignore-missing-imports .` -> To produce all the error messages for the entire codebase. - `mypy --ignore-missing-imports <filepath1> <filepath2> ...` -> To produce error messages for the mentioned file. ### How to fix the errors? - Make a separate branch for your fix with the command: ``` git checkout -b mypy-fix ``` - Make changes to the selected files. - Push it to your forked copy and open a pull request with the appropriate title as mentioned above. ### Focus on one directory at a time: ``` . ├── [x] arithmetic_analysis ├── [x] backtracking ├── [x] bit_manipulation ├── [x] blockchain ├── [x] boolean_algebra ├── [x] cellular_automata ├── [x] ciphers ├── [x] compression ├── [x] computer_vision ├── [x] conversions ├── [ ] data_structures ├── [x] digital_image_processing ├── [x] divide_and_conquer ├── [ ] dynamic_programming ├── [x] electronics ├── [x] file_transfer ├── [x] fractals ├── [x] fuzzy_logic ├── [x] genetic_algorithm ├── [x] geodesy ├── [x] graphics ├── [ ] graphs ├── [x] hashes ├── [x] knapsack ├── [x] linear_algebra ├── [x] machine_learning ├── [ ] maths ├── [ ] matrix ├── [x] networking_flow ├── [x] neural_network ├── [ ] other ├── [ ] project_euler ├── [x] quantum ├── [x] scheduling ├── [x] scripts ├── [ ] searches ├── [x] sorts ├── [ ] strings └── [x] web_programming ``` ### Pre-requisites: - You should be familiar with `mypy`: https://mypy.readthedocs.io - You should be familiar with Python type hints: https://docs.python.org/3/library/typing.html </issue> <code> [start of other/least_recently_used.py] 1 import sys 2 from abc import abstractmethod 3 from collections import deque 4 5 6 class LRUCache: 7 """Page Replacement Algorithm, Least Recently Used (LRU) Caching.""" 8 9 dq_store = object() # Cache store of keys 10 key_reference_map = object() # References of the keys in cache 11 _MAX_CAPACITY: int = 10 # Maximum capacity of cache 12 13 @abstractmethod 14 def __init__(self, n: int): 15 """Creates an empty store and map for the keys. 16 The LRUCache is set the size n. 17 """ 18 self.dq_store = deque() 19 self.key_reference_map = set() 20 if not n: 21 LRUCache._MAX_CAPACITY = sys.maxsize 22 elif n < 0: 23 raise ValueError("n should be an integer greater than 0.") 24 else: 25 LRUCache._MAX_CAPACITY = n 26 27 def refer(self, x): 28 """ 29 Looks for a page in the cache store and adds reference to the set. 30 Remove the least recently used key if the store is full. 31 Update store to reflect recent access. 32 """ 33 if x not in self.key_reference_map: 34 if len(self.dq_store) == LRUCache._MAX_CAPACITY: 35 last_element = self.dq_store.pop() 36 self.key_reference_map.remove(last_element) 37 else: 38 index_remove = 0 39 for idx, key in enumerate(self.dq_store): 40 if key == x: 41 index_remove = idx 42 break 43 self.dq_store.remove(index_remove) 44 45 self.dq_store.appendleft(x) 46 self.key_reference_map.add(x) 47 48 def display(self): 49 """ 50 Prints all the elements in the store. 
51 """ 52 for k in self.dq_store: 53 print(k) 54 55 56 if __name__ == "__main__": 57 lru_cache = LRUCache(4) 58 lru_cache.refer(1) 59 lru_cache.refer(2) 60 lru_cache.refer(3) 61 lru_cache.refer(1) 62 lru_cache.refer(4) 63 lru_cache.refer(5) 64 lru_cache.display() 65 [end of other/least_recently_used.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/other/least_recently_used.py b/other/least_recently_used.py --- a/other/least_recently_used.py +++ b/other/least_recently_used.py @@ -1,5 +1,4 @@ import sys -from abc import abstractmethod from collections import deque @@ -10,7 +9,6 @@ key_reference_map = object() # References of the keys in cache _MAX_CAPACITY: int = 10 # Maximum capacity of cache - @abstractmethod def __init__(self, n: int): """Creates an empty store and map for the keys. The LRUCache is set the size n.
{"golden_diff": "diff --git a/other/least_recently_used.py b/other/least_recently_used.py\n--- a/other/least_recently_used.py\n+++ b/other/least_recently_used.py\n@@ -1,5 +1,4 @@\n import sys\n-from abc import abstractmethod\n from collections import deque\n \n \n@@ -10,7 +9,6 @@\n key_reference_map = object() # References of the keys in cache\n _MAX_CAPACITY: int = 10 # Maximum capacity of cache\n \n- @abstractmethod\n def __init__(self, n: int):\n \"\"\"Creates an empty store and map for the keys.\n The LRUCache is set the size n.\n", "issue": "[mypy] Need help to fix all `mypy` errors in the codebase\n# Just one left to fix...\r\nhttps://github.com/TheAlgorithms/Python/blob/master/mypy.ini#L5\r\n* [x] other/least_recently_used.py\r\n* [x] other/lfu_cache.py #5755\r\n* [x] other/lru_cache.py #5755\r\n\r\n---\r\n\r\n__UPDATE:__ Our GitHub Actions now run `mypy --ignore-missing-imports` excluding those directories that fail that test.\r\n* https://github.com/TheAlgorithms/Python/blob/master/mypy.ini#L5\r\n\r\nCurrently, we are not running `mypy` in our regular CI tests as there are a lot of errors in the entire codebase, which needs to be fixed. This won't be a one-person job, so we are asking for help from you. I cannot paste the entire message in here as there are around 600 of them, so here's just a gist of it:\r\n\r\n```console\r\n$ mypy --ignore-missing-imports .\r\nstrings/word_occurrence.py:17: error: Need type annotation for 'occurrence'\r\nstrings/min_cost_string_conversion.py:36: error: No overload variant of \"__setitem__\" of \"list\" matches argument types \"int\", \"str\"\r\nstrings/min_cost_string_conversion.py:36: note: Possible overload variants:\r\nstrings/min_cost_string_conversion.py:36: note: def __setitem__(self, int, int) -> None\r\nstrings/min_cost_string_conversion.py:36: note: def __setitem__(self, slice, Iterable[int]) -> None\r\nstrings/min_cost_string_conversion.py:40: error: No overload variant of \"__setitem__\" of \"list\" matches argument types \"int\", \"str\"\r\nstrings/min_cost_string_conversion.py:40: note: Possible overload variants:\r\nstrings/min_cost_string_conversion.py:40: note: def __setitem__(self, int, int) -> None\r\nstrings/min_cost_string_conversion.py:40: note: def __setitem__(self, slice, Iterable[int]) -> None\r\n...\r\nbacktracking/n_queens_math.py:109: error: List comprehension has incompatible type List[str]; expected List[int]\r\nbacktracking/n_queens_math.py:110: error: Argument 1 to \"append\" of \"list\" has incompatible type \"List[int]\"; expected \"List[str]\"\r\nbacktracking/n_queens_math.py:149: error: Need type annotation for 'boards' (hint: \"boards: List[<type>] = ...\")\r\nbacktracking/minimax.py:15: error: \"list\" is not subscriptable, use \"typing.List\" instead\r\nbacktracking/knight_tour.py:6: error: \"tuple\" is not subscriptable, use \"typing.Tuple\" instead\r\nbacktracking/knight_tour.py:6: error: \"list\" is not subscriptable, use \"typing.List\" instead\r\n...\r\n```\r\n\r\n# Guidelines to follow:\r\n\r\n- Please make sure you read the [Contributing Guidelines](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md) first.\r\n- Please submit a fix for a maximum of 3 files at a time (1 file is also acceptable).\r\n- As we are not running `mypy` in our CI tests, the user who is submitting a pull request should run it on their local machine and ensure there are no errors in their submission.\r\n- Please ensure your pull request title contains the word `mypy` in it. 
If possible use this template for your pull request title:\r\n```\r\n[mypy] Fix type annotations for <filenames>\r\n```\r\n\r\n### Which errors to fix?\r\n\r\nPlease follow the below steps to produce all the errors in this library:\r\n- Fork this repository if you haven't already.\r\n- Clone the forked repository on your local machine using the command:\r\n\r\n```\r\ngit clone --depth 1 https://github.com/TheAlgorithms/Python.git\r\n```\r\nThen you need to install all the necessary requirements:\r\n```\r\ncd python/\r\npython -m pip install --upgrade pip\r\npython -m pip install -r requirements.txt\r\npython -m pip install mypy\r\n```\r\nThen run either of the two commands:\r\n- `mypy --ignore-missing-imports .` -> To produce all the error messages for the entire codebase.\r\n- `mypy --ignore-missing-imports <filepath1> <filepath2> ...` -> To produce error messages for the mentioned file.\r\n\r\n### How to fix the errors?\r\n\r\n- Make a separate branch for your fix with the command: \r\n```\r\ngit checkout -b mypy-fix\r\n```\r\n- Make changes to the selected files.\r\n- Push it to your forked copy and open a pull request with the appropriate title as mentioned above.\r\n\r\n### Focus on one directory at a time:\r\n\r\n```\r\n.\r\n\u251c\u2500\u2500 [x] arithmetic_analysis\r\n\u251c\u2500\u2500 [x] backtracking\r\n\u251c\u2500\u2500 [x] bit_manipulation\r\n\u251c\u2500\u2500 [x] blockchain\r\n\u251c\u2500\u2500 [x] boolean_algebra\r\n\u251c\u2500\u2500 [x] cellular_automata\r\n\u251c\u2500\u2500 [x] ciphers\r\n\u251c\u2500\u2500 [x] compression\r\n\u251c\u2500\u2500 [x] computer_vision\r\n\u251c\u2500\u2500 [x] conversions\r\n\u251c\u2500\u2500 [ ] data_structures\r\n\u251c\u2500\u2500 [x] digital_image_processing\r\n\u251c\u2500\u2500 [x] divide_and_conquer\r\n\u251c\u2500\u2500 [ ] dynamic_programming\r\n\u251c\u2500\u2500 [x] electronics\r\n\u251c\u2500\u2500 [x] file_transfer\r\n\u251c\u2500\u2500 [x] fractals\r\n\u251c\u2500\u2500 [x] fuzzy_logic\r\n\u251c\u2500\u2500 [x] genetic_algorithm\r\n\u251c\u2500\u2500 [x] geodesy\r\n\u251c\u2500\u2500 [x] graphics\r\n\u251c\u2500\u2500 [ ] graphs\r\n\u251c\u2500\u2500 [x] hashes\r\n\u251c\u2500\u2500 [x] knapsack\r\n\u251c\u2500\u2500 [x] linear_algebra\r\n\u251c\u2500\u2500 [x] machine_learning\r\n\u251c\u2500\u2500 [ ] maths\r\n\u251c\u2500\u2500 [ ] matrix\r\n\u251c\u2500\u2500 [x] networking_flow\r\n\u251c\u2500\u2500 [x] neural_network\r\n\u251c\u2500\u2500 [ ] other\r\n\u251c\u2500\u2500 [ ] project_euler\r\n\u251c\u2500\u2500 [x] quantum\r\n\u251c\u2500\u2500 [x] scheduling\r\n\u251c\u2500\u2500 [x] scripts\r\n\u251c\u2500\u2500 [ ] searches\r\n\u251c\u2500\u2500 [x] sorts\r\n\u251c\u2500\u2500 [ ] strings\r\n\u2514\u2500\u2500 [x] web_programming\r\n```\r\n\r\n### Pre-requisites:\r\n- You should be familiar with `mypy`: https://mypy.readthedocs.io\r\n- You should be familiar with Python type hints: https://docs.python.org/3/library/typing.html\n", "before_files": [{"content": "import sys\nfrom abc import abstractmethod\nfrom collections import deque\n\n\nclass LRUCache:\n \"\"\"Page Replacement Algorithm, Least Recently Used (LRU) Caching.\"\"\"\n\n dq_store = object() # Cache store of keys\n key_reference_map = object() # References of the keys in cache\n _MAX_CAPACITY: int = 10 # Maximum capacity of cache\n\n @abstractmethod\n def __init__(self, n: int):\n \"\"\"Creates an empty store and map for the keys.\n The LRUCache is set the size n.\n \"\"\"\n self.dq_store = deque()\n self.key_reference_map = set()\n if not n:\n 
LRUCache._MAX_CAPACITY = sys.maxsize\n elif n < 0:\n raise ValueError(\"n should be an integer greater than 0.\")\n else:\n LRUCache._MAX_CAPACITY = n\n\n def refer(self, x):\n \"\"\"\n Looks for a page in the cache store and adds reference to the set.\n Remove the least recently used key if the store is full.\n Update store to reflect recent access.\n \"\"\"\n if x not in self.key_reference_map:\n if len(self.dq_store) == LRUCache._MAX_CAPACITY:\n last_element = self.dq_store.pop()\n self.key_reference_map.remove(last_element)\n else:\n index_remove = 0\n for idx, key in enumerate(self.dq_store):\n if key == x:\n index_remove = idx\n break\n self.dq_store.remove(index_remove)\n\n self.dq_store.appendleft(x)\n self.key_reference_map.add(x)\n\n def display(self):\n \"\"\"\n Prints all the elements in the store.\n \"\"\"\n for k in self.dq_store:\n print(k)\n\n\nif __name__ == \"__main__\":\n lru_cache = LRUCache(4)\n lru_cache.refer(1)\n lru_cache.refer(2)\n lru_cache.refer(3)\n lru_cache.refer(1)\n lru_cache.refer(4)\n lru_cache.refer(5)\n lru_cache.display()\n", "path": "other/least_recently_used.py"}]}
2,505
152
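The verification payload above carries the entire fix for `other/least_recently_used.py`: dropping the stray `@abstractmethod` from the concrete `__init__` is enough to satisfy `mypy --ignore-missing-imports`. For readers who want a picture of what fuller annotations on that cache could look like, here is a minimal sketch; the trimmed-down class, its attribute types, and the simplified eviction logic are assumptions for illustration, not the repository's code.

```python
from collections import deque
from typing import Deque, Set


class AnnotatedLRUCache:
    """Illustrative only: a cut-down LRU store with explicit type hints."""

    _DEFAULT_CAPACITY: int = 10

    def __init__(self, n: int) -> None:
        # Concrete class, so no @abstractmethod on __init__.
        self.dq_store: Deque[int] = deque()
        self.key_reference_map: Set[int] = set()
        self._max_capacity: int = n if n > 0 else AnnotatedLRUCache._DEFAULT_CAPACITY

    def refer(self, key: int) -> None:
        if key in self.key_reference_map:
            self.dq_store.remove(key)          # move an existing key to the front
        elif len(self.dq_store) == self._max_capacity:
            evicted = self.dq_store.pop()      # drop the least recently used key
            self.key_reference_map.discard(evicted)
        self.dq_store.appendleft(key)
        self.key_reference_map.add(key)


if __name__ == "__main__":
    cache = AnnotatedLRUCache(2)
    for k in (1, 2, 1, 3):
        cache.refer(k)
    print(list(cache.dq_store))  # [3, 1] -- 2 was evicted as least recently used
```

Running `mypy` over a file annotated this way reports no errors, which is the bar the issue sets before a directory can be ticked off in its checklist.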
gh_patches_debug_11593
rasdani/github-patches
git_diff
open-telemetry__opentelemetry-python-contrib-847
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Partial celery task time limit is not support Python 3.8 **Steps to reproduce** - Create celery task with only one of the time limit soft/hard Or use this test to reproduce: `instrumentation/opentelemetry-instrumentation-celery/tests/test_utils.py` ```python def test_set_attributes_partial_timelimit_hard_limit(self): context = { "correlation_id": "44b7f305", "delivery_info": {"eager": True}, "eta": "soon", "expires": "later", "hostname": "localhost", "id": "44b7f305", "reply_to": "44b7f305", "retries": 4, "timelimit": ("now", None), "custom_meta": "custom_value", "routing_key": "celery", } span = trace._Span("name", mock.Mock(spec=trace_api.SpanContext)) utils.set_attributes_from_context(span, context) self.assertEqual(span.attributes.get("celery.timelimit"), ("now", "")) ``` **What is the expected behavior?** The time limit that was specify should be instrumented **What is the actual behavior?** Exception is raised and no time limit is instrumented ``` [__init__.py:_translate_attributes:164] _translate_key_values(key, value) exception.trace.1 [exporter.py:_translate_key_values:126] return KeyValue(key=key, value=_translate_value(value)) exception.trace.2 [ exporter.py:_translate_value:104] array_value=ArrayValue(values=[_translate_value(v) for v in value]) exception.trace.3 [ exporter.py:<listcomp>:104] array_value=ArrayValue(values=[_translate_value(v) for v in value]) exception.trace.4 [ exporter.py:_translate_value:119] "Invalid type {} of value {}".format(type(value), value) ``` </issue> <code> [start of instrumentation/opentelemetry-instrumentation-celery/src/opentelemetry/instrumentation/celery/utils.py] 1 # Copyright The OpenTelemetry Authors 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 
14 15 import logging 16 17 from celery import registry # pylint: disable=no-name-in-module 18 19 from opentelemetry.semconv.trace import SpanAttributes 20 21 logger = logging.getLogger(__name__) 22 23 # Celery Context key 24 CTX_KEY = "__otel_task_span" 25 26 # Celery Context attributes 27 CELERY_CONTEXT_ATTRIBUTES = ( 28 "compression", 29 "correlation_id", 30 "countdown", 31 "delivery_info", 32 "declare", 33 "eta", 34 "exchange", 35 "expires", 36 "hostname", 37 "id", 38 "priority", 39 "queue", 40 "reply_to", 41 "retries", 42 "routing_key", 43 "serializer", 44 "timelimit", 45 "origin", 46 "state", 47 ) 48 49 50 # pylint:disable=too-many-branches 51 def set_attributes_from_context(span, context): 52 """Helper to extract meta values from a Celery Context""" 53 if not span.is_recording(): 54 return 55 for key in CELERY_CONTEXT_ATTRIBUTES: 56 value = context.get(key) 57 58 # Skip this key if it is not set 59 if value is None or value == "": 60 continue 61 62 # Skip `timelimit` if it is not set (it's default/unset value is a 63 # tuple or a list of `None` values 64 if key == "timelimit" and value in [(None, None), [None, None]]: 65 continue 66 67 # Skip `retries` if it's value is `0` 68 if key == "retries" and value == 0: 69 continue 70 71 attribute_name = None 72 73 # Celery 4.0 uses `origin` instead of `hostname`; this change preserves 74 # the same name for the tag despite Celery version 75 if key == "origin": 76 key = "hostname" 77 78 elif key == "delivery_info": 79 # Get also destination from this 80 routing_key = value.get("routing_key") 81 if routing_key is not None: 82 span.set_attribute( 83 SpanAttributes.MESSAGING_DESTINATION, routing_key 84 ) 85 value = str(value) 86 87 elif key == "id": 88 attribute_name = SpanAttributes.MESSAGING_MESSAGE_ID 89 90 elif key == "correlation_id": 91 attribute_name = SpanAttributes.MESSAGING_CONVERSATION_ID 92 93 elif key == "routing_key": 94 attribute_name = SpanAttributes.MESSAGING_DESTINATION 95 96 # according to https://docs.celeryproject.org/en/stable/userguide/routing.html#exchange-types 97 elif key == "declare": 98 attribute_name = SpanAttributes.MESSAGING_DESTINATION_KIND 99 for declare in value: 100 if declare.exchange.type == "direct": 101 value = "queue" 102 break 103 if declare.exchange.type == "topic": 104 value = "topic" 105 break 106 107 # set attribute name if not set specially for a key 108 if attribute_name is None: 109 attribute_name = f"celery.{key}" 110 111 span.set_attribute(attribute_name, value) 112 113 114 def attach_span(task, task_id, span, is_publish=False): 115 """Helper to propagate a `Span` for the given `Task` instance. This 116 function uses a `dict` that stores the Span using the 117 `(task_id, is_publish)` as a key. This is useful when information must be 118 propagated from one Celery signal to another. 119 120 We use (task_id, is_publish) for the key to ensure that publishing a 121 task from within another task does not cause any conflicts. 122 123 This mostly happens when either a task fails and a retry policy is in place, 124 or when a task is manually retries (e.g. `task.retry()`), we end up trying 125 to publish a task with the same id as the task currently running. 
126 127 Previously publishing the new task would overwrite the existing `celery.run` span 128 in the `dict` causing that span to be forgotten and never finished 129 NOTE: We cannot test for this well yet, because we do not run a celery worker, 130 and cannot run `task.apply_async()` 131 """ 132 span_dict = getattr(task, CTX_KEY, None) 133 if span_dict is None: 134 span_dict = {} 135 setattr(task, CTX_KEY, span_dict) 136 137 span_dict[(task_id, is_publish)] = span 138 139 140 def detach_span(task, task_id, is_publish=False): 141 """Helper to remove a `Span` in a Celery task when it's propagated. 142 This function handles tasks where the `Span` is not attached. 143 """ 144 span_dict = getattr(task, CTX_KEY, None) 145 if span_dict is None: 146 return 147 148 # See note in `attach_span` for key info 149 span_dict.pop((task_id, is_publish), (None, None)) 150 151 152 def retrieve_span(task, task_id, is_publish=False): 153 """Helper to retrieve an active `Span` stored in a `Task` 154 instance 155 """ 156 span_dict = getattr(task, CTX_KEY, None) 157 if span_dict is None: 158 return (None, None) 159 160 # See note in `attach_span` for key info 161 return span_dict.get((task_id, is_publish), (None, None)) 162 163 164 def retrieve_task(kwargs): 165 task = kwargs.get("task") 166 if task is None: 167 logger.debug("Unable to retrieve task from signal arguments") 168 return task 169 170 171 def retrieve_task_from_sender(kwargs): 172 sender = kwargs.get("sender") 173 if sender is None: 174 logger.debug("Unable to retrieve the sender from signal arguments") 175 176 # before and after publish signals sender is the task name 177 # for retry and failure signals sender is the task object 178 if isinstance(sender, str): 179 sender = registry.tasks.get(sender) 180 if sender is None: 181 logger.debug("Unable to retrieve the task from sender=%s", sender) 182 183 return sender 184 185 186 def retrieve_task_id(kwargs): 187 task_id = kwargs.get("task_id") 188 if task_id is None: 189 logger.debug("Unable to retrieve task_id from signal arguments") 190 return task_id 191 192 193 def retrieve_task_id_from_request(kwargs): 194 # retry signal does not include task_id as argument so use request argument 195 request = kwargs.get("request") 196 if request is None: 197 logger.debug("Unable to retrieve the request from signal arguments") 198 199 task_id = getattr(request, "id") 200 if task_id is None: 201 logger.debug("Unable to retrieve the task_id from the request") 202 203 return task_id 204 205 206 def retrieve_task_id_from_message(kwargs): 207 """Helper to retrieve the `Task` identifier from the message `body`. 208 This helper supports Protocol Version 1 and 2. The Protocol is well 209 detailed in the official documentation: 210 http://docs.celeryproject.org/en/latest/internals/protocol.html 211 """ 212 headers = kwargs.get("headers") 213 body = kwargs.get("body") 214 if headers is not None and len(headers) > 0: 215 # Protocol Version 2 (default from Celery 4.0) 216 return headers.get("id") 217 # Protocol Version 1 218 return body.get("id") 219 220 221 def retrieve_reason(kwargs): 222 reason = kwargs.get("reason") 223 if not reason: 224 logger.debug("Unable to retrieve the retry reason") 225 return reason 226 [end of instrumentation/opentelemetry-instrumentation-celery/src/opentelemetry/instrumentation/celery/utils.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/instrumentation/opentelemetry-instrumentation-celery/src/opentelemetry/instrumentation/celery/utils.py b/instrumentation/opentelemetry-instrumentation-celery/src/opentelemetry/instrumentation/celery/utils.py --- a/instrumentation/opentelemetry-instrumentation-celery/src/opentelemetry/instrumentation/celery/utils.py +++ b/instrumentation/opentelemetry-instrumentation-celery/src/opentelemetry/instrumentation/celery/utils.py @@ -61,8 +61,11 @@ # Skip `timelimit` if it is not set (it's default/unset value is a # tuple or a list of `None` values - if key == "timelimit" and value in [(None, None), [None, None]]: - continue + if key == "timelimit": + if value in [(None, None), [None, None]]: + continue + if None in value: + value = ["" if tl is None else tl for tl in value] # Skip `retries` if it's value is `0` if key == "retries" and value == 0:
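The hunk above keeps partially set `timelimit` tuples instead of crashing the OTLP exporter: a fully unset limit is still skipped, while a tuple containing a single `None` is rewritten with empty strings so every element has a type the exporter accepts (the traceback in the issue fails exactly on the `None` inside the array value). A rough standalone restatement of that branch is sketched below; the helper name and the return-`None`-means-skip convention are illustrative, since in the instrumentation the logic stays inline in `set_attributes_from_context`.

```python
def normalize_timelimit(value):
    """Mirror the patched branch: skip fully unset limits, blank out partial ones."""
    if value in [(None, None), [None, None]]:
        return None  # caller omits the celery.timelimit attribute entirely
    if None in value:
        # None is not a valid element for an attribute array, so substitute "".
        return ["" if tl is None else tl for tl in value]
    return list(value)


if __name__ == "__main__":
    print(normalize_timelimit((None, None)))   # None         -> attribute omitted
    print(normalize_timelimit(("now", None)))  # ['now', '']  -> safe for the exporter
    print(normalize_timelimit((1.0, 2.0)))     # [1.0, 2.0]   -> unchanged
```

The second case is the one the proposed regression test asserts, with the span ending up holding `('now', '')` for `celery.timelimit`.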
{"golden_diff": "diff --git a/instrumentation/opentelemetry-instrumentation-celery/src/opentelemetry/instrumentation/celery/utils.py b/instrumentation/opentelemetry-instrumentation-celery/src/opentelemetry/instrumentation/celery/utils.py\n--- a/instrumentation/opentelemetry-instrumentation-celery/src/opentelemetry/instrumentation/celery/utils.py\n+++ b/instrumentation/opentelemetry-instrumentation-celery/src/opentelemetry/instrumentation/celery/utils.py\n@@ -61,8 +61,11 @@\n \n # Skip `timelimit` if it is not set (it's default/unset value is a\n # tuple or a list of `None` values\n- if key == \"timelimit\" and value in [(None, None), [None, None]]:\n- continue\n+ if key == \"timelimit\":\n+ if value in [(None, None), [None, None]]:\n+ continue\n+ if None in value:\n+ value = [\"\" if tl is None else tl for tl in value]\n \n # Skip `retries` if it's value is `0`\n if key == \"retries\" and value == 0:\n", "issue": "Partial celery task time limit is not support\nPython 3.8\r\n\r\n**Steps to reproduce**\r\n- Create celery task with only one of the time limit soft/hard\r\nOr use this test to reproduce:\r\n\r\n`instrumentation/opentelemetry-instrumentation-celery/tests/test_utils.py`\r\n\r\n```python\r\n def test_set_attributes_partial_timelimit_hard_limit(self):\r\n context = {\r\n \"correlation_id\": \"44b7f305\",\r\n \"delivery_info\": {\"eager\": True},\r\n \"eta\": \"soon\",\r\n \"expires\": \"later\",\r\n \"hostname\": \"localhost\",\r\n \"id\": \"44b7f305\",\r\n \"reply_to\": \"44b7f305\",\r\n \"retries\": 4,\r\n \"timelimit\": (\"now\", None),\r\n \"custom_meta\": \"custom_value\",\r\n \"routing_key\": \"celery\",\r\n }\r\n span = trace._Span(\"name\", mock.Mock(spec=trace_api.SpanContext))\r\n utils.set_attributes_from_context(span, context)\r\n self.assertEqual(span.attributes.get(\"celery.timelimit\"), (\"now\", \"\"))\r\n```\r\n**What is the expected behavior?**\r\nThe time limit that was specify should be instrumented \r\n\r\n**What is the actual behavior?**\r\nException is raised and no time limit is instrumented\r\n```\r\n[__init__.py:_translate_attributes:164] _translate_key_values(key, value)\r\nexception.trace.1\t\r\n[exporter.py:_translate_key_values:126] return KeyValue(key=key, value=_translate_value(value))\r\nexception.trace.2\t\r\n[ exporter.py:_translate_value:104] array_value=ArrayValue(values=[_translate_value(v) for v in value])\r\nexception.trace.3\t\r\n[ exporter.py:<listcomp>:104] array_value=ArrayValue(values=[_translate_value(v) for v in value])\r\nexception.trace.4\t\r\n[ exporter.py:_translate_value:119] \"Invalid type {} of value {}\".format(type(value), value)\r\n```\r\n \r\n\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\n\nfrom celery import registry # pylint: disable=no-name-in-module\n\nfrom opentelemetry.semconv.trace import SpanAttributes\n\nlogger = logging.getLogger(__name__)\n\n# Celery Context key\nCTX_KEY = \"__otel_task_span\"\n\n# Celery Context 
attributes\nCELERY_CONTEXT_ATTRIBUTES = (\n \"compression\",\n \"correlation_id\",\n \"countdown\",\n \"delivery_info\",\n \"declare\",\n \"eta\",\n \"exchange\",\n \"expires\",\n \"hostname\",\n \"id\",\n \"priority\",\n \"queue\",\n \"reply_to\",\n \"retries\",\n \"routing_key\",\n \"serializer\",\n \"timelimit\",\n \"origin\",\n \"state\",\n)\n\n\n# pylint:disable=too-many-branches\ndef set_attributes_from_context(span, context):\n \"\"\"Helper to extract meta values from a Celery Context\"\"\"\n if not span.is_recording():\n return\n for key in CELERY_CONTEXT_ATTRIBUTES:\n value = context.get(key)\n\n # Skip this key if it is not set\n if value is None or value == \"\":\n continue\n\n # Skip `timelimit` if it is not set (it's default/unset value is a\n # tuple or a list of `None` values\n if key == \"timelimit\" and value in [(None, None), [None, None]]:\n continue\n\n # Skip `retries` if it's value is `0`\n if key == \"retries\" and value == 0:\n continue\n\n attribute_name = None\n\n # Celery 4.0 uses `origin` instead of `hostname`; this change preserves\n # the same name for the tag despite Celery version\n if key == \"origin\":\n key = \"hostname\"\n\n elif key == \"delivery_info\":\n # Get also destination from this\n routing_key = value.get(\"routing_key\")\n if routing_key is not None:\n span.set_attribute(\n SpanAttributes.MESSAGING_DESTINATION, routing_key\n )\n value = str(value)\n\n elif key == \"id\":\n attribute_name = SpanAttributes.MESSAGING_MESSAGE_ID\n\n elif key == \"correlation_id\":\n attribute_name = SpanAttributes.MESSAGING_CONVERSATION_ID\n\n elif key == \"routing_key\":\n attribute_name = SpanAttributes.MESSAGING_DESTINATION\n\n # according to https://docs.celeryproject.org/en/stable/userguide/routing.html#exchange-types\n elif key == \"declare\":\n attribute_name = SpanAttributes.MESSAGING_DESTINATION_KIND\n for declare in value:\n if declare.exchange.type == \"direct\":\n value = \"queue\"\n break\n if declare.exchange.type == \"topic\":\n value = \"topic\"\n break\n\n # set attribute name if not set specially for a key\n if attribute_name is None:\n attribute_name = f\"celery.{key}\"\n\n span.set_attribute(attribute_name, value)\n\n\ndef attach_span(task, task_id, span, is_publish=False):\n \"\"\"Helper to propagate a `Span` for the given `Task` instance. This\n function uses a `dict` that stores the Span using the\n `(task_id, is_publish)` as a key. This is useful when information must be\n propagated from one Celery signal to another.\n\n We use (task_id, is_publish) for the key to ensure that publishing a\n task from within another task does not cause any conflicts.\n\n This mostly happens when either a task fails and a retry policy is in place,\n or when a task is manually retries (e.g. 
`task.retry()`), we end up trying\n to publish a task with the same id as the task currently running.\n\n Previously publishing the new task would overwrite the existing `celery.run` span\n in the `dict` causing that span to be forgotten and never finished\n NOTE: We cannot test for this well yet, because we do not run a celery worker,\n and cannot run `task.apply_async()`\n \"\"\"\n span_dict = getattr(task, CTX_KEY, None)\n if span_dict is None:\n span_dict = {}\n setattr(task, CTX_KEY, span_dict)\n\n span_dict[(task_id, is_publish)] = span\n\n\ndef detach_span(task, task_id, is_publish=False):\n \"\"\"Helper to remove a `Span` in a Celery task when it's propagated.\n This function handles tasks where the `Span` is not attached.\n \"\"\"\n span_dict = getattr(task, CTX_KEY, None)\n if span_dict is None:\n return\n\n # See note in `attach_span` for key info\n span_dict.pop((task_id, is_publish), (None, None))\n\n\ndef retrieve_span(task, task_id, is_publish=False):\n \"\"\"Helper to retrieve an active `Span` stored in a `Task`\n instance\n \"\"\"\n span_dict = getattr(task, CTX_KEY, None)\n if span_dict is None:\n return (None, None)\n\n # See note in `attach_span` for key info\n return span_dict.get((task_id, is_publish), (None, None))\n\n\ndef retrieve_task(kwargs):\n task = kwargs.get(\"task\")\n if task is None:\n logger.debug(\"Unable to retrieve task from signal arguments\")\n return task\n\n\ndef retrieve_task_from_sender(kwargs):\n sender = kwargs.get(\"sender\")\n if sender is None:\n logger.debug(\"Unable to retrieve the sender from signal arguments\")\n\n # before and after publish signals sender is the task name\n # for retry and failure signals sender is the task object\n if isinstance(sender, str):\n sender = registry.tasks.get(sender)\n if sender is None:\n logger.debug(\"Unable to retrieve the task from sender=%s\", sender)\n\n return sender\n\n\ndef retrieve_task_id(kwargs):\n task_id = kwargs.get(\"task_id\")\n if task_id is None:\n logger.debug(\"Unable to retrieve task_id from signal arguments\")\n return task_id\n\n\ndef retrieve_task_id_from_request(kwargs):\n # retry signal does not include task_id as argument so use request argument\n request = kwargs.get(\"request\")\n if request is None:\n logger.debug(\"Unable to retrieve the request from signal arguments\")\n\n task_id = getattr(request, \"id\")\n if task_id is None:\n logger.debug(\"Unable to retrieve the task_id from the request\")\n\n return task_id\n\n\ndef retrieve_task_id_from_message(kwargs):\n \"\"\"Helper to retrieve the `Task` identifier from the message `body`.\n This helper supports Protocol Version 1 and 2. The Protocol is well\n detailed in the official documentation:\n http://docs.celeryproject.org/en/latest/internals/protocol.html\n \"\"\"\n headers = kwargs.get(\"headers\")\n body = kwargs.get(\"body\")\n if headers is not None and len(headers) > 0:\n # Protocol Version 2 (default from Celery 4.0)\n return headers.get(\"id\")\n # Protocol Version 1\n return body.get(\"id\")\n\n\ndef retrieve_reason(kwargs):\n reason = kwargs.get(\"reason\")\n if not reason:\n logger.debug(\"Unable to retrieve the retry reason\")\n return reason\n", "path": "instrumentation/opentelemetry-instrumentation-celery/src/opentelemetry/instrumentation/celery/utils.py"}]}
3,265
267
gh_patches_debug_21075
rasdani/github-patches
git_diff
larq__larq-146
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Document how to use a quantizer as an activation function `tf.keras.layers.Activation("ste_sign")` </issue> <code> [start of larq/quantizers.py] 1 """A Quantizer defines the way of transforming a full precision input to a 2 quantized output and the pseudo-gradient method used for the backwards pass.""" 3 4 import tensorflow as tf 5 from larq import utils, math 6 7 8 @tf.custom_gradient 9 def _binarize_with_identity_grad(x): 10 def grad(dy): 11 return dy 12 13 return math.sign(x), grad 14 15 16 @tf.custom_gradient 17 def _binarize_with_weighted_grad(x): 18 def grad(dy): 19 return (1 - tf.abs(x)) * 2 * dy 20 21 return math.sign(x), grad 22 23 24 @utils.register_keras_custom_object 25 @utils.set_precision(1) 26 def ste_sign(x): 27 r""" 28 Sign binarization function. 29 \\[ 30 q(x) = \begin{cases} 31 -1 & x < 0 \\\ 32 1 & x \geq 0 33 \end{cases} 34 \\] 35 36 The gradient is estimated using the Straight-Through Estimator 37 (essentially the binarization is replaced by a clipped identity on the 38 backward pass). 39 \\[\frac{\partial q(x)}{\partial x} = \begin{cases} 40 1 & \left|x\right| \leq 1 \\\ 41 0 & \left|x\right| > 1 42 \end{cases}\\] 43 44 ```plot-activation 45 quantizers.ste_sign 46 ``` 47 48 # Arguments 49 x: Input tensor. 50 51 # Returns 52 Binarized tensor. 53 54 # References 55 - [Binarized Neural Networks: Training Deep Neural Networks with Weights and 56 Activations Constrained to +1 or -1](http://arxiv.org/abs/1602.02830) 57 """ 58 59 x = tf.clip_by_value(x, -1, 1) 60 61 return _binarize_with_identity_grad(x) 62 63 64 @utils.register_keras_custom_object 65 @utils.set_precision(1) 66 def magnitude_aware_sign(x): 67 r""" 68 Magnitude-aware sign for Bi-Real Net. 69 70 ```plot-activation 71 quantizers.magnitude_aware_sign 72 ``` 73 74 # Arguments 75 x: Input tensor 76 77 # Returns 78 Scaled binarized tensor (with values in $\{-a, a\}$, where $a$ is a float). 79 80 # References 81 - [Bi-Real Net: Enhancing the Performance of 1-bit CNNs With Improved 82 Representational Capability and Advanced Training 83 Algorithm](https://arxiv.org/abs/1808.00278) 84 85 """ 86 scale_factor = tf.reduce_mean(tf.abs(x), axis=list(range(len(x.shape) - 1))) 87 88 return tf.stop_gradient(scale_factor) * ste_sign(x) 89 90 91 @utils.register_keras_custom_object 92 @utils.set_precision(1) 93 def approx_sign(x): 94 r""" 95 Sign binarization function. 96 \\[ 97 q(x) = \begin{cases} 98 -1 & x < 0 \\\ 99 1 & x \geq 0 100 \end{cases} 101 \\] 102 103 The gradient is estimated using the ApproxSign method. 104 \\[\frac{\partial q(x)}{\partial x} = \begin{cases} 105 (2 - 2 \left|x\right|) & \left|x\right| \leq 1 \\\ 106 0 & \left|x\right| > 1 107 \end{cases} 108 \\] 109 110 ```plot-activation 111 quantizers.approx_sign 112 ``` 113 114 # Arguments 115 x: Input tensor. 116 117 # Returns 118 Binarized tensor. 119 120 # References 121 - [Bi-Real Net: Enhancing the Performance of 1-bit CNNs With Improved 122 Representational Capability and Advanced 123 Training Algorithm](http://arxiv.org/abs/1808.00278) 124 """ 125 126 x = tf.clip_by_value(x, -1, 1) 127 128 return _binarize_with_weighted_grad(x) 129 130 131 @utils.register_keras_custom_object 132 @utils.set_precision(2) 133 class SteTern: 134 r""" 135 Ternarization function. 
136 \\[ 137 q(x) = \begin{cases} 138 +1 & x > \Delta \\\ 139 0 & |x| < \Delta \\\ 140 -1 & x < - \Delta 141 \end{cases} 142 \\] 143 144 where $\Delta$ is defined as the threshold and can be passed as an argument, 145 or can be calculated as per the Ternary Weight Networks original paper, such that 146 147 \\[ 148 \Delta = \frac{0.7}{n} \sum_{i=1}^{n} |W_i| 149 \\] 150 where we assume that $W_i$ is generated from a normal distribution. 151 152 The gradient is estimated using the Straight-Through Estimator 153 (essentially the Ternarization is replaced by a clipped identity on the 154 backward pass). 155 \\[\frac{\partial q(x)}{\partial x} = \begin{cases} 156 1 & \left|x\right| \leq 1 \\\ 157 0 & \left|x\right| > 1 158 \end{cases}\\] 159 160 ```plot-activation 161 quantizers.SteTern 162 ``` 163 164 # Arguments 165 x: Input tensor. 166 threshold_value: The value for the threshold, $\Delta$. 167 ternary_weight_networks: Boolean of whether to use the Ternary Weight Networks threshold calculation. 168 169 # Returns 170 Ternarized tensor. 171 172 # References 173 - [Ternary Weight Networks](http://arxiv.org/abs/1605.04711) 174 """ 175 176 def __init__(self, threshold_value=0.1, ternary_weight_networks=False): 177 self.threshold_value = threshold_value 178 self.ternary_weight_networks = ternary_weight_networks 179 180 def __call__(self, x): 181 x = tf.clip_by_value(x, -1, 1) 182 if self.ternary_weight_networks: 183 threshold = self.threshold_twn(x) 184 else: 185 threshold = self.threshold_value 186 187 @tf.custom_gradient 188 def _ternarize_with_identity_grad(x): 189 def grad(dy): 190 return dy 191 192 return (tf.sign(tf.sign(x + threshold) + tf.sign(x - threshold)), grad) 193 194 return _ternarize_with_identity_grad(x) 195 196 def threshold_twn(self, x): 197 return 0.7 * tf.reduce_sum(tf.abs(x)) / tf.cast(tf.size(x), x.dtype) 198 199 def get_config(self): 200 return { 201 "threshold_value": self.threshold_value, 202 "ternary_weight_networks": self.ternary_weight_networks, 203 } 204 205 206 def serialize(initializer): 207 return tf.keras.utils.serialize_keras_object(initializer) 208 209 210 def deserialize(name, custom_objects=None): 211 return tf.keras.utils.deserialize_keras_object( 212 name, 213 module_objects=globals(), 214 custom_objects=custom_objects, 215 printable_module_name="quantization function", 216 ) 217 218 219 def get(identifier): 220 if identifier is None: 221 return None 222 if isinstance(identifier, str): 223 return deserialize(str(identifier)) 224 if callable(identifier): 225 return identifier 226 raise ValueError( 227 f"Could not interpret quantization function identifier: {identifier}" 228 ) 229 [end of larq/quantizers.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/larq/quantizers.py b/larq/quantizers.py --- a/larq/quantizers.py +++ b/larq/quantizers.py @@ -1,5 +1,39 @@ """A Quantizer defines the way of transforming a full precision input to a -quantized output and the pseudo-gradient method used for the backwards pass.""" +quantized output and the pseudo-gradient method used for the backwards pass. + +Quantizers can either be used through quantizer arguments that are supported +for Larq layers, such as `input_quantizer` and `kernel_quantizer`; or they +can be used similar to activations, i.e. either through an `Activation` layer, +or through the `activation` argument supported by all forward layer: + +```python +import tensorflow as tf +import larq as lq +... +x = lq.layers.QuantDense(64, activation=None)(x) +x = lq.layers.QuantDense(64, input_quantizer="ste_sign")(x) +``` + +is equivalent to: + +```python +x = lq.layers.QuantDense(64)(x) +x = tf.keras.layers.Activation("ste_sign")(x) +x = lq.layers.QuantDense(64)(x) +``` + +as well as: + +```python +x = lq.layers.QuantDense(64, activation="ste_sign")(x) +x = lq.layers.QuantDense(64)(x) +``` + +We highly recommend using the first of these formulations: for the +other two formulations, intermediate layers - like batch normalization or +average pooling - and shortcut connections may result in non-binary input +to the convolutions. +""" import tensorflow as tf from larq import utils, math
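The docstring added above spells out three interchangeable ways to apply a quantizer as an activation; condensed into one sketch they look like this (layer widths and the 32-feature input are arbitrary choices, and the snippet assumes `larq` and `tensorflow` are installed):

```python
import tensorflow as tf
import larq as lq

inputs = tf.keras.Input(shape=(32,))

# Preferred form: quantize at the consuming layer via `input_quantizer`.
x = lq.layers.QuantDense(64)(inputs)
x = lq.layers.QuantDense(64, input_quantizer="ste_sign")(x)

# Equivalent: a stand-alone Activation layer; the string resolves because the
# quantizer is registered as a Keras custom object.
y = lq.layers.QuantDense(64)(inputs)
y = tf.keras.layers.Activation("ste_sign")(y)

# Equivalent: the ordinary `activation` argument accepted by forward layers.
z = lq.layers.QuantDense(64, activation="ste_sign")(inputs)

model = tf.keras.Model(inputs, [x, y, z])
model.summary()
```

The recommendation for the first form comes straight from the new docstring: with the other two, intermediate layers such as batch normalization or average pooling (or a shortcut connection) can leave the following layer consuming non-binary inputs.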
{"golden_diff": "diff --git a/larq/quantizers.py b/larq/quantizers.py\n--- a/larq/quantizers.py\n+++ b/larq/quantizers.py\n@@ -1,5 +1,39 @@\n \"\"\"A Quantizer defines the way of transforming a full precision input to a\n-quantized output and the pseudo-gradient method used for the backwards pass.\"\"\"\n+quantized output and the pseudo-gradient method used for the backwards pass.\n+\n+Quantizers can either be used through quantizer arguments that are supported\n+for Larq layers, such as `input_quantizer` and `kernel_quantizer`; or they\n+can be used similar to activations, i.e. either through an `Activation` layer,\n+or through the `activation` argument supported by all forward layer:\n+\n+```python\n+import tensorflow as tf\n+import larq as lq\n+...\n+x = lq.layers.QuantDense(64, activation=None)(x)\n+x = lq.layers.QuantDense(64, input_quantizer=\"ste_sign\")(x)\n+```\n+\n+is equivalent to:\n+\n+```python\n+x = lq.layers.QuantDense(64)(x)\n+x = tf.keras.layers.Activation(\"ste_sign\")(x)\n+x = lq.layers.QuantDense(64)(x)\n+```\n+\n+as well as:\n+\n+```python\n+x = lq.layers.QuantDense(64, activation=\"ste_sign\")(x)\n+x = lq.layers.QuantDense(64)(x)\n+```\n+\n+We highly recommend using the first of these formulations: for the\n+other two formulations, intermediate layers - like batch normalization or\n+average pooling - and shortcut connections may result in non-binary input\n+to the convolutions.\n+\"\"\"\n \n import tensorflow as tf\n from larq import utils, math\n", "issue": "Document how to use a quantizer as an activation function\n`tf.keras.layers.Activation(\"ste_sign\")`\n", "before_files": [{"content": "\"\"\"A Quantizer defines the way of transforming a full precision input to a\nquantized output and the pseudo-gradient method used for the backwards pass.\"\"\"\n\nimport tensorflow as tf\nfrom larq import utils, math\n\n\[email protected]_gradient\ndef _binarize_with_identity_grad(x):\n def grad(dy):\n return dy\n\n return math.sign(x), grad\n\n\[email protected]_gradient\ndef _binarize_with_weighted_grad(x):\n def grad(dy):\n return (1 - tf.abs(x)) * 2 * dy\n\n return math.sign(x), grad\n\n\[email protected]_keras_custom_object\[email protected]_precision(1)\ndef ste_sign(x):\n r\"\"\"\n Sign binarization function.\n \\\\[\n q(x) = \\begin{cases}\n -1 & x < 0 \\\\\\\n 1 & x \\geq 0\n \\end{cases}\n \\\\]\n\n The gradient is estimated using the Straight-Through Estimator\n (essentially the binarization is replaced by a clipped identity on the\n backward pass).\n \\\\[\\frac{\\partial q(x)}{\\partial x} = \\begin{cases}\n 1 & \\left|x\\right| \\leq 1 \\\\\\\n 0 & \\left|x\\right| > 1\n \\end{cases}\\\\]\n\n ```plot-activation\n quantizers.ste_sign\n ```\n\n # Arguments\n x: Input tensor.\n\n # Returns\n Binarized tensor.\n\n # References\n - [Binarized Neural Networks: Training Deep Neural Networks with Weights and\n Activations Constrained to +1 or -1](http://arxiv.org/abs/1602.02830)\n \"\"\"\n\n x = tf.clip_by_value(x, -1, 1)\n\n return _binarize_with_identity_grad(x)\n\n\[email protected]_keras_custom_object\[email protected]_precision(1)\ndef magnitude_aware_sign(x):\n r\"\"\"\n Magnitude-aware sign for Bi-Real Net.\n\n ```plot-activation\n quantizers.magnitude_aware_sign\n ```\n\n # Arguments\n x: Input tensor\n\n # Returns\n Scaled binarized tensor (with values in $\\{-a, a\\}$, where $a$ is a float).\n\n # References\n - [Bi-Real Net: Enhancing the Performance of 1-bit CNNs With Improved\n Representational Capability and Advanced Training\n 
Algorithm](https://arxiv.org/abs/1808.00278)\n\n \"\"\"\n scale_factor = tf.reduce_mean(tf.abs(x), axis=list(range(len(x.shape) - 1)))\n\n return tf.stop_gradient(scale_factor) * ste_sign(x)\n\n\[email protected]_keras_custom_object\[email protected]_precision(1)\ndef approx_sign(x):\n r\"\"\"\n Sign binarization function.\n \\\\[\n q(x) = \\begin{cases}\n -1 & x < 0 \\\\\\\n 1 & x \\geq 0\n \\end{cases}\n \\\\]\n\n The gradient is estimated using the ApproxSign method.\n \\\\[\\frac{\\partial q(x)}{\\partial x} = \\begin{cases}\n (2 - 2 \\left|x\\right|) & \\left|x\\right| \\leq 1 \\\\\\\n 0 & \\left|x\\right| > 1\n \\end{cases}\n \\\\]\n\n ```plot-activation\n quantizers.approx_sign\n ```\n\n # Arguments\n x: Input tensor.\n\n # Returns\n Binarized tensor.\n\n # References\n - [Bi-Real Net: Enhancing the Performance of 1-bit CNNs With Improved\n Representational Capability and Advanced\n Training Algorithm](http://arxiv.org/abs/1808.00278)\n \"\"\"\n\n x = tf.clip_by_value(x, -1, 1)\n\n return _binarize_with_weighted_grad(x)\n\n\[email protected]_keras_custom_object\[email protected]_precision(2)\nclass SteTern:\n r\"\"\"\n Ternarization function.\n \\\\[\n q(x) = \\begin{cases}\n +1 & x > \\Delta \\\\\\\n 0 & |x| < \\Delta \\\\\\\n -1 & x < - \\Delta\n \\end{cases}\n \\\\]\n\n where $\\Delta$ is defined as the threshold and can be passed as an argument,\n or can be calculated as per the Ternary Weight Networks original paper, such that\n\n \\\\[\n \\Delta = \\frac{0.7}{n} \\sum_{i=1}^{n} |W_i|\n \\\\]\n where we assume that $W_i$ is generated from a normal distribution.\n\n The gradient is estimated using the Straight-Through Estimator\n (essentially the Ternarization is replaced by a clipped identity on the\n backward pass).\n \\\\[\\frac{\\partial q(x)}{\\partial x} = \\begin{cases}\n 1 & \\left|x\\right| \\leq 1 \\\\\\\n 0 & \\left|x\\right| > 1\n \\end{cases}\\\\]\n\n ```plot-activation\n quantizers.SteTern\n ```\n\n # Arguments\n x: Input tensor.\n threshold_value: The value for the threshold, $\\Delta$.\n ternary_weight_networks: Boolean of whether to use the Ternary Weight Networks threshold calculation.\n\n # Returns\n Ternarized tensor.\n\n # References\n - [Ternary Weight Networks](http://arxiv.org/abs/1605.04711)\n \"\"\"\n\n def __init__(self, threshold_value=0.1, ternary_weight_networks=False):\n self.threshold_value = threshold_value\n self.ternary_weight_networks = ternary_weight_networks\n\n def __call__(self, x):\n x = tf.clip_by_value(x, -1, 1)\n if self.ternary_weight_networks:\n threshold = self.threshold_twn(x)\n else:\n threshold = self.threshold_value\n\n @tf.custom_gradient\n def _ternarize_with_identity_grad(x):\n def grad(dy):\n return dy\n\n return (tf.sign(tf.sign(x + threshold) + tf.sign(x - threshold)), grad)\n\n return _ternarize_with_identity_grad(x)\n\n def threshold_twn(self, x):\n return 0.7 * tf.reduce_sum(tf.abs(x)) / tf.cast(tf.size(x), x.dtype)\n\n def get_config(self):\n return {\n \"threshold_value\": self.threshold_value,\n \"ternary_weight_networks\": self.ternary_weight_networks,\n }\n\n\ndef serialize(initializer):\n return tf.keras.utils.serialize_keras_object(initializer)\n\n\ndef deserialize(name, custom_objects=None):\n return tf.keras.utils.deserialize_keras_object(\n name,\n module_objects=globals(),\n custom_objects=custom_objects,\n printable_module_name=\"quantization function\",\n )\n\n\ndef get(identifier):\n if identifier is None:\n return None\n if isinstance(identifier, str):\n return deserialize(str(identifier))\n if 
callable(identifier):\n return identifier\n raise ValueError(\n f\"Could not interpret quantization function identifier: {identifier}\"\n )\n", "path": "larq/quantizers.py"}]}
2,766
402
gh_patches_debug_34198
rasdani/github-patches
git_diff
readthedocs__readthedocs.org-3214
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Better User deletion With #3175 we now have a way for users to mark themselves as wanting to be deleted, now we need to actually delete them. This could be done inline during delete, or as a batch job that runs after the web view returns. We want to actually **delete** the User instance, but we need to confirm a couple things: * Projects that the user _only_ owns are deleted. This is a [M2m](https://github.com/rtfd/readthedocs.org/blob/master/readthedocs/projects/models.py#L83), so we need to either *remove* them if there are multiple users, or *delete* the project if they are the only owner, so we don't end up with orphaned projects. </issue> <code> [start of readthedocs/profiles/views.py] 1 """Views for creating, editing and viewing site-specific user profiles.""" 2 3 from __future__ import absolute_import 4 5 from django.contrib import messages 6 from django.contrib.auth import logout 7 from django.contrib.auth.decorators import login_required 8 from django.contrib.auth.models import User 9 from django.core.exceptions import ObjectDoesNotExist 10 from django.core.urlresolvers import reverse 11 from django.http import Http404 12 from django.http import HttpResponseRedirect 13 from django.shortcuts import get_object_or_404, render, redirect 14 from django.shortcuts import render_to_response 15 from django.template import RequestContext 16 17 from readthedocs.core.forms import UserDeleteForm 18 19 20 def create_profile(request, form_class, success_url=None, 21 template_name='profiles/private/create_profile.html', 22 extra_context=None): 23 """ 24 Create a profile for the current user, if one doesn't already exist. 25 26 If the user already has a profile, a redirect will be issued to the 27 :view:`profiles.views.edit_profile` view. 28 29 **Optional arguments:** 30 31 ``extra_context`` 32 A dictionary of variables to add to the template context. Any 33 callable object in this dictionary will be called to produce 34 the end result which appears in the context. 35 36 ``form_class`` 37 The form class to use for validating and creating the user 38 profile. This form class must define a method named 39 ``save()``, implementing the same argument signature as the 40 ``save()`` method of a standard Django ``ModelForm`` (this 41 view will call ``save(commit=False)`` to obtain the profile 42 object, and fill in the user before the final save). If the 43 profile object includes many-to-many relations, the convention 44 established by ``ModelForm`` of using a method named 45 ``save_m2m()`` will be used, and so your form class should 46 also define this method. 47 48 ``success_url`` 49 The URL to redirect to after successful profile creation. If 50 this argument is not supplied, this will default to the URL of 51 :view:`profiles.views.profile_detail` for the newly-created 52 profile object. 53 54 ``template_name`` 55 The template to use when displaying the profile-creation 56 form. If not supplied, this will default to 57 :template:`profiles/create_profile.html`. 58 59 **Context:** 60 61 ``form`` 62 The profile-creation form. 63 64 **Template:** 65 66 ``template_name`` keyword argument, or 67 :template:`profiles/create_profile.html`. 68 69 """ 70 try: 71 profile_obj = request.user.profile 72 return HttpResponseRedirect(reverse('profiles_edit_profile')) 73 except ObjectDoesNotExist: 74 pass 75 76 # 77 # We set up success_url here, rather than as the default value for 78 # the argument. 
Trying to do it as the argument's default would 79 # mean evaluating the call to reverse() at the time this module is 80 # first imported, which introduces a circular dependency: to 81 # perform the reverse lookup we need access to profiles/urls.py, 82 # but profiles/urls.py in turn imports this module. 83 # 84 85 if success_url is None: 86 success_url = reverse('profiles_profile_detail', 87 kwargs={'username': request.user.username}) 88 if request.method == 'POST': 89 form = form_class(data=request.POST, files=request.FILES) 90 if form.is_valid(): 91 profile_obj = form.save(commit=False) 92 profile_obj.user = request.user 93 profile_obj.save() 94 if hasattr(form, 'save_m2m'): 95 form.save_m2m() 96 return HttpResponseRedirect(success_url) 97 else: 98 form = form_class() 99 100 if extra_context is None: 101 extra_context = {} 102 context = RequestContext(request) 103 for key, value in list(extra_context.items()): 104 context[key] = (value() if callable(value) else value) 105 106 return render_to_response(template_name, 107 {'form': form}, 108 context_instance=context) 109 create_profile = login_required(create_profile) 110 111 112 def edit_profile(request, form_class, success_url=None, 113 template_name='profiles/private/edit_profile.html', 114 extra_context=None): 115 """ 116 Edit the current user's profile. 117 118 If the user does not already have a profile, a redirect will be issued to 119 the :view:`profiles.views.create_profile` view. 120 121 **Optional arguments:** 122 123 ``extra_context`` 124 A dictionary of variables to add to the template context. Any 125 callable object in this dictionary will be called to produce 126 the end result which appears in the context. 127 128 ``form_class`` 129 The form class to use for validating and editing the user 130 profile. This form class must operate similarly to a standard 131 Django ``ModelForm`` in that it must accept an instance of the 132 object to be edited as the keyword argument ``instance`` to 133 its constructor, and it must implement a method named 134 ``save()`` which will save the updates to the object. 135 136 ``success_url`` 137 The URL to redirect to following a successful edit. If not 138 specified, this will default to the URL of 139 :view:`profiles.views.profile_detail` for the profile object 140 being edited. 141 142 ``template_name`` 143 The template to use when displaying the profile-editing 144 form. If not specified, this will default to 145 :template:`profiles/edit_profile.html`. 146 147 **Context:** 148 149 ``form`` 150 The form for editing the profile. 151 152 ``profile`` 153 The user's current profile. 154 155 **Template:** 156 157 ``template_name`` keyword argument or 158 :template:`profiles/edit_profile.html`. 
159 160 """ 161 try: 162 profile_obj = request.user.profile 163 except ObjectDoesNotExist: 164 return HttpResponseRedirect(reverse('profiles_profile_create')) 165 166 if success_url is None: 167 success_url = reverse('profiles_profile_detail', 168 kwargs={'username': request.user.username}) 169 if request.method == 'POST': 170 form = form_class(data=request.POST, files=request.FILES, instance=profile_obj) 171 if form.is_valid(): 172 form.save() 173 return HttpResponseRedirect(success_url) 174 else: 175 form = form_class(instance=profile_obj) 176 177 if extra_context is None: 178 extra_context = {} 179 context = RequestContext(request) 180 for key, value in list(extra_context.items()): 181 context[key] = (value() if callable(value) else value) 182 183 return render_to_response(template_name, { 184 'form': form, 185 'profile': profile_obj, 186 'user': profile_obj.user, 187 }, context_instance=context) 188 edit_profile = login_required(edit_profile) 189 190 191 @login_required() 192 def delete_account(request): 193 form = UserDeleteForm() 194 template_name = 'profiles/private/delete_account.html' 195 196 if request.method == 'POST': 197 form = UserDeleteForm(instance=request.user, data=request.POST) 198 if form.is_valid(): 199 200 # Do not delete the account permanently because it may create disaster 201 # Inactive the user instead. 202 request.user.is_active = False 203 request.user.save() 204 logout(request) 205 messages.info(request, 'You have successfully deleted your account') 206 207 return redirect('homepage') 208 209 return render(request, template_name, {'form': form}) 210 211 212 def profile_detail(request, username, public_profile_field=None, 213 template_name='profiles/public/profile_detail.html', 214 extra_context=None): 215 """ 216 Detail view of a user's profile. 217 218 If the user has not yet created a profile, ``Http404`` will be 219 raised. 220 221 **Required arguments:** 222 223 ``username`` 224 The username of the user whose profile is being displayed. 225 226 **Optional arguments:** 227 228 ``extra_context`` 229 A dictionary of variables to add to the template context. Any 230 callable object in this dictionary will be called to produce 231 the end result which appears in the context. 232 233 ``public_profile_field`` 234 The name of a ``BooleanField`` on the profile model; if the 235 value of that field on the user's profile is ``False``, the 236 ``profile`` variable in the template will be ``None``. Use 237 this feature to allow users to mark their profiles as not 238 being publicly viewable. 239 240 If this argument is not specified, it will be assumed that all 241 users' profiles are publicly viewable. 242 243 ``template_name`` 244 The name of the template to use for displaying the profile. If 245 not specified, this will default to 246 :template:`profiles/profile_detail.html`. 247 248 **Context:** 249 250 ``profile`` 251 The user's profile, or ``None`` if the user's profile is not 252 publicly viewable (see the description of 253 ``public_profile_field`` above). 254 255 **Template:** 256 257 ``template_name`` keyword argument or 258 :template:`profiles/profile_detail.html`. 
259 260 """ 261 user = get_object_or_404(User, username=username) 262 try: 263 profile_obj = user.profile 264 except ObjectDoesNotExist: 265 raise Http404 266 if public_profile_field is not None and \ 267 not getattr(profile_obj, public_profile_field): 268 profile_obj = None 269 270 if extra_context is None: 271 extra_context = {} 272 context = RequestContext(request) 273 for key, value in list(extra_context.items()): 274 context[key] = (value() if callable(value) else value) 275 276 return render_to_response(template_name, 277 {'profile': profile_obj}, 278 context_instance=context) 279 [end of readthedocs/profiles/views.py] [start of readthedocs/core/signals.py] 1 """Signal handling for core app.""" 2 3 from __future__ import absolute_import 4 5 import logging 6 7 from corsheaders import signals 8 from django.dispatch import Signal 9 from django.db.models import Q 10 from future.backports.urllib.parse import urlparse 11 12 from readthedocs.projects.models import Project, Domain 13 14 15 log = logging.getLogger(__name__) 16 17 WHITELIST_URLS = ['/api/v2/footer_html', '/api/v2/search', '/api/v2/docsearch'] 18 19 20 webhook_github = Signal(providing_args=['project', 'data', 'event']) 21 webhook_gitlab = Signal(providing_args=['project', 'data', 'event']) 22 webhook_bitbucket = Signal(providing_args=['project', 'data', 'event']) 23 24 25 def decide_if_cors(sender, request, **kwargs): # pylint: disable=unused-argument 26 """ 27 Decide whether a request should be given CORS access. 28 29 This checks that: 30 * The URL is whitelisted against our CORS-allowed domains 31 * The Domain exists in our database, and belongs to the project being queried. 32 33 Returns True when a request should be given CORS access. 34 """ 35 if 'HTTP_ORIGIN' not in request.META: 36 return False 37 host = urlparse(request.META['HTTP_ORIGIN']).netloc.split(':')[0] 38 valid_url = False 39 for url in WHITELIST_URLS: 40 if request.path_info.startswith(url): 41 valid_url = True 42 43 if valid_url: 44 project_slug = request.GET.get('project', None) 45 try: 46 project = Project.objects.get(slug=project_slug) 47 except Project.DoesNotExist: 48 log.warning( 49 'Invalid project passed to domain. [{project}:{domain}'.format( 50 project=project_slug, 51 domain=host, 52 ) 53 ) 54 return False 55 56 domain = Domain.objects.filter( 57 Q(domain__icontains=host), 58 Q(project=project) | Q(project__subprojects__child=project) 59 ) 60 if domain.exists(): 61 return True 62 63 return False 64 65 signals.check_request_enabled.connect(decide_if_cors) 66 [end of readthedocs/core/signals.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/readthedocs/core/signals.py b/readthedocs/core/signals.py --- a/readthedocs/core/signals.py +++ b/readthedocs/core/signals.py @@ -5,13 +5,15 @@ import logging from corsheaders import signals +from django.conf import settings +from django.db.models.signals import pre_delete from django.dispatch import Signal -from django.db.models import Q +from django.db.models import Q, Count +from django.dispatch import receiver from future.backports.urllib.parse import urlparse from readthedocs.projects.models import Project, Domain - log = logging.getLogger(__name__) WHITELIST_URLS = ['/api/v2/footer_html', '/api/v2/search', '/api/v2/docsearch'] @@ -62,4 +64,20 @@ return False + +@receiver(pre_delete, sender=settings.AUTH_USER_MODEL) +def delete_projects_and_organizations(sender, instance, *args, **kwargs): + # Here we count the owner list from the projects that the user own + # Then exclude the projects where there are more than one owner + projects = instance.projects.all().annotate(num_users=Count('users')).exclude(num_users__gt=1) + + # Here we count the users list from the organization that the user belong + # Then exclude the organizations where there are more than one user + oauth_organizations = (instance.oauth_organizations.annotate(num_users=Count('users')) + .exclude(num_users__gt=1)) + + projects.delete() + oauth_organizations.delete() + + signals.check_request_enabled.connect(decide_if_cors) diff --git a/readthedocs/profiles/views.py b/readthedocs/profiles/views.py --- a/readthedocs/profiles/views.py +++ b/readthedocs/profiles/views.py @@ -196,11 +196,9 @@ if request.method == 'POST': form = UserDeleteForm(instance=request.user, data=request.POST) if form.is_valid(): - - # Do not delete the account permanently because it may create disaster - # Inactive the user instead. - request.user.is_active = False - request.user.save() + # Delete the user permanently + # It will also delete some projects where he is the only owner + request.user.delete() logout(request) messages.info(request, 'You have successfully deleted your account')
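The first hunk above answers the issue's orphaned-project concern with a `pre_delete` receiver: before the `User` row disappears, projects and OAuth organizations whose only member is that user are deleted, while shared ones are left to their remaining owners. The queryset idiom, pulled out on its own below, is a sketch rather than drop-in code; it assumes a configured Django project in which `user.projects` is the reverse side of the owners many-to-many mentioned in the issue.

```python
from django.conf import settings
from django.db.models import Count
from django.db.models.signals import pre_delete
from django.dispatch import receiver


@receiver(pre_delete, sender=settings.AUTH_USER_MODEL)
def delete_solely_owned_projects(sender, instance, *args, **kwargs):
    """Delete projects that would otherwise be orphaned by this user's deletion."""
    solely_owned = (
        instance.projects.all()
        .annotate(num_users=Count("users"))
        .exclude(num_users__gt=1)  # keep anything that still has another owner
    )
    solely_owned.delete()
```

With the receiver wired up, the second hunk can simply call `request.user.delete()` in the profile view instead of flagging the account inactive, and the project cleanup happens as part of the same deletion.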
{"golden_diff": "diff --git a/readthedocs/core/signals.py b/readthedocs/core/signals.py\n--- a/readthedocs/core/signals.py\n+++ b/readthedocs/core/signals.py\n@@ -5,13 +5,15 @@\n import logging\n \n from corsheaders import signals\n+from django.conf import settings\n+from django.db.models.signals import pre_delete\n from django.dispatch import Signal\n-from django.db.models import Q\n+from django.db.models import Q, Count\n+from django.dispatch import receiver\n from future.backports.urllib.parse import urlparse\n \n from readthedocs.projects.models import Project, Domain\n \n-\n log = logging.getLogger(__name__)\n \n WHITELIST_URLS = ['/api/v2/footer_html', '/api/v2/search', '/api/v2/docsearch']\n@@ -62,4 +64,20 @@\n \n return False\n \n+\n+@receiver(pre_delete, sender=settings.AUTH_USER_MODEL)\n+def delete_projects_and_organizations(sender, instance, *args, **kwargs):\n+ # Here we count the owner list from the projects that the user own\n+ # Then exclude the projects where there are more than one owner\n+ projects = instance.projects.all().annotate(num_users=Count('users')).exclude(num_users__gt=1)\n+\n+ # Here we count the users list from the organization that the user belong\n+ # Then exclude the organizations where there are more than one user\n+ oauth_organizations = (instance.oauth_organizations.annotate(num_users=Count('users'))\n+ .exclude(num_users__gt=1))\n+\n+ projects.delete()\n+ oauth_organizations.delete()\n+\n+\n signals.check_request_enabled.connect(decide_if_cors)\ndiff --git a/readthedocs/profiles/views.py b/readthedocs/profiles/views.py\n--- a/readthedocs/profiles/views.py\n+++ b/readthedocs/profiles/views.py\n@@ -196,11 +196,9 @@\n if request.method == 'POST':\n form = UserDeleteForm(instance=request.user, data=request.POST)\n if form.is_valid():\n-\n- # Do not delete the account permanently because it may create disaster\n- # Inactive the user instead.\n- request.user.is_active = False\n- request.user.save()\n+ # Delete the user permanently\n+ # It will also delete some projects where he is the only owner\n+ request.user.delete()\n logout(request)\n messages.info(request, 'You have successfully deleted your account')\n", "issue": "Better User deletion\nWith #3175 we now have a way for users to mark themselves as wanting to be deleted, now we need to actually delete them. This could be done inline during delete, or as a batch job that runs after the web view returns.\r\n\r\nWe want to actually **delete** the User instance, but we need to confirm a couple things:\r\n\r\n* Projects that the user _only_ owns are deleted. This is a [M2m](https://github.com/rtfd/readthedocs.org/blob/master/readthedocs/projects/models.py#L83), so we need to either *remove* them if there are multiple users, or *delete* the project if they are the only owner, so we don't end up with orphaned projects. 
\r\n\n", "before_files": [{"content": "\"\"\"Views for creating, editing and viewing site-specific user profiles.\"\"\"\n\nfrom __future__ import absolute_import\n\nfrom django.contrib import messages\nfrom django.contrib.auth import logout\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import User\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.core.urlresolvers import reverse\nfrom django.http import Http404\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import get_object_or_404, render, redirect\nfrom django.shortcuts import render_to_response\nfrom django.template import RequestContext\n\nfrom readthedocs.core.forms import UserDeleteForm\n\n\ndef create_profile(request, form_class, success_url=None,\n template_name='profiles/private/create_profile.html',\n extra_context=None):\n \"\"\"\n Create a profile for the current user, if one doesn't already exist.\n\n If the user already has a profile, a redirect will be issued to the\n :view:`profiles.views.edit_profile` view.\n\n **Optional arguments:**\n\n ``extra_context``\n A dictionary of variables to add to the template context. Any\n callable object in this dictionary will be called to produce\n the end result which appears in the context.\n\n ``form_class``\n The form class to use for validating and creating the user\n profile. This form class must define a method named\n ``save()``, implementing the same argument signature as the\n ``save()`` method of a standard Django ``ModelForm`` (this\n view will call ``save(commit=False)`` to obtain the profile\n object, and fill in the user before the final save). If the\n profile object includes many-to-many relations, the convention\n established by ``ModelForm`` of using a method named\n ``save_m2m()`` will be used, and so your form class should\n also define this method.\n\n ``success_url``\n The URL to redirect to after successful profile creation. If\n this argument is not supplied, this will default to the URL of\n :view:`profiles.views.profile_detail` for the newly-created\n profile object.\n\n ``template_name``\n The template to use when displaying the profile-creation\n form. If not supplied, this will default to\n :template:`profiles/create_profile.html`.\n\n **Context:**\n\n ``form``\n The profile-creation form.\n\n **Template:**\n\n ``template_name`` keyword argument, or\n :template:`profiles/create_profile.html`.\n\n \"\"\"\n try:\n profile_obj = request.user.profile\n return HttpResponseRedirect(reverse('profiles_edit_profile'))\n except ObjectDoesNotExist:\n pass\n\n #\n # We set up success_url here, rather than as the default value for\n # the argument. 
Trying to do it as the argument's default would\n # mean evaluating the call to reverse() at the time this module is\n # first imported, which introduces a circular dependency: to\n # perform the reverse lookup we need access to profiles/urls.py,\n # but profiles/urls.py in turn imports this module.\n #\n\n if success_url is None:\n success_url = reverse('profiles_profile_detail',\n kwargs={'username': request.user.username})\n if request.method == 'POST':\n form = form_class(data=request.POST, files=request.FILES)\n if form.is_valid():\n profile_obj = form.save(commit=False)\n profile_obj.user = request.user\n profile_obj.save()\n if hasattr(form, 'save_m2m'):\n form.save_m2m()\n return HttpResponseRedirect(success_url)\n else:\n form = form_class()\n\n if extra_context is None:\n extra_context = {}\n context = RequestContext(request)\n for key, value in list(extra_context.items()):\n context[key] = (value() if callable(value) else value)\n\n return render_to_response(template_name,\n {'form': form},\n context_instance=context)\ncreate_profile = login_required(create_profile)\n\n\ndef edit_profile(request, form_class, success_url=None,\n template_name='profiles/private/edit_profile.html',\n extra_context=None):\n \"\"\"\n Edit the current user's profile.\n\n If the user does not already have a profile, a redirect will be issued to\n the :view:`profiles.views.create_profile` view.\n\n **Optional arguments:**\n\n ``extra_context``\n A dictionary of variables to add to the template context. Any\n callable object in this dictionary will be called to produce\n the end result which appears in the context.\n\n ``form_class``\n The form class to use for validating and editing the user\n profile. This form class must operate similarly to a standard\n Django ``ModelForm`` in that it must accept an instance of the\n object to be edited as the keyword argument ``instance`` to\n its constructor, and it must implement a method named\n ``save()`` which will save the updates to the object.\n\n ``success_url``\n The URL to redirect to following a successful edit. If not\n specified, this will default to the URL of\n :view:`profiles.views.profile_detail` for the profile object\n being edited.\n\n ``template_name``\n The template to use when displaying the profile-editing\n form. 
If not specified, this will default to\n :template:`profiles/edit_profile.html`.\n\n **Context:**\n\n ``form``\n The form for editing the profile.\n\n ``profile``\n The user's current profile.\n\n **Template:**\n\n ``template_name`` keyword argument or\n :template:`profiles/edit_profile.html`.\n\n \"\"\"\n try:\n profile_obj = request.user.profile\n except ObjectDoesNotExist:\n return HttpResponseRedirect(reverse('profiles_profile_create'))\n\n if success_url is None:\n success_url = reverse('profiles_profile_detail',\n kwargs={'username': request.user.username})\n if request.method == 'POST':\n form = form_class(data=request.POST, files=request.FILES, instance=profile_obj)\n if form.is_valid():\n form.save()\n return HttpResponseRedirect(success_url)\n else:\n form = form_class(instance=profile_obj)\n\n if extra_context is None:\n extra_context = {}\n context = RequestContext(request)\n for key, value in list(extra_context.items()):\n context[key] = (value() if callable(value) else value)\n\n return render_to_response(template_name, {\n 'form': form,\n 'profile': profile_obj,\n 'user': profile_obj.user,\n }, context_instance=context)\nedit_profile = login_required(edit_profile)\n\n\n@login_required()\ndef delete_account(request):\n form = UserDeleteForm()\n template_name = 'profiles/private/delete_account.html'\n\n if request.method == 'POST':\n form = UserDeleteForm(instance=request.user, data=request.POST)\n if form.is_valid():\n\n # Do not delete the account permanently because it may create disaster\n # Inactive the user instead.\n request.user.is_active = False\n request.user.save()\n logout(request)\n messages.info(request, 'You have successfully deleted your account')\n\n return redirect('homepage')\n\n return render(request, template_name, {'form': form})\n\n\ndef profile_detail(request, username, public_profile_field=None,\n template_name='profiles/public/profile_detail.html',\n extra_context=None):\n \"\"\"\n Detail view of a user's profile.\n\n If the user has not yet created a profile, ``Http404`` will be\n raised.\n\n **Required arguments:**\n\n ``username``\n The username of the user whose profile is being displayed.\n\n **Optional arguments:**\n\n ``extra_context``\n A dictionary of variables to add to the template context. Any\n callable object in this dictionary will be called to produce\n the end result which appears in the context.\n\n ``public_profile_field``\n The name of a ``BooleanField`` on the profile model; if the\n value of that field on the user's profile is ``False``, the\n ``profile`` variable in the template will be ``None``. Use\n this feature to allow users to mark their profiles as not\n being publicly viewable.\n\n If this argument is not specified, it will be assumed that all\n users' profiles are publicly viewable.\n\n ``template_name``\n The name of the template to use for displaying the profile. 
If\n not specified, this will default to\n :template:`profiles/profile_detail.html`.\n\n **Context:**\n\n ``profile``\n The user's profile, or ``None`` if the user's profile is not\n publicly viewable (see the description of\n ``public_profile_field`` above).\n\n **Template:**\n\n ``template_name`` keyword argument or\n :template:`profiles/profile_detail.html`.\n\n \"\"\"\n user = get_object_or_404(User, username=username)\n try:\n profile_obj = user.profile\n except ObjectDoesNotExist:\n raise Http404\n if public_profile_field is not None and \\\n not getattr(profile_obj, public_profile_field):\n profile_obj = None\n\n if extra_context is None:\n extra_context = {}\n context = RequestContext(request)\n for key, value in list(extra_context.items()):\n context[key] = (value() if callable(value) else value)\n\n return render_to_response(template_name,\n {'profile': profile_obj},\n context_instance=context)\n", "path": "readthedocs/profiles/views.py"}, {"content": "\"\"\"Signal handling for core app.\"\"\"\n\nfrom __future__ import absolute_import\n\nimport logging\n\nfrom corsheaders import signals\nfrom django.dispatch import Signal\nfrom django.db.models import Q\nfrom future.backports.urllib.parse import urlparse\n\nfrom readthedocs.projects.models import Project, Domain\n\n\nlog = logging.getLogger(__name__)\n\nWHITELIST_URLS = ['/api/v2/footer_html', '/api/v2/search', '/api/v2/docsearch']\n\n\nwebhook_github = Signal(providing_args=['project', 'data', 'event'])\nwebhook_gitlab = Signal(providing_args=['project', 'data', 'event'])\nwebhook_bitbucket = Signal(providing_args=['project', 'data', 'event'])\n\n\ndef decide_if_cors(sender, request, **kwargs): # pylint: disable=unused-argument\n \"\"\"\n Decide whether a request should be given CORS access.\n\n This checks that:\n * The URL is whitelisted against our CORS-allowed domains\n * The Domain exists in our database, and belongs to the project being queried.\n\n Returns True when a request should be given CORS access.\n \"\"\"\n if 'HTTP_ORIGIN' not in request.META:\n return False\n host = urlparse(request.META['HTTP_ORIGIN']).netloc.split(':')[0]\n valid_url = False\n for url in WHITELIST_URLS:\n if request.path_info.startswith(url):\n valid_url = True\n\n if valid_url:\n project_slug = request.GET.get('project', None)\n try:\n project = Project.objects.get(slug=project_slug)\n except Project.DoesNotExist:\n log.warning(\n 'Invalid project passed to domain. [{project}:{domain}'.format(\n project=project_slug,\n domain=host,\n )\n )\n return False\n\n domain = Domain.objects.filter(\n Q(domain__icontains=host),\n Q(project=project) | Q(project__subprojects__child=project)\n )\n if domain.exists():\n return True\n\n return False\n\nsignals.check_request_enabled.connect(decide_if_cors)\n", "path": "readthedocs/core/signals.py"}]}
4,022
528
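The ReadTheDocs record above deletes only the projects and organizations that the departing user solely owns. A framework-free sketch of that sole-ownership rule is below — plain dictionaries stand in for the Django querysets, so the `owners` field and sample names are illustrative, not ReadTheDocs API; the real patch expresses the same idea with `annotate(num_users=Count('users')).exclude(num_users__gt=1)`.

```python
# Plain-Python sketch of "delete only what the user solely owns".
def sole_owned(items, user):
    """Return the items whose only owner is `user`."""
    return [item for item in items if item["owners"] == {user}]

projects = [
    {"name": "docs-a", "owners": {"alice"}},         # only alice -> safe to delete
    {"name": "docs-b", "owners": {"alice", "bob"}},  # shared -> must be kept
]

to_delete = sole_owned(projects, "alice")
print([p["name"] for p in to_delete])  # ['docs-a']
```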
gh_patches_debug_49285
rasdani/github-patches
git_diff
ansible__awx-12242
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Websocket not working at non-root path ### Please confirm the following - [X] I agree to follow this project's [code of conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html). - [X] I have checked the [current issues](https://github.com/ansible/awx/issues) for duplicates. - [X] I understand that AWX is open source software provided for free and that I might not receive a timely response. ### Summary Changes from #11342 and #652 are not full ### AWX version 21.0.0 ### Select the relevant components - [X] UI - [ ] API - [ ] Docs ### Installation method kubernetes ### Modifications no ### Ansible version _No response_ ### Operating system _No response_ ### Web browser _No response_ ### Steps to reproduce Deploy AWX with custom `ingress_path: /awx` ### Expected results websocket should work ### Actual results `2022-05-17 08:46:41,031 ERROR [-] daphne.ws_protocol [Failure instance: Traceback: <class 'ValueError'>: No route found for path 'awx/websocket/'. /var/lib/awx/venv/awx/lib64/python3.9/site-packages/autobahn/websocket/protocol.py:2841:processHandshake /var/lib/awx/venv/awx/lib64/python3.9/site-packages/txaio/tx.py:366:as_future /var/lib/awx/venv/awx/lib64/python3.9/site-packages/twisted/internet/defer.py:151:maybeDeferred /var/lib/awx/venv/awx/lib64/python3.9/site-packages/daphne/ws_protocol.py:72:onConnect --- <exception caught here> --- /var/lib/awx/venv/awx/lib64/python3.9/site-packages/twisted/internet/defer.py:151:maybeDeferred /var/lib/awx/venv/awx/lib64/python3.9/site-packages/daphne/server.py:201:create_application /var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/routing.py:54:__call__ /var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/sessions.py:47:__call__ /var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/sessions.py:145:__call__ /var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/sessions.py:169:__init__ /var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/middleware.py:31:__call__ /var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/routing.py:150:__call__ ] 2022-05-17 08:46:41,031 ERROR [Failure instance: Traceback: <class 'ValueError'>: No route found for path 'awx/websocket/'. 
/var/lib/awx/venv/awx/lib64/python3.9/site-packages/autobahn/websocket/protocol.py:2841:processHandshake /var/lib/awx/venv/awx/lib64/python3.9/site-packages/txaio/tx.py:366:as_future /var/lib/awx/venv/awx/lib64/python3.9/site-packages/twisted/internet/defer.py:151:maybeDeferred /var/lib/awx/venv/awx/lib64/python3.9/site-packages/daphne/ws_protocol.py:72:onConnect --- <exception caught here> --- /var/lib/awx/venv/awx/lib64/python3.9/site-packages/twisted/internet/defer.py:151:maybeDeferred /var/lib/awx/venv/awx/lib64/python3.9/site-packages/daphne/server.py:201:create_application /var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/routing.py:54:__call__ /var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/sessions.py:47:__call__ /var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/sessions.py:145:__call__ /var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/sessions.py:169:__init__ /var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/middleware.py:31:__call__ /var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/routing.py:150:__call__` ### Additional information It seems that issue is in https://github.com/ansible/awx/blob/48b016802c517ff04d1cff4c43e64f17bb77a7a8/awx/main/routing.py ``` websocket_urlpatterns = [ re_path(r'websocket/$', consumers.EventConsumer), re_path(r'websocket/broadcast/$', consumers.BroadcastConsumer), ] ``` From https://docs.djangoproject.com/en/4.0/ref/urls/: When a route ends with $ the whole requested URL, matching against path_info, must match the regular expression pattern (re.fullmatch() is used). Replacing with ``` websocket_urlpatterns = [ re_path(r'websocket/', consumers.EventConsumer), re_path(r'websocket/broadcast/', consumers.BroadcastConsumer), ] ``` solves the issue </issue> <code> [start of awx/main/routing.py] 1 import redis 2 import logging 3 4 from django.conf import settings 5 from django.urls import re_path 6 7 from channels.auth import AuthMiddlewareStack 8 from channels.routing import ProtocolTypeRouter, URLRouter 9 10 from . import consumers 11 12 13 logger = logging.getLogger('awx.main.routing') 14 15 16 class AWXProtocolTypeRouter(ProtocolTypeRouter): 17 def __init__(self, *args, **kwargs): 18 try: 19 r = redis.Redis.from_url(settings.BROKER_URL) 20 for k in r.scan_iter('asgi:*', 500): 21 logger.debug(f"cleaning up Redis key {k}") 22 r.delete(k) 23 except redis.exceptions.RedisError as e: 24 logger.warning("encountered an error communicating with redis.") 25 raise e 26 super().__init__(*args, **kwargs) 27 28 29 websocket_urlpatterns = [ 30 re_path(r'websocket/$', consumers.EventConsumer.as_asgi()), 31 re_path(r'websocket/broadcast/$', consumers.BroadcastConsumer.as_asgi()), 32 ] 33 34 application = AWXProtocolTypeRouter( 35 { 36 'websocket': AuthMiddlewareStack(URLRouter(websocket_urlpatterns)), 37 } 38 ) 39 [end of awx/main/routing.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/awx/main/routing.py b/awx/main/routing.py --- a/awx/main/routing.py +++ b/awx/main/routing.py @@ -27,8 +27,8 @@ websocket_urlpatterns = [ - re_path(r'websocket/$', consumers.EventConsumer.as_asgi()), - re_path(r'websocket/broadcast/$', consumers.BroadcastConsumer.as_asgi()), + re_path(r'websocket/', consumers.EventConsumer.as_asgi()), + re_path(r'websocket/broadcast/', consumers.BroadcastConsumer.as_asgi()), ] application = AWXProtocolTypeRouter(
{"golden_diff": "diff --git a/awx/main/routing.py b/awx/main/routing.py\n--- a/awx/main/routing.py\n+++ b/awx/main/routing.py\n@@ -27,8 +27,8 @@\n \n \n websocket_urlpatterns = [\n- re_path(r'websocket/$', consumers.EventConsumer.as_asgi()),\n- re_path(r'websocket/broadcast/$', consumers.BroadcastConsumer.as_asgi()),\n+ re_path(r'websocket/', consumers.EventConsumer.as_asgi()),\n+ re_path(r'websocket/broadcast/', consumers.BroadcastConsumer.as_asgi()),\n ]\n \n application = AWXProtocolTypeRouter(\n", "issue": "Websocket not working at non-root path\n### Please confirm the following\n\n- [X] I agree to follow this project's [code of conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html).\n- [X] I have checked the [current issues](https://github.com/ansible/awx/issues) for duplicates.\n- [X] I understand that AWX is open source software provided for free and that I might not receive a timely response.\n\n### Summary\n\nChanges from #11342 and #652 are not full\n\n### AWX version\n\n21.0.0\n\n### Select the relevant components\n\n- [X] UI\n- [ ] API\n- [ ] Docs\n\n### Installation method\n\nkubernetes\n\n### Modifications\n\nno\n\n### Ansible version\n\n_No response_\n\n### Operating system\n\n_No response_\n\n### Web browser\n\n_No response_\n\n### Steps to reproduce\n\nDeploy AWX with custom `ingress_path: /awx`\n\n### Expected results\n\nwebsocket should work\n\n### Actual results\n\n`2022-05-17 08:46:41,031 ERROR [-] daphne.ws_protocol [Failure instance: Traceback: <class 'ValueError'>: No route found for path 'awx/websocket/'.\r\n/var/lib/awx/venv/awx/lib64/python3.9/site-packages/autobahn/websocket/protocol.py:2841:processHandshake\r\n/var/lib/awx/venv/awx/lib64/python3.9/site-packages/txaio/tx.py:366:as_future\r\n/var/lib/awx/venv/awx/lib64/python3.9/site-packages/twisted/internet/defer.py:151:maybeDeferred\r\n/var/lib/awx/venv/awx/lib64/python3.9/site-packages/daphne/ws_protocol.py:72:onConnect\r\n--- <exception caught here> ---\r\n/var/lib/awx/venv/awx/lib64/python3.9/site-packages/twisted/internet/defer.py:151:maybeDeferred\r\n/var/lib/awx/venv/awx/lib64/python3.9/site-packages/daphne/server.py:201:create_application\r\n/var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/routing.py:54:__call__\r\n/var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/sessions.py:47:__call__\r\n/var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/sessions.py:145:__call__\r\n/var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/sessions.py:169:__init__\r\n/var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/middleware.py:31:__call__\r\n/var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/routing.py:150:__call__\r\n]\r\n2022-05-17 08:46:41,031 ERROR [Failure instance: Traceback: <class 'ValueError'>: No route found for path 'awx/websocket/'.\r\n/var/lib/awx/venv/awx/lib64/python3.9/site-packages/autobahn/websocket/protocol.py:2841:processHandshake\r\n/var/lib/awx/venv/awx/lib64/python3.9/site-packages/txaio/tx.py:366:as_future\r\n/var/lib/awx/venv/awx/lib64/python3.9/site-packages/twisted/internet/defer.py:151:maybeDeferred\r\n/var/lib/awx/venv/awx/lib64/python3.9/site-packages/daphne/ws_protocol.py:72:onConnect\r\n--- <exception caught here> 
---\r\n/var/lib/awx/venv/awx/lib64/python3.9/site-packages/twisted/internet/defer.py:151:maybeDeferred\r\n/var/lib/awx/venv/awx/lib64/python3.9/site-packages/daphne/server.py:201:create_application\r\n/var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/routing.py:54:__call__\r\n/var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/sessions.py:47:__call__\r\n/var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/sessions.py:145:__call__\r\n/var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/sessions.py:169:__init__\r\n/var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/middleware.py:31:__call__\r\n/var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/routing.py:150:__call__`\n\n### Additional information\n\nIt seems that issue is in https://github.com/ansible/awx/blob/48b016802c517ff04d1cff4c43e64f17bb77a7a8/awx/main/routing.py\r\n\r\n```\r\nwebsocket_urlpatterns = [\r\n re_path(r'websocket/$', consumers.EventConsumer),\r\n re_path(r'websocket/broadcast/$', consumers.BroadcastConsumer),\r\n]\r\n```\r\nFrom https://docs.djangoproject.com/en/4.0/ref/urls/:\r\n\r\nWhen a route ends with $ the whole requested URL, matching against path_info, must match the regular expression pattern (re.fullmatch() is used).\r\n\r\nReplacing with\r\n\r\n```\r\nwebsocket_urlpatterns = [\r\n re_path(r'websocket/', consumers.EventConsumer),\r\n re_path(r'websocket/broadcast/', consumers.BroadcastConsumer),\r\n]\r\n```\r\n\r\nsolves the issue\n", "before_files": [{"content": "import redis\nimport logging\n\nfrom django.conf import settings\nfrom django.urls import re_path\n\nfrom channels.auth import AuthMiddlewareStack\nfrom channels.routing import ProtocolTypeRouter, URLRouter\n\nfrom . import consumers\n\n\nlogger = logging.getLogger('awx.main.routing')\n\n\nclass AWXProtocolTypeRouter(ProtocolTypeRouter):\n def __init__(self, *args, **kwargs):\n try:\n r = redis.Redis.from_url(settings.BROKER_URL)\n for k in r.scan_iter('asgi:*', 500):\n logger.debug(f\"cleaning up Redis key {k}\")\n r.delete(k)\n except redis.exceptions.RedisError as e:\n logger.warning(\"encountered an error communicating with redis.\")\n raise e\n super().__init__(*args, **kwargs)\n\n\nwebsocket_urlpatterns = [\n re_path(r'websocket/$', consumers.EventConsumer.as_asgi()),\n re_path(r'websocket/broadcast/$', consumers.BroadcastConsumer.as_asgi()),\n]\n\napplication = AWXProtocolTypeRouter(\n {\n 'websocket': AuthMiddlewareStack(URLRouter(websocket_urlpatterns)),\n }\n)\n", "path": "awx/main/routing.py"}]}
2,188
132
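The regex behaviour the AWX issue above quotes from the Django docs can be reproduced with the standard `re` module alone: a route ending in `$` is effectively checked with `re.fullmatch()`, while a route without it falls back to `re.search()`. The path string is taken from the "No route found for path 'awx/websocket/'" traceback; the rest is an illustrative sketch, not AWX or Channels code.

```python
import re

# Path seen by the router when AWX is served under a /awx prefix.
path = "awx/websocket/"

# Route ending in "$": treated as a full match, so the leading "awx/"
# prefix makes it fail -> "No route found".
print(re.fullmatch(r"websocket/$", path))  # None

# Route without the trailing "$": a search, so it still matches even
# with the extra prefix in front.
print(re.search(r"websocket/", path))      # <re.Match object; span=(4, 14), match='websocket/'>
```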
gh_patches_debug_18711
rasdani/github-patches
git_diff
feast-dev__feast-1766
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add validation for project name **Is your feature request related to a problem? Please describe.** A follow up to https://github.com/feast-dev/feast/pull/1752 - I think project name should definitely be validated early on. Otherwise could run into an error message (https://github.com/feast-dev/feast/pull/1752#issue-700598472) that is not obviously related to the project name. **Describe the solution you'd like** Validation upon feast apply that checks the project name. Can use the name validation function [here](https://github.com/feast-dev/feast/blob/a548c48927e6f6858d91a93cf356b43fe7c67aad/sdk/python/feast/repo_operations.py#L390). **Describe alternatives you've considered** There's probably more validation coverage to be done, and it could be done at a later time if there are higher priorities at the moment. **Additional context** @tedhtchang to take this simple addition on per https://github.com/feast-dev/feast/pull/1752#issuecomment-891339661 </issue> <code> [start of sdk/python/feast/repo_config.py] 1 from pathlib import Path 2 from typing import Any 3 4 import yaml 5 from pydantic import BaseModel, StrictInt, StrictStr, ValidationError, root_validator 6 from pydantic.error_wrappers import ErrorWrapper 7 from pydantic.typing import Dict, Optional, Union 8 9 from feast.importer import get_class_from_type 10 from feast.usage import log_exceptions 11 12 # These dict exists so that: 13 # - existing values for the online store type in featurestore.yaml files continue to work in a backwards compatible way 14 # - first party and third party implementations can use the same class loading code path. 15 ONLINE_STORE_CLASS_FOR_TYPE = { 16 "sqlite": "feast.infra.online_stores.sqlite.SqliteOnlineStore", 17 "datastore": "feast.infra.online_stores.datastore.DatastoreOnlineStore", 18 "redis": "feast.infra.online_stores.redis.RedisOnlineStore", 19 "dynamodb": "feast.infra.online_stores.dynamodb.DynamoDBOnlineStore", 20 } 21 22 OFFLINE_STORE_CLASS_FOR_TYPE = { 23 "file": "feast.infra.offline_stores.file.FileOfflineStore", 24 "bigquery": "feast.infra.offline_stores.bigquery.BigQueryOfflineStore", 25 "redshift": "feast.infra.offline_stores.redshift.RedshiftOfflineStore", 26 } 27 28 29 class FeastBaseModel(BaseModel): 30 """ Feast Pydantic Configuration Class """ 31 32 class Config: 33 arbitrary_types_allowed = True 34 extra = "allow" 35 36 37 class FeastConfigBaseModel(BaseModel): 38 """ Feast Pydantic Configuration Class """ 39 40 class Config: 41 arbitrary_types_allowed = True 42 extra = "forbid" 43 44 45 class RegistryConfig(FeastBaseModel): 46 """ Metadata Store Configuration. Configuration that relates to reading from and writing to the Feast registry.""" 47 48 path: StrictStr 49 """ str: Path to metadata store. Can be a local path, or remote object storage path, e.g. a GCS URI """ 50 51 cache_ttl_seconds: StrictInt = 600 52 """int: The cache TTL is the amount of time registry state will be cached in memory. If this TTL is exceeded then 53 the registry will be refreshed when any feature store method asks for access to registry state. The TTL can be 54 set to infinity by setting TTL to 0 seconds, which means the cache will only be loaded once and will never 55 expire. Users can manually refresh the cache by calling feature_store.refresh_registry() """ 56 57 58 class RepoConfig(FeastBaseModel): 59 """ Repo config. 
Typically loaded from `feature_store.yaml` """ 60 61 registry: Union[StrictStr, RegistryConfig] = "data/registry.db" 62 """ str: Path to metadata store. Can be a local path, or remote object storage path, e.g. a GCS URI """ 63 64 project: StrictStr 65 """ str: Feast project id. This can be any alphanumeric string up to 16 characters. 66 You can have multiple independent feature repositories deployed to the same cloud 67 provider account, as long as they have different project ids. 68 """ 69 70 provider: StrictStr 71 """ str: local or gcp or aws """ 72 73 online_store: Any 74 """ OnlineStoreConfig: Online store configuration (optional depending on provider) """ 75 76 offline_store: Any 77 """ OfflineStoreConfig: Offline store configuration (optional depending on provider) """ 78 79 repo_path: Optional[Path] = None 80 81 def __init__(self, **data: Any): 82 super().__init__(**data) 83 84 if isinstance(self.online_store, Dict): 85 self.online_store = get_online_config_from_type(self.online_store["type"])( 86 **self.online_store 87 ) 88 elif isinstance(self.online_store, str): 89 self.online_store = get_online_config_from_type(self.online_store)() 90 91 if isinstance(self.offline_store, Dict): 92 self.offline_store = get_offline_config_from_type( 93 self.offline_store["type"] 94 )(**self.offline_store) 95 elif isinstance(self.offline_store, str): 96 self.offline_store = get_offline_config_from_type(self.offline_store)() 97 98 def get_registry_config(self): 99 if isinstance(self.registry, str): 100 return RegistryConfig(path=self.registry) 101 else: 102 return self.registry 103 104 @root_validator(pre=True) 105 @log_exceptions 106 def _validate_online_store_config(cls, values): 107 # This method will validate whether the online store configurations are set correctly. This explicit validation 108 # is necessary because Pydantic Unions throw very verbose and cryptic exceptions. We also use this method to 109 # impute the default online store type based on the selected provider. For the time being this method should be 110 # considered tech debt until we can implement https://github.com/samuelcolvin/pydantic/issues/619 or a more 111 # granular configuration system 112 113 # Set empty online_store config if it isn't set explicitly 114 if "online_store" not in values: 115 values["online_store"] = dict() 116 117 # Skip if we aren't creating the configuration from a dict 118 if not isinstance(values["online_store"], Dict): 119 return values 120 121 # Make sure that the provider configuration is set. We need it to set the defaults 122 assert "provider" in values 123 124 # Set the default type 125 # This is only direct reference to a provider or online store that we should have 126 # for backwards compatibility. 
127 if "type" not in values["online_store"]: 128 if values["provider"] == "local": 129 values["online_store"]["type"] = "sqlite" 130 elif values["provider"] == "gcp": 131 values["online_store"]["type"] = "datastore" 132 elif values["provider"] == "aws": 133 values["online_store"]["type"] = "dynamodb" 134 135 online_store_type = values["online_store"]["type"] 136 137 # Validate the dict to ensure one of the union types match 138 try: 139 online_config_class = get_online_config_from_type(online_store_type) 140 online_config_class(**values["online_store"]) 141 except ValidationError as e: 142 raise ValidationError( 143 [ErrorWrapper(e, loc="online_store")], model=RepoConfig, 144 ) 145 146 return values 147 148 @root_validator(pre=True) 149 def _validate_offline_store_config(cls, values): 150 # Set empty offline_store config if it isn't set explicitly 151 if "offline_store" not in values: 152 values["offline_store"] = dict() 153 154 # Skip if we aren't creating the configuration from a dict 155 if not isinstance(values["offline_store"], Dict): 156 return values 157 158 # Make sure that the provider configuration is set. We need it to set the defaults 159 assert "provider" in values 160 161 # Set the default type 162 if "type" not in values["offline_store"]: 163 if values["provider"] == "local": 164 values["offline_store"]["type"] = "file" 165 elif values["provider"] == "gcp": 166 values["offline_store"]["type"] = "bigquery" 167 elif values["provider"] == "aws": 168 values["offline_store"]["type"] = "redshift" 169 170 offline_store_type = values["offline_store"]["type"] 171 172 # Validate the dict to ensure one of the union types match 173 try: 174 offline_config_class = get_offline_config_from_type(offline_store_type) 175 offline_config_class(**values["offline_store"]) 176 except ValidationError as e: 177 raise ValidationError( 178 [ErrorWrapper(e, loc="offline_store")], model=RepoConfig, 179 ) 180 181 return values 182 183 184 class FeastConfigError(Exception): 185 def __init__(self, error_message, config_path): 186 self._error_message = error_message 187 self._config_path = config_path 188 super().__init__(self._error_message) 189 190 def __str__(self) -> str: 191 return f"{self._error_message}\nat {self._config_path}" 192 193 def __repr__(self) -> str: 194 return ( 195 f"FeastConfigError({repr(self._error_message)}, {repr(self._config_path)})" 196 ) 197 198 199 def get_data_source_class_from_type(data_source_type: str): 200 module_name, config_class_name = data_source_type.rsplit(".", 1) 201 return get_class_from_type(module_name, config_class_name, "Source") 202 203 204 def get_online_config_from_type(online_store_type: str): 205 if online_store_type in ONLINE_STORE_CLASS_FOR_TYPE: 206 online_store_type = ONLINE_STORE_CLASS_FOR_TYPE[online_store_type] 207 else: 208 assert online_store_type.endswith("OnlineStore") 209 module_name, online_store_class_type = online_store_type.rsplit(".", 1) 210 config_class_name = f"{online_store_class_type}Config" 211 212 return get_class_from_type(module_name, config_class_name, config_class_name) 213 214 215 def get_offline_config_from_type(offline_store_type: str): 216 if offline_store_type in OFFLINE_STORE_CLASS_FOR_TYPE: 217 offline_store_type = OFFLINE_STORE_CLASS_FOR_TYPE[offline_store_type] 218 else: 219 assert offline_store_type.endswith("OfflineStore") 220 module_name, offline_store_class_type = offline_store_type.rsplit(".", 1) 221 config_class_name = f"{offline_store_class_type}Config" 222 223 return get_class_from_type(module_name, 
config_class_name, config_class_name) 224 225 226 def load_repo_config(repo_path: Path) -> RepoConfig: 227 config_path = repo_path / "feature_store.yaml" 228 229 with open(config_path) as f: 230 raw_config = yaml.safe_load(f) 231 try: 232 c = RepoConfig(**raw_config) 233 c.repo_path = repo_path 234 return c 235 except ValidationError as e: 236 raise FeastConfigError(e, config_path) 237 [end of sdk/python/feast/repo_config.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/sdk/python/feast/repo_config.py b/sdk/python/feast/repo_config.py --- a/sdk/python/feast/repo_config.py +++ b/sdk/python/feast/repo_config.py @@ -2,7 +2,14 @@ from typing import Any import yaml -from pydantic import BaseModel, StrictInt, StrictStr, ValidationError, root_validator +from pydantic import ( + BaseModel, + StrictInt, + StrictStr, + ValidationError, + root_validator, + validator, +) from pydantic.error_wrappers import ErrorWrapper from pydantic.typing import Dict, Optional, Union @@ -180,6 +187,17 @@ return values + @validator("project") + def _validate_project_name(cls, v): + from feast.repo_operations import is_valid_name + + if not is_valid_name(v): + raise ValueError( + f"Project name, {v}, should only have " + f"alphanumerical values and underscores but not start with an underscore." + ) + return v + class FeastConfigError(Exception): def __init__(self, error_message, config_path):
{"golden_diff": "diff --git a/sdk/python/feast/repo_config.py b/sdk/python/feast/repo_config.py\n--- a/sdk/python/feast/repo_config.py\n+++ b/sdk/python/feast/repo_config.py\n@@ -2,7 +2,14 @@\n from typing import Any\n \n import yaml\n-from pydantic import BaseModel, StrictInt, StrictStr, ValidationError, root_validator\n+from pydantic import (\n+ BaseModel,\n+ StrictInt,\n+ StrictStr,\n+ ValidationError,\n+ root_validator,\n+ validator,\n+)\n from pydantic.error_wrappers import ErrorWrapper\n from pydantic.typing import Dict, Optional, Union\n \n@@ -180,6 +187,17 @@\n \n return values\n \n+ @validator(\"project\")\n+ def _validate_project_name(cls, v):\n+ from feast.repo_operations import is_valid_name\n+\n+ if not is_valid_name(v):\n+ raise ValueError(\n+ f\"Project name, {v}, should only have \"\n+ f\"alphanumerical values and underscores but not start with an underscore.\"\n+ )\n+ return v\n+\n \n class FeastConfigError(Exception):\n def __init__(self, error_message, config_path):\n", "issue": "Add validation for project name\n**Is your feature request related to a problem? Please describe.**\r\nA follow up to https://github.com/feast-dev/feast/pull/1752 - I think project name should definitely be validated early on. Otherwise could run into an error message (https://github.com/feast-dev/feast/pull/1752#issue-700598472) that is not obviously related to the project name.\r\n\r\n**Describe the solution you'd like**\r\nValidation upon feast apply that checks the project name. Can use the name validation function [here](https://github.com/feast-dev/feast/blob/a548c48927e6f6858d91a93cf356b43fe7c67aad/sdk/python/feast/repo_operations.py#L390).\r\n\r\n**Describe alternatives you've considered**\r\nThere's probably more validation coverage to be done, and it could be done at a later time if there are higher priorities at the moment.\r\n\r\n**Additional context**\r\n@tedhtchang to take this simple addition on per https://github.com/feast-dev/feast/pull/1752#issuecomment-891339661\r\n\n", "before_files": [{"content": "from pathlib import Path\nfrom typing import Any\n\nimport yaml\nfrom pydantic import BaseModel, StrictInt, StrictStr, ValidationError, root_validator\nfrom pydantic.error_wrappers import ErrorWrapper\nfrom pydantic.typing import Dict, Optional, Union\n\nfrom feast.importer import get_class_from_type\nfrom feast.usage import log_exceptions\n\n# These dict exists so that:\n# - existing values for the online store type in featurestore.yaml files continue to work in a backwards compatible way\n# - first party and third party implementations can use the same class loading code path.\nONLINE_STORE_CLASS_FOR_TYPE = {\n \"sqlite\": \"feast.infra.online_stores.sqlite.SqliteOnlineStore\",\n \"datastore\": \"feast.infra.online_stores.datastore.DatastoreOnlineStore\",\n \"redis\": \"feast.infra.online_stores.redis.RedisOnlineStore\",\n \"dynamodb\": \"feast.infra.online_stores.dynamodb.DynamoDBOnlineStore\",\n}\n\nOFFLINE_STORE_CLASS_FOR_TYPE = {\n \"file\": \"feast.infra.offline_stores.file.FileOfflineStore\",\n \"bigquery\": \"feast.infra.offline_stores.bigquery.BigQueryOfflineStore\",\n \"redshift\": \"feast.infra.offline_stores.redshift.RedshiftOfflineStore\",\n}\n\n\nclass FeastBaseModel(BaseModel):\n \"\"\" Feast Pydantic Configuration Class \"\"\"\n\n class Config:\n arbitrary_types_allowed = True\n extra = \"allow\"\n\n\nclass FeastConfigBaseModel(BaseModel):\n \"\"\" Feast Pydantic Configuration Class \"\"\"\n\n class Config:\n arbitrary_types_allowed = True\n extra = 
\"forbid\"\n\n\nclass RegistryConfig(FeastBaseModel):\n \"\"\" Metadata Store Configuration. Configuration that relates to reading from and writing to the Feast registry.\"\"\"\n\n path: StrictStr\n \"\"\" str: Path to metadata store. Can be a local path, or remote object storage path, e.g. a GCS URI \"\"\"\n\n cache_ttl_seconds: StrictInt = 600\n \"\"\"int: The cache TTL is the amount of time registry state will be cached in memory. If this TTL is exceeded then\n the registry will be refreshed when any feature store method asks for access to registry state. The TTL can be\n set to infinity by setting TTL to 0 seconds, which means the cache will only be loaded once and will never\n expire. Users can manually refresh the cache by calling feature_store.refresh_registry() \"\"\"\n\n\nclass RepoConfig(FeastBaseModel):\n \"\"\" Repo config. Typically loaded from `feature_store.yaml` \"\"\"\n\n registry: Union[StrictStr, RegistryConfig] = \"data/registry.db\"\n \"\"\" str: Path to metadata store. Can be a local path, or remote object storage path, e.g. a GCS URI \"\"\"\n\n project: StrictStr\n \"\"\" str: Feast project id. This can be any alphanumeric string up to 16 characters.\n You can have multiple independent feature repositories deployed to the same cloud\n provider account, as long as they have different project ids.\n \"\"\"\n\n provider: StrictStr\n \"\"\" str: local or gcp or aws \"\"\"\n\n online_store: Any\n \"\"\" OnlineStoreConfig: Online store configuration (optional depending on provider) \"\"\"\n\n offline_store: Any\n \"\"\" OfflineStoreConfig: Offline store configuration (optional depending on provider) \"\"\"\n\n repo_path: Optional[Path] = None\n\n def __init__(self, **data: Any):\n super().__init__(**data)\n\n if isinstance(self.online_store, Dict):\n self.online_store = get_online_config_from_type(self.online_store[\"type\"])(\n **self.online_store\n )\n elif isinstance(self.online_store, str):\n self.online_store = get_online_config_from_type(self.online_store)()\n\n if isinstance(self.offline_store, Dict):\n self.offline_store = get_offline_config_from_type(\n self.offline_store[\"type\"]\n )(**self.offline_store)\n elif isinstance(self.offline_store, str):\n self.offline_store = get_offline_config_from_type(self.offline_store)()\n\n def get_registry_config(self):\n if isinstance(self.registry, str):\n return RegistryConfig(path=self.registry)\n else:\n return self.registry\n\n @root_validator(pre=True)\n @log_exceptions\n def _validate_online_store_config(cls, values):\n # This method will validate whether the online store configurations are set correctly. This explicit validation\n # is necessary because Pydantic Unions throw very verbose and cryptic exceptions. We also use this method to\n # impute the default online store type based on the selected provider. For the time being this method should be\n # considered tech debt until we can implement https://github.com/samuelcolvin/pydantic/issues/619 or a more\n # granular configuration system\n\n # Set empty online_store config if it isn't set explicitly\n if \"online_store\" not in values:\n values[\"online_store\"] = dict()\n\n # Skip if we aren't creating the configuration from a dict\n if not isinstance(values[\"online_store\"], Dict):\n return values\n\n # Make sure that the provider configuration is set. 
We need it to set the defaults\n assert \"provider\" in values\n\n # Set the default type\n # This is only direct reference to a provider or online store that we should have\n # for backwards compatibility.\n if \"type\" not in values[\"online_store\"]:\n if values[\"provider\"] == \"local\":\n values[\"online_store\"][\"type\"] = \"sqlite\"\n elif values[\"provider\"] == \"gcp\":\n values[\"online_store\"][\"type\"] = \"datastore\"\n elif values[\"provider\"] == \"aws\":\n values[\"online_store\"][\"type\"] = \"dynamodb\"\n\n online_store_type = values[\"online_store\"][\"type\"]\n\n # Validate the dict to ensure one of the union types match\n try:\n online_config_class = get_online_config_from_type(online_store_type)\n online_config_class(**values[\"online_store\"])\n except ValidationError as e:\n raise ValidationError(\n [ErrorWrapper(e, loc=\"online_store\")], model=RepoConfig,\n )\n\n return values\n\n @root_validator(pre=True)\n def _validate_offline_store_config(cls, values):\n # Set empty offline_store config if it isn't set explicitly\n if \"offline_store\" not in values:\n values[\"offline_store\"] = dict()\n\n # Skip if we aren't creating the configuration from a dict\n if not isinstance(values[\"offline_store\"], Dict):\n return values\n\n # Make sure that the provider configuration is set. We need it to set the defaults\n assert \"provider\" in values\n\n # Set the default type\n if \"type\" not in values[\"offline_store\"]:\n if values[\"provider\"] == \"local\":\n values[\"offline_store\"][\"type\"] = \"file\"\n elif values[\"provider\"] == \"gcp\":\n values[\"offline_store\"][\"type\"] = \"bigquery\"\n elif values[\"provider\"] == \"aws\":\n values[\"offline_store\"][\"type\"] = \"redshift\"\n\n offline_store_type = values[\"offline_store\"][\"type\"]\n\n # Validate the dict to ensure one of the union types match\n try:\n offline_config_class = get_offline_config_from_type(offline_store_type)\n offline_config_class(**values[\"offline_store\"])\n except ValidationError as e:\n raise ValidationError(\n [ErrorWrapper(e, loc=\"offline_store\")], model=RepoConfig,\n )\n\n return values\n\n\nclass FeastConfigError(Exception):\n def __init__(self, error_message, config_path):\n self._error_message = error_message\n self._config_path = config_path\n super().__init__(self._error_message)\n\n def __str__(self) -> str:\n return f\"{self._error_message}\\nat {self._config_path}\"\n\n def __repr__(self) -> str:\n return (\n f\"FeastConfigError({repr(self._error_message)}, {repr(self._config_path)})\"\n )\n\n\ndef get_data_source_class_from_type(data_source_type: str):\n module_name, config_class_name = data_source_type.rsplit(\".\", 1)\n return get_class_from_type(module_name, config_class_name, \"Source\")\n\n\ndef get_online_config_from_type(online_store_type: str):\n if online_store_type in ONLINE_STORE_CLASS_FOR_TYPE:\n online_store_type = ONLINE_STORE_CLASS_FOR_TYPE[online_store_type]\n else:\n assert online_store_type.endswith(\"OnlineStore\")\n module_name, online_store_class_type = online_store_type.rsplit(\".\", 1)\n config_class_name = f\"{online_store_class_type}Config\"\n\n return get_class_from_type(module_name, config_class_name, config_class_name)\n\n\ndef get_offline_config_from_type(offline_store_type: str):\n if offline_store_type in OFFLINE_STORE_CLASS_FOR_TYPE:\n offline_store_type = OFFLINE_STORE_CLASS_FOR_TYPE[offline_store_type]\n else:\n assert offline_store_type.endswith(\"OfflineStore\")\n module_name, offline_store_class_type = 
offline_store_type.rsplit(\".\", 1)\n config_class_name = f\"{offline_store_class_type}Config\"\n\n return get_class_from_type(module_name, config_class_name, config_class_name)\n\n\ndef load_repo_config(repo_path: Path) -> RepoConfig:\n config_path = repo_path / \"feature_store.yaml\"\n\n with open(config_path) as f:\n raw_config = yaml.safe_load(f)\n try:\n c = RepoConfig(**raw_config)\n c.repo_path = repo_path\n return c\n except ValidationError as e:\n raise FeastConfigError(e, config_path)\n", "path": "sdk/python/feast/repo_config.py"}]}
3,507
270
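The Feast record above wires the project-name check into a pydantic `@validator`. Below is a standalone sketch of the same pattern; it assumes pydantic v1's `validator` API (the style used in the diff) and a simplified name rule, so `is_valid_name` here is a stand-in rather than the real helper from `feast.repo_operations`.

```python
import re

from pydantic import BaseModel, ValidationError, validator


# Simplified stand-in for feast.repo_operations.is_valid_name: reject empty
# names, leading underscores, and anything outside [A-Za-z0-9_].
def is_valid_name(name: str) -> bool:
    return bool(re.fullmatch(r"[A-Za-z0-9][A-Za-z0-9_]*", name))


class RepoConfig(BaseModel):
    project: str

    @validator("project")
    def _validate_project_name(cls, v):
        if not is_valid_name(v):
            raise ValueError(
                f"Project name, {v}, should only have alphanumerical values "
                f"and underscores but not start with an underscore."
            )
        return v


print(RepoConfig(project="driver_ranking"))  # passes validation
try:
    RepoConfig(project="_bad name!")         # rejected up front, at config-load time
except ValidationError as err:
    print(err)
```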
gh_patches_debug_20926
rasdani/github-patches
git_diff
projectmesa__mesa-323
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> When server is launched, browser window should open automagically When guest lecturing at GMU, I observed that users did not read the instructions to visit 127.0.0.1... in their browser. I think jupyter spoiled them ;-). We should add functionality to have the browser auto launch when someone kicks off a model. </issue> <code> [start of mesa/visualization/ModularVisualization.py] 1 # -*- coding: utf-8 -*- 2 """ 3 ModularServer 4 ============= 5 6 A visualization server which renders a model via one or more elements. 7 8 The concept for the modular visualization server as follows: 9 A visualization is composed of VisualizationElements, each of which defines how 10 to generate some visualization from a model instance and render it on the 11 client. VisualizationElements may be anything from a simple text display to 12 a multilayered HTML5 canvas. 13 14 The actual server is launched with one or more VisualizationElements; 15 it runs the model object through each of them, generating data to be sent to 16 the client. The client page is also generated based on the JavaScript code 17 provided by each element. 18 19 This file consists of the following classes: 20 21 VisualizationElement: Parent class for all other visualization elements, with 22 the minimal necessary options. 23 PageHandler: The handler for the visualization page, generated from a template 24 and built from the various visualization elements. 25 SocketHandler: Handles the websocket connection between the client page and 26 the server. 27 ModularServer: The overall visualization application class which stores and 28 controls the model and visualization instance. 29 30 31 ModularServer should *not* need to be subclassed on a model-by-model basis; it 32 should be primarily a pass-through for VisualizationElement subclasses, which 33 define the actual visualization specifics. 34 35 For example, suppose we have created two visualization elements for our model, 36 called canvasvis and graphvis; we would launch a server with: 37 38 server = ModularServer(MyModel, [canvasvis, graphvis], name="My Model") 39 server.launch() 40 41 The client keeps track of what step it is showing. Clicking the Step button in 42 the browser sends a message requesting the viz_state corresponding to the next 43 step position, which is then sent back to the client via the websocket. 44 45 The websocket protocol is as follows: 46 Each message is a JSON object, with a "type" property which defines the rest of 47 the structure. 48 49 Server -> Client: 50 Send over the model state to visualize. 51 Model state is a list, with each element corresponding to a div; each div 52 is expected to have a render function associated with it, which knows how 53 to render that particular data. The example below includes two elements: 54 the first is data for a CanvasGrid, the second for a raw text display. 55 56 { 57 "type": "viz_state", 58 "data": [{0:[ {"Shape": "circle", "x": 0, "y": 0, "r": 0.5, 59 "Color": "#AAAAAA", "Filled": "true", "Layer": 0, 60 "text": 'A', "text_color": "white" }]}, 61 "Shape Count: 1"] 62 } 63 64 Informs the client that the model is over. 65 {"type": "end"} 66 67 Client -> Server: 68 Reset the model. 69 TODO: Allow this to come with parameters 70 { 71 "type": "reset" 72 } 73 74 Get a given state. 75 { 76 "type": "get_step", 77 "step:" index of the step to get. 
78 } 79 80 """ 81 import os 82 83 import tornado.ioloop 84 import tornado.template 85 import tornado.web 86 import tornado.websocket 87 import tornado.escape 88 import tornado.gen 89 90 # Suppress several pylint warnings for this file. 91 # Attributes being defined outside of init is a Tornado feature. 92 # pylint: disable=attribute-defined-outside-init 93 94 95 class VisualizationElement: 96 """ 97 Defines an element of the visualization. 98 99 Attributes: 100 package_includes: A list of external JavaScript files to include that 101 are part of the Mesa packages. 102 local_includes: A list of JavaScript files that are local to the 103 directory that the server is being run in. 104 js_code: A JavaScript code string to instantiate the element. 105 106 Methods: 107 render: Takes a model object, and produces JSON data which can be sent 108 to the client. 109 110 """ 111 112 package_includes = [] 113 local_includes = [] 114 js_code = '' 115 render_args = {} 116 117 def __init__(self): 118 pass 119 120 def render(self, model): 121 """ Build visualization data from a model object. 122 123 Args: 124 model: A model object 125 126 Returns: 127 A JSON-ready object. 128 129 """ 130 return "<b>VisualizationElement goes here</b>." 131 132 # ============================================================================= 133 # Actual Tornado code starts here: 134 135 136 class PageHandler(tornado.web.RequestHandler): 137 """ Handler for the HTML template which holds the visualization. """ 138 139 def get(self): 140 elements = self.application.visualization_elements 141 for i, element in enumerate(elements): 142 element.index = i 143 self.render("modular_template.html", port=self.application.port, 144 model_name=self.application.model_name, 145 package_includes=self.application.package_includes, 146 local_includes=self.application.local_includes, 147 scripts=self.application.js_code) 148 149 150 class SocketHandler(tornado.websocket.WebSocketHandler): 151 """ Handler for websocket. """ 152 def open(self): 153 if self.application.verbose: 154 print("Socket opened!") 155 156 def check_origin(self, origin): 157 return True 158 159 def on_message(self, message): 160 """ Receiving a message from the websocket, parse, and act accordingly. 161 162 """ 163 if self.application.verbose: 164 print(message) 165 msg = tornado.escape.json_decode(message) 166 167 if msg["type"] == "get_step": 168 self.application.model.step() 169 self.write_message({"type": "viz_state", 170 "data": self.application.render_model()}) 171 172 elif msg["type"] == "reset": 173 self.application.reset_model() 174 self.write_message({"type": "viz_state", 175 "data": self.application.render_model()}) 176 177 else: 178 if self.application.verbose: 179 print("Unexpected message!") 180 181 182 class ModularServer(tornado.web.Application): 183 """ Main visualization application. 
""" 184 verbose = True 185 186 model_name = "Mesa Model" 187 model_cls = None # A model class 188 portrayal_method = None 189 port = 8888 # Default port to listen on 190 canvas_width = 500 191 canvas_height = 500 192 grid_height = 0 193 grid_width = 0 194 195 max_steps = 100000 196 197 model_args = () 198 model_kwargs = {} 199 200 # Handlers and other globals: 201 page_handler = (r'/', PageHandler) 202 socket_handler = (r'/ws', SocketHandler) 203 static_handler = (r'/static/(.*)', tornado.web.StaticFileHandler, 204 {"path": os.path.dirname(__file__) + "/templates"}) 205 local_handler = (r'/local/(.*)', tornado.web.StaticFileHandler, 206 {"path": ''}) 207 208 handlers = [page_handler, socket_handler, static_handler, local_handler] 209 210 settings = {"debug": True, 211 "template_path": os.path.dirname(__file__) + "/templates"} 212 213 def __init__(self, model_cls, visualization_elements, name="Mesa Model", 214 *args, **kwargs): 215 """ Create a new visualization server with the given elements. """ 216 # Prep visualization elements: 217 self.visualization_elements = visualization_elements 218 self.package_includes = set() 219 self.local_includes = set() 220 self.js_code = [] 221 for element in self.visualization_elements: 222 for include_file in element.package_includes: 223 self.package_includes.add(include_file) 224 for include_file in element.local_includes: 225 self.local_includes.add(include_file) 226 self.js_code.append(element.js_code) 227 228 # Initializing the model 229 self.model_name = name 230 self.model_cls = model_cls 231 232 self.model_args = args 233 self.model_kwargs = kwargs 234 self.reset_model() 235 236 # Initializing the application itself: 237 super().__init__(self.handlers, **self.settings) 238 239 def reset_model(self): 240 """ Reinstantiate the model object, using the current parameters. """ 241 self.model = self.model_cls(*self.model_args, **self.model_kwargs) 242 243 def render_model(self): 244 """ Turn the current state of the model into a dictionary of 245 visualizations 246 247 """ 248 visualization_state = [] 249 for element in self.visualization_elements: 250 element_state = element.render(self.model) 251 visualization_state.append(element_state) 252 return visualization_state 253 254 def launch(self, port=None): 255 """ Run the app. """ 256 if port is not None: 257 self.port = port 258 print('Interface starting at http://127.0.0.1:{PORT}'.format(PORT=self.port)) 259 self.listen(self.port) 260 tornado.ioloop.IOLoop.instance().start() 261 [end of mesa/visualization/ModularVisualization.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mesa/visualization/ModularVisualization.py b/mesa/visualization/ModularVisualization.py --- a/mesa/visualization/ModularVisualization.py +++ b/mesa/visualization/ModularVisualization.py @@ -81,12 +81,13 @@ import os import tornado.ioloop -import tornado.template import tornado.web import tornado.websocket import tornado.escape import tornado.gen +import webbrowser + # Suppress several pylint warnings for this file. # Attributes being defined outside of init is a Tornado feature. # pylint: disable=attribute-defined-outside-init @@ -255,6 +256,8 @@ """ Run the app. """ if port is not None: self.port = port - print('Interface starting at http://127.0.0.1:{PORT}'.format(PORT=self.port)) + url = 'http://127.0.0.1:{PORT}'.format(PORT=self.port) + print('Interface starting at {url}'.format(url=url)) self.listen(self.port) + webbrowser.open(url) tornado.ioloop.IOLoop.instance().start()
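The Mesa diff above amounts to a standard-library `webbrowser.open()` call issued before the blocking Tornado loop starts. A minimal sketch of that launch order is below; the port value and messages mirror `launch()` in the file, while the Tornado calls are only referenced in comments so the snippet runs on its own.

```python
import webbrowser

# Sketch of ModularServer.launch() after the fix: build the URL once,
# announce it, then open the user's default browser.
port = 8888
url = "http://127.0.0.1:{PORT}".format(PORT=port)
print("Interface starting at {url}".format(url=url))
webbrowser.open(url)
# In Mesa the surrounding calls are self.listen(self.port) and then
# tornado.ioloop.IOLoop.instance().start(); start() blocks, so the
# browser has to be opened before it.
```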
{"golden_diff": "diff --git a/mesa/visualization/ModularVisualization.py b/mesa/visualization/ModularVisualization.py\n--- a/mesa/visualization/ModularVisualization.py\n+++ b/mesa/visualization/ModularVisualization.py\n@@ -81,12 +81,13 @@\n import os\n \n import tornado.ioloop\n-import tornado.template\n import tornado.web\n import tornado.websocket\n import tornado.escape\n import tornado.gen\n \n+import webbrowser\n+\n # Suppress several pylint warnings for this file.\n # Attributes being defined outside of init is a Tornado feature.\n # pylint: disable=attribute-defined-outside-init\n@@ -255,6 +256,8 @@\n \"\"\" Run the app. \"\"\"\n if port is not None:\n self.port = port\n- print('Interface starting at http://127.0.0.1:{PORT}'.format(PORT=self.port))\n+ url = 'http://127.0.0.1:{PORT}'.format(PORT=self.port)\n+ print('Interface starting at {url}'.format(url=url))\n self.listen(self.port)\n+ webbrowser.open(url)\n tornado.ioloop.IOLoop.instance().start()\n", "issue": "When server is launched, browser window should open automagically \nWhen guest lecturing at GMU, I observed that users did not read the instructions to visit 127.0.0.1... in their browser. I think jupyter spoiled them ;-). We should add functionality to have the browser auto launch when someone kicks off a model.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nModularServer\n=============\n\nA visualization server which renders a model via one or more elements.\n\nThe concept for the modular visualization server as follows:\nA visualization is composed of VisualizationElements, each of which defines how\nto generate some visualization from a model instance and render it on the\nclient. VisualizationElements may be anything from a simple text display to\na multilayered HTML5 canvas.\n\nThe actual server is launched with one or more VisualizationElements;\nit runs the model object through each of them, generating data to be sent to\nthe client. The client page is also generated based on the JavaScript code\nprovided by each element.\n\nThis file consists of the following classes:\n\nVisualizationElement: Parent class for all other visualization elements, with\n the minimal necessary options.\nPageHandler: The handler for the visualization page, generated from a template\n and built from the various visualization elements.\nSocketHandler: Handles the websocket connection between the client page and\n the server.\nModularServer: The overall visualization application class which stores and\n controls the model and visualization instance.\n\n\nModularServer should *not* need to be subclassed on a model-by-model basis; it\nshould be primarily a pass-through for VisualizationElement subclasses, which\ndefine the actual visualization specifics.\n\nFor example, suppose we have created two visualization elements for our model,\ncalled canvasvis and graphvis; we would launch a server with:\n\n server = ModularServer(MyModel, [canvasvis, graphvis], name=\"My Model\")\n server.launch()\n\nThe client keeps track of what step it is showing. 
Clicking the Step button in\nthe browser sends a message requesting the viz_state corresponding to the next\nstep position, which is then sent back to the client via the websocket.\n\nThe websocket protocol is as follows:\nEach message is a JSON object, with a \"type\" property which defines the rest of\nthe structure.\n\nServer -> Client:\n Send over the model state to visualize.\n Model state is a list, with each element corresponding to a div; each div\n is expected to have a render function associated with it, which knows how\n to render that particular data. The example below includes two elements:\n the first is data for a CanvasGrid, the second for a raw text display.\n\n {\n \"type\": \"viz_state\",\n \"data\": [{0:[ {\"Shape\": \"circle\", \"x\": 0, \"y\": 0, \"r\": 0.5,\n \"Color\": \"#AAAAAA\", \"Filled\": \"true\", \"Layer\": 0,\n \"text\": 'A', \"text_color\": \"white\" }]},\n \"Shape Count: 1\"]\n }\n\n Informs the client that the model is over.\n {\"type\": \"end\"}\n\nClient -> Server:\n Reset the model.\n TODO: Allow this to come with parameters\n {\n \"type\": \"reset\"\n }\n\n Get a given state.\n {\n \"type\": \"get_step\",\n \"step:\" index of the step to get.\n }\n\n\"\"\"\nimport os\n\nimport tornado.ioloop\nimport tornado.template\nimport tornado.web\nimport tornado.websocket\nimport tornado.escape\nimport tornado.gen\n\n# Suppress several pylint warnings for this file.\n# Attributes being defined outside of init is a Tornado feature.\n# pylint: disable=attribute-defined-outside-init\n\n\nclass VisualizationElement:\n \"\"\"\n Defines an element of the visualization.\n\n Attributes:\n package_includes: A list of external JavaScript files to include that\n are part of the Mesa packages.\n local_includes: A list of JavaScript files that are local to the\n directory that the server is being run in.\n js_code: A JavaScript code string to instantiate the element.\n\n Methods:\n render: Takes a model object, and produces JSON data which can be sent\n to the client.\n\n \"\"\"\n\n package_includes = []\n local_includes = []\n js_code = ''\n render_args = {}\n\n def __init__(self):\n pass\n\n def render(self, model):\n \"\"\" Build visualization data from a model object.\n\n Args:\n model: A model object\n\n Returns:\n A JSON-ready object.\n\n \"\"\"\n return \"<b>VisualizationElement goes here</b>.\"\n\n# =============================================================================\n# Actual Tornado code starts here:\n\n\nclass PageHandler(tornado.web.RequestHandler):\n \"\"\" Handler for the HTML template which holds the visualization. \"\"\"\n\n def get(self):\n elements = self.application.visualization_elements\n for i, element in enumerate(elements):\n element.index = i\n self.render(\"modular_template.html\", port=self.application.port,\n model_name=self.application.model_name,\n package_includes=self.application.package_includes,\n local_includes=self.application.local_includes,\n scripts=self.application.js_code)\n\n\nclass SocketHandler(tornado.websocket.WebSocketHandler):\n \"\"\" Handler for websocket. 
\"\"\"\n def open(self):\n if self.application.verbose:\n print(\"Socket opened!\")\n\n def check_origin(self, origin):\n return True\n\n def on_message(self, message):\n \"\"\" Receiving a message from the websocket, parse, and act accordingly.\n\n \"\"\"\n if self.application.verbose:\n print(message)\n msg = tornado.escape.json_decode(message)\n\n if msg[\"type\"] == \"get_step\":\n self.application.model.step()\n self.write_message({\"type\": \"viz_state\",\n \"data\": self.application.render_model()})\n\n elif msg[\"type\"] == \"reset\":\n self.application.reset_model()\n self.write_message({\"type\": \"viz_state\",\n \"data\": self.application.render_model()})\n\n else:\n if self.application.verbose:\n print(\"Unexpected message!\")\n\n\nclass ModularServer(tornado.web.Application):\n \"\"\" Main visualization application. \"\"\"\n verbose = True\n\n model_name = \"Mesa Model\"\n model_cls = None # A model class\n portrayal_method = None\n port = 8888 # Default port to listen on\n canvas_width = 500\n canvas_height = 500\n grid_height = 0\n grid_width = 0\n\n max_steps = 100000\n\n model_args = ()\n model_kwargs = {}\n\n # Handlers and other globals:\n page_handler = (r'/', PageHandler)\n socket_handler = (r'/ws', SocketHandler)\n static_handler = (r'/static/(.*)', tornado.web.StaticFileHandler,\n {\"path\": os.path.dirname(__file__) + \"/templates\"})\n local_handler = (r'/local/(.*)', tornado.web.StaticFileHandler,\n {\"path\": ''})\n\n handlers = [page_handler, socket_handler, static_handler, local_handler]\n\n settings = {\"debug\": True,\n \"template_path\": os.path.dirname(__file__) + \"/templates\"}\n\n def __init__(self, model_cls, visualization_elements, name=\"Mesa Model\",\n *args, **kwargs):\n \"\"\" Create a new visualization server with the given elements. \"\"\"\n # Prep visualization elements:\n self.visualization_elements = visualization_elements\n self.package_includes = set()\n self.local_includes = set()\n self.js_code = []\n for element in self.visualization_elements:\n for include_file in element.package_includes:\n self.package_includes.add(include_file)\n for include_file in element.local_includes:\n self.local_includes.add(include_file)\n self.js_code.append(element.js_code)\n\n # Initializing the model\n self.model_name = name\n self.model_cls = model_cls\n\n self.model_args = args\n self.model_kwargs = kwargs\n self.reset_model()\n\n # Initializing the application itself:\n super().__init__(self.handlers, **self.settings)\n\n def reset_model(self):\n \"\"\" Reinstantiate the model object, using the current parameters. \"\"\"\n self.model = self.model_cls(*self.model_args, **self.model_kwargs)\n\n def render_model(self):\n \"\"\" Turn the current state of the model into a dictionary of\n visualizations\n\n \"\"\"\n visualization_state = []\n for element in self.visualization_elements:\n element_state = element.render(self.model)\n visualization_state.append(element_state)\n return visualization_state\n\n def launch(self, port=None):\n \"\"\" Run the app. \"\"\"\n if port is not None:\n self.port = port\n print('Interface starting at http://127.0.0.1:{PORT}'.format(PORT=self.port))\n self.listen(self.port)\n tornado.ioloop.IOLoop.instance().start()\n", "path": "mesa/visualization/ModularVisualization.py"}]}
3,119
253
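The mesa fix above amounts to importing the standard-library `webbrowser` module and opening the interface URL once the Tornado app is listening. A minimal self-contained sketch of that pattern follows; it is not the mesa server itself, and the handler body and default port are illustrative assumptions.

```python
# Minimal sketch of the auto-open-browser pattern from the mesa golden_diff.
# Not mesa code: the handler and port below are placeholders for illustration.
import webbrowser

import tornado.ioloop
import tornado.web


class PageHandler(tornado.web.RequestHandler):
    def get(self):
        self.write("visualization page would render here")


def launch(port=8888):
    """Start listening, then open the default browser at the local URL."""
    app = tornado.web.Application([(r"/", PageHandler)])
    url = 'http://127.0.0.1:{PORT}'.format(PORT=port)
    print('Interface starting at {url}'.format(url=url))
    app.listen(port)
    webbrowser.open(url)  # the behaviour change requested in the issue
    tornado.ioloop.IOLoop.instance().start()


if __name__ == '__main__':
    launch()
```

The `webbrowser.open` call sits after `listen()` but before the blocking `start()`, mirroring the ordering in the diff.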
gh_patches_debug_38158
rasdani/github-patches
git_diff
Flexget__Flexget-171
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Input plugin "imdb_list" currently failing to fetch lists behind authentication Message: `There was an error during imdb_list input (Unable to get imdb list: 404 Client Error: Not Found), using cache instead."` Same issue as, http://flexget.com/ticket/2313 but even with the most recent fix applied it still fails. </issue> <code> [start of flexget/plugins/input/imdb_list.py] 1 from __future__ import unicode_literals, division, absolute_import 2 import logging 3 import csv 4 import re 5 from cgi import parse_header 6 7 from flexget import plugin 8 from flexget.event import event 9 from flexget.utils import requests 10 from flexget.utils.imdb import make_url 11 from flexget.utils.cached_input import cached 12 from flexget.utils.tools import decode_html 13 from flexget.entry import Entry 14 from flexget.utils.soup import get_soup 15 16 log = logging.getLogger('imdb_list') 17 18 USER_ID_RE = r'^ur\d{7,8}$' 19 20 21 class ImdbList(object): 22 """"Creates an entry for each movie in your imdb list.""" 23 24 schema = { 25 'type': 'object', 26 'properties': { 27 'user_id': { 28 'type': 'string', 29 'pattern': USER_ID_RE, 30 'error_pattern': 'user_id must be in the form urXXXXXXX' 31 }, 32 'username': {'type': 'string'}, 33 'password': {'type': 'string'}, 34 'list': {'type': 'string'} 35 }, 36 'required': ['list'], 37 'additionalProperties': False 38 } 39 40 @cached('imdb_list', persist='2 hours') 41 def on_task_input(self, task, config): 42 sess = requests.Session() 43 if config.get('username') and config.get('password'): 44 45 log.verbose('Logging in ...') 46 47 # Log in to imdb with our handler 48 params = {'login': config['username'], 'password': config['password']} 49 try: 50 # First get the login page so we can get the hidden input value 51 soup = get_soup(sess.get('https://secure.imdb.com/register-imdb/login').content) 52 53 # Fix for bs4 bug. see #2313 and github#118 54 auxsoup = soup.find('div', id='nb20').next_sibling.next_sibling 55 tag = auxsoup.find('input', attrs={'name': '49e6c'}) 56 if tag: 57 params['49e6c'] = tag['value'] 58 else: 59 log.warning('Unable to find required info for imdb login, maybe their login method has changed.') 60 # Now we do the actual login with appropriate parameters 61 r = sess.post('https://secure.imdb.com/register-imdb/login', data=params, raise_status=False) 62 except requests.RequestException as e: 63 raise plugin.PluginError('Unable to login to imdb: %s' % e.message) 64 65 # IMDb redirects us upon a successful login. 66 # removed - doesn't happen always? 67 # if r.status_code != 302: 68 # log.warning('It appears logging in to IMDb was unsuccessful.') 69 70 # try to automatically figure out user_id from watchlist redirect url 71 if not 'user_id' in config: 72 log.verbose('Getting user_id ...') 73 try: 74 response = sess.get('http://www.imdb.com/list/watchlist') 75 except requests.RequestException as e: 76 log.error('Error retrieving user ID from imdb: %s' % e.message) 77 user_id = '' 78 else: 79 log.debug('redirected to %s' % response.url) 80 user_id = response.url.split('/')[-2] 81 if re.match(USER_ID_RE, user_id): 82 config['user_id'] = user_id 83 else: 84 raise plugin.PluginError('Couldn\'t figure out user_id, please configure it manually.') 85 86 if not 'user_id' in config: 87 raise plugin.PluginError('Configuration option `user_id` required.') 88 89 log.verbose('Retrieving list %s ...' 
% config['list']) 90 91 # Get the imdb list in csv format 92 try: 93 url = 'http://www.imdb.com/list/export' 94 params = {'list_id': config['list'], 'author_id': config['user_id']} 95 log.debug('Requesting %s' % url) 96 opener = sess.get(url, params=params) 97 mime_type = parse_header(opener.headers['content-type'])[0] 98 log.debug('mime_type: %s' % mime_type) 99 if mime_type != 'text/csv': 100 raise plugin.PluginError('Didn\'t get CSV export as response. Probably specified list `%s` ' 101 'does not exist.' % config['list']) 102 csv_rows = csv.reader(opener.iter_lines()) 103 except requests.RequestException as e: 104 raise plugin.PluginError('Unable to get imdb list: %s' % e.message) 105 106 # Create an Entry for each movie in the list 107 entries = [] 108 for row in csv_rows: 109 if not row or row[0] == 'position': 110 # Don't use blank rows or the headings row 111 continue 112 try: 113 title = decode_html(row[5]).decode('utf-8') 114 entries.append(Entry(title=title, url=make_url(row[1]), imdb_id=row[1], imdb_name=title)) 115 except IndexError: 116 log.critical('IndexError! Unable to handle row: %s' % row) 117 return entries 118 119 120 @event('plugin.register') 121 def register_plugin(): 122 plugin.register(ImdbList, 'imdb_list', api_ver=2) 123 [end of flexget/plugins/input/imdb_list.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/flexget/plugins/input/imdb_list.py b/flexget/plugins/input/imdb_list.py --- a/flexget/plugins/input/imdb_list.py +++ b/flexget/plugins/input/imdb_list.py @@ -1,13 +1,13 @@ from __future__ import unicode_literals, division, absolute_import import logging -import csv +import feedparser import re from cgi import parse_header from flexget import plugin from flexget.event import event from flexget.utils import requests -from flexget.utils.imdb import make_url +from flexget.utils.imdb import make_url, extract_id from flexget.utils.cached_input import cached from flexget.utils.tools import decode_html from flexget.entry import Entry @@ -88,32 +88,27 @@ log.verbose('Retrieving list %s ...' % config['list']) - # Get the imdb list in csv format + # Get the imdb list in RSS format try: - url = 'http://www.imdb.com/list/export' - params = {'list_id': config['list'], 'author_id': config['user_id']} + if config['list'] in ['watchlist', 'ratings', 'checkins']: + url = 'http://rss.imdb.com/user/%s/%s' % (config['user_id'], config['list']) + else: + url = 'http://rss.imdb.com/list/%s' % config['list'] log.debug('Requesting %s' % url) - opener = sess.get(url, params=params) - mime_type = parse_header(opener.headers['content-type'])[0] - log.debug('mime_type: %s' % mime_type) - if mime_type != 'text/csv': - raise plugin.PluginError('Didn\'t get CSV export as response. Probably specified list `%s` ' - 'does not exist.' % config['list']) - csv_rows = csv.reader(opener.iter_lines()) + try: + rss = feedparser.parse(url) + except LookupError as e: + raise plugin.PluginError('Failed to parse RSS feed for list `%s` correctly: %s' % (config['list'], e)) except requests.RequestException as e: raise plugin.PluginError('Unable to get imdb list: %s' % e.message) # Create an Entry for each movie in the list entries = [] - for row in csv_rows: - if not row or row[0] == 'position': - # Don't use blank rows or the headings row - continue + for entry in rss.entries: try: - title = decode_html(row[5]).decode('utf-8') - entries.append(Entry(title=title, url=make_url(row[1]), imdb_id=row[1], imdb_name=title)) + entries.append(Entry(title=entry.title, url=entry.link, imdb_id=extract_id(entry.link), imdb_name=entry.title)) except IndexError: - log.critical('IndexError! Unable to handle row: %s' % row) + log.critical('IndexError! Unable to handle RSS entry: %s' % entry) return entries
{"golden_diff": "diff --git a/flexget/plugins/input/imdb_list.py b/flexget/plugins/input/imdb_list.py\n--- a/flexget/plugins/input/imdb_list.py\n+++ b/flexget/plugins/input/imdb_list.py\n@@ -1,13 +1,13 @@\n from __future__ import unicode_literals, division, absolute_import\n import logging\n-import csv\n+import feedparser\n import re\n from cgi import parse_header\n \n from flexget import plugin\n from flexget.event import event\n from flexget.utils import requests\n-from flexget.utils.imdb import make_url\n+from flexget.utils.imdb import make_url, extract_id\n from flexget.utils.cached_input import cached\n from flexget.utils.tools import decode_html\n from flexget.entry import Entry\n@@ -88,32 +88,27 @@\n \n log.verbose('Retrieving list %s ...' % config['list'])\n \n- # Get the imdb list in csv format\n+ # Get the imdb list in RSS format\n try:\n- url = 'http://www.imdb.com/list/export'\n- params = {'list_id': config['list'], 'author_id': config['user_id']}\n+ if config['list'] in ['watchlist', 'ratings', 'checkins']:\n+ url = 'http://rss.imdb.com/user/%s/%s' % (config['user_id'], config['list'])\n+ else:\n+ url = 'http://rss.imdb.com/list/%s' % config['list']\n log.debug('Requesting %s' % url)\n- opener = sess.get(url, params=params)\n- mime_type = parse_header(opener.headers['content-type'])[0]\n- log.debug('mime_type: %s' % mime_type)\n- if mime_type != 'text/csv':\n- raise plugin.PluginError('Didn\\'t get CSV export as response. Probably specified list `%s` '\n- 'does not exist.' % config['list'])\n- csv_rows = csv.reader(opener.iter_lines())\n+ try:\n+ rss = feedparser.parse(url)\n+ except LookupError as e:\n+ raise plugin.PluginError('Failed to parse RSS feed for list `%s` correctly: %s' % (config['list'], e))\n except requests.RequestException as e:\n raise plugin.PluginError('Unable to get imdb list: %s' % e.message)\n \n # Create an Entry for each movie in the list\n entries = []\n- for row in csv_rows:\n- if not row or row[0] == 'position':\n- # Don't use blank rows or the headings row\n- continue\n+ for entry in rss.entries:\n try:\n- title = decode_html(row[5]).decode('utf-8')\n- entries.append(Entry(title=title, url=make_url(row[1]), imdb_id=row[1], imdb_name=title))\n+ entries.append(Entry(title=entry.title, url=entry.link, imdb_id=extract_id(entry.link), imdb_name=entry.title))\n except IndexError:\n- log.critical('IndexError! Unable to handle row: %s' % row)\n+ log.critical('IndexError! 
Unable to handle RSS entry: %s' % entry)\n return entries\n", "issue": "Input plugin \"imdb_list\" currently failing to fetch lists behind authentication\nMessage: `There was an error during imdb_list input (Unable to get imdb list: 404 Client Error: Not Found), using cache instead.\"`\n\nSame issue as, http://flexget.com/ticket/2313 but even with the most recent fix applied it still fails.\n\n", "before_files": [{"content": "from __future__ import unicode_literals, division, absolute_import\nimport logging\nimport csv\nimport re\nfrom cgi import parse_header\n\nfrom flexget import plugin\nfrom flexget.event import event\nfrom flexget.utils import requests\nfrom flexget.utils.imdb import make_url\nfrom flexget.utils.cached_input import cached\nfrom flexget.utils.tools import decode_html\nfrom flexget.entry import Entry\nfrom flexget.utils.soup import get_soup\n\nlog = logging.getLogger('imdb_list')\n\nUSER_ID_RE = r'^ur\\d{7,8}$'\n\n\nclass ImdbList(object):\n \"\"\"\"Creates an entry for each movie in your imdb list.\"\"\"\n\n schema = {\n 'type': 'object',\n 'properties': {\n 'user_id': {\n 'type': 'string',\n 'pattern': USER_ID_RE,\n 'error_pattern': 'user_id must be in the form urXXXXXXX'\n },\n 'username': {'type': 'string'},\n 'password': {'type': 'string'},\n 'list': {'type': 'string'}\n },\n 'required': ['list'],\n 'additionalProperties': False\n }\n\n @cached('imdb_list', persist='2 hours')\n def on_task_input(self, task, config):\n sess = requests.Session()\n if config.get('username') and config.get('password'):\n\n log.verbose('Logging in ...')\n\n # Log in to imdb with our handler\n params = {'login': config['username'], 'password': config['password']}\n try:\n # First get the login page so we can get the hidden input value\n soup = get_soup(sess.get('https://secure.imdb.com/register-imdb/login').content)\n\n # Fix for bs4 bug. see #2313 and github#118\n auxsoup = soup.find('div', id='nb20').next_sibling.next_sibling\n tag = auxsoup.find('input', attrs={'name': '49e6c'})\n if tag:\n params['49e6c'] = tag['value']\n else:\n log.warning('Unable to find required info for imdb login, maybe their login method has changed.')\n # Now we do the actual login with appropriate parameters\n r = sess.post('https://secure.imdb.com/register-imdb/login', data=params, raise_status=False)\n except requests.RequestException as e:\n raise plugin.PluginError('Unable to login to imdb: %s' % e.message)\n\n # IMDb redirects us upon a successful login.\n # removed - doesn't happen always?\n # if r.status_code != 302:\n # log.warning('It appears logging in to IMDb was unsuccessful.')\n\n # try to automatically figure out user_id from watchlist redirect url\n if not 'user_id' in config:\n log.verbose('Getting user_id ...')\n try:\n response = sess.get('http://www.imdb.com/list/watchlist')\n except requests.RequestException as e:\n log.error('Error retrieving user ID from imdb: %s' % e.message)\n user_id = ''\n else:\n log.debug('redirected to %s' % response.url)\n user_id = response.url.split('/')[-2]\n if re.match(USER_ID_RE, user_id):\n config['user_id'] = user_id\n else:\n raise plugin.PluginError('Couldn\\'t figure out user_id, please configure it manually.')\n\n if not 'user_id' in config:\n raise plugin.PluginError('Configuration option `user_id` required.')\n\n log.verbose('Retrieving list %s ...' 
% config['list'])\n\n # Get the imdb list in csv format\n try:\n url = 'http://www.imdb.com/list/export'\n params = {'list_id': config['list'], 'author_id': config['user_id']}\n log.debug('Requesting %s' % url)\n opener = sess.get(url, params=params)\n mime_type = parse_header(opener.headers['content-type'])[0]\n log.debug('mime_type: %s' % mime_type)\n if mime_type != 'text/csv':\n raise plugin.PluginError('Didn\\'t get CSV export as response. Probably specified list `%s` '\n 'does not exist.' % config['list'])\n csv_rows = csv.reader(opener.iter_lines())\n except requests.RequestException as e:\n raise plugin.PluginError('Unable to get imdb list: %s' % e.message)\n\n # Create an Entry for each movie in the list\n entries = []\n for row in csv_rows:\n if not row or row[0] == 'position':\n # Don't use blank rows or the headings row\n continue\n try:\n title = decode_html(row[5]).decode('utf-8')\n entries.append(Entry(title=title, url=make_url(row[1]), imdb_id=row[1], imdb_name=title))\n except IndexError:\n log.critical('IndexError! Unable to handle row: %s' % row)\n return entries\n\n\n@event('plugin.register')\ndef register_plugin():\n plugin.register(ImdbList, 'imdb_list', api_ver=2)\n", "path": "flexget/plugins/input/imdb_list.py"}]}
1,987
704
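The Flexget change replaces the CSV export endpoint with IMDb's RSS feeds parsed by `feedparser`, taking the title, link and id from each feed entry. Below is a sketch of that flow stripped of the plugin machinery; the URL patterns are the ones in the diff, while the `tt`-id regular expression is an assumed stand-in for Flexget's `extract_id` helper, and the RSS endpoints may no longer be served by IMDb.

```python
# Sketch of the RSS-based list retrieval used in the golden_diff, outside of
# Flexget. The id regex stands in for flexget.utils.imdb.extract_id and is an
# assumption; the plugin's error handling is omitted.
import re

import feedparser

IMDB_ID_RE = re.compile(r'(tt\d{7,8})')


def imdb_list_entries(user_id, list_name):
    """Yield (title, link, imdb_id) tuples for an IMDb list exposed over RSS."""
    if list_name in ('watchlist', 'ratings', 'checkins'):
        url = 'http://rss.imdb.com/user/%s/%s' % (user_id, list_name)
    else:
        url = 'http://rss.imdb.com/list/%s' % list_name
    rss = feedparser.parse(url)  # feedparser fetches and parses in one call
    for entry in rss.entries:
        match = IMDB_ID_RE.search(entry.link)
        yield entry.title, entry.link, match.group(1) if match else None
```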
gh_patches_debug_18491
rasdani/github-patches
git_diff
pretalx__pretalx-464
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Setting speaker availibilty to "all day" for the last day of the conference triggers an error message of "Submitted availability is not within the event timeframe." ## Expected Behavior Setting the availibilty to "All Day" should be possible for all days of the conference, even if it ends at some point during this day. ## Current Behavior Setting the availibility to "All Day" on all days of the conference (in our case monday to sunday) triggers an error message of "Submitted availability is not within the event timeframe." ## Steps to Reproduce 1. Set the conference date to start at 2018-10-22 and end at 2018-10-28 2. Edit a speaker 3. Set the availability to all day for all conference days 4. On the bottom of the page, you the the error message "Submitted availability is not within the event timeframe." if needed, we can provide a speaker account on the instance for testing. ## Your Environment * Version used: 0.7.1 * Environment name and version (e.g. Chrome 39, python 3.5): Firefox 61 * Operating System and version (desktop or mobile): Linux Desktop (Debian Unstable) * Link to your instance, if in production: https://conference.c3w.at </issue> <code> [start of src/pretalx/schedule/forms.py] 1 import datetime 2 import json 3 4 import django.forms as forms 5 import pytz 6 from django.db import transaction 7 from django.utils.dateparse import parse_datetime 8 from django.utils.translation import ugettext_lazy as _ 9 from i18nfield.forms import I18nModelForm 10 11 from pretalx.common.mixins.forms import ReadOnlyFlag 12 from pretalx.schedule.models import Availability, Room 13 14 15 class AvailabilitiesFormMixin(forms.Form): 16 availabilities = forms.CharField( 17 label=_('Availability'), 18 help_text=_('Please click and drag to mark the availability during the conference.'), 19 widget=forms.TextInput(attrs={'class': 'availabilities-editor-data'}), 20 required=False, 21 ) 22 23 def _serialize(self, event, instance): 24 if instance: 25 availabilities = [ 26 avail.serialize() 27 for avail in instance.availabilities.all() 28 ] 29 else: 30 availabilities = [] 31 32 return json.dumps({ 33 'availabilities': availabilities, 34 'event': { 35 'timezone': event.timezone, 36 'date_from': str(event.date_from), 37 'date_to': str(event.date_to), 38 } 39 }) 40 41 def __init__(self, *args, event=None, **kwargs): 42 self.event = event 43 initial = kwargs.pop('initial', dict()) 44 initial['availabilities'] = self._serialize(self.event, kwargs['instance']) 45 kwargs['initial'] = initial 46 super().__init__(*args, **kwargs) 47 48 def _parse_availabilities_json(self, jsonavailabilities): 49 try: 50 rawdata = json.loads(jsonavailabilities) 51 except ValueError: 52 raise forms.ValidationError("Submitted availabilities are not valid json.") 53 54 try: 55 assert isinstance(rawdata, dict) 56 availabilities = rawdata['availabilities'] 57 assert isinstance(availabilities, list) 58 return availabilities 59 except (ValueError, AssertionError, LookupError): 60 raise forms.ValidationError("Submitted json does not comply with format.") 61 62 def _parse_datetime(self, strdate): 63 tz = pytz.timezone(self.event.timezone) 64 65 obj = parse_datetime(strdate) 66 assert obj 67 if obj.tzinfo is None: 68 obj = tz.localize(obj) 69 70 return obj 71 72 def _validate_availability(self, rawavail): 73 try: 74 assert isinstance(rawavail, dict) 75 rawavail.pop('id', None) 76 rawavail.pop('allDay', None) 77 assert len(rawavail) == 2 
78 assert 'start' in rawavail 79 assert 'end' in rawavail 80 except AssertionError: 81 raise forms.ValidationError("Submitted availability does not comply with format.") 82 83 try: 84 rawavail['start'] = self._parse_datetime(rawavail['start']) 85 rawavail['end'] = self._parse_datetime(rawavail['end']) 86 except (AssertionError, TypeError, ValueError): 87 raise forms.ValidationError("Submitted availability contains an invalid date.") 88 89 tz = pytz.timezone(self.event.timezone) 90 91 try: 92 timeframe_start = tz.localize(datetime.datetime.combine(self.event.date_from, datetime.time())) 93 assert rawavail['start'] >= timeframe_start 94 timeframe_end = tz.localize(datetime.datetime.combine(self.event.date_to, datetime.time())) 95 timeframe_end += datetime.timedelta(days=1) 96 assert rawavail['end'] <= timeframe_end 97 except AssertionError: 98 raise forms.ValidationError("Submitted availability is not within the event timeframe.") 99 100 def clean_availabilities(self): 101 if self.cleaned_data['availabilities'] == '': 102 return None 103 104 rawavailabilities = self._parse_availabilities_json(self.cleaned_data['availabilities']) 105 availabilities = [] 106 107 for rawavail in rawavailabilities: 108 self._validate_availability(rawavail) 109 availabilities.append(Availability(event_id=self.event.id, **rawavail)) 110 111 return availabilities 112 113 def _set_foreignkeys(self, instance, availabilities): 114 """ 115 Set the reference to `instance` in each given availability. 116 117 For example, set the availabilitiy.room_id to instance.id, in case instance of type Room. 118 """ 119 reference_name = instance.availabilities.field.name + '_id' 120 121 for avail in availabilities: 122 setattr(avail, reference_name, instance.id) 123 124 def _replace_availabilities(self, instance, availabilities): 125 with transaction.atomic(): 126 # TODO: do not recreate objects unnecessarily, give the client the IDs, so we can track modifications and leave unchanged objects alone 127 instance.availabilities.all().delete() 128 Availability.objects.bulk_create(availabilities) 129 130 def save(self, *args, **kwargs): 131 instance = super().save(*args, **kwargs) 132 availabilities = self.cleaned_data['availabilities'] 133 134 if availabilities is not None: 135 self._set_foreignkeys(instance, availabilities) 136 self._replace_availabilities(instance, availabilities) 137 138 return instance 139 140 141 class RoomForm(AvailabilitiesFormMixin, ReadOnlyFlag, I18nModelForm): 142 143 def __init__(self, *args, **kwargs): 144 super().__init__(*args, **kwargs) 145 self.fields['name'].widget.attrs['placeholder'] = _('Room I') 146 self.fields['description'].widget.attrs['placeholder'] = _('Description, e.g.: Our main meeting place, Room I, enter from the right.') 147 self.fields['speaker_info'].widget.attrs['placeholder'] = _('Information for speakers, e.g.: Projector has only HDMI input.') 148 self.fields['capacity'].widget.attrs['placeholder'] = '300' 149 150 class Meta: 151 model = Room 152 fields = ['name', 'description', 'speaker_info', 'capacity', 'position'] 153 [end of src/pretalx/schedule/forms.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/pretalx/schedule/forms.py b/src/pretalx/schedule/forms.py --- a/src/pretalx/schedule/forms.py +++ b/src/pretalx/schedule/forms.py @@ -91,8 +91,11 @@ try: timeframe_start = tz.localize(datetime.datetime.combine(self.event.date_from, datetime.time())) assert rawavail['start'] >= timeframe_start - timeframe_end = tz.localize(datetime.datetime.combine(self.event.date_to, datetime.time())) - timeframe_end += datetime.timedelta(days=1) + + # add 1 day, not 24 hours, https://stackoverflow.com/a/25427822/2486196 + timeframe_end = datetime.datetime.combine(self.event.date_to, datetime.time()) + timeframe_end = timeframe_end + datetime.timedelta(days=1) + timeframe_end = tz.localize(timeframe_end, is_dst=None) assert rawavail['end'] <= timeframe_end except AssertionError: raise forms.ValidationError("Submitted availability is not within the event timeframe.")
{"golden_diff": "diff --git a/src/pretalx/schedule/forms.py b/src/pretalx/schedule/forms.py\n--- a/src/pretalx/schedule/forms.py\n+++ b/src/pretalx/schedule/forms.py\n@@ -91,8 +91,11 @@\n try:\n timeframe_start = tz.localize(datetime.datetime.combine(self.event.date_from, datetime.time()))\n assert rawavail['start'] >= timeframe_start\n- timeframe_end = tz.localize(datetime.datetime.combine(self.event.date_to, datetime.time()))\n- timeframe_end += datetime.timedelta(days=1)\n+\n+ # add 1 day, not 24 hours, https://stackoverflow.com/a/25427822/2486196\n+ timeframe_end = datetime.datetime.combine(self.event.date_to, datetime.time())\n+ timeframe_end = timeframe_end + datetime.timedelta(days=1)\n+ timeframe_end = tz.localize(timeframe_end, is_dst=None)\n assert rawavail['end'] <= timeframe_end\n except AssertionError:\n raise forms.ValidationError(\"Submitted availability is not within the event timeframe.\")\n", "issue": "Setting speaker availibilty to \"all day\" for the last day of the conference triggers an error message of \"Submitted availability is not within the event timeframe.\"\n## Expected Behavior\r\nSetting the availibilty to \"All Day\" should be possible for all days of the conference, even if it ends at some point during this day.\r\n\r\n## Current Behavior\r\nSetting the availibility to \"All Day\" on all days of the conference (in our case monday to sunday) triggers an error message of \"Submitted availability is not within the event timeframe.\"\r\n\r\n\r\n## Steps to Reproduce\r\n\r\n1. Set the conference date to start at 2018-10-22 and end at 2018-10-28\r\n2. Edit a speaker\r\n3. Set the availability to all day for all conference days\r\n4. On the bottom of the page, you the the error message \"Submitted availability is not within the event timeframe.\"\r\n\r\nif needed, we can provide a speaker account on the instance for testing.\r\n\r\n## Your Environment\r\n* Version used: 0.7.1\r\n* Environment name and version (e.g. 
Chrome 39, python 3.5): Firefox 61\r\n* Operating System and version (desktop or mobile): Linux Desktop (Debian Unstable)\r\n* Link to your instance, if in production: https://conference.c3w.at\r\n\n", "before_files": [{"content": "import datetime\nimport json\n\nimport django.forms as forms\nimport pytz\nfrom django.db import transaction\nfrom django.utils.dateparse import parse_datetime\nfrom django.utils.translation import ugettext_lazy as _\nfrom i18nfield.forms import I18nModelForm\n\nfrom pretalx.common.mixins.forms import ReadOnlyFlag\nfrom pretalx.schedule.models import Availability, Room\n\n\nclass AvailabilitiesFormMixin(forms.Form):\n availabilities = forms.CharField(\n label=_('Availability'),\n help_text=_('Please click and drag to mark the availability during the conference.'),\n widget=forms.TextInput(attrs={'class': 'availabilities-editor-data'}),\n required=False,\n )\n\n def _serialize(self, event, instance):\n if instance:\n availabilities = [\n avail.serialize()\n for avail in instance.availabilities.all()\n ]\n else:\n availabilities = []\n\n return json.dumps({\n 'availabilities': availabilities,\n 'event': {\n 'timezone': event.timezone,\n 'date_from': str(event.date_from),\n 'date_to': str(event.date_to),\n }\n })\n\n def __init__(self, *args, event=None, **kwargs):\n self.event = event\n initial = kwargs.pop('initial', dict())\n initial['availabilities'] = self._serialize(self.event, kwargs['instance'])\n kwargs['initial'] = initial\n super().__init__(*args, **kwargs)\n\n def _parse_availabilities_json(self, jsonavailabilities):\n try:\n rawdata = json.loads(jsonavailabilities)\n except ValueError:\n raise forms.ValidationError(\"Submitted availabilities are not valid json.\")\n\n try:\n assert isinstance(rawdata, dict)\n availabilities = rawdata['availabilities']\n assert isinstance(availabilities, list)\n return availabilities\n except (ValueError, AssertionError, LookupError):\n raise forms.ValidationError(\"Submitted json does not comply with format.\")\n\n def _parse_datetime(self, strdate):\n tz = pytz.timezone(self.event.timezone)\n\n obj = parse_datetime(strdate)\n assert obj\n if obj.tzinfo is None:\n obj = tz.localize(obj)\n\n return obj\n\n def _validate_availability(self, rawavail):\n try:\n assert isinstance(rawavail, dict)\n rawavail.pop('id', None)\n rawavail.pop('allDay', None)\n assert len(rawavail) == 2\n assert 'start' in rawavail\n assert 'end' in rawavail\n except AssertionError:\n raise forms.ValidationError(\"Submitted availability does not comply with format.\")\n\n try:\n rawavail['start'] = self._parse_datetime(rawavail['start'])\n rawavail['end'] = self._parse_datetime(rawavail['end'])\n except (AssertionError, TypeError, ValueError):\n raise forms.ValidationError(\"Submitted availability contains an invalid date.\")\n\n tz = pytz.timezone(self.event.timezone)\n\n try:\n timeframe_start = tz.localize(datetime.datetime.combine(self.event.date_from, datetime.time()))\n assert rawavail['start'] >= timeframe_start\n timeframe_end = tz.localize(datetime.datetime.combine(self.event.date_to, datetime.time()))\n timeframe_end += datetime.timedelta(days=1)\n assert rawavail['end'] <= timeframe_end\n except AssertionError:\n raise forms.ValidationError(\"Submitted availability is not within the event timeframe.\")\n\n def clean_availabilities(self):\n if self.cleaned_data['availabilities'] == '':\n return None\n\n rawavailabilities = self._parse_availabilities_json(self.cleaned_data['availabilities'])\n availabilities = []\n\n for rawavail in 
rawavailabilities:\n self._validate_availability(rawavail)\n availabilities.append(Availability(event_id=self.event.id, **rawavail))\n\n return availabilities\n\n def _set_foreignkeys(self, instance, availabilities):\n \"\"\"\n Set the reference to `instance` in each given availability.\n\n For example, set the availabilitiy.room_id to instance.id, in case instance of type Room.\n \"\"\"\n reference_name = instance.availabilities.field.name + '_id'\n\n for avail in availabilities:\n setattr(avail, reference_name, instance.id)\n\n def _replace_availabilities(self, instance, availabilities):\n with transaction.atomic():\n # TODO: do not recreate objects unnecessarily, give the client the IDs, so we can track modifications and leave unchanged objects alone\n instance.availabilities.all().delete()\n Availability.objects.bulk_create(availabilities)\n\n def save(self, *args, **kwargs):\n instance = super().save(*args, **kwargs)\n availabilities = self.cleaned_data['availabilities']\n\n if availabilities is not None:\n self._set_foreignkeys(instance, availabilities)\n self._replace_availabilities(instance, availabilities)\n\n return instance\n\n\nclass RoomForm(AvailabilitiesFormMixin, ReadOnlyFlag, I18nModelForm):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['name'].widget.attrs['placeholder'] = _('Room I')\n self.fields['description'].widget.attrs['placeholder'] = _('Description, e.g.: Our main meeting place, Room I, enter from the right.')\n self.fields['speaker_info'].widget.attrs['placeholder'] = _('Information for speakers, e.g.: Projector has only HDMI input.')\n self.fields['capacity'].widget.attrs['placeholder'] = '300'\n\n class Meta:\n model = Room\n fields = ['name', 'description', 'speaker_info', 'capacity', 'position']\n", "path": "src/pretalx/schedule/forms.py"}]}
2,367
236
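The idea behind the pretalx fix is to add the extra day to a naive datetime and localize afterwards, instead of localizing first and adding 24 hours; the issue's date range ends on 2018-10-28, the night European DST ends, which is exactly when the two approaches diverge. The sketch below reproduces both computations side by side; the `Europe/Berlin` zone is an assumption standing in for the event's configured timezone.

```python
# Demonstrates "add 1 day, not 24 hours" from the golden_diff. The timezone is
# an assumed example; the dates are the ones reported in the issue.
import datetime

import pytz

tz = pytz.timezone('Europe/Berlin')
date_to = datetime.date(2018, 10, 28)  # last event day; DST ends that night

# Previous behaviour: localize first, then add a timedelta to the aware value.
old_end = tz.localize(datetime.datetime.combine(date_to, datetime.time()))
old_end += datetime.timedelta(days=1)

# Fixed behaviour: add the day while still naive, then localize explicitly.
new_end = datetime.datetime.combine(date_to, datetime.time())
new_end += datetime.timedelta(days=1)
new_end = tz.localize(new_end, is_dst=None)

print(old_end.isoformat())  # 2018-10-29T00:00:00+02:00 -> an hour short of local midnight
print(new_end.isoformat())  # 2018-10-29T00:00:00+01:00 -> true end of the last day
```

An all-day availability for the final day ends at true local midnight, so with the old `timeframe_end` it compared as out of range and produced the "not within the event timeframe" error.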
gh_patches_debug_19551
rasdani/github-patches
git_diff
python-telegram-bot__python-telegram-bot-1074
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> CommandHandler should not check filters if not listening to the command I have a filter to restrict some users from call some commands. I call `AdminFilter` and the relevant code can be seen below (actually, I check the admin users from a database). ```python class AdminFilter(BaseFilter): def __init__(self, *admin_users): self.admin_users = admin_users def filter(self, message): if message.from_user.username in self.admin_users: return True else: message.reply_text("You're not an admin!", quote=True) return False ``` With this filter applied, when an user that is not an admin calls the forbidden command, then receives a reply warning. And here is the issue. ### Steps to reproduce The following code illustrate the problem. In this scenario I have an administrative command `/jungle` and a normal command `/rain`. If I am the admin, I call any command and see no issue, but If am not admin, I receive the warning for both commands. ```python def jungle(bot, update): bot.send_message(update.message.chat_id, 'Welcome to the Jungle!') def rain(bot, update): bot.send_message(update.message.chat_id, 'November Rain...') dispatcher.add_handler(CommandHandler('jungle', jungle, AdminFilter('wagnermacedo'))) dispatcher.add_handler(CommandHandler('rain', rain)) ``` ### Expected behaviour Suppose I'm not the bot administrator, then in a conversation with the bot, I expect the following: ``` me: /jungle bot: > reply /jungle You're not an admin! ``` ``` me: /rain bot: November Rain... ``` ### Actual behaviour What happens instead is that bot reply that I'm not an admin for both commands. For `/jungle`, it's okay, it was expected ``` me: /jungle bot: > reply /jungle You're not an admin! ``` But `/rain` hasn't the admin filter, so it should not reply the warning. ``` me: /rain bot: > reply /rain You're not an admin! bot: November Rain... ``` :point_up_2: Surprisingly the command still works, even replying I'm not admin... ### Configuration **Version of Python, python-telegram-bot & dependencies:** I tested with the last version from master (59659ea). ``` $ python -m telegram python-telegram-bot 10.0.1 certifi 2018.01.18 future 0.16.0 Python 3.4.8 (default, Apr 13 2018, 16:18:01) [GCC 5.4.0 20160609] ``` ### Fix I have already fixed the issue in my machine, I'm going soon to create a pull request. </issue> <code> [start of telegram/ext/commandhandler.py] 1 #!/usr/bin/env python 2 # 3 # A library that provides a Python interface to the Telegram Bot API 4 # Copyright (C) 2015-2018 5 # Leandro Toledo de Souza <[email protected]> 6 # 7 # This program is free software: you can redistribute it and/or modify 8 # it under the terms of the GNU Lesser Public License as published by 9 # the Free Software Foundation, either version 3 of the License, or 10 # (at your option) any later version. 11 # 12 # This program is distributed in the hope that it will be useful, 13 # but WITHOUT ANY WARRANTY; without even the implied warranty of 14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 # GNU Lesser Public License for more details. 16 # 17 # You should have received a copy of the GNU Lesser Public License 18 # along with this program. If not, see [http://www.gnu.org/licenses/]. 
19 """This module contains the CommandHandler class.""" 20 import warnings 21 22 from future.utils import string_types 23 24 from .handler import Handler 25 from telegram import Update 26 27 28 class CommandHandler(Handler): 29 """Handler class to handle Telegram commands. 30 31 Commands are Telegram messages that start with ``/``, optionally followed by an ``@`` and the 32 bot's name and/or some additional text. 33 34 Attributes: 35 command (:obj:`str` | List[:obj:`str`]): The command or list of commands this handler 36 should listen for. 37 callback (:obj:`callable`): The callback function for this handler. 38 filters (:class:`telegram.ext.BaseFilter`): Optional. Only allow updates with these 39 Filters. 40 allow_edited (:obj:`bool`): Optional. Determines Whether the handler should also accept 41 edited messages. 42 pass_args (:obj:`bool`): Optional. Determines whether the handler should be passed 43 ``args``. 44 pass_update_queue (:obj:`bool`): Optional. Determines whether ``update_queue`` will be 45 passed to the callback function. 46 pass_job_queue (:obj:`bool`): Optional. Determines whether ``job_queue`` will be passed to 47 the callback function. 48 pass_user_data (:obj:`bool`): Optional. Determines whether ``user_data`` will be passed to 49 the callback function. 50 pass_chat_data (:obj:`bool`): Optional. Determines whether ``chat_data`` will be passed to 51 the callback function. 52 53 Note: 54 :attr:`pass_user_data` and :attr:`pass_chat_data` determine whether a ``dict`` you 55 can use to keep any data in will be sent to the :attr:`callback` function.. Related to 56 either the user or the chat that the update was sent in. For each update from the same user 57 or in the same chat, it will be the same ``dict``. 58 59 Args: 60 command (:obj:`str` | List[:obj:`str`]): The command or list of commands this handler 61 should listen for. 62 callback (:obj:`callable`): A function that takes ``bot, update`` as positional arguments. 63 It will be called when the :attr:`check_update` has determined that an update should be 64 processed by this handler. 65 filters (:class:`telegram.ext.BaseFilter`, optional): A filter inheriting from 66 :class:`telegram.ext.filters.BaseFilter`. Standard filters can be found in 67 :class:`telegram.ext.filters.Filters`. Filters can be combined using bitwise 68 operators (& for and, | for or, ~ for not). 69 allow_edited (:obj:`bool`, optional): Determines whether the handler should also accept 70 edited messages. Default is ``False``. 71 pass_args (:obj:`bool`, optional): Determines whether the handler should be passed the 72 arguments passed to the command as a keyword argument called ``args``. It will contain 73 a list of strings, which is the text following the command split on single or 74 consecutive whitespace characters. Default is ``False`` 75 pass_update_queue (:obj:`bool`, optional): If set to ``True``, a keyword argument called 76 ``update_queue`` will be passed to the callback function. It will be the ``Queue`` 77 instance used by the :class:`telegram.ext.Updater` and :class:`telegram.ext.Dispatcher` 78 that contains new updates which can be used to insert updates. Default is ``False``. 79 pass_job_queue (:obj:`bool`, optional): If set to ``True``, a keyword argument called 80 ``job_queue`` will be passed to the callback function. It will be a 81 :class:`telegram.ext.JobQueue` instance created by the :class:`telegram.ext.Updater` 82 which can be used to schedule new jobs. Default is ``False``. 
83 pass_user_data (:obj:`bool`, optional): If set to ``True``, a keyword argument called 84 ``user_data`` will be passed to the callback function. Default is ``False``. 85 pass_chat_data (:obj:`bool`, optional): If set to ``True``, a keyword argument called 86 ``chat_data`` will be passed to the callback function. Default is ``False``. 87 88 """ 89 90 def __init__(self, 91 command, 92 callback, 93 filters=None, 94 allow_edited=False, 95 pass_args=False, 96 pass_update_queue=False, 97 pass_job_queue=False, 98 pass_user_data=False, 99 pass_chat_data=False): 100 super(CommandHandler, self).__init__( 101 callback, 102 pass_update_queue=pass_update_queue, 103 pass_job_queue=pass_job_queue, 104 pass_user_data=pass_user_data, 105 pass_chat_data=pass_chat_data) 106 107 if isinstance(command, string_types): 108 self.command = [command.lower()] 109 else: 110 self.command = [x.lower() for x in command] 111 self.filters = filters 112 self.allow_edited = allow_edited 113 self.pass_args = pass_args 114 115 # We put this up here instead of with the rest of checking code 116 # in check_update since we don't wanna spam a ton 117 if isinstance(self.filters, list): 118 warnings.warn('Using a list of filters in MessageHandler is getting ' 119 'deprecated, please use bitwise operators (& and |) ' 120 'instead. More info: https://git.io/vPTbc.') 121 122 def check_update(self, update): 123 """Determines whether an update should be passed to this handlers :attr:`callback`. 124 125 Args: 126 update (:class:`telegram.Update`): Incoming telegram update. 127 128 Returns: 129 :obj:`bool` 130 131 """ 132 if (isinstance(update, Update) 133 and (update.message or update.edited_message and self.allow_edited)): 134 message = update.message or update.edited_message 135 136 if message.text and message.text.startswith('/') and len(message.text) > 1: 137 first_word = message.text_html.split(None, 1)[0] 138 if len(first_word) > 1 and first_word.startswith('/'): 139 command = first_word[1:].split('@') 140 command.append( 141 message.bot.username) # in case the command was sent without a username 142 143 if self.filters is None: 144 res = True 145 elif isinstance(self.filters, list): 146 res = any(func(message) for func in self.filters) 147 else: 148 res = self.filters(message) 149 150 return res and (command[0].lower() in self.command 151 and command[1].lower() == message.bot.username.lower()) 152 153 return False 154 155 def handle_update(self, update, dispatcher): 156 """Send the update to the :attr:`callback`. 157 158 Args: 159 update (:class:`telegram.Update`): Incoming telegram update. 160 dispatcher (:class:`telegram.ext.Dispatcher`): Dispatcher that originated the Update. 161 162 """ 163 optional_args = self.collect_optional_args(dispatcher, update) 164 165 message = update.message or update.edited_message 166 167 if self.pass_args: 168 optional_args['args'] = message.text.split()[1:] 169 170 return self.callback(dispatcher.bot, update, **optional_args) 171 [end of telegram/ext/commandhandler.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/telegram/ext/commandhandler.py b/telegram/ext/commandhandler.py --- a/telegram/ext/commandhandler.py +++ b/telegram/ext/commandhandler.py @@ -140,6 +140,10 @@ command.append( message.bot.username) # in case the command was sent without a username + if not (command[0].lower() in self.command + and command[1].lower() == message.bot.username.lower()): + return False + if self.filters is None: res = True elif isinstance(self.filters, list): @@ -147,8 +151,7 @@ else: res = self.filters(message) - return res and (command[0].lower() in self.command - and command[1].lower() == message.bot.username.lower()) + return res return False
{"golden_diff": "diff --git a/telegram/ext/commandhandler.py b/telegram/ext/commandhandler.py\n--- a/telegram/ext/commandhandler.py\n+++ b/telegram/ext/commandhandler.py\n@@ -140,6 +140,10 @@\n command.append(\n message.bot.username) # in case the command was sent without a username\n \n+ if not (command[0].lower() in self.command\n+ and command[1].lower() == message.bot.username.lower()):\n+ return False\n+\n if self.filters is None:\n res = True\n elif isinstance(self.filters, list):\n@@ -147,8 +151,7 @@\n else:\n res = self.filters(message)\n \n- return res and (command[0].lower() in self.command\n- and command[1].lower() == message.bot.username.lower())\n+ return res\n \n return False\n", "issue": "CommandHandler should not check filters if not listening to the command\nI have a filter to restrict some users from call some commands. I call `AdminFilter` and the relevant code can be seen below (actually, I check the admin users from a database).\r\n\r\n```python\r\nclass AdminFilter(BaseFilter):\r\n def __init__(self, *admin_users):\r\n self.admin_users = admin_users\r\n\r\n def filter(self, message):\r\n if message.from_user.username in self.admin_users:\r\n return True\r\n else:\r\n message.reply_text(\"You're not an admin!\", quote=True)\r\n return False\r\n```\r\n\r\nWith this filter applied, when an user that is not an admin calls the forbidden command, then receives a reply warning. And here is the issue.\r\n\r\n### Steps to reproduce\r\nThe following code illustrate the problem. In this scenario I have an administrative command `/jungle` and a normal command `/rain`.\r\n\r\nIf I am the admin, I call any command and see no issue, but If am not admin, I receive the warning for both commands.\r\n\r\n```python\r\ndef jungle(bot, update):\r\n bot.send_message(update.message.chat_id, 'Welcome to the Jungle!')\r\n\r\ndef rain(bot, update):\r\n bot.send_message(update.message.chat_id, 'November Rain...')\r\n\r\ndispatcher.add_handler(CommandHandler('jungle', jungle, AdminFilter('wagnermacedo')))\r\ndispatcher.add_handler(CommandHandler('rain', rain))\r\n```\r\n\r\n### Expected behaviour\r\nSuppose I'm not the bot administrator, then in a conversation with the bot, I expect the following:\r\n\r\n```\r\nme: /jungle\r\nbot: > reply /jungle\r\n You're not an admin!\r\n```\r\n```\r\nme: /rain\r\nbot: November Rain...\r\n```\r\n\r\n### Actual behaviour\r\nWhat happens instead is that bot reply that I'm not an admin for both commands.\r\n\r\nFor `/jungle`, it's okay, it was expected\r\n```\r\nme: /jungle\r\nbot: > reply /jungle\r\n You're not an admin!\r\n```\r\n\r\nBut `/rain` hasn't the admin filter, so it should not reply the warning.\r\n```\r\nme: /rain\r\nbot: > reply /rain\r\n You're not an admin!\r\nbot: November Rain...\r\n```\r\n\r\n:point_up_2: Surprisingly the command still works, even replying I'm not admin...\r\n\r\n### Configuration\r\n**Version of Python, python-telegram-bot & dependencies:**\r\n\r\nI tested with the last version from master (59659ea).\r\n\r\n```\r\n$ python -m telegram\r\npython-telegram-bot 10.0.1\r\ncertifi 2018.01.18\r\nfuture 0.16.0\r\nPython 3.4.8 (default, Apr 13 2018, 16:18:01) [GCC 5.4.0 20160609]\r\n```\r\n\r\n### Fix\r\nI have already fixed the issue in my machine, I'm going soon to create a pull request.\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n#\n# A library that provides a Python interface to the Telegram Bot API\n# Copyright (C) 2015-2018\n# Leandro Toledo de Souza <[email protected]>\n#\n# This program is free software: you can 
redistribute it and/or modify\n# it under the terms of the GNU Lesser Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser Public License for more details.\n#\n# You should have received a copy of the GNU Lesser Public License\n# along with this program. If not, see [http://www.gnu.org/licenses/].\n\"\"\"This module contains the CommandHandler class.\"\"\"\nimport warnings\n\nfrom future.utils import string_types\n\nfrom .handler import Handler\nfrom telegram import Update\n\n\nclass CommandHandler(Handler):\n \"\"\"Handler class to handle Telegram commands.\n\n Commands are Telegram messages that start with ``/``, optionally followed by an ``@`` and the\n bot's name and/or some additional text.\n\n Attributes:\n command (:obj:`str` | List[:obj:`str`]): The command or list of commands this handler\n should listen for.\n callback (:obj:`callable`): The callback function for this handler.\n filters (:class:`telegram.ext.BaseFilter`): Optional. Only allow updates with these\n Filters.\n allow_edited (:obj:`bool`): Optional. Determines Whether the handler should also accept\n edited messages.\n pass_args (:obj:`bool`): Optional. Determines whether the handler should be passed\n ``args``.\n pass_update_queue (:obj:`bool`): Optional. Determines whether ``update_queue`` will be\n passed to the callback function.\n pass_job_queue (:obj:`bool`): Optional. Determines whether ``job_queue`` will be passed to\n the callback function.\n pass_user_data (:obj:`bool`): Optional. Determines whether ``user_data`` will be passed to\n the callback function.\n pass_chat_data (:obj:`bool`): Optional. Determines whether ``chat_data`` will be passed to\n the callback function.\n\n Note:\n :attr:`pass_user_data` and :attr:`pass_chat_data` determine whether a ``dict`` you\n can use to keep any data in will be sent to the :attr:`callback` function.. Related to\n either the user or the chat that the update was sent in. For each update from the same user\n or in the same chat, it will be the same ``dict``.\n\n Args:\n command (:obj:`str` | List[:obj:`str`]): The command or list of commands this handler\n should listen for.\n callback (:obj:`callable`): A function that takes ``bot, update`` as positional arguments.\n It will be called when the :attr:`check_update` has determined that an update should be\n processed by this handler.\n filters (:class:`telegram.ext.BaseFilter`, optional): A filter inheriting from\n :class:`telegram.ext.filters.BaseFilter`. Standard filters can be found in\n :class:`telegram.ext.filters.Filters`. Filters can be combined using bitwise\n operators (& for and, | for or, ~ for not).\n allow_edited (:obj:`bool`, optional): Determines whether the handler should also accept\n edited messages. Default is ``False``.\n pass_args (:obj:`bool`, optional): Determines whether the handler should be passed the\n arguments passed to the command as a keyword argument called ``args``. It will contain\n a list of strings, which is the text following the command split on single or\n consecutive whitespace characters. Default is ``False``\n pass_update_queue (:obj:`bool`, optional): If set to ``True``, a keyword argument called\n ``update_queue`` will be passed to the callback function. 
It will be the ``Queue``\n instance used by the :class:`telegram.ext.Updater` and :class:`telegram.ext.Dispatcher`\n that contains new updates which can be used to insert updates. Default is ``False``.\n pass_job_queue (:obj:`bool`, optional): If set to ``True``, a keyword argument called\n ``job_queue`` will be passed to the callback function. It will be a\n :class:`telegram.ext.JobQueue` instance created by the :class:`telegram.ext.Updater`\n which can be used to schedule new jobs. Default is ``False``.\n pass_user_data (:obj:`bool`, optional): If set to ``True``, a keyword argument called\n ``user_data`` will be passed to the callback function. Default is ``False``.\n pass_chat_data (:obj:`bool`, optional): If set to ``True``, a keyword argument called\n ``chat_data`` will be passed to the callback function. Default is ``False``.\n\n \"\"\"\n\n def __init__(self,\n command,\n callback,\n filters=None,\n allow_edited=False,\n pass_args=False,\n pass_update_queue=False,\n pass_job_queue=False,\n pass_user_data=False,\n pass_chat_data=False):\n super(CommandHandler, self).__init__(\n callback,\n pass_update_queue=pass_update_queue,\n pass_job_queue=pass_job_queue,\n pass_user_data=pass_user_data,\n pass_chat_data=pass_chat_data)\n\n if isinstance(command, string_types):\n self.command = [command.lower()]\n else:\n self.command = [x.lower() for x in command]\n self.filters = filters\n self.allow_edited = allow_edited\n self.pass_args = pass_args\n\n # We put this up here instead of with the rest of checking code\n # in check_update since we don't wanna spam a ton\n if isinstance(self.filters, list):\n warnings.warn('Using a list of filters in MessageHandler is getting '\n 'deprecated, please use bitwise operators (& and |) '\n 'instead. More info: https://git.io/vPTbc.')\n\n def check_update(self, update):\n \"\"\"Determines whether an update should be passed to this handlers :attr:`callback`.\n\n Args:\n update (:class:`telegram.Update`): Incoming telegram update.\n\n Returns:\n :obj:`bool`\n\n \"\"\"\n if (isinstance(update, Update)\n and (update.message or update.edited_message and self.allow_edited)):\n message = update.message or update.edited_message\n\n if message.text and message.text.startswith('/') and len(message.text) > 1:\n first_word = message.text_html.split(None, 1)[0]\n if len(first_word) > 1 and first_word.startswith('/'):\n command = first_word[1:].split('@')\n command.append(\n message.bot.username) # in case the command was sent without a username\n\n if self.filters is None:\n res = True\n elif isinstance(self.filters, list):\n res = any(func(message) for func in self.filters)\n else:\n res = self.filters(message)\n\n return res and (command[0].lower() in self.command\n and command[1].lower() == message.bot.username.lower())\n\n return False\n\n def handle_update(self, update, dispatcher):\n \"\"\"Send the update to the :attr:`callback`.\n\n Args:\n update (:class:`telegram.Update`): Incoming telegram update.\n dispatcher (:class:`telegram.ext.Dispatcher`): Dispatcher that originated the Update.\n\n \"\"\"\n optional_args = self.collect_optional_args(dispatcher, update)\n\n message = update.message or update.edited_message\n\n if self.pass_args:\n optional_args['args'] = message.text.split()[1:]\n\n return self.callback(dispatcher.bot, update, **optional_args)\n", "path": "telegram/ext/commandhandler.py"}]}
num_tokens_prompt: 3,273
num_tokens_diff: 193

problem_id: gh_patches_debug_21247
source: rasdani/github-patches
task_type: git_diff
in_source_id: hedyorg__hedy-1727
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> No or incorrect output [BUG] Sometimes when my students run a program they don't get any or incorrect output. It is hard to pinpoint when exactly this happens. I think it has to do with pressing "Run Code" while a program is still running. Student do this a lot and I see this issue multiple times per lesson. Running the program keeps on giving incorrect results once this has happened. My workaround is to save the program, refresh the webpage in the browser and try again. This usually works. One example of this issue arises when I run the code below and press the "Run Code" button multiple times during execution. At one point it will create two figures and raise an error (see screenshot). However the issue is certainly not limited to drawings, it also happens when using the `repeat` and `ask` commands. ``` hoek = 90 repeat 10 times turn hoek forward 50 forward 100 ``` ![image](https://user-images.githubusercontent.com/10562903/150156604-ddb95243-c88f-4ff3-a791-db2dda773c86.png) </issue> <code> [start of website/achievements.py] 1 from website import database 2 from hedyweb import AchievementTranslations 3 from website.auth import requires_login, current_user 4 from flask import request, jsonify, session 5 import hedy 6 7 8 class Achievements: 9 10 def __init__(self): 11 self.DATABASE = database.Database() 12 self.TRANSLATIONS = AchievementTranslations() 13 self.all_commands = self.get_all_commands() 14 15 def get_all_commands(self): 16 commands = [] 17 for i in range(1, hedy.HEDY_MAX_LEVEL+1): 18 for command in hedy.commands_per_level.get(i): 19 commands.append(command) 20 return set(commands) 21 22 def initialize_user_data_if_necessary(self): 23 if 'achieved' not in session: 24 achievements_data = self.DATABASE.progress_by_username(current_user()['username']) 25 session['new_achieved'] = [] 26 session['new_commands'] = [] 27 session['previous_code'] = None 28 session['identical_consecutive_errors'] = 0 29 session['consecutive_errors'] = 0 30 if not achievements_data: 31 achievements_data = {} 32 if 'achieved' in achievements_data: 33 session['achieved'] = achievements_data['achieved'] 34 else: 35 session['achieved'] = [] 36 if 'commands' in achievements_data: 37 session['commands'] = achievements_data['commands'] 38 else: 39 session['commands'] = [] 40 if 'run_programs' in achievements_data: 41 session['run_programs'] = achievements_data['run_programs'] 42 else: 43 session['run_programs'] = 0 44 if 'saved_programs' in achievements_data: 45 session['saved_programs'] = achievements_data['saved_programs'] 46 else: 47 session['saved_programs'] = 0 48 if 'submitted_programs' in achievements_data: 49 session['submitted_programs'] = achievements_data['submitted_programs'] 50 else: 51 session['submitted_programs'] = 0 52 53 def routes(self, app, database): 54 global DATABASE 55 DATABASE = database 56 57 @app.route('/achievements', methods=['POST']) 58 @requires_login 59 def push_new_achievement(user): 60 body = request.json 61 if "achievement" in body: 62 self.initialize_user_data_if_necessary() 63 if body['achievement'] not in session['achieved'] and body['achievement'] in self.TRANSLATIONS.get_translations(session['lang']): 64 return jsonify({"achievements": self.verify_pushed_achievement(user.get('username'), body['achievement'])}) 65 return jsonify({}) 66 67 def increase_count(self, category): 68 self.initialize_user_data_if_necessary() 69 if category == "run": 70 
session['run_programs'] += 1 71 elif category == "saved": 72 session['saved_programs'] += 1 73 elif category == "submitted": 74 session['submitted_programs'] += 1 75 76 def add_single_achievement(self, username, achievement): 77 self.initialize_user_data_if_necessary() 78 if achievement not in session['achieved'] and achievement in self.TRANSLATIONS.get_translations(session['lang']): 79 return self.verify_pushed_achievement(username, achievement) 80 else: 81 return None 82 83 def verify_run_achievements(self, username, code=None, level=None, response=None): 84 self.initialize_user_data_if_necessary() 85 self.check_programs_run() 86 if code and level: 87 self.check_code_achievements(code, level) 88 if code and response: 89 self.check_response_achievements(code, response) 90 91 if len(session['new_commands']) > 0: 92 for command in session['new_commands']: 93 session['commands'].append(command) 94 session['new_commands'] = [] 95 self.DATABASE.add_commands_to_username(username, session['commands']) 96 97 if len(session['new_achieved']) > 0: 98 self.DATABASE.add_achievements_to_username(username, session['new_achieved']) 99 for achievement in session['new_achieved']: 100 session['achieved'].append(achievement) 101 return True 102 return False 103 104 def verify_save_achievements(self, username, adventure=None): 105 self.initialize_user_data_if_necessary() 106 self.check_programs_saved() 107 if adventure and 'adventure_is_worthwhile' not in session['achieved']: 108 session['new_achieved'].append("adventure_is_worthwhile") 109 if len(session['new_achieved']) > 0: 110 self.DATABASE.add_achievements_to_username(username, session['new_achieved']) 111 for achievement in session['new_achieved']: 112 session['achieved'].append(achievement) 113 return True 114 return False 115 116 def verify_submit_achievements(self, username): 117 self.initialize_user_data_if_necessary() 118 self.check_programs_submitted() 119 120 if len(session['new_achieved']) > 0: 121 self.DATABASE.add_achievements_to_username(username, session['new_achieved']) 122 for achievement in session['new_achieved']: 123 session['achieved'].append(achievement) 124 return True 125 return False 126 127 def verify_pushed_achievement(self, username, achievement): 128 self.initialize_user_data_if_necessary() 129 session['new_achieved'] = [achievement] 130 self.DATABASE.add_achievement_to_username(username, achievement) 131 session['achieved'].append(achievement) 132 return self.get_earned_achievements() 133 134 def get_earned_achievements(self): 135 self.initialize_user_data_if_necessary() 136 translations = self.TRANSLATIONS.get_translations(session['lang']) 137 translated_achievements = [] 138 for achievement in session['new_achieved']: 139 translated_achievements.append([translations[achievement]['title'], translations[achievement]['text'], translations[achievement]['image']]) 140 session['new_achieved'] = [] #Once we get earned achievements -> empty the array with "waiting" ones 141 session['new_commands'] = [] 142 return translated_achievements 143 144 def check_programs_run(self): 145 self.initialize_user_data_if_necessary() 146 if 'getting_started_I' not in session['achieved'] and session['run_programs'] >= 1: 147 session['new_achieved'].append("getting_started_I") 148 if 'getting_started_II' not in session['achieved'] and session['run_programs'] >= 10: 149 session['new_achieved'].append("getting_started_II") 150 if 'getting_started_III' not in session['achieved'] and session['run_programs'] >= 50: 151 
session['new_achieved'].append("getting_started_III") 152 if 'getting_started_IV' not in session['achieved'] and session['run_programs'] >= 200: 153 session['new_achieved'].append("getting_started_IV") 154 if 'getting_started_V' not in session['achieved'] and session['run_programs'] >= 500: 155 session['new_achieved'].append("getting_started_V") 156 157 def check_programs_saved(self): 158 self.initialize_user_data_if_necessary() 159 if 'one_to_remember_I' not in session['achieved'] and session['saved_programs'] >= 1: 160 session['new_achieved'].append("one_to_remember_I") 161 if 'one_to_remember_II' not in session['achieved'] and session['saved_programs'] >= 5: 162 session['new_achieved'].append("one_to_remember_II") 163 if 'one_to_remember_III' not in session['achieved'] and session['saved_programs'] >= 10: 164 session['new_achieved'].append("one_to_remember_III") 165 if 'one_to_remember_IV' not in session['achieved'] and session['saved_programs'] >= 25: 166 session['new_achieved'].append("one_to_remember_IV") 167 if 'one_to_remember_V' not in session['achieved'] and session['saved_programs'] >= 50: 168 session['new_achieved'].append("one_to_remember_V") 169 170 def check_programs_submitted(self): 171 self.initialize_user_data_if_necessary() 172 if 'deadline_daredevil_I' not in session['achieved'] and session['submitted_programs'] >= 1: 173 session['new_achieved'].append("deadline_daredevil_I") 174 if 'deadline_daredevil_II' not in session['achieved'] and session['submitted_programs'] >= 3: 175 session['new_achieved'].append("deadline_daredevil_II") 176 if 'deadline_daredevil_III' not in session['achieved'] and session['submitted_programs'] >= 10: 177 session['new_achieved'].append("deadline_daredevil_III") 178 179 def check_code_achievements(self, code, level): 180 self.initialize_user_data_if_necessary() 181 commands_in_code = hedy.all_commands(code, level, session['lang']) 182 if 'trying_is_key' not in session['achieved']: 183 for command in set(commands_in_code): 184 if command not in session['commands'] and command not in session['new_commands']: 185 session['new_commands'].append(command) 186 if set(session['commands']).union(session['new_commands']) == self.all_commands: 187 session['new_achieved'].append("trying_is_key") 188 if 'did_you_say_please' not in session['achieved'] and "ask" in hedy.all_commands(code, level, session['lang']): 189 session['new_achieved'].append("did_you_say_please") 190 if 'talk-talk-talk' not in session['achieved'] and hedy.all_commands(code, level, session['lang']).count("ask") >= 5: 191 session['new_achieved'].append("talk-talk-talk") 192 if 'hedy_honor' not in session['achieved'] and "Hedy" in code: 193 session['new_achieved'].append("hedy_honor") 194 if 'hedy-ious' not in session['achieved']: 195 all_print_arguments = hedy.all_print_arguments(code, level, session['lang']) 196 for argument in all_print_arguments: 197 if all_print_arguments.count(argument) >= 10: 198 session['new_achieved'].append("hedy-ious") 199 break 200 201 def check_response_achievements(self, code, response): 202 self.initialize_user_data_if_necessary() 203 if 'ninja_turtle' not in session['achieved'] and 'has_turtle' in response and response['has_turtle']: 204 session['new_achieved'].append("ninja_turtle") 205 if 'watch_out' not in session['achieved'] and 'Warning' in response and response['Warning']: 206 session['new_achieved'].append("watch_out") 207 if 'Error' in response and response['Error']: 208 session['consecutive_errors'] += 1 209 if session['previous_code'] == code: 
210 if session['identical_consecutive_errors'] == 0: 211 session['identical_consecutive_errors'] += 2 #We have to count the first one too! 212 else: 213 session['identical_consecutive_errors'] += 1 214 if session['identical_consecutive_errors'] >= 3: 215 if 'programming_panic' not in session['achieved']: 216 session['new_achieved'].append("programming_panic") 217 session['previous_code'] = code 218 else: 219 if 'programming_protagonist' not in session['achieved'] and session['consecutive_errors'] >= 1: 220 session['new_achieved'].append("programming_protagonist") 221 session['consecutive_errors'] = 0 222 session['identical_consecutive_errors'] = 0 223 224 225 226 [end of website/achievements.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/website/achievements.py b/website/achievements.py --- a/website/achievements.py +++ b/website/achievements.py @@ -183,8 +183,8 @@ for command in set(commands_in_code): if command not in session['commands'] and command not in session['new_commands']: session['new_commands'].append(command) - if set(session['commands']).union(session['new_commands']) == self.all_commands: - session['new_achieved'].append("trying_is_key") + if set(session['commands']).union(session['new_commands']) == self.all_commands: + session['new_achieved'].append("trying_is_key") if 'did_you_say_please' not in session['achieved'] and "ask" in hedy.all_commands(code, level, session['lang']): session['new_achieved'].append("did_you_say_please") if 'talk-talk-talk' not in session['achieved'] and hedy.all_commands(code, level, session['lang']).count("ask") >= 5:
{"golden_diff": "diff --git a/website/achievements.py b/website/achievements.py\n--- a/website/achievements.py\n+++ b/website/achievements.py\n@@ -183,8 +183,8 @@\n for command in set(commands_in_code):\n if command not in session['commands'] and command not in session['new_commands']:\n session['new_commands'].append(command)\n- if set(session['commands']).union(session['new_commands']) == self.all_commands:\n- session['new_achieved'].append(\"trying_is_key\")\n+ if set(session['commands']).union(session['new_commands']) == self.all_commands:\n+ session['new_achieved'].append(\"trying_is_key\")\n if 'did_you_say_please' not in session['achieved'] and \"ask\" in hedy.all_commands(code, level, session['lang']):\n session['new_achieved'].append(\"did_you_say_please\")\n if 'talk-talk-talk' not in session['achieved'] and hedy.all_commands(code, level, session['lang']).count(\"ask\") >= 5:\n", "issue": "No or incorrect output [BUG]\nSometimes when my students run a program they don't get any or incorrect output. It is hard to pinpoint when exactly this happens. I think it has to do with pressing \"Run Code\" while a program is still running. Student do this a lot and I see this issue multiple times per lesson. Running the program keeps on giving incorrect results once this has happened. My workaround is to save the program, refresh the webpage in the browser and try again. This usually works.\r\n\r\nOne example of this issue arises when I run the code below and press the \"Run Code\" button multiple times during execution. At one point it will create two figures and raise an error (see screenshot). However the issue is certainly not limited to drawings, it also happens when using the `repeat` and `ask` commands.\r\n\r\n```\r\nhoek = 90\r\nrepeat 10 times\r\n turn hoek\r\n forward 50\r\nforward 100\r\n```\r\n\r\n![image](https://user-images.githubusercontent.com/10562903/150156604-ddb95243-c88f-4ff3-a791-db2dda773c86.png)\r\n\n", "before_files": [{"content": "from website import database\nfrom hedyweb import AchievementTranslations\nfrom website.auth import requires_login, current_user\nfrom flask import request, jsonify, session\nimport hedy\n\n\nclass Achievements:\n\n def __init__(self):\n self.DATABASE = database.Database()\n self.TRANSLATIONS = AchievementTranslations()\n self.all_commands = self.get_all_commands()\n\n def get_all_commands(self):\n commands = []\n for i in range(1, hedy.HEDY_MAX_LEVEL+1):\n for command in hedy.commands_per_level.get(i):\n commands.append(command)\n return set(commands)\n\n def initialize_user_data_if_necessary(self):\n if 'achieved' not in session:\n achievements_data = self.DATABASE.progress_by_username(current_user()['username'])\n session['new_achieved'] = []\n session['new_commands'] = []\n session['previous_code'] = None\n session['identical_consecutive_errors'] = 0\n session['consecutive_errors'] = 0\n if not achievements_data:\n achievements_data = {}\n if 'achieved' in achievements_data:\n session['achieved'] = achievements_data['achieved']\n else:\n session['achieved'] = []\n if 'commands' in achievements_data:\n session['commands'] = achievements_data['commands']\n else:\n session['commands'] = []\n if 'run_programs' in achievements_data:\n session['run_programs'] = achievements_data['run_programs']\n else:\n session['run_programs'] = 0\n if 'saved_programs' in achievements_data:\n session['saved_programs'] = achievements_data['saved_programs']\n else:\n session['saved_programs'] = 0\n if 'submitted_programs' in achievements_data:\n 
session['submitted_programs'] = achievements_data['submitted_programs']\n else:\n session['submitted_programs'] = 0\n\n def routes(self, app, database):\n global DATABASE\n DATABASE = database\n\n @app.route('/achievements', methods=['POST'])\n @requires_login\n def push_new_achievement(user):\n body = request.json\n if \"achievement\" in body:\n self.initialize_user_data_if_necessary()\n if body['achievement'] not in session['achieved'] and body['achievement'] in self.TRANSLATIONS.get_translations(session['lang']):\n return jsonify({\"achievements\": self.verify_pushed_achievement(user.get('username'), body['achievement'])})\n return jsonify({})\n\n def increase_count(self, category):\n self.initialize_user_data_if_necessary()\n if category == \"run\":\n session['run_programs'] += 1\n elif category == \"saved\":\n session['saved_programs'] += 1\n elif category == \"submitted\":\n session['submitted_programs'] += 1\n\n def add_single_achievement(self, username, achievement):\n self.initialize_user_data_if_necessary()\n if achievement not in session['achieved'] and achievement in self.TRANSLATIONS.get_translations(session['lang']):\n return self.verify_pushed_achievement(username, achievement)\n else:\n return None\n\n def verify_run_achievements(self, username, code=None, level=None, response=None):\n self.initialize_user_data_if_necessary()\n self.check_programs_run()\n if code and level:\n self.check_code_achievements(code, level)\n if code and response:\n self.check_response_achievements(code, response)\n\n if len(session['new_commands']) > 0:\n for command in session['new_commands']:\n session['commands'].append(command)\n session['new_commands'] = []\n self.DATABASE.add_commands_to_username(username, session['commands'])\n\n if len(session['new_achieved']) > 0:\n self.DATABASE.add_achievements_to_username(username, session['new_achieved'])\n for achievement in session['new_achieved']:\n session['achieved'].append(achievement)\n return True\n return False\n\n def verify_save_achievements(self, username, adventure=None):\n self.initialize_user_data_if_necessary()\n self.check_programs_saved()\n if adventure and 'adventure_is_worthwhile' not in session['achieved']:\n session['new_achieved'].append(\"adventure_is_worthwhile\")\n if len(session['new_achieved']) > 0:\n self.DATABASE.add_achievements_to_username(username, session['new_achieved'])\n for achievement in session['new_achieved']:\n session['achieved'].append(achievement)\n return True\n return False\n\n def verify_submit_achievements(self, username):\n self.initialize_user_data_if_necessary()\n self.check_programs_submitted()\n\n if len(session['new_achieved']) > 0:\n self.DATABASE.add_achievements_to_username(username, session['new_achieved'])\n for achievement in session['new_achieved']:\n session['achieved'].append(achievement)\n return True\n return False\n\n def verify_pushed_achievement(self, username, achievement):\n self.initialize_user_data_if_necessary()\n session['new_achieved'] = [achievement]\n self.DATABASE.add_achievement_to_username(username, achievement)\n session['achieved'].append(achievement)\n return self.get_earned_achievements()\n\n def get_earned_achievements(self):\n self.initialize_user_data_if_necessary()\n translations = self.TRANSLATIONS.get_translations(session['lang'])\n translated_achievements = []\n for achievement in session['new_achieved']:\n translated_achievements.append([translations[achievement]['title'], translations[achievement]['text'], translations[achievement]['image']])\n 
session['new_achieved'] = [] #Once we get earned achievements -> empty the array with \"waiting\" ones\n session['new_commands'] = []\n return translated_achievements\n\n def check_programs_run(self):\n self.initialize_user_data_if_necessary()\n if 'getting_started_I' not in session['achieved'] and session['run_programs'] >= 1:\n session['new_achieved'].append(\"getting_started_I\")\n if 'getting_started_II' not in session['achieved'] and session['run_programs'] >= 10:\n session['new_achieved'].append(\"getting_started_II\")\n if 'getting_started_III' not in session['achieved'] and session['run_programs'] >= 50:\n session['new_achieved'].append(\"getting_started_III\")\n if 'getting_started_IV' not in session['achieved'] and session['run_programs'] >= 200:\n session['new_achieved'].append(\"getting_started_IV\")\n if 'getting_started_V' not in session['achieved'] and session['run_programs'] >= 500:\n session['new_achieved'].append(\"getting_started_V\")\n\n def check_programs_saved(self):\n self.initialize_user_data_if_necessary()\n if 'one_to_remember_I' not in session['achieved'] and session['saved_programs'] >= 1:\n session['new_achieved'].append(\"one_to_remember_I\")\n if 'one_to_remember_II' not in session['achieved'] and session['saved_programs'] >= 5:\n session['new_achieved'].append(\"one_to_remember_II\")\n if 'one_to_remember_III' not in session['achieved'] and session['saved_programs'] >= 10:\n session['new_achieved'].append(\"one_to_remember_III\")\n if 'one_to_remember_IV' not in session['achieved'] and session['saved_programs'] >= 25:\n session['new_achieved'].append(\"one_to_remember_IV\")\n if 'one_to_remember_V' not in session['achieved'] and session['saved_programs'] >= 50:\n session['new_achieved'].append(\"one_to_remember_V\")\n\n def check_programs_submitted(self):\n self.initialize_user_data_if_necessary()\n if 'deadline_daredevil_I' not in session['achieved'] and session['submitted_programs'] >= 1:\n session['new_achieved'].append(\"deadline_daredevil_I\")\n if 'deadline_daredevil_II' not in session['achieved'] and session['submitted_programs'] >= 3:\n session['new_achieved'].append(\"deadline_daredevil_II\")\n if 'deadline_daredevil_III' not in session['achieved'] and session['submitted_programs'] >= 10:\n session['new_achieved'].append(\"deadline_daredevil_III\")\n\n def check_code_achievements(self, code, level):\n self.initialize_user_data_if_necessary()\n commands_in_code = hedy.all_commands(code, level, session['lang'])\n if 'trying_is_key' not in session['achieved']:\n for command in set(commands_in_code):\n if command not in session['commands'] and command not in session['new_commands']:\n session['new_commands'].append(command)\n if set(session['commands']).union(session['new_commands']) == self.all_commands:\n session['new_achieved'].append(\"trying_is_key\")\n if 'did_you_say_please' not in session['achieved'] and \"ask\" in hedy.all_commands(code, level, session['lang']):\n session['new_achieved'].append(\"did_you_say_please\")\n if 'talk-talk-talk' not in session['achieved'] and hedy.all_commands(code, level, session['lang']).count(\"ask\") >= 5:\n session['new_achieved'].append(\"talk-talk-talk\")\n if 'hedy_honor' not in session['achieved'] and \"Hedy\" in code:\n session['new_achieved'].append(\"hedy_honor\")\n if 'hedy-ious' not in session['achieved']:\n all_print_arguments = hedy.all_print_arguments(code, level, session['lang'])\n for argument in all_print_arguments:\n if all_print_arguments.count(argument) >= 10:\n 
session['new_achieved'].append(\"hedy-ious\")\n break\n\n def check_response_achievements(self, code, response):\n self.initialize_user_data_if_necessary()\n if 'ninja_turtle' not in session['achieved'] and 'has_turtle' in response and response['has_turtle']:\n session['new_achieved'].append(\"ninja_turtle\")\n if 'watch_out' not in session['achieved'] and 'Warning' in response and response['Warning']:\n session['new_achieved'].append(\"watch_out\")\n if 'Error' in response and response['Error']:\n session['consecutive_errors'] += 1\n if session['previous_code'] == code:\n if session['identical_consecutive_errors'] == 0:\n session['identical_consecutive_errors'] += 2 #We have to count the first one too!\n else:\n session['identical_consecutive_errors'] += 1\n if session['identical_consecutive_errors'] >= 3:\n if 'programming_panic' not in session['achieved']:\n session['new_achieved'].append(\"programming_panic\")\n session['previous_code'] = code\n else:\n if 'programming_protagonist' not in session['achieved'] and session['consecutive_errors'] >= 1:\n session['new_achieved'].append(\"programming_protagonist\")\n session['consecutive_errors'] = 0\n session['identical_consecutive_errors'] = 0\n\n\n\n", "path": "website/achievements.py"}]}
num_tokens_prompt: 3,895
num_tokens_diff: 240

problem_id: gh_patches_debug_17840
source: rasdani/github-patches
task_type: git_diff
in_source_id: PaddlePaddle__PaddleSpeech-1354
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> The version between the setup.py and __init__.py is not synchronized. The version in setup.py is 0.1.1, but the version in __init.py is 0.1.0. </issue> <code> [start of setup.py] 1 # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 import contextlib 15 import inspect 16 import io 17 import os 18 import subprocess as sp 19 import sys 20 from pathlib import Path 21 22 from setuptools import Command 23 from setuptools import find_packages 24 from setuptools import setup 25 from setuptools.command.develop import develop 26 from setuptools.command.install import install 27 28 HERE = Path(os.path.abspath(os.path.dirname(__file__))) 29 30 requirements = { 31 "install": [ 32 "editdistance", 33 "g2p_en", 34 "g2pM", 35 "h5py", 36 "inflect", 37 "jieba", 38 "jsonlines", 39 "kaldiio", 40 "librosa", 41 "loguru", 42 "matplotlib", 43 "nara_wpe", 44 "pandas", 45 "paddleaudio", 46 "paddlenlp", 47 "paddlespeech_feat", 48 "praatio==5.0.0", 49 "pypinyin", 50 "python-dateutil", 51 "pyworld", 52 "resampy==0.2.2", 53 "sacrebleu", 54 "scipy", 55 "sentencepiece~=0.1.96", 56 "soundfile~=0.10", 57 "textgrid", 58 "timer", 59 "tqdm", 60 "typeguard", 61 "visualdl", 62 "webrtcvad", 63 "yacs~=0.1.8", 64 ], 65 "develop": [ 66 "ConfigArgParse", 67 "coverage", 68 "gpustat", 69 "paddlespeech_ctcdecoders", 70 "phkit", 71 "Pillow", 72 "pybind11", 73 "pypi-kenlm", 74 "snakeviz", 75 "sox", 76 "soxbindings", 77 "unidecode", 78 "yq", 79 "pre-commit", 80 "zhon", 81 ] 82 } 83 84 85 @contextlib.contextmanager 86 def pushd(new_dir): 87 old_dir = os.getcwd() 88 os.chdir(new_dir) 89 print(new_dir) 90 yield 91 os.chdir(old_dir) 92 print(old_dir) 93 94 95 def read(*names, **kwargs): 96 with io.open( 97 os.path.join(os.path.dirname(__file__), *names), 98 encoding=kwargs.get("encoding", "utf8")) as fp: 99 return fp.read() 100 101 102 def check_call(cmd: str, shell=False, executable=None): 103 try: 104 sp.check_call( 105 cmd.split(), 106 shell=shell, 107 executable="/bin/bash" if shell else executable) 108 except sp.CalledProcessError as e: 109 print( 110 f"{__file__}:{inspect.currentframe().f_lineno}: CMD: {cmd}, Error:", 111 e.output, 112 file=sys.stderr) 113 raise e 114 115 116 def _remove(files: str): 117 for f in files: 118 f.unlink() 119 120 121 def _post_install(install_lib_dir): 122 # tools/make 123 tool_dir = HERE / "tools" 124 _remove(tool_dir.glob("*.done")) 125 with pushd(tool_dir): 126 check_call("make") 127 print("tools install.") 128 129 # ctcdecoder 130 ctcdecoder_dir = HERE / 'third_party/ctc_decoders' 131 with pushd(ctcdecoder_dir): 132 check_call("bash -e setup.sh") 133 print("ctcdecoder install.") 134 135 136 class DevelopCommand(develop): 137 def run(self): 138 develop.run(self) 139 # must after develop.run, or pkg install by shell will not see 140 self.execute(_post_install, (self.install_lib, ), msg="Post Install...") 141 142 143 class 
InstallCommand(install): 144 def run(self): 145 install.run(self) 146 147 148 # cmd: python setup.py upload 149 class UploadCommand(Command): 150 description = "Build and publish the package." 151 user_options = [] 152 153 def initialize_options(self): 154 pass 155 156 def finalize_options(self): 157 pass 158 159 def run(self): 160 try: 161 print("Removing previous dist/ ...") 162 shutil.rmtree(str(HERE / "dist")) 163 except OSError: 164 pass 165 print("Building source distribution...") 166 sp.check_call([sys.executable, "setup.py", "sdist"]) 167 print("Uploading package to PyPi...") 168 sp.check_call(["twine", "upload", "dist/*"]) 169 sys.exit() 170 171 172 setup_info = dict( 173 # Metadata 174 name='paddlespeech', 175 version='0.1.1', 176 author='PaddlePaddle Speech and Language Team', 177 author_email='[email protected]', 178 url='https://github.com/PaddlePaddle/PaddleSpeech', 179 license='Apache 2.0', 180 description='Speech tools and models based on Paddlepaddle', 181 long_description=read("README.md"), 182 long_description_content_type="text/markdown", 183 keywords=[ 184 "speech", 185 "asr", 186 "tts", 187 "speaker verfication", 188 "speech classfication", 189 "text frontend", 190 "MFA", 191 "paddlepaddle", 192 "beam search", 193 "ctcdecoder", 194 "deepspeech2", 195 "transformer", 196 "conformer", 197 "fastspeech", 198 "vocoder", 199 "pwgan", 200 "gan", 201 ], 202 python_requires='>=3.7', 203 install_requires=requirements["install"], 204 extras_require={ 205 'develop': 206 requirements["develop"], 207 'doc': [ 208 "sphinx", "sphinx-rtd-theme", "numpydoc", "myst_parser", 209 "recommonmark>=0.5.0", "sphinx-markdown-tables", "sphinx-autobuild" 210 ], 211 }, 212 cmdclass={ 213 'develop': DevelopCommand, 214 'install': InstallCommand, 215 'upload': UploadCommand, 216 }, 217 218 # Package info 219 packages=find_packages(include=('paddlespeech*')), 220 zip_safe=True, 221 classifiers=[ 222 'Development Status :: 5 - Production/Stable', 223 'Intended Audience :: Developers', 224 'Intended Audience :: Science/Research', 225 'Topic :: Scientific/Engineering :: Artificial Intelligence', 226 'License :: OSI Approved :: Apache Software License', 227 'Programming Language :: Python', 228 'Programming Language :: Python :: 3', 229 'Programming Language :: Python :: 3.7', 230 'Programming Language :: Python :: 3.8', 231 'Programming Language :: Python :: 3.9', 232 ], 233 entry_points={ 234 'console_scripts': ['paddlespeech=paddlespeech.cli.entry:_execute'] 235 }) 236 237 setup(**setup_info) 238 [end of setup.py] [start of paddlespeech/__init__.py] 1 # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 __version__ = '0.1.0' 16 [end of paddlespeech/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/paddlespeech/__init__.py b/paddlespeech/__init__.py --- a/paddlespeech/__init__.py +++ b/paddlespeech/__init__.py @@ -12,4 +12,4 @@ # See the License for the specific language governing permissions and # limitations under the License. -__version__ = '0.1.0' +__version__ = '0.1.1' diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -17,6 +17,7 @@ import os import subprocess as sp import sys +import paddlespeech from pathlib import Path from setuptools import Command @@ -172,7 +173,7 @@ setup_info = dict( # Metadata name='paddlespeech', - version='0.1.1', + version=paddlespeech.__version__, author='PaddlePaddle Speech and Language Team', author_email='[email protected]', url='https://github.com/PaddlePaddle/PaddleSpeech',
{"golden_diff": "diff --git a/paddlespeech/__init__.py b/paddlespeech/__init__.py\n--- a/paddlespeech/__init__.py\n+++ b/paddlespeech/__init__.py\n@@ -12,4 +12,4 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n-__version__ = '0.1.0'\n+__version__ = '0.1.1'\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -17,6 +17,7 @@\n import os\n import subprocess as sp\n import sys\n+import paddlespeech\n from pathlib import Path\n \n from setuptools import Command\n@@ -172,7 +173,7 @@\n setup_info = dict(\n # Metadata\n name='paddlespeech',\n- version='0.1.1',\n+ version=paddlespeech.__version__,\n author='PaddlePaddle Speech and Language Team',\n author_email='[email protected]',\n url='https://github.com/PaddlePaddle/PaddleSpeech',\n", "issue": "The version between the setup.py and __init__.py is not synchronized.\nThe version in setup.py is 0.1.1, but the version in __init.py is 0.1.0.\n", "before_files": [{"content": "# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport contextlib\nimport inspect\nimport io\nimport os\nimport subprocess as sp\nimport sys\nfrom pathlib import Path\n\nfrom setuptools import Command\nfrom setuptools import find_packages\nfrom setuptools import setup\nfrom setuptools.command.develop import develop\nfrom setuptools.command.install import install\n\nHERE = Path(os.path.abspath(os.path.dirname(__file__)))\n\nrequirements = {\n \"install\": [\n \"editdistance\",\n \"g2p_en\",\n \"g2pM\",\n \"h5py\",\n \"inflect\",\n \"jieba\",\n \"jsonlines\",\n \"kaldiio\",\n \"librosa\",\n \"loguru\",\n \"matplotlib\",\n \"nara_wpe\",\n \"pandas\",\n \"paddleaudio\",\n \"paddlenlp\",\n \"paddlespeech_feat\",\n \"praatio==5.0.0\",\n \"pypinyin\",\n \"python-dateutil\",\n \"pyworld\",\n \"resampy==0.2.2\",\n \"sacrebleu\",\n \"scipy\",\n \"sentencepiece~=0.1.96\",\n \"soundfile~=0.10\",\n \"textgrid\",\n \"timer\",\n \"tqdm\",\n \"typeguard\",\n \"visualdl\",\n \"webrtcvad\",\n \"yacs~=0.1.8\",\n ],\n \"develop\": [\n \"ConfigArgParse\",\n \"coverage\",\n \"gpustat\",\n \"paddlespeech_ctcdecoders\",\n \"phkit\",\n \"Pillow\",\n \"pybind11\",\n \"pypi-kenlm\",\n \"snakeviz\",\n \"sox\",\n \"soxbindings\",\n \"unidecode\",\n \"yq\",\n \"pre-commit\",\n \"zhon\",\n ]\n}\n\n\[email protected]\ndef pushd(new_dir):\n old_dir = os.getcwd()\n os.chdir(new_dir)\n print(new_dir)\n yield\n os.chdir(old_dir)\n print(old_dir)\n\n\ndef read(*names, **kwargs):\n with io.open(\n os.path.join(os.path.dirname(__file__), *names),\n encoding=kwargs.get(\"encoding\", \"utf8\")) as fp:\n return fp.read()\n\n\ndef check_call(cmd: str, shell=False, executable=None):\n try:\n sp.check_call(\n cmd.split(),\n shell=shell,\n executable=\"/bin/bash\" if shell else executable)\n except sp.CalledProcessError as e:\n print(\n f\"{__file__}:{inspect.currentframe().f_lineno}: CMD: {cmd}, Error:\",\n e.output,\n file=sys.stderr)\n raise e\n\n\ndef _remove(files: str):\n 
for f in files:\n f.unlink()\n\n\ndef _post_install(install_lib_dir):\n # tools/make\n tool_dir = HERE / \"tools\"\n _remove(tool_dir.glob(\"*.done\"))\n with pushd(tool_dir):\n check_call(\"make\")\n print(\"tools install.\")\n\n # ctcdecoder\n ctcdecoder_dir = HERE / 'third_party/ctc_decoders'\n with pushd(ctcdecoder_dir):\n check_call(\"bash -e setup.sh\")\n print(\"ctcdecoder install.\")\n\n\nclass DevelopCommand(develop):\n def run(self):\n develop.run(self)\n # must after develop.run, or pkg install by shell will not see\n self.execute(_post_install, (self.install_lib, ), msg=\"Post Install...\")\n\n\nclass InstallCommand(install):\n def run(self):\n install.run(self)\n\n\n # cmd: python setup.py upload\nclass UploadCommand(Command):\n description = \"Build and publish the package.\"\n user_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n try:\n print(\"Removing previous dist/ ...\")\n shutil.rmtree(str(HERE / \"dist\"))\n except OSError:\n pass\n print(\"Building source distribution...\")\n sp.check_call([sys.executable, \"setup.py\", \"sdist\"])\n print(\"Uploading package to PyPi...\")\n sp.check_call([\"twine\", \"upload\", \"dist/*\"])\n sys.exit()\n\n\nsetup_info = dict(\n # Metadata\n name='paddlespeech',\n version='0.1.1',\n author='PaddlePaddle Speech and Language Team',\n author_email='[email protected]',\n url='https://github.com/PaddlePaddle/PaddleSpeech',\n license='Apache 2.0',\n description='Speech tools and models based on Paddlepaddle',\n long_description=read(\"README.md\"),\n long_description_content_type=\"text/markdown\",\n keywords=[\n \"speech\",\n \"asr\",\n \"tts\",\n \"speaker verfication\",\n \"speech classfication\",\n \"text frontend\",\n \"MFA\",\n \"paddlepaddle\",\n \"beam search\",\n \"ctcdecoder\",\n \"deepspeech2\",\n \"transformer\",\n \"conformer\",\n \"fastspeech\",\n \"vocoder\",\n \"pwgan\",\n \"gan\",\n ],\n python_requires='>=3.7',\n install_requires=requirements[\"install\"],\n extras_require={\n 'develop':\n requirements[\"develop\"],\n 'doc': [\n \"sphinx\", \"sphinx-rtd-theme\", \"numpydoc\", \"myst_parser\",\n \"recommonmark>=0.5.0\", \"sphinx-markdown-tables\", \"sphinx-autobuild\"\n ],\n },\n cmdclass={\n 'develop': DevelopCommand,\n 'install': InstallCommand,\n 'upload': UploadCommand,\n },\n\n # Package info\n packages=find_packages(include=('paddlespeech*')),\n zip_safe=True,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n ],\n entry_points={\n 'console_scripts': ['paddlespeech=paddlespeech.cli.entry:_execute']\n })\n\nsetup(**setup_info)\n", "path": "setup.py"}, {"content": "# Copyright (c) 2021 PaddlePaddle Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n__version__ = '0.1.0'\n", "path": "paddlespeech/__init__.py"}]}
2,907
249
gh_patches_debug_8367
rasdani/github-patches
git_diff
cal-itp__benefits-928
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Use 500 page for true application errors A clear and concise description of the task. ## Acceptance Criteria <!-- Remember to consider edge cases --> - [ ] Use 500 / service is down for true application errors, EV server offline, Auth provider offline, etc. ## Additional context <!-- Add any other context about the task here --> Related to https://github.com/cal-itp/benefits/issues/913 https://github.com/cal-itp/benefits/issues/757 #914 </issue> <code> [start of benefits/core/viewmodels.py] 1 """ 2 The core application: view model definitions for the root of the webapp. 3 """ 4 from django.utils.translation import pgettext, gettext_lazy as _ 5 from django.urls import reverse 6 7 from benefits.core import models 8 9 from . import session 10 11 12 class Button: 13 """ 14 Represents a clickable button as styled <a> element (with optional label, optional transparent fallback text): 15 * classes: str, str[] 16 * id: str 17 * fallback_text: str 18 * label: str 19 * text: str 20 * url: str 21 * target: str 22 * rel: str 23 """ 24 25 def __init__(self, **kwargs): 26 classes = kwargs.get("classes", []) 27 if isinstance(classes, str): 28 classes = classes.split() 29 30 self.classes = ["btn", "btn-lg"] 31 self.classes.extend(classes) 32 self.id = kwargs.get("id") 33 self.fallback_text = kwargs.get("fallback_text") 34 self.label = kwargs.get("label") 35 self.text = kwargs.get("text", "Button") 36 self.url = kwargs.get("url") 37 self.target = kwargs.get("target") 38 self.rel = kwargs.get("rel") 39 40 @staticmethod 41 def agency_contact_links(agency): 42 """Create link buttons for agency contact information.""" 43 return [ 44 Button.link(classes="agency", label=agency.long_name, text=agency.phone, url=f"tel:{agency.phone}"), 45 Button.link( 46 classes="agency", text=agency.info_url, url=agency.info_url, target="_blank", rel="noopener noreferrer" 47 ), 48 ] 49 50 @staticmethod 51 def home(request, text=None): 52 """Create a button back to this session's origin.""" 53 if text is None: 54 text = _("core.buttons.return_home") 55 56 return Button.primary(text=text, url=session.origin(request)) 57 58 @staticmethod 59 def link(**kwargs): 60 classes = kwargs.pop("classes", []) 61 if isinstance(classes, str): 62 classes = classes.split(" ") 63 classes.insert(0, "btn-link") 64 return Button(classes=classes, **kwargs) 65 66 @staticmethod 67 def primary(**kwargs): 68 classes = kwargs.pop("classes", []) 69 if isinstance(classes, str): 70 classes = classes.split(" ") 71 classes.insert(0, "btn-primary") 72 return Button(classes=classes, **kwargs) 73 74 @staticmethod 75 def outline_primary(**kwargs): 76 classes = kwargs.pop("classes", []) 77 if isinstance(classes, str): 78 classes = classes.split(" ") 79 classes.insert(0, "btn-outline-primary") 80 return Button(classes=classes, **kwargs) 81 82 @staticmethod 83 def login(**kwargs): 84 """Create a login.gov button, with a login.gov logo and fallback text""" 85 btn = Button.primary(fallback_text="Login.gov", id="login", **kwargs) 86 return btn 87 88 @staticmethod 89 def logout(**kwargs): 90 """Create a button that logs user out, with a login.gov button, with a login.gov logo and fallback text""" 91 btn = Button.primary(fallback_text="Login.gov", id="login", url=reverse("oauth:logout"), text="", **kwargs) 92 return btn 93 94 95 class Icon: 96 """Represents an icon.""" 97 98 def __init__(self, icon, alt): 99 self.src = f"img/icon/{icon}.svg" 100 self.alt = alt 101 
102 103 class Page: 104 """ 105 Represents a page of content: 106 * title: str 107 * noimage: bool 108 * icon: core.viewmodels.Icon 109 * content_title: str 110 * paragraphs: str[] 111 * form: django.forms.Form 112 * forms: django.forms.Form[] 113 * button: core.viewmodels.Button 114 * buttons: core.viewmodels.Button[] 115 * classes: str[] 116 """ 117 118 def __init__(self, **kwargs): 119 self.title = kwargs.get("title") 120 if self.title is None: 121 self.title = _("core.pages.index.prefix") 122 else: 123 self.title = f"{_('core.pages.index.prefix')}: {self.title}" 124 125 self.noimage = kwargs.get("noimage", False) 126 self.icon = kwargs.get("icon") 127 self.content_title = kwargs.get("content_title") 128 self.paragraphs = kwargs.get("paragraphs", []) 129 self.steps = kwargs.get("steps") 130 131 self.forms = kwargs.get("forms", []) 132 if not isinstance(self.forms, list): 133 self.forms = [self.forms] 134 if "form" in kwargs: 135 self.forms.append(kwargs.get("form")) 136 137 self.buttons = kwargs.get("buttons", []) 138 if not isinstance(self.buttons, list): 139 self.buttons = [self.buttons] 140 if "button" in kwargs: 141 self.buttons.append(kwargs.get("button")) 142 143 self.classes = kwargs.get("classes", []) 144 if not isinstance(self.classes, list): 145 self.classes = self.classes.split(" ") 146 if not self.noimage: 147 self.classes.append("with-image") 148 149 def context_dict(self): 150 """Return a context dict for a Page.""" 151 return {"page": self} 152 153 154 class ErrorPage(Page): 155 """ 156 Represents an error page: 157 * title: str 158 * icon: core.viewmodels.Icon 159 * content_title: str 160 * paragraphs: str[] 161 * button: core.viewmodels.Button 162 """ 163 164 def __init__(self, **kwargs): 165 super().__init__( 166 title=kwargs.get("title", _("core.pages.error.title")), 167 icon=kwargs.get("icon", Icon("sadbus", pgettext("image alt text", "core.icons.sadbus"))), 168 content_title=kwargs.get("content_title", _("core.pages.error.title")), 169 paragraphs=kwargs.get("paragraphs", [_("core.pages.server_error.content_title")]), 170 button=kwargs.get("button"), 171 noimage=True, 172 ) 173 174 @staticmethod 175 def user_error( 176 title=_("core.pages.user_error.title"), 177 content_title=_("core.pages.user_error.content_title"), 178 paragraphs=[_("core.pages.user_error.p[0]")], 179 **kwargs, 180 ): 181 """Create a new core.viewmodels.ErrorPage instance with defaults for a user error.""" 182 return ErrorPage(title=title, content_title=content_title, paragraphs=paragraphs, **kwargs) 183 184 @staticmethod 185 def server_error( 186 title=_("core.pages.server_error.title"), 187 content_title=_("core.pages.server_error.title"), 188 paragraphs=[_("core.pages.server_error.p[0]"), _("core.pages.server_error.p[1]")], 189 **kwargs, 190 ): 191 """Create a new core.viewmodels.ErrorPage instance with defaults for a generic server error.""" 192 return ErrorPage(title=title, content_title=content_title, paragraphs=paragraphs, **kwargs) 193 194 @staticmethod 195 def not_found( 196 title=_("core.pages.not_found.title"), 197 content_title=_("core.pages.not_found.content_title"), 198 paragraphs=[_("core.pages.not_found.p[0]")], 199 **kwargs, 200 ): 201 """Create a new core.viewmodels.ErrorPage with defaults for a 404.""" 202 path = kwargs.pop("path", None) 203 if path and title: 204 title = f"{title}: {path}" 205 elif path and not title: 206 title = path 207 return ErrorPage(title=title, content_title=content_title, paragraphs=paragraphs, **kwargs) 208 209 210 class PaymentProcessor: 211 """ 212 
Represents a core.models.PaymentProcessor: 213 * model: core.models.PaymentProcessor 214 * access_token_url: str 215 * element_id: str 216 * color: str 217 * [name: str] 218 * [loading_text: str] 219 """ 220 221 def __init__(self, model, access_token_url, element_id, color, name=None, loading_text=_("core.buttons.wait")): 222 if isinstance(model, models.PaymentProcessor): 223 self.access_token_url = access_token_url 224 self.element_id = element_id 225 self.color = color 226 self.name = name or model.name 227 self.loading_text = loading_text 228 self.card_tokenize_url = model.card_tokenize_url 229 self.card_tokenize_func = model.card_tokenize_func 230 self.card_tokenize_env = model.card_tokenize_env 231 232 def context_dict(self): 233 """Return a context dict for a PaymentProcessor.""" 234 return {"payment_processor": self} 235 236 237 class TransitAgency: 238 """ 239 Represents a core.models.TransitAgency: 240 * model: core.models.TransitAgency 241 """ 242 243 def __init__(self, model): 244 if isinstance(model, models.TransitAgency): 245 self.slug = model.slug 246 self.short_name = model.short_name 247 self.long_name = model.long_name 248 self.agency_id = model.agency_id 249 self.merchant_id = model.merchant_id 250 self.info_url = model.info_url 251 self.phone = model.phone 252 253 def context_dict(self): 254 """Return a context dict for a TransitAgency.""" 255 return {"agency": self} 256 [end of benefits/core/viewmodels.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/benefits/core/viewmodels.py b/benefits/core/viewmodels.py --- a/benefits/core/viewmodels.py +++ b/benefits/core/viewmodels.py @@ -185,7 +185,7 @@ def server_error( title=_("core.pages.server_error.title"), content_title=_("core.pages.server_error.title"), - paragraphs=[_("core.pages.server_error.p[0]"), _("core.pages.server_error.p[1]")], + paragraphs=[_("core.pages.server_error.p[0]")], **kwargs, ): """Create a new core.viewmodels.ErrorPage instance with defaults for a generic server error."""
{"golden_diff": "diff --git a/benefits/core/viewmodels.py b/benefits/core/viewmodels.py\n--- a/benefits/core/viewmodels.py\n+++ b/benefits/core/viewmodels.py\n@@ -185,7 +185,7 @@\n def server_error(\n title=_(\"core.pages.server_error.title\"),\n content_title=_(\"core.pages.server_error.title\"),\n- paragraphs=[_(\"core.pages.server_error.p[0]\"), _(\"core.pages.server_error.p[1]\")],\n+ paragraphs=[_(\"core.pages.server_error.p[0]\")],\n **kwargs,\n ):\n \"\"\"Create a new core.viewmodels.ErrorPage instance with defaults for a generic server error.\"\"\"\n", "issue": "Use 500 page for true application errors\nA clear and concise description of the task.\r\n\r\n## Acceptance Criteria\r\n\r\n<!-- Remember to consider edge cases -->\r\n\r\n- [ ] Use 500 / service is down for true application errors, EV server offline, Auth provider offline, etc.\r\n\r\n## Additional context\r\n\r\n<!-- Add any other context about the task here -->\r\nRelated to https://github.com/cal-itp/benefits/issues/913 https://github.com/cal-itp/benefits/issues/757 #914 \n", "before_files": [{"content": "\"\"\"\nThe core application: view model definitions for the root of the webapp.\n\"\"\"\nfrom django.utils.translation import pgettext, gettext_lazy as _\nfrom django.urls import reverse\n\nfrom benefits.core import models\n\nfrom . import session\n\n\nclass Button:\n \"\"\"\n Represents a clickable button as styled <a> element (with optional label, optional transparent fallback text):\n * classes: str, str[]\n * id: str\n * fallback_text: str\n * label: str\n * text: str\n * url: str\n * target: str\n * rel: str\n \"\"\"\n\n def __init__(self, **kwargs):\n classes = kwargs.get(\"classes\", [])\n if isinstance(classes, str):\n classes = classes.split()\n\n self.classes = [\"btn\", \"btn-lg\"]\n self.classes.extend(classes)\n self.id = kwargs.get(\"id\")\n self.fallback_text = kwargs.get(\"fallback_text\")\n self.label = kwargs.get(\"label\")\n self.text = kwargs.get(\"text\", \"Button\")\n self.url = kwargs.get(\"url\")\n self.target = kwargs.get(\"target\")\n self.rel = kwargs.get(\"rel\")\n\n @staticmethod\n def agency_contact_links(agency):\n \"\"\"Create link buttons for agency contact information.\"\"\"\n return [\n Button.link(classes=\"agency\", label=agency.long_name, text=agency.phone, url=f\"tel:{agency.phone}\"),\n Button.link(\n classes=\"agency\", text=agency.info_url, url=agency.info_url, target=\"_blank\", rel=\"noopener noreferrer\"\n ),\n ]\n\n @staticmethod\n def home(request, text=None):\n \"\"\"Create a button back to this session's origin.\"\"\"\n if text is None:\n text = _(\"core.buttons.return_home\")\n\n return Button.primary(text=text, url=session.origin(request))\n\n @staticmethod\n def link(**kwargs):\n classes = kwargs.pop(\"classes\", [])\n if isinstance(classes, str):\n classes = classes.split(\" \")\n classes.insert(0, \"btn-link\")\n return Button(classes=classes, **kwargs)\n\n @staticmethod\n def primary(**kwargs):\n classes = kwargs.pop(\"classes\", [])\n if isinstance(classes, str):\n classes = classes.split(\" \")\n classes.insert(0, \"btn-primary\")\n return Button(classes=classes, **kwargs)\n\n @staticmethod\n def outline_primary(**kwargs):\n classes = kwargs.pop(\"classes\", [])\n if isinstance(classes, str):\n classes = classes.split(\" \")\n classes.insert(0, \"btn-outline-primary\")\n return Button(classes=classes, **kwargs)\n\n @staticmethod\n def login(**kwargs):\n \"\"\"Create a login.gov button, with a login.gov logo and fallback text\"\"\"\n btn = 
Button.primary(fallback_text=\"Login.gov\", id=\"login\", **kwargs)\n return btn\n\n @staticmethod\n def logout(**kwargs):\n \"\"\"Create a button that logs user out, with a login.gov button, with a login.gov logo and fallback text\"\"\"\n btn = Button.primary(fallback_text=\"Login.gov\", id=\"login\", url=reverse(\"oauth:logout\"), text=\"\", **kwargs)\n return btn\n\n\nclass Icon:\n \"\"\"Represents an icon.\"\"\"\n\n def __init__(self, icon, alt):\n self.src = f\"img/icon/{icon}.svg\"\n self.alt = alt\n\n\nclass Page:\n \"\"\"\n Represents a page of content:\n * title: str\n * noimage: bool\n * icon: core.viewmodels.Icon\n * content_title: str\n * paragraphs: str[]\n * form: django.forms.Form\n * forms: django.forms.Form[]\n * button: core.viewmodels.Button\n * buttons: core.viewmodels.Button[]\n * classes: str[]\n \"\"\"\n\n def __init__(self, **kwargs):\n self.title = kwargs.get(\"title\")\n if self.title is None:\n self.title = _(\"core.pages.index.prefix\")\n else:\n self.title = f\"{_('core.pages.index.prefix')}: {self.title}\"\n\n self.noimage = kwargs.get(\"noimage\", False)\n self.icon = kwargs.get(\"icon\")\n self.content_title = kwargs.get(\"content_title\")\n self.paragraphs = kwargs.get(\"paragraphs\", [])\n self.steps = kwargs.get(\"steps\")\n\n self.forms = kwargs.get(\"forms\", [])\n if not isinstance(self.forms, list):\n self.forms = [self.forms]\n if \"form\" in kwargs:\n self.forms.append(kwargs.get(\"form\"))\n\n self.buttons = kwargs.get(\"buttons\", [])\n if not isinstance(self.buttons, list):\n self.buttons = [self.buttons]\n if \"button\" in kwargs:\n self.buttons.append(kwargs.get(\"button\"))\n\n self.classes = kwargs.get(\"classes\", [])\n if not isinstance(self.classes, list):\n self.classes = self.classes.split(\" \")\n if not self.noimage:\n self.classes.append(\"with-image\")\n\n def context_dict(self):\n \"\"\"Return a context dict for a Page.\"\"\"\n return {\"page\": self}\n\n\nclass ErrorPage(Page):\n \"\"\"\n Represents an error page:\n * title: str\n * icon: core.viewmodels.Icon\n * content_title: str\n * paragraphs: str[]\n * button: core.viewmodels.Button\n \"\"\"\n\n def __init__(self, **kwargs):\n super().__init__(\n title=kwargs.get(\"title\", _(\"core.pages.error.title\")),\n icon=kwargs.get(\"icon\", Icon(\"sadbus\", pgettext(\"image alt text\", \"core.icons.sadbus\"))),\n content_title=kwargs.get(\"content_title\", _(\"core.pages.error.title\")),\n paragraphs=kwargs.get(\"paragraphs\", [_(\"core.pages.server_error.content_title\")]),\n button=kwargs.get(\"button\"),\n noimage=True,\n )\n\n @staticmethod\n def user_error(\n title=_(\"core.pages.user_error.title\"),\n content_title=_(\"core.pages.user_error.content_title\"),\n paragraphs=[_(\"core.pages.user_error.p[0]\")],\n **kwargs,\n ):\n \"\"\"Create a new core.viewmodels.ErrorPage instance with defaults for a user error.\"\"\"\n return ErrorPage(title=title, content_title=content_title, paragraphs=paragraphs, **kwargs)\n\n @staticmethod\n def server_error(\n title=_(\"core.pages.server_error.title\"),\n content_title=_(\"core.pages.server_error.title\"),\n paragraphs=[_(\"core.pages.server_error.p[0]\"), _(\"core.pages.server_error.p[1]\")],\n **kwargs,\n ):\n \"\"\"Create a new core.viewmodels.ErrorPage instance with defaults for a generic server error.\"\"\"\n return ErrorPage(title=title, content_title=content_title, paragraphs=paragraphs, **kwargs)\n\n @staticmethod\n def not_found(\n title=_(\"core.pages.not_found.title\"),\n content_title=_(\"core.pages.not_found.content_title\"),\n 
paragraphs=[_(\"core.pages.not_found.p[0]\")],\n **kwargs,\n ):\n \"\"\"Create a new core.viewmodels.ErrorPage with defaults for a 404.\"\"\"\n path = kwargs.pop(\"path\", None)\n if path and title:\n title = f\"{title}: {path}\"\n elif path and not title:\n title = path\n return ErrorPage(title=title, content_title=content_title, paragraphs=paragraphs, **kwargs)\n\n\nclass PaymentProcessor:\n \"\"\"\n Represents a core.models.PaymentProcessor:\n * model: core.models.PaymentProcessor\n * access_token_url: str\n * element_id: str\n * color: str\n * [name: str]\n * [loading_text: str]\n \"\"\"\n\n def __init__(self, model, access_token_url, element_id, color, name=None, loading_text=_(\"core.buttons.wait\")):\n if isinstance(model, models.PaymentProcessor):\n self.access_token_url = access_token_url\n self.element_id = element_id\n self.color = color\n self.name = name or model.name\n self.loading_text = loading_text\n self.card_tokenize_url = model.card_tokenize_url\n self.card_tokenize_func = model.card_tokenize_func\n self.card_tokenize_env = model.card_tokenize_env\n\n def context_dict(self):\n \"\"\"Return a context dict for a PaymentProcessor.\"\"\"\n return {\"payment_processor\": self}\n\n\nclass TransitAgency:\n \"\"\"\n Represents a core.models.TransitAgency:\n * model: core.models.TransitAgency\n \"\"\"\n\n def __init__(self, model):\n if isinstance(model, models.TransitAgency):\n self.slug = model.slug\n self.short_name = model.short_name\n self.long_name = model.long_name\n self.agency_id = model.agency_id\n self.merchant_id = model.merchant_id\n self.info_url = model.info_url\n self.phone = model.phone\n\n def context_dict(self):\n \"\"\"Return a context dict for a TransitAgency.\"\"\"\n return {\"agency\": self}\n", "path": "benefits/core/viewmodels.py"}]}
3,197
141
gh_patches_debug_36150
rasdani/github-patches
git_diff
prowler-cloud__prowler-2736
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> ecr_repositories_scan_vulnerabilities_in_latest_image: Configure level ### New feature motivation Hi, is it possible to configure the level from which the test shall fail? AWS tags some findings as medium which I might want to ignore, but of course I don't want to mute critical findings for the image. ### Solution Proposed none ### Describe alternatives you've considered none ### Additional context _No response_ </issue> <code> [start of prowler/providers/aws/services/ecr/ecr_repositories_scan_vulnerabilities_in_latest_image/ecr_repositories_scan_vulnerabilities_in_latest_image.py] 1 from prowler.lib.check.models import Check, Check_Report_AWS 2 from prowler.providers.aws.services.ecr.ecr_client import ecr_client 3 4 5 class ecr_repositories_scan_vulnerabilities_in_latest_image(Check): 6 def execute(self): 7 findings = [] 8 for registry in ecr_client.registries.values(): 9 for repository in registry.repositories: 10 # First check if the repository has images 11 if len(repository.images_details) > 0: 12 # We only want to check the latest image pushed 13 image = repository.images_details[-1] 14 15 report = Check_Report_AWS(self.metadata()) 16 report.region = repository.region 17 report.resource_id = repository.name 18 report.resource_arn = repository.arn 19 report.resource_tags = repository.tags 20 report.status = "PASS" 21 report.status_extended = f"ECR repository {repository.name} has imageTag {image.latest_tag} scanned without findings." 22 if not image.scan_findings_status: 23 report.status = "FAIL" 24 report.status_extended = f"ECR repository {repository.name} has imageTag {image.latest_tag} without a scan." 25 elif image.scan_findings_status == "FAILED": 26 report.status = "FAIL" 27 report.status_extended = ( 28 f"ECR repository {repository.name} with scan status FAILED." 29 ) 30 elif image.scan_findings_status != "FAILED": 31 if image.scan_findings_severity_count and ( 32 image.scan_findings_severity_count.critical 33 or image.scan_findings_severity_count.high 34 or image.scan_findings_severity_count.medium 35 ): 36 report.status = "FAIL" 37 report.status_extended = f"ECR repository {repository.name} has imageTag {image.latest_tag} scanned with findings: CRITICAL->{image.scan_findings_severity_count.critical}, HIGH->{image.scan_findings_severity_count.high}, MEDIUM->{image.scan_findings_severity_count.medium}." 38 39 findings.append(report) 40 41 return findings 42 [end of prowler/providers/aws/services/ecr/ecr_repositories_scan_vulnerabilities_in_latest_image/ecr_repositories_scan_vulnerabilities_in_latest_image.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/prowler/providers/aws/services/ecr/ecr_repositories_scan_vulnerabilities_in_latest_image/ecr_repositories_scan_vulnerabilities_in_latest_image.py b/prowler/providers/aws/services/ecr/ecr_repositories_scan_vulnerabilities_in_latest_image/ecr_repositories_scan_vulnerabilities_in_latest_image.py --- a/prowler/providers/aws/services/ecr/ecr_repositories_scan_vulnerabilities_in_latest_image/ecr_repositories_scan_vulnerabilities_in_latest_image.py +++ b/prowler/providers/aws/services/ecr/ecr_repositories_scan_vulnerabilities_in_latest_image/ecr_repositories_scan_vulnerabilities_in_latest_image.py @@ -5,6 +5,12 @@ class ecr_repositories_scan_vulnerabilities_in_latest_image(Check): def execute(self): findings = [] + + # Get minimun severity to report + minimum_severity = ecr_client.audit_config.get( + "ecr_repository_vulnerability_minimum_severity", "MEDIUM" + ) + for registry in ecr_client.registries.values(): for repository in registry.repositories: # First check if the repository has images @@ -27,8 +33,23 @@ report.status_extended = ( f"ECR repository {repository.name} with scan status FAILED." ) - elif image.scan_findings_status != "FAILED": - if image.scan_findings_severity_count and ( + elif ( + image.scan_findings_status != "FAILED" + and image.scan_findings_severity_count + ): + if ( + minimum_severity == "CRITICAL" + and image.scan_findings_severity_count.critical + ): + report.status = "FAIL" + report.status_extended = f"ECR repository {repository.name} has imageTag {image.latest_tag} scanned with findings: CRITICAL->{image.scan_findings_severity_count.critical}." + elif minimum_severity == "HIGH" and ( + image.scan_findings_severity_count.critical + or image.scan_findings_severity_count.high + ): + report.status = "FAIL" + report.status_extended = f"ECR repository {repository.name} has imageTag {image.latest_tag} scanned with findings: CRITICAL->{image.scan_findings_severity_count.critical}, HIGH->{image.scan_findings_severity_count.high}." + elif minimum_severity == "MEDIUM" and ( image.scan_findings_severity_count.critical or image.scan_findings_severity_count.high or image.scan_findings_severity_count.medium
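Below is a minimal, standalone sketch of the configurable-severity logic the patch above introduces. The `SeverityCount` dataclass is a hypothetical stand-in for prowler's `scan_findings_severity_count` object, and the `MEDIUM` default mirrors the patch's `audit_config` fallback; this illustrates the decision logic only, not prowler's actual check class.

```python
# Hedged sketch: SeverityCount is a stand-in, not prowler's real model.
from dataclasses import dataclass


@dataclass
class SeverityCount:
    critical: int = 0
    high: int = 0
    medium: int = 0


def fails_scan(counts: SeverityCount, minimum_severity: str = "MEDIUM") -> bool:
    """Return True when findings at or above the configured minimum severity exist."""
    if minimum_severity == "CRITICAL":
        return counts.critical > 0
    if minimum_severity == "HIGH":
        return counts.critical > 0 or counts.high > 0
    # Default matches the original check: MEDIUM findings and above fail.
    return counts.critical > 0 or counts.high > 0 or counts.medium > 0


# Raising the threshold to HIGH lets a repository with only medium findings pass.
print(fails_scan(SeverityCount(medium=3), "HIGH"))    # False
print(fails_scan(SeverityCount(medium=3), "MEDIUM"))  # True
```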
{"golden_diff": "diff --git a/prowler/providers/aws/services/ecr/ecr_repositories_scan_vulnerabilities_in_latest_image/ecr_repositories_scan_vulnerabilities_in_latest_image.py b/prowler/providers/aws/services/ecr/ecr_repositories_scan_vulnerabilities_in_latest_image/ecr_repositories_scan_vulnerabilities_in_latest_image.py\n--- a/prowler/providers/aws/services/ecr/ecr_repositories_scan_vulnerabilities_in_latest_image/ecr_repositories_scan_vulnerabilities_in_latest_image.py\n+++ b/prowler/providers/aws/services/ecr/ecr_repositories_scan_vulnerabilities_in_latest_image/ecr_repositories_scan_vulnerabilities_in_latest_image.py\n@@ -5,6 +5,12 @@\n class ecr_repositories_scan_vulnerabilities_in_latest_image(Check):\n def execute(self):\n findings = []\n+\n+ # Get minimun severity to report\n+ minimum_severity = ecr_client.audit_config.get(\n+ \"ecr_repository_vulnerability_minimum_severity\", \"MEDIUM\"\n+ )\n+\n for registry in ecr_client.registries.values():\n for repository in registry.repositories:\n # First check if the repository has images\n@@ -27,8 +33,23 @@\n report.status_extended = (\n f\"ECR repository {repository.name} with scan status FAILED.\"\n )\n- elif image.scan_findings_status != \"FAILED\":\n- if image.scan_findings_severity_count and (\n+ elif (\n+ image.scan_findings_status != \"FAILED\"\n+ and image.scan_findings_severity_count\n+ ):\n+ if (\n+ minimum_severity == \"CRITICAL\"\n+ and image.scan_findings_severity_count.critical\n+ ):\n+ report.status = \"FAIL\"\n+ report.status_extended = f\"ECR repository {repository.name} has imageTag {image.latest_tag} scanned with findings: CRITICAL->{image.scan_findings_severity_count.critical}.\"\n+ elif minimum_severity == \"HIGH\" and (\n+ image.scan_findings_severity_count.critical\n+ or image.scan_findings_severity_count.high\n+ ):\n+ report.status = \"FAIL\"\n+ report.status_extended = f\"ECR repository {repository.name} has imageTag {image.latest_tag} scanned with findings: CRITICAL->{image.scan_findings_severity_count.critical}, HIGH->{image.scan_findings_severity_count.high}.\"\n+ elif minimum_severity == \"MEDIUM\" and (\n image.scan_findings_severity_count.critical\n or image.scan_findings_severity_count.high\n or image.scan_findings_severity_count.medium\n", "issue": "ecr_repositories_scan_vulnerabilities_in_latest_image: Configure level\n### New feature motivation\n\nHi, is it possible to configure the level from which the test shall fail?\r\nAWS tags some findings as medium which I might want to ignore, but of course I don't want to mute critical findings for the image.\n\n### Solution Proposed\n\nnone\n\n### Describe alternatives you've considered\n\nnone\n\n### Additional context\n\n_No response_\n", "before_files": [{"content": "from prowler.lib.check.models import Check, Check_Report_AWS\nfrom prowler.providers.aws.services.ecr.ecr_client import ecr_client\n\n\nclass ecr_repositories_scan_vulnerabilities_in_latest_image(Check):\n def execute(self):\n findings = []\n for registry in ecr_client.registries.values():\n for repository in registry.repositories:\n # First check if the repository has images\n if len(repository.images_details) > 0:\n # We only want to check the latest image pushed\n image = repository.images_details[-1]\n\n report = Check_Report_AWS(self.metadata())\n report.region = repository.region\n report.resource_id = repository.name\n report.resource_arn = repository.arn\n report.resource_tags = repository.tags\n report.status = \"PASS\"\n report.status_extended = f\"ECR repository {repository.name} has 
imageTag {image.latest_tag} scanned without findings.\"\n if not image.scan_findings_status:\n report.status = \"FAIL\"\n report.status_extended = f\"ECR repository {repository.name} has imageTag {image.latest_tag} without a scan.\"\n elif image.scan_findings_status == \"FAILED\":\n report.status = \"FAIL\"\n report.status_extended = (\n f\"ECR repository {repository.name} with scan status FAILED.\"\n )\n elif image.scan_findings_status != \"FAILED\":\n if image.scan_findings_severity_count and (\n image.scan_findings_severity_count.critical\n or image.scan_findings_severity_count.high\n or image.scan_findings_severity_count.medium\n ):\n report.status = \"FAIL\"\n report.status_extended = f\"ECR repository {repository.name} has imageTag {image.latest_tag} scanned with findings: CRITICAL->{image.scan_findings_severity_count.critical}, HIGH->{image.scan_findings_severity_count.high}, MEDIUM->{image.scan_findings_severity_count.medium}.\"\n\n findings.append(report)\n\n return findings\n", "path": "prowler/providers/aws/services/ecr/ecr_repositories_scan_vulnerabilities_in_latest_image/ecr_repositories_scan_vulnerabilities_in_latest_image.py"}]}
1,161
559
gh_patches_debug_19484
rasdani/github-patches
git_diff
open-telemetry__opentelemetry-python-contrib-310
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Resource must be immutable The boto instrumentation changes a span's resource to set attributes, this goes against the spec. </issue> <code> [start of instrumentation/opentelemetry-instrumentation-boto/src/opentelemetry/instrumentation/boto/__init__.py] 1 # Copyright The OpenTelemetry Authors 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 """ 15 Instrument `Boto`_ to trace service requests. 16 17 There are two options for instrumenting code. The first option is to use the 18 ``opentelemetry-instrument`` executable which will automatically 19 instrument your Boto client. The second is to programmatically enable 20 instrumentation via the following code: 21 22 .. _boto: https://pypi.org/project/boto/ 23 24 Usage 25 ----- 26 27 .. code:: python 28 29 from opentelemetry.instrumentation.boto import BotoInstrumentor 30 import boto 31 32 33 # Instrument Boto 34 BotoInstrumentor().instrument() 35 36 # This will create a span with Boto-specific attributes 37 ec2 = boto.ec2.connect_to_region("us-west-2") 38 ec2.get_all_instances() 39 40 API 41 --- 42 """ 43 44 import logging 45 from inspect import currentframe 46 47 from boto.connection import AWSAuthConnection, AWSQueryConnection 48 from wrapt import wrap_function_wrapper 49 50 from opentelemetry.instrumentation.boto.version import __version__ 51 from opentelemetry.instrumentation.instrumentor import BaseInstrumentor 52 from opentelemetry.instrumentation.utils import unwrap 53 from opentelemetry.sdk.trace import Resource 54 from opentelemetry.trace import SpanKind, get_tracer 55 56 logger = logging.getLogger(__name__) 57 58 SERVICE_PARAMS_BLOCK_LIST = {"s3": ["params.Body"]} 59 60 61 def _get_instance_region_name(instance): 62 region = getattr(instance, "region", None) 63 64 if not region: 65 return None 66 if isinstance(region, str): 67 return region.split(":")[1] 68 return region.name 69 70 71 class BotoInstrumentor(BaseInstrumentor): 72 """A instrumentor for Boto 73 74 See `BaseInstrumentor` 75 """ 76 77 def __init__(self): 78 super().__init__() 79 self._original_boto = None 80 81 def _instrument(self, **kwargs): 82 # AWSQueryConnection and AWSAuthConnection are two different classes 83 # called by different services for connection. 
84 # For exemple EC2 uses AWSQueryConnection and S3 uses 85 # AWSAuthConnection 86 87 # pylint: disable=attribute-defined-outside-init 88 self._tracer = get_tracer( 89 __name__, __version__, kwargs.get("tracer_provider") 90 ) 91 92 wrap_function_wrapper( 93 "boto.connection", 94 "AWSQueryConnection.make_request", 95 self._patched_query_request, 96 ) 97 wrap_function_wrapper( 98 "boto.connection", 99 "AWSAuthConnection.make_request", 100 self._patched_auth_request, 101 ) 102 103 def _uninstrument(self, **kwargs): 104 unwrap(AWSQueryConnection, "make_request") 105 unwrap(AWSAuthConnection, "make_request") 106 107 def _common_request( # pylint: disable=too-many-locals 108 self, 109 args_name, 110 traced_args, 111 operation_name, 112 original_func, 113 instance, 114 args, 115 kwargs, 116 ): 117 118 endpoint_name = getattr(instance, "host").split(".")[0] 119 120 with self._tracer.start_as_current_span( 121 "{}.command".format(endpoint_name), kind=SpanKind.CONSUMER, 122 ) as span: 123 if args: 124 http_method = args[0] 125 span.resource = Resource( 126 attributes={ 127 "endpoint": endpoint_name, 128 "http_method": http_method.lower(), 129 } 130 ) 131 else: 132 span.resource = Resource( 133 attributes={"endpoint": endpoint_name} 134 ) 135 136 # Original func returns a boto.connection.HTTPResponse object 137 result = original_func(*args, **kwargs) 138 139 if span.is_recording(): 140 add_span_arg_tags( 141 span, endpoint_name, args, args_name, traced_args, 142 ) 143 144 # Obtaining region name 145 region_name = _get_instance_region_name(instance) 146 147 meta = { 148 "aws.agent": "boto", 149 "aws.operation": operation_name, 150 } 151 if region_name: 152 meta["aws.region"] = region_name 153 154 for key, value in meta.items(): 155 span.set_attribute(key, value) 156 157 span.set_attribute( 158 "http.status_code", getattr(result, "status") 159 ) 160 span.set_attribute("http.method", getattr(result, "_method")) 161 162 return result 163 164 def _patched_query_request(self, original_func, instance, args, kwargs): 165 166 return self._common_request( 167 ("operation_name", "params", "path", "verb"), 168 ["operation_name", "params", "path"], 169 args[0] if args else None, 170 original_func, 171 instance, 172 args, 173 kwargs, 174 ) 175 176 def _patched_auth_request(self, original_func, instance, args, kwargs): 177 operation_name = None 178 179 frame = currentframe().f_back 180 operation_name = None 181 while frame: 182 if frame.f_code.co_name == "make_request": 183 operation_name = frame.f_back.f_code.co_name 184 break 185 frame = frame.f_back 186 187 return self._common_request( 188 ( 189 "method", 190 "path", 191 "headers", 192 "data", 193 "host", 194 "auth_path", 195 "sender", 196 ), 197 ["path", "data", "host"], 198 operation_name, 199 original_func, 200 instance, 201 args, 202 kwargs, 203 ) 204 205 206 def flatten_dict(dict_, sep=".", prefix=""): 207 """ 208 Returns a normalized dict of depth 1 with keys in order of embedding 209 """ 210 # NOTE: This should probably be in `opentelemetry.instrumentation.utils`. 211 # adapted from https://stackoverflow.com/a/19647596 212 return ( 213 { 214 prefix + sep + k if prefix else k: v 215 for kk, vv in dict_.items() 216 for k, v in flatten_dict(vv, sep, kk).items() 217 } 218 if isinstance(dict_, dict) 219 else {prefix: dict_} 220 ) 221 222 223 def add_span_arg_tags(span, aws_service, args, args_names, args_traced): 224 def truncate_arg_value(value, max_len=1024): 225 """Truncate values which are bytes and greater than `max_len`. 
226 Useful for parameters like "Body" in `put_object` operations. 227 """ 228 if isinstance(value, bytes) and len(value) > max_len: 229 return b"..." 230 231 return value 232 233 if not span.is_recording(): 234 return 235 236 # Do not trace `Key Management Service` or `Secure Token Service` API calls 237 # over concerns of security leaks. 238 if aws_service not in {"kms", "sts"}: 239 tags = dict( 240 (name, value) 241 for (name, value) in zip(args_names, args) 242 if name in args_traced 243 ) 244 tags = flatten_dict(tags) 245 246 for param_key, value in tags.items(): 247 if param_key in SERVICE_PARAMS_BLOCK_LIST.get(aws_service, {}): 248 continue 249 250 span.set_attribute(param_key, truncate_arg_value(value)) 251 [end of instrumentation/opentelemetry-instrumentation-boto/src/opentelemetry/instrumentation/boto/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/instrumentation/opentelemetry-instrumentation-boto/src/opentelemetry/instrumentation/boto/__init__.py b/instrumentation/opentelemetry-instrumentation-boto/src/opentelemetry/instrumentation/boto/__init__.py --- a/instrumentation/opentelemetry-instrumentation-boto/src/opentelemetry/instrumentation/boto/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-boto/src/opentelemetry/instrumentation/boto/__init__.py @@ -120,18 +120,10 @@ with self._tracer.start_as_current_span( "{}.command".format(endpoint_name), kind=SpanKind.CONSUMER, ) as span: + span.set_attribute("endpoint", endpoint_name) if args: http_method = args[0] - span.resource = Resource( - attributes={ - "endpoint": endpoint_name, - "http_method": http_method.lower(), - } - ) - else: - span.resource = Resource( - attributes={"endpoint": endpoint_name} - ) + span.set_attribute("http_method", http_method.lower()) # Original func returns a boto.connection.HTTPResponse object result = original_func(*args, **kwargs)
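A brief sketch of the pattern the patch above moves to: per-request details are recorded with `span.set_attribute()` instead of replacing the span's immutable `Resource`. It assumes only the `opentelemetry-api` package is installed; with no SDK configured the tracer is a no-op, which is still enough to show the call shape. The tracer name and the `traced_request` helper are invented for illustration.

```python
# Sketch only: attribute-based span metadata, leaving the Resource untouched.
from typing import Optional

from opentelemetry import trace

tracer = trace.get_tracer("boto-instrumentation-sketch")


def traced_request(endpoint_name: str, http_method: Optional[str]) -> None:
    # The Resource describes the whole process and must stay immutable, so
    # request-level metadata goes onto the span as attributes instead.
    with tracer.start_as_current_span(
        f"{endpoint_name}.command", kind=trace.SpanKind.CONSUMER
    ) as span:
        span.set_attribute("endpoint", endpoint_name)
        if http_method:
            span.set_attribute("http_method", http_method.lower())


traced_request("ec2", "POST")
```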
{"golden_diff": "diff --git a/instrumentation/opentelemetry-instrumentation-boto/src/opentelemetry/instrumentation/boto/__init__.py b/instrumentation/opentelemetry-instrumentation-boto/src/opentelemetry/instrumentation/boto/__init__.py\n--- a/instrumentation/opentelemetry-instrumentation-boto/src/opentelemetry/instrumentation/boto/__init__.py\n+++ b/instrumentation/opentelemetry-instrumentation-boto/src/opentelemetry/instrumentation/boto/__init__.py\n@@ -120,18 +120,10 @@\n with self._tracer.start_as_current_span(\n \"{}.command\".format(endpoint_name), kind=SpanKind.CONSUMER,\n ) as span:\n+ span.set_attribute(\"endpoint\", endpoint_name)\n if args:\n http_method = args[0]\n- span.resource = Resource(\n- attributes={\n- \"endpoint\": endpoint_name,\n- \"http_method\": http_method.lower(),\n- }\n- )\n- else:\n- span.resource = Resource(\n- attributes={\"endpoint\": endpoint_name}\n- )\n+ span.set_attribute(\"http_method\", http_method.lower())\n \n # Original func returns a boto.connection.HTTPResponse object\n result = original_func(*args, **kwargs)\n", "issue": "Resource must be immutable\nThe boto instrumentation changes a span's resource to set attributes, this goes against the spec.\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nInstrument `Boto`_ to trace service requests.\n\nThere are two options for instrumenting code. The first option is to use the\n``opentelemetry-instrument`` executable which will automatically\ninstrument your Boto client. The second is to programmatically enable\ninstrumentation via the following code:\n\n.. _boto: https://pypi.org/project/boto/\n\nUsage\n-----\n\n.. 
code:: python\n\n from opentelemetry.instrumentation.boto import BotoInstrumentor\n import boto\n\n\n # Instrument Boto\n BotoInstrumentor().instrument()\n\n # This will create a span with Boto-specific attributes\n ec2 = boto.ec2.connect_to_region(\"us-west-2\")\n ec2.get_all_instances()\n\nAPI\n---\n\"\"\"\n\nimport logging\nfrom inspect import currentframe\n\nfrom boto.connection import AWSAuthConnection, AWSQueryConnection\nfrom wrapt import wrap_function_wrapper\n\nfrom opentelemetry.instrumentation.boto.version import __version__\nfrom opentelemetry.instrumentation.instrumentor import BaseInstrumentor\nfrom opentelemetry.instrumentation.utils import unwrap\nfrom opentelemetry.sdk.trace import Resource\nfrom opentelemetry.trace import SpanKind, get_tracer\n\nlogger = logging.getLogger(__name__)\n\nSERVICE_PARAMS_BLOCK_LIST = {\"s3\": [\"params.Body\"]}\n\n\ndef _get_instance_region_name(instance):\n region = getattr(instance, \"region\", None)\n\n if not region:\n return None\n if isinstance(region, str):\n return region.split(\":\")[1]\n return region.name\n\n\nclass BotoInstrumentor(BaseInstrumentor):\n \"\"\"A instrumentor for Boto\n\n See `BaseInstrumentor`\n \"\"\"\n\n def __init__(self):\n super().__init__()\n self._original_boto = None\n\n def _instrument(self, **kwargs):\n # AWSQueryConnection and AWSAuthConnection are two different classes\n # called by different services for connection.\n # For exemple EC2 uses AWSQueryConnection and S3 uses\n # AWSAuthConnection\n\n # pylint: disable=attribute-defined-outside-init\n self._tracer = get_tracer(\n __name__, __version__, kwargs.get(\"tracer_provider\")\n )\n\n wrap_function_wrapper(\n \"boto.connection\",\n \"AWSQueryConnection.make_request\",\n self._patched_query_request,\n )\n wrap_function_wrapper(\n \"boto.connection\",\n \"AWSAuthConnection.make_request\",\n self._patched_auth_request,\n )\n\n def _uninstrument(self, **kwargs):\n unwrap(AWSQueryConnection, \"make_request\")\n unwrap(AWSAuthConnection, \"make_request\")\n\n def _common_request( # pylint: disable=too-many-locals\n self,\n args_name,\n traced_args,\n operation_name,\n original_func,\n instance,\n args,\n kwargs,\n ):\n\n endpoint_name = getattr(instance, \"host\").split(\".\")[0]\n\n with self._tracer.start_as_current_span(\n \"{}.command\".format(endpoint_name), kind=SpanKind.CONSUMER,\n ) as span:\n if args:\n http_method = args[0]\n span.resource = Resource(\n attributes={\n \"endpoint\": endpoint_name,\n \"http_method\": http_method.lower(),\n }\n )\n else:\n span.resource = Resource(\n attributes={\"endpoint\": endpoint_name}\n )\n\n # Original func returns a boto.connection.HTTPResponse object\n result = original_func(*args, **kwargs)\n\n if span.is_recording():\n add_span_arg_tags(\n span, endpoint_name, args, args_name, traced_args,\n )\n\n # Obtaining region name\n region_name = _get_instance_region_name(instance)\n\n meta = {\n \"aws.agent\": \"boto\",\n \"aws.operation\": operation_name,\n }\n if region_name:\n meta[\"aws.region\"] = region_name\n\n for key, value in meta.items():\n span.set_attribute(key, value)\n\n span.set_attribute(\n \"http.status_code\", getattr(result, \"status\")\n )\n span.set_attribute(\"http.method\", getattr(result, \"_method\"))\n\n return result\n\n def _patched_query_request(self, original_func, instance, args, kwargs):\n\n return self._common_request(\n (\"operation_name\", \"params\", \"path\", \"verb\"),\n [\"operation_name\", \"params\", \"path\"],\n args[0] if args else None,\n original_func,\n instance,\n 
args,\n kwargs,\n )\n\n def _patched_auth_request(self, original_func, instance, args, kwargs):\n operation_name = None\n\n frame = currentframe().f_back\n operation_name = None\n while frame:\n if frame.f_code.co_name == \"make_request\":\n operation_name = frame.f_back.f_code.co_name\n break\n frame = frame.f_back\n\n return self._common_request(\n (\n \"method\",\n \"path\",\n \"headers\",\n \"data\",\n \"host\",\n \"auth_path\",\n \"sender\",\n ),\n [\"path\", \"data\", \"host\"],\n operation_name,\n original_func,\n instance,\n args,\n kwargs,\n )\n\n\ndef flatten_dict(dict_, sep=\".\", prefix=\"\"):\n \"\"\"\n Returns a normalized dict of depth 1 with keys in order of embedding\n \"\"\"\n # NOTE: This should probably be in `opentelemetry.instrumentation.utils`.\n # adapted from https://stackoverflow.com/a/19647596\n return (\n {\n prefix + sep + k if prefix else k: v\n for kk, vv in dict_.items()\n for k, v in flatten_dict(vv, sep, kk).items()\n }\n if isinstance(dict_, dict)\n else {prefix: dict_}\n )\n\n\ndef add_span_arg_tags(span, aws_service, args, args_names, args_traced):\n def truncate_arg_value(value, max_len=1024):\n \"\"\"Truncate values which are bytes and greater than `max_len`.\n Useful for parameters like \"Body\" in `put_object` operations.\n \"\"\"\n if isinstance(value, bytes) and len(value) > max_len:\n return b\"...\"\n\n return value\n\n if not span.is_recording():\n return\n\n # Do not trace `Key Management Service` or `Secure Token Service` API calls\n # over concerns of security leaks.\n if aws_service not in {\"kms\", \"sts\"}:\n tags = dict(\n (name, value)\n for (name, value) in zip(args_names, args)\n if name in args_traced\n )\n tags = flatten_dict(tags)\n\n for param_key, value in tags.items():\n if param_key in SERVICE_PARAMS_BLOCK_LIST.get(aws_service, {}):\n continue\n\n span.set_attribute(param_key, truncate_arg_value(value))\n", "path": "instrumentation/opentelemetry-instrumentation-boto/src/opentelemetry/instrumentation/boto/__init__.py"}]}
2,844
273
gh_patches_debug_13964
rasdani/github-patches
git_diff
azavea__raster-vision-701
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Predict zero for nodata pixels on semantic segmentation </issue> <code> [start of rastervision/task/semantic_segmentation.py] 1 from typing import List 2 import logging 3 4 import numpy as np 5 6 from .task import Task 7 from rastervision.core.box import Box 8 from rastervision.data.scene import Scene 9 from rastervision.data.label import SemanticSegmentationLabels 10 11 log = logging.getLogger(__name__) 12 13 14 def get_random_sample_train_windows(label_store, chip_size, class_map, extent, 15 chip_options, filter_windows): 16 prob = chip_options.negative_survival_probability 17 target_count_threshold = chip_options.target_count_threshold 18 target_classes = chip_options.target_classes 19 chips_per_scene = chip_options.chips_per_scene 20 21 if not target_classes: 22 all_class_ids = [item.id for item in class_map.get_items()] 23 target_classes = all_class_ids 24 25 windows = [] 26 attempts = 0 27 while (attempts < chips_per_scene): 28 candidate_window = extent.make_random_square(chip_size) 29 if not filter_windows([candidate_window]): 30 continue 31 attempts = attempts + 1 32 33 if (prob >= 1.0): 34 windows.append(candidate_window) 35 elif attempts == chips_per_scene and len(windows) == 0: 36 windows.append(candidate_window) 37 else: 38 good = label_store.enough_target_pixels( 39 candidate_window, target_count_threshold, target_classes) 40 if good or (np.random.rand() < prob): 41 windows.append(candidate_window) 42 43 return windows 44 45 46 class SemanticSegmentation(Task): 47 """Task-derived type that implements the semantic segmentation task.""" 48 49 def get_train_windows(self, scene: Scene) -> List[Box]: 50 """Get training windows covering a scene. 51 52 Args: 53 scene: The scene over-which windows are to be generated. 54 55 Returns: 56 A list of windows, list(Box) 57 58 """ 59 60 def filter_windows(windows): 61 if scene.aoi_polygons: 62 windows = Box.filter_by_aoi(windows, scene.aoi_polygons) 63 return windows 64 65 raster_source = scene.raster_source 66 extent = raster_source.get_extent() 67 label_store = scene.ground_truth_label_source 68 chip_size = self.config.chip_size 69 70 chip_options = self.config.chip_options 71 72 if chip_options.window_method == 'random_sample': 73 return get_random_sample_train_windows( 74 label_store, chip_size, self.config.class_map, extent, 75 chip_options, filter_windows) 76 elif chip_options.window_method == 'sliding': 77 stride = chip_options.stride 78 if stride is None: 79 stride = chip_size / 2 80 81 return list( 82 filter_windows((extent.get_windows(chip_size, stride)))) 83 84 def get_train_labels(self, window: Box, scene: Scene) -> np.ndarray: 85 """Get the training labels for the given window in the given scene. 86 87 Args: 88 window: The window over-which the labels are to be 89 retrieved. 90 scene: The scene from-which the window of labels is to be 91 extracted. 92 93 Returns: 94 An appropriately-shaped 2d np.ndarray with the labels 95 encoded as packed pixels. 96 97 """ 98 label_store = scene.ground_truth_label_source 99 return label_store.get_labels(window) 100 101 def get_predict_windows(self, extent: Box) -> List[Box]: 102 """Get windows over-which predictions will be calculated. 103 104 Args: 105 extent: The overall extent of the area. 106 107 Returns: 108 An sequence of windows. 
109 110 """ 111 chip_size = self.config.chip_size 112 return extent.get_windows(chip_size, chip_size) 113 114 def post_process_predictions(self, labels, scene): 115 return labels 116 117 def save_debug_predict_image(self, scene, debug_dir_uri): 118 # TODO implement this 119 pass 120 121 def predict_scene(self, scene, tmp_dir): 122 """Predict on a single scene, and return the labels.""" 123 log.info('Making predictions for scene') 124 raster_source = scene.raster_source 125 windows = self.get_predict_windows(raster_source.get_extent()) 126 127 def label_fn(window): 128 chip = raster_source.get_chip(window) 129 if np.any(chip): 130 chip = raster_source.get_chip(window) 131 labels = self.backend.predict([chip], [window], tmp_dir) 132 label_arr = labels.get_label_arr(window) 133 else: 134 label_arr = np.zeros((window.get_height(), window.get_width())) 135 print('.', end='', flush=True) 136 return label_arr 137 138 return SemanticSegmentationLabels(windows, label_fn) 139 [end of rastervision/task/semantic_segmentation.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/rastervision/task/semantic_segmentation.py b/rastervision/task/semantic_segmentation.py --- a/rastervision/task/semantic_segmentation.py +++ b/rastervision/task/semantic_segmentation.py @@ -126,12 +126,12 @@ def label_fn(window): chip = raster_source.get_chip(window) - if np.any(chip): - chip = raster_source.get_chip(window) - labels = self.backend.predict([chip], [window], tmp_dir) - label_arr = labels.get_label_arr(window) - else: - label_arr = np.zeros((window.get_height(), window.get_width())) + labels = self.backend.predict([chip], [window], tmp_dir) + label_arr = labels.get_label_arr(window) + + # Set NODATA pixels in imagery to predicted value of 0 (ie. ignore) + label_arr[np.sum(chip, axis=2) == 0] = 0 + print('.', end='', flush=True) return label_arr
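The masking step the patch adds can be shown in isolation. The sketch below uses synthetic numpy arrays rather than Raster Vision objects: any pixel whose bands are all zero is treated as NODATA and its prediction is forced to the ignore label 0, matching `label_arr[np.sum(chip, axis=2) == 0] = 0` in the diff above.

```python
# Synthetic illustration of zeroing predictions over NODATA pixels.
import numpy as np

chip = np.zeros((4, 4, 3), dtype=np.uint8)
chip[1:, 1:, :] = 128                            # only the lower-right block has data

label_arr = np.full((4, 4), 2, dtype=np.uint8)   # pretend the model predicted class 2 everywhere
label_arr[np.sum(chip, axis=2) == 0] = 0         # ignore predictions where all bands are zero

print(label_arr)
```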
{"golden_diff": "diff --git a/rastervision/task/semantic_segmentation.py b/rastervision/task/semantic_segmentation.py\n--- a/rastervision/task/semantic_segmentation.py\n+++ b/rastervision/task/semantic_segmentation.py\n@@ -126,12 +126,12 @@\n \n def label_fn(window):\n chip = raster_source.get_chip(window)\n- if np.any(chip):\n- chip = raster_source.get_chip(window)\n- labels = self.backend.predict([chip], [window], tmp_dir)\n- label_arr = labels.get_label_arr(window)\n- else:\n- label_arr = np.zeros((window.get_height(), window.get_width()))\n+ labels = self.backend.predict([chip], [window], tmp_dir)\n+ label_arr = labels.get_label_arr(window)\n+\n+ # Set NODATA pixels in imagery to predicted value of 0 (ie. ignore)\n+ label_arr[np.sum(chip, axis=2) == 0] = 0\n+\n print('.', end='', flush=True)\n return label_arr\n", "issue": "Predict zero for nodata pixels on semantic segmentation\n\n", "before_files": [{"content": "from typing import List\nimport logging\n\nimport numpy as np\n\nfrom .task import Task\nfrom rastervision.core.box import Box\nfrom rastervision.data.scene import Scene\nfrom rastervision.data.label import SemanticSegmentationLabels\n\nlog = logging.getLogger(__name__)\n\n\ndef get_random_sample_train_windows(label_store, chip_size, class_map, extent,\n chip_options, filter_windows):\n prob = chip_options.negative_survival_probability\n target_count_threshold = chip_options.target_count_threshold\n target_classes = chip_options.target_classes\n chips_per_scene = chip_options.chips_per_scene\n\n if not target_classes:\n all_class_ids = [item.id for item in class_map.get_items()]\n target_classes = all_class_ids\n\n windows = []\n attempts = 0\n while (attempts < chips_per_scene):\n candidate_window = extent.make_random_square(chip_size)\n if not filter_windows([candidate_window]):\n continue\n attempts = attempts + 1\n\n if (prob >= 1.0):\n windows.append(candidate_window)\n elif attempts == chips_per_scene and len(windows) == 0:\n windows.append(candidate_window)\n else:\n good = label_store.enough_target_pixels(\n candidate_window, target_count_threshold, target_classes)\n if good or (np.random.rand() < prob):\n windows.append(candidate_window)\n\n return windows\n\n\nclass SemanticSegmentation(Task):\n \"\"\"Task-derived type that implements the semantic segmentation task.\"\"\"\n\n def get_train_windows(self, scene: Scene) -> List[Box]:\n \"\"\"Get training windows covering a scene.\n\n Args:\n scene: The scene over-which windows are to be generated.\n\n Returns:\n A list of windows, list(Box)\n\n \"\"\"\n\n def filter_windows(windows):\n if scene.aoi_polygons:\n windows = Box.filter_by_aoi(windows, scene.aoi_polygons)\n return windows\n\n raster_source = scene.raster_source\n extent = raster_source.get_extent()\n label_store = scene.ground_truth_label_source\n chip_size = self.config.chip_size\n\n chip_options = self.config.chip_options\n\n if chip_options.window_method == 'random_sample':\n return get_random_sample_train_windows(\n label_store, chip_size, self.config.class_map, extent,\n chip_options, filter_windows)\n elif chip_options.window_method == 'sliding':\n stride = chip_options.stride\n if stride is None:\n stride = chip_size / 2\n\n return list(\n filter_windows((extent.get_windows(chip_size, stride))))\n\n def get_train_labels(self, window: Box, scene: Scene) -> np.ndarray:\n \"\"\"Get the training labels for the given window in the given scene.\n\n Args:\n window: The window over-which the labels are to be\n retrieved.\n scene: The scene from-which the 
window of labels is to be\n extracted.\n\n Returns:\n An appropriately-shaped 2d np.ndarray with the labels\n encoded as packed pixels.\n\n \"\"\"\n label_store = scene.ground_truth_label_source\n return label_store.get_labels(window)\n\n def get_predict_windows(self, extent: Box) -> List[Box]:\n \"\"\"Get windows over-which predictions will be calculated.\n\n Args:\n extent: The overall extent of the area.\n\n Returns:\n An sequence of windows.\n\n \"\"\"\n chip_size = self.config.chip_size\n return extent.get_windows(chip_size, chip_size)\n\n def post_process_predictions(self, labels, scene):\n return labels\n\n def save_debug_predict_image(self, scene, debug_dir_uri):\n # TODO implement this\n pass\n\n def predict_scene(self, scene, tmp_dir):\n \"\"\"Predict on a single scene, and return the labels.\"\"\"\n log.info('Making predictions for scene')\n raster_source = scene.raster_source\n windows = self.get_predict_windows(raster_source.get_extent())\n\n def label_fn(window):\n chip = raster_source.get_chip(window)\n if np.any(chip):\n chip = raster_source.get_chip(window)\n labels = self.backend.predict([chip], [window], tmp_dir)\n label_arr = labels.get_label_arr(window)\n else:\n label_arr = np.zeros((window.get_height(), window.get_width()))\n print('.', end='', flush=True)\n return label_arr\n\n return SemanticSegmentationLabels(windows, label_fn)\n", "path": "rastervision/task/semantic_segmentation.py"}]}
1,819
231
gh_patches_debug_38244
rasdani/github-patches
git_diff
pyinstaller__pyinstaller-7820
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> genpy hook breaks win32com.client.gencache.EnsureDispatch() ## Description of the issue The [runtime hook for genpy](https://github.com/pyinstaller/pyinstaller/blob/develop/PyInstaller/hooks/rthooks/pyi_rth_win32comgenpy.py) creates a temporary directory for the genpy cache and overwrites `win32com.__gen_path__` accordingly: https://github.com/pyinstaller/pyinstaller/blob/14c53a9d9f7b9322cfc8e18ae1c6e415230fba22/PyInstaller/hooks/rthooks/pyi_rth_win32comgenpy.py#L42 This creates a new cache such as: C:\\Users\\<USER>\\AppData\\Local\\Temp\\**tmpytt5_e6s**\\gen_py (let's call this **hook cache**) If a genpy cache already exists in: C:\\Users\\<USER>\\AppData\\Local\\Temp\\gen_py (let's call this one **global cache**) (as caused by running the python script directly, instead of the pyinstaller version) the global cache is found by `EnsureDispatch()` such that no new cache will be generated in the hook cache. This should be fine as long as the global cache contains all required python modules. However, if it does not, the win32com package tries to generate the missing modules: This leads to the first exception shown below: `ModuleNotFoundError`, which is expected behaviour as far as I understand this mechanism. But now, when handling this exception, since the hook has overwritten `win32com.__gen_path__`, generating the missing modules is attempted in the hook cache although the global cache is being used currently. This leads to the second exception shown below: `No such file or directory`. I'm not sure about the correct way to fix this. I can think of at least the following two ways: 1. Prevent the win32com package from discovering the global cache such that an entirely new cache is generated each time. 2. Modify the runtime hook such that the missing modules are generated in the currently used cache (i.e. the global cache if one exists and the hook cache if not global cache was found). ### Context information (for bug reports) * Output of `pyinstaller --version`: ```5.0.dev0``` * Version of Python: 3.9.2 * Platform: Windows * How you installed Python: python.org/downloads * Did you also try this on another platform? Does it work there? No, since this is only applicable to Windows. ### A minimal example program which shows the error ``` from win32com.client import gencache excel = gencache.EnsureDispatch("Excel.Application") if not input(">"): # this will generate a cache that is complete for this "if" branch, but incomplete for the "else" branch. print(excel.Selection) else: print(excel.Selection) print(excel.Selection.Font) ``` To reproduce the error: 1. Open any excel sheet 2. Make sure that no global cache exists (simply delete it, if it exists) 3. Create the executable using: `pyinstaller bug_demo.py` 4. Run `bug_demo.exe`. It will prompt for an input. This should work no matter if you take the "if" branch (simply press Enter) or the "else" branch (provide some actual input, e.g. "a"). 5. Run `bug_demo.py` directly using the python interpreter and just press Enter when it asks for input. This will create a global cache, which is incomplete for the "else" branch. 6. Run `bug_demo.exe` again. This time, it will only work if you take the "if" branch (simply press Enter). If you take the "else" branch (provide some actual input, e.g. "a"), the error shown below should occur. 
### Stacktrace / full error message ``` (venv) PS C:\Users\<SNIP>> .\dist\bug_demo\bug_demo.exe a None Traceback (most recent call last): File "win32com\client\gencache.py", line 233, in GetModuleForCLSID ModuleNotFoundError: No module named 'win32com.gen_py.00020813-0000-0000-C000-000000000046x0x1x9.Font' During handling of the above exception, another exception occurred: Traceback (most recent call last): File "bug_demo.py", line 9, in <module> print(excel.Selection.Font) File "win32com\client\__init__.py", line 474, in __getattr__ File "win32com\client\__init__.py", line 466, in _ApplyTypes_ File "win32com\client\__init__.py", line 486, in _get_good_object_ File "win32com\client\__init__.py", line 502, in _get_good_object_ File "win32com\client\__init__.py", line 491, in _get_good_single_object_ File "win32com\client\__init__.py", line 96, in Dispatch File "win32com\client\__init__.py", line 37, in __WrapDispatch File "win32com\client\gencache.py", line 180, in GetClassForCLSID File "win32com\client\gencache.py", line 241, in GetModuleForCLSID File "win32com\client\makepy.py", line 319, in GenerateChildFromTypeLibSpec File "win32com\client\genpy.py", line 1042, in generate_child File "win32com\client\genpy.py", line 779, in open_writer FileNotFoundError: [Errno 2] No such file or directory: 'C:\\Users\\<USER>\\AppData\\Local\\Temp\\tmpytt5_e6s\\gen_py\\00020813-0000-0000-C000-000000000046x0x1x9\\Font.py.18792.temp' [18792] Failed to execute script bug_demo ``` </issue> <code> [start of PyInstaller/hooks/rthooks/pyi_rth_win32comgenpy.py] 1 #----------------------------------------------------------------------------- 2 # Copyright (c) 2013-2023, PyInstaller Development Team. 3 # 4 # Licensed under the Apache License, Version 2.0 (the "License"); 5 # you may not use this file except in compliance with the License. 6 # 7 # The full license is in the file COPYING.txt, distributed with this software. 8 # 9 # SPDX-License-Identifier: Apache-2.0 10 #----------------------------------------------------------------------------- 11 12 # The win32.client.gencache code must be allowed to create the cache in %temp% (user's temp). It is necessary to get the 13 # gencache code to use a suitable directory other than the default in lib\site-packages\win32com\client\gen_py. 14 # PyInstaller does not provide this directory structure and the frozen executable could be placed in a non-writable 15 # directory like 'C:\Program Files. That's the reason for %temp% directory. 16 # 17 # http://www.py2exe.org/index.cgi/UsingEnsureDispatch 18 19 20 def _pyi_rthook(): 21 import atexit 22 import os 23 import shutil 24 import tempfile 25 26 # Put gen_py cache in temp directory. 27 supportdir = tempfile.mkdtemp() 28 # gen_py has to be put into directory 'gen_py'. 29 genpydir = os.path.join(supportdir, 'gen_py') 30 31 # Create 'gen_py' directory. This directory does not need to contain '__init__.py' file. 32 try: 33 # win32com gencache cannot be put directly to 'supportdir' with any random name. It has to be put in a directory 34 # called 'gen_py'. This is the reason why to create this directory in supportdir'. 35 os.makedirs(genpydir) 36 # Remove temp directory at application exit and ignore any errors. 37 atexit.register(shutil.rmtree, supportdir, ignore_errors=True) 38 except OSError: 39 pass 40 41 # Override the default path to gen_py cache. 
42 import win32com # noqa: E402 43 44 win32com.__gen_path__ = genpydir 45 46 # The attribute __loader__ makes module 'pkg_resources' working but On Windows it breaks pywin32 (win32com) and test 47 # 'basic/test_pyttsx' will fail. Just removing that attribute for win32com fixes that and gencache is created 48 # properly. 49 if hasattr(win32com, '__loader__'): 50 del win32com.__loader__ 51 52 # Ensure genpydir is in 'gen_py' module paths. 53 import win32com.gen_py # noqa: E402 54 55 win32com.gen_py.__path__.insert(0, genpydir) 56 57 58 _pyi_rthook() 59 del _pyi_rthook 60 [end of PyInstaller/hooks/rthooks/pyi_rth_win32comgenpy.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/PyInstaller/hooks/rthooks/pyi_rth_win32comgenpy.py b/PyInstaller/hooks/rthooks/pyi_rth_win32comgenpy.py --- a/PyInstaller/hooks/rthooks/pyi_rth_win32comgenpy.py +++ b/PyInstaller/hooks/rthooks/pyi_rth_win32comgenpy.py @@ -9,12 +9,12 @@ # SPDX-License-Identifier: Apache-2.0 #----------------------------------------------------------------------------- -# The win32.client.gencache code must be allowed to create the cache in %temp% (user's temp). It is necessary to get the -# gencache code to use a suitable directory other than the default in lib\site-packages\win32com\client\gen_py. -# PyInstaller does not provide this directory structure and the frozen executable could be placed in a non-writable -# directory like 'C:\Program Files. That's the reason for %temp% directory. -# -# http://www.py2exe.org/index.cgi/UsingEnsureDispatch +# Put the cache generated by `win32com.client.gencache` into isolated temporary directory. Historically, this was +# required due to earlier versions of `pywin32` using the `site-packages\win32com\client\gen_py` directory for +# the cache by default. Nowadays, the default location for the cache seems to be in the configured temporary directory +# (pointed to by TEMP or TMP, for example %LOCALAPPDATA%\Temp), so strictly speaking, the relocation is not necessary +# anymore. But for the time being, we are keeping it around to isolate the frozen application from the rest of the +# system. def _pyi_rthook(): @@ -23,36 +23,22 @@ import shutil import tempfile - # Put gen_py cache in temp directory. + import win32com + + # Create temporary directory. The actual cache directory needs to be named `gen_py`, so create a sub-directory. supportdir = tempfile.mkdtemp() - # gen_py has to be put into directory 'gen_py'. + genpydir = os.path.join(supportdir, 'gen_py') + os.makedirs(genpydir, exist_ok=True) - # Create 'gen_py' directory. This directory does not need to contain '__init__.py' file. - try: - # win32com gencache cannot be put directly to 'supportdir' with any random name. It has to be put in a directory - # called 'gen_py'. This is the reason why to create this directory in supportdir'. - os.makedirs(genpydir) - # Remove temp directory at application exit and ignore any errors. - atexit.register(shutil.rmtree, supportdir, ignore_errors=True) - except OSError: - pass + # Remove the teporary directory at application exit, ignoring errors. + atexit.register(shutil.rmtree, supportdir, ignore_errors=True) # Override the default path to gen_py cache. - import win32com # noqa: E402 - win32com.__gen_path__ = genpydir - # The attribute __loader__ makes module 'pkg_resources' working but On Windows it breaks pywin32 (win32com) and test - # 'basic/test_pyttsx' will fail. Just removing that attribute for win32com fixes that and gencache is created - # properly. - if hasattr(win32com, '__loader__'): - del win32com.__loader__ - - # Ensure genpydir is in 'gen_py' module paths. - import win32com.gen_py # noqa: E402 - - win32com.gen_py.__path__.insert(0, genpydir) + # Override the sub-module paths for win32com.gen_py run-time sub-package. + win32com.gen_py.__path__ = [genpydir] _pyi_rthook()
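A rough sketch of the cache-isolation idea behind the patched run-time hook: create a private `gen_py` directory, register cleanup at exit, and point both `win32com.__gen_path__` (where new wrappers are written) and `win32com.gen_py.__path__` (where they are looked up) at the same place, so the frozen app can no longer fall back to a stale global cache. The `win32com` import is guarded because it only resolves on Windows with pywin32 installed; the directory handling itself runs anywhere. This is an illustration of the approach, not the literal PyInstaller hook.

```python
# Sketch of the isolation approach; not the literal PyInstaller hook.
import atexit
import os
import shutil
import tempfile

supportdir = tempfile.mkdtemp()
genpydir = os.path.join(supportdir, "gen_py")
os.makedirs(genpydir, exist_ok=True)
atexit.register(shutil.rmtree, supportdir, ignore_errors=True)

try:
    import win32com
    import win32com.gen_py
except ImportError:          # not on Windows / pywin32 missing
    win32com = None

if win32com is not None:
    win32com.__gen_path__ = genpydir        # new wrappers get written here
    win32com.gen_py.__path__ = [genpydir]   # and looked up only here

print("isolated gen_py cache:", genpydir)
```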
{"golden_diff": "diff --git a/PyInstaller/hooks/rthooks/pyi_rth_win32comgenpy.py b/PyInstaller/hooks/rthooks/pyi_rth_win32comgenpy.py\n--- a/PyInstaller/hooks/rthooks/pyi_rth_win32comgenpy.py\n+++ b/PyInstaller/hooks/rthooks/pyi_rth_win32comgenpy.py\n@@ -9,12 +9,12 @@\n # SPDX-License-Identifier: Apache-2.0\n #-----------------------------------------------------------------------------\n \n-# The win32.client.gencache code must be allowed to create the cache in %temp% (user's temp). It is necessary to get the\n-# gencache code to use a suitable directory other than the default in lib\\site-packages\\win32com\\client\\gen_py.\n-# PyInstaller does not provide this directory structure and the frozen executable could be placed in a non-writable\n-# directory like 'C:\\Program Files. That's the reason for %temp% directory.\n-#\n-# http://www.py2exe.org/index.cgi/UsingEnsureDispatch\n+# Put the cache generated by `win32com.client.gencache` into isolated temporary directory. Historically, this was\n+# required due to earlier versions of `pywin32` using the `site-packages\\win32com\\client\\gen_py` directory for\n+# the cache by default. Nowadays, the default location for the cache seems to be in the configured temporary directory\n+# (pointed to by TEMP or TMP, for example %LOCALAPPDATA%\\Temp), so strictly speaking, the relocation is not necessary\n+# anymore. But for the time being, we are keeping it around to isolate the frozen application from the rest of the\n+# system.\n \n \n def _pyi_rthook():\n@@ -23,36 +23,22 @@\n import shutil\n import tempfile\n \n- # Put gen_py cache in temp directory.\n+ import win32com\n+\n+ # Create temporary directory. The actual cache directory needs to be named `gen_py`, so create a sub-directory.\n supportdir = tempfile.mkdtemp()\n- # gen_py has to be put into directory 'gen_py'.\n+\n genpydir = os.path.join(supportdir, 'gen_py')\n+ os.makedirs(genpydir, exist_ok=True)\n \n- # Create 'gen_py' directory. This directory does not need to contain '__init__.py' file.\n- try:\n- # win32com gencache cannot be put directly to 'supportdir' with any random name. It has to be put in a directory\n- # called 'gen_py'. This is the reason why to create this directory in supportdir'.\n- os.makedirs(genpydir)\n- # Remove temp directory at application exit and ignore any errors.\n- atexit.register(shutil.rmtree, supportdir, ignore_errors=True)\n- except OSError:\n- pass\n+ # Remove the teporary directory at application exit, ignoring errors.\n+ atexit.register(shutil.rmtree, supportdir, ignore_errors=True)\n \n # Override the default path to gen_py cache.\n- import win32com # noqa: E402\n-\n win32com.__gen_path__ = genpydir\n \n- # The attribute __loader__ makes module 'pkg_resources' working but On Windows it breaks pywin32 (win32com) and test\n- # 'basic/test_pyttsx' will fail. 
Just removing that attribute for win32com fixes that and gencache is created\n- # properly.\n- if hasattr(win32com, '__loader__'):\n- del win32com.__loader__\n-\n- # Ensure genpydir is in 'gen_py' module paths.\n- import win32com.gen_py # noqa: E402\n-\n- win32com.gen_py.__path__.insert(0, genpydir)\n+ # Override the sub-module paths for win32com.gen_py run-time sub-package.\n+ win32com.gen_py.__path__ = [genpydir]\n \n \n _pyi_rthook()\n", "issue": "genpy hook breaks win32com.client.gencache.EnsureDispatch()\n## Description of the issue\r\nThe [runtime hook for genpy](https://github.com/pyinstaller/pyinstaller/blob/develop/PyInstaller/hooks/rthooks/pyi_rth_win32comgenpy.py) creates a temporary directory for the genpy cache and overwrites `win32com.__gen_path__` accordingly: https://github.com/pyinstaller/pyinstaller/blob/14c53a9d9f7b9322cfc8e18ae1c6e415230fba22/PyInstaller/hooks/rthooks/pyi_rth_win32comgenpy.py#L42\r\nThis creates a new cache such as:\r\nC:\\\\Users\\\\<USER>\\\\AppData\\\\Local\\\\Temp\\\\**tmpytt5_e6s**\\\\gen_py (let's call this **hook cache**)\r\n\r\nIf a genpy cache already exists in:\r\nC:\\\\Users\\\\<USER>\\\\AppData\\\\Local\\\\Temp\\\\gen_py (let's call this one **global cache**)\r\n(as caused by running the python script directly, instead of the pyinstaller version)\r\nthe global cache is found by `EnsureDispatch()` such that no new cache will be generated in the hook cache.\r\n\r\nThis should be fine as long as the global cache contains all required python modules. However, if it does not, the win32com package tries to generate the missing modules:\r\nThis leads to the first exception shown below: `ModuleNotFoundError`, which is expected behaviour as far as I understand this mechanism.\r\n\r\nBut now, when handling this exception, since the hook has overwritten `win32com.__gen_path__`, generating the missing modules is attempted in the hook cache although the global cache is being used currently.\r\nThis leads to the second exception shown below: `No such file or directory`.\r\n\r\nI'm not sure about the correct way to fix this. I can think of at least the following two ways:\r\n1. Prevent the win32com package from discovering the global cache such that an entirely new cache is generated each time.\r\n2. Modify the runtime hook such that the missing modules are generated in the currently used cache (i.e. the global cache if one exists and the hook cache if not global cache was found).\r\n\r\n### Context information (for bug reports)\r\n\r\n* Output of `pyinstaller --version`: ```5.0.dev0```\r\n* Version of Python: 3.9.2\r\n* Platform: Windows\r\n* How you installed Python: python.org/downloads\r\n* Did you also try this on another platform? Does it work there? No, since this is only applicable to Windows.\r\n\r\n### A minimal example program which shows the error\r\n\r\n```\r\nfrom win32com.client import gencache\r\n\r\nexcel = gencache.EnsureDispatch(\"Excel.Application\")\r\n\r\nif not input(\">\"):\r\n # this will generate a cache that is complete for this \"if\" branch, but incomplete for the \"else\" branch.\r\n print(excel.Selection)\r\nelse:\r\n print(excel.Selection) \r\n print(excel.Selection.Font)\r\n\r\n```\r\n\r\nTo reproduce the error:\r\n1. Open any excel sheet\r\n2. Make sure that no global cache exists (simply delete it, if it exists)\r\n3. Create the executable using: `pyinstaller bug_demo.py`\r\n4. Run `bug_demo.exe`. It will prompt for an input. 
This should work no matter if you take the \"if\" branch (simply press Enter) or the \"else\" branch (provide some actual input, e.g. \"a\").\r\n5. Run `bug_demo.py` directly using the python interpreter and just press Enter when it asks for input. This will create a global cache, which is incomplete for the \"else\" branch.\r\n6. Run `bug_demo.exe` again. This time, it will only work if you take the \"if\" branch (simply press Enter). If you take the \"else\" branch (provide some actual input, e.g. \"a\"), the error shown below should occur.\r\n\r\n### Stacktrace / full error message\r\n\r\n\r\n```\r\n(venv) PS C:\\Users\\<SNIP>> .\\dist\\bug_demo\\bug_demo.exe\r\na\r\nNone\r\nTraceback (most recent call last):\r\n File \"win32com\\client\\gencache.py\", line 233, in GetModuleForCLSID\r\nModuleNotFoundError: No module named 'win32com.gen_py.00020813-0000-0000-C000-000000000046x0x1x9.Font'\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"bug_demo.py\", line 9, in <module>\r\n print(excel.Selection.Font)\r\n File \"win32com\\client\\__init__.py\", line 474, in __getattr__\r\n File \"win32com\\client\\__init__.py\", line 466, in _ApplyTypes_\r\n File \"win32com\\client\\__init__.py\", line 486, in _get_good_object_\r\n File \"win32com\\client\\__init__.py\", line 502, in _get_good_object_\r\n File \"win32com\\client\\__init__.py\", line 491, in _get_good_single_object_\r\n File \"win32com\\client\\__init__.py\", line 96, in Dispatch\r\n File \"win32com\\client\\__init__.py\", line 37, in __WrapDispatch\r\n File \"win32com\\client\\gencache.py\", line 180, in GetClassForCLSID\r\n File \"win32com\\client\\gencache.py\", line 241, in GetModuleForCLSID\r\n File \"win32com\\client\\makepy.py\", line 319, in GenerateChildFromTypeLibSpec\r\n File \"win32com\\client\\genpy.py\", line 1042, in generate_child\r\n File \"win32com\\client\\genpy.py\", line 779, in open_writer\r\nFileNotFoundError: [Errno 2] No such file or directory: 'C:\\\\Users\\\\<USER>\\\\AppData\\\\Local\\\\Temp\\\\tmpytt5_e6s\\\\gen_py\\\\00020813-0000-0000-C000-000000000046x0x1x9\\\\Font.py.18792.temp'\r\n[18792] Failed to execute script bug_demo\r\n\r\n```\r\n\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2013-2023, PyInstaller Development Team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#\n# SPDX-License-Identifier: Apache-2.0\n#-----------------------------------------------------------------------------\n\n# The win32.client.gencache code must be allowed to create the cache in %temp% (user's temp). It is necessary to get the\n# gencache code to use a suitable directory other than the default in lib\\site-packages\\win32com\\client\\gen_py.\n# PyInstaller does not provide this directory structure and the frozen executable could be placed in a non-writable\n# directory like 'C:\\Program Files. That's the reason for %temp% directory.\n#\n# http://www.py2exe.org/index.cgi/UsingEnsureDispatch\n\n\ndef _pyi_rthook():\n import atexit\n import os\n import shutil\n import tempfile\n\n # Put gen_py cache in temp directory.\n supportdir = tempfile.mkdtemp()\n # gen_py has to be put into directory 'gen_py'.\n genpydir = os.path.join(supportdir, 'gen_py')\n\n # Create 'gen_py' directory. 
This directory does not need to contain '__init__.py' file.\n try:\n # win32com gencache cannot be put directly to 'supportdir' with any random name. It has to be put in a directory\n # called 'gen_py'. This is the reason why to create this directory in supportdir'.\n os.makedirs(genpydir)\n # Remove temp directory at application exit and ignore any errors.\n atexit.register(shutil.rmtree, supportdir, ignore_errors=True)\n except OSError:\n pass\n\n # Override the default path to gen_py cache.\n import win32com # noqa: E402\n\n win32com.__gen_path__ = genpydir\n\n # The attribute __loader__ makes module 'pkg_resources' working but On Windows it breaks pywin32 (win32com) and test\n # 'basic/test_pyttsx' will fail. Just removing that attribute for win32com fixes that and gencache is created\n # properly.\n if hasattr(win32com, '__loader__'):\n del win32com.__loader__\n\n # Ensure genpydir is in 'gen_py' module paths.\n import win32com.gen_py # noqa: E402\n\n win32com.gen_py.__path__.insert(0, genpydir)\n\n\n_pyi_rthook()\ndel _pyi_rthook\n", "path": "PyInstaller/hooks/rthooks/pyi_rth_win32comgenpy.py"}]}
2,717
902
gh_patches_debug_41415
rasdani/github-patches
git_diff
Kinto__kinto-540
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Create Collection event notification not fired for auto-create on default bucket I was hacking around with Redis event notifications today. I setup two separate listeners with filters: ``` kinto.event_listeners = redis1 redis2 kinto.event_listeners.redis1.use = cliquet.listeners.redis kinto.event_listeners.redis1.url = <url> kinto.event_listeners.redis1.pool_size = 5 kinto.event_listeners.redis1.listname = kinto:collection:create kinto.event_listeners.redis1.actions = create kinto.event_listeners.redis1.resources = collection kinto.event_listeners.redis2.use = cliquet.listeners.redis kinto.event_listeners.redis2.url = <url> kinto.event_listeners.redis2.pool_size = 5 kinto.event_listeners.redis2.listname = kinto:record:create kinto.event_listeners.redis2.actions = create kinto.event_listeners.redis2.resources = record ``` To test, I was using the default bucket, and the inserting a sample record to a collection that did not yet exist. Although the collection was auto-created, and the record inserted, only the event for create record was logged to Redis. If I create the collection manually, then the create collection event is logged. Summary: Auto-created collections on the default bucket are not firing event notifications. </issue> <code> [start of kinto/plugins/default_bucket/__init__.py] 1 import uuid 2 3 import six 4 from pyramid import httpexceptions 5 from pyramid.settings import asbool 6 from pyramid.security import NO_PERMISSION_REQUIRED, Authenticated 7 8 from cliquet.errors import raise_invalid 9 from cliquet.utils import build_request, reapply_cors, hmac_digest 10 from cliquet.storage import exceptions as storage_exceptions 11 12 from kinto.authorization import RouteFactory 13 from kinto.views.buckets import Bucket 14 from kinto.views.collections import Collection 15 16 17 def create_bucket(request, bucket_id): 18 """Create a bucket if it doesn't exists.""" 19 bucket_put = (request.method.lower() == 'put' and 20 request.path.endswith('buckets/default')) 21 # Do nothing if current request will already create the bucket. 22 if bucket_put: 23 return 24 25 # Do not intent to create multiple times per request (e.g. in batch). 26 already_created = request.bound_data.setdefault('buckets', {}) 27 if bucket_id in already_created: 28 return 29 30 # Fake context to instantiate a Bucket resource. 31 context = RouteFactory(request) 32 context.get_permission_object_id = lambda r, i: '/buckets/%s' % bucket_id 33 resource = Bucket(request, context) 34 try: 35 bucket = resource.model.create_record({'id': bucket_id}) 36 except storage_exceptions.UnicityError as e: 37 bucket = e.record 38 already_created[bucket_id] = bucket 39 40 41 def create_collection(request, bucket_id): 42 # Do nothing if current request does not involve a collection. 43 subpath = request.matchdict.get('subpath') 44 if not (subpath and subpath.startswith('collections/')): 45 return 46 47 collection_id = subpath.split('/')[1] 48 collection_uri = '/buckets/%s/collections/%s' % (bucket_id, collection_id) 49 50 # Do not intent to create multiple times per request (e.g. in batch). 51 already_created = request.bound_data.setdefault('collections', {}) 52 if collection_uri in already_created: 53 return 54 55 # Do nothing if current request will already create the collection. 
56 collection_put = (request.method.lower() == 'put' and 57 request.path.endswith(collection_id)) 58 if collection_put: 59 return 60 61 # Fake context to instantiate a Collection resource. 62 context = RouteFactory(request) 63 context.get_permission_object_id = lambda r, i: collection_uri 64 65 backup = request.matchdict 66 request.matchdict = dict(bucket_id=bucket_id, 67 id=collection_id, 68 **request.matchdict) 69 resource = Collection(request, context) 70 if not resource.model.id_generator.match(collection_id): 71 error_details = { 72 'location': 'path', 73 'description': "Invalid collection_id id" 74 } 75 raise_invalid(request, **error_details) 76 try: 77 collection = resource.model.create_record({'id': collection_id}) 78 except storage_exceptions.UnicityError as e: 79 collection = e.record 80 already_created[collection_uri] = collection 81 request.matchdict = backup 82 83 84 def default_bucket(request): 85 if request.method.lower() == 'options': 86 path = request.path.replace('default', 'unknown') 87 subrequest = build_request(request, { 88 'method': 'OPTIONS', 89 'path': path 90 }) 91 return request.invoke_subrequest(subrequest) 92 93 if Authenticated not in request.effective_principals: 94 # Pass through the forbidden_view_config 95 raise httpexceptions.HTTPForbidden() 96 97 settings = request.registry.settings 98 99 if asbool(settings['readonly']): 100 raise httpexceptions.HTTPMethodNotAllowed() 101 102 bucket_id = request.default_bucket_id 103 path = request.path.replace('/buckets/default', '/buckets/%s' % bucket_id) 104 querystring = request.url[(request.url.index(request.path) + 105 len(request.path)):] 106 107 # Make sure bucket exists 108 create_bucket(request, bucket_id) 109 110 # Make sure the collection exists 111 create_collection(request, bucket_id) 112 113 subrequest = build_request(request, { 114 'method': request.method, 115 'path': path + querystring, 116 'body': request.body 117 }) 118 subrequest.bound_data = request.bound_data 119 120 try: 121 response = request.invoke_subrequest(subrequest) 122 except httpexceptions.HTTPException as error: 123 is_redirect = error.status_code < 400 124 if error.content_type == 'application/json' or is_redirect: 125 response = reapply_cors(subrequest, error) 126 else: 127 # Ask the upper level to format the error. 
128 raise error 129 return response 130 131 132 def default_bucket_id(request): 133 settings = request.registry.settings 134 secret = settings['userid_hmac_secret'] 135 # Build the user unguessable bucket_id UUID from its user_id 136 digest = hmac_digest(secret, request.prefixed_userid) 137 return six.text_type(uuid.UUID(digest[:32])) 138 139 140 def get_user_info(request): 141 user_info = { 142 'id': request.prefixed_userid, 143 'bucket': request.default_bucket_id 144 } 145 return user_info 146 147 148 def includeme(config): 149 # Redirect default to the right endpoint 150 config.add_view(default_bucket, 151 route_name='default_bucket', 152 permission=NO_PERMISSION_REQUIRED) 153 config.add_view(default_bucket, 154 route_name='default_bucket_collection', 155 permission=NO_PERMISSION_REQUIRED) 156 157 config.add_route('default_bucket_collection', 158 '/buckets/default/{subpath:.*}') 159 config.add_route('default_bucket', '/buckets/default') 160 161 # Provide helpers 162 config.add_request_method(default_bucket_id, reify=True) 163 # Override Cliquet default user info 164 config.add_request_method(get_user_info) 165 166 config.add_api_capability( 167 "default_bucket", 168 description="The default bucket is an alias for a personal" 169 " bucket where collections are created implicitly.", 170 url="http://kinto.readthedocs.org/en/latest/api/1.x/" 171 "buckets.html#personal-bucket-default") 172 [end of kinto/plugins/default_bucket/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/kinto/plugins/default_bucket/__init__.py b/kinto/plugins/default_bucket/__init__.py --- a/kinto/plugins/default_bucket/__init__.py +++ b/kinto/plugins/default_bucket/__init__.py @@ -6,6 +6,7 @@ from pyramid.security import NO_PERMISSION_REQUIRED, Authenticated from cliquet.errors import raise_invalid +from cliquet.events import ACTIONS from cliquet.utils import build_request, reapply_cors, hmac_digest from cliquet.storage import exceptions as storage_exceptions @@ -27,14 +28,11 @@ if bucket_id in already_created: return - # Fake context to instantiate a Bucket resource. - context = RouteFactory(request) - context.get_permission_object_id = lambda r, i: '/buckets/%s' % bucket_id - resource = Bucket(request, context) - try: - bucket = resource.model.create_record({'id': bucket_id}) - except storage_exceptions.UnicityError as e: - bucket = e.record + bucket = resource_create_object(request=request, + resource_cls=Bucket, + uri='/buckets/%s' % bucket_id, + resource_name='bucket', + obj_id=bucket_id) already_created[bucket_id] = bucket @@ -58,27 +56,51 @@ if collection_put: return - # Fake context to instantiate a Collection resource. - context = RouteFactory(request) - context.get_permission_object_id = lambda r, i: collection_uri - - backup = request.matchdict + backup_matchdict = request.matchdict request.matchdict = dict(bucket_id=bucket_id, id=collection_id, **request.matchdict) - resource = Collection(request, context) - if not resource.model.id_generator.match(collection_id): + collection = resource_create_object(request=request, + resource_cls=Collection, + uri=collection_uri, + resource_name='collection', + obj_id=collection_id) + already_created[collection_uri] = collection + request.matchdict = backup_matchdict + + +def resource_create_object(request, resource_cls, uri, resource_name, obj_id): + """In the default bucket, the bucket and collection are implicitly + created. This helper instantiate the resource and simulate a request + with its RootFactory on the instantiated resource. + :returns: the created object + :rtype: dict + """ + # Fake context to instantiate a resource. + context = RouteFactory(request) + context.get_permission_object_id = lambda r, i: uri + + resource = resource_cls(request, context) + + # Check that provided id is valid for this resource. + if not resource.model.id_generator.match(obj_id): error_details = { 'location': 'path', - 'description': "Invalid collection_id id" + 'description': "Invalid %s id" % resource_name } - raise_invalid(request, **error_details) + raise_invalid(resource.request, **error_details) + + data = {'id': obj_id} try: - collection = resource.model.create_record({'id': collection_id}) + obj = resource.model.create_record(data) + # Since the current request is not a resource (but a straight Service), + # we simulate a request on a resource. + # This will be used in the resource event payload. + resource.request.current_resource_name = resource_name + resource.postprocess(data, action=ACTIONS.CREATE) except storage_exceptions.UnicityError as e: - collection = e.record - already_created[collection_uri] = collection - request.matchdict = backup + obj = e.record + return obj def default_bucket(request):
{"golden_diff": "diff --git a/kinto/plugins/default_bucket/__init__.py b/kinto/plugins/default_bucket/__init__.py\n--- a/kinto/plugins/default_bucket/__init__.py\n+++ b/kinto/plugins/default_bucket/__init__.py\n@@ -6,6 +6,7 @@\n from pyramid.security import NO_PERMISSION_REQUIRED, Authenticated\n \n from cliquet.errors import raise_invalid\n+from cliquet.events import ACTIONS\n from cliquet.utils import build_request, reapply_cors, hmac_digest\n from cliquet.storage import exceptions as storage_exceptions\n \n@@ -27,14 +28,11 @@\n if bucket_id in already_created:\n return\n \n- # Fake context to instantiate a Bucket resource.\n- context = RouteFactory(request)\n- context.get_permission_object_id = lambda r, i: '/buckets/%s' % bucket_id\n- resource = Bucket(request, context)\n- try:\n- bucket = resource.model.create_record({'id': bucket_id})\n- except storage_exceptions.UnicityError as e:\n- bucket = e.record\n+ bucket = resource_create_object(request=request,\n+ resource_cls=Bucket,\n+ uri='/buckets/%s' % bucket_id,\n+ resource_name='bucket',\n+ obj_id=bucket_id)\n already_created[bucket_id] = bucket\n \n \n@@ -58,27 +56,51 @@\n if collection_put:\n return\n \n- # Fake context to instantiate a Collection resource.\n- context = RouteFactory(request)\n- context.get_permission_object_id = lambda r, i: collection_uri\n-\n- backup = request.matchdict\n+ backup_matchdict = request.matchdict\n request.matchdict = dict(bucket_id=bucket_id,\n id=collection_id,\n **request.matchdict)\n- resource = Collection(request, context)\n- if not resource.model.id_generator.match(collection_id):\n+ collection = resource_create_object(request=request,\n+ resource_cls=Collection,\n+ uri=collection_uri,\n+ resource_name='collection',\n+ obj_id=collection_id)\n+ already_created[collection_uri] = collection\n+ request.matchdict = backup_matchdict\n+\n+\n+def resource_create_object(request, resource_cls, uri, resource_name, obj_id):\n+ \"\"\"In the default bucket, the bucket and collection are implicitly\n+ created. This helper instantiate the resource and simulate a request\n+ with its RootFactory on the instantiated resource.\n+ :returns: the created object\n+ :rtype: dict\n+ \"\"\"\n+ # Fake context to instantiate a resource.\n+ context = RouteFactory(request)\n+ context.get_permission_object_id = lambda r, i: uri\n+\n+ resource = resource_cls(request, context)\n+\n+ # Check that provided id is valid for this resource.\n+ if not resource.model.id_generator.match(obj_id):\n error_details = {\n 'location': 'path',\n- 'description': \"Invalid collection_id id\"\n+ 'description': \"Invalid %s id\" % resource_name\n }\n- raise_invalid(request, **error_details)\n+ raise_invalid(resource.request, **error_details)\n+\n+ data = {'id': obj_id}\n try:\n- collection = resource.model.create_record({'id': collection_id})\n+ obj = resource.model.create_record(data)\n+ # Since the current request is not a resource (but a straight Service),\n+ # we simulate a request on a resource.\n+ # This will be used in the resource event payload.\n+ resource.request.current_resource_name = resource_name\n+ resource.postprocess(data, action=ACTIONS.CREATE)\n except storage_exceptions.UnicityError as e:\n- collection = e.record\n- already_created[collection_uri] = collection\n- request.matchdict = backup\n+ obj = e.record\n+ return obj\n \n \n def default_bucket(request):\n", "issue": "Create Collection event notification not fired for auto-create on default bucket\nI was hacking around with Redis event notifications today. 
I setup two separate listeners with filters:\n\n```\nkinto.event_listeners = redis1 redis2\n\nkinto.event_listeners.redis1.use = cliquet.listeners.redis\nkinto.event_listeners.redis1.url = <url>\nkinto.event_listeners.redis1.pool_size = 5\nkinto.event_listeners.redis1.listname = kinto:collection:create\nkinto.event_listeners.redis1.actions = create\nkinto.event_listeners.redis1.resources = collection\n\nkinto.event_listeners.redis2.use = cliquet.listeners.redis\nkinto.event_listeners.redis2.url = <url>\nkinto.event_listeners.redis2.pool_size = 5\nkinto.event_listeners.redis2.listname = kinto:record:create\nkinto.event_listeners.redis2.actions = create\nkinto.event_listeners.redis2.resources = record\n```\n\nTo test, I was using the default bucket, and the inserting a sample record to a collection that did not yet exist. Although the collection was auto-created, and the record inserted, only the event for create record was logged to Redis. If I create the collection manually, then the create collection event is logged.\n\nSummary: Auto-created collections on the default bucket are not firing event notifications.\n\n", "before_files": [{"content": "import uuid\n\nimport six\nfrom pyramid import httpexceptions\nfrom pyramid.settings import asbool\nfrom pyramid.security import NO_PERMISSION_REQUIRED, Authenticated\n\nfrom cliquet.errors import raise_invalid\nfrom cliquet.utils import build_request, reapply_cors, hmac_digest\nfrom cliquet.storage import exceptions as storage_exceptions\n\nfrom kinto.authorization import RouteFactory\nfrom kinto.views.buckets import Bucket\nfrom kinto.views.collections import Collection\n\n\ndef create_bucket(request, bucket_id):\n \"\"\"Create a bucket if it doesn't exists.\"\"\"\n bucket_put = (request.method.lower() == 'put' and\n request.path.endswith('buckets/default'))\n # Do nothing if current request will already create the bucket.\n if bucket_put:\n return\n\n # Do not intent to create multiple times per request (e.g. in batch).\n already_created = request.bound_data.setdefault('buckets', {})\n if bucket_id in already_created:\n return\n\n # Fake context to instantiate a Bucket resource.\n context = RouteFactory(request)\n context.get_permission_object_id = lambda r, i: '/buckets/%s' % bucket_id\n resource = Bucket(request, context)\n try:\n bucket = resource.model.create_record({'id': bucket_id})\n except storage_exceptions.UnicityError as e:\n bucket = e.record\n already_created[bucket_id] = bucket\n\n\ndef create_collection(request, bucket_id):\n # Do nothing if current request does not involve a collection.\n subpath = request.matchdict.get('subpath')\n if not (subpath and subpath.startswith('collections/')):\n return\n\n collection_id = subpath.split('/')[1]\n collection_uri = '/buckets/%s/collections/%s' % (bucket_id, collection_id)\n\n # Do not intent to create multiple times per request (e.g. 
in batch).\n already_created = request.bound_data.setdefault('collections', {})\n if collection_uri in already_created:\n return\n\n # Do nothing if current request will already create the collection.\n collection_put = (request.method.lower() == 'put' and\n request.path.endswith(collection_id))\n if collection_put:\n return\n\n # Fake context to instantiate a Collection resource.\n context = RouteFactory(request)\n context.get_permission_object_id = lambda r, i: collection_uri\n\n backup = request.matchdict\n request.matchdict = dict(bucket_id=bucket_id,\n id=collection_id,\n **request.matchdict)\n resource = Collection(request, context)\n if not resource.model.id_generator.match(collection_id):\n error_details = {\n 'location': 'path',\n 'description': \"Invalid collection_id id\"\n }\n raise_invalid(request, **error_details)\n try:\n collection = resource.model.create_record({'id': collection_id})\n except storage_exceptions.UnicityError as e:\n collection = e.record\n already_created[collection_uri] = collection\n request.matchdict = backup\n\n\ndef default_bucket(request):\n if request.method.lower() == 'options':\n path = request.path.replace('default', 'unknown')\n subrequest = build_request(request, {\n 'method': 'OPTIONS',\n 'path': path\n })\n return request.invoke_subrequest(subrequest)\n\n if Authenticated not in request.effective_principals:\n # Pass through the forbidden_view_config\n raise httpexceptions.HTTPForbidden()\n\n settings = request.registry.settings\n\n if asbool(settings['readonly']):\n raise httpexceptions.HTTPMethodNotAllowed()\n\n bucket_id = request.default_bucket_id\n path = request.path.replace('/buckets/default', '/buckets/%s' % bucket_id)\n querystring = request.url[(request.url.index(request.path) +\n len(request.path)):]\n\n # Make sure bucket exists\n create_bucket(request, bucket_id)\n\n # Make sure the collection exists\n create_collection(request, bucket_id)\n\n subrequest = build_request(request, {\n 'method': request.method,\n 'path': path + querystring,\n 'body': request.body\n })\n subrequest.bound_data = request.bound_data\n\n try:\n response = request.invoke_subrequest(subrequest)\n except httpexceptions.HTTPException as error:\n is_redirect = error.status_code < 400\n if error.content_type == 'application/json' or is_redirect:\n response = reapply_cors(subrequest, error)\n else:\n # Ask the upper level to format the error.\n raise error\n return response\n\n\ndef default_bucket_id(request):\n settings = request.registry.settings\n secret = settings['userid_hmac_secret']\n # Build the user unguessable bucket_id UUID from its user_id\n digest = hmac_digest(secret, request.prefixed_userid)\n return six.text_type(uuid.UUID(digest[:32]))\n\n\ndef get_user_info(request):\n user_info = {\n 'id': request.prefixed_userid,\n 'bucket': request.default_bucket_id\n }\n return user_info\n\n\ndef includeme(config):\n # Redirect default to the right endpoint\n config.add_view(default_bucket,\n route_name='default_bucket',\n permission=NO_PERMISSION_REQUIRED)\n config.add_view(default_bucket,\n route_name='default_bucket_collection',\n permission=NO_PERMISSION_REQUIRED)\n\n config.add_route('default_bucket_collection',\n '/buckets/default/{subpath:.*}')\n config.add_route('default_bucket', '/buckets/default')\n\n # Provide helpers\n config.add_request_method(default_bucket_id, reify=True)\n # Override Cliquet default user info\n config.add_request_method(get_user_info)\n\n config.add_api_capability(\n \"default_bucket\",\n description=\"The default bucket is 
an alias for a personal\"\n \" bucket where collections are created implicitly.\",\n url=\"http://kinto.readthedocs.org/en/latest/api/1.x/\"\n \"buckets.html#personal-bucket-default\")\n", "path": "kinto/plugins/default_bucket/__init__.py"}]}
2,479
824
gh_patches_debug_36
rasdani/github-patches
git_diff
sonic-net__sonic-utilities-2090
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> sonic-installer install fails in armhf <!-- If you are reporting a new issue, make sure that we do not have any duplicates already open. You can ensure this by searching the issue list for this repository. If there is a duplicate, please close your issue and add a comment to the existing issue instead. If you suspect your issue is a bug, please edit your issue description to include the BUG REPORT INFORMATION shown below. If you fail to provide this information within 7 days, we cannot debug your issue and will close it. We will, however, reopen it if you later provide the information. For more information about reporting issues, see https://github.com/Azure/SONiC/wiki#report-issues --------------------------------------------------- GENERAL SUPPORT INFORMATION --------------------------------------------------- The GitHub issue tracker is for bug reports and feature requests. General support can be found at the following locations: - SONiC Support Forums - https://groups.google.com/forum/#!forum/sonicproject --------------------------------------------------- BUG REPORT INFORMATION --------------------------------------------------- Use the commands below to provide key information from your environment: You do NOT have to include this information if this is a FEATURE REQUEST --> #### Description <!-- Briefly describe the problem you are having in a few paragraphs. --> Sonic-installer failure log using install operation. File “/usr/local/lib/python3.9/dist-packages/click/core.py”, line 956, in invoke return ctx.invoke(self.callback, **ctx.params) File “/usr/local/lib/python3.9/dist-packages/click/core.py”, line 555, in invoke return callback(*args, **kwargs) File “/usr/local/lib/python3.9/dist-packages/sonic_installer/main.py”, line 543, in install if not skip_platform_check and not bootloader.verify_image_platform(image_path): File “/usr/local/lib/python3.9/dist-packages/sonic_installer/bootloader/uboot.py”, line 81, in verify_image_platform return os.path.isfile(image_path) NameError: name ‘os’ is not defined ## Steps to reproduce the issue 1. sonic-installer install <image> #### Describe the results you received #### Describe the results you expected sonic-installer to work seamlessly. #### Additional information you deem important (e.g. 
issue happens only occasionally) #### Output of `show version` ``` # show version SONiC Software Version: SONiC.HEAD.0-dirty-20220302.124544 Distribution: Debian 11.2 Kernel: 5.10.0-8-2-armmp Build commit: 94b778c39 Build date: Wed Mar 2 08:25:34 UTC 2022 Built by: marvell@cpss-build1 Platform: armhf-nokia_ixs7215_52x-r0 HwSKU: Nokia-7215 ASIC: marvell ASIC Count: 1 Serial Number: NK203110011 Model Number: 3HE16794AARA01 Hardware Revision: N/A Uptime: 16:49:12 up 33 min, 1 user, load average: 0.21, 0.69, 0.82 Docker images: REPOSITORY TAG IMAGE ID SIZE docker-dhcp-relay latest 39c289f394ba 484MB docker-teamd HEAD.0-dirty-20220302.124544 897670943f24 483MB docker-teamd latest 897670943f24 483MB docker-syncd-mrvl HEAD.0-dirty-20220302.124544 ded0f3c5116c 607MB docker-syncd-mrvl latest ded0f3c5116c 607MB docker-snmp HEAD.0-dirty-20220302.124544 50b2af07aa43 514MB docker-snmp latest 50b2af07aa43 514MB docker-sflow HEAD.0-dirty-20220302.124544 80d19598c760 484MB docker-sflow latest 80d19598c760 484MB docker-router-advertiser HEAD.0-dirty-20220302.124544 ff951f4fa02e 474MB docker-router-advertiser latest ff951f4fa02e 474MB docker-platform-monitor HEAD.0-dirty-20220302.124544 87406f9f212c 695MB docker-platform-monitor latest 87406f9f212c 695MB docker-orchagent HEAD.0-dirty-20220302.124544 fafd5ae1c574 571MB docker-orchagent latest fafd5ae1c574 571MB docker-nat HEAD.0-dirty-20220302.124544 c6a2c3a9d794 485MB docker-nat latest c6a2c3a9d794 485MB docker-mux HEAD.0-dirty-20220302.124544 e1246be5c510 492MB docker-mux latest e1246be5c510 492MB docker-macsec HEAD.0-dirty-20220302.124544 87fbc786165c 485MB docker-macsec latest 87fbc786165c 485MB docker-lldp HEAD.0-dirty-20220302.124544 498091e0d9f6 478MB docker-lldp latest 498091e0d9f6 478MB docker-fpm-frr HEAD.0-dirty-20220302.124544 95d1d947a343 497MB docker-fpm-frr latest 95d1d947a343 497MB docker-database HEAD.0-dirty-20220302.124544 3e5047261b1c 471MB docker-database latest 3e5047261b1c 471MB ``` <!-- Also attach debug file produced by `sudo generate_dump` --> </issue> <code> [start of sonic_installer/bootloader/uboot.py] 1 """ 2 Bootloader implementation for uboot based platforms 3 """ 4 5 import platform 6 import subprocess 7 8 import click 9 10 from ..common import ( 11 HOST_PATH, 12 IMAGE_DIR_PREFIX, 13 IMAGE_PREFIX, 14 run_command, 15 ) 16 from .onie import OnieInstallerBootloader 17 18 class UbootBootloader(OnieInstallerBootloader): 19 20 NAME = 'uboot' 21 22 def get_installed_images(self): 23 images = [] 24 proc = subprocess.Popen("/usr/bin/fw_printenv -n sonic_version_1", shell=True, text=True, stdout=subprocess.PIPE) 25 (out, _) = proc.communicate() 26 image = out.rstrip() 27 if IMAGE_PREFIX in image: 28 images.append(image) 29 proc = subprocess.Popen("/usr/bin/fw_printenv -n sonic_version_2", shell=True, text=True, stdout=subprocess.PIPE) 30 (out, _) = proc.communicate() 31 image = out.rstrip() 32 if IMAGE_PREFIX in image: 33 images.append(image) 34 return images 35 36 def get_next_image(self): 37 images = self.get_installed_images() 38 proc = subprocess.Popen("/usr/bin/fw_printenv -n boot_next", shell=True, text=True, stdout=subprocess.PIPE) 39 (out, _) = proc.communicate() 40 image = out.rstrip() 41 if "sonic_image_2" in image: 42 next_image_index = 1 43 else: 44 next_image_index = 0 45 return images[next_image_index] 46 47 def set_default_image(self, image): 48 images = self.get_installed_images() 49 if image in images[0]: 50 run_command('/usr/bin/fw_setenv boot_next "run sonic_image_1"') 51 elif image in images[1]: 52 
run_command('/usr/bin/fw_setenv boot_next "run sonic_image_2"') 53 return True 54 55 def set_next_image(self, image): 56 images = self.get_installed_images() 57 if image in images[0]: 58 run_command('/usr/bin/fw_setenv boot_once "run sonic_image_1"') 59 elif image in images[1]: 60 run_command('/usr/bin/fw_setenv boot_once "run sonic_image_2"') 61 return True 62 63 def install_image(self, image_path): 64 run_command("bash " + image_path) 65 66 def remove_image(self, image): 67 click.echo('Updating next boot ...') 68 images = self.get_installed_images() 69 if image in images[0]: 70 run_command('/usr/bin/fw_setenv boot_next "run sonic_image_2"') 71 run_command('/usr/bin/fw_setenv sonic_version_1 "NONE"') 72 elif image in images[1]: 73 run_command('/usr/bin/fw_setenv boot_next "run sonic_image_1"') 74 run_command('/usr/bin/fw_setenv sonic_version_2 "NONE"') 75 image_dir = image.replace(IMAGE_PREFIX, IMAGE_DIR_PREFIX) 76 click.echo('Removing image root filesystem...') 77 subprocess.call(['rm','-rf', HOST_PATH + '/' + image_dir]) 78 click.echo('Done') 79 80 def verify_image_platform(self, image_path): 81 return os.path.isfile(image_path) 82 83 @classmethod 84 def detect(cls): 85 arch = platform.machine() 86 return ("arm" in arch) or ("aarch64" in arch) 87 [end of sonic_installer/bootloader/uboot.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/sonic_installer/bootloader/uboot.py b/sonic_installer/bootloader/uboot.py --- a/sonic_installer/bootloader/uboot.py +++ b/sonic_installer/bootloader/uboot.py @@ -4,6 +4,7 @@ import platform import subprocess +import os import click
{"golden_diff": "diff --git a/sonic_installer/bootloader/uboot.py b/sonic_installer/bootloader/uboot.py\n--- a/sonic_installer/bootloader/uboot.py\n+++ b/sonic_installer/bootloader/uboot.py\n@@ -4,6 +4,7 @@\n \n import platform\n import subprocess\n+import os\n \n import click\n", "issue": "sonic-installer install fails in armhf\n<!--\r\n If you are reporting a new issue, make sure that we do not have any duplicates\r\n already open. You can ensure this by searching the issue list for this\r\n repository. If there is a duplicate, please close your issue and add a comment\r\n to the existing issue instead.\r\n\r\n If you suspect your issue is a bug, please edit your issue description to\r\n include the BUG REPORT INFORMATION shown below. If you fail to provide this\r\n information within 7 days, we cannot debug your issue and will close it. We\r\n will, however, reopen it if you later provide the information.\r\n\r\n For more information about reporting issues, see\r\n https://github.com/Azure/SONiC/wiki#report-issues\r\n\r\n ---------------------------------------------------\r\n GENERAL SUPPORT INFORMATION\r\n ---------------------------------------------------\r\n\r\n The GitHub issue tracker is for bug reports and feature requests.\r\n General support can be found at the following locations:\r\n\r\n - SONiC Support Forums - https://groups.google.com/forum/#!forum/sonicproject\r\n\r\n ---------------------------------------------------\r\n BUG REPORT INFORMATION\r\n ---------------------------------------------------\r\n Use the commands below to provide key information from your environment:\r\n You do NOT have to include this information if this is a FEATURE REQUEST\r\n-->\r\n\r\n#### Description\r\n\r\n<!--\r\n Briefly describe the problem you are having in a few paragraphs.\r\n-->\r\nSonic-installer failure log using install operation.\r\n\r\nFile \u201c/usr/local/lib/python3.9/dist-packages/click/core.py\u201d, line 956, in invoke\r\n return ctx.invoke(self.callback, **ctx.params)\r\n File \u201c/usr/local/lib/python3.9/dist-packages/click/core.py\u201d, line 555, in invoke\r\n return callback(*args, **kwargs)\r\n File \u201c/usr/local/lib/python3.9/dist-packages/sonic_installer/main.py\u201d, line 543, in install\r\n if not skip_platform_check and not bootloader.verify_image_platform(image_path):\r\n File \u201c/usr/local/lib/python3.9/dist-packages/sonic_installer/bootloader/uboot.py\u201d, line 81, in verify_image_platform\r\n return os.path.isfile(image_path)\r\nNameError: name \u2018os\u2019 is not defined\r\n\r\n## Steps to reproduce the issue\r\n1. sonic-installer install <image>\r\n\r\n\r\n#### Describe the results you received\r\n\r\n\r\n#### Describe the results you expected\r\nsonic-installer to work seamlessly.\r\n\r\n#### Additional information you deem important (e.g. 
issue happens only occasionally)\r\n\r\n\r\n#### Output of `show version`\r\n\r\n```\r\n# show version\r\n\r\nSONiC Software Version: SONiC.HEAD.0-dirty-20220302.124544\r\nDistribution: Debian 11.2\r\nKernel: 5.10.0-8-2-armmp\r\nBuild commit: 94b778c39\r\nBuild date: Wed Mar 2 08:25:34 UTC 2022\r\nBuilt by: marvell@cpss-build1\r\n\r\nPlatform: armhf-nokia_ixs7215_52x-r0\r\nHwSKU: Nokia-7215\r\nASIC: marvell\r\nASIC Count: 1\r\nSerial Number: NK203110011\r\nModel Number: 3HE16794AARA01\r\nHardware Revision: N/A\r\nUptime: 16:49:12 up 33 min, 1 user, load average: 0.21, 0.69, 0.82\r\n\r\nDocker images:\r\nREPOSITORY TAG IMAGE ID SIZE\r\ndocker-dhcp-relay latest 39c289f394ba 484MB\r\ndocker-teamd HEAD.0-dirty-20220302.124544 897670943f24 483MB\r\ndocker-teamd latest 897670943f24 483MB\r\ndocker-syncd-mrvl HEAD.0-dirty-20220302.124544 ded0f3c5116c 607MB\r\ndocker-syncd-mrvl latest ded0f3c5116c 607MB\r\ndocker-snmp HEAD.0-dirty-20220302.124544 50b2af07aa43 514MB\r\ndocker-snmp latest 50b2af07aa43 514MB\r\ndocker-sflow HEAD.0-dirty-20220302.124544 80d19598c760 484MB\r\ndocker-sflow latest 80d19598c760 484MB\r\ndocker-router-advertiser HEAD.0-dirty-20220302.124544 ff951f4fa02e 474MB\r\ndocker-router-advertiser latest ff951f4fa02e 474MB\r\ndocker-platform-monitor HEAD.0-dirty-20220302.124544 87406f9f212c 695MB\r\ndocker-platform-monitor latest 87406f9f212c 695MB\r\ndocker-orchagent HEAD.0-dirty-20220302.124544 fafd5ae1c574 571MB\r\ndocker-orchagent latest fafd5ae1c574 571MB\r\ndocker-nat HEAD.0-dirty-20220302.124544 c6a2c3a9d794 485MB\r\ndocker-nat latest c6a2c3a9d794 485MB\r\ndocker-mux HEAD.0-dirty-20220302.124544 e1246be5c510 492MB\r\ndocker-mux latest e1246be5c510 492MB\r\ndocker-macsec HEAD.0-dirty-20220302.124544 87fbc786165c 485MB\r\ndocker-macsec latest 87fbc786165c 485MB\r\ndocker-lldp HEAD.0-dirty-20220302.124544 498091e0d9f6 478MB\r\ndocker-lldp latest 498091e0d9f6 478MB\r\ndocker-fpm-frr HEAD.0-dirty-20220302.124544 95d1d947a343 497MB\r\ndocker-fpm-frr latest 95d1d947a343 497MB\r\ndocker-database HEAD.0-dirty-20220302.124544 3e5047261b1c 471MB\r\ndocker-database latest 3e5047261b1c 471MB\r\n\r\n```\r\n\r\n<!--\r\n Also attach debug file produced by `sudo generate_dump`\r\n-->\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nBootloader implementation for uboot based platforms\n\"\"\"\n\nimport platform\nimport subprocess\n\nimport click\n\nfrom ..common import (\n HOST_PATH,\n IMAGE_DIR_PREFIX,\n IMAGE_PREFIX,\n run_command,\n)\nfrom .onie import OnieInstallerBootloader\n\nclass UbootBootloader(OnieInstallerBootloader):\n\n NAME = 'uboot'\n\n def get_installed_images(self):\n images = []\n proc = subprocess.Popen(\"/usr/bin/fw_printenv -n sonic_version_1\", shell=True, text=True, stdout=subprocess.PIPE)\n (out, _) = proc.communicate()\n image = out.rstrip()\n if IMAGE_PREFIX in image:\n images.append(image)\n proc = subprocess.Popen(\"/usr/bin/fw_printenv -n sonic_version_2\", shell=True, text=True, stdout=subprocess.PIPE)\n (out, _) = proc.communicate()\n image = out.rstrip()\n if IMAGE_PREFIX in image:\n images.append(image)\n return images\n\n def get_next_image(self):\n images = self.get_installed_images()\n proc = subprocess.Popen(\"/usr/bin/fw_printenv -n boot_next\", shell=True, text=True, stdout=subprocess.PIPE)\n (out, _) = proc.communicate()\n image = out.rstrip()\n if \"sonic_image_2\" in image:\n next_image_index = 1\n else:\n next_image_index = 0\n return images[next_image_index]\n\n def set_default_image(self, image):\n images = self.get_installed_images()\n if image in 
images[0]:\n run_command('/usr/bin/fw_setenv boot_next \"run sonic_image_1\"')\n elif image in images[1]:\n run_command('/usr/bin/fw_setenv boot_next \"run sonic_image_2\"')\n return True\n\n def set_next_image(self, image):\n images = self.get_installed_images()\n if image in images[0]:\n run_command('/usr/bin/fw_setenv boot_once \"run sonic_image_1\"')\n elif image in images[1]:\n run_command('/usr/bin/fw_setenv boot_once \"run sonic_image_2\"')\n return True\n\n def install_image(self, image_path):\n run_command(\"bash \" + image_path)\n\n def remove_image(self, image):\n click.echo('Updating next boot ...')\n images = self.get_installed_images()\n if image in images[0]:\n run_command('/usr/bin/fw_setenv boot_next \"run sonic_image_2\"')\n run_command('/usr/bin/fw_setenv sonic_version_1 \"NONE\"')\n elif image in images[1]:\n run_command('/usr/bin/fw_setenv boot_next \"run sonic_image_1\"')\n run_command('/usr/bin/fw_setenv sonic_version_2 \"NONE\"')\n image_dir = image.replace(IMAGE_PREFIX, IMAGE_DIR_PREFIX)\n click.echo('Removing image root filesystem...')\n subprocess.call(['rm','-rf', HOST_PATH + '/' + image_dir])\n click.echo('Done')\n\n def verify_image_platform(self, image_path):\n return os.path.isfile(image_path)\n\n @classmethod\n def detect(cls):\n arch = platform.machine()\n return (\"arm\" in arch) or (\"aarch64\" in arch)\n", "path": "sonic_installer/bootloader/uboot.py"}]}
3,138
80
gh_patches_debug_24535
rasdani/github-patches
git_diff
nvaccess__nvda-14588
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> in tabbed notepad when switching between tabs nvda should announce some way to differentiate between tabs ### Steps to reproduce: download the new tabbed notepad. now using the menu create a new tab now switch between tabs with ctrl+tabe ### Actual behavior: nvda announces blank edition text editor ### Expected behavior: Before writing what I want, I would like to talk about my discoveries, sorry if it doesn't make sense. I typed a different word into the first line of text on each tab. guide example 1 Fernando guide 2 silva using object navigation I found the list of tabs and within this list there was each tab named with what was written in the first line of text. Now I left the first line of text empty in tab 1 in the list of tabs tab 1 appears with the name of untitled from what i understand if the first line of text is characters this text will be the title of the tab. If the first line of text is empty, the tab will have an untitled title. so my suggestion is: when switching between tabs in notepad in this example by pressing ctrl+tab nvda should announce the title of the tab which will be what is typed in the first line. But this doesn't work if the first line of the tabs is empty, so I suggest that nvda also announce the position of the tab within the list. example guide 1 first line Fernando guide 2 first line empty guide 3 first line silva when switching between tabs nvda would announce: guide 1 of 3 fernando guide 2 of 3 untitled guide 3 of 3 silva Tab name and tab count could also be announced by command nvda + t to read window name. ### NVDA logs, crash dumps and other attachments: ### System configuration #### NVDA installed/portable/running from source: instaled #### NVDA version: nvda.exe, NVDA alpha-27590,180c9f2b #### Windows version: 11 22.623.1095 #### Name and version of other software in use when reproducing the issue: Notepad.exe, Microsoft.WindowsNotepad 11.2212.33.0 #### Other information about your system: ### Other questions #### Does the issue still occur after restarting your computer? yes #### Have you tried any other versions of NVDA? If so, please report their behaviors. no #### If NVDA add-ons are disabled, is your problem still occurring? yes #### Does the issue still occur after you run the COM Registration Fixing Tool in NVDA's tools menu? yes </issue> <code> [start of source/appModules/notepad.py] 1 # A part of NonVisual Desktop Access (NVDA) 2 # Copyright (C) 2022-2023 NV Access Limited, Joseph Lee 3 # This file is covered by the GNU General Public License. 4 # See the file COPYING for more details. 5 6 """App module for Windows Notepad. 7 While this app module also covers older Notepad releases, 8 this module provides workarounds for Windows 11 Notepad.""" 9 10 from comtypes import COMError 11 import appModuleHandler 12 import api 13 import UIAHandler 14 from NVDAObjects.UIA import UIA 15 from NVDAObjects import NVDAObject 16 17 18 class AppModule(appModuleHandler.AppModule): 19 20 def _get_statusBar(self) -> NVDAObject: 21 """Retrieves Windows 11 Notepad status bar. 22 In Windows 10 and earlier, status bar can be obtained by looking at the bottom of the screen. 23 Windows 11 Notepad uses Windows 11 UI design (top-level window is labeled "DesktopWindowXamlSource", 24 therefore status bar cannot be obtained by position alone. 25 If visible, a child of the foreground window hosts the status bar elements. 
26 Status bar child position must be checked whenever Notepad is updated on stable Windows 11 releases 27 as Notepad is updated through Microsoft Store as opposed to tied to specific Windows releases. 28 L{api.getStatusBar} will resort to position lookup if C{NotImplementedError} is raised. 29 """ 30 # #13688: Notepad 11 uses Windows 11 user interface, therefore status bar is harder to obtain. 31 # This does not affect earlier versions. 32 notepadVersion = int(self.productVersion.split(".")[0]) 33 if notepadVersion < 11: 34 raise NotImplementedError() 35 # And no, status bar is shown when editing documents. 36 # Thankfully, of all the UIA objects encountered, document window has a unique window class name. 37 if api.getFocusObject().windowClassName != "RichEditD2DPT": 38 raise NotImplementedError() 39 # Obtain status bar text across Notepad 11 releases. 40 clientObject = UIAHandler.handler.clientObject 41 condition = clientObject.createPropertyCondition(UIAHandler.UIA_AutomationIdPropertyId, "ContentTextBlock") 42 walker = clientObject.createTreeWalker(condition) 43 notepadWindow = clientObject.elementFromHandle(api.getForegroundObject().windowHandle) 44 try: 45 element = walker.getFirstChildElement(notepadWindow) 46 # Is status bar even showing? 47 element = element.buildUpdatedCache(UIAHandler.handler.baseCacheRequest) 48 except (ValueError, COMError): 49 raise NotImplementedError 50 statusBar = UIA(UIAElement=element).parent 51 return statusBar 52 [end of source/appModules/notepad.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/source/appModules/notepad.py b/source/appModules/notepad.py --- a/source/appModules/notepad.py +++ b/source/appModules/notepad.py @@ -10,13 +10,38 @@ from comtypes import COMError import appModuleHandler import api +import braille +import controlTypes +import eventHandler +import speech import UIAHandler from NVDAObjects.UIA import UIA from NVDAObjects import NVDAObject +from typing import Callable class AppModule(appModuleHandler.AppModule): + def event_UIA_elementSelected(self, obj: NVDAObject, nextHandler: Callable[[], None]): + # Announce currently selected tab when it changes. + if ( + obj.role == controlTypes.Role.TAB + # this is done because 2 selection events are sent for the same object, so to prevent double speaking. + and not eventHandler.isPendingEvents("UIA_elementSelected") + and controlTypes.State.SELECTED in obj.states + ): + speech.cancelSpeech() + speech.speakObject(obj, reason=controlTypes.OutputReason.FOCUS) + braille.handler.message( + braille.getPropertiesBraille( + name=obj.name, + role=obj.role, + states=obj.states, + positionInfo=obj.positionInfo + ) + ) + nextHandler() + def _get_statusBar(self) -> NVDAObject: """Retrieves Windows 11 Notepad status bar. In Windows 10 and earlier, status bar can be obtained by looking at the bottom of the screen.
{"golden_diff": "diff --git a/source/appModules/notepad.py b/source/appModules/notepad.py\n--- a/source/appModules/notepad.py\n+++ b/source/appModules/notepad.py\n@@ -10,13 +10,38 @@\n from comtypes import COMError\n import appModuleHandler\n import api\n+import braille\n+import controlTypes\n+import eventHandler\n+import speech\n import UIAHandler\n from NVDAObjects.UIA import UIA\n from NVDAObjects import NVDAObject\n+from typing import Callable\n \n \n class AppModule(appModuleHandler.AppModule):\n \n+\tdef event_UIA_elementSelected(self, obj: NVDAObject, nextHandler: Callable[[], None]):\n+\t\t# Announce currently selected tab when it changes.\n+\t\tif (\n+\t\t\tobj.role == controlTypes.Role.TAB\n+\t\t\t# this is done because 2 selection events are sent for the same object, so to prevent double speaking.\n+\t\t\tand not eventHandler.isPendingEvents(\"UIA_elementSelected\")\n+\t\t\tand controlTypes.State.SELECTED in obj.states\n+\t\t):\n+\t\t\tspeech.cancelSpeech()\n+\t\t\tspeech.speakObject(obj, reason=controlTypes.OutputReason.FOCUS)\n+\t\t\tbraille.handler.message(\n+\t\t\t\tbraille.getPropertiesBraille(\n+\t\t\t\t\tname=obj.name,\n+\t\t\t\t\trole=obj.role,\n+\t\t\t\t\tstates=obj.states,\n+\t\t\t\t\tpositionInfo=obj.positionInfo\n+\t\t\t\t)\n+\t\t\t)\n+\t\tnextHandler()\n+\n \tdef _get_statusBar(self) -> NVDAObject:\n \t\t\"\"\"Retrieves Windows 11 Notepad status bar.\n \t\tIn Windows 10 and earlier, status bar can be obtained by looking at the bottom of the screen.\n", "issue": "in tabbed notepad when switching between tabs nvda should announce some way to differentiate between tabs\n\r\n### Steps to reproduce:\r\ndownload the new tabbed notepad.\r\nnow using the menu create a new tab\r\nnow switch between tabs with ctrl+tabe\r\n### Actual behavior:\r\nnvda announces blank edition text editor\r\n### Expected behavior:\r\nBefore writing what I want, I would like to talk about my discoveries, sorry if it doesn't make sense.\r\nI typed a different word into the first line of text on each tab.\r\nguide example 1\r\nFernando\r\nguide 2\r\nsilva\r\nusing object navigation I found the list of tabs and within this list there was each tab named with what was written in the first line of text.\r\nNow I left the first line of text empty in tab 1\r\nin the list of tabs tab 1 appears with the name of untitled\r\nfrom what i understand if the first line of text is characters this text will be the title of the tab.\r\nIf the first line of text is empty, the tab will have an untitled title.\r\nso my suggestion is:\r\nwhen switching between tabs in notepad in this example by pressing ctrl+tab nvda should announce the title of the tab which will be what is typed in the first line.\r\nBut this doesn't work if the first line of the tabs is empty, so I suggest that nvda also announce the position of the tab within the list.\r\nexample\r\nguide 1\r\nfirst line\r\nFernando\r\nguide 2\r\nfirst line\r\nempty\r\nguide 3\r\nfirst line\r\nsilva\r\nwhen switching between tabs nvda would announce:\r\nguide 1 of 3 fernando\r\nguide 2 of 3 untitled\r\nguide 3 of 3 silva\r\nTab name and tab count could also be announced by command nvda + t to read window name.\r\n### NVDA logs, crash dumps and other attachments:\r\n\r\n### System configuration\r\n#### NVDA installed/portable/running from source:\r\ninstaled\r\n#### NVDA version:\r\nnvda.exe, NVDA alpha-27590,180c9f2b\r\n#### Windows version:\r\n11 22.623.1095\r\n#### Name and version of other software in use when reproducing the issue:\r\nNotepad.exe, 
Microsoft.WindowsNotepad 11.2212.33.0\r\n\r\n#### Other information about your system:\r\n\r\n### Other questions\r\n#### Does the issue still occur after restarting your computer?\r\nyes\r\n#### Have you tried any other versions of NVDA? If so, please report their behaviors.\r\nno\r\n#### If NVDA add-ons are disabled, is your problem still occurring?\r\nyes\r\n#### Does the issue still occur after you run the COM Registration Fixing Tool in NVDA's tools menu?\r\nyes\n", "before_files": [{"content": "# A part of NonVisual Desktop Access (NVDA)\n# Copyright (C) 2022-2023 NV Access Limited, Joseph Lee\n# This file is covered by the GNU General Public License.\n# See the file COPYING for more details.\n\n\"\"\"App module for Windows Notepad.\nWhile this app module also covers older Notepad releases,\nthis module provides workarounds for Windows 11 Notepad.\"\"\"\n\nfrom comtypes import COMError\nimport appModuleHandler\nimport api\nimport UIAHandler\nfrom NVDAObjects.UIA import UIA\nfrom NVDAObjects import NVDAObject\n\n\nclass AppModule(appModuleHandler.AppModule):\n\n\tdef _get_statusBar(self) -> NVDAObject:\n\t\t\"\"\"Retrieves Windows 11 Notepad status bar.\n\t\tIn Windows 10 and earlier, status bar can be obtained by looking at the bottom of the screen.\n\t\tWindows 11 Notepad uses Windows 11 UI design (top-level window is labeled \"DesktopWindowXamlSource\",\n\t\ttherefore status bar cannot be obtained by position alone.\n\t\tIf visible, a child of the foreground window hosts the status bar elements.\n\t\tStatus bar child position must be checked whenever Notepad is updated on stable Windows 11 releases\n\t\tas Notepad is updated through Microsoft Store as opposed to tied to specific Windows releases.\n\t\tL{api.getStatusBar} will resort to position lookup if C{NotImplementedError} is raised.\n\t\t\"\"\"\n\t\t# #13688: Notepad 11 uses Windows 11 user interface, therefore status bar is harder to obtain.\n\t\t# This does not affect earlier versions.\n\t\tnotepadVersion = int(self.productVersion.split(\".\")[0])\n\t\tif notepadVersion < 11:\n\t\t\traise NotImplementedError()\n\t\t# And no, status bar is shown when editing documents.\n\t\t# Thankfully, of all the UIA objects encountered, document window has a unique window class name.\n\t\tif api.getFocusObject().windowClassName != \"RichEditD2DPT\":\n\t\t\traise NotImplementedError()\n\t\t# Obtain status bar text across Notepad 11 releases.\n\t\tclientObject = UIAHandler.handler.clientObject\n\t\tcondition = clientObject.createPropertyCondition(UIAHandler.UIA_AutomationIdPropertyId, \"ContentTextBlock\")\n\t\twalker = clientObject.createTreeWalker(condition)\n\t\tnotepadWindow = clientObject.elementFromHandle(api.getForegroundObject().windowHandle)\n\t\ttry:\n\t\t\telement = walker.getFirstChildElement(notepadWindow)\n\t\t\t# Is status bar even showing?\n\t\t\telement = element.buildUpdatedCache(UIAHandler.handler.baseCacheRequest)\n\t\texcept (ValueError, COMError):\n\t\t\traise NotImplementedError\n\t\tstatusBar = UIA(UIAElement=element).parent\n\t\treturn statusBar\n", "path": "source/appModules/notepad.py"}]}
1,801
364
gh_patches_debug_11669
rasdani/github-patches
git_diff
scikit-hep__pyhf-960
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Documentation: meaning of value for return_fitted_val=True # Description In this code snippet from the documentation ```python >>> pyhf.infer.mle.fixed_poi_fit(test_poi, data, model, return_fitted_val=True) (array([1. , 0.97224597, 0.87553894]), array([28.92218013])) ``` it isn't clear what the meaning of `array([28.92218013])` is. Is it likelihood, log likelihood, -log likelihood, -2 log likelihood? It is the latter, but that is not clear. Applies to https://scikit-hep.org/pyhf/_generated/pyhf.infer.mle.fixed_poi_fit.html or https://scikit-hep.org/pyhf/_generated/pyhf.infer.mle.fit.html ## Is your feature request related to a problem? Please describe. I wasn't sure, so I had to try a few things to figure it out. ### Describe the solution you'd like Add a note to the documentation for the convention. ### Describe alternatives you've considered banging my head against the wall. # Relevant Issues and Pull Requests </issue> <code> [start of src/pyhf/infer/mle.py] 1 """Module for Maximum Likelihood Estimation.""" 2 from .. import get_backend 3 from ..exceptions import UnspecifiedPOI 4 5 6 def twice_nll(pars, data, pdf): 7 """ 8 Twice the negative Log-Likelihood. 9 10 Args: 11 data (`tensor`): The data 12 pdf (~pyhf.pdf.Model): The statistical model adhering to the schema model.json 13 14 Returns: 15 Twice the negative log likelihood. 16 17 """ 18 return -2 * pdf.logpdf(pars, data) 19 20 21 def fit(data, pdf, init_pars=None, par_bounds=None, **kwargs): 22 """ 23 Run a unconstrained maximum likelihood fit. 24 25 Example: 26 >>> import pyhf 27 >>> pyhf.set_backend("numpy") 28 >>> model = pyhf.simplemodels.hepdata_like( 29 ... signal_data=[12.0, 11.0], bkg_data=[50.0, 52.0], bkg_uncerts=[3.0, 7.0] 30 ... ) 31 >>> observations = [51, 48] 32 >>> data = pyhf.tensorlib.astensor(observations + model.config.auxdata) 33 >>> pyhf.infer.mle.fit(data, model, return_fitted_val=True) 34 (array([0. , 1.0030512 , 0.96266961]), array([24.98393521])) 35 36 Args: 37 data (`tensor`): The data 38 pdf (~pyhf.pdf.Model): The statistical model adhering to the schema model.json 39 init_pars (`list`): Values to initialize the model parameters at for the fit 40 par_bounds (`list` of `list`\s or `tuple`\s): The extrema of values the model parameters are allowed to reach in the fit 41 kwargs: Keyword arguments passed through to the optimizer API 42 43 Returns: 44 See optimizer API 45 46 """ 47 _, opt = get_backend() 48 init_pars = init_pars or pdf.config.suggested_init() 49 par_bounds = par_bounds or pdf.config.suggested_bounds() 50 return opt.minimize(twice_nll, data, pdf, init_pars, par_bounds, **kwargs) 51 52 53 def fixed_poi_fit(poi_val, data, pdf, init_pars=None, par_bounds=None, **kwargs): 54 """ 55 Run a maximum likelihood fit with the POI value fixed. 56 57 Example: 58 >>> import pyhf 59 >>> pyhf.set_backend("numpy") 60 >>> model = pyhf.simplemodels.hepdata_like( 61 ... signal_data=[12.0, 11.0], bkg_data=[50.0, 52.0], bkg_uncerts=[3.0, 7.0] 62 ... ) 63 >>> observations = [51, 48] 64 >>> data = pyhf.tensorlib.astensor(observations + model.config.auxdata) 65 >>> test_poi = 1.0 66 >>> pyhf.infer.mle.fixed_poi_fit(test_poi, data, model, return_fitted_val=True) 67 (array([1. 
, 0.97224597, 0.87553894]), array([28.92218013])) 68 69 Args: 70 data: The data 71 pdf (~pyhf.pdf.Model): The statistical model adhering to the schema model.json 72 init_pars (`list`): Values to initialize the model parameters at for the fit 73 par_bounds (`list` of `list`\s or `tuple`\s): The extrema of values the model parameters are allowed to reach in the fit 74 kwargs: Keyword arguments passed through to the optimizer API 75 76 Returns: 77 See optimizer API 78 79 """ 80 if pdf.config.poi_index is None: 81 raise UnspecifiedPOI( 82 'No POI is defined. A POI is required to fit with a fixed POI.' 83 ) 84 _, opt = get_backend() 85 init_pars = init_pars or pdf.config.suggested_init() 86 par_bounds = par_bounds or pdf.config.suggested_bounds() 87 return opt.minimize( 88 twice_nll, 89 data, 90 pdf, 91 init_pars, 92 par_bounds, 93 [(pdf.config.poi_index, poi_val)], 94 **kwargs, 95 ) 96 [end of src/pyhf/infer/mle.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/pyhf/infer/mle.py b/src/pyhf/infer/mle.py --- a/src/pyhf/infer/mle.py +++ b/src/pyhf/infer/mle.py @@ -22,6 +22,10 @@ """ Run a unconstrained maximum likelihood fit. + .. note:: + + :func:`twice_nll` is the objective function. + Example: >>> import pyhf >>> pyhf.set_backend("numpy") @@ -54,6 +58,10 @@ """ Run a maximum likelihood fit with the POI value fixed. + .. note:: + + :func:`twice_nll` is the objective function. + Example: >>> import pyhf >>> pyhf.set_backend("numpy")
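The ambiguity raised in the pyhf issue above is settled by the source shown in the record: both `fit` and `fixed_poi_fit` minimize `twice_nll`, i.e. −2·log L, so the second element of the returned tuple is twice the negative log-likelihood at the best-fit parameters. A short check of that convention, reusing only calls that already appear in the record's doctests (exact numerical output varying slightly by version is an assumption here):

```python
import numpy as np
import pyhf

pyhf.set_backend("numpy")
model = pyhf.simplemodels.hepdata_like(
    signal_data=[12.0, 11.0], bkg_data=[50.0, 52.0], bkg_uncerts=[3.0, 7.0]
)
data = pyhf.tensorlib.astensor([51, 48] + model.config.auxdata)

bestfit_pars, fit_objective = pyhf.infer.mle.fit(data, model, return_fitted_val=True)

# The "fitted val" is the minimized objective: twice the negative log-likelihood.
assert np.allclose(fit_objective, -2 * model.logpdf(bestfit_pars, data))
```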
{"golden_diff": "diff --git a/src/pyhf/infer/mle.py b/src/pyhf/infer/mle.py\n--- a/src/pyhf/infer/mle.py\n+++ b/src/pyhf/infer/mle.py\n@@ -22,6 +22,10 @@\n \"\"\"\n Run a unconstrained maximum likelihood fit.\n \n+ .. note::\n+\n+ :func:`twice_nll` is the objective function.\n+\n Example:\n >>> import pyhf\n >>> pyhf.set_backend(\"numpy\")\n@@ -54,6 +58,10 @@\n \"\"\"\n Run a maximum likelihood fit with the POI value fixed.\n \n+ .. note::\n+\n+ :func:`twice_nll` is the objective function.\n+\n Example:\n >>> import pyhf\n >>> pyhf.set_backend(\"numpy\")\n", "issue": "Documentation: meaning of value for return_fitted_val=True\n# Description\r\n\r\nIn this code snippet from the documentation \r\n\r\n```python\r\n>>> pyhf.infer.mle.fixed_poi_fit(test_poi, data, model, return_fitted_val=True)\r\n(array([1. , 0.97224597, 0.87553894]), array([28.92218013]))\r\n```\r\n\r\nit isn't clear what the meaning of `array([28.92218013])` is. Is it likelihood, log likelihood, -log likelihood, -2 log likelihood?\r\nIt is the latter, but that is not clear.\r\n\r\nApplies to \r\n\r\nhttps://scikit-hep.org/pyhf/_generated/pyhf.infer.mle.fixed_poi_fit.html\r\nor\r\nhttps://scikit-hep.org/pyhf/_generated/pyhf.infer.mle.fit.html\r\n\r\n## Is your feature request related to a problem? Please describe.\r\n\r\nI wasn't sure, so I had to try a few things to figure it out.\r\n\r\n### Describe the solution you'd like\r\n\r\nAdd a note to the documentation for the convention.\r\n\r\n### Describe alternatives you've considered\r\n\r\nbanging my head against the wall.\r\n\r\n# Relevant Issues and Pull Requests\r\n\r\n\n", "before_files": [{"content": "\"\"\"Module for Maximum Likelihood Estimation.\"\"\"\nfrom .. import get_backend\nfrom ..exceptions import UnspecifiedPOI\n\n\ndef twice_nll(pars, data, pdf):\n \"\"\"\n Twice the negative Log-Likelihood.\n\n Args:\n data (`tensor`): The data\n pdf (~pyhf.pdf.Model): The statistical model adhering to the schema model.json\n\n Returns:\n Twice the negative log likelihood.\n\n \"\"\"\n return -2 * pdf.logpdf(pars, data)\n\n\ndef fit(data, pdf, init_pars=None, par_bounds=None, **kwargs):\n \"\"\"\n Run a unconstrained maximum likelihood fit.\n\n Example:\n >>> import pyhf\n >>> pyhf.set_backend(\"numpy\")\n >>> model = pyhf.simplemodels.hepdata_like(\n ... signal_data=[12.0, 11.0], bkg_data=[50.0, 52.0], bkg_uncerts=[3.0, 7.0]\n ... )\n >>> observations = [51, 48]\n >>> data = pyhf.tensorlib.astensor(observations + model.config.auxdata)\n >>> pyhf.infer.mle.fit(data, model, return_fitted_val=True)\n (array([0. , 1.0030512 , 0.96266961]), array([24.98393521]))\n\n Args:\n data (`tensor`): The data\n pdf (~pyhf.pdf.Model): The statistical model adhering to the schema model.json\n init_pars (`list`): Values to initialize the model parameters at for the fit\n par_bounds (`list` of `list`\\s or `tuple`\\s): The extrema of values the model parameters are allowed to reach in the fit\n kwargs: Keyword arguments passed through to the optimizer API\n\n Returns:\n See optimizer API\n\n \"\"\"\n _, opt = get_backend()\n init_pars = init_pars or pdf.config.suggested_init()\n par_bounds = par_bounds or pdf.config.suggested_bounds()\n return opt.minimize(twice_nll, data, pdf, init_pars, par_bounds, **kwargs)\n\n\ndef fixed_poi_fit(poi_val, data, pdf, init_pars=None, par_bounds=None, **kwargs):\n \"\"\"\n Run a maximum likelihood fit with the POI value fixed.\n\n Example:\n >>> import pyhf\n >>> pyhf.set_backend(\"numpy\")\n >>> model = pyhf.simplemodels.hepdata_like(\n ... 
signal_data=[12.0, 11.0], bkg_data=[50.0, 52.0], bkg_uncerts=[3.0, 7.0]\n ... )\n >>> observations = [51, 48]\n >>> data = pyhf.tensorlib.astensor(observations + model.config.auxdata)\n >>> test_poi = 1.0\n >>> pyhf.infer.mle.fixed_poi_fit(test_poi, data, model, return_fitted_val=True)\n (array([1. , 0.97224597, 0.87553894]), array([28.92218013]))\n\n Args:\n data: The data\n pdf (~pyhf.pdf.Model): The statistical model adhering to the schema model.json\n init_pars (`list`): Values to initialize the model parameters at for the fit\n par_bounds (`list` of `list`\\s or `tuple`\\s): The extrema of values the model parameters are allowed to reach in the fit\n kwargs: Keyword arguments passed through to the optimizer API\n\n Returns:\n See optimizer API\n\n \"\"\"\n if pdf.config.poi_index is None:\n raise UnspecifiedPOI(\n 'No POI is defined. A POI is required to fit with a fixed POI.'\n )\n _, opt = get_backend()\n init_pars = init_pars or pdf.config.suggested_init()\n par_bounds = par_bounds or pdf.config.suggested_bounds()\n return opt.minimize(\n twice_nll,\n data,\n pdf,\n init_pars,\n par_bounds,\n [(pdf.config.poi_index, poi_val)],\n **kwargs,\n )\n", "path": "src/pyhf/infer/mle.py"}]}
1,924
178
gh_patches_debug_13489
rasdani/github-patches
git_diff
frappe__frappe-22795
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Cron server script runs twice <!-- Welcome to the Frappe Framework issue tracker! Before creating an issue, please heed the following: 1. This tracker should only be used to report bugs and request features / enhancements to Frappe - For questions and general support, use https://stackoverflow.com/questions/tagged/frappe - For documentation issues, refer to https://frappeframework.com/docs/user/en or the developer cheetsheet https://github.com/frappe/frappe/wiki/Developer-Cheatsheet 2. Use the search function before creating a new issue. Duplicates will be closed and directed to the original discussion. 3. When making a bug report, make sure you provide all required information. The easier it is for maintainers to reproduce, the faster it'll be fixed. 4. If you think you know what the reason for the bug is, share it with us. Maybe put in a PR 😉 --> ## Description of the issue Created a cron server script with "13 11 17 10 tue" cron format. So it should run at 11:13 17/10/2023. But it runs once after save and runs again at correct time. ## Context information (for bug reports) ![image](https://github.com/frappe/frappe/assets/710051/1531ef89-3fc0-4b0a-a7af-2fe77d626e11) ![image](https://github.com/frappe/frappe/assets/710051/21fd224d-e841-4598-967f-b79c9895009b) **Output of `bench version`** ``` Bench 5.17.2 Frappe 14.52.0 ``` ## Steps to reproduce the issue 1.Create a server script as "Scheduler Event", "Event Frequency" as "Cron" 2.Make cron format as ```59 23 * * *``` 3.Set script as ```frappe.log_error("Cron Test", "Test")``` ### Observed result Watch the Error Log. It creates a "Cron Test" log once and at the midnight. ### Expected result It should only create the error log at the midnight. ### Stacktrace / full error message ``` None ``` ## Additional information OS version / distribution, `Frappe` install method, etc. Manual install </issue> <code> [start of frappe/core/doctype/scheduled_job_type/scheduled_job_type.py] 1 # Copyright (c) 2021, Frappe Technologies and contributors 2 # License: MIT. See LICENSE 3 4 import json 5 from datetime import datetime 6 7 import click 8 from croniter import croniter 9 10 import frappe 11 from frappe.model.document import Document 12 from frappe.utils import get_datetime, now_datetime 13 from frappe.utils.background_jobs import enqueue, is_job_enqueued 14 15 16 class ScheduledJobType(Document): 17 # begin: auto-generated types 18 # This code is auto-generated. Do not modify anything in this block. 
19 20 from typing import TYPE_CHECKING 21 22 if TYPE_CHECKING: 23 from frappe.types import DF 24 25 create_log: DF.Check 26 cron_format: DF.Data | None 27 frequency: DF.Literal[ 28 "All", 29 "Hourly", 30 "Hourly Long", 31 "Daily", 32 "Daily Long", 33 "Weekly", 34 "Weekly Long", 35 "Monthly", 36 "Monthly Long", 37 "Cron", 38 "Yearly", 39 "Annual", 40 ] 41 last_execution: DF.Datetime | None 42 method: DF.Data 43 next_execution: DF.Datetime | None 44 server_script: DF.Link | None 45 stopped: DF.Check 46 # end: auto-generated types 47 def autoname(self): 48 self.name = ".".join(self.method.split(".")[-2:]) 49 50 def validate(self): 51 if self.frequency != "All": 52 # force logging for all events other than continuous ones (ALL) 53 self.create_log = 1 54 55 def enqueue(self, force=False) -> bool: 56 # enqueue event if last execution is done 57 if self.is_event_due() or force: 58 if not self.is_job_in_queue(): 59 enqueue( 60 "frappe.core.doctype.scheduled_job_type.scheduled_job_type.run_scheduled_job", 61 queue=self.get_queue_name(), 62 job_type=self.method, 63 job_id=self.rq_job_id, 64 ) 65 return True 66 else: 67 frappe.logger("scheduler").error( 68 f"Skipped queueing {self.method} because it was found in queue for {frappe.local.site}" 69 ) 70 71 return False 72 73 def is_event_due(self, current_time=None): 74 """Return true if event is due based on time lapsed since last execution""" 75 # if the next scheduled event is before NOW, then its due! 76 return self.get_next_execution() <= (current_time or now_datetime()) 77 78 def is_job_in_queue(self) -> bool: 79 return is_job_enqueued(self.rq_job_id) 80 81 @property 82 def rq_job_id(self): 83 """Unique ID created to deduplicate jobs with single RQ call.""" 84 return f"scheduled_job::{self.method}" 85 86 @property 87 def next_execution(self): 88 return self.get_next_execution() 89 90 def get_next_execution(self): 91 CRON_MAP = { 92 "Yearly": "0 0 1 1 *", 93 "Annual": "0 0 1 1 *", 94 "Monthly": "0 0 1 * *", 95 "Monthly Long": "0 0 1 * *", 96 "Weekly": "0 0 * * 0", 97 "Weekly Long": "0 0 * * 0", 98 "Daily": "0 0 * * *", 99 "Daily Long": "0 0 * * *", 100 "Hourly": "0 * * * *", 101 "Hourly Long": "0 * * * *", 102 "All": f"*/{(frappe.get_conf().scheduler_interval or 240) // 60} * * * *", 103 } 104 105 if not self.cron_format: 106 self.cron_format = CRON_MAP[self.frequency] 107 108 return croniter( 109 self.cron_format, get_datetime(self.last_execution or datetime(2000, 1, 1)) 110 ).get_next(datetime) 111 112 def execute(self): 113 self.scheduler_log = None 114 try: 115 self.log_status("Start") 116 if self.server_script: 117 script_name = frappe.db.get_value("Server Script", self.server_script) 118 if script_name: 119 frappe.get_doc("Server Script", script_name).execute_scheduled_method() 120 else: 121 frappe.get_attr(self.method)() 122 frappe.db.commit() 123 self.log_status("Complete") 124 except Exception: 125 frappe.db.rollback() 126 self.log_status("Failed") 127 128 def log_status(self, status): 129 # log file 130 frappe.logger("scheduler").info(f"Scheduled Job {status}: {self.method} for {frappe.local.site}") 131 self.update_scheduler_log(status) 132 133 def update_scheduler_log(self, status): 134 if not self.create_log: 135 # self.get_next_execution will work properly iff self.last_execution is properly set 136 if self.frequency == "All" and status == "Start": 137 self.db_set("last_execution", now_datetime(), update_modified=False) 138 frappe.db.commit() 139 return 140 if not self.scheduler_log: 141 self.scheduler_log = frappe.get_doc( 142 
dict(doctype="Scheduled Job Log", scheduled_job_type=self.name) 143 ).insert(ignore_permissions=True) 144 self.scheduler_log.db_set("status", status) 145 if status == "Failed": 146 self.scheduler_log.db_set("details", frappe.get_traceback()) 147 if status == "Start": 148 self.db_set("last_execution", now_datetime(), update_modified=False) 149 frappe.db.commit() 150 151 def get_queue_name(self): 152 return "long" if ("Long" in self.frequency) else "default" 153 154 def on_trash(self): 155 frappe.db.delete("Scheduled Job Log", {"scheduled_job_type": self.name}) 156 157 158 @frappe.whitelist() 159 def execute_event(doc: str): 160 frappe.only_for("System Manager") 161 doc = json.loads(doc) 162 frappe.get_doc("Scheduled Job Type", doc.get("name")).enqueue(force=True) 163 return doc 164 165 166 def run_scheduled_job(job_type: str): 167 """This is a wrapper function that runs a hooks.scheduler_events method""" 168 try: 169 frappe.get_doc("Scheduled Job Type", dict(method=job_type)).execute() 170 except Exception: 171 print(frappe.get_traceback()) 172 173 174 def sync_jobs(hooks: dict = None): 175 frappe.reload_doc("core", "doctype", "scheduled_job_type") 176 scheduler_events = hooks or frappe.get_hooks("scheduler_events") 177 all_events = insert_events(scheduler_events) 178 clear_events(all_events) 179 180 181 def insert_events(scheduler_events: dict) -> list: 182 cron_jobs, event_jobs = [], [] 183 for event_type in scheduler_events: 184 events = scheduler_events.get(event_type) 185 if isinstance(events, dict): 186 cron_jobs += insert_cron_jobs(events) 187 else: 188 # hourly, daily etc 189 event_jobs += insert_event_jobs(events, event_type) 190 return cron_jobs + event_jobs 191 192 193 def insert_cron_jobs(events: dict) -> list: 194 cron_jobs = [] 195 for cron_format in events: 196 for event in events.get(cron_format): 197 cron_jobs.append(event) 198 insert_single_event("Cron", event, cron_format) 199 return cron_jobs 200 201 202 def insert_event_jobs(events: list, event_type: str) -> list: 203 event_jobs = [] 204 for event in events: 205 event_jobs.append(event) 206 frequency = event_type.replace("_", " ").title() 207 insert_single_event(frequency, event) 208 return event_jobs 209 210 211 def insert_single_event(frequency: str, event: str, cron_format: str = None): 212 cron_expr = {"cron_format": cron_format} if cron_format else {} 213 214 try: 215 frappe.get_attr(event) 216 except Exception as e: 217 click.secho(f"{event} is not a valid method: {e}", fg="yellow") 218 219 doc = frappe.get_doc( 220 { 221 "doctype": "Scheduled Job Type", 222 "method": event, 223 "cron_format": cron_format, 224 "frequency": frequency, 225 } 226 ) 227 228 if not frappe.db.exists( 229 "Scheduled Job Type", {"method": event, "frequency": frequency, **cron_expr} 230 ): 231 savepoint = "scheduled_job_type_creation" 232 try: 233 frappe.db.savepoint(savepoint) 234 doc.insert() 235 except frappe.DuplicateEntryError: 236 frappe.db.rollback(save_point=savepoint) 237 doc.delete() 238 doc.insert() 239 240 241 def clear_events(all_events: list): 242 for event in frappe.get_all("Scheduled Job Type", fields=["name", "method", "server_script"]): 243 is_server_script = event.server_script 244 is_defined_in_hooks = event.method in all_events 245 246 if not (is_defined_in_hooks or is_server_script): 247 frappe.delete_doc("Scheduled Job Type", event.name) 248 [end of frappe/core/doctype/scheduled_job_type/scheduled_job_type.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this 
repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/frappe/core/doctype/scheduled_job_type/scheduled_job_type.py b/frappe/core/doctype/scheduled_job_type/scheduled_job_type.py --- a/frappe/core/doctype/scheduled_job_type/scheduled_job_type.py +++ b/frappe/core/doctype/scheduled_job_type/scheduled_job_type.py @@ -105,9 +105,12 @@ if not self.cron_format: self.cron_format = CRON_MAP[self.frequency] - return croniter( - self.cron_format, get_datetime(self.last_execution or datetime(2000, 1, 1)) - ).get_next(datetime) + # If this is a cold start then last_execution will not be set. + # Creation is set as fallback because if very old fallback is set job might trigger + # immediately, even when it's meant to be daily. + # A dynamic fallback like current time might miss the scheduler interval and job will never start. + last_execution = get_datetime(self.last_execution or self.creation) + return croniter(self.cron_format, last_execution).get_next(datetime) def execute(self): self.scheduler_log = None
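The frappe patch above works because of how `croniter` treats its start time: with the old hard-coded `datetime(2000, 1, 1)` fallback, the first computed "next execution" of a brand-new cron Scheduled Job Type lies decades in the past, so `is_event_due()` is immediately true and the script runs once right after saving. Falling back to the document's creation time pushes the first run into the future. A minimal illustration with the cron expression from the issue (the concrete "now" timestamp is made up):

```python
from datetime import datetime
from croniter import croniter

cron_format = "59 23 * * *"                  # repro expression from the issue
now = datetime(2023, 10, 17, 11, 13)         # illustrative current time

# Old fallback: next run computed from year 2000 is long past, so the job is "due" at once.
old_next = croniter(cron_format, datetime(2000, 1, 1)).get_next(datetime)
print(old_next, old_next <= now)             # 2000-01-01 23:59:00 True

# Patched fallback: next run computed from the document's creation time is in the future.
new_next = croniter(cron_format, now).get_next(datetime)
print(new_next, new_next <= now)             # 2023-10-17 23:59:00 False
```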
{"golden_diff": "diff --git a/frappe/core/doctype/scheduled_job_type/scheduled_job_type.py b/frappe/core/doctype/scheduled_job_type/scheduled_job_type.py\n--- a/frappe/core/doctype/scheduled_job_type/scheduled_job_type.py\n+++ b/frappe/core/doctype/scheduled_job_type/scheduled_job_type.py\n@@ -105,9 +105,12 @@\n \t\tif not self.cron_format:\n \t\t\tself.cron_format = CRON_MAP[self.frequency]\n \n-\t\treturn croniter(\n-\t\t\tself.cron_format, get_datetime(self.last_execution or datetime(2000, 1, 1))\n-\t\t).get_next(datetime)\n+\t\t# If this is a cold start then last_execution will not be set.\n+\t\t# Creation is set as fallback because if very old fallback is set job might trigger\n+\t\t# immediately, even when it's meant to be daily.\n+\t\t# A dynamic fallback like current time might miss the scheduler interval and job will never start.\n+\t\tlast_execution = get_datetime(self.last_execution or self.creation)\n+\t\treturn croniter(self.cron_format, last_execution).get_next(datetime)\n \n \tdef execute(self):\n \t\tself.scheduler_log = None\n", "issue": "Cron server script runs twice\n<!--\r\nWelcome to the Frappe Framework issue tracker! Before creating an issue, please heed the following:\r\n\r\n1. This tracker should only be used to report bugs and request features / enhancements to Frappe\r\n - For questions and general support, use https://stackoverflow.com/questions/tagged/frappe\r\n - For documentation issues, refer to https://frappeframework.com/docs/user/en or the developer cheetsheet https://github.com/frappe/frappe/wiki/Developer-Cheatsheet\r\n2. Use the search function before creating a new issue. Duplicates will be closed and directed to\r\n the original discussion.\r\n3. When making a bug report, make sure you provide all required information. The easier it is for\r\n maintainers to reproduce, the faster it'll be fixed.\r\n4. If you think you know what the reason for the bug is, share it with us. Maybe put in a PR \ud83d\ude09\r\n-->\r\n\r\n## Description of the issue\r\nCreated a cron server script with \"13 11 17 10 tue\" cron format.\r\nSo it should run at 11:13 17/10/2023.\r\nBut it runs once after save and runs again at correct time.\r\n\r\n## Context information (for bug reports)\r\n![image](https://github.com/frappe/frappe/assets/710051/1531ef89-3fc0-4b0a-a7af-2fe77d626e11)\r\n\r\n![image](https://github.com/frappe/frappe/assets/710051/21fd224d-e841-4598-967f-b79c9895009b)\r\n\r\n\r\n**Output of `bench version`**\r\n```\r\nBench 5.17.2\r\nFrappe 14.52.0\r\n```\r\n\r\n## Steps to reproduce the issue\r\n\r\n1.Create a server script as \"Scheduler Event\", \"Event Frequency\" as \"Cron\"\r\n2.Make cron format as ```59 23 * * *```\r\n3.Set script as\r\n```frappe.log_error(\"Cron Test\", \"Test\")```\r\n\r\n### Observed result\r\nWatch the Error Log. \r\nIt creates a \"Cron Test\" log once and at the midnight.\r\n\r\n### Expected result\r\nIt should only create the error log at the midnight.\r\n### Stacktrace / full error message\r\n\r\n```\r\nNone\r\n```\r\n\r\n## Additional information\r\n\r\nOS version / distribution, `Frappe` install method, etc.\r\nManual install\r\n\n", "before_files": [{"content": "# Copyright (c) 2021, Frappe Technologies and contributors\n# License: MIT. 
See LICENSE\n\nimport json\nfrom datetime import datetime\n\nimport click\nfrom croniter import croniter\n\nimport frappe\nfrom frappe.model.document import Document\nfrom frappe.utils import get_datetime, now_datetime\nfrom frappe.utils.background_jobs import enqueue, is_job_enqueued\n\n\nclass ScheduledJobType(Document):\n\t# begin: auto-generated types\n\t# This code is auto-generated. Do not modify anything in this block.\n\n\tfrom typing import TYPE_CHECKING\n\n\tif TYPE_CHECKING:\n\t\tfrom frappe.types import DF\n\n\t\tcreate_log: DF.Check\n\t\tcron_format: DF.Data | None\n\t\tfrequency: DF.Literal[\n\t\t\t\"All\",\n\t\t\t\"Hourly\",\n\t\t\t\"Hourly Long\",\n\t\t\t\"Daily\",\n\t\t\t\"Daily Long\",\n\t\t\t\"Weekly\",\n\t\t\t\"Weekly Long\",\n\t\t\t\"Monthly\",\n\t\t\t\"Monthly Long\",\n\t\t\t\"Cron\",\n\t\t\t\"Yearly\",\n\t\t\t\"Annual\",\n\t\t]\n\t\tlast_execution: DF.Datetime | None\n\t\tmethod: DF.Data\n\t\tnext_execution: DF.Datetime | None\n\t\tserver_script: DF.Link | None\n\t\tstopped: DF.Check\n\t# end: auto-generated types\n\tdef autoname(self):\n\t\tself.name = \".\".join(self.method.split(\".\")[-2:])\n\n\tdef validate(self):\n\t\tif self.frequency != \"All\":\n\t\t\t# force logging for all events other than continuous ones (ALL)\n\t\t\tself.create_log = 1\n\n\tdef enqueue(self, force=False) -> bool:\n\t\t# enqueue event if last execution is done\n\t\tif self.is_event_due() or force:\n\t\t\tif not self.is_job_in_queue():\n\t\t\t\tenqueue(\n\t\t\t\t\t\"frappe.core.doctype.scheduled_job_type.scheduled_job_type.run_scheduled_job\",\n\t\t\t\t\tqueue=self.get_queue_name(),\n\t\t\t\t\tjob_type=self.method,\n\t\t\t\t\tjob_id=self.rq_job_id,\n\t\t\t\t)\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\tfrappe.logger(\"scheduler\").error(\n\t\t\t\t\tf\"Skipped queueing {self.method} because it was found in queue for {frappe.local.site}\"\n\t\t\t\t)\n\n\t\treturn False\n\n\tdef is_event_due(self, current_time=None):\n\t\t\"\"\"Return true if event is due based on time lapsed since last execution\"\"\"\n\t\t# if the next scheduled event is before NOW, then its due!\n\t\treturn self.get_next_execution() <= (current_time or now_datetime())\n\n\tdef is_job_in_queue(self) -> bool:\n\t\treturn is_job_enqueued(self.rq_job_id)\n\n\t@property\n\tdef rq_job_id(self):\n\t\t\"\"\"Unique ID created to deduplicate jobs with single RQ call.\"\"\"\n\t\treturn f\"scheduled_job::{self.method}\"\n\n\t@property\n\tdef next_execution(self):\n\t\treturn self.get_next_execution()\n\n\tdef get_next_execution(self):\n\t\tCRON_MAP = {\n\t\t\t\"Yearly\": \"0 0 1 1 *\",\n\t\t\t\"Annual\": \"0 0 1 1 *\",\n\t\t\t\"Monthly\": \"0 0 1 * *\",\n\t\t\t\"Monthly Long\": \"0 0 1 * *\",\n\t\t\t\"Weekly\": \"0 0 * * 0\",\n\t\t\t\"Weekly Long\": \"0 0 * * 0\",\n\t\t\t\"Daily\": \"0 0 * * *\",\n\t\t\t\"Daily Long\": \"0 0 * * *\",\n\t\t\t\"Hourly\": \"0 * * * *\",\n\t\t\t\"Hourly Long\": \"0 * * * *\",\n\t\t\t\"All\": f\"*/{(frappe.get_conf().scheduler_interval or 240) // 60} * * * *\",\n\t\t}\n\n\t\tif not self.cron_format:\n\t\t\tself.cron_format = CRON_MAP[self.frequency]\n\n\t\treturn croniter(\n\t\t\tself.cron_format, get_datetime(self.last_execution or datetime(2000, 1, 1))\n\t\t).get_next(datetime)\n\n\tdef execute(self):\n\t\tself.scheduler_log = None\n\t\ttry:\n\t\t\tself.log_status(\"Start\")\n\t\t\tif self.server_script:\n\t\t\t\tscript_name = frappe.db.get_value(\"Server Script\", self.server_script)\n\t\t\t\tif script_name:\n\t\t\t\t\tfrappe.get_doc(\"Server Script\", 
script_name).execute_scheduled_method()\n\t\t\telse:\n\t\t\t\tfrappe.get_attr(self.method)()\n\t\t\tfrappe.db.commit()\n\t\t\tself.log_status(\"Complete\")\n\t\texcept Exception:\n\t\t\tfrappe.db.rollback()\n\t\t\tself.log_status(\"Failed\")\n\n\tdef log_status(self, status):\n\t\t# log file\n\t\tfrappe.logger(\"scheduler\").info(f\"Scheduled Job {status}: {self.method} for {frappe.local.site}\")\n\t\tself.update_scheduler_log(status)\n\n\tdef update_scheduler_log(self, status):\n\t\tif not self.create_log:\n\t\t\t# self.get_next_execution will work properly iff self.last_execution is properly set\n\t\t\tif self.frequency == \"All\" and status == \"Start\":\n\t\t\t\tself.db_set(\"last_execution\", now_datetime(), update_modified=False)\n\t\t\t\tfrappe.db.commit()\n\t\t\treturn\n\t\tif not self.scheduler_log:\n\t\t\tself.scheduler_log = frappe.get_doc(\n\t\t\t\tdict(doctype=\"Scheduled Job Log\", scheduled_job_type=self.name)\n\t\t\t).insert(ignore_permissions=True)\n\t\tself.scheduler_log.db_set(\"status\", status)\n\t\tif status == \"Failed\":\n\t\t\tself.scheduler_log.db_set(\"details\", frappe.get_traceback())\n\t\tif status == \"Start\":\n\t\t\tself.db_set(\"last_execution\", now_datetime(), update_modified=False)\n\t\tfrappe.db.commit()\n\n\tdef get_queue_name(self):\n\t\treturn \"long\" if (\"Long\" in self.frequency) else \"default\"\n\n\tdef on_trash(self):\n\t\tfrappe.db.delete(\"Scheduled Job Log\", {\"scheduled_job_type\": self.name})\n\n\[email protected]()\ndef execute_event(doc: str):\n\tfrappe.only_for(\"System Manager\")\n\tdoc = json.loads(doc)\n\tfrappe.get_doc(\"Scheduled Job Type\", doc.get(\"name\")).enqueue(force=True)\n\treturn doc\n\n\ndef run_scheduled_job(job_type: str):\n\t\"\"\"This is a wrapper function that runs a hooks.scheduler_events method\"\"\"\n\ttry:\n\t\tfrappe.get_doc(\"Scheduled Job Type\", dict(method=job_type)).execute()\n\texcept Exception:\n\t\tprint(frappe.get_traceback())\n\n\ndef sync_jobs(hooks: dict = None):\n\tfrappe.reload_doc(\"core\", \"doctype\", \"scheduled_job_type\")\n\tscheduler_events = hooks or frappe.get_hooks(\"scheduler_events\")\n\tall_events = insert_events(scheduler_events)\n\tclear_events(all_events)\n\n\ndef insert_events(scheduler_events: dict) -> list:\n\tcron_jobs, event_jobs = [], []\n\tfor event_type in scheduler_events:\n\t\tevents = scheduler_events.get(event_type)\n\t\tif isinstance(events, dict):\n\t\t\tcron_jobs += insert_cron_jobs(events)\n\t\telse:\n\t\t\t# hourly, daily etc\n\t\t\tevent_jobs += insert_event_jobs(events, event_type)\n\treturn cron_jobs + event_jobs\n\n\ndef insert_cron_jobs(events: dict) -> list:\n\tcron_jobs = []\n\tfor cron_format in events:\n\t\tfor event in events.get(cron_format):\n\t\t\tcron_jobs.append(event)\n\t\t\tinsert_single_event(\"Cron\", event, cron_format)\n\treturn cron_jobs\n\n\ndef insert_event_jobs(events: list, event_type: str) -> list:\n\tevent_jobs = []\n\tfor event in events:\n\t\tevent_jobs.append(event)\n\t\tfrequency = event_type.replace(\"_\", \" \").title()\n\t\tinsert_single_event(frequency, event)\n\treturn event_jobs\n\n\ndef insert_single_event(frequency: str, event: str, cron_format: str = None):\n\tcron_expr = {\"cron_format\": cron_format} if cron_format else {}\n\n\ttry:\n\t\tfrappe.get_attr(event)\n\texcept Exception as e:\n\t\tclick.secho(f\"{event} is not a valid method: {e}\", fg=\"yellow\")\n\n\tdoc = frappe.get_doc(\n\t\t{\n\t\t\t\"doctype\": \"Scheduled Job Type\",\n\t\t\t\"method\": event,\n\t\t\t\"cron_format\": cron_format,\n\t\t\t\"frequency\": 
frequency,\n\t\t}\n\t)\n\n\tif not frappe.db.exists(\n\t\t\"Scheduled Job Type\", {\"method\": event, \"frequency\": frequency, **cron_expr}\n\t):\n\t\tsavepoint = \"scheduled_job_type_creation\"\n\t\ttry:\n\t\t\tfrappe.db.savepoint(savepoint)\n\t\t\tdoc.insert()\n\t\texcept frappe.DuplicateEntryError:\n\t\t\tfrappe.db.rollback(save_point=savepoint)\n\t\t\tdoc.delete()\n\t\t\tdoc.insert()\n\n\ndef clear_events(all_events: list):\n\tfor event in frappe.get_all(\"Scheduled Job Type\", fields=[\"name\", \"method\", \"server_script\"]):\n\t\tis_server_script = event.server_script\n\t\tis_defined_in_hooks = event.method in all_events\n\n\t\tif not (is_defined_in_hooks or is_server_script):\n\t\t\tfrappe.delete_doc(\"Scheduled Job Type\", event.name)\n", "path": "frappe/core/doctype/scheduled_job_type/scheduled_job_type.py"}]}
3,707
262
gh_patches_debug_12627
rasdani/github-patches
git_diff
StackStorm__st2-1465
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Custom webhook fails action depending on payload. Hi there, I've faced a weird issue. Say it we have a webhook action like this: https://gist.github.com/dennybaa/00d1936af2f441cfff15 If I issue the following commands: 1. `curl -X POST --data "{\"message\": \"6fd781b849bb\"}" http://172.17.0.8:9101/v1/webhooks/sample` 2. `curl -X POST --data "{\"hits.rate_15m\": 0.4765927732670934, \"hits.count\": 30, \"tags\": [\"metric\"], \"@timestamp\": \"2015-04-23T13:58:28.319Z\", \"hits.rate_5m\": 0.31123263029226506, \"@version\": \"1\", \"message\": \"6fd781b849bb\", \"hits.rate_1m\": 0.08765952620175455}"` In both cases trigger is dispatched. But the action takes place only in the #1 case. </issue> <code> [start of st2actions/st2actions/resultstracker.py] 1 # Licensed to the StackStorm, Inc ('StackStorm') under one or more 2 # contributor license agreements. See the NOTICE file distributed with 3 # this work for additional information regarding copyright ownership. 4 # The ASF licenses this file to You under the Apache License, Version 2.0 5 # (the "License"); you may not use this file except in compliance with 6 # the License. You may obtain a copy of the License at 7 # 8 # http://www.apache.org/licenses/LICENSE-2.0 9 # 10 # Unless required by applicable law or agreed to in writing, software 11 # distributed under the License is distributed on an "AS IS" BASIS, 12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 # See the License for the specific language governing permissions and 14 # limitations under the License. 15 16 import eventlet 17 import importlib 18 import six 19 20 from collections import defaultdict 21 from kombu import Connection 22 from oslo.config import cfg 23 24 from st2actions.query.base import QueryContext 25 from st2common import log as logging 26 from st2common.models.db import action as action_models 27 from st2common.persistence.action import ActionExecutionState 28 from st2common.transport import actionexecutionstate, consumers, publishers 29 30 31 LOG = logging.getLogger(__name__) 32 33 ACTIONSTATE_WORK_Q = actionexecutionstate.get_queue('st2.resultstracker.work', 34 routing_key=publishers.CREATE_RK) 35 36 37 class ResultsTracker(consumers.MessageHandler): 38 message_type = action_models.ActionExecutionStateDB 39 40 def __init__(self, connection, queues): 41 super(ResultsTracker, self).__init__(connection, queues) 42 self._queriers = {} 43 self._query_threads = [] 44 self._failed_imports = set() 45 46 def start(self, wait=False): 47 self._bootstrap() 48 super(ResultsTracker, self).start(wait=wait) 49 50 def wait(self): 51 super(ResultsTracker, self).wait() 52 for thread in self._query_threads(): 53 thread.wait() 54 55 def shutdown(self): 56 super(ResultsTracker, self).shutdown() 57 LOG.info('Stats from queriers:') 58 self._print_stats() 59 60 def _print_stats(self): 61 for name, querier in six.iteritems(self._queriers): 62 if querier: 63 querier.print_stats() 64 65 def _bootstrap(self): 66 all_states = ActionExecutionState.get_all() 67 LOG.info('Found %d pending states in db.' 
% len(all_states)) 68 69 query_contexts_dict = defaultdict(list) 70 for state_db in all_states: 71 try: 72 context = QueryContext.from_model(state_db) 73 except: 74 LOG.exception('Invalid state object: %s', state_db) 75 continue 76 query_module_name = state_db.query_module 77 querier = self.get_querier(query_module_name) 78 79 if querier is not None: 80 query_contexts_dict[querier].append(context) 81 82 for querier, contexts in six.iteritems(query_contexts_dict): 83 LOG.info('Found %d pending actions for query module %s', len(contexts), querier) 84 querier.add_queries(query_contexts=contexts) 85 86 def process(self, query_context): 87 querier = self.get_querier(query_context.query_module) 88 context = QueryContext.from_model(query_context) 89 querier.add_queries(query_contexts=[context]) 90 return 91 92 def get_querier(self, query_module_name): 93 if (query_module_name not in self._queriers and 94 query_module_name not in self._failed_imports): 95 try: 96 query_module = self._import_query_module(query_module_name) 97 except: 98 LOG.exception('Failed importing query module: %s', query_module_name) 99 self._failed_imports.add(query_module_name) 100 self._queriers[query_module_name] = None 101 else: 102 querier = query_module.get_instance() 103 self._queriers[query_module_name] = querier 104 self._query_threads.append(eventlet.spawn(querier.start)) 105 106 return self._queriers[query_module_name] 107 108 def _import_query_module(self, module_name): 109 return importlib.import_module(module_name, package=None) 110 111 112 def get_tracker(): 113 with Connection(cfg.CONF.messaging.url) as conn: 114 return ResultsTracker(conn, [ACTIONSTATE_WORK_Q]) 115 [end of st2actions/st2actions/resultstracker.py] [start of st2common/st2common/models/db/reactor.py] 1 # Licensed to the StackStorm, Inc ('StackStorm') under one or more 2 # contributor license agreements. See the NOTICE file distributed with 3 # this work for additional information regarding copyright ownership. 4 # The ASF licenses this file to You under the Apache License, Version 2.0 5 # (the "License"); you may not use this file except in compliance with 6 # the License. You may obtain a copy of the License at 7 # 8 # http://www.apache.org/licenses/LICENSE-2.0 9 # 10 # Unless required by applicable law or agreed to in writing, software 11 # distributed under the License is distributed on an "AS IS" BASIS, 12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 # See the License for the specific language governing permissions and 14 # limitations under the License. 15 16 import mongoengine as me 17 from st2common.models.db import MongoDBAccess 18 from st2common.models.db import stormbase 19 20 __all__ = [ 21 'SensorTypeDB', 22 'TriggerTypeDB', 23 'TriggerDB', 24 'TriggerInstanceDB', 25 'ActionExecutionSpecDB', 26 'RuleDB' 27 ] 28 29 30 class SensorTypeDB(stormbase.StormBaseDB, stormbase.ContentPackResourceMixin): 31 """ 32 Description of a specific type of a sensor (think of it as a sensor 33 template). 34 35 Attribute: 36 pack - Name of the content pack this sensor belongs to. 37 artifact_uri - URI to the artifact file. 38 entry_point - Full path to the sensor entry point (e.g. module.foo.ClassSensor). 39 trigger_type - A list of references to the TriggerTypeDB objects exposed by this sensor. 40 poll_interval - Poll interval for this sensor. 
41 """ 42 name = me.StringField(required=True) 43 pack = me.StringField(required=True, unique_with='name') 44 artifact_uri = me.StringField() 45 entry_point = me.StringField() 46 trigger_types = me.ListField(field=me.StringField()) 47 poll_interval = me.IntField() 48 enabled = me.BooleanField(default=True, 49 help_text=u'Flag indicating whether the sensor is enabled.') 50 51 52 class TriggerTypeDB(stormbase.StormBaseDB, 53 stormbase.ContentPackResourceMixin, 54 stormbase.TagsMixin): 55 """Description of a specific kind/type of a trigger. The 56 (pack, name) tuple is expected uniquely identify a trigger in 57 the namespace of all triggers provided by a specific trigger_source. 58 Attribute: 59 pack - Name of the content pack this trigger belongs to. 60 trigger_source: Source that owns this trigger type. 61 payload_info: Meta information of the expected payload. 62 """ 63 name = me.StringField(required=True) 64 pack = me.StringField(required=True, unique_with='name') 65 payload_schema = me.DictField() 66 parameters_schema = me.DictField(default={}) 67 68 meta = { 69 'indexes': stormbase.TagsMixin.get_indices() 70 } 71 72 73 class TriggerDB(stormbase.StormBaseDB, stormbase.ContentPackResourceMixin): 74 """ 75 Attribute: 76 pack - Name of the content pack this trigger belongs to. 77 type - Reference to the TriggerType object. 78 parameters - Trigger parameters. 79 """ 80 name = me.StringField(required=True) 81 pack = me.StringField(required=True, unique_with='name') 82 type = me.StringField() 83 parameters = me.DictField() 84 85 86 class TriggerInstanceDB(stormbase.StormFoundationDB): 87 """An instance or occurrence of a type of Trigger. 88 Attribute: 89 trigger: Reference to the Trigger object. 90 payload (dict): payload specific to the occurrence. 91 occurrence_time (datetime): time of occurrence of the trigger. 92 """ 93 trigger = me.StringField() 94 payload = me.DictField() 95 occurrence_time = me.DateTimeField() 96 97 98 class ActionExecutionSpecDB(me.EmbeddedDocument): 99 ref = me.StringField(required=True, unique=False) 100 parameters = me.DictField() 101 102 def __str__(self): 103 result = [] 104 result.append('ActionExecutionSpecDB@') 105 result.append(str(id(self))) 106 result.append('(ref="%s", ' % self.ref) 107 result.append('parameters="%s")' % self.parameters) 108 return ''.join(result) 109 110 111 class RuleDB(stormbase.StormBaseDB, stormbase.TagsMixin): 112 """Specifies the action to invoke on the occurrence of a Trigger. It 113 also includes the transformation to perform to match the impedance 114 between the payload of a TriggerInstance and input of a action. 115 Attribute: 116 trigger: Trigger that trips this rule. 117 criteria: 118 action: Action to execute when the rule is tripped. 119 status: enabled or disabled. If disabled occurrence of the trigger 120 does not lead to execution of a action and vice-versa. 
121 """ 122 trigger = me.StringField() 123 criteria = stormbase.EscapedDictField() 124 action = me.EmbeddedDocumentField(ActionExecutionSpecDB) 125 enabled = me.BooleanField(required=True, default=True, 126 help_text=u'Flag indicating whether the rule is enabled.') 127 128 meta = { 129 'indexes': stormbase.TagsMixin.get_indices() 130 } 131 132 # specialized access objects 133 sensor_type_access = MongoDBAccess(SensorTypeDB) 134 triggertype_access = MongoDBAccess(TriggerTypeDB) 135 trigger_access = MongoDBAccess(TriggerDB) 136 triggerinstance_access = MongoDBAccess(TriggerInstanceDB) 137 rule_access = MongoDBAccess(RuleDB) 138 139 MODELS = [SensorTypeDB, TriggerTypeDB, TriggerDB, TriggerInstanceDB, RuleDB] 140 [end of st2common/st2common/models/db/reactor.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/st2actions/st2actions/resultstracker.py b/st2actions/st2actions/resultstracker.py --- a/st2actions/st2actions/resultstracker.py +++ b/st2actions/st2actions/resultstracker.py @@ -49,7 +49,7 @@ def wait(self): super(ResultsTracker, self).wait() - for thread in self._query_threads(): + for thread in self._query_threads: thread.wait() def shutdown(self): diff --git a/st2common/st2common/models/db/reactor.py b/st2common/st2common/models/db/reactor.py --- a/st2common/st2common/models/db/reactor.py +++ b/st2common/st2common/models/db/reactor.py @@ -91,7 +91,7 @@ occurrence_time (datetime): time of occurrence of the trigger. """ trigger = me.StringField() - payload = me.DictField() + payload = stormbase.EscapedDictField() occurrence_time = me.DateTimeField()
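In the StackStorm record, the payload that silently kills the action is precisely the one whose keys contain dots (`hits.rate_15m`, …); MongoDB treats `.` and a leading `$` in field names specially, which is why the patch stores the trigger-instance payload in `stormbase.EscapedDictField()` instead of a plain `DictField`. A rough, self-contained sketch of the kind of key escaping such a field needs to perform before persisting — the full-width replacement characters below are an assumption for illustration, not st2's actual scheme:

```python
DOT, DOLLAR = "\uff0e", "\uff04"   # assumed stand-ins for '.' and '$'

def escape_keys(value):
    """Recursively rewrite characters MongoDB rejects in document field names."""
    if isinstance(value, dict):
        return {k.replace(".", DOT).replace("$", DOLLAR): escape_keys(v)
                for k, v in value.items()}
    if isinstance(value, list):
        return [escape_keys(v) for v in value]
    return value

payload = {"hits.rate_15m": 0.476, "hits.count": 30, "tags": ["metric"], "message": "6fd781b849bb"}
print(escape_keys(payload))        # keys are now safe to store; reverse the mapping on read
```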
{"golden_diff": "diff --git a/st2actions/st2actions/resultstracker.py b/st2actions/st2actions/resultstracker.py\n--- a/st2actions/st2actions/resultstracker.py\n+++ b/st2actions/st2actions/resultstracker.py\n@@ -49,7 +49,7 @@\n \n def wait(self):\n super(ResultsTracker, self).wait()\n- for thread in self._query_threads():\n+ for thread in self._query_threads:\n thread.wait()\n \n def shutdown(self):\ndiff --git a/st2common/st2common/models/db/reactor.py b/st2common/st2common/models/db/reactor.py\n--- a/st2common/st2common/models/db/reactor.py\n+++ b/st2common/st2common/models/db/reactor.py\n@@ -91,7 +91,7 @@\n occurrence_time (datetime): time of occurrence of the trigger.\n \"\"\"\n trigger = me.StringField()\n- payload = me.DictField()\n+ payload = stormbase.EscapedDictField()\n occurrence_time = me.DateTimeField()\n", "issue": "Custom webhook fails action depending on payload.\nHi there, I've faced a weird issue.\n\nSay it we have a webhook action like this: https://gist.github.com/dennybaa/00d1936af2f441cfff15\nIf I issue the following commands:\n1. `curl -X POST --data \"{\\\"message\\\": \\\"6fd781b849bb\\\"}\" http://172.17.0.8:9101/v1/webhooks/sample`\n2. `curl -X POST --data \"{\\\"hits.rate_15m\\\": 0.4765927732670934, \\\"hits.count\\\": 30, \\\"tags\\\": [\\\"metric\\\"], \\\"@timestamp\\\": \\\"2015-04-23T13:58:28.319Z\\\", \\\"hits.rate_5m\\\": 0.31123263029226506, \\\"@version\\\": \\\"1\\\", \\\"message\\\": \\\"6fd781b849bb\\\", \\\"hits.rate_1m\\\": 0.08765952620175455}\"`\n\nIn both cases trigger is dispatched. But the action takes place only in the #1 case.\n\n", "before_files": [{"content": "# Licensed to the StackStorm, Inc ('StackStorm') under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. 
You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport eventlet\nimport importlib\nimport six\n\nfrom collections import defaultdict\nfrom kombu import Connection\nfrom oslo.config import cfg\n\nfrom st2actions.query.base import QueryContext\nfrom st2common import log as logging\nfrom st2common.models.db import action as action_models\nfrom st2common.persistence.action import ActionExecutionState\nfrom st2common.transport import actionexecutionstate, consumers, publishers\n\n\nLOG = logging.getLogger(__name__)\n\nACTIONSTATE_WORK_Q = actionexecutionstate.get_queue('st2.resultstracker.work',\n routing_key=publishers.CREATE_RK)\n\n\nclass ResultsTracker(consumers.MessageHandler):\n message_type = action_models.ActionExecutionStateDB\n\n def __init__(self, connection, queues):\n super(ResultsTracker, self).__init__(connection, queues)\n self._queriers = {}\n self._query_threads = []\n self._failed_imports = set()\n\n def start(self, wait=False):\n self._bootstrap()\n super(ResultsTracker, self).start(wait=wait)\n\n def wait(self):\n super(ResultsTracker, self).wait()\n for thread in self._query_threads():\n thread.wait()\n\n def shutdown(self):\n super(ResultsTracker, self).shutdown()\n LOG.info('Stats from queriers:')\n self._print_stats()\n\n def _print_stats(self):\n for name, querier in six.iteritems(self._queriers):\n if querier:\n querier.print_stats()\n\n def _bootstrap(self):\n all_states = ActionExecutionState.get_all()\n LOG.info('Found %d pending states in db.' 
% len(all_states))\n\n query_contexts_dict = defaultdict(list)\n for state_db in all_states:\n try:\n context = QueryContext.from_model(state_db)\n except:\n LOG.exception('Invalid state object: %s', state_db)\n continue\n query_module_name = state_db.query_module\n querier = self.get_querier(query_module_name)\n\n if querier is not None:\n query_contexts_dict[querier].append(context)\n\n for querier, contexts in six.iteritems(query_contexts_dict):\n LOG.info('Found %d pending actions for query module %s', len(contexts), querier)\n querier.add_queries(query_contexts=contexts)\n\n def process(self, query_context):\n querier = self.get_querier(query_context.query_module)\n context = QueryContext.from_model(query_context)\n querier.add_queries(query_contexts=[context])\n return\n\n def get_querier(self, query_module_name):\n if (query_module_name not in self._queriers and\n query_module_name not in self._failed_imports):\n try:\n query_module = self._import_query_module(query_module_name)\n except:\n LOG.exception('Failed importing query module: %s', query_module_name)\n self._failed_imports.add(query_module_name)\n self._queriers[query_module_name] = None\n else:\n querier = query_module.get_instance()\n self._queriers[query_module_name] = querier\n self._query_threads.append(eventlet.spawn(querier.start))\n\n return self._queriers[query_module_name]\n\n def _import_query_module(self, module_name):\n return importlib.import_module(module_name, package=None)\n\n\ndef get_tracker():\n with Connection(cfg.CONF.messaging.url) as conn:\n return ResultsTracker(conn, [ACTIONSTATE_WORK_Q])\n", "path": "st2actions/st2actions/resultstracker.py"}, {"content": "# Licensed to the StackStorm, Inc ('StackStorm') under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport mongoengine as me\nfrom st2common.models.db import MongoDBAccess\nfrom st2common.models.db import stormbase\n\n__all__ = [\n 'SensorTypeDB',\n 'TriggerTypeDB',\n 'TriggerDB',\n 'TriggerInstanceDB',\n 'ActionExecutionSpecDB',\n 'RuleDB'\n]\n\n\nclass SensorTypeDB(stormbase.StormBaseDB, stormbase.ContentPackResourceMixin):\n \"\"\"\n Description of a specific type of a sensor (think of it as a sensor\n template).\n\n Attribute:\n pack - Name of the content pack this sensor belongs to.\n artifact_uri - URI to the artifact file.\n entry_point - Full path to the sensor entry point (e.g. 
module.foo.ClassSensor).\n trigger_type - A list of references to the TriggerTypeDB objects exposed by this sensor.\n poll_interval - Poll interval for this sensor.\n \"\"\"\n name = me.StringField(required=True)\n pack = me.StringField(required=True, unique_with='name')\n artifact_uri = me.StringField()\n entry_point = me.StringField()\n trigger_types = me.ListField(field=me.StringField())\n poll_interval = me.IntField()\n enabled = me.BooleanField(default=True,\n help_text=u'Flag indicating whether the sensor is enabled.')\n\n\nclass TriggerTypeDB(stormbase.StormBaseDB,\n stormbase.ContentPackResourceMixin,\n stormbase.TagsMixin):\n \"\"\"Description of a specific kind/type of a trigger. The\n (pack, name) tuple is expected uniquely identify a trigger in\n the namespace of all triggers provided by a specific trigger_source.\n Attribute:\n pack - Name of the content pack this trigger belongs to.\n trigger_source: Source that owns this trigger type.\n payload_info: Meta information of the expected payload.\n \"\"\"\n name = me.StringField(required=True)\n pack = me.StringField(required=True, unique_with='name')\n payload_schema = me.DictField()\n parameters_schema = me.DictField(default={})\n\n meta = {\n 'indexes': stormbase.TagsMixin.get_indices()\n }\n\n\nclass TriggerDB(stormbase.StormBaseDB, stormbase.ContentPackResourceMixin):\n \"\"\"\n Attribute:\n pack - Name of the content pack this trigger belongs to.\n type - Reference to the TriggerType object.\n parameters - Trigger parameters.\n \"\"\"\n name = me.StringField(required=True)\n pack = me.StringField(required=True, unique_with='name')\n type = me.StringField()\n parameters = me.DictField()\n\n\nclass TriggerInstanceDB(stormbase.StormFoundationDB):\n \"\"\"An instance or occurrence of a type of Trigger.\n Attribute:\n trigger: Reference to the Trigger object.\n payload (dict): payload specific to the occurrence.\n occurrence_time (datetime): time of occurrence of the trigger.\n \"\"\"\n trigger = me.StringField()\n payload = me.DictField()\n occurrence_time = me.DateTimeField()\n\n\nclass ActionExecutionSpecDB(me.EmbeddedDocument):\n ref = me.StringField(required=True, unique=False)\n parameters = me.DictField()\n\n def __str__(self):\n result = []\n result.append('ActionExecutionSpecDB@')\n result.append(str(id(self)))\n result.append('(ref=\"%s\", ' % self.ref)\n result.append('parameters=\"%s\")' % self.parameters)\n return ''.join(result)\n\n\nclass RuleDB(stormbase.StormBaseDB, stormbase.TagsMixin):\n \"\"\"Specifies the action to invoke on the occurrence of a Trigger. It\n also includes the transformation to perform to match the impedance\n between the payload of a TriggerInstance and input of a action.\n Attribute:\n trigger: Trigger that trips this rule.\n criteria:\n action: Action to execute when the rule is tripped.\n status: enabled or disabled. 
If disabled occurrence of the trigger\n does not lead to execution of a action and vice-versa.\n \"\"\"\n trigger = me.StringField()\n criteria = stormbase.EscapedDictField()\n action = me.EmbeddedDocumentField(ActionExecutionSpecDB)\n enabled = me.BooleanField(required=True, default=True,\n help_text=u'Flag indicating whether the rule is enabled.')\n\n meta = {\n 'indexes': stormbase.TagsMixin.get_indices()\n }\n\n# specialized access objects\nsensor_type_access = MongoDBAccess(SensorTypeDB)\ntriggertype_access = MongoDBAccess(TriggerTypeDB)\ntrigger_access = MongoDBAccess(TriggerDB)\ntriggerinstance_access = MongoDBAccess(TriggerInstanceDB)\nrule_access = MongoDBAccess(RuleDB)\n\nMODELS = [SensorTypeDB, TriggerTypeDB, TriggerDB, TriggerInstanceDB, RuleDB]\n", "path": "st2common/st2common/models/db/reactor.py"}]}
3,477
227
gh_patches_debug_33534
rasdani/github-patches
git_diff
mampfes__hacs_waste_collection_schedule-1673
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [Feature]: paper dates are not in the complete date-file ### I propose a feature for: Sources ### Describe your wanted feature Hi, right now I saw that the homepage "https://www.geoport-nwm.de/de/abfuhrtermine-geoportal.html" describes 3 ics files for paper-dates: Please can you add them to the integrsation, because I need to add them manually now. Kalenderdatei AltpapiertonneGER Umweltschutz GmbH | downloaden (ICS) Kalenderdatei AltpapiertonneGollan Recycling GmbH | downloaden (ICS) Kalenderdatei AltpapiertonneVeolia Umweltservice Nord GmbH | downloaden (ICS) </issue> <code> [start of custom_components/waste_collection_schedule/waste_collection_schedule/source/geoport_nwm_de.py] 1 import datetime 2 import urllib 3 4 import requests 5 from waste_collection_schedule import Collection # type: ignore[attr-defined] 6 from waste_collection_schedule.service.ICS import ICS 7 8 TITLE = "Landkreis Nordwestmecklenburg" 9 DESCRIPTION = "Source for Landkreis Nordwestmecklenburg" 10 URL = "https://www.geoport-nwm.de" 11 TEST_CASES = { 12 "Rüting": {"district": "Rüting"}, 13 "Grevenstein u. ...": {"district": "Grevenstein u. Ausbau"}, 14 "Seefeld": {"district": "Seefeld/ Testorf- Steinfort"}, 15 "1100l": {"district": "Groß Stieten (1.100 l Behälter)"}, 16 "kl. Bünsdorf": {"district": "Klein Bünsdorf"}, 17 } 18 19 20 class Source: 21 def __init__(self, district): 22 self._district = district 23 self._ics = ICS() 24 25 def fetch(self): 26 today = datetime.date.today() 27 dates = [] 28 if today.month == 12: 29 # On Dec 27 2022, the 2022 schedule was no longer available for test case "Seefeld", all others worked 30 try: 31 dates = self.fetch_year(today.year) 32 except Exception: 33 pass 34 try: 35 dates.extend(self.fetch_year(today.year + 1)) 36 except Exception: 37 pass 38 else: 39 dates = self.fetch_year(today.year) 40 41 entries = [] 42 for d in dates: 43 entries.append(Collection(d[0], d[1])) 44 return entries 45 46 def fetch_year(self, year): 47 arg = convert_to_arg(self._district) 48 r = requests.get( 49 f"https://www.geoport-nwm.de/nwm-download/Abfuhrtermine/ICS/{year}/{arg}.ics" 50 ) 51 r.raise_for_status() 52 return self._ics.convert(r.text) 53 54 55 def convert_to_arg(district): 56 district = district.replace("(1.100 l Behälter)", "1100_l") 57 district = district.replace("ü", "ue") 58 district = district.replace("ö", "oe") 59 district = district.replace("ä", "ae") 60 district = district.replace("ß", "ss") 61 district = district.replace("/", "") 62 district = district.replace("- ", "-") 63 district = district.replace(".", "") 64 district = district.replace(" ", "_") 65 arg = urllib.parse.quote("Ortsteil_" + district) 66 return arg 67 [end of custom_components/waste_collection_schedule/waste_collection_schedule/source/geoport_nwm_de.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/geoport_nwm_de.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/geoport_nwm_de.py --- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/geoport_nwm_de.py +++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/geoport_nwm_de.py @@ -16,6 +16,8 @@ "kl. Bünsdorf": {"district": "Klein Bünsdorf"}, } +API_URL = "https://www.geoport-nwm.de/nwm-download/Abfuhrtermine/ICS/{year}/{arg}.ics" + class Source: def __init__(self, district): @@ -45,22 +47,35 @@ def fetch_year(self, year): arg = convert_to_arg(self._district) - r = requests.get( - f"https://www.geoport-nwm.de/nwm-download/Abfuhrtermine/ICS/{year}/{arg}.ics" - ) + r = requests.get(API_URL.format(year=year, arg=arg)) r.raise_for_status() - return self._ics.convert(r.text) + entries = self._ics.convert(r.text) + for prefix in ( + "Schadstoffmobil", + "Papiertonne_GER", + "Papiertonne_Gollan", + "Papiertonne_Veolia", + ): + try: + r = requests.get(API_URL.format(year=year, arg=f"{prefix}_{arg}")) + r.raise_for_status() + new_entries = self._ics.convert(r.text) + entries.extend(new_entries) + except (ValueError, requests.exceptions.HTTPError): + pass + return entries -def convert_to_arg(district): +def convert_to_arg(district, prefix=""): district = district.replace("(1.100 l Behälter)", "1100_l") district = district.replace("ü", "ue") district = district.replace("ö", "oe") district = district.replace("ä", "ae") district = district.replace("ß", "ss") district = district.replace("/", "") - district = district.replace("- ", "-") + # district = district.replace("- ", "-") failed with Seefeld/ Testorf- Steinfort district = district.replace(".", "") district = district.replace(" ", "_") - arg = urllib.parse.quote("Ortsteil_" + district) + prefix = prefix + "_" if prefix else "" + arg = urllib.parse.quote(f"{prefix}Ortsteil_{district}") return arg
{"golden_diff": "diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/geoport_nwm_de.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/geoport_nwm_de.py\n--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/geoport_nwm_de.py\n+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/geoport_nwm_de.py\n@@ -16,6 +16,8 @@\n \"kl. B\u00fcnsdorf\": {\"district\": \"Klein B\u00fcnsdorf\"},\n }\n \n+API_URL = \"https://www.geoport-nwm.de/nwm-download/Abfuhrtermine/ICS/{year}/{arg}.ics\"\n+\n \n class Source:\n def __init__(self, district):\n@@ -45,22 +47,35 @@\n \n def fetch_year(self, year):\n arg = convert_to_arg(self._district)\n- r = requests.get(\n- f\"https://www.geoport-nwm.de/nwm-download/Abfuhrtermine/ICS/{year}/{arg}.ics\"\n- )\n+ r = requests.get(API_URL.format(year=year, arg=arg))\n r.raise_for_status()\n- return self._ics.convert(r.text)\n+ entries = self._ics.convert(r.text)\n+ for prefix in (\n+ \"Schadstoffmobil\",\n+ \"Papiertonne_GER\",\n+ \"Papiertonne_Gollan\",\n+ \"Papiertonne_Veolia\",\n+ ):\n+ try:\n+ r = requests.get(API_URL.format(year=year, arg=f\"{prefix}_{arg}\"))\n+ r.raise_for_status()\n+ new_entries = self._ics.convert(r.text)\n+ entries.extend(new_entries)\n+ except (ValueError, requests.exceptions.HTTPError):\n+ pass\n+ return entries\n \n \n-def convert_to_arg(district):\n+def convert_to_arg(district, prefix=\"\"):\n district = district.replace(\"(1.100 l Beh\u00e4lter)\", \"1100_l\")\n district = district.replace(\"\u00fc\", \"ue\")\n district = district.replace(\"\u00f6\", \"oe\")\n district = district.replace(\"\u00e4\", \"ae\")\n district = district.replace(\"\u00df\", \"ss\")\n district = district.replace(\"/\", \"\")\n- district = district.replace(\"- \", \"-\")\n+ # district = district.replace(\"- \", \"-\") failed with Seefeld/ Testorf- Steinfort\n district = district.replace(\".\", \"\")\n district = district.replace(\" \", \"_\")\n- arg = urllib.parse.quote(\"Ortsteil_\" + district)\n+ prefix = prefix + \"_\" if prefix else \"\"\n+ arg = urllib.parse.quote(f\"{prefix}Ortsteil_{district}\")\n return arg\n", "issue": "[Feature]: paper dates are not in the complete date-file\n### I propose a feature for:\r\n\r\nSources\r\n\r\n### Describe your wanted feature\r\n\r\nHi,\r\nright now I saw that the homepage \"https://www.geoport-nwm.de/de/abfuhrtermine-geoportal.html\" describes 3 ics files for paper-dates: \r\nPlease can you add them to the integrsation, because I need to add them manually now.\r\n\r\nKalenderdatei AltpapiertonneGER Umweltschutz GmbH | downloaden (ICS)\r\nKalenderdatei AltpapiertonneGollan Recycling GmbH | downloaden (ICS)\r\nKalenderdatei AltpapiertonneVeolia Umweltservice Nord GmbH | downloaden (ICS)\r\n\n", "before_files": [{"content": "import datetime\nimport urllib\n\nimport requests\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\nfrom waste_collection_schedule.service.ICS import ICS\n\nTITLE = \"Landkreis Nordwestmecklenburg\"\nDESCRIPTION = \"Source for Landkreis Nordwestmecklenburg\"\nURL = \"https://www.geoport-nwm.de\"\nTEST_CASES = {\n \"R\u00fcting\": {\"district\": \"R\u00fcting\"},\n \"Grevenstein u. ...\": {\"district\": \"Grevenstein u. Ausbau\"},\n \"Seefeld\": {\"district\": \"Seefeld/ Testorf- Steinfort\"},\n \"1100l\": {\"district\": \"Gro\u00df Stieten (1.100 l Beh\u00e4lter)\"},\n \"kl. 
B\u00fcnsdorf\": {\"district\": \"Klein B\u00fcnsdorf\"},\n}\n\n\nclass Source:\n def __init__(self, district):\n self._district = district\n self._ics = ICS()\n\n def fetch(self):\n today = datetime.date.today()\n dates = []\n if today.month == 12:\n # On Dec 27 2022, the 2022 schedule was no longer available for test case \"Seefeld\", all others worked\n try:\n dates = self.fetch_year(today.year)\n except Exception:\n pass\n try:\n dates.extend(self.fetch_year(today.year + 1))\n except Exception:\n pass\n else:\n dates = self.fetch_year(today.year)\n\n entries = []\n for d in dates:\n entries.append(Collection(d[0], d[1]))\n return entries\n\n def fetch_year(self, year):\n arg = convert_to_arg(self._district)\n r = requests.get(\n f\"https://www.geoport-nwm.de/nwm-download/Abfuhrtermine/ICS/{year}/{arg}.ics\"\n )\n r.raise_for_status()\n return self._ics.convert(r.text)\n\n\ndef convert_to_arg(district):\n district = district.replace(\"(1.100 l Beh\u00e4lter)\", \"1100_l\")\n district = district.replace(\"\u00fc\", \"ue\")\n district = district.replace(\"\u00f6\", \"oe\")\n district = district.replace(\"\u00e4\", \"ae\")\n district = district.replace(\"\u00df\", \"ss\")\n district = district.replace(\"/\", \"\")\n district = district.replace(\"- \", \"-\")\n district = district.replace(\".\", \"\")\n district = district.replace(\" \", \"_\")\n arg = urllib.parse.quote(\"Ortsteil_\" + district)\n return arg\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/geoport_nwm_de.py"}]}
1,393
590
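A quick illustration of the URL scheme behind the golden diff in gh_patches_debug_33534 above: the three Altpapiertonne calendars named in the issue reuse the same ICS template as the regular district calendar, only with a prefix added to the argument. The helper below is a hypothetical sketch written for this note, not part of the source module, and its district normalisation is deliberately simplified (the real convert_to_arg also rewrites umlauts, "ß", slashes and the "(1.100 l Behälter)" suffix).

```python
import urllib.parse

# Same template the module queries; the paper-bin prefixes come from the golden diff.
API_URL = "https://www.geoport-nwm.de/nwm-download/Abfuhrtermine/ICS/{year}/{arg}.ics"
PAPER_PREFIXES = ("Papiertonne_GER", "Papiertonne_Gollan", "Papiertonne_Veolia")

def build_paper_urls(district, year):
    # Simplified normalisation for illustration only.
    base = "Ortsteil_" + district.replace(" ", "_")
    args = [base] + ["{0}_{1}".format(prefix, base) for prefix in PAPER_PREFIXES]
    return [API_URL.format(year=year, arg=urllib.parse.quote(arg)) for arg in args]

if __name__ == "__main__":
    for url in build_paper_urls("Rüting", 2024):
        print(url)
```

In the actual fix the requests for the paper-bin URLs are wrapped in try/except, since not every district publishes all three extra calendars.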
gh_patches_debug_19205
rasdani/github-patches
git_diff
strawberry-graphql__strawberry-2205
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Move non-core dependencies to dedicated groups @la4de has made a very useful playground for Strawberry, available (for now) here -> https://la4de.github.io/strawberry-playground/ Unfortunately some of the default dependencies aren't uploaded as wheels (see https://github.com/la4de/strawberry-playground/issues/1). Maybe it could time to move some of these deps to specific groups, we definitely don't need python-multipart installed by default :) Here's a list of proposed groups based on dependencies installed when doing `pip install strawberry-graphql`: **Default**: - cached-property - sentinel - typing-extensions - graphql-core - python-dateutil (I think we need this because of compatibility with python 3.7) **CLI**: - click - pygments **All web frameworks**: - python-multipart </issue> <code> [start of strawberry/utils/debug.py] 1 import datetime 2 import json 3 from json import JSONEncoder 4 from typing import Any, Dict, Optional 5 6 from pygments import highlight, lexers 7 from pygments.formatters import Terminal256Formatter 8 9 from .graphql_lexer import GraphQLLexer 10 11 12 class StrawberryJSONEncoder(JSONEncoder): 13 def default(self, o: Any) -> Any: 14 return repr(o) 15 16 17 def pretty_print_graphql_operation( 18 operation_name: Optional[str], query: str, variables: Optional[Dict["str", Any]] 19 ): 20 """Pretty print a GraphQL operation using pygments. 21 22 Won't print introspection operation to prevent noise in the output.""" 23 24 if operation_name == "IntrospectionQuery": 25 return 26 27 now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") 28 29 print(f"[{now}]: {operation_name or 'No operation name'}") 30 print(highlight(query, GraphQLLexer(), Terminal256Formatter())) 31 32 if variables: 33 variables_json = json.dumps(variables, indent=4, cls=StrawberryJSONEncoder) 34 35 print(highlight(variables_json, lexers.JsonLexer(), Terminal256Formatter())) 36 [end of strawberry/utils/debug.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/strawberry/utils/debug.py b/strawberry/utils/debug.py --- a/strawberry/utils/debug.py +++ b/strawberry/utils/debug.py @@ -3,11 +3,6 @@ from json import JSONEncoder from typing import Any, Dict, Optional -from pygments import highlight, lexers -from pygments.formatters import Terminal256Formatter - -from .graphql_lexer import GraphQLLexer - class StrawberryJSONEncoder(JSONEncoder): def default(self, o: Any) -> Any: @@ -21,6 +16,17 @@ Won't print introspection operation to prevent noise in the output.""" + try: + from pygments import highlight, lexers + from pygments.formatters import Terminal256Formatter + except ImportError as e: + raise ImportError( + "pygments is not installed but is required for debug output, install it " + "directly or run `pip install strawberry-graphql[debug-server]`" + ) from e + + from .graphql_lexer import GraphQLLexer + if operation_name == "IntrospectionQuery": return
{"golden_diff": "diff --git a/strawberry/utils/debug.py b/strawberry/utils/debug.py\n--- a/strawberry/utils/debug.py\n+++ b/strawberry/utils/debug.py\n@@ -3,11 +3,6 @@\n from json import JSONEncoder\n from typing import Any, Dict, Optional\n \n-from pygments import highlight, lexers\n-from pygments.formatters import Terminal256Formatter\n-\n-from .graphql_lexer import GraphQLLexer\n-\n \n class StrawberryJSONEncoder(JSONEncoder):\n def default(self, o: Any) -> Any:\n@@ -21,6 +16,17 @@\n \n Won't print introspection operation to prevent noise in the output.\"\"\"\n \n+ try:\n+ from pygments import highlight, lexers\n+ from pygments.formatters import Terminal256Formatter\n+ except ImportError as e:\n+ raise ImportError(\n+ \"pygments is not installed but is required for debug output, install it \"\n+ \"directly or run `pip install strawberry-graphql[debug-server]`\"\n+ ) from e\n+\n+ from .graphql_lexer import GraphQLLexer\n+\n if operation_name == \"IntrospectionQuery\":\n return\n", "issue": "Move non-core dependencies to dedicated groups\n@la4de has made a very useful playground for Strawberry, available (for now) here -> https://la4de.github.io/strawberry-playground/\r\n\r\nUnfortunately some of the default dependencies aren't uploaded as wheels (see https://github.com/la4de/strawberry-playground/issues/1).\r\n\r\nMaybe it could time to move some of these deps to specific groups, we definitely don't need python-multipart installed by default :)\r\n\r\nHere's a list of proposed groups based on dependencies installed when doing `pip install strawberry-graphql`:\r\n\r\n**Default**:\r\n \r\n- cached-property\r\n- sentinel\r\n- typing-extensions\r\n- graphql-core\r\n- python-dateutil (I think we need this because of compatibility with python 3.7)\r\n\r\n**CLI**:\r\n\r\n- click\r\n- pygments\r\n\r\n**All web frameworks**:\r\n\r\n- python-multipart\r\n\r\n\r\n\n", "before_files": [{"content": "import datetime\nimport json\nfrom json import JSONEncoder\nfrom typing import Any, Dict, Optional\n\nfrom pygments import highlight, lexers\nfrom pygments.formatters import Terminal256Formatter\n\nfrom .graphql_lexer import GraphQLLexer\n\n\nclass StrawberryJSONEncoder(JSONEncoder):\n def default(self, o: Any) -> Any:\n return repr(o)\n\n\ndef pretty_print_graphql_operation(\n operation_name: Optional[str], query: str, variables: Optional[Dict[\"str\", Any]]\n):\n \"\"\"Pretty print a GraphQL operation using pygments.\n\n Won't print introspection operation to prevent noise in the output.\"\"\"\n\n if operation_name == \"IntrospectionQuery\":\n return\n\n now = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\n print(f\"[{now}]: {operation_name or 'No operation name'}\")\n print(highlight(query, GraphQLLexer(), Terminal256Formatter()))\n\n if variables:\n variables_json = json.dumps(variables, indent=4, cls=StrawberryJSONEncoder)\n\n print(highlight(variables_json, lexers.JsonLexer(), Terminal256Formatter()))\n", "path": "strawberry/utils/debug.py"}]}
1,036
258
gh_patches_debug_7588
rasdani/github-patches
git_diff
ansible-collections__community.general-5687
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> gconftool2 throws an IndexError exception when setting configuration ### Summary When setting a key using gconftool2, an IndexError exception is thrown because the wrong index is used when formatting the command string. It appears to have been broken since version 4.8.1 of this collection. It should be a simple one character fix and would be great if we could have it back ported as a patch for the older releases. Effectively `cmd.extend(["--type", self.value_type, "--{3}".format(call_type), self.key, self.value])` Should be `cmd.extend(["--type", self.value_type, "--{0}".format(call_type), self.key, self.value])` ### Issue Type Bug Report ### Component Name gconftool2 ### Ansible Version ```console (paste below) $ ansible --version ansible [core 2.13.5] config file = /Users/***/work/ansible/ansible.cfg configured module search path = ['/Users/***/work/ansible/library'] ansible python module location = /Users/***/Library/Python/3.9/lib/python/site-packages/ansible ansible collection location = /Users/***/.ansible/collections:/usr/share/ansible/collections executable location = /Users/***/Library/Python/3.9/bin/ansible python version = 3.9.6 (default, Sep 26 2022, 11:37:49) [Clang 14.0.0 (clang-1400.0.29.202)] jinja version = 3.1.2 libyaml = True ``` ### Community.general Version ```console (paste below) $ ansible-galaxy collection list community.general Collection Version ----------------- ------- community.general 5.7.0 ``` ### Configuration ```yaml (paste below) - community.general.gconftool2: key: '/desktop/gnome/remote_access/enabled' value_type: 'bool' value: 'true' state: 'present' ``` ### OS / Environment Targeting CentOS 7.9 ### Steps to Reproduce <!--- Paste example playbooks or commands between quotes below --> ```yaml (paste below) ``` ### Expected Results I expect gconftool2 to set the key and not throw an exception ### Actual Results ```console (paste below) Traceback (most recent call last): File \"/home/***/.ansible/tmp/ansible-tmp-1669197386.99966-48716-109362476478706/AnsiballZ_gconftool2.py\", line 107, in <module> _ansiballz_main() File \"/home/***/.ansible/tmp/ansible-tmp-1669197386.99966-48716-109362476478706/AnsiballZ_gconftool2.py\", line 99, in _ansiballz_main invoke_module(zipped_mod, temp_path, ANSIBALLZ_PARAMS) File \"/home/***/.ansible/tmp/ansible-tmp-1669197386.99966-48716-109362476478706/AnsiballZ_gconftool2.py\", line 48, in invoke_module run_name='__main__', alter_sys=True) File \"/usr/lib64/python2.7/runpy.py\", line 176, in run_module fname, loader, pkg_name) File \"/usr/lib64/python2.7/runpy.py\", line 82, in _run_module_code mod_name, mod_fname, mod_loader, pkg_name) File \"/usr/lib64/python2.7/runpy.py\", line 72, in _run_code exec code in run_globals File \"/tmp/ansible_community.general.gconftool2_payload__SXOuh/ansible_community.general.gconftool2_payload.zip/ansible_collections/community/general/plugins/modules/system/gconftool2.py\", line 230, in <module> File \"/tmp/ansible_community.general.gconftool2_payload__SXOuh/ansible_community.general.gconftool2_payload.zip/ansible_collections/community/general/plugins/modules/system/gconftool2.py\", line 213, in main File \"/tmp/ansible_community.general.gconftool2_payload__SXOuh/ansible_community.general.gconftool2_payload.zip/ansible_collections/community/general/plugins/modules/system/gconftool2.py\", line 128, in call IndexError: tuple index out of range ``` ### Code of Conduct - [X] I agree to 
follow the Ansible Code of Conduct </issue> <code> [start of plugins/modules/system/gconftool2.py] 1 #!/usr/bin/python 2 # -*- coding: utf-8 -*- 3 4 # Copyright (c) 2016, Kenneth D. Evensen <[email protected]> 5 # Copyright (c) 2017, Abhijeet Kasurde <[email protected]> 6 # GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) 7 # SPDX-License-Identifier: GPL-3.0-or-later 8 9 from __future__ import absolute_import, division, print_function 10 __metaclass__ = type 11 12 DOCUMENTATION = ''' 13 module: gconftool2 14 author: 15 - Kenneth D. Evensen (@kevensen) 16 short_description: Edit GNOME Configurations 17 description: 18 - This module allows for the manipulation of GNOME 2 Configuration via 19 gconftool-2. Please see the gconftool-2(1) man pages for more details. 20 options: 21 key: 22 type: str 23 description: 24 - A GConf preference key is an element in the GConf repository 25 that corresponds to an application preference. See man gconftool-2(1) 26 required: true 27 value: 28 type: str 29 description: 30 - Preference keys typically have simple values such as strings, 31 integers, or lists of strings and integers. This is ignored if the state 32 is "get". See man gconftool-2(1) 33 value_type: 34 type: str 35 description: 36 - The type of value being set. This is ignored if the state is "get". 37 choices: [ bool, float, int, string ] 38 state: 39 type: str 40 description: 41 - The action to take upon the key/value. 42 required: true 43 choices: [ absent, get, present ] 44 config_source: 45 type: str 46 description: 47 - Specify a configuration source to use rather than the default path. 48 See man gconftool-2(1) 49 direct: 50 description: 51 - Access the config database directly, bypassing server. If direct is 52 specified then the config_source must be specified as well. 53 See man gconftool-2(1) 54 type: bool 55 default: false 56 ''' 57 58 EXAMPLES = """ 59 - name: Change the widget font to "Serif 12" 60 community.general.gconftool2: 61 key: "/desktop/gnome/interface/font_name" 62 value_type: "string" 63 value: "Serif 12" 64 """ 65 66 RETURN = ''' 67 key: 68 description: The key specified in the module parameters 69 returned: success 70 type: str 71 sample: /desktop/gnome/interface/font_name 72 value_type: 73 description: The type of the value that was changed 74 returned: success 75 type: str 76 sample: string 77 value: 78 description: The value of the preference key after executing the module 79 returned: success 80 type: str 81 sample: "Serif 12" 82 ... 
83 ''' 84 85 from ansible.module_utils.basic import AnsibleModule 86 87 88 class GConf2Preference(object): 89 def __init__(self, ansible, key, value_type, value, 90 direct=False, config_source=""): 91 self.ansible = ansible 92 self.key = key 93 self.value_type = value_type 94 self.value = value 95 self.config_source = config_source 96 self.direct = direct 97 98 def value_already_set(self): 99 return False 100 101 def call(self, call_type, fail_onerr=True): 102 """ Helper function to perform gconftool-2 operations """ 103 config_source = [] 104 direct = [] 105 changed = False 106 out = '' 107 108 # If the configuration source is different from the default, create 109 # the argument 110 if self.config_source is not None and len(self.config_source) > 0: 111 config_source = ["--config-source", self.config_source] 112 113 # If direct is true, create the argument 114 if self.direct: 115 direct = ["--direct"] 116 117 # Execute the call 118 cmd = ["gconftool-2"] 119 try: 120 # If the call is "get", then we don't need as many parameters and 121 # we can ignore some 122 if call_type == 'get': 123 cmd.extend(["--get", self.key]) 124 # Otherwise, we will use all relevant parameters 125 elif call_type == 'set': 126 cmd.extend(direct) 127 cmd.extend(config_source) 128 cmd.extend(["--type", self.value_type, "--{3}".format(call_type), self.key, self.value]) 129 elif call_type == 'unset': 130 cmd.extend(["--unset", self.key]) 131 132 # Start external command 133 rc, out, err = self.ansible.run_command(cmd) 134 135 if err and fail_onerr: 136 self.ansible.fail_json(msg='gconftool-2 failed with ' 137 'error: %s' % (str(err))) 138 else: 139 changed = True 140 141 except OSError as exception: 142 self.ansible.fail_json(msg='gconftool-2 failed with exception: ' 143 '%s' % exception) 144 return changed, out.rstrip() 145 146 147 def main(): 148 # Setup the Ansible module 149 module = AnsibleModule( 150 argument_spec=dict( 151 key=dict(type='str', required=True, no_log=False), 152 value_type=dict(type='str', choices=['bool', 'float', 'int', 'string']), 153 value=dict(type='str'), 154 state=dict(type='str', required=True, choices=['absent', 'get', 'present']), 155 direct=dict(type='bool', default=False), 156 config_source=dict(type='str'), 157 ), 158 supports_check_mode=True 159 ) 160 161 state_values = {"present": "set", "absent": "unset", "get": "get"} 162 163 # Assign module values to dictionary values 164 key = module.params['key'] 165 value_type = module.params['value_type'] 166 if module.params['value'].lower() == "true": 167 value = "true" 168 elif module.params['value'] == "false": 169 value = "false" 170 else: 171 value = module.params['value'] 172 173 state = state_values[module.params['state']] 174 direct = module.params['direct'] 175 config_source = module.params['config_source'] 176 177 # Initialize some variables for later 178 change = False 179 new_value = '' 180 181 if state != "get": 182 if value is None or value == "": 183 module.fail_json(msg='State %s requires "value" to be set' 184 % str(state)) 185 elif value_type is None or value_type == "": 186 module.fail_json(msg='State %s requires "value_type" to be set' 187 % str(state)) 188 189 if direct and config_source is None: 190 module.fail_json(msg='If "direct" is "true" then the ' + 191 '"config_source" must be specified') 192 elif not direct and config_source is not None: 193 module.fail_json(msg='If the "config_source" is specified ' + 194 'then "direct" must be "true"') 195 196 # Create a gconf2 preference 197 gconf_pref = 
GConf2Preference(module, key, value_type, 198 value, direct, config_source) 199 # Now we get the current value, if not found don't fail 200 dummy, current_value = gconf_pref.call("get", fail_onerr=False) 201 202 # Check if the current value equals the value we want to set. If not, make 203 # a change 204 if current_value != value: 205 # If check mode, we know a change would have occurred. 206 if module.check_mode: 207 # So we will set the change to True 208 change = True 209 # And set the new_value to the value that would have been set 210 new_value = value 211 # If not check mode make the change. 212 else: 213 change, new_value = gconf_pref.call(state) 214 # If the value we want to set is the same as the current_value, we will 215 # set the new_value to the current_value for reporting 216 else: 217 new_value = current_value 218 219 facts = dict(gconftool2={'changed': change, 220 'key': key, 221 'value_type': value_type, 222 'new_value': new_value, 223 'previous_value': current_value, 224 'playbook_value': module.params['value']}) 225 226 module.exit_json(changed=change, ansible_facts=facts) 227 228 229 if __name__ == '__main__': 230 main() 231 [end of plugins/modules/system/gconftool2.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/plugins/modules/system/gconftool2.py b/plugins/modules/system/gconftool2.py --- a/plugins/modules/system/gconftool2.py +++ b/plugins/modules/system/gconftool2.py @@ -125,7 +125,7 @@ elif call_type == 'set': cmd.extend(direct) cmd.extend(config_source) - cmd.extend(["--type", self.value_type, "--{3}".format(call_type), self.key, self.value]) + cmd.extend(["--type", self.value_type, "--{0}".format(call_type), self.key, self.value]) elif call_type == 'unset': cmd.extend(["--unset", self.key])
{"golden_diff": "diff --git a/plugins/modules/system/gconftool2.py b/plugins/modules/system/gconftool2.py\n--- a/plugins/modules/system/gconftool2.py\n+++ b/plugins/modules/system/gconftool2.py\n@@ -125,7 +125,7 @@\n elif call_type == 'set':\n cmd.extend(direct)\n cmd.extend(config_source)\n- cmd.extend([\"--type\", self.value_type, \"--{3}\".format(call_type), self.key, self.value])\n+ cmd.extend([\"--type\", self.value_type, \"--{0}\".format(call_type), self.key, self.value])\n elif call_type == 'unset':\n cmd.extend([\"--unset\", self.key])\n", "issue": "gconftool2 throws an IndexError exception when setting configuration\n### Summary\n\nWhen setting a key using gconftool2, an IndexError exception is thrown because the wrong index is used when formatting the command string. It appears to have been broken since version 4.8.1 of this collection. It should be a simple one character fix and would be great if we could have it back ported as a patch for the older releases.\r\n\r\nEffectively \r\n`cmd.extend([\"--type\", self.value_type, \"--{3}\".format(call_type), self.key, self.value])`\r\nShould be\r\n`cmd.extend([\"--type\", self.value_type, \"--{0}\".format(call_type), self.key, self.value])`\n\n### Issue Type\n\nBug Report\n\n### Component Name\n\ngconftool2\n\n### Ansible Version\n\n```console (paste below)\r\n$ ansible --version\r\nansible [core 2.13.5]\r\n config file = /Users/***/work/ansible/ansible.cfg\r\n configured module search path = ['/Users/***/work/ansible/library']\r\n ansible python module location = /Users/***/Library/Python/3.9/lib/python/site-packages/ansible\r\n ansible collection location = /Users/***/.ansible/collections:/usr/share/ansible/collections\r\n executable location = /Users/***/Library/Python/3.9/bin/ansible\r\n python version = 3.9.6 (default, Sep 26 2022, 11:37:49) [Clang 14.0.0 (clang-1400.0.29.202)]\r\n jinja version = 3.1.2\r\n libyaml = True\r\n```\r\n\n\n### Community.general Version\n\n```console (paste below)\r\n$ ansible-galaxy collection list community.general\r\nCollection Version\r\n----------------- -------\r\ncommunity.general 5.7.0 \r\n```\r\n\n\n### Configuration\n\n```yaml (paste below)\r\n- community.general.gconftool2:\r\n key: '/desktop/gnome/remote_access/enabled'\r\n value_type: 'bool'\r\n value: 'true'\r\n state: 'present'\r\n```\r\n\n\n### OS / Environment\n\nTargeting CentOS 7.9\n\n### Steps to Reproduce\n\n<!--- Paste example playbooks or commands between quotes below -->\r\n```yaml (paste below)\r\n\r\n```\r\n\n\n### Expected Results\n\nI expect gconftool2 to set the key and not throw an exception\n\n### Actual Results\n\n```console (paste below)\r\nTraceback (most recent call last):\r\n File \\\"/home/***/.ansible/tmp/ansible-tmp-1669197386.99966-48716-109362476478706/AnsiballZ_gconftool2.py\\\", line 107, in <module>\r\n _ansiballz_main()\r\n File \\\"/home/***/.ansible/tmp/ansible-tmp-1669197386.99966-48716-109362476478706/AnsiballZ_gconftool2.py\\\", line 99, in _ansiballz_main\r\n invoke_module(zipped_mod, temp_path, ANSIBALLZ_PARAMS)\r\n File \\\"/home/***/.ansible/tmp/ansible-tmp-1669197386.99966-48716-109362476478706/AnsiballZ_gconftool2.py\\\", line 48, in invoke_module\r\n run_name='__main__', alter_sys=True)\r\n File \\\"/usr/lib64/python2.7/runpy.py\\\", line 176, in run_module\r\n fname, loader, pkg_name)\r\n File \\\"/usr/lib64/python2.7/runpy.py\\\", line 82, in _run_module_code\r\n mod_name, mod_fname, mod_loader, pkg_name)\r\n File \\\"/usr/lib64/python2.7/runpy.py\\\", line 72, in _run_code\r\n exec code in 
run_globals\r\n File \\\"/tmp/ansible_community.general.gconftool2_payload__SXOuh/ansible_community.general.gconftool2_payload.zip/ansible_collections/community/general/plugins/modules/system/gconftool2.py\\\", line 230, in <module>\r\n File \\\"/tmp/ansible_community.general.gconftool2_payload__SXOuh/ansible_community.general.gconftool2_payload.zip/ansible_collections/community/general/plugins/modules/system/gconftool2.py\\\", line 213, in main\r\n File \\\"/tmp/ansible_community.general.gconftool2_payload__SXOuh/ansible_community.general.gconftool2_payload.zip/ansible_collections/community/general/plugins/modules/system/gconftool2.py\\\", line 128, in call\r\n IndexError: tuple index out of range\r\n```\r\n\n\n### Code of Conduct\n\n- [X] I agree to follow the Ansible Code of Conduct\n", "before_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# Copyright (c) 2016, Kenneth D. Evensen <[email protected]>\n# Copyright (c) 2017, Abhijeet Kasurde <[email protected]>\n# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)\n# SPDX-License-Identifier: GPL-3.0-or-later\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\nDOCUMENTATION = '''\nmodule: gconftool2\nauthor:\n - Kenneth D. Evensen (@kevensen)\nshort_description: Edit GNOME Configurations\ndescription:\n - This module allows for the manipulation of GNOME 2 Configuration via\n gconftool-2. Please see the gconftool-2(1) man pages for more details.\noptions:\n key:\n type: str\n description:\n - A GConf preference key is an element in the GConf repository\n that corresponds to an application preference. See man gconftool-2(1)\n required: true\n value:\n type: str\n description:\n - Preference keys typically have simple values such as strings,\n integers, or lists of strings and integers. This is ignored if the state\n is \"get\". See man gconftool-2(1)\n value_type:\n type: str\n description:\n - The type of value being set. This is ignored if the state is \"get\".\n choices: [ bool, float, int, string ]\n state:\n type: str\n description:\n - The action to take upon the key/value.\n required: true\n choices: [ absent, get, present ]\n config_source:\n type: str\n description:\n - Specify a configuration source to use rather than the default path.\n See man gconftool-2(1)\n direct:\n description:\n - Access the config database directly, bypassing server. 
If direct is\n specified then the config_source must be specified as well.\n See man gconftool-2(1)\n type: bool\n default: false\n'''\n\nEXAMPLES = \"\"\"\n- name: Change the widget font to \"Serif 12\"\n community.general.gconftool2:\n key: \"/desktop/gnome/interface/font_name\"\n value_type: \"string\"\n value: \"Serif 12\"\n\"\"\"\n\nRETURN = '''\n key:\n description: The key specified in the module parameters\n returned: success\n type: str\n sample: /desktop/gnome/interface/font_name\n value_type:\n description: The type of the value that was changed\n returned: success\n type: str\n sample: string\n value:\n description: The value of the preference key after executing the module\n returned: success\n type: str\n sample: \"Serif 12\"\n...\n'''\n\nfrom ansible.module_utils.basic import AnsibleModule\n\n\nclass GConf2Preference(object):\n def __init__(self, ansible, key, value_type, value,\n direct=False, config_source=\"\"):\n self.ansible = ansible\n self.key = key\n self.value_type = value_type\n self.value = value\n self.config_source = config_source\n self.direct = direct\n\n def value_already_set(self):\n return False\n\n def call(self, call_type, fail_onerr=True):\n \"\"\" Helper function to perform gconftool-2 operations \"\"\"\n config_source = []\n direct = []\n changed = False\n out = ''\n\n # If the configuration source is different from the default, create\n # the argument\n if self.config_source is not None and len(self.config_source) > 0:\n config_source = [\"--config-source\", self.config_source]\n\n # If direct is true, create the argument\n if self.direct:\n direct = [\"--direct\"]\n\n # Execute the call\n cmd = [\"gconftool-2\"]\n try:\n # If the call is \"get\", then we don't need as many parameters and\n # we can ignore some\n if call_type == 'get':\n cmd.extend([\"--get\", self.key])\n # Otherwise, we will use all relevant parameters\n elif call_type == 'set':\n cmd.extend(direct)\n cmd.extend(config_source)\n cmd.extend([\"--type\", self.value_type, \"--{3}\".format(call_type), self.key, self.value])\n elif call_type == 'unset':\n cmd.extend([\"--unset\", self.key])\n\n # Start external command\n rc, out, err = self.ansible.run_command(cmd)\n\n if err and fail_onerr:\n self.ansible.fail_json(msg='gconftool-2 failed with '\n 'error: %s' % (str(err)))\n else:\n changed = True\n\n except OSError as exception:\n self.ansible.fail_json(msg='gconftool-2 failed with exception: '\n '%s' % exception)\n return changed, out.rstrip()\n\n\ndef main():\n # Setup the Ansible module\n module = AnsibleModule(\n argument_spec=dict(\n key=dict(type='str', required=True, no_log=False),\n value_type=dict(type='str', choices=['bool', 'float', 'int', 'string']),\n value=dict(type='str'),\n state=dict(type='str', required=True, choices=['absent', 'get', 'present']),\n direct=dict(type='bool', default=False),\n config_source=dict(type='str'),\n ),\n supports_check_mode=True\n )\n\n state_values = {\"present\": \"set\", \"absent\": \"unset\", \"get\": \"get\"}\n\n # Assign module values to dictionary values\n key = module.params['key']\n value_type = module.params['value_type']\n if module.params['value'].lower() == \"true\":\n value = \"true\"\n elif module.params['value'] == \"false\":\n value = \"false\"\n else:\n value = module.params['value']\n\n state = state_values[module.params['state']]\n direct = module.params['direct']\n config_source = module.params['config_source']\n\n # Initialize some variables for later\n change = False\n new_value = ''\n\n if state != \"get\":\n if value 
is None or value == \"\":\n module.fail_json(msg='State %s requires \"value\" to be set'\n % str(state))\n elif value_type is None or value_type == \"\":\n module.fail_json(msg='State %s requires \"value_type\" to be set'\n % str(state))\n\n if direct and config_source is None:\n module.fail_json(msg='If \"direct\" is \"true\" then the ' +\n '\"config_source\" must be specified')\n elif not direct and config_source is not None:\n module.fail_json(msg='If the \"config_source\" is specified ' +\n 'then \"direct\" must be \"true\"')\n\n # Create a gconf2 preference\n gconf_pref = GConf2Preference(module, key, value_type,\n value, direct, config_source)\n # Now we get the current value, if not found don't fail\n dummy, current_value = gconf_pref.call(\"get\", fail_onerr=False)\n\n # Check if the current value equals the value we want to set. If not, make\n # a change\n if current_value != value:\n # If check mode, we know a change would have occurred.\n if module.check_mode:\n # So we will set the change to True\n change = True\n # And set the new_value to the value that would have been set\n new_value = value\n # If not check mode make the change.\n else:\n change, new_value = gconf_pref.call(state)\n # If the value we want to set is the same as the current_value, we will\n # set the new_value to the current_value for reporting\n else:\n new_value = current_value\n\n facts = dict(gconftool2={'changed': change,\n 'key': key,\n 'value_type': value_type,\n 'new_value': new_value,\n 'previous_value': current_value,\n 'playbook_value': module.params['value']})\n\n module.exit_json(changed=change, ansible_facts=facts)\n\n\nif __name__ == '__main__':\n main()\n", "path": "plugins/modules/system/gconftool2.py"}]}
4,088
152
gh_patches_debug_40376
rasdani/github-patches
git_diff
rlworkgroup__garage-849
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Garage does not run without display (pyglet) Not all use cases require environment rendering. Can we get rid of this default import? ``` Traceback (most recent call last): from garage.tf.envs import TfEnv File "/home//garage/env/lib/python3.6/site-packages/garage/tf/envs/__init__.py", line 1, in <module> from garage.tf.envs.base import TfEnv File "/home//garage/env/lib/python3.6/site-packages/garage/tf/envs/base.py", line 4, in <module> from garage.envs import GarageEnv File "/home//garage/env/lib/python3.6/site-packages/garage/envs/__init__.py", line 1, in <module> from garage.envs.base import GarageEnv File "/home//garage/env/lib/python3.6/site-packages/garage/envs/base.py", line 7, in <module> from gym.envs.classic_control.rendering import SimpleImageViewer File "/home//garage/env/lib/python3.6/site-packages/gym/envs/classic_control/rendering.py", line 27, in <module> from pyglet.gl import * File "/home//garage/env/lib/python3.6/site-packages/pyglet/gl/__init__.py", line 239, in <module> import pyglet.window File "/home//garage/env/lib/python3.6/site-packages/pyglet/window/__init__.py", line 1896, in <module> gl._create_shadow_window() File "/home//garage/env/lib/python3.6/site-packages/pyglet/gl/__init__.py", line 208, in _create_shadow_window _shadow_window = Window(width=1, height=1, visible=False) File "/home//garage/env/lib/python3.6/site-packages/pyglet/window/xlib/__init__.py", line 166, in __init__ super(XlibWindow, self).__init__(*args, **kwargs) File "/home//garage/env/lib/python3.6/site-packages/pyglet/window/__init__.py", line 501, in __init__ display = get_platform().get_default_display() File "/home//garage/env/lib/python3.6/site-packages/pyglet/window/__init__.py", line 1845, in get_default_display return pyglet.canvas.get_display() File "/home//garage/env/lib/python3.6/site-packages/pyglet/canvas/__init__.py", line 82, in get_display return Display() File "/home//garage/env/lib/python3.6/site-packages/pyglet/canvas/xlib.py", line 86, in __init__ raise NoSuchDisplayException('Cannot connect to "%s"' % name) pyglet.canvas.xlib.NoSuchDisplayException: Cannot connect to "None" ``` </issue> <code> [start of src/garage/envs/base.py] 1 """Wrapper class that converts gym.Env into GarageEnv.""" 2 import collections 3 4 import akro 5 import glfw 6 import gym 7 from gym.envs.classic_control.rendering import SimpleImageViewer 8 from gym.envs.classic_control.rendering import Viewer 9 10 from garage.core import Serializable 11 from garage.envs.env_spec import EnvSpec 12 13 # The gym environments using one of the packages in the following lists as 14 # entry points don't close their viewer windows. 15 KNOWN_GYM_NOT_CLOSE_VIEWER = [ 16 # Please keep alphabetized 17 'gym.envs.atari', 18 'gym.envs.box2d', 19 'gym.envs.classic_control' 20 ] 21 22 KNOWN_GYM_NOT_CLOSE_MJ_VIEWER = [ 23 # Please keep alphabetized 24 'gym.envs.mujoco', 25 'gym.envs.robotics' 26 ] 27 28 29 class GarageEnv(gym.Wrapper, Serializable): 30 """ 31 Returns an abstract Garage wrapper class for gym.Env. 32 33 In order to provide pickling (serialization) and parameterization 34 for gym.Envs, they must be wrapped with a GarageEnv. This ensures 35 compatibility with existing samplers and checkpointing when the 36 envs are passed internally around garage. 37 38 Furthermore, classes inheriting from GarageEnv should silently 39 convert action_space and observation_space from gym.Spaces to 40 akro.spaces. 
41 42 Args: env (gym.Env): the env that will be wrapped 43 """ 44 45 def __init__(self, env=None, env_name=''): 46 if env_name: 47 super().__init__(gym.make(env_name)) 48 else: 49 super().__init__(env) 50 51 self.action_space = akro.from_gym(self.env.action_space) 52 self.observation_space = akro.from_gym(self.env.observation_space) 53 if self.spec: 54 self.spec.action_space = self.action_space 55 self.spec.observation_space = self.observation_space 56 else: 57 self.spec = EnvSpec( 58 action_space=self.action_space, 59 observation_space=self.observation_space) 60 61 Serializable.quick_init(self, locals()) 62 63 def close(self): 64 """ 65 Close the wrapped env. 66 67 Returns: 68 None 69 """ 70 self._close_viewer_window() 71 self.env.close() 72 73 def _close_viewer_window(self): 74 """ 75 Close viewer window. 76 77 Unfortunately, some gym environments don't close the viewer windows 78 properly, which leads to "out of memory" issues when several of 79 these environments are tested one after the other. 80 This method searches for the viewer object of type MjViewer, Viewer 81 or SimpleImageViewer, based on environment, and if the environment 82 is wrapped in other environment classes, it performs depth search 83 in those as well. 84 This method can be removed once OpenAI solves the issue. 85 """ 86 if self.env.spec: 87 if any(package in self.env.spec._entry_point 88 for package in KNOWN_GYM_NOT_CLOSE_MJ_VIEWER): 89 # This import is not in the header to avoid a MuJoCo dependency 90 # with non-MuJoCo environments that use this base class. 91 from mujoco_py.mjviewer import MjViewer 92 if (hasattr(self.env, 'viewer') 93 and isinstance(self.env.viewer, MjViewer)): 94 glfw.destroy_window(self.env.viewer.window) 95 elif any(package in self.env.spec._entry_point 96 for package in KNOWN_GYM_NOT_CLOSE_VIEWER): 97 if (hasattr(self.env, 'viewer') and 98 (isinstance(self.env.viewer, Viewer) 99 or isinstance(self.env.viewer, SimpleImageViewer))): 100 self.env.viewer.close() 101 102 def reset(self, **kwargs): 103 """ 104 This method is necessary to suppress a deprecated warning 105 thrown by gym.Wrapper. 106 107 Calls reset on wrapped env. 108 """ 109 return self.env.reset(**kwargs) 110 111 def step(self, action): 112 """ 113 This method is necessary to suppress a deprecated warning 114 thrown by gym.Wrapper. 115 116 Calls step on wrapped env. 117 """ 118 return self.env.step(action) 119 120 121 def Step(observation, reward, done, **kwargs): # noqa: N802 122 """ 123 Convenience method for creating a namedtuple from the results of 124 environment.step(action). Provides the option to put extra 125 diagnostic info in the kwargs (if it exists) without demanding 126 an explicit positional argument. 127 """ 128 return _Step(observation, reward, done, kwargs) 129 130 131 _Step = collections.namedtuple('Step', 132 ['observation', 'reward', 'done', 'info']) 133 [end of src/garage/envs/base.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/garage/envs/base.py b/src/garage/envs/base.py --- a/src/garage/envs/base.py +++ b/src/garage/envs/base.py @@ -4,8 +4,6 @@ import akro import glfw import gym -from gym.envs.classic_control.rendering import SimpleImageViewer -from gym.envs.classic_control.rendering import Viewer from garage.core import Serializable from garage.envs.env_spec import EnvSpec @@ -54,9 +52,8 @@ self.spec.action_space = self.action_space self.spec.observation_space = self.observation_space else: - self.spec = EnvSpec( - action_space=self.action_space, - observation_space=self.observation_space) + self.spec = EnvSpec(action_space=self.action_space, + observation_space=self.observation_space) Serializable.quick_init(self, locals()) @@ -66,6 +63,7 @@ Returns: None + """ self._close_viewer_window() self.env.close() @@ -94,36 +92,35 @@ glfw.destroy_window(self.env.viewer.window) elif any(package in self.env.spec._entry_point for package in KNOWN_GYM_NOT_CLOSE_VIEWER): - if (hasattr(self.env, 'viewer') and - (isinstance(self.env.viewer, Viewer) - or isinstance(self.env.viewer, SimpleImageViewer))): - self.env.viewer.close() + if hasattr(self.env, 'viewer'): + from gym.envs.classic_control.rendering import ( + Viewer, SimpleImageViewer) + if (isinstance(self.env.viewer, Viewer) + or isinstance(self.env.viewer, SimpleImageViewer)): + self.env.viewer.close() def reset(self, **kwargs): - """ + """Call reset on wrapped env. + This method is necessary to suppress a deprecated warning thrown by gym.Wrapper. - - Calls reset on wrapped env. """ return self.env.reset(**kwargs) def step(self, action): - """ + """Call step on wrapped env. + This method is necessary to suppress a deprecated warning thrown by gym.Wrapper. - - Calls step on wrapped env. """ return self.env.step(action) def Step(observation, reward, done, **kwargs): # noqa: N802 - """ - Convenience method for creating a namedtuple from the results of - environment.step(action). Provides the option to put extra - diagnostic info in the kwargs (if it exists) without demanding - an explicit positional argument. + """Create a namedtuple from the results of environment.step(action). + + Provides the option to put extra diagnostic info in the kwargs (if it + exists) without demanding an explicit positional argument. """ return _Step(observation, reward, done, kwargs)
{"golden_diff": "diff --git a/src/garage/envs/base.py b/src/garage/envs/base.py\n--- a/src/garage/envs/base.py\n+++ b/src/garage/envs/base.py\n@@ -4,8 +4,6 @@\n import akro\n import glfw\n import gym\n-from gym.envs.classic_control.rendering import SimpleImageViewer\n-from gym.envs.classic_control.rendering import Viewer\n \n from garage.core import Serializable\n from garage.envs.env_spec import EnvSpec\n@@ -54,9 +52,8 @@\n self.spec.action_space = self.action_space\n self.spec.observation_space = self.observation_space\n else:\n- self.spec = EnvSpec(\n- action_space=self.action_space,\n- observation_space=self.observation_space)\n+ self.spec = EnvSpec(action_space=self.action_space,\n+ observation_space=self.observation_space)\n \n Serializable.quick_init(self, locals())\n \n@@ -66,6 +63,7 @@\n \n Returns:\n None\n+\n \"\"\"\n self._close_viewer_window()\n self.env.close()\n@@ -94,36 +92,35 @@\n glfw.destroy_window(self.env.viewer.window)\n elif any(package in self.env.spec._entry_point\n for package in KNOWN_GYM_NOT_CLOSE_VIEWER):\n- if (hasattr(self.env, 'viewer') and\n- (isinstance(self.env.viewer, Viewer)\n- or isinstance(self.env.viewer, SimpleImageViewer))):\n- self.env.viewer.close()\n+ if hasattr(self.env, 'viewer'):\n+ from gym.envs.classic_control.rendering import (\n+ Viewer, SimpleImageViewer)\n+ if (isinstance(self.env.viewer, Viewer)\n+ or isinstance(self.env.viewer, SimpleImageViewer)):\n+ self.env.viewer.close()\n \n def reset(self, **kwargs):\n- \"\"\"\n+ \"\"\"Call reset on wrapped env.\n+\n This method is necessary to suppress a deprecated warning\n thrown by gym.Wrapper.\n-\n- Calls reset on wrapped env.\n \"\"\"\n return self.env.reset(**kwargs)\n \n def step(self, action):\n- \"\"\"\n+ \"\"\"Call step on wrapped env.\n+\n This method is necessary to suppress a deprecated warning\n thrown by gym.Wrapper.\n-\n- Calls step on wrapped env.\n \"\"\"\n return self.env.step(action)\n \n \n def Step(observation, reward, done, **kwargs): # noqa: N802\n- \"\"\"\n- Convenience method for creating a namedtuple from the results of\n- environment.step(action). Provides the option to put extra\n- diagnostic info in the kwargs (if it exists) without demanding\n- an explicit positional argument.\n+ \"\"\"Create a namedtuple from the results of environment.step(action).\n+\n+ Provides the option to put extra diagnostic info in the kwargs (if it\n+ exists) without demanding an explicit positional argument.\n \"\"\"\n return _Step(observation, reward, done, kwargs)\n", "issue": "Garage does not run without display (pyglet)\nNot all use cases require environment rendering. 
Can we get rid of this default import?\r\n\r\n```\r\nTraceback (most recent call last):\r\n from garage.tf.envs import TfEnv\r\n File \"/home//garage/env/lib/python3.6/site-packages/garage/tf/envs/__init__.py\", line 1, in <module>\r\n from garage.tf.envs.base import TfEnv\r\n File \"/home//garage/env/lib/python3.6/site-packages/garage/tf/envs/base.py\", line 4, in <module>\r\n from garage.envs import GarageEnv\r\n File \"/home//garage/env/lib/python3.6/site-packages/garage/envs/__init__.py\", line 1, in <module>\r\n from garage.envs.base import GarageEnv\r\n File \"/home//garage/env/lib/python3.6/site-packages/garage/envs/base.py\", line 7, in <module>\r\n from gym.envs.classic_control.rendering import SimpleImageViewer\r\n File \"/home//garage/env/lib/python3.6/site-packages/gym/envs/classic_control/rendering.py\", line 27, in <module>\r\n from pyglet.gl import *\r\n File \"/home//garage/env/lib/python3.6/site-packages/pyglet/gl/__init__.py\", line 239, in <module>\r\n import pyglet.window\r\n File \"/home//garage/env/lib/python3.6/site-packages/pyglet/window/__init__.py\", line 1896, in <module>\r\n gl._create_shadow_window()\r\n File \"/home//garage/env/lib/python3.6/site-packages/pyglet/gl/__init__.py\", line 208, in _create_shadow_window\r\n _shadow_window = Window(width=1, height=1, visible=False)\r\n File \"/home//garage/env/lib/python3.6/site-packages/pyglet/window/xlib/__init__.py\", line 166, in __init__\r\n super(XlibWindow, self).__init__(*args, **kwargs)\r\n File \"/home//garage/env/lib/python3.6/site-packages/pyglet/window/__init__.py\", line 501, in __init__\r\n display = get_platform().get_default_display()\r\n File \"/home//garage/env/lib/python3.6/site-packages/pyglet/window/__init__.py\", line 1845, in get_default_display\r\n return pyglet.canvas.get_display()\r\n File \"/home//garage/env/lib/python3.6/site-packages/pyglet/canvas/__init__.py\", line 82, in get_display\r\n return Display()\r\n File \"/home//garage/env/lib/python3.6/site-packages/pyglet/canvas/xlib.py\", line 86, in __init__\r\n raise NoSuchDisplayException('Cannot connect to \"%s\"' % name)\r\npyglet.canvas.xlib.NoSuchDisplayException: Cannot connect to \"None\"\r\n```\n", "before_files": [{"content": "\"\"\"Wrapper class that converts gym.Env into GarageEnv.\"\"\"\nimport collections\n\nimport akro\nimport glfw\nimport gym\nfrom gym.envs.classic_control.rendering import SimpleImageViewer\nfrom gym.envs.classic_control.rendering import Viewer\n\nfrom garage.core import Serializable\nfrom garage.envs.env_spec import EnvSpec\n\n# The gym environments using one of the packages in the following lists as\n# entry points don't close their viewer windows.\nKNOWN_GYM_NOT_CLOSE_VIEWER = [\n # Please keep alphabetized\n 'gym.envs.atari',\n 'gym.envs.box2d',\n 'gym.envs.classic_control'\n]\n\nKNOWN_GYM_NOT_CLOSE_MJ_VIEWER = [\n # Please keep alphabetized\n 'gym.envs.mujoco',\n 'gym.envs.robotics'\n]\n\n\nclass GarageEnv(gym.Wrapper, Serializable):\n \"\"\"\n Returns an abstract Garage wrapper class for gym.Env.\n\n In order to provide pickling (serialization) and parameterization\n for gym.Envs, they must be wrapped with a GarageEnv. 
This ensures\n compatibility with existing samplers and checkpointing when the\n envs are passed internally around garage.\n\n Furthermore, classes inheriting from GarageEnv should silently\n convert action_space and observation_space from gym.Spaces to\n akro.spaces.\n\n Args: env (gym.Env): the env that will be wrapped\n \"\"\"\n\n def __init__(self, env=None, env_name=''):\n if env_name:\n super().__init__(gym.make(env_name))\n else:\n super().__init__(env)\n\n self.action_space = akro.from_gym(self.env.action_space)\n self.observation_space = akro.from_gym(self.env.observation_space)\n if self.spec:\n self.spec.action_space = self.action_space\n self.spec.observation_space = self.observation_space\n else:\n self.spec = EnvSpec(\n action_space=self.action_space,\n observation_space=self.observation_space)\n\n Serializable.quick_init(self, locals())\n\n def close(self):\n \"\"\"\n Close the wrapped env.\n\n Returns:\n None\n \"\"\"\n self._close_viewer_window()\n self.env.close()\n\n def _close_viewer_window(self):\n \"\"\"\n Close viewer window.\n\n Unfortunately, some gym environments don't close the viewer windows\n properly, which leads to \"out of memory\" issues when several of\n these environments are tested one after the other.\n This method searches for the viewer object of type MjViewer, Viewer\n or SimpleImageViewer, based on environment, and if the environment\n is wrapped in other environment classes, it performs depth search\n in those as well.\n This method can be removed once OpenAI solves the issue.\n \"\"\"\n if self.env.spec:\n if any(package in self.env.spec._entry_point\n for package in KNOWN_GYM_NOT_CLOSE_MJ_VIEWER):\n # This import is not in the header to avoid a MuJoCo dependency\n # with non-MuJoCo environments that use this base class.\n from mujoco_py.mjviewer import MjViewer\n if (hasattr(self.env, 'viewer')\n and isinstance(self.env.viewer, MjViewer)):\n glfw.destroy_window(self.env.viewer.window)\n elif any(package in self.env.spec._entry_point\n for package in KNOWN_GYM_NOT_CLOSE_VIEWER):\n if (hasattr(self.env, 'viewer') and\n (isinstance(self.env.viewer, Viewer)\n or isinstance(self.env.viewer, SimpleImageViewer))):\n self.env.viewer.close()\n\n def reset(self, **kwargs):\n \"\"\"\n This method is necessary to suppress a deprecated warning\n thrown by gym.Wrapper.\n\n Calls reset on wrapped env.\n \"\"\"\n return self.env.reset(**kwargs)\n\n def step(self, action):\n \"\"\"\n This method is necessary to suppress a deprecated warning\n thrown by gym.Wrapper.\n\n Calls step on wrapped env.\n \"\"\"\n return self.env.step(action)\n\n\ndef Step(observation, reward, done, **kwargs): # noqa: N802\n \"\"\"\n Convenience method for creating a namedtuple from the results of\n environment.step(action). Provides the option to put extra\n diagnostic info in the kwargs (if it exists) without demanding\n an explicit positional argument.\n \"\"\"\n return _Step(observation, reward, done, kwargs)\n\n\n_Step = collections.namedtuple('Step',\n ['observation', 'reward', 'done', 'info'])\n", "path": "src/garage/envs/base.py"}]}
2,456
635
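The garage fix in gh_patches_debug_40376 above uses the same remedy as the strawberry record: defer the display-dependent import until it is actually needed, so that importing the package on a headless machine no longer triggers pyglet's X-display probe. A rough sketch of that pattern, assuming a standard gym classic-control installation; close_viewer is an illustrative name, not the project's public API.

```python
def close_viewer(env):
    """Close an environment's viewer window, if one was ever created."""
    if getattr(env, "viewer", None) is None:
        return
    # Imported here rather than at module load, so merely importing this code
    # never touches pyglet (which needs a display to initialise).
    from gym.envs.classic_control.rendering import SimpleImageViewer, Viewer
    if isinstance(env.viewer, (Viewer, SimpleImageViewer)):
        env.viewer.close()
```

The trade-off is a slightly slower first call and an ImportError surfacing at call time instead of import time, which is usually acceptable for rendering-only code paths.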