Columns and value statistics:

  problem_id         string, lengths 18-22
  source             string, 1 distinct value
  task_type          string, 1 distinct value
  in_source_id       string, lengths 13-58
  prompt             string, lengths 1.71k-18.9k
  golden_diff        string, lengths 145-5.13k
  verification_info  string, lengths 465-23.6k
  num_tokens_prompt  int64, values 556-4.1k
  num_tokens_diff    int64, values 47-1.02k
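Each preview row below follows the same field order as this column listing. As a quick way to work with data shaped like this, here is a minimal sketch using the Hugging Face `datasets` library; the hub path `rasdani/github-patches` is copied from the `source` column and is only an assumption about the real dataset ID, so substitute the correct path if it differs.

```python
# Minimal sketch: load the dataset and inspect the fields listed above.
# Assumption: the hub ID matches the `source` value; replace it if the real path differs.
from datasets import load_dataset

ds = load_dataset("rasdani/github-patches", split="train")  # hypothetical hub ID

print(ds.features)  # column names and dtypes, matching the schema above

row = ds[0]
print(row["problem_id"], row["in_source_id"])
print(row["num_tokens_prompt"], row["num_tokens_diff"])
print(row["golden_diff"][:200])  # start of the reference patch
```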
problem_id: gh_patches_debug_6477
source: rasdani/github-patches
task_type: git_diff
in_source_id: mathesar-foundation__mathesar-2483
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> `clean_stale_db --force` kills demo databases newer than 3 days ## Description `clean_stale_db --force` is meant to only kill demo databases older than 3 days (by default), but that doesn't seem to be the case. ## Additional context https://github.com/centerofci/mathesar/blob/master/demo/management/commands/clean_stale_db.py </issue> <code> [start of demo/management/commands/clean_stale_db.py] 1 from datetime import timedelta 2 3 from django.conf import settings 4 from django.core.management import BaseCommand 5 from django.utils.timezone import now 6 from sqlalchemy import text 7 from sqlalchemy.exc import OperationalError 8 9 from db import engine 10 from db.metadata import get_empty_metadata 11 from mathesar.models.base import Database 12 from mathesar.state.django import reflect_db_objects 13 14 15 class Command(BaseCommand): 16 help = 'Cleans up the stale database created during live demo' 17 18 def add_arguments(self, parser): 19 parser.add_argument( 20 '--force', 21 action='store_true', 22 help='Force delete a database even if it in use' 23 ) 24 parser.add_argument( 25 '--max-days', 26 action='store', 27 type=int, 28 default=3, 29 help='A database is considered for deletion if it has existed for more than --max-days', 30 ) 31 32 def handle(self, *args, **options): 33 drop_all_stale_databases(*args, **options) 34 35 36 def drop_all_stale_databases(force=False, max_days=3, *args, **kwargs): 37 excluded_databases = [ 38 settings.DATABASES["default"]["NAME"], 39 settings.DATABASES["mathesar_tables"]["NAME"], 40 getattr(settings, "MATHESAR_DEMO_TEMPLATE", None), 41 # Exclude Postgres default databases 42 'postgres', 43 'template0', 44 'template1' 45 ] 46 stale_databases = Database.objects.filter(created_at__lt=now() - timedelta(minutes=max_days)) 47 deleted_databases = [] 48 for database in stale_databases: 49 if database.name not in excluded_databases and database.deleted is False: 50 dropped = drop_mathesar_database( 51 database.name, 52 username=settings.DATABASES["default"]["USER"], 53 password=settings.DATABASES["default"]["PASSWORD"], 54 hostname=settings.DATABASES["default"]["HOST"], 55 root_database=settings.DATABASES["default"]["NAME"], 56 port=settings.DATABASES["default"]["PORT"], 57 force=force 58 ) 59 if dropped: 60 deleted_databases.append(database.name) 61 database.delete() 62 reflect_db_objects(get_empty_metadata()) 63 return deleted_databases 64 65 66 def drop_mathesar_database( 67 user_database, username, password, hostname, root_database, port, force=False 68 ): 69 user_db_engine = engine.create_future_engine( 70 username, password, hostname, user_database, port 71 ) 72 try: 73 user_db_engine.connect() 74 except OperationalError: 75 # Non existent db object 76 user_db_engine.dispose() 77 return True 78 else: 79 try: 80 root_db_engine = engine.create_future_engine( 81 username, password, hostname, root_database, port, 82 ) 83 with root_db_engine.connect() as conn: 84 conn.execution_options(isolation_level="AUTOCOMMIT") 85 delete_stmt = f"DROP DATABASE {user_database} {'WITH (FORCE)' if force else ''}" 86 conn.execute(text(delete_stmt)) 87 # This database is not created using a config file, 88 # so their objects can be safety deleted 89 # as they won't be created again during reflection 90 return True 91 except OperationalError: 92 # Database is in use, ignore 93 pass 94 return False 95 [end of demo/management/commands/clean_stale_db.py] </code> I need you to 
solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/demo/management/commands/clean_stale_db.py b/demo/management/commands/clean_stale_db.py --- a/demo/management/commands/clean_stale_db.py +++ b/demo/management/commands/clean_stale_db.py @@ -43,7 +43,7 @@ 'template0', 'template1' ] - stale_databases = Database.objects.filter(created_at__lt=now() - timedelta(minutes=max_days)) + stale_databases = Database.objects.filter(created_at__lt=now() - timedelta(days=max_days)) deleted_databases = [] for database in stale_databases: if database.name not in excluded_databases and database.deleted is False:
{"golden_diff": "diff --git a/demo/management/commands/clean_stale_db.py b/demo/management/commands/clean_stale_db.py\n--- a/demo/management/commands/clean_stale_db.py\n+++ b/demo/management/commands/clean_stale_db.py\n@@ -43,7 +43,7 @@\n 'template0',\n 'template1'\n ]\n- stale_databases = Database.objects.filter(created_at__lt=now() - timedelta(minutes=max_days))\n+ stale_databases = Database.objects.filter(created_at__lt=now() - timedelta(days=max_days))\n deleted_databases = []\n for database in stale_databases:\n if database.name not in excluded_databases and database.deleted is False:\n", "issue": "`clean_stale_db --force` kills demo databases newer than 3 days\n## Description\r\n`clean_stale_db --force` is meant to only kill demo databases older than 3 days (by default), but that doesn't seem to be the case.\r\n\r\n## Additional context\r\n\r\nhttps://github.com/centerofci/mathesar/blob/master/demo/management/commands/clean_stale_db.py\n", "before_files": [{"content": "from datetime import timedelta\n\nfrom django.conf import settings\nfrom django.core.management import BaseCommand\nfrom django.utils.timezone import now\nfrom sqlalchemy import text\nfrom sqlalchemy.exc import OperationalError\n\nfrom db import engine\nfrom db.metadata import get_empty_metadata\nfrom mathesar.models.base import Database\nfrom mathesar.state.django import reflect_db_objects\n\n\nclass Command(BaseCommand):\n help = 'Cleans up the stale database created during live demo'\n\n def add_arguments(self, parser):\n parser.add_argument(\n '--force',\n action='store_true',\n help='Force delete a database even if it in use'\n )\n parser.add_argument(\n '--max-days',\n action='store',\n type=int,\n default=3,\n help='A database is considered for deletion if it has existed for more than --max-days',\n )\n\n def handle(self, *args, **options):\n drop_all_stale_databases(*args, **options)\n\n\ndef drop_all_stale_databases(force=False, max_days=3, *args, **kwargs):\n excluded_databases = [\n settings.DATABASES[\"default\"][\"NAME\"],\n settings.DATABASES[\"mathesar_tables\"][\"NAME\"],\n getattr(settings, \"MATHESAR_DEMO_TEMPLATE\", None),\n # Exclude Postgres default databases\n 'postgres',\n 'template0',\n 'template1'\n ]\n stale_databases = Database.objects.filter(created_at__lt=now() - timedelta(minutes=max_days))\n deleted_databases = []\n for database in stale_databases:\n if database.name not in excluded_databases and database.deleted is False:\n dropped = drop_mathesar_database(\n database.name,\n username=settings.DATABASES[\"default\"][\"USER\"],\n password=settings.DATABASES[\"default\"][\"PASSWORD\"],\n hostname=settings.DATABASES[\"default\"][\"HOST\"],\n root_database=settings.DATABASES[\"default\"][\"NAME\"],\n port=settings.DATABASES[\"default\"][\"PORT\"],\n force=force\n )\n if dropped:\n deleted_databases.append(database.name)\n database.delete()\n reflect_db_objects(get_empty_metadata())\n return deleted_databases\n\n\ndef drop_mathesar_database(\n user_database, username, password, hostname, root_database, port, force=False\n):\n user_db_engine = engine.create_future_engine(\n username, password, hostname, user_database, port\n )\n try:\n user_db_engine.connect()\n except OperationalError:\n # Non existent db object\n user_db_engine.dispose()\n return True\n else:\n try:\n root_db_engine = engine.create_future_engine(\n username, password, hostname, root_database, port,\n )\n with root_db_engine.connect() as conn:\n conn.execution_options(isolation_level=\"AUTOCOMMIT\")\n delete_stmt = f\"DROP 
DATABASE {user_database} {'WITH (FORCE)' if force else ''}\"\n conn.execute(text(delete_stmt))\n # This database is not created using a config file,\n # so their objects can be safety deleted\n # as they won't be created again during reflection\n return True\n except OperationalError:\n # Database is in use, ignore\n pass\n return False\n", "path": "demo/management/commands/clean_stale_db.py"}]}
num_tokens_prompt: 1,483
num_tokens_diff: 153
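A record like the one above carries the model-facing prompt, a reference patch in `golden_diff`, and a `verification_info` JSON blob holding the issue text plus the pre-patch file contents under `before_files`. The dataset's actual verification harness is not part of this preview, so the sketch below shows only one plausible check, with an illustrative helper name: write the `before_files` into a temporary git repository and confirm the stored golden diff applies cleanly with `git apply`.

```python
# Sketch of one plausible verification step (not the dataset's official harness):
# materialize `before_files` and confirm the stored golden_diff applies with `git apply`.
import json
import pathlib
import subprocess
import tempfile


def golden_diff_applies(record) -> bool:
    info = json.loads(record["verification_info"])
    with tempfile.TemporaryDirectory() as repo:
        subprocess.run(["git", "init", "-q", repo], check=True)
        # Recreate the pre-patch source tree described by verification_info.
        for f in info["before_files"]:
            path = pathlib.Path(repo) / f["path"]
            path.parent.mkdir(parents=True, exist_ok=True)
            path.write_text(f["content"])
        # Dry-run the reference patch against that tree.
        patch_file = pathlib.Path(repo) / "golden.patch"
        patch_file.write_text(record["golden_diff"] + "\n")
        result = subprocess.run(
            ["git", "apply", "--check", "golden.patch"],
            cwd=repo,
            capture_output=True,
        )
        return result.returncode == 0
```

With the loading sketch shown earlier, `golden_diff_applies(ds[0])` would be expected to succeed for the Mathesar record, since its diff was cut against exactly the `before_files` contents stored alongside it.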
problem_id: gh_patches_debug_7289
source: rasdani/github-patches
task_type: git_diff
in_source_id: beetbox__beets-1492
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> importfeeds: name of m3u_multi playlist get messed up when both m3u* options are on activate both m3u output formats ``` importfeeds: formats: m3u m3u_multi ``` Result : m3u_multi filename is not `<date> <track/album name>` as expected </issue> <code> [start of beetsplug/importfeeds.py] 1 # This file is part of beets. 2 # Copyright 2015, Fabrice Laporte. 3 # 4 # Permission is hereby granted, free of charge, to any person obtaining 5 # a copy of this software and associated documentation files (the 6 # "Software"), to deal in the Software without restriction, including 7 # without limitation the rights to use, copy, modify, merge, publish, 8 # distribute, sublicense, and/or sell copies of the Software, and to 9 # permit persons to whom the Software is furnished to do so, subject to 10 # the following conditions: 11 # 12 # The above copyright notice and this permission notice shall be 13 # included in all copies or substantial portions of the Software. 14 15 from __future__ import (division, absolute_import, print_function, 16 unicode_literals) 17 18 """Write paths of imported files in various formats to ease later import in a 19 music player. Also allow printing the new file locations to stdout in case 20 one wants to manually add music to a player by its path. 21 """ 22 import datetime 23 import os 24 import re 25 26 from beets.plugins import BeetsPlugin 27 from beets.util import mkdirall, normpath, syspath, bytestring_path 28 from beets import config 29 30 M3U_DEFAULT_NAME = 'imported.m3u' 31 32 33 def _get_feeds_dir(lib): 34 """Given a Library object, return the path to the feeds directory to be 35 used (either in the library directory or an explicitly configured 36 path). Ensures that the directory exists. 37 """ 38 # Inside library directory. 39 dirpath = lib.directory 40 41 # Ensure directory exists. 42 if not os.path.exists(syspath(dirpath)): 43 os.makedirs(syspath(dirpath)) 44 return dirpath 45 46 47 def _build_m3u_filename(basename): 48 """Builds unique m3u filename by appending given basename to current 49 date.""" 50 51 basename = re.sub(r"[\s,/\\'\"]", '_', basename) 52 date = datetime.datetime.now().strftime("%Y%m%d_%Hh%M") 53 path = normpath(os.path.join( 54 config['importfeeds']['dir'].as_filename(), 55 date + '_' + basename + '.m3u' 56 )) 57 return path 58 59 60 def _write_m3u(m3u_path, items_paths): 61 """Append relative paths to items into m3u file. 
62 """ 63 mkdirall(m3u_path) 64 with open(syspath(m3u_path), 'a') as f: 65 for path in items_paths: 66 f.write(path + b'\n') 67 68 69 class ImportFeedsPlugin(BeetsPlugin): 70 def __init__(self): 71 super(ImportFeedsPlugin, self).__init__() 72 73 self.config.add({ 74 'formats': [], 75 'm3u_name': u'imported.m3u', 76 'dir': None, 77 'relative_to': None, 78 'absolute_path': False, 79 }) 80 81 feeds_dir = self.config['dir'].get() 82 if feeds_dir: 83 feeds_dir = os.path.expanduser(bytestring_path(feeds_dir)) 84 self.config['dir'] = feeds_dir 85 if not os.path.exists(syspath(feeds_dir)): 86 os.makedirs(syspath(feeds_dir)) 87 88 relative_to = self.config['relative_to'].get() 89 if relative_to: 90 self.config['relative_to'] = normpath(relative_to) 91 else: 92 self.config['relative_to'] = feeds_dir 93 94 self.register_listener('library_opened', self.library_opened) 95 self.register_listener('album_imported', self.album_imported) 96 self.register_listener('item_imported', self.item_imported) 97 98 def _record_items(self, lib, basename, items): 99 """Records relative paths to the given items for each feed format 100 """ 101 feedsdir = bytestring_path(self.config['dir'].as_filename()) 102 formats = self.config['formats'].as_str_seq() 103 relative_to = self.config['relative_to'].get() \ 104 or self.config['dir'].as_filename() 105 relative_to = bytestring_path(relative_to) 106 107 paths = [] 108 for item in items: 109 if self.config['absolute_path']: 110 paths.append(item.path) 111 else: 112 try: 113 relpath = os.path.relpath(item.path, relative_to) 114 except ValueError: 115 # On Windows, it is sometimes not possible to construct a 116 # relative path (if the files are on different disks). 117 relpath = item.path 118 paths.append(relpath) 119 120 if 'm3u' in formats: 121 basename = bytestring_path( 122 self.config['m3u_name'].get(unicode) 123 ) 124 m3u_path = os.path.join(feedsdir, basename) 125 _write_m3u(m3u_path, paths) 126 127 if 'm3u_multi' in formats: 128 m3u_path = _build_m3u_filename(basename) 129 _write_m3u(m3u_path, paths) 130 131 if 'link' in formats: 132 for path in paths: 133 dest = os.path.join(feedsdir, os.path.basename(path)) 134 if not os.path.exists(syspath(dest)): 135 os.symlink(syspath(path), syspath(dest)) 136 137 if 'echo' in formats: 138 self._log.info("Location of imported music:") 139 for path in paths: 140 self._log.info(" {0}", path) 141 142 def library_opened(self, lib): 143 if self.config['dir'].get() is None: 144 self.config['dir'] = _get_feeds_dir(lib) 145 146 def album_imported(self, lib, album): 147 self._record_items(lib, album.album, album.items()) 148 149 def item_imported(self, lib, item): 150 self._record_items(lib, item.title, [item]) 151 [end of beetsplug/importfeeds.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/beetsplug/importfeeds.py b/beetsplug/importfeeds.py --- a/beetsplug/importfeeds.py +++ b/beetsplug/importfeeds.py @@ -118,10 +118,9 @@ paths.append(relpath) if 'm3u' in formats: - basename = bytestring_path( - self.config['m3u_name'].get(unicode) - ) - m3u_path = os.path.join(feedsdir, basename) + m3u_basename = bytestring_path( + self.config['m3u_name'].get(unicode)) + m3u_path = os.path.join(feedsdir, m3u_basename) _write_m3u(m3u_path, paths) if 'm3u_multi' in formats:
{"golden_diff": "diff --git a/beetsplug/importfeeds.py b/beetsplug/importfeeds.py\n--- a/beetsplug/importfeeds.py\n+++ b/beetsplug/importfeeds.py\n@@ -118,10 +118,9 @@\n paths.append(relpath)\n \n if 'm3u' in formats:\n- basename = bytestring_path(\n- self.config['m3u_name'].get(unicode)\n- )\n- m3u_path = os.path.join(feedsdir, basename)\n+ m3u_basename = bytestring_path(\n+ self.config['m3u_name'].get(unicode))\n+ m3u_path = os.path.join(feedsdir, m3u_basename)\n _write_m3u(m3u_path, paths)\n \n if 'm3u_multi' in formats:\n", "issue": "importfeeds: name of m3u_multi playlist get messed up when both m3u* options are on\nactivate both m3u output formats \n\n```\n importfeeds:\n formats: m3u m3u_multi\n```\n\nResult : m3u_multi filename is not `<date> <track/album name>` as expected\n\n", "before_files": [{"content": "# This file is part of beets.\n# Copyright 2015, Fabrice Laporte.\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\nfrom __future__ import (division, absolute_import, print_function,\n unicode_literals)\n\n\"\"\"Write paths of imported files in various formats to ease later import in a\nmusic player. Also allow printing the new file locations to stdout in case\none wants to manually add music to a player by its path.\n\"\"\"\nimport datetime\nimport os\nimport re\n\nfrom beets.plugins import BeetsPlugin\nfrom beets.util import mkdirall, normpath, syspath, bytestring_path\nfrom beets import config\n\nM3U_DEFAULT_NAME = 'imported.m3u'\n\n\ndef _get_feeds_dir(lib):\n \"\"\"Given a Library object, return the path to the feeds directory to be\n used (either in the library directory or an explicitly configured\n path). 
Ensures that the directory exists.\n \"\"\"\n # Inside library directory.\n dirpath = lib.directory\n\n # Ensure directory exists.\n if not os.path.exists(syspath(dirpath)):\n os.makedirs(syspath(dirpath))\n return dirpath\n\n\ndef _build_m3u_filename(basename):\n \"\"\"Builds unique m3u filename by appending given basename to current\n date.\"\"\"\n\n basename = re.sub(r\"[\\s,/\\\\'\\\"]\", '_', basename)\n date = datetime.datetime.now().strftime(\"%Y%m%d_%Hh%M\")\n path = normpath(os.path.join(\n config['importfeeds']['dir'].as_filename(),\n date + '_' + basename + '.m3u'\n ))\n return path\n\n\ndef _write_m3u(m3u_path, items_paths):\n \"\"\"Append relative paths to items into m3u file.\n \"\"\"\n mkdirall(m3u_path)\n with open(syspath(m3u_path), 'a') as f:\n for path in items_paths:\n f.write(path + b'\\n')\n\n\nclass ImportFeedsPlugin(BeetsPlugin):\n def __init__(self):\n super(ImportFeedsPlugin, self).__init__()\n\n self.config.add({\n 'formats': [],\n 'm3u_name': u'imported.m3u',\n 'dir': None,\n 'relative_to': None,\n 'absolute_path': False,\n })\n\n feeds_dir = self.config['dir'].get()\n if feeds_dir:\n feeds_dir = os.path.expanduser(bytestring_path(feeds_dir))\n self.config['dir'] = feeds_dir\n if not os.path.exists(syspath(feeds_dir)):\n os.makedirs(syspath(feeds_dir))\n\n relative_to = self.config['relative_to'].get()\n if relative_to:\n self.config['relative_to'] = normpath(relative_to)\n else:\n self.config['relative_to'] = feeds_dir\n\n self.register_listener('library_opened', self.library_opened)\n self.register_listener('album_imported', self.album_imported)\n self.register_listener('item_imported', self.item_imported)\n\n def _record_items(self, lib, basename, items):\n \"\"\"Records relative paths to the given items for each feed format\n \"\"\"\n feedsdir = bytestring_path(self.config['dir'].as_filename())\n formats = self.config['formats'].as_str_seq()\n relative_to = self.config['relative_to'].get() \\\n or self.config['dir'].as_filename()\n relative_to = bytestring_path(relative_to)\n\n paths = []\n for item in items:\n if self.config['absolute_path']:\n paths.append(item.path)\n else:\n try:\n relpath = os.path.relpath(item.path, relative_to)\n except ValueError:\n # On Windows, it is sometimes not possible to construct a\n # relative path (if the files are on different disks).\n relpath = item.path\n paths.append(relpath)\n\n if 'm3u' in formats:\n basename = bytestring_path(\n self.config['m3u_name'].get(unicode)\n )\n m3u_path = os.path.join(feedsdir, basename)\n _write_m3u(m3u_path, paths)\n\n if 'm3u_multi' in formats:\n m3u_path = _build_m3u_filename(basename)\n _write_m3u(m3u_path, paths)\n\n if 'link' in formats:\n for path in paths:\n dest = os.path.join(feedsdir, os.path.basename(path))\n if not os.path.exists(syspath(dest)):\n os.symlink(syspath(path), syspath(dest))\n\n if 'echo' in formats:\n self._log.info(\"Location of imported music:\")\n for path in paths:\n self._log.info(\" {0}\", path)\n\n def library_opened(self, lib):\n if self.config['dir'].get() is None:\n self.config['dir'] = _get_feeds_dir(lib)\n\n def album_imported(self, lib, album):\n self._record_items(lib, album.album, album.items())\n\n def item_imported(self, lib, item):\n self._record_items(lib, item.title, [item])\n", "path": "beetsplug/importfeeds.py"}]}
num_tokens_prompt: 2,178
num_tokens_diff: 180
problem_id: gh_patches_debug_66681
source: rasdani/github-patches
task_type: git_diff
in_source_id: pantsbuild__pants-16793
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Please add Brand24 to the public list of Pants Users ### Company name Brand24 ### Company website https://brand24.com ### Company logo ![logo_brand24_black_small](https://user-images.githubusercontent.com/36135334/187741686-89d1e4f7-37ea-4827-91c0-c90d130e751c.png) </issue> <code> [start of build-support/bin/generate_user_list.py] 1 #!/usr/bin/env python3 2 # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md). 3 # Licensed under the Apache License, Version 2.0 (see LICENSE). 4 5 from __future__ import annotations 6 7 import pkgutil 8 from dataclasses import dataclass 9 10 import chevron 11 12 """Generates the custom HTML/CSS block in https://www.pantsbuild.org/docs/who-uses-pants . 13 14 To add new companies or make other changes, edit and run this script, then paste the output 15 into that block instead of its current content. Be sure to check that the page renders properly 16 and be prepared to revert (via the "Page history" link) if necessary. 17 18 On MacOS it's useful to pipe the output of this script into pbcopy, so it's in the clipboard 19 ready to be pasted: 20 21 ./pants run build-support/bin/generate_user_list.py | pbcopy 22 23 NOTE: Please consider adding your company/organization to this list! If you wish to do so then 24 thank you, and please follow the guidance at https://pantsbuild.org/register. 25 """ 26 27 # Note: To create an image URL, temporarily add an image block to some page on readme.com (such 28 # as the user list page itself), and upload the logo image (after appropriate resizing in GIMP 29 # or your tool of choice). Do NOT save the page. Instead, right-click to capture the image URL 30 # from the preview in the edit page, and then remove the image block. 31 32 33 @dataclass 34 class Org: 35 name: str 36 website: str 37 image: str | None 38 39 40 # Orgs will be displayed in case-insensitive alphabetical order, but it's useful for human readers 41 # to keep this list in that order too. 
42 _orgs = ( 43 Org( 44 "Chartbeat", "https://chartbeat.com/", "https://files.readme.io/861ace7-chartbeat-small.png" 45 ), 46 Org( 47 "Coinbase", 48 "https://www.coinbase.com/", 49 "https://files.readme.io/a213f0f-coinbase-small.png", 50 ), 51 Org( 52 "ESL Gaming", 53 "https://about.eslgaming.com/", 54 "https://files.readme.io/b63d33d-esl-small.png", 55 ), 56 Org( 57 "Foursquare", 58 "https://foursquare.com/", 59 "https://files.readme.io/aa53b52-foursquare-small.png", 60 ), 61 Org( 62 "Geminus", 63 "https://www.geminus.ai/", 64 "https://files.readme.io/0da3c3f-geminus-small.png", 65 ), 66 Org("Grapl", "https://www.graplsecurity.com/", "https://files.readme.io/341b9cd-grapl.png"), 67 Org( 68 "HousingAnywhere", 69 "https://housinganywhere.com/", 70 "https://files.readme.io/dd2a703-housinganywhere-small.png", 71 ), 72 Org("IBM", "https://www.ibm.com/", None), 73 Org("iManage", "https://imanage.com/", "https://files.readme.io/0f7b5f6-imanage-small.png"), 74 Org("Lablup", "https://lablup.com/", "https://files.readme.io/a94d375-lablup-small.png"), 75 Org("Myst AI", "https://www.myst.ai/", "https://files.readme.io/802d8fa-myst_ai_small.png"), 76 Org("Ocrolus", "https://www.ocrolus.com/", "https://files.readme.io/ff166fa-ocrolus-small.png"), 77 Org( 78 "Orca Security", 79 "https://orca.security/", 80 "https://files.readme.io/e87f6c5-Orca_Security-small.png", 81 ), 82 Org("Pave", "https://www.pave.dev/", "https://files.readme.io/924aa3e-pave-small.png"), 83 Org( 84 "People Data Labs", 85 "https://www.peopledatalabs.com/", 86 "https://files.readme.io/8c4f5cd-peopledatalabs-small.png", 87 ), 88 Org( 89 "Rippling", 90 "https://www.rippling.com/", 91 "https://files.readme.io/c8be3a1-rippling-small.png", 92 ), 93 Org( 94 "Snowfall", 95 "https://snowfalltravel.com/", 96 "https://files.readme.io/245f03e-snowfall-small.png", 97 ), 98 Org( 99 "Tessian", 100 "https://www.tessian.com", 101 "https://files.readme.io/6ef9d57-tessian-small.png", 102 ), 103 Org( 104 "Toolchain", 105 "https://www.toolchain.com/", 106 "https://files.readme.io/43d674d-toolchain_logo_small.png", 107 ), 108 Org("Valon", "https://valon.com/", "https://files.readme.io/df5216a-valon-small.png"), 109 Org( 110 "Vicara Solutions", 111 "https://vicarasolutions.com/", 112 "https://files.readme.io/1748a22-vicara-solutions.png", 113 ), 114 ) 115 116 117 @dataclass 118 class OrgPair: 119 a: Org 120 b: Org 121 122 123 def main(): 124 orgs = sorted(_orgs, key=lambda x: x.name.lower()) 125 # Ensure an even number of cells, leaving one to render blankly if necessary. 126 if len(orgs) % 2 == 1: 127 orgs.append(Org("", "", "")) 128 org_pairs = tuple(OrgPair(orgs[i], orgs[i + 1]) for i in range(0, len(orgs), 2)) 129 buf = pkgutil.get_data("generate_user_list", "user_list_templates/table.html.mustache") 130 print(chevron.render(buf.decode(), data={"org_pairs": org_pairs})) 131 132 133 if __name__ == "__main__": 134 main() 135 [end of build-support/bin/generate_user_list.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/build-support/bin/generate_user_list.py b/build-support/bin/generate_user_list.py --- a/build-support/bin/generate_user_list.py +++ b/build-support/bin/generate_user_list.py @@ -40,6 +40,7 @@ # Orgs will be displayed in case-insensitive alphabetical order, but it's useful for human readers # to keep this list in that order too. _orgs = ( + Org("Brand24", "https://brand24.com/", "https://files.readme.io/e3203d1-brand24-small.png"), Org( "Chartbeat", "https://chartbeat.com/", "https://files.readme.io/861ace7-chartbeat-small.png" ),
{"golden_diff": "diff --git a/build-support/bin/generate_user_list.py b/build-support/bin/generate_user_list.py\n--- a/build-support/bin/generate_user_list.py\n+++ b/build-support/bin/generate_user_list.py\n@@ -40,6 +40,7 @@\n # Orgs will be displayed in case-insensitive alphabetical order, but it's useful for human readers\n # to keep this list in that order too.\n _orgs = (\n+ Org(\"Brand24\", \"https://brand24.com/\", \"https://files.readme.io/e3203d1-brand24-small.png\"),\n Org(\n \"Chartbeat\", \"https://chartbeat.com/\", \"https://files.readme.io/861ace7-chartbeat-small.png\"\n ),\n", "issue": "Please add Brand24 to the public list of Pants Users\n### Company name\n\nBrand24\n\n### Company website\n\nhttps://brand24.com\n\n### Company logo\n\n![logo_brand24_black_small](https://user-images.githubusercontent.com/36135334/187741686-89d1e4f7-37ea-4827-91c0-c90d130e751c.png)\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import annotations\n\nimport pkgutil\nfrom dataclasses import dataclass\n\nimport chevron\n\n\"\"\"Generates the custom HTML/CSS block in https://www.pantsbuild.org/docs/who-uses-pants .\n\nTo add new companies or make other changes, edit and run this script, then paste the output\ninto that block instead of its current content. Be sure to check that the page renders properly\nand be prepared to revert (via the \"Page history\" link) if necessary.\n\nOn MacOS it's useful to pipe the output of this script into pbcopy, so it's in the clipboard\nready to be pasted:\n\n./pants run build-support/bin/generate_user_list.py | pbcopy\n\nNOTE: Please consider adding your company/organization to this list! If you wish to do so then\n thank you, and please follow the guidance at https://pantsbuild.org/register.\n\"\"\"\n\n# Note: To create an image URL, temporarily add an image block to some page on readme.com (such\n# as the user list page itself), and upload the logo image (after appropriate resizing in GIMP\n# or your tool of choice). Do NOT save the page. 
Instead, right-click to capture the image URL\n# from the preview in the edit page, and then remove the image block.\n\n\n@dataclass\nclass Org:\n name: str\n website: str\n image: str | None\n\n\n# Orgs will be displayed in case-insensitive alphabetical order, but it's useful for human readers\n# to keep this list in that order too.\n_orgs = (\n Org(\n \"Chartbeat\", \"https://chartbeat.com/\", \"https://files.readme.io/861ace7-chartbeat-small.png\"\n ),\n Org(\n \"Coinbase\",\n \"https://www.coinbase.com/\",\n \"https://files.readme.io/a213f0f-coinbase-small.png\",\n ),\n Org(\n \"ESL Gaming\",\n \"https://about.eslgaming.com/\",\n \"https://files.readme.io/b63d33d-esl-small.png\",\n ),\n Org(\n \"Foursquare\",\n \"https://foursquare.com/\",\n \"https://files.readme.io/aa53b52-foursquare-small.png\",\n ),\n Org(\n \"Geminus\",\n \"https://www.geminus.ai/\",\n \"https://files.readme.io/0da3c3f-geminus-small.png\",\n ),\n Org(\"Grapl\", \"https://www.graplsecurity.com/\", \"https://files.readme.io/341b9cd-grapl.png\"),\n Org(\n \"HousingAnywhere\",\n \"https://housinganywhere.com/\",\n \"https://files.readme.io/dd2a703-housinganywhere-small.png\",\n ),\n Org(\"IBM\", \"https://www.ibm.com/\", None),\n Org(\"iManage\", \"https://imanage.com/\", \"https://files.readme.io/0f7b5f6-imanage-small.png\"),\n Org(\"Lablup\", \"https://lablup.com/\", \"https://files.readme.io/a94d375-lablup-small.png\"),\n Org(\"Myst AI\", \"https://www.myst.ai/\", \"https://files.readme.io/802d8fa-myst_ai_small.png\"),\n Org(\"Ocrolus\", \"https://www.ocrolus.com/\", \"https://files.readme.io/ff166fa-ocrolus-small.png\"),\n Org(\n \"Orca Security\",\n \"https://orca.security/\",\n \"https://files.readme.io/e87f6c5-Orca_Security-small.png\",\n ),\n Org(\"Pave\", \"https://www.pave.dev/\", \"https://files.readme.io/924aa3e-pave-small.png\"),\n Org(\n \"People Data Labs\",\n \"https://www.peopledatalabs.com/\",\n \"https://files.readme.io/8c4f5cd-peopledatalabs-small.png\",\n ),\n Org(\n \"Rippling\",\n \"https://www.rippling.com/\",\n \"https://files.readme.io/c8be3a1-rippling-small.png\",\n ),\n Org(\n \"Snowfall\",\n \"https://snowfalltravel.com/\",\n \"https://files.readme.io/245f03e-snowfall-small.png\",\n ),\n Org(\n \"Tessian\",\n \"https://www.tessian.com\",\n \"https://files.readme.io/6ef9d57-tessian-small.png\",\n ),\n Org(\n \"Toolchain\",\n \"https://www.toolchain.com/\",\n \"https://files.readme.io/43d674d-toolchain_logo_small.png\",\n ),\n Org(\"Valon\", \"https://valon.com/\", \"https://files.readme.io/df5216a-valon-small.png\"),\n Org(\n \"Vicara Solutions\",\n \"https://vicarasolutions.com/\",\n \"https://files.readme.io/1748a22-vicara-solutions.png\",\n ),\n)\n\n\n@dataclass\nclass OrgPair:\n a: Org\n b: Org\n\n\ndef main():\n orgs = sorted(_orgs, key=lambda x: x.name.lower())\n # Ensure an even number of cells, leaving one to render blankly if necessary.\n if len(orgs) % 2 == 1:\n orgs.append(Org(\"\", \"\", \"\"))\n org_pairs = tuple(OrgPair(orgs[i], orgs[i + 1]) for i in range(0, len(orgs), 2))\n buf = pkgutil.get_data(\"generate_user_list\", \"user_list_templates/table.html.mustache\")\n print(chevron.render(buf.decode(), data={\"org_pairs\": org_pairs}))\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "build-support/bin/generate_user_list.py"}]}
num_tokens_prompt: 2,282
num_tokens_diff: 163
problem_id: gh_patches_debug_50802
source: rasdani/github-patches
task_type: git_diff
in_source_id: googleapis__google-cloud-python-1481
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> pubsub fails if data key is not present If a message is published with a string of 0 length (`topic.publish( '', url=url, title=title)`) when the message is received there is no data field in the message and a key error is thrown when trying to transform the message from the PubSub API representation. https://github.com/GoogleCloudPlatform/gcloud-python/blob/master/gcloud/pubsub/message.py#L74 ``` Traceback (most recent call last): File "/en_notifications/en_notifications.py", line 51, in <module> received = PS_SUBSCRIPTION.pull(max_messages=PULL_COUNT) File "/usr/local/lib/python2.7/dist-packages/gcloud/pubsub/subscription.py", line 212, in pull File "/usr/local/lib/python2.7/dist-packages/gcloud/pubsub/message.py", line 74, in from_api_repr for info in response.get('receivedMessages', ())] data = base64.b64decode(api_repr['data']) KeyError: 'data' ``` </issue> <code> [start of gcloud/pubsub/message.py] 1 # Copyright 2015 Google Inc. All rights reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 """Define API Topics.""" 16 17 import base64 18 19 from gcloud._helpers import _rfc3339_to_datetime 20 21 22 class Message(object): 23 """Messages can be published to a topic and received by subscribers. 24 25 See: 26 https://cloud.google.com/pubsub/reference/rest/v1/PubsubMessage 27 28 :type data: bytes 29 :param data: the payload of the message 30 31 :type message_id: string 32 :param message_id: An ID assigned to the message by the API. 33 34 :type attributes: dict or None 35 :param attributes: Extra metadata associated by the publisher with the 36 message. 37 """ 38 def __init__(self, data, message_id, attributes=None): 39 self.data = data 40 self.message_id = message_id 41 self._attributes = attributes 42 43 @property 44 def attributes(self): 45 """Lazily-constructed attribute dictionary""" 46 if self._attributes is None: 47 self._attributes = {} 48 return self._attributes 49 50 @property 51 def timestamp(self): 52 """Return sortable timestamp from attributes, if passed. 53 54 Allows sorting messages in publication order (assuming consistent 55 clocks across all publishers). 56 57 :rtype: :class:`datetime.datetime` 58 :returns: timestamp (in UTC timezone) parsed from RFC 3339 timestamp 59 :raises: ValueError if timestamp not in ``attributes``, or if it does 60 not match the RFC 3339 format. 61 """ 62 stamp = self.attributes.get('timestamp') 63 if stamp is None: 64 raise ValueError('No timestamp') 65 return _rfc3339_to_datetime(stamp) 66 67 @classmethod 68 def from_api_repr(cls, api_repr): 69 """Factory: construct message from API representation. 
70 71 :type api_repr: dict or None 72 :param api_repr: The API representation of the message 73 """ 74 data = base64.b64decode(api_repr['data']) 75 return cls(data=data, message_id=api_repr['messageId'], 76 attributes=api_repr.get('attributes')) 77 [end of gcloud/pubsub/message.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/gcloud/pubsub/message.py b/gcloud/pubsub/message.py --- a/gcloud/pubsub/message.py +++ b/gcloud/pubsub/message.py @@ -71,6 +71,6 @@ :type api_repr: dict or None :param api_repr: The API representation of the message """ - data = base64.b64decode(api_repr['data']) + data = base64.b64decode(api_repr.get('data', b'')) return cls(data=data, message_id=api_repr['messageId'], attributes=api_repr.get('attributes'))
{"golden_diff": "diff --git a/gcloud/pubsub/message.py b/gcloud/pubsub/message.py\n--- a/gcloud/pubsub/message.py\n+++ b/gcloud/pubsub/message.py\n@@ -71,6 +71,6 @@\n :type api_repr: dict or None\n :param api_repr: The API representation of the message\n \"\"\"\n- data = base64.b64decode(api_repr['data'])\n+ data = base64.b64decode(api_repr.get('data', b''))\n return cls(data=data, message_id=api_repr['messageId'],\n attributes=api_repr.get('attributes'))\n", "issue": "pubsub fails if data key is not present\nIf a message is published with a string of 0 length (`topic.publish( '', url=url, title=title)`) when the message is received there is no data field in the message and a key error is thrown when trying to transform the message from the PubSub API representation.\n\nhttps://github.com/GoogleCloudPlatform/gcloud-python/blob/master/gcloud/pubsub/message.py#L74\n\n```\nTraceback (most recent call last):\nFile \"/en_notifications/en_notifications.py\", line 51, in <module>\nreceived = PS_SUBSCRIPTION.pull(max_messages=PULL_COUNT)\nFile \"/usr/local/lib/python2.7/dist-packages/gcloud/pubsub/subscription.py\", line 212, in pull\nFile \"/usr/local/lib/python2.7/dist-packages/gcloud/pubsub/message.py\", line 74, in from_api_repr\nfor info in response.get('receivedMessages', ())]\ndata = base64.b64decode(api_repr['data'])\nKeyError: 'data'\n```\n\n", "before_files": [{"content": "# Copyright 2015 Google Inc. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Define API Topics.\"\"\"\n\nimport base64\n\nfrom gcloud._helpers import _rfc3339_to_datetime\n\n\nclass Message(object):\n \"\"\"Messages can be published to a topic and received by subscribers.\n\n See:\n https://cloud.google.com/pubsub/reference/rest/v1/PubsubMessage\n\n :type data: bytes\n :param data: the payload of the message\n\n :type message_id: string\n :param message_id: An ID assigned to the message by the API.\n\n :type attributes: dict or None\n :param attributes: Extra metadata associated by the publisher with the\n message.\n \"\"\"\n def __init__(self, data, message_id, attributes=None):\n self.data = data\n self.message_id = message_id\n self._attributes = attributes\n\n @property\n def attributes(self):\n \"\"\"Lazily-constructed attribute dictionary\"\"\"\n if self._attributes is None:\n self._attributes = {}\n return self._attributes\n\n @property\n def timestamp(self):\n \"\"\"Return sortable timestamp from attributes, if passed.\n\n Allows sorting messages in publication order (assuming consistent\n clocks across all publishers).\n\n :rtype: :class:`datetime.datetime`\n :returns: timestamp (in UTC timezone) parsed from RFC 3339 timestamp\n :raises: ValueError if timestamp not in ``attributes``, or if it does\n not match the RFC 3339 format.\n \"\"\"\n stamp = self.attributes.get('timestamp')\n if stamp is None:\n raise ValueError('No timestamp')\n return _rfc3339_to_datetime(stamp)\n\n @classmethod\n def from_api_repr(cls, api_repr):\n \"\"\"Factory: construct message from API 
representation.\n\n :type api_repr: dict or None\n :param api_repr: The API representation of the message\n \"\"\"\n data = base64.b64decode(api_repr['data'])\n return cls(data=data, message_id=api_repr['messageId'],\n attributes=api_repr.get('attributes'))\n", "path": "gcloud/pubsub/message.py"}]}
num_tokens_prompt: 1,481
num_tokens_diff: 133
problem_id: gh_patches_debug_1643
source: rasdani/github-patches
task_type: git_diff
in_source_id: dask__distributed-367
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> OverflowError when sending large sparse arrays I don't yet have a small reproducible example, but I can make this happen every time I try to collect many large sparse arrays. I do have a notebook that will produce it though, and can make that available. The traceback: ``` Traceback (most recent call last): File "/home/jcrist/miniconda/envs/dask_learn/lib/python2.7/site-packages/distributed/core.py", line 266, in write frames = protocol.dumps(msg) File "/home/jcrist/miniconda/envs/dask_learn/lib/python2.7/site-packages/distributed/protocol.py", line 81, in dumps frames = dumps_msgpack(small) File "/home/jcrist/miniconda/envs/dask_learn/lib/python2.7/site-packages/distributed/protocol.py", line 155, in dumps_msgpack fmt, payload = maybe_compress(payload) File "/home/jcrist/miniconda/envs/dask_learn/lib/python2.7/site-packages/distributed/protocol.py", line 137, in maybe_compress compressed = compress(payload) OverflowError: size does not fit in an int ``` A few notes: - Each array is roughly `675000 x 745`, and ~1% dense. The total bytes for indices + indptr + data is ~40MB each. - I can get each array individually, so it's not a problem with a chunk being too large - The error appears only when I'm collecting enough at once (for my size, 39 and and lower works fine). - At 41 arrays I get the above error, 40 arrays gives me a different (but probably related) error: ``` --------------------------------------------------------------------------- ValueError Traceback (most recent call last) <ipython-input-55-7b87709b6c67> in <module>() ----> 1 res = t.compute() /home/jcrist/dask/dask/base.pyc in compute(self, **kwargs) 84 Extra keywords to forward to the scheduler ``get`` function. 85 """ ---> 86 return compute(self, **kwargs)[0] 87 88 @classmethod /home/jcrist/dask/dask/base.pyc in compute(*args, **kwargs) 177 dsk = merge(var.dask for var in variables) 178 keys = [var._keys() for var in variables] --> 179 results = get(dsk, keys, **kwargs) 180 181 results_iter = iter(results) /home/jcrist/miniconda/envs/dask_learn/lib/python2.7/site-packages/distributed/executor.pyc in get(self, dsk, keys, **kwargs) 1008 1009 if status == 'error': -> 1010 raise result 1011 else: 1012 return result ValueError: corrupt input at byte 2 ``` </issue> <code> [start of distributed/protocol.py] 1 """ 2 The distributed message protocol consists of the following parts: 3 4 1. The length of the header, stored as a uint32 5 2. The header, stored as msgpack. 6 If there are no fields in the header then we skip it entirely. 7 3. The payload, stored as possibly compressed msgpack 8 4. A sentinel value 9 10 **Header** 11 12 The Header contains the following fields: 13 14 * **compression**: string, optional 15 One of the following: ``'snappy', 'lz4', 'zlib'`` or missing for None 16 17 **Payload** 18 19 The payload is any msgpack serializable value. It may be compressed based 20 on the header. 21 22 **Sentinel** 23 24 We often terminate each message with a sentinel value. This happens 25 outside of this module though and is not baked in. 
26 """ 27 from __future__ import print_function, division, absolute_import 28 29 import random 30 import struct 31 32 try: 33 import pandas.msgpack as msgpack 34 except ImportError: 35 import msgpack 36 37 from toolz import first, keymap, identity, merge 38 39 from .utils import ignoring 40 from .compatibility import unicode 41 42 43 compressions = {None: {'compress': identity, 44 'decompress': identity}} 45 46 default_compression = None 47 48 49 with ignoring(ImportError): 50 import zlib 51 compressions['zlib'] = {'compress': zlib.compress, 52 'decompress': zlib.decompress} 53 54 with ignoring(ImportError): 55 import snappy 56 compressions['snappy'] = {'compress': snappy.compress, 57 'decompress': snappy.decompress} 58 default_compression = 'snappy' 59 60 with ignoring(ImportError): 61 import lz4 62 compressions['lz4'] = {'compress': lz4.LZ4_compress, 63 'decompress': lz4.LZ4_uncompress} 64 default_compression = 'lz4' 65 66 67 def dumps(msg): 68 """ Transform Python value to bytestream suitable for communication """ 69 small_header = {} 70 71 if isinstance(msg, dict): 72 big = {k: v for k, v in msg.items() 73 if isinstance(v, bytes) and len(v) > 1e6} 74 else: 75 big = False 76 if big: 77 small = {k: v for k, v in msg.items() if k not in big} 78 else: 79 small = msg 80 81 frames = dumps_msgpack(small) 82 if big: 83 frames += dumps_big_byte_dict(big) 84 85 return frames 86 87 88 def loads(frames): 89 """ Transform bytestream back into Python value """ 90 header, payload, frames = frames[0], frames[1], frames[2:] 91 msg = loads_msgpack(header, payload) 92 93 if frames: 94 big = loads_big_byte_dict(*frames) 95 msg.update(big) 96 97 return msg 98 99 100 def byte_sample(b, size, n): 101 """ Sample a bytestring from many locations """ 102 starts = [random.randint(0, len(b) - size) for j in range(n)] 103 ends = [] 104 for i, start in enumerate(starts[:-1]): 105 ends.append(min(start + size, starts[i + 1])) 106 ends.append(starts[-1] + size) 107 108 return b''.join([b[start:end] for start, end in zip(starts, ends)]) 109 110 111 def maybe_compress(payload, compression=default_compression, min_size=1e4, 112 sample_size=1e4, nsamples=5): 113 """ Maybe compress payload 114 115 1. We don't compress small messages 116 2. We sample the payload in a few spots, compress that, and if it doesn't 117 do any good we return the original 118 3. We then compress the full original, it it doesn't compress well then we 119 return the original 120 4. 
We return the compressed result 121 """ 122 if not compression: 123 return None, payload 124 if len(payload) < min_size: 125 return None, payload 126 127 min_size = int(min_size) 128 sample_size = int(sample_size) 129 130 compress = compressions[compression]['compress'] 131 132 # Compress a sample, return original if not very compressed 133 sample = byte_sample(payload, sample_size, nsamples) 134 if len(compress(sample)) > 0.9 * len(sample): # not very compressible 135 return None, payload 136 137 compressed = compress(payload) 138 if len(compressed) > 0.9 * len(payload): # not very compressible 139 return None, payload 140 141 return compression, compress(payload) 142 143 144 def dumps_msgpack(msg): 145 """ Dump msg into header and payload, both bytestrings 146 147 All of the message must be msgpack encodable 148 149 See Also: 150 loads_msgpack 151 """ 152 header = {} 153 payload = msgpack.dumps(msg, use_bin_type=True) 154 155 fmt, payload = maybe_compress(payload) 156 if fmt: 157 header['compression'] = fmt 158 159 if header: 160 header_bytes = msgpack.dumps(header, use_bin_type=True) 161 else: 162 header_bytes = b'' 163 164 return [header_bytes, payload] 165 166 167 def loads_msgpack(header, payload): 168 """ Read msgpack header and payload back to Python object 169 170 See Also: 171 dumps_msgpack 172 """ 173 if header: 174 header = msgpack.loads(header, encoding='utf8') 175 else: 176 header = {} 177 178 if header.get('compression'): 179 try: 180 decompress = compressions[header['compression']]['decompress'] 181 payload = decompress(payload) 182 except KeyError: 183 raise ValueError("Data is compressed as %s but we don't have this" 184 " installed" % header['compression'].decode()) 185 186 return msgpack.loads(payload, encoding='utf8') 187 188 189 def dumps_big_byte_dict(d): 190 """ Serialize large byte dictionary to sequence of frames 191 192 The input must be a dictionary and all values of that dictionary must be 193 bytestrings. These should probably be large. 
194 195 Returns a sequence of frames, one header followed by each of the values 196 197 See Also: 198 loads_big_byte_dict 199 """ 200 assert isinstance(d, dict) and all(isinstance(v, bytes) for v in d.values()) 201 shards = {} 202 for k, v in list(d.items()): 203 if len(v) >= 2**31: 204 L = [] 205 for i, j in enumerate(range(0, len(v), 2**30)): 206 key = '.shard-%d-%s' % (i, k) 207 d[key] = v[j: j + 2**30] 208 L.append(key) 209 del d[k] 210 shards[k] = L 211 212 keys, values = zip(*d.items()) 213 214 compress = compressions[default_compression]['compress'] 215 compression = [] 216 values2 = [] 217 for v in values: 218 fmt, vv = maybe_compress(v) 219 compression.append(fmt) 220 values2.append(vv) 221 222 header = {'encoding': 'big-byte-dict', 223 'keys': keys, 224 'compression': compression} 225 if shards: 226 header['shards'] = shards 227 228 return [msgpack.dumps(header, use_bin_type=True)] + values2 229 230 231 def loads_big_byte_dict(header, *values): 232 """ Deserialize big-byte frames to large byte dictionary 233 234 See Also: 235 dumps_big_byte_dict 236 """ 237 header = msgpack.loads(header, encoding='utf8') 238 239 values2 = [compressions[c]['decompress'](v) 240 for c, v in zip(header['compression'], values)] 241 result = dict(zip(header['keys'], values2)) 242 243 for k, keys in header.get('shards', {}).items(): 244 result[k] = b''.join(result.pop(kk) for kk in keys) 245 return result 246 [end of distributed/protocol.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/distributed/protocol.py b/distributed/protocol.py --- a/distributed/protocol.py +++ b/distributed/protocol.py @@ -123,6 +123,8 @@ return None, payload if len(payload) < min_size: return None, payload + if len(payload) > 2**31: + return None, payload min_size = int(min_size) sample_size = int(sample_size)
{"golden_diff": "diff --git a/distributed/protocol.py b/distributed/protocol.py\n--- a/distributed/protocol.py\n+++ b/distributed/protocol.py\n@@ -123,6 +123,8 @@\n return None, payload\n if len(payload) < min_size:\n return None, payload\n+ if len(payload) > 2**31:\n+ return None, payload\n \n min_size = int(min_size)\n sample_size = int(sample_size)\n", "issue": "OverflowError when sending large sparse arrays\nI don't yet have a small reproducible example, but I can make this happen every time I try to collect many large sparse arrays. I do have a notebook that will produce it though, and can make that available. The traceback:\n\n```\nTraceback (most recent call last):\n File \"/home/jcrist/miniconda/envs/dask_learn/lib/python2.7/site-packages/distributed/core.py\", line 266, in write\n frames = protocol.dumps(msg)\n File \"/home/jcrist/miniconda/envs/dask_learn/lib/python2.7/site-packages/distributed/protocol.py\", line 81, in dumps\n frames = dumps_msgpack(small)\n File \"/home/jcrist/miniconda/envs/dask_learn/lib/python2.7/site-packages/distributed/protocol.py\", line 155, in dumps_msgpack\n fmt, payload = maybe_compress(payload)\n File \"/home/jcrist/miniconda/envs/dask_learn/lib/python2.7/site-packages/distributed/protocol.py\", line 137, in maybe_compress\n compressed = compress(payload)\nOverflowError: size does not fit in an int\n```\n\nA few notes:\n- Each array is roughly `675000 x 745`, and ~1% dense. The total bytes for indices + indptr + data is ~40MB each.\n- I can get each array individually, so it's not a problem with a chunk being too large\n- The error appears only when I'm collecting enough at once (for my size, 39 and and lower works fine).\n- At 41 arrays I get the above error, 40 arrays gives me a different (but probably related) error:\n\n```\n---------------------------------------------------------------------------\nValueError Traceback (most recent call last)\n<ipython-input-55-7b87709b6c67> in <module>()\n----> 1 res = t.compute()\n\n/home/jcrist/dask/dask/base.pyc in compute(self, **kwargs)\n 84 Extra keywords to forward to the scheduler ``get`` function.\n 85 \"\"\"\n---> 86 return compute(self, **kwargs)[0]\n 87 \n 88 @classmethod\n\n/home/jcrist/dask/dask/base.pyc in compute(*args, **kwargs)\n 177 dsk = merge(var.dask for var in variables)\n 178 keys = [var._keys() for var in variables]\n--> 179 results = get(dsk, keys, **kwargs)\n 180 \n 181 results_iter = iter(results)\n\n/home/jcrist/miniconda/envs/dask_learn/lib/python2.7/site-packages/distributed/executor.pyc in get(self, dsk, keys, **kwargs)\n 1008 \n 1009 if status == 'error':\n-> 1010 raise result\n 1011 else:\n 1012 return result\n\nValueError: corrupt input at byte 2\n```\n\n", "before_files": [{"content": "\"\"\"\nThe distributed message protocol consists of the following parts:\n\n1. The length of the header, stored as a uint32\n2. The header, stored as msgpack.\n If there are no fields in the header then we skip it entirely.\n3. The payload, stored as possibly compressed msgpack\n4. A sentinel value\n\n**Header**\n\nThe Header contains the following fields:\n\n* **compression**: string, optional\n One of the following: ``'snappy', 'lz4', 'zlib'`` or missing for None\n\n**Payload**\n\nThe payload is any msgpack serializable value. It may be compressed based\non the header.\n\n**Sentinel**\n\nWe often terminate each message with a sentinel value. 
This happens\noutside of this module though and is not baked in.\n\"\"\"\nfrom __future__ import print_function, division, absolute_import\n\nimport random\nimport struct\n\ntry:\n import pandas.msgpack as msgpack\nexcept ImportError:\n import msgpack\n\nfrom toolz import first, keymap, identity, merge\n\nfrom .utils import ignoring\nfrom .compatibility import unicode\n\n\ncompressions = {None: {'compress': identity,\n 'decompress': identity}}\n\ndefault_compression = None\n\n\nwith ignoring(ImportError):\n import zlib\n compressions['zlib'] = {'compress': zlib.compress,\n 'decompress': zlib.decompress}\n\nwith ignoring(ImportError):\n import snappy\n compressions['snappy'] = {'compress': snappy.compress,\n 'decompress': snappy.decompress}\n default_compression = 'snappy'\n\nwith ignoring(ImportError):\n import lz4\n compressions['lz4'] = {'compress': lz4.LZ4_compress,\n 'decompress': lz4.LZ4_uncompress}\n default_compression = 'lz4'\n\n\ndef dumps(msg):\n \"\"\" Transform Python value to bytestream suitable for communication \"\"\"\n small_header = {}\n\n if isinstance(msg, dict):\n big = {k: v for k, v in msg.items()\n if isinstance(v, bytes) and len(v) > 1e6}\n else:\n big = False\n if big:\n small = {k: v for k, v in msg.items() if k not in big}\n else:\n small = msg\n\n frames = dumps_msgpack(small)\n if big:\n frames += dumps_big_byte_dict(big)\n\n return frames\n\n\ndef loads(frames):\n \"\"\" Transform bytestream back into Python value \"\"\"\n header, payload, frames = frames[0], frames[1], frames[2:]\n msg = loads_msgpack(header, payload)\n\n if frames:\n big = loads_big_byte_dict(*frames)\n msg.update(big)\n\n return msg\n\n\ndef byte_sample(b, size, n):\n \"\"\" Sample a bytestring from many locations \"\"\"\n starts = [random.randint(0, len(b) - size) for j in range(n)]\n ends = []\n for i, start in enumerate(starts[:-1]):\n ends.append(min(start + size, starts[i + 1]))\n ends.append(starts[-1] + size)\n\n return b''.join([b[start:end] for start, end in zip(starts, ends)])\n\n\ndef maybe_compress(payload, compression=default_compression, min_size=1e4,\n sample_size=1e4, nsamples=5):\n \"\"\" Maybe compress payload\n\n 1. We don't compress small messages\n 2. We sample the payload in a few spots, compress that, and if it doesn't\n do any good we return the original\n 3. We then compress the full original, it it doesn't compress well then we\n return the original\n 4. 
We return the compressed result\n \"\"\"\n if not compression:\n return None, payload\n if len(payload) < min_size:\n return None, payload\n\n min_size = int(min_size)\n sample_size = int(sample_size)\n\n compress = compressions[compression]['compress']\n\n # Compress a sample, return original if not very compressed\n sample = byte_sample(payload, sample_size, nsamples)\n if len(compress(sample)) > 0.9 * len(sample): # not very compressible\n return None, payload\n\n compressed = compress(payload)\n if len(compressed) > 0.9 * len(payload): # not very compressible\n return None, payload\n\n return compression, compress(payload)\n\n\ndef dumps_msgpack(msg):\n \"\"\" Dump msg into header and payload, both bytestrings\n\n All of the message must be msgpack encodable\n\n See Also:\n loads_msgpack\n \"\"\"\n header = {}\n payload = msgpack.dumps(msg, use_bin_type=True)\n\n fmt, payload = maybe_compress(payload)\n if fmt:\n header['compression'] = fmt\n\n if header:\n header_bytes = msgpack.dumps(header, use_bin_type=True)\n else:\n header_bytes = b''\n\n return [header_bytes, payload]\n\n\ndef loads_msgpack(header, payload):\n \"\"\" Read msgpack header and payload back to Python object\n\n See Also:\n dumps_msgpack\n \"\"\"\n if header:\n header = msgpack.loads(header, encoding='utf8')\n else:\n header = {}\n\n if header.get('compression'):\n try:\n decompress = compressions[header['compression']]['decompress']\n payload = decompress(payload)\n except KeyError:\n raise ValueError(\"Data is compressed as %s but we don't have this\"\n \" installed\" % header['compression'].decode())\n\n return msgpack.loads(payload, encoding='utf8')\n\n\ndef dumps_big_byte_dict(d):\n \"\"\" Serialize large byte dictionary to sequence of frames\n\n The input must be a dictionary and all values of that dictionary must be\n bytestrings. These should probably be large.\n\n Returns a sequence of frames, one header followed by each of the values\n\n See Also:\n loads_big_byte_dict\n \"\"\"\n assert isinstance(d, dict) and all(isinstance(v, bytes) for v in d.values())\n shards = {}\n for k, v in list(d.items()):\n if len(v) >= 2**31:\n L = []\n for i, j in enumerate(range(0, len(v), 2**30)):\n key = '.shard-%d-%s' % (i, k)\n d[key] = v[j: j + 2**30]\n L.append(key)\n del d[k]\n shards[k] = L\n\n keys, values = zip(*d.items())\n\n compress = compressions[default_compression]['compress']\n compression = []\n values2 = []\n for v in values:\n fmt, vv = maybe_compress(v)\n compression.append(fmt)\n values2.append(vv)\n\n header = {'encoding': 'big-byte-dict',\n 'keys': keys,\n 'compression': compression}\n if shards:\n header['shards'] = shards\n\n return [msgpack.dumps(header, use_bin_type=True)] + values2\n\n\ndef loads_big_byte_dict(header, *values):\n \"\"\" Deserialize big-byte frames to large byte dictionary\n\n See Also:\n dumps_big_byte_dict\n \"\"\"\n header = msgpack.loads(header, encoding='utf8')\n\n values2 = [compressions[c]['decompress'](v)\n for c, v in zip(header['compression'], values)]\n result = dict(zip(header['keys'], values2))\n\n for k, keys in header.get('shards', {}).items():\n result[k] = b''.join(result.pop(kk) for kk in keys)\n return result\n", "path": "distributed/protocol.py"}]}
num_tokens_prompt: 3,546
num_tokens_diff: 103
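The golden diff above guards `maybe_compress` against payloads larger than 2**31 bytes, which is what triggers the `OverflowError: size does not fit in an int` inside the snappy bindings in the reported traceback. Below is a minimal sketch of that guard in isolation; it is not the library code (the byte-sampling step is omitted, the function name is made up, and the `compress` callable is passed in explicitly as an assumption), it only shows where the new size check sits relative to the existing minimum-size and compressibility checks.

```python
import zlib

def maybe_compress_sketch(payload, compress=zlib.compress, min_size=10_000):
    # Simplified stand-in for distributed.protocol.maybe_compress.
    # Tiny payloads: not worth the compression overhead.
    if len(payload) < min_size:
        return None, payload
    # Huge payloads: the compressor bindings take an int-sized length,
    # so anything past 2**31 bytes fails -- return the data uncompressed.
    if len(payload) > 2**31:
        return None, payload
    compressed = compress(payload)
    # Not very compressible: keep the original bytes.
    if len(compressed) > 0.9 * len(payload):
        return None, payload
    return "compressed", compressed

fmt, data = maybe_compress_sketch(b"a" * 50_000)  # -> ("compressed", ...)
```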
problem_id: gh_patches_debug_16958
source: rasdani/github-patches
task_type: git_diff
in_source_id: pytorch__ignite-2914
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Minor Bug | metrics.ssim, produces empty tensors in update function ## 🐛 Bug description In line 165 ``` output_list = [outputs[x * y_pred.size(0) : (x + 1) * y_pred.size(0)] for x in range(len(outputs))] ``` the list comprehension produces a list with `len = (Batch_size * 5)`, where only the first 5 elements are valid and correspond to stacked `[y_pred, y, y_pred * y_pred, y * y, y_pred * y]` of all the batches, in cases where the batch size is greater than one the elements with index>4 are empty `torch.Tensors` with shape `(0, C, H, W)` . ### Solution This bug neither affects the output, nor consumes a lot of RAM, but I thought I should point it out. The fix for this is pretty simple and you only need to divide the len of outputs by the batch size. ``` output_list = [outputs[x * y_pred.size(0) : (x + 1) * y_pred.size(0)] for x in range(int(len(outputs)/y_pred.size(0)))] # len(outputs) is B*5 so we need to divide it by B so it's only 5 -> [y_pred, y, y_pred * y_pred, y * y, y_pred * y] ``` </issue> <code> [start of ignite/metrics/ssim.py] 1 from typing import Callable, Sequence, Union 2 3 import torch 4 import torch.nn.functional as F 5 6 from ignite.exceptions import NotComputableError 7 from ignite.metrics.metric import Metric, reinit__is_reduced, sync_all_reduce 8 9 __all__ = ["SSIM"] 10 11 12 class SSIM(Metric): 13 """ 14 Computes Structual Similarity Index Measure 15 16 - ``update`` must receive output of the form ``(y_pred, y)``. 17 18 Args: 19 data_range: Range of the image. Typically, ``1.0`` or ``255``. 20 kernel_size: Size of the kernel. Default: (11, 11) 21 sigma: Standard deviation of the gaussian kernel. 22 Argument is used if ``gaussian=True``. Default: (1.5, 1.5) 23 k1: Parameter of SSIM. Default: 0.01 24 k2: Parameter of SSIM. Default: 0.03 25 gaussian: ``True`` to use gaussian kernel, ``False`` to use uniform kernel 26 output_transform: A callable that is used to transform the 27 :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the 28 form expected by the metric. 29 device: specifies which device updates are accumulated on. Setting the metric's 30 device to be the same as your ``update`` arguments ensures the ``update`` method is non-blocking. By 31 default, CPU. 32 33 Examples: 34 To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine. 35 The output of the engine's ``process_function`` needs to be in the format of 36 ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``. If not, ``output_tranform`` can be added 37 to the metric to transform the output into the form expected by the metric. 38 39 ``y_pred`` and ``y`` can be un-normalized or normalized image tensors. Depending on that, the user might need 40 to adjust ``data_range``. ``y_pred`` and ``y`` should have the same shape. 41 42 For more information on how metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`. 43 44 .. include:: defaults.rst 45 :start-after: :orphan: 46 47 .. testcode:: 48 49 metric = SSIM(data_range=1.0) 50 metric.attach(default_evaluator, 'ssim') 51 preds = torch.rand([4, 3, 16, 16]) 52 target = preds * 0.75 53 state = default_evaluator.run([[preds, target]]) 54 print(state.metrics['ssim']) 55 56 .. testoutput:: 57 58 0.9218971... 59 60 .. 
versionadded:: 0.4.2 61 """ 62 63 def __init__( 64 self, 65 data_range: Union[int, float], 66 kernel_size: Union[int, Sequence[int]] = (11, 11), 67 sigma: Union[float, Sequence[float]] = (1.5, 1.5), 68 k1: float = 0.01, 69 k2: float = 0.03, 70 gaussian: bool = True, 71 output_transform: Callable = lambda x: x, 72 device: Union[str, torch.device] = torch.device("cpu"), 73 ): 74 if isinstance(kernel_size, int): 75 self.kernel_size: Sequence[int] = [kernel_size, kernel_size] 76 elif isinstance(kernel_size, Sequence): 77 self.kernel_size = kernel_size 78 else: 79 raise ValueError("Argument kernel_size should be either int or a sequence of int.") 80 81 if isinstance(sigma, float): 82 self.sigma: Sequence[float] = [sigma, sigma] 83 elif isinstance(sigma, Sequence): 84 self.sigma = sigma 85 else: 86 raise ValueError("Argument sigma should be either float or a sequence of float.") 87 88 if any(x % 2 == 0 or x <= 0 for x in self.kernel_size): 89 raise ValueError(f"Expected kernel_size to have odd positive number. Got {kernel_size}.") 90 91 if any(y <= 0 for y in self.sigma): 92 raise ValueError(f"Expected sigma to have positive number. Got {sigma}.") 93 94 super(SSIM, self).__init__(output_transform=output_transform, device=device) 95 self.gaussian = gaussian 96 self.c1 = (k1 * data_range) ** 2 97 self.c2 = (k2 * data_range) ** 2 98 self.pad_h = (self.kernel_size[0] - 1) // 2 99 self.pad_w = (self.kernel_size[1] - 1) // 2 100 self._kernel = self._gaussian_or_uniform_kernel(kernel_size=self.kernel_size, sigma=self.sigma) 101 102 @reinit__is_reduced 103 def reset(self) -> None: 104 self._sum_of_ssim = torch.tensor(0.0, dtype=torch.float64, device=self._device) 105 self._num_examples = 0 106 self._kernel = self._gaussian_or_uniform_kernel(kernel_size=self.kernel_size, sigma=self.sigma) 107 108 def _uniform(self, kernel_size: int) -> torch.Tensor: 109 max, min = 2.5, -2.5 110 ksize_half = (kernel_size - 1) * 0.5 111 kernel = torch.linspace(-ksize_half, ksize_half, steps=kernel_size, device=self._device) 112 for i, j in enumerate(kernel): 113 if min <= j <= max: 114 kernel[i] = 1 / (max - min) 115 else: 116 kernel[i] = 0 117 118 return kernel.unsqueeze(dim=0) # (1, kernel_size) 119 120 def _gaussian(self, kernel_size: int, sigma: float) -> torch.Tensor: 121 ksize_half = (kernel_size - 1) * 0.5 122 kernel = torch.linspace(-ksize_half, ksize_half, steps=kernel_size, device=self._device) 123 gauss = torch.exp(-0.5 * (kernel / sigma).pow(2)) 124 return (gauss / gauss.sum()).unsqueeze(dim=0) # (1, kernel_size) 125 126 def _gaussian_or_uniform_kernel(self, kernel_size: Sequence[int], sigma: Sequence[float]) -> torch.Tensor: 127 if self.gaussian: 128 kernel_x = self._gaussian(kernel_size[0], sigma[0]) 129 kernel_y = self._gaussian(kernel_size[1], sigma[1]) 130 else: 131 kernel_x = self._uniform(kernel_size[0]) 132 kernel_y = self._uniform(kernel_size[1]) 133 134 return torch.matmul(kernel_x.t(), kernel_y) # (kernel_size, 1) * (1, kernel_size) 135 136 @reinit__is_reduced 137 def update(self, output: Sequence[torch.Tensor]) -> None: 138 y_pred, y = output[0].detach(), output[1].detach() 139 140 if y_pred.dtype != y.dtype: 141 raise TypeError( 142 f"Expected y_pred and y to have the same data type. Got y_pred: {y_pred.dtype} and y: {y.dtype}." 143 ) 144 145 if y_pred.shape != y.shape: 146 raise ValueError( 147 f"Expected y_pred and y to have the same shape. Got y_pred: {y_pred.shape} and y: {y.shape}." 
148 ) 149 150 if len(y_pred.shape) != 4 or len(y.shape) != 4: 151 raise ValueError( 152 f"Expected y_pred and y to have BxCxHxW shape. Got y_pred: {y_pred.shape} and y: {y.shape}." 153 ) 154 155 channel = y_pred.size(1) 156 if len(self._kernel.shape) < 4: 157 self._kernel = self._kernel.expand(channel, 1, -1, -1).to(device=y_pred.device) 158 159 y_pred = F.pad(y_pred, [self.pad_w, self.pad_w, self.pad_h, self.pad_h], mode="reflect") 160 y = F.pad(y, [self.pad_w, self.pad_w, self.pad_h, self.pad_h], mode="reflect") 161 162 input_list = torch.cat([y_pred, y, y_pred * y_pred, y * y, y_pred * y]) 163 outputs = F.conv2d(input_list, self._kernel, groups=channel) 164 165 output_list = [outputs[x * y_pred.size(0) : (x + 1) * y_pred.size(0)] for x in range(len(outputs))] 166 167 mu_pred_sq = output_list[0].pow(2) 168 mu_target_sq = output_list[1].pow(2) 169 mu_pred_target = output_list[0] * output_list[1] 170 171 sigma_pred_sq = output_list[2] - mu_pred_sq 172 sigma_target_sq = output_list[3] - mu_target_sq 173 sigma_pred_target = output_list[4] - mu_pred_target 174 175 a1 = 2 * mu_pred_target + self.c1 176 a2 = 2 * sigma_pred_target + self.c2 177 b1 = mu_pred_sq + mu_target_sq + self.c1 178 b2 = sigma_pred_sq + sigma_target_sq + self.c2 179 180 ssim_idx = (a1 * a2) / (b1 * b2) 181 self._sum_of_ssim += torch.mean(ssim_idx, (1, 2, 3), dtype=torch.float64).sum().to(self._device) 182 183 self._num_examples += y.shape[0] 184 185 @sync_all_reduce("_sum_of_ssim", "_num_examples") 186 def compute(self) -> float: 187 if self._num_examples == 0: 188 raise NotComputableError("SSIM must have at least one example before it can be computed.") 189 return (self._sum_of_ssim / self._num_examples).item() 190 [end of ignite/metrics/ssim.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ignite/metrics/ssim.py b/ignite/metrics/ssim.py --- a/ignite/metrics/ssim.py +++ b/ignite/metrics/ssim.py @@ -159,10 +159,10 @@ y_pred = F.pad(y_pred, [self.pad_w, self.pad_w, self.pad_h, self.pad_h], mode="reflect") y = F.pad(y, [self.pad_w, self.pad_w, self.pad_h, self.pad_h], mode="reflect") - input_list = torch.cat([y_pred, y, y_pred * y_pred, y * y, y_pred * y]) - outputs = F.conv2d(input_list, self._kernel, groups=channel) - - output_list = [outputs[x * y_pred.size(0) : (x + 1) * y_pred.size(0)] for x in range(len(outputs))] + input_list = [y_pred, y, y_pred * y_pred, y * y, y_pred * y] + outputs = F.conv2d(torch.cat(input_list), self._kernel, groups=channel) + batch_size = y_pred.size(0) + output_list = [outputs[x * batch_size : (x + 1) * batch_size] for x in range(len(input_list))] mu_pred_sq = output_list[0].pow(2) mu_target_sq = output_list[1].pow(2)
{"golden_diff": "diff --git a/ignite/metrics/ssim.py b/ignite/metrics/ssim.py\n--- a/ignite/metrics/ssim.py\n+++ b/ignite/metrics/ssim.py\n@@ -159,10 +159,10 @@\n y_pred = F.pad(y_pred, [self.pad_w, self.pad_w, self.pad_h, self.pad_h], mode=\"reflect\")\n y = F.pad(y, [self.pad_w, self.pad_w, self.pad_h, self.pad_h], mode=\"reflect\")\n \n- input_list = torch.cat([y_pred, y, y_pred * y_pred, y * y, y_pred * y])\n- outputs = F.conv2d(input_list, self._kernel, groups=channel)\n-\n- output_list = [outputs[x * y_pred.size(0) : (x + 1) * y_pred.size(0)] for x in range(len(outputs))]\n+ input_list = [y_pred, y, y_pred * y_pred, y * y, y_pred * y]\n+ outputs = F.conv2d(torch.cat(input_list), self._kernel, groups=channel)\n+ batch_size = y_pred.size(0)\n+ output_list = [outputs[x * batch_size : (x + 1) * batch_size] for x in range(len(input_list))]\n \n mu_pred_sq = output_list[0].pow(2)\n mu_target_sq = output_list[1].pow(2)\n", "issue": "Minor Bug | metrics.ssim, produces empty tensors in update function\n## \ud83d\udc1b Bug description\r\n\r\nIn line 165\r\n```\r\noutput_list = [outputs[x * y_pred.size(0) : (x + 1) * y_pred.size(0)] for x in range(len(outputs))]\r\n```\r\nthe list comprehension produces a list with `len = (Batch_size * 5)`, where only the first 5 elements are valid and correspond to stacked `[y_pred, y, y_pred * y_pred, y * y, y_pred * y]` of all the batches, in cases where the batch size is greater than one the elements with index>4 are empty `torch.Tensors` with shape `(0, C, H, W)` .\r\n\r\n### Solution\r\nThis bug neither affects the output, nor consumes a lot of RAM, but I thought I should point it out.\r\nThe fix for this is pretty simple and you only need to divide the len of outputs by the batch size.\r\n```\r\noutput_list = [outputs[x * y_pred.size(0) : (x + 1) * y_pred.size(0)] for x in range(int(len(outputs)/y_pred.size(0)))] # len(outputs) is B*5 so we need to divide it by B so it's only 5 -> [y_pred, y, y_pred * y_pred, y * y, y_pred * y]\r\n```\r\n\n", "before_files": [{"content": "from typing import Callable, Sequence, Union\n\nimport torch\nimport torch.nn.functional as F\n\nfrom ignite.exceptions import NotComputableError\nfrom ignite.metrics.metric import Metric, reinit__is_reduced, sync_all_reduce\n\n__all__ = [\"SSIM\"]\n\n\nclass SSIM(Metric):\n \"\"\"\n Computes Structual Similarity Index Measure\n\n - ``update`` must receive output of the form ``(y_pred, y)``.\n\n Args:\n data_range: Range of the image. Typically, ``1.0`` or ``255``.\n kernel_size: Size of the kernel. Default: (11, 11)\n sigma: Standard deviation of the gaussian kernel.\n Argument is used if ``gaussian=True``. Default: (1.5, 1.5)\n k1: Parameter of SSIM. Default: 0.01\n k2: Parameter of SSIM. Default: 0.03\n gaussian: ``True`` to use gaussian kernel, ``False`` to use uniform kernel\n output_transform: A callable that is used to transform the\n :class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the\n form expected by the metric.\n device: specifies which device updates are accumulated on. Setting the metric's\n device to be the same as your ``update`` arguments ensures the ``update`` method is non-blocking. By\n default, CPU.\n\n Examples:\n To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.\n The output of the engine's ``process_function`` needs to be in the format of\n ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``. 
If not, ``output_tranform`` can be added\n to the metric to transform the output into the form expected by the metric.\n\n ``y_pred`` and ``y`` can be un-normalized or normalized image tensors. Depending on that, the user might need\n to adjust ``data_range``. ``y_pred`` and ``y`` should have the same shape.\n\n For more information on how metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`.\n\n .. include:: defaults.rst\n :start-after: :orphan:\n\n .. testcode::\n\n metric = SSIM(data_range=1.0)\n metric.attach(default_evaluator, 'ssim')\n preds = torch.rand([4, 3, 16, 16])\n target = preds * 0.75\n state = default_evaluator.run([[preds, target]])\n print(state.metrics['ssim'])\n\n .. testoutput::\n\n 0.9218971...\n\n .. versionadded:: 0.4.2\n \"\"\"\n\n def __init__(\n self,\n data_range: Union[int, float],\n kernel_size: Union[int, Sequence[int]] = (11, 11),\n sigma: Union[float, Sequence[float]] = (1.5, 1.5),\n k1: float = 0.01,\n k2: float = 0.03,\n gaussian: bool = True,\n output_transform: Callable = lambda x: x,\n device: Union[str, torch.device] = torch.device(\"cpu\"),\n ):\n if isinstance(kernel_size, int):\n self.kernel_size: Sequence[int] = [kernel_size, kernel_size]\n elif isinstance(kernel_size, Sequence):\n self.kernel_size = kernel_size\n else:\n raise ValueError(\"Argument kernel_size should be either int or a sequence of int.\")\n\n if isinstance(sigma, float):\n self.sigma: Sequence[float] = [sigma, sigma]\n elif isinstance(sigma, Sequence):\n self.sigma = sigma\n else:\n raise ValueError(\"Argument sigma should be either float or a sequence of float.\")\n\n if any(x % 2 == 0 or x <= 0 for x in self.kernel_size):\n raise ValueError(f\"Expected kernel_size to have odd positive number. Got {kernel_size}.\")\n\n if any(y <= 0 for y in self.sigma):\n raise ValueError(f\"Expected sigma to have positive number. 
Got {sigma}.\")\n\n super(SSIM, self).__init__(output_transform=output_transform, device=device)\n self.gaussian = gaussian\n self.c1 = (k1 * data_range) ** 2\n self.c2 = (k2 * data_range) ** 2\n self.pad_h = (self.kernel_size[0] - 1) // 2\n self.pad_w = (self.kernel_size[1] - 1) // 2\n self._kernel = self._gaussian_or_uniform_kernel(kernel_size=self.kernel_size, sigma=self.sigma)\n\n @reinit__is_reduced\n def reset(self) -> None:\n self._sum_of_ssim = torch.tensor(0.0, dtype=torch.float64, device=self._device)\n self._num_examples = 0\n self._kernel = self._gaussian_or_uniform_kernel(kernel_size=self.kernel_size, sigma=self.sigma)\n\n def _uniform(self, kernel_size: int) -> torch.Tensor:\n max, min = 2.5, -2.5\n ksize_half = (kernel_size - 1) * 0.5\n kernel = torch.linspace(-ksize_half, ksize_half, steps=kernel_size, device=self._device)\n for i, j in enumerate(kernel):\n if min <= j <= max:\n kernel[i] = 1 / (max - min)\n else:\n kernel[i] = 0\n\n return kernel.unsqueeze(dim=0) # (1, kernel_size)\n\n def _gaussian(self, kernel_size: int, sigma: float) -> torch.Tensor:\n ksize_half = (kernel_size - 1) * 0.5\n kernel = torch.linspace(-ksize_half, ksize_half, steps=kernel_size, device=self._device)\n gauss = torch.exp(-0.5 * (kernel / sigma).pow(2))\n return (gauss / gauss.sum()).unsqueeze(dim=0) # (1, kernel_size)\n\n def _gaussian_or_uniform_kernel(self, kernel_size: Sequence[int], sigma: Sequence[float]) -> torch.Tensor:\n if self.gaussian:\n kernel_x = self._gaussian(kernel_size[0], sigma[0])\n kernel_y = self._gaussian(kernel_size[1], sigma[1])\n else:\n kernel_x = self._uniform(kernel_size[0])\n kernel_y = self._uniform(kernel_size[1])\n\n return torch.matmul(kernel_x.t(), kernel_y) # (kernel_size, 1) * (1, kernel_size)\n\n @reinit__is_reduced\n def update(self, output: Sequence[torch.Tensor]) -> None:\n y_pred, y = output[0].detach(), output[1].detach()\n\n if y_pred.dtype != y.dtype:\n raise TypeError(\n f\"Expected y_pred and y to have the same data type. Got y_pred: {y_pred.dtype} and y: {y.dtype}.\"\n )\n\n if y_pred.shape != y.shape:\n raise ValueError(\n f\"Expected y_pred and y to have the same shape. Got y_pred: {y_pred.shape} and y: {y.shape}.\"\n )\n\n if len(y_pred.shape) != 4 or len(y.shape) != 4:\n raise ValueError(\n f\"Expected y_pred and y to have BxCxHxW shape. 
Got y_pred: {y_pred.shape} and y: {y.shape}.\"\n )\n\n channel = y_pred.size(1)\n if len(self._kernel.shape) < 4:\n self._kernel = self._kernel.expand(channel, 1, -1, -1).to(device=y_pred.device)\n\n y_pred = F.pad(y_pred, [self.pad_w, self.pad_w, self.pad_h, self.pad_h], mode=\"reflect\")\n y = F.pad(y, [self.pad_w, self.pad_w, self.pad_h, self.pad_h], mode=\"reflect\")\n\n input_list = torch.cat([y_pred, y, y_pred * y_pred, y * y, y_pred * y])\n outputs = F.conv2d(input_list, self._kernel, groups=channel)\n\n output_list = [outputs[x * y_pred.size(0) : (x + 1) * y_pred.size(0)] for x in range(len(outputs))]\n\n mu_pred_sq = output_list[0].pow(2)\n mu_target_sq = output_list[1].pow(2)\n mu_pred_target = output_list[0] * output_list[1]\n\n sigma_pred_sq = output_list[2] - mu_pred_sq\n sigma_target_sq = output_list[3] - mu_target_sq\n sigma_pred_target = output_list[4] - mu_pred_target\n\n a1 = 2 * mu_pred_target + self.c1\n a2 = 2 * sigma_pred_target + self.c2\n b1 = mu_pred_sq + mu_target_sq + self.c1\n b2 = sigma_pred_sq + sigma_target_sq + self.c2\n\n ssim_idx = (a1 * a2) / (b1 * b2)\n self._sum_of_ssim += torch.mean(ssim_idx, (1, 2, 3), dtype=torch.float64).sum().to(self._device)\n\n self._num_examples += y.shape[0]\n\n @sync_all_reduce(\"_sum_of_ssim\", \"_num_examples\")\n def compute(self) -> float:\n if self._num_examples == 0:\n raise NotComputableError(\"SSIM must have at least one example before it can be computed.\")\n return (self._sum_of_ssim / self._num_examples).item()\n", "path": "ignite/metrics/ssim.py"}]}
num_tokens_prompt: 3,456
num_tokens_diff: 316
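The off-by-factor described in this record is easy to reproduce outside Ignite. The sketch below uses made-up tensor sizes in place of the real convolution outputs of `SSIM.update`; it only demonstrates why iterating over `len(outputs)` yields `5 * batch_size` slices, most of them empty, while the patched version iterates over the five logical groups.

```python
import torch

batch_size, channels = 4, 3
# Stand-in for the concatenated conv outputs of
# [y_pred, y, y_pred * y_pred, y * y, y_pred * y]
outputs = torch.randn(5 * batch_size, channels, 16, 16)

# Buggy slicing: range(len(outputs)) == range(20), so every index
# past 4 slices beyond the tensor and yields an empty (0, C, H, W) view.
buggy = [outputs[x * batch_size:(x + 1) * batch_size] for x in range(len(outputs))]
assert len(buggy) == 20 and buggy[5].shape == (0, channels, 16, 16)

# Patched slicing: only the five logical groups are extracted.
fixed = [outputs[x * batch_size:(x + 1) * batch_size] for x in range(5)]
assert all(t.shape == (batch_size, channels, 16, 16) for t in fixed)
```

The golden diff gets the group count from `len(input_list)`, the Python list of five tensors, rather than hard-coding 5, which amounts to the same thing.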
problem_id: gh_patches_debug_7350
source: rasdani/github-patches
task_type: git_diff
in_source_id: getsentry__sentry-python-1292
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Support for Sanic v21.12.0 Raising custom exceptions in Sanic's latest version (which shouldn't be logged to Sentry), getting **IndexError: pop from empty list** ``` File "/app/.heroku/python/lib/python3.9/site-packages/sentry_sdk/integrations/sanic.py", line 184, in _hub_exit request.ctx._sentry_hub.__exit__(None, None, None) File "/app/.heroku/python/lib/python3.9/site-packages/sentry_sdk/hub.py", line 247, in __exit__ old = self._old_hubs.pop() IndexError: pop from empty list ``` </issue> <code> [start of sentry_sdk/integrations/sanic.py] 1 import sys 2 import weakref 3 from inspect import isawaitable 4 5 from sentry_sdk._compat import urlparse, reraise 6 from sentry_sdk.hub import Hub 7 from sentry_sdk.utils import ( 8 capture_internal_exceptions, 9 event_from_exception, 10 HAS_REAL_CONTEXTVARS, 11 CONTEXTVARS_ERROR_MESSAGE, 12 ) 13 from sentry_sdk.integrations import Integration, DidNotEnable 14 from sentry_sdk.integrations._wsgi_common import RequestExtractor, _filter_headers 15 from sentry_sdk.integrations.logging import ignore_logger 16 17 from sentry_sdk._types import MYPY 18 19 if MYPY: 20 from typing import Any 21 from typing import Callable 22 from typing import Optional 23 from typing import Union 24 from typing import Tuple 25 from typing import Dict 26 27 from sanic.request import Request, RequestParameters 28 29 from sentry_sdk._types import Event, EventProcessor, Hint 30 from sanic.router import Route 31 32 try: 33 from sanic import Sanic, __version__ as SANIC_VERSION 34 from sanic.exceptions import SanicException 35 from sanic.router import Router 36 from sanic.handlers import ErrorHandler 37 except ImportError: 38 raise DidNotEnable("Sanic not installed") 39 40 old_error_handler_lookup = ErrorHandler.lookup 41 old_handle_request = Sanic.handle_request 42 old_router_get = Router.get 43 44 try: 45 # This method was introduced in Sanic v21.9 46 old_startup = Sanic._startup 47 except AttributeError: 48 pass 49 50 51 class SanicIntegration(Integration): 52 identifier = "sanic" 53 version = (0, 0) # type: Tuple[int, ...] 54 55 @staticmethod 56 def setup_once(): 57 # type: () -> None 58 59 try: 60 SanicIntegration.version = tuple(map(int, SANIC_VERSION.split("."))) 61 except (TypeError, ValueError): 62 raise DidNotEnable("Unparsable Sanic version: {}".format(SANIC_VERSION)) 63 64 if SanicIntegration.version < (0, 8): 65 raise DidNotEnable("Sanic 0.8 or newer required.") 66 67 if not HAS_REAL_CONTEXTVARS: 68 # We better have contextvars or we're going to leak state between 69 # requests. 70 raise DidNotEnable( 71 "The sanic integration for Sentry requires Python 3.7+ " 72 " or the aiocontextvars package." + CONTEXTVARS_ERROR_MESSAGE 73 ) 74 75 if SANIC_VERSION.startswith("0.8."): 76 # Sanic 0.8 and older creates a logger named "root" and puts a 77 # stringified version of every exception in there (without exc_info), 78 # which our error deduplication can't detect. 79 # 80 # We explicitly check the version here because it is a very 81 # invasive step to ignore this logger and not necessary in newer 82 # versions at all. 
83 # 84 # https://github.com/huge-success/sanic/issues/1332 85 ignore_logger("root") 86 87 if SanicIntegration.version < (21, 9): 88 _setup_legacy_sanic() 89 return 90 91 _setup_sanic() 92 93 94 class SanicRequestExtractor(RequestExtractor): 95 def content_length(self): 96 # type: () -> int 97 if self.request.body is None: 98 return 0 99 return len(self.request.body) 100 101 def cookies(self): 102 # type: () -> Dict[str, str] 103 return dict(self.request.cookies) 104 105 def raw_data(self): 106 # type: () -> bytes 107 return self.request.body 108 109 def form(self): 110 # type: () -> RequestParameters 111 return self.request.form 112 113 def is_json(self): 114 # type: () -> bool 115 raise NotImplementedError() 116 117 def json(self): 118 # type: () -> Optional[Any] 119 return self.request.json 120 121 def files(self): 122 # type: () -> RequestParameters 123 return self.request.files 124 125 def size_of_file(self, file): 126 # type: (Any) -> int 127 return len(file.body or ()) 128 129 130 def _setup_sanic(): 131 # type: () -> None 132 Sanic._startup = _startup 133 ErrorHandler.lookup = _sentry_error_handler_lookup 134 135 136 def _setup_legacy_sanic(): 137 # type: () -> None 138 Sanic.handle_request = _legacy_handle_request 139 Router.get = _legacy_router_get 140 ErrorHandler.lookup = _sentry_error_handler_lookup 141 142 143 async def _startup(self): 144 # type: (Sanic) -> None 145 # This happens about as early in the lifecycle as possible, just after the 146 # Request object is created. The body has not yet been consumed. 147 self.signal("http.lifecycle.request")(_hub_enter) 148 149 # This happens after the handler is complete. In v21.9 this signal is not 150 # dispatched when there is an exception. Therefore we need to close out 151 # and call _hub_exit from the custom exception handler as well. 152 # See https://github.com/sanic-org/sanic/issues/2297 153 self.signal("http.lifecycle.response")(_hub_exit) 154 155 # This happens inside of request handling immediately after the route 156 # has been identified by the router. 157 self.signal("http.routing.after")(_set_transaction) 158 159 # The above signals need to be declared before this can be called. 
160 await old_startup(self) 161 162 163 async def _hub_enter(request): 164 # type: (Request) -> None 165 hub = Hub.current 166 request.ctx._sentry_do_integration = ( 167 hub.get_integration(SanicIntegration) is not None 168 ) 169 170 if not request.ctx._sentry_do_integration: 171 return 172 173 weak_request = weakref.ref(request) 174 request.ctx._sentry_hub = Hub(hub) 175 request.ctx._sentry_hub.__enter__() 176 177 with request.ctx._sentry_hub.configure_scope() as scope: 178 scope.clear_breadcrumbs() 179 scope.add_event_processor(_make_request_processor(weak_request)) 180 181 182 async def _hub_exit(request, **_): 183 # type: (Request, **Any) -> None 184 request.ctx._sentry_hub.__exit__(None, None, None) 185 186 187 async def _set_transaction(request, route, **kwargs): 188 # type: (Request, Route, **Any) -> None 189 hub = Hub.current 190 if hub.get_integration(SanicIntegration) is not None: 191 with capture_internal_exceptions(): 192 with hub.configure_scope() as scope: 193 route_name = route.name.replace(request.app.name, "").strip(".") 194 scope.transaction = route_name 195 196 197 def _sentry_error_handler_lookup(self, exception, *args, **kwargs): 198 # type: (Any, Exception, *Any, **Any) -> Optional[object] 199 _capture_exception(exception) 200 old_error_handler = old_error_handler_lookup(self, exception, *args, **kwargs) 201 202 if old_error_handler is None: 203 return None 204 205 if Hub.current.get_integration(SanicIntegration) is None: 206 return old_error_handler 207 208 async def sentry_wrapped_error_handler(request, exception): 209 # type: (Request, Exception) -> Any 210 try: 211 response = old_error_handler(request, exception) 212 if isawaitable(response): 213 response = await response 214 return response 215 except Exception: 216 # Report errors that occur in Sanic error handler. These 217 # exceptions will not even show up in Sanic's 218 # `sanic.exceptions` logger. 
219 exc_info = sys.exc_info() 220 _capture_exception(exc_info) 221 reraise(*exc_info) 222 finally: 223 # As mentioned in previous comment in _startup, this can be removed 224 # after https://github.com/sanic-org/sanic/issues/2297 is resolved 225 if SanicIntegration.version >= (21, 9): 226 await _hub_exit(request) 227 228 return sentry_wrapped_error_handler 229 230 231 async def _legacy_handle_request(self, request, *args, **kwargs): 232 # type: (Any, Request, *Any, **Any) -> Any 233 hub = Hub.current 234 if hub.get_integration(SanicIntegration) is None: 235 return old_handle_request(self, request, *args, **kwargs) 236 237 weak_request = weakref.ref(request) 238 239 with Hub(hub) as hub: 240 with hub.configure_scope() as scope: 241 scope.clear_breadcrumbs() 242 scope.add_event_processor(_make_request_processor(weak_request)) 243 244 response = old_handle_request(self, request, *args, **kwargs) 245 if isawaitable(response): 246 response = await response 247 248 return response 249 250 251 def _legacy_router_get(self, *args): 252 # type: (Any, Union[Any, Request]) -> Any 253 rv = old_router_get(self, *args) 254 hub = Hub.current 255 if hub.get_integration(SanicIntegration) is not None: 256 with capture_internal_exceptions(): 257 with hub.configure_scope() as scope: 258 if SanicIntegration.version and SanicIntegration.version >= (21, 3): 259 # Sanic versions above and including 21.3 append the app name to the 260 # route name, and so we need to remove it from Route name so the 261 # transaction name is consistent across all versions 262 sanic_app_name = self.ctx.app.name 263 sanic_route = rv[0].name 264 265 if sanic_route.startswith("%s." % sanic_app_name): 266 # We add a 1 to the len of the sanic_app_name because there is a dot 267 # that joins app name and the route name 268 # Format: app_name.route_name 269 sanic_route = sanic_route[len(sanic_app_name) + 1 :] 270 271 scope.transaction = sanic_route 272 else: 273 scope.transaction = rv[0].__name__ 274 return rv 275 276 277 def _capture_exception(exception): 278 # type: (Union[Tuple[Optional[type], Optional[BaseException], Any], BaseException]) -> None 279 hub = Hub.current 280 integration = hub.get_integration(SanicIntegration) 281 if integration is None: 282 return 283 284 # If an integration is there, a client has to be there. 
285 client = hub.client # type: Any 286 287 with capture_internal_exceptions(): 288 event, hint = event_from_exception( 289 exception, 290 client_options=client.options, 291 mechanism={"type": "sanic", "handled": False}, 292 ) 293 hub.capture_event(event, hint=hint) 294 295 296 def _make_request_processor(weak_request): 297 # type: (Callable[[], Request]) -> EventProcessor 298 def sanic_processor(event, hint): 299 # type: (Event, Optional[Hint]) -> Optional[Event] 300 301 try: 302 if hint and issubclass(hint["exc_info"][0], SanicException): 303 return None 304 except KeyError: 305 pass 306 307 request = weak_request() 308 if request is None: 309 return event 310 311 with capture_internal_exceptions(): 312 extractor = SanicRequestExtractor(request) 313 extractor.extract_into_event(event) 314 315 request_info = event["request"] 316 urlparts = urlparse.urlsplit(request.url) 317 318 request_info["url"] = "%s://%s%s" % ( 319 urlparts.scheme, 320 urlparts.netloc, 321 urlparts.path, 322 ) 323 324 request_info["query_string"] = urlparts.query 325 request_info["method"] = request.method 326 request_info["env"] = {"REMOTE_ADDR": request.remote_addr} 327 request_info["headers"] = _filter_headers(dict(request.headers)) 328 329 return event 330 331 return sanic_processor 332 [end of sentry_sdk/integrations/sanic.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/sentry_sdk/integrations/sanic.py b/sentry_sdk/integrations/sanic.py --- a/sentry_sdk/integrations/sanic.py +++ b/sentry_sdk/integrations/sanic.py @@ -222,7 +222,7 @@ finally: # As mentioned in previous comment in _startup, this can be removed # after https://github.com/sanic-org/sanic/issues/2297 is resolved - if SanicIntegration.version >= (21, 9): + if SanicIntegration.version == (21, 9): await _hub_exit(request) return sentry_wrapped_error_handler
{"golden_diff": "diff --git a/sentry_sdk/integrations/sanic.py b/sentry_sdk/integrations/sanic.py\n--- a/sentry_sdk/integrations/sanic.py\n+++ b/sentry_sdk/integrations/sanic.py\n@@ -222,7 +222,7 @@\n finally:\n # As mentioned in previous comment in _startup, this can be removed\n # after https://github.com/sanic-org/sanic/issues/2297 is resolved\n- if SanicIntegration.version >= (21, 9):\n+ if SanicIntegration.version == (21, 9):\n await _hub_exit(request)\n \n return sentry_wrapped_error_handler\n", "issue": "Support for Sanic v21.12.0\nRaising custom exceptions in Sanic's latest version (which shouldn't be logged to Sentry), getting **IndexError: pop from empty list**\r\n\r\n```\r\nFile \"/app/.heroku/python/lib/python3.9/site-packages/sentry_sdk/integrations/sanic.py\", line 184, in _hub_exit\r\n request.ctx._sentry_hub.__exit__(None, None, None)\r\nFile \"/app/.heroku/python/lib/python3.9/site-packages/sentry_sdk/hub.py\", line 247, in __exit__\r\n old = self._old_hubs.pop()\r\nIndexError: pop from empty list\r\n```\r\n\r\n\n", "before_files": [{"content": "import sys\nimport weakref\nfrom inspect import isawaitable\n\nfrom sentry_sdk._compat import urlparse, reraise\nfrom sentry_sdk.hub import Hub\nfrom sentry_sdk.utils import (\n capture_internal_exceptions,\n event_from_exception,\n HAS_REAL_CONTEXTVARS,\n CONTEXTVARS_ERROR_MESSAGE,\n)\nfrom sentry_sdk.integrations import Integration, DidNotEnable\nfrom sentry_sdk.integrations._wsgi_common import RequestExtractor, _filter_headers\nfrom sentry_sdk.integrations.logging import ignore_logger\n\nfrom sentry_sdk._types import MYPY\n\nif MYPY:\n from typing import Any\n from typing import Callable\n from typing import Optional\n from typing import Union\n from typing import Tuple\n from typing import Dict\n\n from sanic.request import Request, RequestParameters\n\n from sentry_sdk._types import Event, EventProcessor, Hint\n from sanic.router import Route\n\ntry:\n from sanic import Sanic, __version__ as SANIC_VERSION\n from sanic.exceptions import SanicException\n from sanic.router import Router\n from sanic.handlers import ErrorHandler\nexcept ImportError:\n raise DidNotEnable(\"Sanic not installed\")\n\nold_error_handler_lookup = ErrorHandler.lookup\nold_handle_request = Sanic.handle_request\nold_router_get = Router.get\n\ntry:\n # This method was introduced in Sanic v21.9\n old_startup = Sanic._startup\nexcept AttributeError:\n pass\n\n\nclass SanicIntegration(Integration):\n identifier = \"sanic\"\n version = (0, 0) # type: Tuple[int, ...]\n\n @staticmethod\n def setup_once():\n # type: () -> None\n\n try:\n SanicIntegration.version = tuple(map(int, SANIC_VERSION.split(\".\")))\n except (TypeError, ValueError):\n raise DidNotEnable(\"Unparsable Sanic version: {}\".format(SANIC_VERSION))\n\n if SanicIntegration.version < (0, 8):\n raise DidNotEnable(\"Sanic 0.8 or newer required.\")\n\n if not HAS_REAL_CONTEXTVARS:\n # We better have contextvars or we're going to leak state between\n # requests.\n raise DidNotEnable(\n \"The sanic integration for Sentry requires Python 3.7+ \"\n \" or the aiocontextvars package.\" + CONTEXTVARS_ERROR_MESSAGE\n )\n\n if SANIC_VERSION.startswith(\"0.8.\"):\n # Sanic 0.8 and older creates a logger named \"root\" and puts a\n # stringified version of every exception in there (without exc_info),\n # which our error deduplication can't detect.\n #\n # We explicitly check the version here because it is a very\n # invasive step to ignore this logger and not necessary in newer\n # versions at all.\n #\n # 
https://github.com/huge-success/sanic/issues/1332\n ignore_logger(\"root\")\n\n if SanicIntegration.version < (21, 9):\n _setup_legacy_sanic()\n return\n\n _setup_sanic()\n\n\nclass SanicRequestExtractor(RequestExtractor):\n def content_length(self):\n # type: () -> int\n if self.request.body is None:\n return 0\n return len(self.request.body)\n\n def cookies(self):\n # type: () -> Dict[str, str]\n return dict(self.request.cookies)\n\n def raw_data(self):\n # type: () -> bytes\n return self.request.body\n\n def form(self):\n # type: () -> RequestParameters\n return self.request.form\n\n def is_json(self):\n # type: () -> bool\n raise NotImplementedError()\n\n def json(self):\n # type: () -> Optional[Any]\n return self.request.json\n\n def files(self):\n # type: () -> RequestParameters\n return self.request.files\n\n def size_of_file(self, file):\n # type: (Any) -> int\n return len(file.body or ())\n\n\ndef _setup_sanic():\n # type: () -> None\n Sanic._startup = _startup\n ErrorHandler.lookup = _sentry_error_handler_lookup\n\n\ndef _setup_legacy_sanic():\n # type: () -> None\n Sanic.handle_request = _legacy_handle_request\n Router.get = _legacy_router_get\n ErrorHandler.lookup = _sentry_error_handler_lookup\n\n\nasync def _startup(self):\n # type: (Sanic) -> None\n # This happens about as early in the lifecycle as possible, just after the\n # Request object is created. The body has not yet been consumed.\n self.signal(\"http.lifecycle.request\")(_hub_enter)\n\n # This happens after the handler is complete. In v21.9 this signal is not\n # dispatched when there is an exception. Therefore we need to close out\n # and call _hub_exit from the custom exception handler as well.\n # See https://github.com/sanic-org/sanic/issues/2297\n self.signal(\"http.lifecycle.response\")(_hub_exit)\n\n # This happens inside of request handling immediately after the route\n # has been identified by the router.\n self.signal(\"http.routing.after\")(_set_transaction)\n\n # The above signals need to be declared before this can be called.\n await old_startup(self)\n\n\nasync def _hub_enter(request):\n # type: (Request) -> None\n hub = Hub.current\n request.ctx._sentry_do_integration = (\n hub.get_integration(SanicIntegration) is not None\n )\n\n if not request.ctx._sentry_do_integration:\n return\n\n weak_request = weakref.ref(request)\n request.ctx._sentry_hub = Hub(hub)\n request.ctx._sentry_hub.__enter__()\n\n with request.ctx._sentry_hub.configure_scope() as scope:\n scope.clear_breadcrumbs()\n scope.add_event_processor(_make_request_processor(weak_request))\n\n\nasync def _hub_exit(request, **_):\n # type: (Request, **Any) -> None\n request.ctx._sentry_hub.__exit__(None, None, None)\n\n\nasync def _set_transaction(request, route, **kwargs):\n # type: (Request, Route, **Any) -> None\n hub = Hub.current\n if hub.get_integration(SanicIntegration) is not None:\n with capture_internal_exceptions():\n with hub.configure_scope() as scope:\n route_name = route.name.replace(request.app.name, \"\").strip(\".\")\n scope.transaction = route_name\n\n\ndef _sentry_error_handler_lookup(self, exception, *args, **kwargs):\n # type: (Any, Exception, *Any, **Any) -> Optional[object]\n _capture_exception(exception)\n old_error_handler = old_error_handler_lookup(self, exception, *args, **kwargs)\n\n if old_error_handler is None:\n return None\n\n if Hub.current.get_integration(SanicIntegration) is None:\n return old_error_handler\n\n async def sentry_wrapped_error_handler(request, exception):\n # type: (Request, Exception) -> Any\n 
try:\n response = old_error_handler(request, exception)\n if isawaitable(response):\n response = await response\n return response\n except Exception:\n # Report errors that occur in Sanic error handler. These\n # exceptions will not even show up in Sanic's\n # `sanic.exceptions` logger.\n exc_info = sys.exc_info()\n _capture_exception(exc_info)\n reraise(*exc_info)\n finally:\n # As mentioned in previous comment in _startup, this can be removed\n # after https://github.com/sanic-org/sanic/issues/2297 is resolved\n if SanicIntegration.version >= (21, 9):\n await _hub_exit(request)\n\n return sentry_wrapped_error_handler\n\n\nasync def _legacy_handle_request(self, request, *args, **kwargs):\n # type: (Any, Request, *Any, **Any) -> Any\n hub = Hub.current\n if hub.get_integration(SanicIntegration) is None:\n return old_handle_request(self, request, *args, **kwargs)\n\n weak_request = weakref.ref(request)\n\n with Hub(hub) as hub:\n with hub.configure_scope() as scope:\n scope.clear_breadcrumbs()\n scope.add_event_processor(_make_request_processor(weak_request))\n\n response = old_handle_request(self, request, *args, **kwargs)\n if isawaitable(response):\n response = await response\n\n return response\n\n\ndef _legacy_router_get(self, *args):\n # type: (Any, Union[Any, Request]) -> Any\n rv = old_router_get(self, *args)\n hub = Hub.current\n if hub.get_integration(SanicIntegration) is not None:\n with capture_internal_exceptions():\n with hub.configure_scope() as scope:\n if SanicIntegration.version and SanicIntegration.version >= (21, 3):\n # Sanic versions above and including 21.3 append the app name to the\n # route name, and so we need to remove it from Route name so the\n # transaction name is consistent across all versions\n sanic_app_name = self.ctx.app.name\n sanic_route = rv[0].name\n\n if sanic_route.startswith(\"%s.\" % sanic_app_name):\n # We add a 1 to the len of the sanic_app_name because there is a dot\n # that joins app name and the route name\n # Format: app_name.route_name\n sanic_route = sanic_route[len(sanic_app_name) + 1 :]\n\n scope.transaction = sanic_route\n else:\n scope.transaction = rv[0].__name__\n return rv\n\n\ndef _capture_exception(exception):\n # type: (Union[Tuple[Optional[type], Optional[BaseException], Any], BaseException]) -> None\n hub = Hub.current\n integration = hub.get_integration(SanicIntegration)\n if integration is None:\n return\n\n # If an integration is there, a client has to be there.\n client = hub.client # type: Any\n\n with capture_internal_exceptions():\n event, hint = event_from_exception(\n exception,\n client_options=client.options,\n mechanism={\"type\": \"sanic\", \"handled\": False},\n )\n hub.capture_event(event, hint=hint)\n\n\ndef _make_request_processor(weak_request):\n # type: (Callable[[], Request]) -> EventProcessor\n def sanic_processor(event, hint):\n # type: (Event, Optional[Hint]) -> Optional[Event]\n\n try:\n if hint and issubclass(hint[\"exc_info\"][0], SanicException):\n return None\n except KeyError:\n pass\n\n request = weak_request()\n if request is None:\n return event\n\n with capture_internal_exceptions():\n extractor = SanicRequestExtractor(request)\n extractor.extract_into_event(event)\n\n request_info = event[\"request\"]\n urlparts = urlparse.urlsplit(request.url)\n\n request_info[\"url\"] = \"%s://%s%s\" % (\n urlparts.scheme,\n urlparts.netloc,\n urlparts.path,\n )\n\n request_info[\"query_string\"] = urlparts.query\n request_info[\"method\"] = request.method\n request_info[\"env\"] = {\"REMOTE_ADDR\": 
request.remote_addr}\n request_info[\"headers\"] = _filter_headers(dict(request.headers))\n\n return event\n\n return sanic_processor\n", "path": "sentry_sdk/integrations/sanic.py"}]}
num_tokens_prompt: 4,077
num_tokens_diff: 149
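The `IndexError: pop from empty list` in this record comes from the hub being exited more times than it was entered: the `http.lifecycle.response` signal calls `_hub_exit`, and the error handler's `finally` block called it again on every Sanic release from 21.9 on. The golden diff narrows that extra call to exactly 21.9, which suggests that from 21.12 the response signal fires even when the handler raises. A toy model of the double exit (not the real `sentry_sdk.Hub`, just enough state to reproduce the traceback) is sketched below:

```python
class ToyHub:
    # Mimics only the enter/exit bookkeeping of sentry_sdk.Hub.
    def __init__(self):
        self._old_hubs = []

    def __enter__(self):
        self._old_hubs.append("previous hub")
        return self

    def __exit__(self, exc_type, exc_value, tb):
        # Raises IndexError when __exit__ runs more often than __enter__.
        self._old_hubs.pop()

hub = ToyHub()
hub.__enter__()
hub.__exit__(None, None, None)    # first exit (response signal): fine
# hub.__exit__(None, None, None)  # second exit (error handler): IndexError
```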
problem_id: gh_patches_debug_31403
source: rasdani/github-patches
task_type: git_diff
in_source_id: PyGithub__PyGithub-718
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Comments do not include reply info assert github.Github().get_user().get_repo("PyGithub").get_pull(664).get_comment(166456140).in_reply_to_id == "166453895" Currently, in_reply_to_id is undefined. This makes it impossible to understand the comment threading. </issue> <code> [start of github/PullRequestComment.py] 1 # -*- coding: utf-8 -*- 2 3 # ########################## Copyrights and license ############################ 4 # # 5 # Copyright 2012 Vincent Jacques <[email protected]> # 6 # Copyright 2012 Zearin <[email protected]> # 7 # Copyright 2013 AKFish <[email protected]> # 8 # Copyright 2013 Michael Stead <[email protected]> # 9 # Copyright 2013 Vincent Jacques <[email protected]> # 10 # Copyright 2013 martinqt <[email protected]> # 11 # # 12 # This file is part of PyGithub. # 13 # http://pygithub.github.io/PyGithub/v1/index.html # 14 # # 15 # PyGithub is free software: you can redistribute it and/or modify it under # 16 # the terms of the GNU Lesser General Public License as published by the Free # 17 # Software Foundation, either version 3 of the License, or (at your option) # 18 # any later version. # 19 # # 20 # PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY # 21 # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # 22 # FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more # 23 # details. # 24 # # 25 # You should have received a copy of the GNU Lesser General Public License # 26 # along with PyGithub. If not, see <http://www.gnu.org/licenses/>. # 27 # # 28 # ############################################################################## 29 30 import github.GithubObject 31 32 import github.NamedUser 33 34 35 class PullRequestComment(github.GithubObject.CompletableGithubObject): 36 """ 37 This class represents PullRequestComments. 
The reference can be found here http://developer.github.com/v3/pulls/comments/ 38 """ 39 40 def __repr__(self): 41 return self.get__repr__({"id": self._id.value, "user": self._user.value}) 42 43 @property 44 def body(self): 45 """ 46 :type: string 47 """ 48 self._completeIfNotSet(self._body) 49 return self._body.value 50 51 @property 52 def commit_id(self): 53 """ 54 :type: string 55 """ 56 self._completeIfNotSet(self._commit_id) 57 return self._commit_id.value 58 59 @property 60 def created_at(self): 61 """ 62 :type: datetime.datetime 63 """ 64 self._completeIfNotSet(self._created_at) 65 return self._created_at.value 66 67 @property 68 def diff_hunk(self): 69 """ 70 :type: string 71 """ 72 self._completeIfNotSet(self._diff_hunk) 73 return self._diff_hunk.value 74 75 @property 76 def id(self): 77 """ 78 :type: integer 79 """ 80 self._completeIfNotSet(self._id) 81 return self._id.value 82 83 @property 84 def original_commit_id(self): 85 """ 86 :type: string 87 """ 88 self._completeIfNotSet(self._original_commit_id) 89 return self._original_commit_id.value 90 91 @property 92 def original_position(self): 93 """ 94 :type: integer 95 """ 96 self._completeIfNotSet(self._original_position) 97 return self._original_position.value 98 99 @property 100 def path(self): 101 """ 102 :type: string 103 """ 104 self._completeIfNotSet(self._path) 105 return self._path.value 106 107 @property 108 def position(self): 109 """ 110 :type: integer 111 """ 112 self._completeIfNotSet(self._position) 113 return self._position.value 114 115 @property 116 def pull_request_url(self): 117 """ 118 :type: string 119 """ 120 self._completeIfNotSet(self._pull_request_url) 121 return self._pull_request_url.value 122 123 @property 124 def updated_at(self): 125 """ 126 :type: datetime.datetime 127 """ 128 self._completeIfNotSet(self._updated_at) 129 return self._updated_at.value 130 131 @property 132 def url(self): 133 """ 134 :type: string 135 """ 136 self._completeIfNotSet(self._url) 137 return self._url.value 138 139 @property 140 def html_url(self): 141 """ 142 :type: string 143 """ 144 self._completeIfNotSet(self._html_url) 145 return self._html_url.value 146 147 @property 148 def user(self): 149 """ 150 :type: :class:`github.NamedUser.NamedUser` 151 """ 152 self._completeIfNotSet(self._user) 153 return self._user.value 154 155 def delete(self): 156 """ 157 :calls: `DELETE /repos/:owner/:repo/pulls/comments/:number <http://developer.github.com/v3/pulls/comments>`_ 158 :rtype: None 159 """ 160 headers, data = self._requester.requestJsonAndCheck( 161 "DELETE", 162 self.url 163 ) 164 165 def edit(self, body): 166 """ 167 :calls: `PATCH /repos/:owner/:repo/pulls/comments/:number <http://developer.github.com/v3/pulls/comments>`_ 168 :param body: string 169 :rtype: None 170 """ 171 assert isinstance(body, (str, unicode)), body 172 post_parameters = { 173 "body": body, 174 } 175 headers, data = self._requester.requestJsonAndCheck( 176 "PATCH", 177 self.url, 178 input=post_parameters 179 ) 180 self._useAttributes(data) 181 182 def get_reactions(self): 183 """ 184 :calls: `GET /repos/:owner/:repo/pulls/comments/:number/reactions 185 <https://developer.github.com/v3/reactions/#list-reactions-for-a-pull-request-review-comment>` 186 :return: :class: :class:`github.PaginatedList.PaginatedList` of :class:`github.Reaction.Reaction` 187 """ 188 return github.PaginatedList.PaginatedList( 189 github.Reaction.Reaction, 190 self._requester, 191 self.url + "/reactions", 192 None, 193 headers={'Accept': 
'application/vnd.github.squirrel-girl-preview'} 194 ) 195 196 def create_reaction(self, reaction_type): 197 """ 198 :calls: `POST /repos/:owner/:repo/pulls/comments/:number/reactions 199 <https://developer.github.com/v3/reactions/#create-reaction-for-a-pull-request-review-comment>`_ 200 :param reaction_type: string 201 :rtype: :class:`github.Reaction.Reaction` 202 """ 203 assert isinstance(reaction_type, (str, unicode)), "reaction type should be a string" 204 assert reaction_type in ["+1", "-1", "laugh", "confused", "heart", "hooray"], \ 205 "Invalid reaction type (https://developer.github.com/v3/reactions/#reaction-types)" 206 207 post_parameters = { 208 "content": reaction_type, 209 } 210 headers, data = self._requester.requestJsonAndCheck( 211 "POST", 212 self.url + "/reactions", 213 input=post_parameters, 214 headers={'Accept': 'application/vnd.github.squirrel-girl-preview'} 215 ) 216 return github.Reaction.Reaction(self._requester, headers, data, completed=True) 217 218 def _initAttributes(self): 219 self._body = github.GithubObject.NotSet 220 self._commit_id = github.GithubObject.NotSet 221 self._created_at = github.GithubObject.NotSet 222 self._diff_hunk = github.GithubObject.NotSet 223 self._id = github.GithubObject.NotSet 224 self._original_commit_id = github.GithubObject.NotSet 225 self._original_position = github.GithubObject.NotSet 226 self._path = github.GithubObject.NotSet 227 self._position = github.GithubObject.NotSet 228 self._pull_request_url = github.GithubObject.NotSet 229 self._updated_at = github.GithubObject.NotSet 230 self._url = github.GithubObject.NotSet 231 self._html_url = github.GithubObject.NotSet 232 self._user = github.GithubObject.NotSet 233 234 def _useAttributes(self, attributes): 235 if "body" in attributes: # pragma no branch 236 self._body = self._makeStringAttribute(attributes["body"]) 237 if "commit_id" in attributes: # pragma no branch 238 self._commit_id = self._makeStringAttribute(attributes["commit_id"]) 239 if "created_at" in attributes: # pragma no branch 240 self._created_at = self._makeDatetimeAttribute(attributes["created_at"]) 241 if "diff_hunk" in attributes: # pragma no branch 242 self._diff_hunk = self._makeStringAttribute(attributes["diff_hunk"]) 243 if "id" in attributes: # pragma no branch 244 self._id = self._makeIntAttribute(attributes["id"]) 245 if "original_commit_id" in attributes: # pragma no branch 246 self._original_commit_id = self._makeStringAttribute(attributes["original_commit_id"]) 247 if "original_position" in attributes: # pragma no branch 248 self._original_position = self._makeIntAttribute(attributes["original_position"]) 249 if "path" in attributes: # pragma no branch 250 self._path = self._makeStringAttribute(attributes["path"]) 251 if "position" in attributes: # pragma no branch 252 self._position = self._makeIntAttribute(attributes["position"]) 253 if "pull_request_url" in attributes: # pragma no branch 254 self._pull_request_url = self._makeStringAttribute(attributes["pull_request_url"]) 255 if "updated_at" in attributes: # pragma no branch 256 self._updated_at = self._makeDatetimeAttribute(attributes["updated_at"]) 257 if "url" in attributes: # pragma no branch 258 self._url = self._makeStringAttribute(attributes["url"]) 259 if "html_url" in attributes: # pragma no branch 260 self._html_url = self._makeStringAttribute(attributes["html_url"]) 261 if "user" in attributes: # pragma no branch 262 self._user = self._makeClassAttribute(github.NamedUser.NamedUser, attributes["user"]) 263 [end of 
github/PullRequestComment.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/github/PullRequestComment.py b/github/PullRequestComment.py --- a/github/PullRequestComment.py +++ b/github/PullRequestComment.py @@ -80,6 +80,14 @@ self._completeIfNotSet(self._id) return self._id.value + @property + def in_reply_to_id(self): + """ + :type: integer + """ + self._completeIfNotSet(self._in_reply_to_id) + return self._in_reply_to_id.value + @property def original_commit_id(self): """ @@ -221,6 +229,7 @@ self._created_at = github.GithubObject.NotSet self._diff_hunk = github.GithubObject.NotSet self._id = github.GithubObject.NotSet + self._in_reply_to_id = github.GithubObject.NotSet self._original_commit_id = github.GithubObject.NotSet self._original_position = github.GithubObject.NotSet self._path = github.GithubObject.NotSet @@ -242,6 +251,8 @@ self._diff_hunk = self._makeStringAttribute(attributes["diff_hunk"]) if "id" in attributes: # pragma no branch self._id = self._makeIntAttribute(attributes["id"]) + if "in_reply_to_id" in attributes: # pragma no branch + self._in_reply_to_id = self._makeIntAttribute(attributes["in_reply_to_id"]) if "original_commit_id" in attributes: # pragma no branch self._original_commit_id = self._makeStringAttribute(attributes["original_commit_id"]) if "original_position" in attributes: # pragma no branch
{"golden_diff": "diff --git a/github/PullRequestComment.py b/github/PullRequestComment.py\n--- a/github/PullRequestComment.py\n+++ b/github/PullRequestComment.py\n@@ -80,6 +80,14 @@\n self._completeIfNotSet(self._id)\n return self._id.value\n \n+ @property\n+ def in_reply_to_id(self):\n+ \"\"\"\n+ :type: integer\n+ \"\"\"\n+ self._completeIfNotSet(self._in_reply_to_id)\n+ return self._in_reply_to_id.value\n+\n @property\n def original_commit_id(self):\n \"\"\"\n@@ -221,6 +229,7 @@\n self._created_at = github.GithubObject.NotSet\n self._diff_hunk = github.GithubObject.NotSet\n self._id = github.GithubObject.NotSet\n+ self._in_reply_to_id = github.GithubObject.NotSet\n self._original_commit_id = github.GithubObject.NotSet\n self._original_position = github.GithubObject.NotSet\n self._path = github.GithubObject.NotSet\n@@ -242,6 +251,8 @@\n self._diff_hunk = self._makeStringAttribute(attributes[\"diff_hunk\"])\n if \"id\" in attributes: # pragma no branch\n self._id = self._makeIntAttribute(attributes[\"id\"])\n+ if \"in_reply_to_id\" in attributes: # pragma no branch\n+ self._in_reply_to_id = self._makeIntAttribute(attributes[\"in_reply_to_id\"])\n if \"original_commit_id\" in attributes: # pragma no branch\n self._original_commit_id = self._makeStringAttribute(attributes[\"original_commit_id\"])\n if \"original_position\" in attributes: # pragma no branch\n", "issue": "Comments do not include reply info\nassert github.Github().get_user().get_repo(\"PyGithub\").get_pull(664).get_comment(166456140).in_reply_to_id == \"166453895\"\r\n\r\nCurrently, in_reply_to_id is undefined. This makes it impossible to understand the comment threading.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# ########################## Copyrights and license ############################\n# #\n# Copyright 2012 Vincent Jacques <[email protected]> #\n# Copyright 2012 Zearin <[email protected]> #\n# Copyright 2013 AKFish <[email protected]> #\n# Copyright 2013 Michael Stead <[email protected]> #\n# Copyright 2013 Vincent Jacques <[email protected]> #\n# Copyright 2013 martinqt <[email protected]> #\n# #\n# This file is part of PyGithub. #\n# http://pygithub.github.io/PyGithub/v1/index.html #\n# #\n# PyGithub is free software: you can redistribute it and/or modify it under #\n# the terms of the GNU Lesser General Public License as published by the Free #\n# Software Foundation, either version 3 of the License, or (at your option) #\n# any later version. #\n# #\n# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #\n# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #\n# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #\n# details. #\n# #\n# You should have received a copy of the GNU Lesser General Public License #\n# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #\n# #\n# ##############################################################################\n\nimport github.GithubObject\n\nimport github.NamedUser\n\n\nclass PullRequestComment(github.GithubObject.CompletableGithubObject):\n \"\"\"\n This class represents PullRequestComments. 
The reference can be found here http://developer.github.com/v3/pulls/comments/\n \"\"\"\n\n def __repr__(self):\n return self.get__repr__({\"id\": self._id.value, \"user\": self._user.value})\n\n @property\n def body(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._body)\n return self._body.value\n\n @property\n def commit_id(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._commit_id)\n return self._commit_id.value\n\n @property\n def created_at(self):\n \"\"\"\n :type: datetime.datetime\n \"\"\"\n self._completeIfNotSet(self._created_at)\n return self._created_at.value\n\n @property\n def diff_hunk(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._diff_hunk)\n return self._diff_hunk.value\n\n @property\n def id(self):\n \"\"\"\n :type: integer\n \"\"\"\n self._completeIfNotSet(self._id)\n return self._id.value\n\n @property\n def original_commit_id(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._original_commit_id)\n return self._original_commit_id.value\n\n @property\n def original_position(self):\n \"\"\"\n :type: integer\n \"\"\"\n self._completeIfNotSet(self._original_position)\n return self._original_position.value\n\n @property\n def path(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._path)\n return self._path.value\n\n @property\n def position(self):\n \"\"\"\n :type: integer\n \"\"\"\n self._completeIfNotSet(self._position)\n return self._position.value\n\n @property\n def pull_request_url(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._pull_request_url)\n return self._pull_request_url.value\n\n @property\n def updated_at(self):\n \"\"\"\n :type: datetime.datetime\n \"\"\"\n self._completeIfNotSet(self._updated_at)\n return self._updated_at.value\n\n @property\n def url(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._url)\n return self._url.value\n\n @property\n def html_url(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._html_url)\n return self._html_url.value\n\n @property\n def user(self):\n \"\"\"\n :type: :class:`github.NamedUser.NamedUser`\n \"\"\"\n self._completeIfNotSet(self._user)\n return self._user.value\n\n def delete(self):\n \"\"\"\n :calls: `DELETE /repos/:owner/:repo/pulls/comments/:number <http://developer.github.com/v3/pulls/comments>`_\n :rtype: None\n \"\"\"\n headers, data = self._requester.requestJsonAndCheck(\n \"DELETE\",\n self.url\n )\n\n def edit(self, body):\n \"\"\"\n :calls: `PATCH /repos/:owner/:repo/pulls/comments/:number <http://developer.github.com/v3/pulls/comments>`_\n :param body: string\n :rtype: None\n \"\"\"\n assert isinstance(body, (str, unicode)), body\n post_parameters = {\n \"body\": body,\n }\n headers, data = self._requester.requestJsonAndCheck(\n \"PATCH\",\n self.url,\n input=post_parameters\n )\n self._useAttributes(data)\n\n def get_reactions(self):\n \"\"\"\n :calls: `GET /repos/:owner/:repo/pulls/comments/:number/reactions\n <https://developer.github.com/v3/reactions/#list-reactions-for-a-pull-request-review-comment>`\n :return: :class: :class:`github.PaginatedList.PaginatedList` of :class:`github.Reaction.Reaction`\n \"\"\"\n return github.PaginatedList.PaginatedList(\n github.Reaction.Reaction,\n self._requester,\n self.url + \"/reactions\",\n None,\n headers={'Accept': 'application/vnd.github.squirrel-girl-preview'}\n )\n\n def create_reaction(self, reaction_type):\n \"\"\"\n :calls: `POST 
/repos/:owner/:repo/pulls/comments/:number/reactions\n <https://developer.github.com/v3/reactions/#create-reaction-for-a-pull-request-review-comment>`_\n :param reaction_type: string\n :rtype: :class:`github.Reaction.Reaction`\n \"\"\"\n assert isinstance(reaction_type, (str, unicode)), \"reaction type should be a string\"\n assert reaction_type in [\"+1\", \"-1\", \"laugh\", \"confused\", \"heart\", \"hooray\"], \\\n \"Invalid reaction type (https://developer.github.com/v3/reactions/#reaction-types)\"\n\n post_parameters = {\n \"content\": reaction_type,\n }\n headers, data = self._requester.requestJsonAndCheck(\n \"POST\",\n self.url + \"/reactions\",\n input=post_parameters,\n headers={'Accept': 'application/vnd.github.squirrel-girl-preview'}\n )\n return github.Reaction.Reaction(self._requester, headers, data, completed=True)\n\n def _initAttributes(self):\n self._body = github.GithubObject.NotSet\n self._commit_id = github.GithubObject.NotSet\n self._created_at = github.GithubObject.NotSet\n self._diff_hunk = github.GithubObject.NotSet\n self._id = github.GithubObject.NotSet\n self._original_commit_id = github.GithubObject.NotSet\n self._original_position = github.GithubObject.NotSet\n self._path = github.GithubObject.NotSet\n self._position = github.GithubObject.NotSet\n self._pull_request_url = github.GithubObject.NotSet\n self._updated_at = github.GithubObject.NotSet\n self._url = github.GithubObject.NotSet\n self._html_url = github.GithubObject.NotSet\n self._user = github.GithubObject.NotSet\n\n def _useAttributes(self, attributes):\n if \"body\" in attributes: # pragma no branch\n self._body = self._makeStringAttribute(attributes[\"body\"])\n if \"commit_id\" in attributes: # pragma no branch\n self._commit_id = self._makeStringAttribute(attributes[\"commit_id\"])\n if \"created_at\" in attributes: # pragma no branch\n self._created_at = self._makeDatetimeAttribute(attributes[\"created_at\"])\n if \"diff_hunk\" in attributes: # pragma no branch\n self._diff_hunk = self._makeStringAttribute(attributes[\"diff_hunk\"])\n if \"id\" in attributes: # pragma no branch\n self._id = self._makeIntAttribute(attributes[\"id\"])\n if \"original_commit_id\" in attributes: # pragma no branch\n self._original_commit_id = self._makeStringAttribute(attributes[\"original_commit_id\"])\n if \"original_position\" in attributes: # pragma no branch\n self._original_position = self._makeIntAttribute(attributes[\"original_position\"])\n if \"path\" in attributes: # pragma no branch\n self._path = self._makeStringAttribute(attributes[\"path\"])\n if \"position\" in attributes: # pragma no branch\n self._position = self._makeIntAttribute(attributes[\"position\"])\n if \"pull_request_url\" in attributes: # pragma no branch\n self._pull_request_url = self._makeStringAttribute(attributes[\"pull_request_url\"])\n if \"updated_at\" in attributes: # pragma no branch\n self._updated_at = self._makeDatetimeAttribute(attributes[\"updated_at\"])\n if \"url\" in attributes: # pragma no branch\n self._url = self._makeStringAttribute(attributes[\"url\"])\n if \"html_url\" in attributes: # pragma no branch\n self._html_url = self._makeStringAttribute(attributes[\"html_url\"])\n if \"user\" in attributes: # pragma no branch\n self._user = self._makeClassAttribute(github.NamedUser.NamedUser, attributes[\"user\"])\n", "path": "github/PullRequestComment.py"}]}
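A quick usage sketch for the row above, once its golden diff adds the in_reply_to_id attribute. The access token is a placeholder, and the repository, pull request, and comment IDs come from the assert in the issue itself; nothing else is implied about the dataset row.

```python
from github import Github

gh = Github("<access-token>")  # placeholder credentials, not part of the dataset row
comment = gh.get_user().get_repo("PyGithub").get_pull(664).get_comment(166456140)

# With the patched PullRequestComment, the parent comment of the thread is exposed.
print(comment.in_reply_to_id)  # the issue expects 166453895 here
```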
num_tokens_prompt: 3,482
num_tokens_diff: 390

problem_id: gh_patches_debug_23315
source: rasdani/github-patches
task_type: git_diff
in_source_id: fossasia__open-event-server-5328
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Internal Server Error thrown for patching a nonexistent session **Describe the bug** <!-- A clear and concise description of what the bug is. --> HTTP 500 error is thrown when a patch request is sent for a session which doesn't exist. **To Reproduce** Steps to reproduce the behavior: 1. Send a patch request for a session which doesn't exist **Expected behavior** <!-- A clear and concise description of what you expected to happen. --> A proper error should be sent. Working on fixing this. </issue> <code> [start of app/api/schema/sessions.py] 1 from marshmallow import validates_schema, validate 2 from marshmallow_jsonapi import fields 3 from marshmallow_jsonapi.flask import Relationship 4 5 from app.api.helpers.exceptions import UnprocessableEntity, ForbiddenException 6 from app.api.helpers.permission_manager import has_access 7 from app.api.helpers.utilities import dasherize 8 from app.api.schema.base import SoftDeletionSchema 9 from app.models.session import Session 10 from utils.common import use_defaults 11 12 13 @use_defaults() 14 class SessionSchema(SoftDeletionSchema): 15 """ 16 Api schema for Session Model 17 """ 18 19 class Meta: 20 """ 21 Meta class for Session Api Schema 22 """ 23 type_ = 'session' 24 self_view = 'v1.session_detail' 25 self_view_kwargs = {'id': '<id>'} 26 inflect = dasherize 27 28 @validates_schema(pass_original=True) 29 def validate_date(self, data, original_data): 30 if 'id' in original_data['data']: 31 session = Session.query.filter_by(id=original_data['data']['id']).one() 32 33 if 'starts_at' not in data: 34 data['starts_at'] = session.starts_at 35 36 if 'ends_at' not in data: 37 data['ends_at'] = session.ends_at 38 39 if 'event' not in data: 40 data['event'] = session.event_id 41 42 if data['starts_at'] and data['ends_at']: 43 if data['starts_at'] >= data['ends_at']: 44 raise UnprocessableEntity( 45 {'pointer': '/data/attributes/ends-at'}, "ends-at should be after starts-at") 46 47 if 'state' in data: 48 if data['state'] is not 'draft' or not 'pending': 49 if not has_access('is_coorganizer', event_id=data['event']): 50 return ForbiddenException({'source': ''}, 'Co-organizer access is required.') 51 52 if 'track' in data: 53 if not has_access('is_coorganizer', event_id=data['event']): 54 return ForbiddenException({'source': ''}, 'Co-organizer access is required.') 55 56 if 'microlocation' in data: 57 if not has_access('is_coorganizer', event_id=data['event']): 58 return ForbiddenException({'source': ''}, 'Co-organizer access is required.') 59 60 id = fields.Str(dump_only=True) 61 title = fields.Str(required=True) 62 subtitle = fields.Str(allow_none=True) 63 level = fields.Int(allow_none=True) 64 short_abstract = fields.Str(allow_none=True) 65 long_abstract = fields.Str(allow_none=True) 66 comments = fields.Str(allow_none=True) 67 starts_at = fields.DateTime(allow_none=True) 68 ends_at = fields.DateTime(allow_none=True) 69 language = fields.Str(allow_none=True) 70 slides_url = fields.Url(allow_none=True) 71 video_url = fields.Url(allow_none=True) 72 audio_url = fields.Url(allow_none=True) 73 signup_url = fields.Url(allow_none=True) 74 state = fields.Str(validate=validate.OneOf(choices=["pending", "accepted", "confirmed", "rejected", "draft"]), 75 allow_none=True, default='draft') 76 created_at = fields.DateTime(dump_only=True) 77 deleted_at = fields.DateTime(dump_only=True) 78 submitted_at = fields.DateTime(allow_none=True) 79 is_mail_sent = 
fields.Boolean() 80 last_modified_at = fields.DateTime(dump_only=True) 81 send_email = fields.Boolean(load_only=True, allow_none=True) 82 average_rating = fields.Float(dump_only=True) 83 microlocation = Relationship(attribute='microlocation', 84 self_view='v1.session_microlocation', 85 self_view_kwargs={'id': '<id>'}, 86 related_view='v1.microlocation_detail', 87 related_view_kwargs={'session_id': '<id>'}, 88 schema='MicrolocationSchema', 89 type_='microlocation') 90 track = Relationship(attribute='track', 91 self_view='v1.session_track', 92 self_view_kwargs={'id': '<id>'}, 93 related_view='v1.track_detail', 94 related_view_kwargs={'session_id': '<id>'}, 95 schema='TrackSchema', 96 type_='track') 97 session_type = Relationship(attribute='session_type', 98 self_view='v1.session_session_type', 99 self_view_kwargs={'id': '<id>'}, 100 related_view='v1.session_type_detail', 101 related_view_kwargs={'session_id': '<id>'}, 102 schema='SessionTypeSchema', 103 type_='session-type') 104 event = Relationship(attribute='event', 105 self_view='v1.session_event', 106 self_view_kwargs={'id': '<id>'}, 107 related_view='v1.event_detail', 108 related_view_kwargs={'session_id': '<id>'}, 109 schema='EventSchemaPublic', 110 type_='event') 111 feedbacks = Relationship(attribute='feedbacks', 112 self_view='v1.session_feedbacks', 113 self_view_kwargs={'id': '<id>'}, 114 related_view='v1.feedback_list', 115 related_view_kwargs={'session_id': '<id>'}, 116 schema='FeedbackSchema', 117 many=True, 118 type_='feedback') 119 speakers = Relationship(attribute='speakers', 120 many=True, 121 self_view='v1.session_speaker', 122 self_view_kwargs={'id': '<id>'}, 123 related_view='v1.speaker_list', 124 related_view_kwargs={'session_id': '<id>'}, 125 schema='SpeakerSchema', 126 type_='speaker') 127 creator = Relationship(attribute='user', 128 self_view='v1.session_user', 129 self_view_kwargs={'id': '<id>'}, 130 related_view='v1.user_detail', 131 related_view_kwargs={'session_id': '<id>'}, 132 schema='UserSchemaPublic', 133 type_='user') 134 [end of app/api/schema/sessions.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/app/api/schema/sessions.py b/app/api/schema/sessions.py --- a/app/api/schema/sessions.py +++ b/app/api/schema/sessions.py @@ -1,6 +1,8 @@ +from flask_rest_jsonapi.exceptions import ObjectNotFound from marshmallow import validates_schema, validate from marshmallow_jsonapi import fields from marshmallow_jsonapi.flask import Relationship +from sqlalchemy.orm.exc import NoResultFound from app.api.helpers.exceptions import UnprocessableEntity, ForbiddenException from app.api.helpers.permission_manager import has_access @@ -28,7 +30,10 @@ @validates_schema(pass_original=True) def validate_date(self, data, original_data): if 'id' in original_data['data']: - session = Session.query.filter_by(id=original_data['data']['id']).one() + try: + session = Session.query.filter_by(id=original_data['data']['id']).one() + except NoResultFound: + raise ObjectNotFound({'parameter': '{id}'}, "Session: not found") if 'starts_at' not in data: data['starts_at'] = session.starts_at
{"golden_diff": "diff --git a/app/api/schema/sessions.py b/app/api/schema/sessions.py\n--- a/app/api/schema/sessions.py\n+++ b/app/api/schema/sessions.py\n@@ -1,6 +1,8 @@\n+from flask_rest_jsonapi.exceptions import ObjectNotFound\n from marshmallow import validates_schema, validate\n from marshmallow_jsonapi import fields\n from marshmallow_jsonapi.flask import Relationship\n+from sqlalchemy.orm.exc import NoResultFound\n \n from app.api.helpers.exceptions import UnprocessableEntity, ForbiddenException\n from app.api.helpers.permission_manager import has_access\n@@ -28,7 +30,10 @@\n @validates_schema(pass_original=True)\n def validate_date(self, data, original_data):\n if 'id' in original_data['data']:\n- session = Session.query.filter_by(id=original_data['data']['id']).one()\n+ try:\n+ session = Session.query.filter_by(id=original_data['data']['id']).one()\n+ except NoResultFound:\n+ raise ObjectNotFound({'parameter': '{id}'}, \"Session: not found\")\n \n if 'starts_at' not in data:\n data['starts_at'] = session.starts_at\n", "issue": "Internal Server Error thrown for patching a nonexistent session\n**Describe the bug**\r\n<!-- A clear and concise description of what the bug is. -->\r\nHTTP 500 error is thrown when a patch request is sent for a session which doesn't exist.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Send a patch request for a session which doesn't exist\r\n\r\n**Expected behavior**\r\n<!-- A clear and concise description of what you expected to happen. -->\r\nA proper error should be sent.\r\n\r\nWorking on fixing this.\n", "before_files": [{"content": "from marshmallow import validates_schema, validate\nfrom marshmallow_jsonapi import fields\nfrom marshmallow_jsonapi.flask import Relationship\n\nfrom app.api.helpers.exceptions import UnprocessableEntity, ForbiddenException\nfrom app.api.helpers.permission_manager import has_access\nfrom app.api.helpers.utilities import dasherize\nfrom app.api.schema.base import SoftDeletionSchema\nfrom app.models.session import Session\nfrom utils.common import use_defaults\n\n\n@use_defaults()\nclass SessionSchema(SoftDeletionSchema):\n \"\"\"\n Api schema for Session Model\n \"\"\"\n\n class Meta:\n \"\"\"\n Meta class for Session Api Schema\n \"\"\"\n type_ = 'session'\n self_view = 'v1.session_detail'\n self_view_kwargs = {'id': '<id>'}\n inflect = dasherize\n\n @validates_schema(pass_original=True)\n def validate_date(self, data, original_data):\n if 'id' in original_data['data']:\n session = Session.query.filter_by(id=original_data['data']['id']).one()\n\n if 'starts_at' not in data:\n data['starts_at'] = session.starts_at\n\n if 'ends_at' not in data:\n data['ends_at'] = session.ends_at\n\n if 'event' not in data:\n data['event'] = session.event_id\n\n if data['starts_at'] and data['ends_at']:\n if data['starts_at'] >= data['ends_at']:\n raise UnprocessableEntity(\n {'pointer': '/data/attributes/ends-at'}, \"ends-at should be after starts-at\")\n\n if 'state' in data:\n if data['state'] is not 'draft' or not 'pending':\n if not has_access('is_coorganizer', event_id=data['event']):\n return ForbiddenException({'source': ''}, 'Co-organizer access is required.')\n\n if 'track' in data:\n if not has_access('is_coorganizer', event_id=data['event']):\n return ForbiddenException({'source': ''}, 'Co-organizer access is required.')\n\n if 'microlocation' in data:\n if not has_access('is_coorganizer', event_id=data['event']):\n return ForbiddenException({'source': ''}, 'Co-organizer access is required.')\n\n id = 
fields.Str(dump_only=True)\n title = fields.Str(required=True)\n subtitle = fields.Str(allow_none=True)\n level = fields.Int(allow_none=True)\n short_abstract = fields.Str(allow_none=True)\n long_abstract = fields.Str(allow_none=True)\n comments = fields.Str(allow_none=True)\n starts_at = fields.DateTime(allow_none=True)\n ends_at = fields.DateTime(allow_none=True)\n language = fields.Str(allow_none=True)\n slides_url = fields.Url(allow_none=True)\n video_url = fields.Url(allow_none=True)\n audio_url = fields.Url(allow_none=True)\n signup_url = fields.Url(allow_none=True)\n state = fields.Str(validate=validate.OneOf(choices=[\"pending\", \"accepted\", \"confirmed\", \"rejected\", \"draft\"]),\n allow_none=True, default='draft')\n created_at = fields.DateTime(dump_only=True)\n deleted_at = fields.DateTime(dump_only=True)\n submitted_at = fields.DateTime(allow_none=True)\n is_mail_sent = fields.Boolean()\n last_modified_at = fields.DateTime(dump_only=True)\n send_email = fields.Boolean(load_only=True, allow_none=True)\n average_rating = fields.Float(dump_only=True)\n microlocation = Relationship(attribute='microlocation',\n self_view='v1.session_microlocation',\n self_view_kwargs={'id': '<id>'},\n related_view='v1.microlocation_detail',\n related_view_kwargs={'session_id': '<id>'},\n schema='MicrolocationSchema',\n type_='microlocation')\n track = Relationship(attribute='track',\n self_view='v1.session_track',\n self_view_kwargs={'id': '<id>'},\n related_view='v1.track_detail',\n related_view_kwargs={'session_id': '<id>'},\n schema='TrackSchema',\n type_='track')\n session_type = Relationship(attribute='session_type',\n self_view='v1.session_session_type',\n self_view_kwargs={'id': '<id>'},\n related_view='v1.session_type_detail',\n related_view_kwargs={'session_id': '<id>'},\n schema='SessionTypeSchema',\n type_='session-type')\n event = Relationship(attribute='event',\n self_view='v1.session_event',\n self_view_kwargs={'id': '<id>'},\n related_view='v1.event_detail',\n related_view_kwargs={'session_id': '<id>'},\n schema='EventSchemaPublic',\n type_='event')\n feedbacks = Relationship(attribute='feedbacks',\n self_view='v1.session_feedbacks',\n self_view_kwargs={'id': '<id>'},\n related_view='v1.feedback_list',\n related_view_kwargs={'session_id': '<id>'},\n schema='FeedbackSchema',\n many=True,\n type_='feedback')\n speakers = Relationship(attribute='speakers',\n many=True,\n self_view='v1.session_speaker',\n self_view_kwargs={'id': '<id>'},\n related_view='v1.speaker_list',\n related_view_kwargs={'session_id': '<id>'},\n schema='SpeakerSchema',\n type_='speaker')\n creator = Relationship(attribute='user',\n self_view='v1.session_user',\n self_view_kwargs={'id': '<id>'},\n related_view='v1.user_detail',\n related_view_kwargs={'session_id': '<id>'},\n schema='UserSchemaPublic',\n type_='user')\n", "path": "app/api/schema/sessions.py"}]}
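Condensing the fix in the row above: the schema validator loads the session with .one(), so a PATCH against a missing id raises SQLAlchemy's NoResultFound and surfaces as HTTP 500; the golden diff converts that into flask-rest-jsonapi's ObjectNotFound. A minimal sketch of the same guard pulled out of the schema for clarity (the helper name and the surrounding Flask app wiring are illustrative assumptions):

```python
from flask_rest_jsonapi.exceptions import ObjectNotFound
from sqlalchemy.orm.exc import NoResultFound

from app.models.session import Session


def load_session_or_404(session_id):
    # .one() raises NoResultFound when no row matches; without this guard the
    # exception propagates as an internal server error instead of a clean 404.
    try:
        return Session.query.filter_by(id=session_id).one()
    except NoResultFound:
        raise ObjectNotFound({'parameter': '{id}'}, "Session: not found")
```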
num_tokens_prompt: 2,129
num_tokens_diff: 251

problem_id: gh_patches_debug_3950
source: rasdani/github-patches
task_type: git_diff
deis__deis-2622
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> LICENSE needs updating for 2014 It still says 2013 </issue> <code> [start of docs/conf.py] 1 # -*- coding: utf-8 -*- 2 # 3 # deis documentation build configuration file, created by 4 # sphinx-quickstart on Fri Jul 26 12:12:00 2013. 5 # 6 # This file is execfile()d with the current directory set to its containing dir. 7 # 8 # Note that not all possible configuration values are present in this 9 # autogenerated file. 10 # 11 # All configuration values have a default; values that are commented out 12 # serve to show the default. 13 14 import os 15 import sys 16 17 # If extensions (or modules to document with autodoc) are in another directory, 18 # add these directories to sys.path here. If the directory is relative to the 19 # documentation root, use os.path.abspath to make it absolute, like shown here. 20 21 # Some hackery here to get deis.py to be importable as client.deis 22 open(os.path.join('..', '__init__.py'), 'a') 23 sys.path.insert(0, os.path.abspath(os.path.join('..'))) 24 sys.path.insert(0, os.path.abspath(os.path.join('..', 'controller'))) 25 # create local_settings.py for SECRET_KEY if necessary 26 local_settings_path = os.path.abspath( 27 os.path.join('..', 'controller', 'deis', 'local_settings.py')) 28 if not os.path.exists(local_settings_path): 29 with open(local_settings_path, 'w') as local_settings: 30 local_settings.write("SECRET_KEY = 'DummySecretKey'\n") 31 # set up Django 32 os.environ['DJANGO_SETTINGS_MODULE'] = 'deis.settings' 33 from django.conf import settings # noqa 34 35 # -- General configuration ----------------------------------------------------- 36 37 # If your documentation needs a minimal Sphinx version, state it here. 38 #needs_sphinx = '1.0' 39 40 # Add any Sphinx extension module names here, as strings. They can be extensions 41 # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. 42 extensions = ['sphinx.ext.autodoc', 'sphinx.ext.autosummary', 43 'sphinx.ext.viewcode'] 44 45 # default flags for auto-generated python code documetation 46 autodoc_default_flags = ['members', 'undoc-members'] 47 48 # Add any paths that contain templates here, relative to this directory. 49 templates_path = ['_templates'] 50 51 # The suffix of source filenames. 52 source_suffix = '.rst' 53 54 # The encoding of source files. 55 #source_encoding = 'utf-8-sig' 56 57 # The master toctree document. 58 master_doc = 'toctree' 59 60 # General information about the project. 61 project = u'deis' 62 copyright = u'2013, OpDemand LLC' 63 64 # The version info for the project you're documenting, acts as replacement for 65 # |version| and |release|, also used in various other places throughout the 66 # built documents. 67 # 68 from deis import __version__ 69 70 # The short X.Y version. 71 version = __version__.rsplit('.', 1)[0] 72 # The full version, including alpha/beta/rc tags. 73 release = __version__ 74 75 # The language for content autogenerated by Sphinx. Refer to documentation 76 # for a list of supported languages. 77 #language = None 78 79 # There are two options for replacing |today|: either, you set today to some 80 # non-false value, then it is used: 81 #today = '' 82 # Else, today_fmt is used as the format for a strftime call. 83 #today_fmt = '%B %d, %Y' 84 85 # List of patterns, relative to source directory, that match files and 86 # directories to ignore when looking for source files. 
87 exclude_patterns = ['_build', 'venv', '**/_*.rst'] 88 89 # The reST default role (used for this markup: `text`) to use for all documents. 90 #default_role = None 91 92 # If true, '()' will be appended to :func: etc. cross-reference text. 93 #add_function_parentheses = True 94 95 # If true, the current module name will be prepended to all description 96 # unit titles (such as .. function::). 97 #add_module_names = True 98 99 # If true, sectionauthor and moduleauthor directives will be shown in the 100 # output. They are ignored by default. 101 #show_authors = False 102 103 # The name of the Pygments (syntax highlighting) style to use. 104 pygments_style = 'sphinx' 105 106 # A list of ignored prefixes for module index sorting. 107 #modindex_common_prefix = [] 108 109 # If true, keep warnings as "system message" paragraphs in the built documents. 110 #keep_warnings = False 111 112 113 # -- Options for HTML output --------------------------------------------------- 114 115 # The theme to use for HTML and HTML Help pages. See the documentation for 116 # a list of builtin themes. 117 html_theme = 'deis' 118 119 # Theme options are theme-specific and customize the look and feel of a theme 120 # further. For a list of options available for each theme, see the 121 # documentation. 122 #html_theme_options = {} 123 124 # Add any paths that contain custom themes here, relative to this directory. 125 html_theme_path = ['theme'] 126 127 # The name for this set of Sphinx documents. If None, it defaults to 128 # "<project> v<release> documentation". 129 #html_title = None 130 131 # A shorter title for the navigation bar. Default is the same as html_title. 132 #html_short_title = None 133 134 # The name of an image file (relative to this directory) to place at the top 135 # of the sidebar. 136 #html_logo = None 137 138 # The name of an image file (within the static path) to use as favicon of the 139 # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 140 # pixels large. 141 #html_favicon = None 142 143 # Add any paths that contain custom static files (such as style sheets) here, 144 # relative to this directory. They are copied after the builtin static files, 145 # so a file named "default.css" will overwrite the builtin "default.css". 146 html_static_path = ['../controller/web/static'] 147 148 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, 149 # using the given strftime format. 150 #html_last_updated_fmt = '%b %d, %Y' 151 152 # If true, SmartyPants will be used to convert quotes and dashes to 153 # typographically correct entities. 154 html_use_smartypants = True 155 156 html_add_permalinks = True 157 158 # Custom sidebar templates, maps document names to template names. 159 #html_sidebars = {} 160 161 # Additional templates that should be rendered to pages, maps page names to 162 # template names. 163 #html_additional_pages = {} 164 165 # If false, no module index is generated. 166 #html_domain_indices = True 167 168 # If false, no index is generated. 169 #html_use_index = True 170 171 # If true, the index is split into individual pages for each letter. 172 #html_split_index = False 173 174 # If true, links to the reST sources are added to the pages. 175 #html_show_sourcelink = True 176 177 # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. 178 #html_show_sphinx = True 179 180 # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 
181 #html_show_copyright = True 182 183 # If true, an OpenSearch description file will be output, and all pages will 184 # contain a <link> tag referring to it. The value of this option must be the 185 # base URL from which the finished HTML is served. 186 #html_use_opensearch = '' 187 188 # This is the file name suffix for HTML files (e.g. ".xhtml"). 189 #html_file_suffix = None 190 191 # Output file base name for HTML help builder. 192 htmlhelp_basename = 'deisdoc' 193 194 195 # -- Options for LaTeX output -------------------------------------------------- 196 197 latex_elements = { 198 # The paper size ('letterpaper' or 'a4paper'). 199 #'papersize': 'letterpaper', 200 201 # The font size ('10pt', '11pt' or '12pt'). 202 #'pointsize': '10pt', 203 204 # Additional stuff for the LaTeX preamble. 205 #'preamble': '', 206 } 207 208 # Grouping the document tree into LaTeX files. List of tuples 209 # (source start file, target name, title, author, documentclass [howto/manual]). 210 latex_documents = [ 211 ('index', 'deis.tex', u'deis Documentation', 212 u'Author', 'manual'), 213 ] 214 215 # The name of an image file (relative to this directory) to place at the top of 216 # the title page. 217 #latex_logo = None 218 219 # For "manual" documents, if this is true, then toplevel headings are parts, 220 # not chapters. 221 #latex_use_parts = False 222 223 # If true, show page references after internal links. 224 #latex_show_pagerefs = False 225 226 # If true, show URL addresses after external links. 227 #latex_show_urls = False 228 229 # Documents to append as an appendix to all manuals. 230 #latex_appendices = [] 231 232 # If false, no module index is generated. 233 #latex_domain_indices = True 234 235 236 # -- Options for manual page output -------------------------------------------- 237 238 # One entry per manual page. List of tuples 239 # (source start file, name, description, authors, manual section). 240 man_pages = [ 241 ('index', 'deis', u'deis Documentation', 242 [u'Author'], 1) 243 ] 244 245 # If true, show URL addresses after external links. 246 #man_show_urls = False 247 248 249 # -- Options for Texinfo output ------------------------------------------------ 250 251 # Grouping the document tree into Texinfo files. List of tuples 252 # (source start file, target name, title, author, 253 # dir menu entry, description, category) 254 texinfo_documents = [ 255 ('index', 'deis', u'deis Documentation', 256 u'Author', 'deis', 'One line description of project.', 257 'Miscellaneous'), 258 ] 259 260 # Documents to append as an appendix to all manuals. 261 #texinfo_appendices = [] 262 263 # If false, no module index is generated. 264 #texinfo_domain_indices = True 265 266 # How to display URL addresses: 'footnote', 'no', or 'inline'. 267 #texinfo_show_urls = 'footnote' 268 269 # If true, do not generate a @detailmenu in the "Top" node's menu. 270 #texinfo_no_detailmenu = False 271 272 273 # -- Options for Epub output --------------------------------------------------- 274 275 # Bibliographic Dublin Core info. 276 epub_title = u'deis' 277 epub_author = u'OpDemand LLC' 278 epub_publisher = u'OpDemand LLC' 279 epub_copyright = u'2013, OpDemand LLC' 280 281 # The language of the text. It defaults to the language option 282 # or en if the language is not set. 283 #epub_language = '' 284 285 # The scheme of the identifier. Typical schemes are ISBN or URL. 286 #epub_scheme = '' 287 288 # The unique identifier of the text. This can be a ISBN number 289 # or the project homepage. 
290 #epub_identifier = '' 291 292 # A unique identification for the text. 293 #epub_uid = '' 294 295 # A tuple containing the cover image and cover page html template filenames. 296 #epub_cover = () 297 298 # A sequence of (type, uri, title) tuples for the guide element of content.opf. 299 #epub_guide = () 300 301 # HTML files that should be inserted before the pages created by sphinx. 302 # The format is a list of tuples containing the path and title. 303 #epub_pre_files = [] 304 305 # HTML files shat should be inserted after the pages created by sphinx. 306 # The format is a list of tuples containing the path and title. 307 #epub_post_files = [] 308 309 # A list of files that should not be packed into the epub file. 310 #epub_exclude_files = [] 311 312 # The depth of the table of contents in toc.ncx. 313 #epub_tocdepth = 3 314 315 # Allow duplicate toc entries. 316 #epub_tocdup = True 317 318 # Fix unsupported image types using the PIL. 319 #epub_fix_images = False 320 321 # Scale large images. 322 #epub_max_image_width = 0 323 324 # If 'no', URL addresses will not be shown. 325 #epub_show_urls = 'inline' 326 327 # If false, no index is generated. 328 #epub_use_index = True 329 [end of docs/conf.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/docs/conf.py b/docs/conf.py --- a/docs/conf.py +++ b/docs/conf.py @@ -59,7 +59,7 @@ # General information about the project. project = u'deis' -copyright = u'2013, OpDemand LLC' +copyright = u'2013, 2014 OpDemand LLC' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the
{"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -59,7 +59,7 @@\n \n # General information about the project.\n project = u'deis'\n-copyright = u'2013, OpDemand LLC'\n+copyright = u'2013, 2014 OpDemand LLC'\n \n # The version info for the project you're documenting, acts as replacement for\n # |version| and |release|, also used in various other places throughout the\n", "issue": "LICENSE needs updating for 2014\nIt still says 2013\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# deis documentation build configuration file, created by\n# sphinx-quickstart on Fri Jul 26 12:12:00 2013.\n#\n# This file is execfile()d with the current directory set to its containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport os\nimport sys\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n\n# Some hackery here to get deis.py to be importable as client.deis\nopen(os.path.join('..', '__init__.py'), 'a')\nsys.path.insert(0, os.path.abspath(os.path.join('..')))\nsys.path.insert(0, os.path.abspath(os.path.join('..', 'controller')))\n# create local_settings.py for SECRET_KEY if necessary\nlocal_settings_path = os.path.abspath(\n os.path.join('..', 'controller', 'deis', 'local_settings.py'))\nif not os.path.exists(local_settings_path):\n with open(local_settings_path, 'w') as local_settings:\n local_settings.write(\"SECRET_KEY = 'DummySecretKey'\\n\")\n# set up Django\nos.environ['DJANGO_SETTINGS_MODULE'] = 'deis.settings'\nfrom django.conf import settings # noqa\n\n# -- General configuration -----------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be extensions\n# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = ['sphinx.ext.autodoc', 'sphinx.ext.autosummary',\n 'sphinx.ext.viewcode']\n\n# default flags for auto-generated python code documetation\nautodoc_default_flags = ['members', 'undoc-members']\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The encoding of source files.\n#source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'toctree'\n\n# General information about the project.\nproject = u'deis'\ncopyright = u'2013, OpDemand LLC'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\nfrom deis import __version__\n\n# The short X.Y version.\nversion = __version__.rsplit('.', 1)[0]\n# The full version, including alpha/beta/rc tags.\nrelease = __version__\n\n# The language for content autogenerated by Sphinx. 
Refer to documentation\n# for a list of supported languages.\n#language = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = ['_build', 'venv', '**/_*.rst']\n\n# The reST default role (used for this markup: `text`) to use for all documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n#modindex_common_prefix = []\n\n# If true, keep warnings as \"system message\" paragraphs in the built documents.\n#keep_warnings = False\n\n\n# -- Options for HTML output ---------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = 'deis'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\nhtml_theme_path = ['theme']\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n#html_logo = None\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['../controller/web/static']\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n#html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\nhtml_use_smartypants = True\n\nhtml_add_permalinks = True\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_domain_indices = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n#html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n#html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n#html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n#html_file_suffix = None\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'deisdoc'\n\n\n# -- Options for LaTeX output --------------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n #'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n #'preamble': '',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title, author, documentclass [howto/manual]).\nlatex_documents = [\n ('index', 'deis.tex', u'deis Documentation',\n u'Author', 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# If true, show page references after internal links.\n#latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n#latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_domain_indices = True\n\n\n# -- Options for manual page output --------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n ('index', 'deis', u'deis Documentation',\n [u'Author'], 1)\n]\n\n# If true, show URL addresses after external links.\n#man_show_urls = False\n\n\n# -- Options for Texinfo output ------------------------------------------------\n\n# Grouping the document tree into Texinfo files. 
List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n ('index', 'deis', u'deis Documentation',\n u'Author', 'deis', 'One line description of project.',\n 'Miscellaneous'),\n]\n\n# Documents to append as an appendix to all manuals.\n#texinfo_appendices = []\n\n# If false, no module index is generated.\n#texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n#texinfo_show_urls = 'footnote'\n\n# If true, do not generate a @detailmenu in the \"Top\" node's menu.\n#texinfo_no_detailmenu = False\n\n\n# -- Options for Epub output ---------------------------------------------------\n\n# Bibliographic Dublin Core info.\nepub_title = u'deis'\nepub_author = u'OpDemand LLC'\nepub_publisher = u'OpDemand LLC'\nepub_copyright = u'2013, OpDemand LLC'\n\n# The language of the text. It defaults to the language option\n# or en if the language is not set.\n#epub_language = ''\n\n# The scheme of the identifier. Typical schemes are ISBN or URL.\n#epub_scheme = ''\n\n# The unique identifier of the text. This can be a ISBN number\n# or the project homepage.\n#epub_identifier = ''\n\n# A unique identification for the text.\n#epub_uid = ''\n\n# A tuple containing the cover image and cover page html template filenames.\n#epub_cover = ()\n\n# A sequence of (type, uri, title) tuples for the guide element of content.opf.\n#epub_guide = ()\n\n# HTML files that should be inserted before the pages created by sphinx.\n# The format is a list of tuples containing the path and title.\n#epub_pre_files = []\n\n# HTML files shat should be inserted after the pages created by sphinx.\n# The format is a list of tuples containing the path and title.\n#epub_post_files = []\n\n# A list of files that should not be packed into the epub file.\n#epub_exclude_files = []\n\n# The depth of the table of contents in toc.ncx.\n#epub_tocdepth = 3\n\n# Allow duplicate toc entries.\n#epub_tocdup = True\n\n# Fix unsupported image types using the PIL.\n#epub_fix_images = False\n\n# Scale large images.\n#epub_max_image_width = 0\n\n# If 'no', URL addresses will not be shown.\n#epub_show_urls = 'inline'\n\n# If false, no index is generated.\n#epub_use_index = True\n", "path": "docs/conf.py"}]}
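The deis row above is a one-line copyright bump in docs/conf.py, which defines both copyright and epub_copyright. As a side note, a small hypothetical helper (not part of the repository; the path and target year are assumptions) can flag strings that lag behind the intended year:

```python
import re
from pathlib import Path

target_year = "2014"
conf = Path("docs/conf.py").read_text()
for match in re.finditer(r"^(epub_)?copyright\s*=\s*u?['\"]([^'\"]+)['\"]", conf, re.MULTILINE):
    if target_year not in match.group(2):
        print("stale copyright string:", match.group(0))
```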
num_tokens_prompt: 4,072
num_tokens_diff: 119

problem_id: gh_patches_debug_14589
source: rasdani/github-patches
task_type: git_diff
in_source_id: scrapy__scrapy-3379
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Cannot use contracts with inherited callbacks ### Description If you want to `scrapy check` a spider that has inherited methods, these methods' contracts will be ignored. ### Reproduce ```python class BaseSpider(Spider): def returns_request(self, response): """ method which returns request @url https://docs.scrapy.org/en/latest/ @returns requests 1 """ return Request('http://scrapy.org', callback=self.returns_item) class DemoSpider(BaseSpider): name = 'demo_spider' ``` And then run `scrapy check`. You'll get the following output: ``` ---------------------------------------------------------------------- Ran 0 contracts in 0.000s OK ``` ### Reason `ContractsManager.tested_methods_from_spidercls` uses `vars(spidercls).items()` to get methods. ### Solution Use `inspect.getmembers(spidercls)` instead. </issue> <code> [start of scrapy/contracts/__init__.py] 1 import sys 2 import re 3 from functools import wraps 4 from unittest import TestCase 5 6 from scrapy.http import Request 7 from scrapy.utils.spider import iterate_spider_output 8 from scrapy.utils.python import get_spec 9 10 11 class ContractsManager(object): 12 contracts = {} 13 14 def __init__(self, contracts): 15 for contract in contracts: 16 self.contracts[contract.name] = contract 17 18 def tested_methods_from_spidercls(self, spidercls): 19 methods = [] 20 for key, value in vars(spidercls).items(): 21 if (callable(value) and value.__doc__ and 22 re.search(r'^\s*@', value.__doc__, re.MULTILINE)): 23 methods.append(key) 24 25 return methods 26 27 def extract_contracts(self, method): 28 contracts = [] 29 for line in method.__doc__.split('\n'): 30 line = line.strip() 31 32 if line.startswith('@'): 33 name, args = re.match(r'@(\w+)\s*(.*)', line).groups() 34 args = re.split(r'\s+', args) 35 36 contracts.append(self.contracts[name](method, *args)) 37 38 return contracts 39 40 def from_spider(self, spider, results): 41 requests = [] 42 for method in self.tested_methods_from_spidercls(type(spider)): 43 bound_method = spider.__getattribute__(method) 44 requests.append(self.from_method(bound_method, results)) 45 46 return requests 47 48 def from_method(self, method, results): 49 contracts = self.extract_contracts(method) 50 if contracts: 51 # calculate request args 52 args, kwargs = get_spec(Request.__init__) 53 kwargs['callback'] = method 54 for contract in contracts: 55 kwargs = contract.adjust_request_args(kwargs) 56 57 # create and prepare request 58 args.remove('self') 59 if set(args).issubset(set(kwargs)): 60 request = Request(**kwargs) 61 62 # execute pre and post hooks in order 63 for contract in reversed(contracts): 64 request = contract.add_pre_hook(request, results) 65 for contract in contracts: 66 request = contract.add_post_hook(request, results) 67 68 self._clean_req(request, method, results) 69 return request 70 71 def _clean_req(self, request, method, results): 72 """ stop the request from returning objects and records any errors """ 73 74 cb = request.callback 75 76 @wraps(cb) 77 def cb_wrapper(response): 78 try: 79 output = cb(response) 80 output = list(iterate_spider_output(output)) 81 except: 82 case = _create_testcase(method, 'callback') 83 results.addError(case, sys.exc_info()) 84 85 def eb_wrapper(failure): 86 case = _create_testcase(method, 'errback') 87 exc_info = failure.type, failure.value, failure.getTracebackObject() 88 results.addError(case, exc_info) 89 90 request.callback = cb_wrapper 91 request.errback = 
eb_wrapper 92 93 94 class Contract(object): 95 """ Abstract class for contracts """ 96 97 def __init__(self, method, *args): 98 self.testcase_pre = _create_testcase(method, '@%s pre-hook' % self.name) 99 self.testcase_post = _create_testcase(method, '@%s post-hook' % self.name) 100 self.args = args 101 102 def add_pre_hook(self, request, results): 103 if hasattr(self, 'pre_process'): 104 cb = request.callback 105 106 @wraps(cb) 107 def wrapper(response): 108 try: 109 results.startTest(self.testcase_pre) 110 self.pre_process(response) 111 results.stopTest(self.testcase_pre) 112 except AssertionError: 113 results.addFailure(self.testcase_pre, sys.exc_info()) 114 except Exception: 115 results.addError(self.testcase_pre, sys.exc_info()) 116 else: 117 results.addSuccess(self.testcase_pre) 118 finally: 119 return list(iterate_spider_output(cb(response))) 120 121 request.callback = wrapper 122 123 return request 124 125 def add_post_hook(self, request, results): 126 if hasattr(self, 'post_process'): 127 cb = request.callback 128 129 @wraps(cb) 130 def wrapper(response): 131 output = list(iterate_spider_output(cb(response))) 132 try: 133 results.startTest(self.testcase_post) 134 self.post_process(output) 135 results.stopTest(self.testcase_post) 136 except AssertionError: 137 results.addFailure(self.testcase_post, sys.exc_info()) 138 except Exception: 139 results.addError(self.testcase_post, sys.exc_info()) 140 else: 141 results.addSuccess(self.testcase_post) 142 finally: 143 return output 144 145 request.callback = wrapper 146 147 return request 148 149 def adjust_request_args(self, args): 150 return args 151 152 153 def _create_testcase(method, desc): 154 spider = method.__self__.name 155 156 class ContractTestCase(TestCase): 157 def __str__(_self): 158 return "[%s] %s (%s)" % (spider, method.__name__, desc) 159 160 name = '%s_%s' % (spider, method.__name__) 161 setattr(ContractTestCase, name, lambda x: x) 162 return ContractTestCase(name) 163 [end of scrapy/contracts/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/scrapy/contracts/__init__.py b/scrapy/contracts/__init__.py --- a/scrapy/contracts/__init__.py +++ b/scrapy/contracts/__init__.py @@ -1,6 +1,7 @@ import sys import re from functools import wraps +from inspect import getmembers from unittest import TestCase from scrapy.http import Request @@ -17,7 +18,7 @@ def tested_methods_from_spidercls(self, spidercls): methods = [] - for key, value in vars(spidercls).items(): + for key, value in getmembers(spidercls): if (callable(value) and value.__doc__ and re.search(r'^\s*@', value.__doc__, re.MULTILINE)): methods.append(key)
{"golden_diff": "diff --git a/scrapy/contracts/__init__.py b/scrapy/contracts/__init__.py\n--- a/scrapy/contracts/__init__.py\n+++ b/scrapy/contracts/__init__.py\n@@ -1,6 +1,7 @@\n import sys\n import re\n from functools import wraps\n+from inspect import getmembers\n from unittest import TestCase\n \n from scrapy.http import Request\n@@ -17,7 +18,7 @@\n \n def tested_methods_from_spidercls(self, spidercls):\n methods = []\n- for key, value in vars(spidercls).items():\n+ for key, value in getmembers(spidercls):\n if (callable(value) and value.__doc__ and\n re.search(r'^\\s*@', value.__doc__, re.MULTILINE)):\n methods.append(key)\n", "issue": "Cannot use contracts with inherited callbacks\n### Description\r\n\r\nIf you want to `scrapy check` a spider that has inherited methods, these methods' contracts will be ignored.\r\n\r\n### Reproduce\r\n\r\n```python\r\nclass BaseSpider(Spider):\r\n\r\n def returns_request(self, response):\r\n \"\"\" method which returns request\r\n @url https://docs.scrapy.org/en/latest/\r\n @returns requests 1\r\n \"\"\"\r\n return Request('http://scrapy.org', callback=self.returns_item)\r\n\r\n\r\nclass DemoSpider(BaseSpider):\r\n name = 'demo_spider'\r\n```\r\n\r\nAnd then run `scrapy check`.\r\n\r\nYou'll get the following output:\r\n\r\n```\r\n----------------------------------------------------------------------\r\nRan 0 contracts in 0.000s\r\n\r\nOK\r\n```\r\n\r\n### Reason\r\n\r\n`ContractsManager.tested_methods_from_spidercls` uses `vars(spidercls).items()` to get methods.\r\n\r\n### Solution\r\n\r\nUse `inspect.getmembers(spidercls)` instead.\n", "before_files": [{"content": "import sys\nimport re\nfrom functools import wraps\nfrom unittest import TestCase\n\nfrom scrapy.http import Request\nfrom scrapy.utils.spider import iterate_spider_output\nfrom scrapy.utils.python import get_spec\n\n\nclass ContractsManager(object):\n contracts = {}\n\n def __init__(self, contracts):\n for contract in contracts:\n self.contracts[contract.name] = contract\n\n def tested_methods_from_spidercls(self, spidercls):\n methods = []\n for key, value in vars(spidercls).items():\n if (callable(value) and value.__doc__ and\n re.search(r'^\\s*@', value.__doc__, re.MULTILINE)):\n methods.append(key)\n\n return methods\n\n def extract_contracts(self, method):\n contracts = []\n for line in method.__doc__.split('\\n'):\n line = line.strip()\n\n if line.startswith('@'):\n name, args = re.match(r'@(\\w+)\\s*(.*)', line).groups()\n args = re.split(r'\\s+', args)\n\n contracts.append(self.contracts[name](method, *args))\n\n return contracts\n\n def from_spider(self, spider, results):\n requests = []\n for method in self.tested_methods_from_spidercls(type(spider)):\n bound_method = spider.__getattribute__(method)\n requests.append(self.from_method(bound_method, results))\n\n return requests\n\n def from_method(self, method, results):\n contracts = self.extract_contracts(method)\n if contracts:\n # calculate request args\n args, kwargs = get_spec(Request.__init__)\n kwargs['callback'] = method\n for contract in contracts:\n kwargs = contract.adjust_request_args(kwargs)\n\n # create and prepare request\n args.remove('self')\n if set(args).issubset(set(kwargs)):\n request = Request(**kwargs)\n\n # execute pre and post hooks in order\n for contract in reversed(contracts):\n request = contract.add_pre_hook(request, results)\n for contract in contracts:\n request = contract.add_post_hook(request, results)\n\n self._clean_req(request, method, results)\n return request\n\n def _clean_req(self, 
request, method, results):\n \"\"\" stop the request from returning objects and records any errors \"\"\"\n\n cb = request.callback\n\n @wraps(cb)\n def cb_wrapper(response):\n try:\n output = cb(response)\n output = list(iterate_spider_output(output))\n except:\n case = _create_testcase(method, 'callback')\n results.addError(case, sys.exc_info())\n\n def eb_wrapper(failure):\n case = _create_testcase(method, 'errback')\n exc_info = failure.type, failure.value, failure.getTracebackObject()\n results.addError(case, exc_info)\n\n request.callback = cb_wrapper\n request.errback = eb_wrapper\n\n\nclass Contract(object):\n \"\"\" Abstract class for contracts \"\"\"\n\n def __init__(self, method, *args):\n self.testcase_pre = _create_testcase(method, '@%s pre-hook' % self.name)\n self.testcase_post = _create_testcase(method, '@%s post-hook' % self.name)\n self.args = args\n\n def add_pre_hook(self, request, results):\n if hasattr(self, 'pre_process'):\n cb = request.callback\n\n @wraps(cb)\n def wrapper(response):\n try:\n results.startTest(self.testcase_pre)\n self.pre_process(response)\n results.stopTest(self.testcase_pre)\n except AssertionError:\n results.addFailure(self.testcase_pre, sys.exc_info())\n except Exception:\n results.addError(self.testcase_pre, sys.exc_info())\n else:\n results.addSuccess(self.testcase_pre)\n finally:\n return list(iterate_spider_output(cb(response)))\n\n request.callback = wrapper\n\n return request\n\n def add_post_hook(self, request, results):\n if hasattr(self, 'post_process'):\n cb = request.callback\n\n @wraps(cb)\n def wrapper(response):\n output = list(iterate_spider_output(cb(response)))\n try:\n results.startTest(self.testcase_post)\n self.post_process(output)\n results.stopTest(self.testcase_post)\n except AssertionError:\n results.addFailure(self.testcase_post, sys.exc_info())\n except Exception:\n results.addError(self.testcase_post, sys.exc_info())\n else:\n results.addSuccess(self.testcase_post)\n finally:\n return output\n\n request.callback = wrapper\n\n return request\n\n def adjust_request_args(self, args):\n return args\n\n\ndef _create_testcase(method, desc):\n spider = method.__self__.name\n\n class ContractTestCase(TestCase):\n def __str__(_self):\n return \"[%s] %s (%s)\" % (spider, method.__name__, desc)\n\n name = '%s_%s' % (spider, method.__name__)\n setattr(ContractTestCase, name, lambda x: x)\n return ContractTestCase(name)\n", "path": "scrapy/contracts/__init__.py"}]}
2,193
175
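The golden diff above swaps `vars(spidercls).items()` for `inspect.getmembers(spidercls)`. A minimal standard-library sketch of why that matters (the spider classes below are illustrative stand-ins, not Scrapy objects): `vars()` only sees attributes defined directly on the class, while `getmembers()` also walks the base classes, so inherited callbacks carrying contract docstrings are found.

```python
import inspect
import re


class BaseSpider:
    def returns_request(self, response):
        """method with a contract
        @url https://docs.scrapy.org/en/latest/
        @returns requests 1
        """


class DemoSpider(BaseSpider):
    name = "demo_spider"


def tested_methods(spidercls, use_getmembers):
    # Same filter as ContractsManager.tested_methods_from_spidercls: keep
    # callables whose docstring has a line starting with '@'.
    members = inspect.getmembers(spidercls) if use_getmembers else vars(spidercls).items()
    return [
        key for key, value in members
        if callable(value) and value.__doc__
        and re.search(r"^\s*@", value.__doc__, re.MULTILINE)
    ]


print(tested_methods(DemoSpider, use_getmembers=False))  # [] : the inherited callback is missed
print(tested_methods(DemoSpider, use_getmembers=True))   # ['returns_request']
```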
gh_patches_debug_50118
rasdani/github-patches
git_diff
scrapy__scrapy-5754
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Silence the `filelock` logger After we started using `tldextract` we sometimes get log messages from `filelock` with the DEBUG level, it makes sense to silence them like we do for some other libraries in https://github.com/scrapy/scrapy/blob/fe60c1224e39aa3d85b20afd54566f135d9de085/scrapy/utils/log.py#L45-L59 </issue> <code> [start of scrapy/utils/log.py] 1 import logging 2 import sys 3 import warnings 4 from logging.config import dictConfig 5 6 from twisted.python import log as twisted_log 7 from twisted.python.failure import Failure 8 9 import scrapy 10 from scrapy.exceptions import ScrapyDeprecationWarning 11 from scrapy.settings import Settings 12 from scrapy.utils.versions import scrapy_components_versions 13 14 15 logger = logging.getLogger(__name__) 16 17 18 def failure_to_exc_info(failure): 19 """Extract exc_info from Failure instances""" 20 if isinstance(failure, Failure): 21 return (failure.type, failure.value, failure.getTracebackObject()) 22 23 24 class TopLevelFormatter(logging.Filter): 25 """Keep only top level loggers's name (direct children from root) from 26 records. 27 28 This filter will replace Scrapy loggers' names with 'scrapy'. This mimics 29 the old Scrapy log behaviour and helps shortening long names. 30 31 Since it can't be set for just one logger (it won't propagate for its 32 children), it's going to be set in the root handler, with a parametrized 33 ``loggers`` list where it should act. 34 """ 35 36 def __init__(self, loggers=None): 37 self.loggers = loggers or [] 38 39 def filter(self, record): 40 if any(record.name.startswith(logger + '.') for logger in self.loggers): 41 record.name = record.name.split('.', 1)[0] 42 return True 43 44 45 DEFAULT_LOGGING = { 46 'version': 1, 47 'disable_existing_loggers': False, 48 'loggers': { 49 'hpack': { 50 'level': 'ERROR', 51 }, 52 'scrapy': { 53 'level': 'DEBUG', 54 }, 55 'twisted': { 56 'level': 'ERROR', 57 }, 58 } 59 } 60 61 62 def configure_logging(settings=None, install_root_handler=True): 63 """ 64 Initialize logging defaults for Scrapy. 65 66 :param settings: settings used to create and configure a handler for the 67 root logger (default: None). 68 :type settings: dict, :class:`~scrapy.settings.Settings` object or ``None`` 69 70 :param install_root_handler: whether to install root logging handler 71 (default: True) 72 :type install_root_handler: bool 73 74 This function does: 75 76 - Route warnings and twisted logging through Python standard logging 77 - Assign DEBUG and ERROR level to Scrapy and Twisted loggers respectively 78 - Route stdout to log if LOG_STDOUT setting is True 79 80 When ``install_root_handler`` is True (default), this function also 81 creates a handler for the root logger according to given settings 82 (see :ref:`topics-logging-settings`). You can override default options 83 using ``settings`` argument. When ``settings`` is empty or None, defaults 84 are used. 
85 """ 86 if not sys.warnoptions: 87 # Route warnings through python logging 88 logging.captureWarnings(True) 89 90 observer = twisted_log.PythonLoggingObserver('twisted') 91 observer.start() 92 93 dictConfig(DEFAULT_LOGGING) 94 95 if isinstance(settings, dict) or settings is None: 96 settings = Settings(settings) 97 98 if settings.getbool('LOG_STDOUT'): 99 sys.stdout = StreamLogger(logging.getLogger('stdout')) 100 101 if install_root_handler: 102 install_scrapy_root_handler(settings) 103 104 105 def install_scrapy_root_handler(settings): 106 global _scrapy_root_handler 107 108 if (_scrapy_root_handler is not None 109 and _scrapy_root_handler in logging.root.handlers): 110 logging.root.removeHandler(_scrapy_root_handler) 111 logging.root.setLevel(logging.NOTSET) 112 _scrapy_root_handler = _get_handler(settings) 113 logging.root.addHandler(_scrapy_root_handler) 114 115 116 def get_scrapy_root_handler(): 117 return _scrapy_root_handler 118 119 120 _scrapy_root_handler = None 121 122 123 def _get_handler(settings): 124 """ Return a log handler object according to settings """ 125 filename = settings.get('LOG_FILE') 126 if filename: 127 mode = 'a' if settings.getbool('LOG_FILE_APPEND') else 'w' 128 encoding = settings.get('LOG_ENCODING') 129 handler = logging.FileHandler(filename, mode=mode, encoding=encoding) 130 elif settings.getbool('LOG_ENABLED'): 131 handler = logging.StreamHandler() 132 else: 133 handler = logging.NullHandler() 134 135 formatter = logging.Formatter( 136 fmt=settings.get('LOG_FORMAT'), 137 datefmt=settings.get('LOG_DATEFORMAT') 138 ) 139 handler.setFormatter(formatter) 140 handler.setLevel(settings.get('LOG_LEVEL')) 141 if settings.getbool('LOG_SHORT_NAMES'): 142 handler.addFilter(TopLevelFormatter(['scrapy'])) 143 return handler 144 145 146 def log_scrapy_info(settings: Settings) -> None: 147 logger.info("Scrapy %(version)s started (bot: %(bot)s)", 148 {'version': scrapy.__version__, 'bot': settings['BOT_NAME']}) 149 versions = [ 150 f"{name} {version}" 151 for name, version in scrapy_components_versions() 152 if name != "Scrapy" 153 ] 154 logger.info("Versions: %(versions)s", {'versions': ", ".join(versions)}) 155 156 157 def log_reactor_info() -> None: 158 from twisted.internet import reactor 159 logger.debug("Using reactor: %s.%s", reactor.__module__, reactor.__class__.__name__) 160 from twisted.internet import asyncioreactor 161 if isinstance(reactor, asyncioreactor.AsyncioSelectorReactor): 162 logger.debug( 163 "Using asyncio event loop: %s.%s", 164 reactor._asyncioEventloop.__module__, 165 reactor._asyncioEventloop.__class__.__name__, 166 ) 167 168 169 class StreamLogger: 170 """Fake file-like stream object that redirects writes to a logger instance 171 172 Taken from: 173 https://www.electricmonk.nl/log/2011/08/14/redirect-stdout-and-stderr-to-a-logger-in-python/ 174 """ 175 def __init__(self, logger, log_level=logging.INFO): 176 self.logger = logger 177 self.log_level = log_level 178 self.linebuf = '' 179 180 def write(self, buf): 181 for line in buf.rstrip().splitlines(): 182 self.logger.log(self.log_level, line.rstrip()) 183 184 def flush(self): 185 for h in self.logger.handlers: 186 h.flush() 187 188 189 class LogCounterHandler(logging.Handler): 190 """Record log levels count into a crawler stats""" 191 192 def __init__(self, crawler, *args, **kwargs): 193 super().__init__(*args, **kwargs) 194 self.crawler = crawler 195 196 def emit(self, record): 197 sname = f'log_count/{record.levelname}' 198 self.crawler.stats.inc_value(sname) 199 200 201 def 
logformatter_adapter(logkws): 202 """ 203 Helper that takes the dictionary output from the methods in LogFormatter 204 and adapts it into a tuple of positional arguments for logger.log calls, 205 handling backward compatibility as well. 206 """ 207 if not {'level', 'msg', 'args'} <= set(logkws): 208 warnings.warn('Missing keys in LogFormatter method', 209 ScrapyDeprecationWarning) 210 211 if 'format' in logkws: 212 warnings.warn('`format` key in LogFormatter methods has been ' 213 'deprecated, use `msg` instead', 214 ScrapyDeprecationWarning) 215 216 level = logkws.get('level', logging.INFO) 217 message = logkws.get('format', logkws.get('msg')) 218 # NOTE: This also handles 'args' being an empty dict, that case doesn't 219 # play well in logger.log calls 220 args = logkws if not logkws.get('args') else logkws['args'] 221 222 return (level, message, args) 223 [end of scrapy/utils/log.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/scrapy/utils/log.py b/scrapy/utils/log.py --- a/scrapy/utils/log.py +++ b/scrapy/utils/log.py @@ -46,6 +46,9 @@ 'version': 1, 'disable_existing_loggers': False, 'loggers': { + 'filelock': { + 'level': 'ERROR', + }, 'hpack': { 'level': 'ERROR', },
{"golden_diff": "diff --git a/scrapy/utils/log.py b/scrapy/utils/log.py\n--- a/scrapy/utils/log.py\n+++ b/scrapy/utils/log.py\n@@ -46,6 +46,9 @@\n 'version': 1,\n 'disable_existing_loggers': False,\n 'loggers': {\n+ 'filelock': {\n+ 'level': 'ERROR',\n+ },\n 'hpack': {\n 'level': 'ERROR',\n },\n", "issue": "Silence the `filelock` logger\nAfter we started using `tldextract` we sometimes get log messages from `filelock` with the DEBUG level, it makes sense to silence them like we do for some other libraries in https://github.com/scrapy/scrapy/blob/fe60c1224e39aa3d85b20afd54566f135d9de085/scrapy/utils/log.py#L45-L59\n", "before_files": [{"content": "import logging\nimport sys\nimport warnings\nfrom logging.config import dictConfig\n\nfrom twisted.python import log as twisted_log\nfrom twisted.python.failure import Failure\n\nimport scrapy\nfrom scrapy.exceptions import ScrapyDeprecationWarning\nfrom scrapy.settings import Settings\nfrom scrapy.utils.versions import scrapy_components_versions\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef failure_to_exc_info(failure):\n \"\"\"Extract exc_info from Failure instances\"\"\"\n if isinstance(failure, Failure):\n return (failure.type, failure.value, failure.getTracebackObject())\n\n\nclass TopLevelFormatter(logging.Filter):\n \"\"\"Keep only top level loggers's name (direct children from root) from\n records.\n\n This filter will replace Scrapy loggers' names with 'scrapy'. This mimics\n the old Scrapy log behaviour and helps shortening long names.\n\n Since it can't be set for just one logger (it won't propagate for its\n children), it's going to be set in the root handler, with a parametrized\n ``loggers`` list where it should act.\n \"\"\"\n\n def __init__(self, loggers=None):\n self.loggers = loggers or []\n\n def filter(self, record):\n if any(record.name.startswith(logger + '.') for logger in self.loggers):\n record.name = record.name.split('.', 1)[0]\n return True\n\n\nDEFAULT_LOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'loggers': {\n 'hpack': {\n 'level': 'ERROR',\n },\n 'scrapy': {\n 'level': 'DEBUG',\n },\n 'twisted': {\n 'level': 'ERROR',\n },\n }\n}\n\n\ndef configure_logging(settings=None, install_root_handler=True):\n \"\"\"\n Initialize logging defaults for Scrapy.\n\n :param settings: settings used to create and configure a handler for the\n root logger (default: None).\n :type settings: dict, :class:`~scrapy.settings.Settings` object or ``None``\n\n :param install_root_handler: whether to install root logging handler\n (default: True)\n :type install_root_handler: bool\n\n This function does:\n\n - Route warnings and twisted logging through Python standard logging\n - Assign DEBUG and ERROR level to Scrapy and Twisted loggers respectively\n - Route stdout to log if LOG_STDOUT setting is True\n\n When ``install_root_handler`` is True (default), this function also\n creates a handler for the root logger according to given settings\n (see :ref:`topics-logging-settings`). You can override default options\n using ``settings`` argument. 
When ``settings`` is empty or None, defaults\n are used.\n \"\"\"\n if not sys.warnoptions:\n # Route warnings through python logging\n logging.captureWarnings(True)\n\n observer = twisted_log.PythonLoggingObserver('twisted')\n observer.start()\n\n dictConfig(DEFAULT_LOGGING)\n\n if isinstance(settings, dict) or settings is None:\n settings = Settings(settings)\n\n if settings.getbool('LOG_STDOUT'):\n sys.stdout = StreamLogger(logging.getLogger('stdout'))\n\n if install_root_handler:\n install_scrapy_root_handler(settings)\n\n\ndef install_scrapy_root_handler(settings):\n global _scrapy_root_handler\n\n if (_scrapy_root_handler is not None\n and _scrapy_root_handler in logging.root.handlers):\n logging.root.removeHandler(_scrapy_root_handler)\n logging.root.setLevel(logging.NOTSET)\n _scrapy_root_handler = _get_handler(settings)\n logging.root.addHandler(_scrapy_root_handler)\n\n\ndef get_scrapy_root_handler():\n return _scrapy_root_handler\n\n\n_scrapy_root_handler = None\n\n\ndef _get_handler(settings):\n \"\"\" Return a log handler object according to settings \"\"\"\n filename = settings.get('LOG_FILE')\n if filename:\n mode = 'a' if settings.getbool('LOG_FILE_APPEND') else 'w'\n encoding = settings.get('LOG_ENCODING')\n handler = logging.FileHandler(filename, mode=mode, encoding=encoding)\n elif settings.getbool('LOG_ENABLED'):\n handler = logging.StreamHandler()\n else:\n handler = logging.NullHandler()\n\n formatter = logging.Formatter(\n fmt=settings.get('LOG_FORMAT'),\n datefmt=settings.get('LOG_DATEFORMAT')\n )\n handler.setFormatter(formatter)\n handler.setLevel(settings.get('LOG_LEVEL'))\n if settings.getbool('LOG_SHORT_NAMES'):\n handler.addFilter(TopLevelFormatter(['scrapy']))\n return handler\n\n\ndef log_scrapy_info(settings: Settings) -> None:\n logger.info(\"Scrapy %(version)s started (bot: %(bot)s)\",\n {'version': scrapy.__version__, 'bot': settings['BOT_NAME']})\n versions = [\n f\"{name} {version}\"\n for name, version in scrapy_components_versions()\n if name != \"Scrapy\"\n ]\n logger.info(\"Versions: %(versions)s\", {'versions': \", \".join(versions)})\n\n\ndef log_reactor_info() -> None:\n from twisted.internet import reactor\n logger.debug(\"Using reactor: %s.%s\", reactor.__module__, reactor.__class__.__name__)\n from twisted.internet import asyncioreactor\n if isinstance(reactor, asyncioreactor.AsyncioSelectorReactor):\n logger.debug(\n \"Using asyncio event loop: %s.%s\",\n reactor._asyncioEventloop.__module__,\n reactor._asyncioEventloop.__class__.__name__,\n )\n\n\nclass StreamLogger:\n \"\"\"Fake file-like stream object that redirects writes to a logger instance\n\n Taken from:\n https://www.electricmonk.nl/log/2011/08/14/redirect-stdout-and-stderr-to-a-logger-in-python/\n \"\"\"\n def __init__(self, logger, log_level=logging.INFO):\n self.logger = logger\n self.log_level = log_level\n self.linebuf = ''\n\n def write(self, buf):\n for line in buf.rstrip().splitlines():\n self.logger.log(self.log_level, line.rstrip())\n\n def flush(self):\n for h in self.logger.handlers:\n h.flush()\n\n\nclass LogCounterHandler(logging.Handler):\n \"\"\"Record log levels count into a crawler stats\"\"\"\n\n def __init__(self, crawler, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.crawler = crawler\n\n def emit(self, record):\n sname = f'log_count/{record.levelname}'\n self.crawler.stats.inc_value(sname)\n\n\ndef logformatter_adapter(logkws):\n \"\"\"\n Helper that takes the dictionary output from the methods in LogFormatter\n and adapts it into a tuple of 
positional arguments for logger.log calls,\n handling backward compatibility as well.\n \"\"\"\n if not {'level', 'msg', 'args'} <= set(logkws):\n warnings.warn('Missing keys in LogFormatter method',\n ScrapyDeprecationWarning)\n\n if 'format' in logkws:\n warnings.warn('`format` key in LogFormatter methods has been '\n 'deprecated, use `msg` instead',\n ScrapyDeprecationWarning)\n\n level = logkws.get('level', logging.INFO)\n message = logkws.get('format', logkws.get('msg'))\n # NOTE: This also handles 'args' being an empty dict, that case doesn't\n # play well in logger.log calls\n args = logkws if not logkws.get('args') else logkws['args']\n\n return (level, message, args)\n", "path": "scrapy/utils/log.py"}]}
2,829
99
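The patch above follows the pattern already used for the `hpack` and `twisted` entries: register the noisy logger in the `DEFAULT_LOGGING` dict passed to `dictConfig` with a higher level so its DEBUG records are dropped at the logger itself. A self-contained sketch of that mechanism outside Scrapy (the `myapp` logger name and the log messages are just for illustration):

```python
import logging
from logging.config import dictConfig

dictConfig({
    "version": 1,
    "disable_existing_loggers": False,
    "loggers": {
        # Same idea as the patched DEFAULT_LOGGING entry: anything below
        # ERROR emitted by the 'filelock' logger never reaches a handler.
        "filelock": {"level": "ERROR"},
    },
})

logging.basicConfig(level=logging.DEBUG)

logging.getLogger("filelock").debug("chatty lock acquisition message")  # suppressed
logging.getLogger("myapp").debug("still visible")                       # printed by the root handler
```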
gh_patches_debug_1021
rasdani/github-patches
git_diff
cloud-custodian__cloud-custodian-8692
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> AWS user pool and identity pool resources.json has minimal information ### Describe the bug For the below custodian policy, the resources.json is created for each of the policy on successful execution. For user-pool and identity-pool, the resources.json file does not include full description/configuration of the resources, instead it contains very minimal information like ID, NAME, creation date etc. (as mentioned below) whereas for EC2 and Lambda, the populated resources.json has hundreds of metadata information. --- ##custodian.yaml policies: - name: cognito-checkauditmode resource: aws.user-pool - name: identity-checkauditmode resource: identity-pool - name: ec2-checkrunning resource: ec2 - name: find-all-lambdas resource: aws.lambda --- ##resources.json - cognito-checkauditmode [ { "Id": "xxxxxxxxxxxxxx", "Name": "xxxxxxxxxxxxxxxxxxx", "LambdaConfig": {}, "LastModifiedDate": "2023-06-29T08:56:18.028000-05:00", "CreationDate": "2023-06-29T08:56:17.860000-05:00", "Tags": [] }, { "Id": "xxxxxxxxxxxxxxxxxxx", "Name": "xxxxxxxxxxxxxxxxxxx", "LambdaConfig": {}, "LastModifiedDate": "2020-06-11T17:15:18.951000-05:00", "CreationDate": "2020-02-21T11:39:18.108000-06:00", "Tags": [] } ] --- ## resources.json - identity-checkauditmode [ { "IdentityPoolId": "xxxxxxxxxxxxxxxxxxx", "IdentityPoolName": "xxxxxxxxxxxxxxxxxxx", "Tags": [] } ] ### What did you expect to happen? Expecting a large json file with full configuration of the resource. Below is the AWS CLI command and the truncated response from CLI. Expecting a similar response. --- aws cognito-idp describe-user-pool --user-pool-id xxxxxxxxxxxxxxxxxxx --- truncated response { "UserPool": { "Id": "xxxxxxxxxxxxxxxxxxx", "Name": "xxxxxxxxxxxxxxxxxxx", "Policies": { "PasswordPolicy": { "MinimumLength": 8, "RequireUppercase": true, "RequireLowercase": true, "RequireNumbers": true, "RequireSymbols": true, "TemporaryPasswordValidityDays": 7 } }, "DeletionProtection": "INACTIVE", "LambdaConfig": {}, "LastModifiedDate": "2020-06-11T17:15:18.951000-05:00", "CreationDate": "2020-02-21T11:39:18.108000-06:00", "SchemaAttributes": [ { "Name": "sub", "AttributeDataType": "String", "DeveloperOnlyAttribute": false, "Mutable": false, "Required": true, "StringAttributeConstraints": { "MinLength": "1", "MaxLength": "2048" } }, ### Cloud Provider Amazon Web Services (AWS) ### Cloud Custodian version and dependency information ```shell Custodian: 0.9.27 Python: 3.11.4 (main, Jun 7 2023, 00:34:59) [Clang 14.0.3 (clang-1403.0.22.14.1)] Platform: posix.uname_result(sysname='Darwin', nodename='MABPWKJJ4T9RYW', release='22.5.0', version='Darwin Kernel Version 22.5.0: Thu Jun 8 22:22:23 PDT 2023; root:xnu-8796.121.3~7/RELEASE_ARM64_T6020', machine='arm64') Using venv: False Docker: False Installed: argcomplete==3.0.8 attrs==23.1.0 boto3==1.26.139 botocore==1.29.139 docutils==0.18.1 importlib-metadata==5.2.0 jmespath==1.0.1 jsonschema==4.17.3 pyrsistent==0.19.3 python-dateutil==2.8.2 pyyaml==6.0 s3transfer==0.6.1 six==1.16.0 tabulate==0.9.0 typing-extensions==4.6.3 urllib3==1.26.16 zipp==3.15.0 ``` ### Policy ```shell ##custodian.yaml policies: - name: cognito-checkauditmode resource: aws.user-pool - name: identity-checkauditmode resource: identity-pool - name: ec2-checkrunning resource: ec2 - name: find-all-lambdas resource: aws.lambda ``` ### Relevant log/traceback output ```shell 2023-06-26 20:09:45,838 - custodian.policy - INFO - policy:cognito-checkauditmode 
resource:aws.user-pool region:us-east-1 count:1 time:0.00 2023-06-26 20:20:16,225 - custodian.policy - INFO - policy:cognito-checkauditmode resource:aws.user-pool region:us-east-1 count:1 time:0.70 2023-06-26 20:25:23,030 - custodian.policy - INFO - policy:cognito-checkauditmode resource:user-pool region:us-east-1 count:1 time:0.00 2023-06-26 23:09:38,143 - custodian.policy - INFO - policy:cognito-checkauditmode resource:user-pool region:us-east-1 count:1 time:0.73 2023-06-26 23:13:37,202 - custodian.policy - INFO - policy:cognito-checkauditmode resource:user-pool region:us-east-1 count:1 time:0.00 2023-06-26 23:17:02,042 - custodian.policy - INFO - policy:cognito-checkauditmode resource:user-pool region:us-east-1 count:1 time:0.00 2023-06-26 23:18:59,196 - custodian.policy - INFO - policy:cognito-checkauditmode resource:user-pool region:us-east-1 count:0 time:0.00 2023-06-26 23:28:37,082 - custodian.policy - INFO - policy:cognito-checkauditmode resource:user-pool region:us-east-1 count:0 time:0.67 2023-06-27 09:11:53,373 - custodian.policy - INFO - policy:cognito-checkauditmode resource:user-pool region:us-east-1 count:1 time:0.67 2023-06-27 09:13:07,745 - custodian.policy - INFO - policy:cognito-checkauditmode resource:user-pool region:us-east-1 count:1 time:0.00 2023-06-27 09:22:13,584 - custodian.policy - INFO - policy:cognito-checkauditmode resource:user-pool region:us-east-1 count:0 time:0.00 2023-06-27 09:22:42,984 - custodian.policy - INFO - policy:cognito-checkauditmode resource:user-pool region:us-east-1 count:0 time:0.65 2023-06-27 09:24:43,016 - custodian.policy - INFO - policy:cognito-checkauditmode resource:aws.user-pool region:us-east-1 count:0 time:0.62 2023-06-27 09:27:15,604 - custodian.policy - INFO - policy:cognito-checkauditmode resource:aws.user-pool region:us-east-1 count:1 time:0.64 2023-06-29 08:58:25,076 - custodian.policy - INFO - policy:cognito-checkauditmode resource:aws.user-pool region:us-east-1 count:2 time:0.64 ``` ### Extra information or context Applied few additional filters and that as well failed. I believe, the filters will work only after the describe is successful </issue> <code> [start of c7n/resources/cognito.py] 1 # Copyright The Cloud Custodian Authors. 
2 # SPDX-License-Identifier: Apache-2.0 3 from botocore.exceptions import ClientError 4 5 from c7n.actions import BaseAction 6 from c7n.manager import resources 7 from c7n.query import QueryResourceManager, TypeInfo, DescribeSource 8 from c7n.tags import universal_augment 9 from c7n.utils import local_session, type_schema 10 11 12 class DescribeIdentityPool(DescribeSource): 13 def augment(self, resources): 14 return universal_augment(self.manager, resources) 15 16 17 class DescribeUserPool(DescribeSource): 18 def augment(self, resources): 19 resources = super().augment(resources) 20 return universal_augment(self.manager, resources) 21 22 23 @resources.register('identity-pool') 24 class CognitoIdentityPool(QueryResourceManager): 25 26 class resource_type(TypeInfo): 27 service = 'cognito-identity' 28 enum_spec = ('list_identity_pools', 'IdentityPools', {'MaxResults': 60}) 29 detail_spec = ( 30 'describe_identity_pool', 'IdentityPoolId', 'IdentityPoolId', None) 31 id = 'IdentityPoolId' 32 name = 'IdentityPoolName' 33 arn_type = "identitypool" 34 cfn_type = 'AWS::Cognito::IdentityPool' 35 universal_taggable = object() 36 37 source_mapping = { 38 'describe': DescribeIdentityPool, 39 } 40 41 42 @CognitoIdentityPool.action_registry.register('delete') 43 class DeleteIdentityPool(BaseAction): 44 """Action to delete cognito identity pool 45 46 It is recommended to use a filter to avoid unwanted deletion of pools 47 48 :example: 49 50 .. code-block:: yaml 51 52 policies: 53 - name: identity-pool-delete 54 resource: identity-pool 55 actions: 56 - delete 57 """ 58 59 schema = type_schema('delete') 60 permissions = ("cognito-identity:DeleteIdentityPool",) 61 62 def process(self, pools): 63 with self.executor_factory(max_workers=2) as w: 64 list(w.map(self.process_pool, pools)) 65 66 def process_pool(self, pool): 67 client = local_session( 68 self.manager.session_factory).client('cognito-identity') 69 try: 70 client.delete_identity_pool(IdentityPoolId=pool['IdentityPoolId']) 71 except ClientError as e: 72 self.log.exception( 73 "Exception deleting identity pool:\n %s" % e) 74 75 76 @resources.register('user-pool') 77 class CognitoUserPool(QueryResourceManager): 78 79 class resource_type(TypeInfo): 80 service = "cognito-idp" 81 enum_spec = ('list_user_pools', 'UserPools', {'MaxResults': 60}) 82 detail_spec = ( 83 'describe_user_pool', 'UserPoolId', 'Id', 'UserPool') 84 id = 'Id' 85 name = 'Name' 86 arn_type = "userpool" 87 cfn_type = 'AWS::Cognito::UserPool' 88 universal_taggable = object() 89 90 source_mapping = { 91 'describe': DescribeUserPool, 92 } 93 94 95 @CognitoUserPool.action_registry.register('delete') 96 class DeleteUserPool(BaseAction): 97 """Action to delete cognito user pool 98 99 It is recommended to use a filter to avoid unwanted deletion of pools 100 101 :example: 102 103 .. 
code-block:: yaml 104 105 policies: 106 - name: user-pool-delete 107 resource: user-pool 108 actions: 109 - delete 110 """ 111 112 schema = type_schema('delete') 113 permissions = ("cognito-idp:DeleteUserPool",) 114 115 def process(self, pools): 116 with self.executor_factory(max_workers=2) as w: 117 list(w.map(self.process_pool, pools)) 118 119 def process_pool(self, pool): 120 client = local_session( 121 self.manager.session_factory).client('cognito-idp') 122 try: 123 client.delete_user_pool(UserPoolId=pool['Id']) 124 except ClientError as e: 125 self.log.exception( 126 "Exception deleting user pool:\n %s" % e) 127 [end of c7n/resources/cognito.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/c7n/resources/cognito.py b/c7n/resources/cognito.py --- a/c7n/resources/cognito.py +++ b/c7n/resources/cognito.py @@ -11,6 +11,7 @@ class DescribeIdentityPool(DescribeSource): def augment(self, resources): + resources = super().augment(resources) return universal_augment(self.manager, resources)
{"golden_diff": "diff --git a/c7n/resources/cognito.py b/c7n/resources/cognito.py\n--- a/c7n/resources/cognito.py\n+++ b/c7n/resources/cognito.py\n@@ -11,6 +11,7 @@\n \n class DescribeIdentityPool(DescribeSource):\n def augment(self, resources):\n+ resources = super().augment(resources)\n return universal_augment(self.manager, resources)\n", "issue": "AWS user pool and identity pool resources.json has minimal information\n### Describe the bug\n\nFor the below custodian policy, the resources.json is created for each of the policy on successful execution. For user-pool and identity-pool, the resources.json file does not include full description/configuration of the resources, instead it contains very minimal information like ID, NAME, creation date etc. (as mentioned below) whereas for EC2 and Lambda, the populated resources.json has hundreds of metadata information.\r\n\r\n\r\n---\r\n##custodian.yaml\r\npolicies:\r\n - name: cognito-checkauditmode\r\n resource: aws.user-pool\r\n\r\n - name: identity-checkauditmode\r\n resource: identity-pool\r\n\r\n - name: ec2-checkrunning\r\n resource: ec2\r\n\r\n- name: find-all-lambdas\r\n resource: aws.lambda\r\n\r\n--- \r\n##resources.json - cognito-checkauditmode\r\n\r\n[\r\n {\r\n \"Id\": \"xxxxxxxxxxxxxx\",\r\n \"Name\": \"xxxxxxxxxxxxxxxxxxx\",\r\n \"LambdaConfig\": {},\r\n \"LastModifiedDate\": \"2023-06-29T08:56:18.028000-05:00\",\r\n \"CreationDate\": \"2023-06-29T08:56:17.860000-05:00\",\r\n \"Tags\": []\r\n },\r\n {\r\n \"Id\": \"xxxxxxxxxxxxxxxxxxx\",\r\n \"Name\": \"xxxxxxxxxxxxxxxxxxx\",\r\n \"LambdaConfig\": {},\r\n \"LastModifiedDate\": \"2020-06-11T17:15:18.951000-05:00\",\r\n \"CreationDate\": \"2020-02-21T11:39:18.108000-06:00\",\r\n \"Tags\": []\r\n }\r\n]\r\n\r\n\r\n---\r\n## resources.json - identity-checkauditmode\r\n\r\n[\r\n {\r\n \"IdentityPoolId\": \"xxxxxxxxxxxxxxxxxxx\",\r\n \"IdentityPoolName\": \"xxxxxxxxxxxxxxxxxxx\",\r\n \"Tags\": []\r\n }\r\n]\n\n### What did you expect to happen?\n\nExpecting a large json file with full configuration of the resource. Below is the AWS CLI command and the truncated response from CLI. Expecting a similar response. 
\r\n\r\n\r\n---\r\naws cognito-idp describe-user-pool --user-pool-id xxxxxxxxxxxxxxxxxxx\r\n---\r\ntruncated response\r\n{\r\n \"UserPool\": {\r\n \"Id\": \"xxxxxxxxxxxxxxxxxxx\",\r\n \"Name\": \"xxxxxxxxxxxxxxxxxxx\",\r\n \"Policies\": {\r\n \"PasswordPolicy\": {\r\n \"MinimumLength\": 8,\r\n \"RequireUppercase\": true,\r\n \"RequireLowercase\": true,\r\n \"RequireNumbers\": true,\r\n \"RequireSymbols\": true,\r\n \"TemporaryPasswordValidityDays\": 7\r\n }\r\n },\r\n \"DeletionProtection\": \"INACTIVE\",\r\n \"LambdaConfig\": {},\r\n \"LastModifiedDate\": \"2020-06-11T17:15:18.951000-05:00\",\r\n \"CreationDate\": \"2020-02-21T11:39:18.108000-06:00\",\r\n \"SchemaAttributes\": [\r\n {\r\n \"Name\": \"sub\",\r\n \"AttributeDataType\": \"String\",\r\n \"DeveloperOnlyAttribute\": false,\r\n \"Mutable\": false,\r\n \"Required\": true,\r\n \"StringAttributeConstraints\": {\r\n \"MinLength\": \"1\",\r\n \"MaxLength\": \"2048\"\r\n }\r\n },\n\n### Cloud Provider\n\nAmazon Web Services (AWS)\n\n### Cloud Custodian version and dependency information\n\n```shell\nCustodian: 0.9.27\r\nPython: 3.11.4 (main, Jun 7 2023, 00:34:59) [Clang 14.0.3 (clang-1403.0.22.14.1)]\r\nPlatform: posix.uname_result(sysname='Darwin', nodename='MABPWKJJ4T9RYW', release='22.5.0', version='Darwin Kernel Version 22.5.0: Thu Jun 8 22:22:23 PDT 2023; root:xnu-8796.121.3~7/RELEASE_ARM64_T6020', machine='arm64')\r\nUsing venv: False\r\nDocker: False\r\nInstalled: \r\n\r\nargcomplete==3.0.8\r\nattrs==23.1.0\r\nboto3==1.26.139\r\nbotocore==1.29.139\r\ndocutils==0.18.1\r\nimportlib-metadata==5.2.0\r\njmespath==1.0.1\r\njsonschema==4.17.3\r\npyrsistent==0.19.3\r\npython-dateutil==2.8.2\r\npyyaml==6.0\r\ns3transfer==0.6.1\r\nsix==1.16.0\r\ntabulate==0.9.0\r\ntyping-extensions==4.6.3\r\nurllib3==1.26.16\r\nzipp==3.15.0\n```\n\n\n### Policy\n\n```shell\n##custodian.yaml\r\npolicies:\r\n - name: cognito-checkauditmode\r\n resource: aws.user-pool\r\n\r\n - name: identity-checkauditmode\r\n resource: identity-pool\r\n\r\n - name: ec2-checkrunning\r\n resource: ec2\r\n\r\n- name: find-all-lambdas\r\n resource: aws.lambda\n```\n\n\n### Relevant log/traceback output\n\n```shell\n2023-06-26 20:09:45,838 - custodian.policy - INFO - policy:cognito-checkauditmode resource:aws.user-pool region:us-east-1 count:1 time:0.00\r\n2023-06-26 20:20:16,225 - custodian.policy - INFO - policy:cognito-checkauditmode resource:aws.user-pool region:us-east-1 count:1 time:0.70\r\n2023-06-26 20:25:23,030 - custodian.policy - INFO - policy:cognito-checkauditmode resource:user-pool region:us-east-1 count:1 time:0.00\r\n2023-06-26 23:09:38,143 - custodian.policy - INFO - policy:cognito-checkauditmode resource:user-pool region:us-east-1 count:1 time:0.73\r\n2023-06-26 23:13:37,202 - custodian.policy - INFO - policy:cognito-checkauditmode resource:user-pool region:us-east-1 count:1 time:0.00\r\n2023-06-26 23:17:02,042 - custodian.policy - INFO - policy:cognito-checkauditmode resource:user-pool region:us-east-1 count:1 time:0.00\r\n2023-06-26 23:18:59,196 - custodian.policy - INFO - policy:cognito-checkauditmode resource:user-pool region:us-east-1 count:0 time:0.00\r\n2023-06-26 23:28:37,082 - custodian.policy - INFO - policy:cognito-checkauditmode resource:user-pool region:us-east-1 count:0 time:0.67\r\n2023-06-27 09:11:53,373 - custodian.policy - INFO - policy:cognito-checkauditmode resource:user-pool region:us-east-1 count:1 time:0.67\r\n2023-06-27 09:13:07,745 - custodian.policy - INFO - policy:cognito-checkauditmode resource:user-pool region:us-east-1 count:1 
time:0.00\r\n2023-06-27 09:22:13,584 - custodian.policy - INFO - policy:cognito-checkauditmode resource:user-pool region:us-east-1 count:0 time:0.00\r\n2023-06-27 09:22:42,984 - custodian.policy - INFO - policy:cognito-checkauditmode resource:user-pool region:us-east-1 count:0 time:0.65\r\n2023-06-27 09:24:43,016 - custodian.policy - INFO - policy:cognito-checkauditmode resource:aws.user-pool region:us-east-1 count:0 time:0.62\r\n2023-06-27 09:27:15,604 - custodian.policy - INFO - policy:cognito-checkauditmode resource:aws.user-pool region:us-east-1 count:1 time:0.64\r\n2023-06-29 08:58:25,076 - custodian.policy - INFO - policy:cognito-checkauditmode resource:aws.user-pool region:us-east-1 count:2 time:0.64\n```\n\n\n### Extra information or context\n\nApplied few additional filters and that as well failed. I believe, the filters will work only after the describe is successful\n", "before_files": [{"content": "# Copyright The Cloud Custodian Authors.\n# SPDX-License-Identifier: Apache-2.0\nfrom botocore.exceptions import ClientError\n\nfrom c7n.actions import BaseAction\nfrom c7n.manager import resources\nfrom c7n.query import QueryResourceManager, TypeInfo, DescribeSource\nfrom c7n.tags import universal_augment\nfrom c7n.utils import local_session, type_schema\n\n\nclass DescribeIdentityPool(DescribeSource):\n def augment(self, resources):\n return universal_augment(self.manager, resources)\n\n\nclass DescribeUserPool(DescribeSource):\n def augment(self, resources):\n resources = super().augment(resources)\n return universal_augment(self.manager, resources)\n\n\[email protected]('identity-pool')\nclass CognitoIdentityPool(QueryResourceManager):\n\n class resource_type(TypeInfo):\n service = 'cognito-identity'\n enum_spec = ('list_identity_pools', 'IdentityPools', {'MaxResults': 60})\n detail_spec = (\n 'describe_identity_pool', 'IdentityPoolId', 'IdentityPoolId', None)\n id = 'IdentityPoolId'\n name = 'IdentityPoolName'\n arn_type = \"identitypool\"\n cfn_type = 'AWS::Cognito::IdentityPool'\n universal_taggable = object()\n\n source_mapping = {\n 'describe': DescribeIdentityPool,\n }\n\n\[email protected]_registry.register('delete')\nclass DeleteIdentityPool(BaseAction):\n \"\"\"Action to delete cognito identity pool\n\n It is recommended to use a filter to avoid unwanted deletion of pools\n\n :example:\n\n .. 
code-block:: yaml\n\n policies:\n - name: identity-pool-delete\n resource: identity-pool\n actions:\n - delete\n \"\"\"\n\n schema = type_schema('delete')\n permissions = (\"cognito-identity:DeleteIdentityPool\",)\n\n def process(self, pools):\n with self.executor_factory(max_workers=2) as w:\n list(w.map(self.process_pool, pools))\n\n def process_pool(self, pool):\n client = local_session(\n self.manager.session_factory).client('cognito-identity')\n try:\n client.delete_identity_pool(IdentityPoolId=pool['IdentityPoolId'])\n except ClientError as e:\n self.log.exception(\n \"Exception deleting identity pool:\\n %s\" % e)\n\n\[email protected]('user-pool')\nclass CognitoUserPool(QueryResourceManager):\n\n class resource_type(TypeInfo):\n service = \"cognito-idp\"\n enum_spec = ('list_user_pools', 'UserPools', {'MaxResults': 60})\n detail_spec = (\n 'describe_user_pool', 'UserPoolId', 'Id', 'UserPool')\n id = 'Id'\n name = 'Name'\n arn_type = \"userpool\"\n cfn_type = 'AWS::Cognito::UserPool'\n universal_taggable = object()\n\n source_mapping = {\n 'describe': DescribeUserPool,\n }\n\n\[email protected]_registry.register('delete')\nclass DeleteUserPool(BaseAction):\n \"\"\"Action to delete cognito user pool\n\n It is recommended to use a filter to avoid unwanted deletion of pools\n\n :example:\n\n .. code-block:: yaml\n\n policies:\n - name: user-pool-delete\n resource: user-pool\n actions:\n - delete\n \"\"\"\n\n schema = type_schema('delete')\n permissions = (\"cognito-idp:DeleteUserPool\",)\n\n def process(self, pools):\n with self.executor_factory(max_workers=2) as w:\n list(w.map(self.process_pool, pools))\n\n def process_pool(self, pool):\n client = local_session(\n self.manager.session_factory).client('cognito-idp')\n try:\n client.delete_user_pool(UserPoolId=pool['Id'])\n except ClientError as e:\n self.log.exception(\n \"Exception deleting user pool:\\n %s\" % e)\n", "path": "c7n/resources/cognito.py"}]}
3,810
88
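The one-line golden diff adds `super().augment(resources)` so the base source's enrichment, which appears to be where the `detail_spec` describe call (`describe_identity_pool`) fills in the full pool configuration, runs before tag augmentation. A toy sketch of that override pattern, with simplified stand-ins rather than Cloud Custodian's real classes or signatures:

```python
class DescribeSource:
    def augment(self, resources):
        # Stand-in for the detail_spec step (describe_identity_pool / describe_user_pool).
        return [{**r, "FullConfig": {"described": True}} for r in resources]


def add_tags(resources):
    # Stand-in for universal_augment's tag enrichment (real signature differs).
    return [{**r, "Tags": []} for r in resources]


class BuggySource(DescribeSource):
    def augment(self, resources):
        return add_tags(resources)              # detail call never happens


class FixedSource(DescribeSource):
    def augment(self, resources):
        resources = super().augment(resources)  # run the describe_* detail call first
        return add_tags(resources)


pools = [{"IdentityPoolId": "pool-id", "IdentityPoolName": "example"}]
print(BuggySource().augment(pools))   # minimal list fields plus Tags only
print(FixedSource().augment(pools))   # includes the FullConfig detail
```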
gh_patches_debug_10522
rasdani/github-patches
git_diff
bokeh__bokeh-4437
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Can't serialize timedelta column If you have a data frame with a column of timedeltas, then make a ColumnDataSource out of it, the ColumnDataSource will be created: ``` python In [9]: source.data['delta'] Out[9]: [Timedelta('0 days 00:33:00'), Timedelta('0 days 00:35:00'), Timedelta('0 days 03:01:00')] ``` But if you use that source in a plot, even if you don't use the column, when it comes time to serialize (show/save/embed etc) the plot, it fails: ``` python Timedelta('0 days 00:33:00') is not JSON serializable ``` Maybe we can provide some validation on ColumnDataSource creation? Or, at least provide a more helpful message on failure, as it's not immediately obvious what went wrong. </issue> <code> [start of bokeh/core/json_encoder.py] 1 ''' Provide a custom JSON encoder for serializing Bokeh models. 2 3 ''' 4 from __future__ import absolute_import 5 6 import logging 7 log = logging.getLogger(__name__) 8 9 import datetime as dt 10 import decimal 11 import json 12 import time 13 14 import numpy as np 15 16 from ..settings import settings 17 from ..util.dependencies import import_optional 18 from ..util.serialization import transform_series, transform_array 19 20 pd = import_optional('pandas') 21 rd = import_optional("dateutil.relativedelta") 22 23 class BokehJSONEncoder(json.JSONEncoder): 24 ''' Encode values to be used in Bokeh documents or communicated to 25 a Bokeh server. 26 27 ''' 28 def transform_python_types(self, obj): 29 ''' Handle special scalars, use default json encoder otherwise 30 31 ''' 32 # Pandas Timestamp 33 if pd and isinstance(obj, pd.tslib.Timestamp): 34 return obj.value / 10**6.0 #nanosecond to millisecond 35 elif np.issubdtype(type(obj), np.float): 36 return float(obj) 37 elif np.issubdtype(type(obj), np.int): 38 return int(obj) 39 elif np.issubdtype(type(obj), np.bool_): 40 return bool(obj) 41 # Datetime 42 # datetime is a subclass of date. 43 elif isinstance(obj, dt.datetime): 44 return time.mktime(obj.timetuple()) * 1000. + obj.microsecond / 1000. 45 # Date 46 elif isinstance(obj, dt.date): 47 return time.mktime(obj.timetuple()) * 1000. 48 # Numpy datetime64 49 elif isinstance(obj, np.datetime64): 50 epoch_delta = obj - np.datetime64('1970-01-01T00:00:00Z') 51 return (epoch_delta / np.timedelta64(1, 'ms')) 52 # Time 53 elif isinstance(obj, dt.time): 54 return (obj.hour * 3600 + obj.minute * 60 + obj.second) * 1000 + obj.microsecond / 1000. 55 elif rd and isinstance(obj, rd.relativedelta): 56 return dict(years=obj.years, months=obj.months, days=obj.days, hours=obj.hours, 57 minutes=obj.minutes, seconds=obj.seconds, microseconds=obj.microseconds) 58 # Decimal 59 elif isinstance(obj, decimal.Decimal): 60 return float(obj) 61 else: 62 return super(BokehJSONEncoder, self).default(obj) 63 64 def default(self, obj): 65 #argh! local import! 
66 from ..model import Model 67 from ..colors import Color 68 from .properties import HasProps 69 ## array types 70 if pd and isinstance(obj, (pd.Series, pd.Index)): 71 return transform_series(obj) 72 elif isinstance(obj, np.ndarray): 73 return transform_array(obj) 74 elif isinstance(obj, Model): 75 return obj.ref 76 elif isinstance(obj, HasProps): 77 return obj.properties_with_values(include_defaults=False) 78 elif isinstance(obj, Color): 79 return obj.to_css() 80 else: 81 return self.transform_python_types(obj) 82 83 def serialize_json(obj, encoder=BokehJSONEncoder, indent=None, **kwargs): 84 ''' Return a serialized JSON representation of a Bokeh model. 85 86 ''' 87 pretty = settings.pretty(False) 88 89 if pretty: 90 separators=(",", ": ") 91 else: 92 separators=(",", ":") 93 94 if pretty and indent is None: 95 indent = 2 96 97 return json.dumps(obj, cls=encoder, allow_nan=False, indent=indent, separators=separators, sort_keys=True, **kwargs) 98 [end of bokeh/core/json_encoder.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/bokeh/core/json_encoder.py b/bokeh/core/json_encoder.py --- a/bokeh/core/json_encoder.py +++ b/bokeh/core/json_encoder.py @@ -42,6 +42,10 @@ # datetime is a subclass of date. elif isinstance(obj, dt.datetime): return time.mktime(obj.timetuple()) * 1000. + obj.microsecond / 1000. + # Timedelta + # timedelta is class in the datetime library + elif isinstance(obj, dt.timedelta): + return dict(days=obj.days, seconds=obj.seconds, microseconds=obj.microseconds) # Date elif isinstance(obj, dt.date): return time.mktime(obj.timetuple()) * 1000.
{"golden_diff": "diff --git a/bokeh/core/json_encoder.py b/bokeh/core/json_encoder.py\n--- a/bokeh/core/json_encoder.py\n+++ b/bokeh/core/json_encoder.py\n@@ -42,6 +42,10 @@\n # datetime is a subclass of date.\n elif isinstance(obj, dt.datetime):\n return time.mktime(obj.timetuple()) * 1000. + obj.microsecond / 1000.\n+ # Timedelta\n+ # timedelta is class in the datetime library\n+ elif isinstance(obj, dt.timedelta):\n+ return dict(days=obj.days, seconds=obj.seconds, microseconds=obj.microseconds)\n # Date\n elif isinstance(obj, dt.date):\n return time.mktime(obj.timetuple()) * 1000.\n", "issue": "Can't serialize timedelta column\nIf you have a data frame with a column of timedeltas, then make a ColumnDataSource out of it, the ColumnDataSource will be created:\n\n``` python\nIn [9]: source.data['delta']\nOut[9]:\n[Timedelta('0 days 00:33:00'),\n Timedelta('0 days 00:35:00'),\n Timedelta('0 days 03:01:00')]\n```\n\nBut if you use that source in a plot, even if you don't use the column, when it comes time to serialize (show/save/embed etc) the plot, it fails:\n\n``` python\nTimedelta('0 days 00:33:00') is not JSON serializable\n```\n\nMaybe we can provide some validation on ColumnDataSource creation? Or, at least provide a more helpful message on failure, as it's not immediately obvious what went wrong.\n\n", "before_files": [{"content": "''' Provide a custom JSON encoder for serializing Bokeh models.\n\n'''\nfrom __future__ import absolute_import\n\nimport logging\nlog = logging.getLogger(__name__)\n\nimport datetime as dt\nimport decimal\nimport json\nimport time\n\nimport numpy as np\n\nfrom ..settings import settings\nfrom ..util.dependencies import import_optional\nfrom ..util.serialization import transform_series, transform_array\n\npd = import_optional('pandas')\nrd = import_optional(\"dateutil.relativedelta\")\n\nclass BokehJSONEncoder(json.JSONEncoder):\n ''' Encode values to be used in Bokeh documents or communicated to\n a Bokeh server.\n\n '''\n def transform_python_types(self, obj):\n ''' Handle special scalars, use default json encoder otherwise\n\n '''\n # Pandas Timestamp\n if pd and isinstance(obj, pd.tslib.Timestamp):\n return obj.value / 10**6.0 #nanosecond to millisecond\n elif np.issubdtype(type(obj), np.float):\n return float(obj)\n elif np.issubdtype(type(obj), np.int):\n return int(obj)\n elif np.issubdtype(type(obj), np.bool_):\n return bool(obj)\n # Datetime\n # datetime is a subclass of date.\n elif isinstance(obj, dt.datetime):\n return time.mktime(obj.timetuple()) * 1000. + obj.microsecond / 1000.\n # Date\n elif isinstance(obj, dt.date):\n return time.mktime(obj.timetuple()) * 1000.\n # Numpy datetime64\n elif isinstance(obj, np.datetime64):\n epoch_delta = obj - np.datetime64('1970-01-01T00:00:00Z')\n return (epoch_delta / np.timedelta64(1, 'ms'))\n # Time\n elif isinstance(obj, dt.time):\n return (obj.hour * 3600 + obj.minute * 60 + obj.second) * 1000 + obj.microsecond / 1000.\n elif rd and isinstance(obj, rd.relativedelta):\n return dict(years=obj.years, months=obj.months, days=obj.days, hours=obj.hours,\n minutes=obj.minutes, seconds=obj.seconds, microseconds=obj.microseconds)\n # Decimal\n elif isinstance(obj, decimal.Decimal):\n return float(obj)\n else:\n return super(BokehJSONEncoder, self).default(obj)\n\n def default(self, obj):\n #argh! 
local import!\n from ..model import Model\n from ..colors import Color\n from .properties import HasProps\n ## array types\n if pd and isinstance(obj, (pd.Series, pd.Index)):\n return transform_series(obj)\n elif isinstance(obj, np.ndarray):\n return transform_array(obj)\n elif isinstance(obj, Model):\n return obj.ref\n elif isinstance(obj, HasProps):\n return obj.properties_with_values(include_defaults=False)\n elif isinstance(obj, Color):\n return obj.to_css()\n else:\n return self.transform_python_types(obj)\n\ndef serialize_json(obj, encoder=BokehJSONEncoder, indent=None, **kwargs):\n ''' Return a serialized JSON representation of a Bokeh model.\n\n '''\n pretty = settings.pretty(False)\n\n if pretty:\n separators=(\",\", \": \")\n else:\n separators=(\",\", \":\")\n\n if pretty and indent is None:\n indent = 2\n\n return json.dumps(obj, cls=encoder, allow_nan=False, indent=indent, separators=separators, sort_keys=True, **kwargs)\n", "path": "bokeh/core/json_encoder.py"}]}
1,693
170
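The encoder branch added above handles `datetime.timedelta` directly, and `pandas.Timedelta` subclasses it, so both serialize once the branch exists. A standard-library-only sketch of the same idea, separate from Bokeh's `BokehJSONEncoder`:

```python
import datetime as dt
import json


class TimedeltaEncoder(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj, dt.timedelta):
            # Mirrors the patched branch: emit a plain JSON-friendly dict
            # instead of letting json raise "is not JSON serializable".
            return {"days": obj.days, "seconds": obj.seconds, "microseconds": obj.microseconds}
        return super().default(obj)


delta = dt.timedelta(minutes=33)  # same shape as the Timedelta('0 days 00:33:00') column values
print(json.dumps({"delta": delta}, cls=TimedeltaEncoder))
# {"delta": {"days": 0, "seconds": 1980, "microseconds": 0}}
```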
gh_patches_debug_4039
rasdani/github-patches
git_diff
encode__uvicorn-921
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Websockets implementation does not clean properly tasks if handshake fails this is a good explanation of what we see in those 2 tests and that causes flaky tests: FWIW there's an asyncio warning when I try to run `test_send_before_handshake` or `test_missing_handshake` in isolation. Doesn't show if I run either along with other tests, eg either the full test suite, with `-k websocket`, or the two tests together with `-k "missing_handshake or before_handshake"`. ```console $ pytest -k test_missing_handshake ====================================================== test session starts ======================================================= platform darwin -- Python 3.9.0, pytest-6.1.1, py-1.9.0, pluggy-0.13.1 rootdir: /Users/florimond/Developer/python-projects/uvicorn, configfile: setup.cfg plugins: mock-3.3.1, asyncio-0.14.0 collected 213 items / 211 deselected / 2 selected tests/protocols/test_websocket.py .. [100%] =============================================== 2 passed, 211 deselected in 2.30s ================================================ Task was destroyed but it is pending! task: <Task pending name='Task-11' coro=<WebSocketServerProtocol.handler() done, defined at /Users/florimond/Developer/python-projects/uvicorn/venv/lib/python3.9/site-packages/websockets/server.py:118> wait_for=<Future pending cb=[<TaskWakeupMethWrapper object at 0x10a7e1760>()]>> ``` _Originally posted by @florimondmanca in https://github.com/encode/uvicorn/issues/918#issuecomment-751691297_ </issue> <code> [start of uvicorn/protocols/websockets/websockets_impl.py] 1 import asyncio 2 import http 3 import logging 4 from urllib.parse import unquote 5 6 import websockets 7 8 from uvicorn.protocols.utils import get_local_addr, get_remote_addr, is_ssl 9 10 11 class Server: 12 closing = False 13 14 def register(self, ws): 15 pass 16 17 def unregister(self, ws): 18 pass 19 20 def is_serving(self): 21 return not self.closing 22 23 24 class WebSocketProtocol(websockets.WebSocketServerProtocol): 25 def __init__(self, config, server_state, _loop=None): 26 if not config.loaded: 27 config.load() 28 29 self.config = config 30 self.app = config.loaded_app 31 self.loop = _loop or asyncio.get_event_loop() 32 self.logger = logging.getLogger("uvicorn.error") 33 self.root_path = config.root_path 34 35 # Shared server state 36 self.connections = server_state.connections 37 self.tasks = server_state.tasks 38 39 # Connection state 40 self.transport = None 41 self.server = None 42 self.client = None 43 self.scheme = None 44 45 # Connection events 46 self.scope = None 47 self.handshake_started_event = asyncio.Event() 48 self.handshake_completed_event = asyncio.Event() 49 self.closed_event = asyncio.Event() 50 self.initial_response = None 51 self.connect_sent = False 52 self.accepted_subprotocol = None 53 self.transfer_data_task = None 54 55 self.ws_server = Server() 56 57 super().__init__(ws_handler=self.ws_handler, ws_server=self.ws_server) 58 59 def connection_made(self, transport): 60 self.connections.add(self) 61 self.transport = transport 62 self.server = get_local_addr(transport) 63 self.client = get_remote_addr(transport) 64 self.scheme = "wss" if is_ssl(transport) else "ws" 65 super().connection_made(transport) 66 67 def connection_lost(self, exc): 68 self.connections.remove(self) 69 self.handshake_completed_event.set() 70 super().connection_lost(exc) 71 72 def shutdown(self): 73 self.ws_server.closing = True 74 
self.transport.close() 75 76 def on_task_complete(self, task): 77 self.tasks.discard(task) 78 79 async def process_request(self, path, headers): 80 """ 81 This hook is called to determine if the websocket should return 82 an HTTP response and close. 83 84 Our behavior here is to start the ASGI application, and then wait 85 for either `accept` or `close` in order to determine if we should 86 close the connection. 87 """ 88 path_portion, _, query_string = path.partition("?") 89 90 websockets.handshake.check_request(headers) 91 92 subprotocols = [] 93 for header in headers.get_all("Sec-WebSocket-Protocol"): 94 subprotocols.extend([token.strip() for token in header.split(",")]) 95 96 asgi_headers = [ 97 (name.encode("ascii"), value.encode("ascii")) 98 for name, value in headers.raw_items() 99 ] 100 101 self.scope = { 102 "type": "websocket", 103 "asgi": {"version": self.config.asgi_version, "spec_version": "2.1"}, 104 "scheme": self.scheme, 105 "server": self.server, 106 "client": self.client, 107 "root_path": self.root_path, 108 "path": unquote(path_portion), 109 "raw_path": path_portion, 110 "query_string": query_string.encode("ascii"), 111 "headers": asgi_headers, 112 "subprotocols": subprotocols, 113 } 114 task = self.loop.create_task(self.run_asgi()) 115 task.add_done_callback(self.on_task_complete) 116 self.tasks.add(task) 117 await self.handshake_started_event.wait() 118 return self.initial_response 119 120 def process_subprotocol(self, headers, available_subprotocols): 121 """ 122 We override the standard 'process_subprotocol' behavior here so that 123 we return whatever subprotocol is sent in the 'accept' message. 124 """ 125 return self.accepted_subprotocol 126 127 def send_500_response(self): 128 msg = b"Internal Server Error" 129 content = [ 130 b"HTTP/1.1 500 Internal Server Error\r\n" 131 b"content-type: text/plain; charset=utf-8\r\n", 132 b"content-length: " + str(len(msg)).encode("ascii") + b"\r\n", 133 b"connection: close\r\n", 134 b"\r\n", 135 msg, 136 ] 137 self.transport.write(b"".join(content)) 138 139 async def ws_handler(self, protocol, path): 140 """ 141 This is the main handler function for the 'websockets' implementation 142 to call into. We just wait for close then return, and instead allow 143 'send' and 'receive' events to drive the flow. 144 """ 145 self.handshake_completed_event.set() 146 await self.closed_event.wait() 147 148 async def run_asgi(self): 149 """ 150 Wrapper around the ASGI callable, handling exceptions and unexpected 151 termination states. 152 """ 153 try: 154 result = await self.app(self.scope, self.asgi_receive, self.asgi_send) 155 except BaseException as exc: 156 self.closed_event.set() 157 msg = "Exception in ASGI application\n" 158 self.logger.error(msg, exc_info=exc) 159 if not self.handshake_started_event.is_set(): 160 self.send_500_response() 161 else: 162 await self.handshake_completed_event.wait() 163 self.transport.close() 164 else: 165 self.closed_event.set() 166 if not self.handshake_started_event.is_set(): 167 msg = "ASGI callable returned without sending handshake." 168 self.logger.error(msg) 169 self.send_500_response() 170 self.transport.close() 171 elif result is not None: 172 msg = "ASGI callable should return None, but returned '%s'." 
173 self.logger.error(msg, result) 174 await self.handshake_completed_event.wait() 175 self.transport.close() 176 177 async def asgi_send(self, message): 178 message_type = message["type"] 179 180 if not self.handshake_started_event.is_set(): 181 if message_type == "websocket.accept": 182 self.logger.info( 183 '%s - "WebSocket %s" [accepted]', 184 self.scope["client"], 185 self.scope["root_path"] + self.scope["path"], 186 ) 187 self.initial_response = None 188 self.accepted_subprotocol = message.get("subprotocol") 189 self.handshake_started_event.set() 190 191 elif message_type == "websocket.close": 192 self.logger.info( 193 '%s - "WebSocket %s" 403', 194 self.scope["client"], 195 self.scope["root_path"] + self.scope["path"], 196 ) 197 self.initial_response = (http.HTTPStatus.FORBIDDEN, [], b"") 198 self.handshake_started_event.set() 199 self.closed_event.set() 200 201 else: 202 msg = ( 203 "Expected ASGI message 'websocket.accept' or 'websocket.close', " 204 "but got '%s'." 205 ) 206 raise RuntimeError(msg % message_type) 207 208 elif not self.closed_event.is_set(): 209 await self.handshake_completed_event.wait() 210 211 if message_type == "websocket.send": 212 bytes_data = message.get("bytes") 213 text_data = message.get("text") 214 data = text_data if bytes_data is None else bytes_data 215 await self.send(data) 216 217 elif message_type == "websocket.close": 218 code = message.get("code", 1000) 219 await self.close(code) 220 self.closed_event.set() 221 222 else: 223 msg = ( 224 "Expected ASGI message 'websocket.send' or 'websocket.close'," 225 " but got '%s'." 226 ) 227 raise RuntimeError(msg % message_type) 228 229 else: 230 msg = "Unexpected ASGI message '%s', after sending 'websocket.close'." 231 raise RuntimeError(msg % message_type) 232 233 async def asgi_receive(self): 234 if not self.connect_sent: 235 self.connect_sent = True 236 return {"type": "websocket.connect"} 237 238 await self.handshake_completed_event.wait() 239 try: 240 await self.ensure_open() 241 data = await self.recv() 242 except websockets.ConnectionClosed as exc: 243 return {"type": "websocket.disconnect", "code": exc.code} 244 245 msg = {"type": "websocket.receive"} 246 247 if isinstance(data, str): 248 msg["text"] = data 249 else: 250 msg["bytes"] = data 251 252 return msg 253 [end of uvicorn/protocols/websockets/websockets_impl.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/uvicorn/protocols/websockets/websockets_impl.py b/uvicorn/protocols/websockets/websockets_impl.py
--- a/uvicorn/protocols/websockets/websockets_impl.py
+++ b/uvicorn/protocols/websockets/websockets_impl.py
@@ -135,6 +135,9 @@
             msg,
         ]
         self.transport.write(b"".join(content))
+        # Allow handler task to terminate cleanly, as websockets doesn't cancel it by
+        # itself (see https://github.com/encode/uvicorn/issues/920)
+        self.handshake_started_event.set()
 
     async def ws_handler(self, protocol, path):
         """
{"golden_diff": "diff --git a/uvicorn/protocols/websockets/websockets_impl.py b/uvicorn/protocols/websockets/websockets_impl.py\n--- a/uvicorn/protocols/websockets/websockets_impl.py\n+++ b/uvicorn/protocols/websockets/websockets_impl.py\n@@ -135,6 +135,9 @@\n msg,\n ]\n self.transport.write(b\"\".join(content))\n+ # Allow handler task to terminate cleanly, as websockets doesn't cancel it by\n+ # itself (see https://github.com/encode/uvicorn/issues/920)\n+ self.handshake_started_event.set()\n \n async def ws_handler(self, protocol, path):\n \"\"\"\n", "issue": "Websockets implementation does not clean properly tasks if handshake fails\nthis is a good explanation of what we see in those 2 tests and that causes flaky tests:\r\n\r\n\r\nFWIW there's an asyncio warning when I try to run `test_send_before_handshake` or `test_missing_handshake` in isolation. Doesn't show if I run either along with other tests, eg either the full test suite, with `-k websocket`, or the two tests together with `-k \"missing_handshake or before_handshake\"`.\r\n\r\n```console\r\n$ pytest -k test_missing_handshake\r\n====================================================== test session starts =======================================================\r\nplatform darwin -- Python 3.9.0, pytest-6.1.1, py-1.9.0, pluggy-0.13.1\r\nrootdir: /Users/florimond/Developer/python-projects/uvicorn, configfile: setup.cfg\r\nplugins: mock-3.3.1, asyncio-0.14.0\r\ncollected 213 items / 211 deselected / 2 selected \r\n\r\ntests/protocols/test_websocket.py .. [100%]\r\n\r\n=============================================== 2 passed, 211 deselected in 2.30s ================================================\r\nTask was destroyed but it is pending!\r\ntask: <Task pending name='Task-11' coro=<WebSocketServerProtocol.handler() done, defined at /Users/florimond/Developer/python-projects/uvicorn/venv/lib/python3.9/site-packages/websockets/server.py:118> wait_for=<Future pending cb=[<TaskWakeupMethWrapper object at 0x10a7e1760>()]>>\r\n```\r\n\r\n_Originally posted by @florimondmanca in https://github.com/encode/uvicorn/issues/918#issuecomment-751691297_\n", "before_files": [{"content": "import asyncio\nimport http\nimport logging\nfrom urllib.parse import unquote\n\nimport websockets\n\nfrom uvicorn.protocols.utils import get_local_addr, get_remote_addr, is_ssl\n\n\nclass Server:\n closing = False\n\n def register(self, ws):\n pass\n\n def unregister(self, ws):\n pass\n\n def is_serving(self):\n return not self.closing\n\n\nclass WebSocketProtocol(websockets.WebSocketServerProtocol):\n def __init__(self, config, server_state, _loop=None):\n if not config.loaded:\n config.load()\n\n self.config = config\n self.app = config.loaded_app\n self.loop = _loop or asyncio.get_event_loop()\n self.logger = logging.getLogger(\"uvicorn.error\")\n self.root_path = config.root_path\n\n # Shared server state\n self.connections = server_state.connections\n self.tasks = server_state.tasks\n\n # Connection state\n self.transport = None\n self.server = None\n self.client = None\n self.scheme = None\n\n # Connection events\n self.scope = None\n self.handshake_started_event = asyncio.Event()\n self.handshake_completed_event = asyncio.Event()\n self.closed_event = asyncio.Event()\n self.initial_response = None\n self.connect_sent = False\n self.accepted_subprotocol = None\n self.transfer_data_task = None\n\n self.ws_server = Server()\n\n super().__init__(ws_handler=self.ws_handler, ws_server=self.ws_server)\n\n def connection_made(self, transport):\n 
self.connections.add(self)\n self.transport = transport\n self.server = get_local_addr(transport)\n self.client = get_remote_addr(transport)\n self.scheme = \"wss\" if is_ssl(transport) else \"ws\"\n super().connection_made(transport)\n\n def connection_lost(self, exc):\n self.connections.remove(self)\n self.handshake_completed_event.set()\n super().connection_lost(exc)\n\n def shutdown(self):\n self.ws_server.closing = True\n self.transport.close()\n\n def on_task_complete(self, task):\n self.tasks.discard(task)\n\n async def process_request(self, path, headers):\n \"\"\"\n This hook is called to determine if the websocket should return\n an HTTP response and close.\n\n Our behavior here is to start the ASGI application, and then wait\n for either `accept` or `close` in order to determine if we should\n close the connection.\n \"\"\"\n path_portion, _, query_string = path.partition(\"?\")\n\n websockets.handshake.check_request(headers)\n\n subprotocols = []\n for header in headers.get_all(\"Sec-WebSocket-Protocol\"):\n subprotocols.extend([token.strip() for token in header.split(\",\")])\n\n asgi_headers = [\n (name.encode(\"ascii\"), value.encode(\"ascii\"))\n for name, value in headers.raw_items()\n ]\n\n self.scope = {\n \"type\": \"websocket\",\n \"asgi\": {\"version\": self.config.asgi_version, \"spec_version\": \"2.1\"},\n \"scheme\": self.scheme,\n \"server\": self.server,\n \"client\": self.client,\n \"root_path\": self.root_path,\n \"path\": unquote(path_portion),\n \"raw_path\": path_portion,\n \"query_string\": query_string.encode(\"ascii\"),\n \"headers\": asgi_headers,\n \"subprotocols\": subprotocols,\n }\n task = self.loop.create_task(self.run_asgi())\n task.add_done_callback(self.on_task_complete)\n self.tasks.add(task)\n await self.handshake_started_event.wait()\n return self.initial_response\n\n def process_subprotocol(self, headers, available_subprotocols):\n \"\"\"\n We override the standard 'process_subprotocol' behavior here so that\n we return whatever subprotocol is sent in the 'accept' message.\n \"\"\"\n return self.accepted_subprotocol\n\n def send_500_response(self):\n msg = b\"Internal Server Error\"\n content = [\n b\"HTTP/1.1 500 Internal Server Error\\r\\n\"\n b\"content-type: text/plain; charset=utf-8\\r\\n\",\n b\"content-length: \" + str(len(msg)).encode(\"ascii\") + b\"\\r\\n\",\n b\"connection: close\\r\\n\",\n b\"\\r\\n\",\n msg,\n ]\n self.transport.write(b\"\".join(content))\n\n async def ws_handler(self, protocol, path):\n \"\"\"\n This is the main handler function for the 'websockets' implementation\n to call into. 
We just wait for close then return, and instead allow\n 'send' and 'receive' events to drive the flow.\n \"\"\"\n self.handshake_completed_event.set()\n await self.closed_event.wait()\n\n async def run_asgi(self):\n \"\"\"\n Wrapper around the ASGI callable, handling exceptions and unexpected\n termination states.\n \"\"\"\n try:\n result = await self.app(self.scope, self.asgi_receive, self.asgi_send)\n except BaseException as exc:\n self.closed_event.set()\n msg = \"Exception in ASGI application\\n\"\n self.logger.error(msg, exc_info=exc)\n if not self.handshake_started_event.is_set():\n self.send_500_response()\n else:\n await self.handshake_completed_event.wait()\n self.transport.close()\n else:\n self.closed_event.set()\n if not self.handshake_started_event.is_set():\n msg = \"ASGI callable returned without sending handshake.\"\n self.logger.error(msg)\n self.send_500_response()\n self.transport.close()\n elif result is not None:\n msg = \"ASGI callable should return None, but returned '%s'.\"\n self.logger.error(msg, result)\n await self.handshake_completed_event.wait()\n self.transport.close()\n\n async def asgi_send(self, message):\n message_type = message[\"type\"]\n\n if not self.handshake_started_event.is_set():\n if message_type == \"websocket.accept\":\n self.logger.info(\n '%s - \"WebSocket %s\" [accepted]',\n self.scope[\"client\"],\n self.scope[\"root_path\"] + self.scope[\"path\"],\n )\n self.initial_response = None\n self.accepted_subprotocol = message.get(\"subprotocol\")\n self.handshake_started_event.set()\n\n elif message_type == \"websocket.close\":\n self.logger.info(\n '%s - \"WebSocket %s\" 403',\n self.scope[\"client\"],\n self.scope[\"root_path\"] + self.scope[\"path\"],\n )\n self.initial_response = (http.HTTPStatus.FORBIDDEN, [], b\"\")\n self.handshake_started_event.set()\n self.closed_event.set()\n\n else:\n msg = (\n \"Expected ASGI message 'websocket.accept' or 'websocket.close', \"\n \"but got '%s'.\"\n )\n raise RuntimeError(msg % message_type)\n\n elif not self.closed_event.is_set():\n await self.handshake_completed_event.wait()\n\n if message_type == \"websocket.send\":\n bytes_data = message.get(\"bytes\")\n text_data = message.get(\"text\")\n data = text_data if bytes_data is None else bytes_data\n await self.send(data)\n\n elif message_type == \"websocket.close\":\n code = message.get(\"code\", 1000)\n await self.close(code)\n self.closed_event.set()\n\n else:\n msg = (\n \"Expected ASGI message 'websocket.send' or 'websocket.close',\"\n \" but got '%s'.\"\n )\n raise RuntimeError(msg % message_type)\n\n else:\n msg = \"Unexpected ASGI message '%s', after sending 'websocket.close'.\"\n raise RuntimeError(msg % message_type)\n\n async def asgi_receive(self):\n if not self.connect_sent:\n self.connect_sent = True\n return {\"type\": \"websocket.connect\"}\n\n await self.handshake_completed_event.wait()\n try:\n await self.ensure_open()\n data = await self.recv()\n except websockets.ConnectionClosed as exc:\n return {\"type\": \"websocket.disconnect\", \"code\": exc.code}\n\n msg = {\"type\": \"websocket.receive\"}\n\n if isinstance(data, str):\n msg[\"text\"] = data\n else:\n msg[\"bytes\"] = data\n\n return msg\n", "path": "uvicorn/protocols/websockets/websockets_impl.py"}]}
3,372
147
gh_patches_debug_21530
rasdani/github-patches
git_diff
zestedesavoir__zds-site-2194
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Il n'y a plus d'alertes sur les messages non lus Je n'ai plus d'alertes (entendez par là la petite boite rouge sur l'icone des messages) qui m'indique le nombre de MP non lus. Il s'agit d'une regression introduit avec la 1.5 que je qualifierait de bloquante quand même car la fonctionnalité est très utilisée. Screen : ![new_member](https://cloud.githubusercontent.com/assets/6066015/5916651/a31a48e6-a613-11e4-8a76-d9a4382dc2fe.PNG) </issue> <code> [start of zds/utils/templatetags/interventions.py] 1 # coding: utf-8 2 3 from datetime import datetime, timedelta 4 import time 5 6 from django import template 7 from django.db.models import F 8 9 from zds.article.models import Reaction, ArticleRead 10 from zds.forum.models import TopicFollowed, never_read as never_read_topic, Post, TopicRead 11 from zds.mp.models import PrivateTopic 12 from zds.tutorial.models import Note, TutorialRead 13 from zds.utils.models import Alert 14 15 16 register = template.Library() 17 18 19 @register.filter('is_read') 20 def is_read(topic): 21 if never_read_topic(topic): 22 return False 23 else: 24 return True 25 26 27 @register.filter('humane_delta') 28 def humane_delta(value): 29 # mapping between label day and key 30 const = {1: "Aujourd'hui", 2: "Hier", 3: "Cette semaine", 4: "Ce mois-ci", 5: "Cette année"} 31 32 return const[value] 33 34 35 @register.filter('followed_topics') 36 def followed_topics(user): 37 topicsfollowed = TopicFollowed.objects.select_related("topic").filter(user=user)\ 38 .order_by('-topic__last_message__pubdate')[:10] 39 # This period is a map for link a moment (Today, yesterday, this week, this month, etc.) with 40 # the number of days for which we can say we're still in the period 41 # for exemple, the tuple (2, 1) means for the period "2" corresponding to "Yesterday" according 42 # to humane_delta, means if your pubdate hasn't exceeded one day, we are always at "Yesterday" 43 # Number is use for index for sort map easily 44 period = ((1, 0), (2, 1), (3, 7), (4, 30), (5, 360)) 45 topics = {} 46 for tf in topicsfollowed: 47 for p in period: 48 if tf.topic.last_message.pubdate.date() >= (datetime.now() - timedelta(days=int(p[1]), 49 hours=0, minutes=0, 50 seconds=0)).date(): 51 if p[0] in topics: 52 topics[p[0]].append(tf.topic) 53 else: 54 topics[p[0]] = [tf.topic] 55 break 56 return topics 57 58 59 def comp(d1, d2): 60 v1 = int(time.mktime(d1['pubdate'].timetuple())) 61 v2 = int(time.mktime(d2['pubdate'].timetuple())) 62 if v1 > v2: 63 return -1 64 elif v1 < v2: 65 return 1 66 else: 67 return 0 68 69 70 @register.filter('interventions_topics') 71 def interventions_topics(user): 72 topicsfollowed = TopicFollowed.objects.filter(user=user).values("topic").distinct().all() 73 74 topics_never_read = TopicRead.objects\ 75 .filter(user=user)\ 76 .filter(topic__in=topicsfollowed)\ 77 .select_related("topic")\ 78 .exclude(post=F('topic__last_message')) 79 80 articlesfollowed = Reaction.objects\ 81 .filter(author=user, article__sha_public__isnull=False)\ 82 .values('article')\ 83 .distinct().all() 84 85 articles_never_read = ArticleRead.objects\ 86 .filter(user=user)\ 87 .filter(article__in=articlesfollowed)\ 88 .select_related("article")\ 89 .exclude(reaction=F('article__last_reaction')) 90 91 tutorialsfollowed = Note.objects\ 92 .filter(author=user, tutorial__sha_public__isnull=False)\ 93 .values('tutorial')\ 94 .distinct().all() 95 96 tutorials_never_read = TutorialRead.objects\ 97 
.filter(user=user)\ 98 .filter(tutorial__in=tutorialsfollowed)\ 99 .exclude(note=F('tutorial__last_note')) 100 101 posts_unread = [] 102 103 for art in articles_never_read: 104 content = art.article.first_unread_reaction() 105 posts_unread.append({'pubdate': content.pubdate, 106 'author': content.author, 107 'title': art.article.title, 108 'url': content.get_absolute_url()}) 109 110 for tuto in tutorials_never_read: 111 content = tuto.tutorial.first_unread_note() 112 posts_unread.append({'pubdate': content.pubdate, 113 'author': content.author, 114 'title': tuto.tutorial.title, 115 'url': content.get_absolute_url()}) 116 117 for top in topics_never_read: 118 content = top.topic.first_unread_post() 119 if content is None: 120 content = top.topic.last_message 121 posts_unread.append({'pubdate': content.pubdate, 122 'author': content.author, 123 'title': top.topic.title, 124 'url': content.get_absolute_url()}) 125 126 posts_unread.sort(cmp=comp) 127 128 return posts_unread 129 130 131 @register.filter('interventions_privatetopics') 132 def interventions_privatetopics(user): 133 134 # Raw query because ORM doesn't seems to allow this kind of "left outer join" clauses. 135 # Parameters = list with 3x the same ID because SQLite backend doesn't allow map parameters. 136 privatetopics_unread = PrivateTopic.objects.raw( 137 ''' 138 select distinct t.* 139 from mp_privatetopic t 140 inner join mp_privatetopic_participants p on p.privatetopic_id = t.id 141 left outer join mp_privatetopicread r on r.user_id = %s and r.privatepost_id = t.last_message_id 142 where (t.author_id = %s or p.user_id = %s) 143 and r.id is null 144 order by t.pubdate desc''', 145 [user.id, user.id, user.id]) 146 147 # "total" re-do the query, but there is no other way to get the length as __len__ is not available on raw queries. 148 return {'unread': privatetopics_unread, 'total': len(list(privatetopics_unread))} 149 150 151 @register.filter(name='alerts_list') 152 def alerts_list(user): 153 total = [] 154 alerts = Alert.objects.select_related("author").all().order_by('-pubdate')[:10] 155 for alert in alerts: 156 if alert.scope == Alert.FORUM: 157 post = Post.objects.select_related("topic").get(pk=alert.comment.pk) 158 total.append({'title': post.topic.title, 159 'url': post.get_absolute_url(), 160 'pubdate': alert.pubdate, 161 'author': alert.author, 162 'text': alert.text}) 163 if alert.scope == Alert.ARTICLE: 164 reaction = Reaction.objects.select_related("article").get(pk=alert.comment.pk) 165 total.append({'title': reaction.article.title, 166 'url': reaction.get_absolute_url(), 167 'pubdate': alert.pubdate, 168 'author': alert.author, 169 'text': alert.text}) 170 if alert.scope == Alert.TUTORIAL: 171 note = Note.objects.select_related("tutorial").get(pk=alert.comment.pk) 172 total.append({'title': note.tutorial.title, 173 'url': note.get_absolute_url(), 174 'pubdate': alert.pubdate, 175 'author': alert.author, 176 'text': alert.text}) 177 178 return total 179 180 181 @register.filter(name='alerts_count') 182 def alerts_count(user): 183 if user.is_authenticated(): 184 return Alert.objects.count() 185 else: 186 return 0 187 [end of zds/utils/templatetags/interventions.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/zds/utils/templatetags/interventions.py b/zds/utils/templatetags/interventions.py
--- a/zds/utils/templatetags/interventions.py
+++ b/zds/utils/templatetags/interventions.py
@@ -137,7 +137,7 @@
         '''
         select distinct t.*
         from mp_privatetopic t
-        inner join mp_privatetopic_participants p on p.privatetopic_id = t.id
+        left outer join mp_privatetopic_participants p on p.privatetopic_id = t.id
         left outer join mp_privatetopicread r on r.user_id = %s and r.privatepost_id = t.last_message_id
         where (t.author_id = %s or p.user_id = %s)
         and r.id is null
@@ -145,7 +145,8 @@
         [user.id, user.id, user.id])
 
     # "total" re-do the query, but there is no other way to get the length as __len__ is not available on raw queries.
-    return {'unread': privatetopics_unread, 'total': len(list(privatetopics_unread))}
+    topics = list(privatetopics_unread)
+    return {'unread': topics, 'total': len(topics)}
 
 
 @register.filter(name='alerts_list')
{"golden_diff": "diff --git a/zds/utils/templatetags/interventions.py b/zds/utils/templatetags/interventions.py\n--- a/zds/utils/templatetags/interventions.py\n+++ b/zds/utils/templatetags/interventions.py\n@@ -137,7 +137,7 @@\n '''\n select distinct t.*\n from mp_privatetopic t\n- inner join mp_privatetopic_participants p on p.privatetopic_id = t.id\n+ left outer join mp_privatetopic_participants p on p.privatetopic_id = t.id\n left outer join mp_privatetopicread r on r.user_id = %s and r.privatepost_id = t.last_message_id\n where (t.author_id = %s or p.user_id = %s)\n and r.id is null\n@@ -145,7 +145,8 @@\n [user.id, user.id, user.id])\n \n # \"total\" re-do the query, but there is no other way to get the length as __len__ is not available on raw queries.\n- return {'unread': privatetopics_unread, 'total': len(list(privatetopics_unread))}\n+ topics = list(privatetopics_unread)\n+ return {'unread': topics, 'total': len(topics)}\n \n \n @register.filter(name='alerts_list')\n", "issue": "Il n'y a plus d'alertes sur les messages non lus\nJe n'ai plus d'alertes (entendez par l\u00e0 la petite boite rouge sur l'icone des messages) qui m'indique le nombre de MP non lus.\n\nIl s'agit d'une regression introduit avec la 1.5 que je qualifierait de bloquante quand m\u00eame car la fonctionnalit\u00e9 est tr\u00e8s utilis\u00e9e.\n\nScreen : \n\n![new_member](https://cloud.githubusercontent.com/assets/6066015/5916651/a31a48e6-a613-11e4-8a76-d9a4382dc2fe.PNG)\n\n", "before_files": [{"content": "# coding: utf-8\n\nfrom datetime import datetime, timedelta\nimport time\n\nfrom django import template\nfrom django.db.models import F\n\nfrom zds.article.models import Reaction, ArticleRead\nfrom zds.forum.models import TopicFollowed, never_read as never_read_topic, Post, TopicRead\nfrom zds.mp.models import PrivateTopic\nfrom zds.tutorial.models import Note, TutorialRead\nfrom zds.utils.models import Alert\n\n\nregister = template.Library()\n\n\[email protected]('is_read')\ndef is_read(topic):\n if never_read_topic(topic):\n return False\n else:\n return True\n\n\[email protected]('humane_delta')\ndef humane_delta(value):\n # mapping between label day and key\n const = {1: \"Aujourd'hui\", 2: \"Hier\", 3: \"Cette semaine\", 4: \"Ce mois-ci\", 5: \"Cette ann\u00e9e\"}\n\n return const[value]\n\n\[email protected]('followed_topics')\ndef followed_topics(user):\n topicsfollowed = TopicFollowed.objects.select_related(\"topic\").filter(user=user)\\\n .order_by('-topic__last_message__pubdate')[:10]\n # This period is a map for link a moment (Today, yesterday, this week, this month, etc.) 
with\n # the number of days for which we can say we're still in the period\n # for exemple, the tuple (2, 1) means for the period \"2\" corresponding to \"Yesterday\" according\n # to humane_delta, means if your pubdate hasn't exceeded one day, we are always at \"Yesterday\"\n # Number is use for index for sort map easily\n period = ((1, 0), (2, 1), (3, 7), (4, 30), (5, 360))\n topics = {}\n for tf in topicsfollowed:\n for p in period:\n if tf.topic.last_message.pubdate.date() >= (datetime.now() - timedelta(days=int(p[1]),\n hours=0, minutes=0,\n seconds=0)).date():\n if p[0] in topics:\n topics[p[0]].append(tf.topic)\n else:\n topics[p[0]] = [tf.topic]\n break\n return topics\n\n\ndef comp(d1, d2):\n v1 = int(time.mktime(d1['pubdate'].timetuple()))\n v2 = int(time.mktime(d2['pubdate'].timetuple()))\n if v1 > v2:\n return -1\n elif v1 < v2:\n return 1\n else:\n return 0\n\n\[email protected]('interventions_topics')\ndef interventions_topics(user):\n topicsfollowed = TopicFollowed.objects.filter(user=user).values(\"topic\").distinct().all()\n\n topics_never_read = TopicRead.objects\\\n .filter(user=user)\\\n .filter(topic__in=topicsfollowed)\\\n .select_related(\"topic\")\\\n .exclude(post=F('topic__last_message'))\n\n articlesfollowed = Reaction.objects\\\n .filter(author=user, article__sha_public__isnull=False)\\\n .values('article')\\\n .distinct().all()\n\n articles_never_read = ArticleRead.objects\\\n .filter(user=user)\\\n .filter(article__in=articlesfollowed)\\\n .select_related(\"article\")\\\n .exclude(reaction=F('article__last_reaction'))\n\n tutorialsfollowed = Note.objects\\\n .filter(author=user, tutorial__sha_public__isnull=False)\\\n .values('tutorial')\\\n .distinct().all()\n\n tutorials_never_read = TutorialRead.objects\\\n .filter(user=user)\\\n .filter(tutorial__in=tutorialsfollowed)\\\n .exclude(note=F('tutorial__last_note'))\n\n posts_unread = []\n\n for art in articles_never_read:\n content = art.article.first_unread_reaction()\n posts_unread.append({'pubdate': content.pubdate,\n 'author': content.author,\n 'title': art.article.title,\n 'url': content.get_absolute_url()})\n\n for tuto in tutorials_never_read:\n content = tuto.tutorial.first_unread_note()\n posts_unread.append({'pubdate': content.pubdate,\n 'author': content.author,\n 'title': tuto.tutorial.title,\n 'url': content.get_absolute_url()})\n\n for top in topics_never_read:\n content = top.topic.first_unread_post()\n if content is None:\n content = top.topic.last_message\n posts_unread.append({'pubdate': content.pubdate,\n 'author': content.author,\n 'title': top.topic.title,\n 'url': content.get_absolute_url()})\n\n posts_unread.sort(cmp=comp)\n\n return posts_unread\n\n\[email protected]('interventions_privatetopics')\ndef interventions_privatetopics(user):\n\n # Raw query because ORM doesn't seems to allow this kind of \"left outer join\" clauses.\n # Parameters = list with 3x the same ID because SQLite backend doesn't allow map parameters.\n privatetopics_unread = PrivateTopic.objects.raw(\n '''\n select distinct t.*\n from mp_privatetopic t\n inner join mp_privatetopic_participants p on p.privatetopic_id = t.id\n left outer join mp_privatetopicread r on r.user_id = %s and r.privatepost_id = t.last_message_id\n where (t.author_id = %s or p.user_id = %s)\n and r.id is null\n order by t.pubdate desc''',\n [user.id, user.id, user.id])\n\n # \"total\" re-do the query, but there is no other way to get the length as __len__ is not available on raw queries.\n return {'unread': privatetopics_unread, 'total': 
len(list(privatetopics_unread))}\n\n\[email protected](name='alerts_list')\ndef alerts_list(user):\n total = []\n alerts = Alert.objects.select_related(\"author\").all().order_by('-pubdate')[:10]\n for alert in alerts:\n if alert.scope == Alert.FORUM:\n post = Post.objects.select_related(\"topic\").get(pk=alert.comment.pk)\n total.append({'title': post.topic.title,\n 'url': post.get_absolute_url(),\n 'pubdate': alert.pubdate,\n 'author': alert.author,\n 'text': alert.text})\n if alert.scope == Alert.ARTICLE:\n reaction = Reaction.objects.select_related(\"article\").get(pk=alert.comment.pk)\n total.append({'title': reaction.article.title,\n 'url': reaction.get_absolute_url(),\n 'pubdate': alert.pubdate,\n 'author': alert.author,\n 'text': alert.text})\n if alert.scope == Alert.TUTORIAL:\n note = Note.objects.select_related(\"tutorial\").get(pk=alert.comment.pk)\n total.append({'title': note.tutorial.title,\n 'url': note.get_absolute_url(),\n 'pubdate': alert.pubdate,\n 'author': alert.author,\n 'text': alert.text})\n\n return total\n\n\[email protected](name='alerts_count')\ndef alerts_count(user):\n if user.is_authenticated():\n return Alert.objects.count()\n else:\n return 0\n", "path": "zds/utils/templatetags/interventions.py"}]}
2,731
310
gh_patches_debug_19515
rasdani/github-patches
git_diff
scoutapp__scout_apm_python-493
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Remove SCOUT_LOG_LEVEL deprecation warning The Heroku addon sets this environment variable automatically, and it can't vary based on language, so we shouldn't emit a deprecation warning (on Heroku only?) there since there's nothing users can do about it. </issue> <code> [start of src/scout_apm/core/core_agent_manager.py] 1 # coding=utf-8 2 from __future__ import absolute_import, division, print_function, unicode_literals 3 4 import hashlib 5 import json 6 import logging 7 import os 8 import subprocess 9 import tarfile 10 import time 11 import warnings 12 13 from urllib3.exceptions import HTTPError 14 15 from scout_apm.compat import urllib3_cert_pool_manager 16 from scout_apm.core.config import scout_config 17 18 logger = logging.getLogger(__name__) 19 20 21 class CoreAgentManager(object): 22 def __init__(self): 23 self.core_agent_bin_path = None 24 self.core_agent_bin_version = None 25 self.core_agent_dir = "{}/{}".format( 26 scout_config.value("core_agent_dir"), 27 scout_config.value("core_agent_full_name"), 28 ) 29 self.downloader = CoreAgentDownloader( 30 self.core_agent_dir, scout_config.value("core_agent_full_name") 31 ) 32 33 def launch(self): 34 if not scout_config.value("core_agent_launch"): 35 logger.debug( 36 "Not attempting to launch Core Agent " 37 "due to 'core_agent_launch' setting." 38 ) 39 return False 40 41 if not self.verify(): 42 if not scout_config.value("core_agent_download"): 43 logger.debug( 44 "Not attempting to download Core Agent due " 45 "to 'core_agent_download' setting." 46 ) 47 return False 48 49 self.download() 50 51 if not self.verify(): 52 logger.debug("Failed to verify Core Agent. Not launching Core Agent.") 53 return False 54 55 return self.run() 56 57 def download(self): 58 self.downloader.download() 59 60 def run(self): 61 try: 62 subprocess.check_call( 63 ( 64 self.agent_binary() 65 + self.daemonize_flag() 66 + self.log_level() 67 + self.log_file() 68 + self.config_file() 69 + self.socket_path() 70 ), 71 close_fds=True, 72 ) 73 except Exception: 74 # TODO detect failure of launch properly 75 logger.exception("Error running Core Agent") 76 return False 77 return True 78 79 def agent_binary(self): 80 return [self.core_agent_bin_path, "start"] 81 82 def daemonize_flag(self): 83 return ["--daemonize", "true"] 84 85 def socket_path(self): 86 socket_path = scout_config.value("socket_path") 87 return ["--socket", socket_path] 88 89 def log_level(self): 90 # Old deprecated name "log_level" 91 log_level = scout_config.value("log_level") 92 if log_level is not None: 93 warnings.warn( 94 "The config name 'log_level' is deprecated - " 95 + "please use the new name 'core_agent_log_level' instead. 
" 96 + "This might be configured in your environment variables or " 97 + "framework settings as SCOUT_LOG_LEVEL.", 98 DeprecationWarning, 99 ) 100 else: 101 log_level = scout_config.value("core_agent_log_level") 102 return ["--log-level", log_level] 103 104 def log_file(self): 105 path = scout_config.value("log_file") 106 if path is not None: 107 return ["--log-file", path] 108 else: 109 return [] 110 111 def config_file(self): 112 path = scout_config.value("config_file") 113 if path is not None: 114 return ["--config-file", path] 115 else: 116 return [] 117 118 def verify(self): 119 manifest = CoreAgentManifest(self.core_agent_dir + "/manifest.json") 120 if not manifest.is_valid(): 121 logger.debug( 122 "Core Agent verification failed: CoreAgentManifest is not valid." 123 ) 124 self.core_agent_bin_path = None 125 self.core_agent_bin_version = None 126 return False 127 128 bin_path = self.core_agent_dir + "/" + manifest.bin_name 129 if sha256_digest(bin_path) == manifest.sha256: 130 self.core_agent_bin_path = bin_path 131 self.core_agent_bin_version = manifest.bin_version 132 return True 133 else: 134 logger.debug("Core Agent verification failed: SHA mismatch.") 135 self.core_agent_bin_path = None 136 self.core_agent_bin_version = None 137 return False 138 139 140 class CoreAgentDownloader(object): 141 def __init__(self, download_destination, core_agent_full_name): 142 self.stale_download_secs = 120 143 self.destination = download_destination 144 self.core_agent_full_name = core_agent_full_name 145 self.package_location = self.destination + "/{}.tgz".format( 146 self.core_agent_full_name 147 ) 148 self.download_lock_path = self.destination + "/download.lock" 149 self.download_lock_fd = None 150 151 def download(self): 152 self.create_core_agent_dir() 153 self.obtain_download_lock() 154 if self.download_lock_fd is not None: 155 try: 156 downloaded = self.download_package() 157 if downloaded: 158 self.untar() 159 except (OSError, HTTPError): 160 logger.exception("Exception raised while downloading Core Agent") 161 finally: 162 self.release_download_lock() 163 164 def create_core_agent_dir(self): 165 try: 166 os.makedirs(self.destination, scout_config.core_agent_permissions()) 167 except OSError: 168 pass 169 170 def obtain_download_lock(self): 171 self.clean_stale_download_lock() 172 try: 173 self.download_lock_fd = os.open( 174 self.download_lock_path, 175 os.O_RDWR | os.O_CREAT | os.O_EXCL | os.O_NONBLOCK, 176 ) 177 except OSError as exc: 178 logger.debug( 179 "Could not obtain download lock on %s", 180 self.download_lock_path, 181 exc_info=exc, 182 ) 183 self.download_lock_fd = None 184 185 def clean_stale_download_lock(self): 186 try: 187 delta = time.time() - os.stat(self.download_lock_path).st_ctime 188 if delta > self.stale_download_secs: 189 logger.debug("Clearing stale download lock file.") 190 os.unlink(self.download_lock_path) 191 except OSError: 192 pass 193 194 def release_download_lock(self): 195 if self.download_lock_fd is not None: 196 os.unlink(self.download_lock_path) 197 os.close(self.download_lock_fd) 198 199 def download_package(self): 200 full_url = self.full_url() 201 logger.debug("Downloading: %s to %s", full_url, self.package_location) 202 http = urllib3_cert_pool_manager() 203 response = http.request( 204 "GET", full_url, preload_content=False, timeout=10.0, retries=3 205 ) 206 try: 207 if response.status != 200: 208 return False 209 with open(self.package_location, "wb") as fp: 210 for chunk in response.stream(): 211 fp.write(chunk) 212 finally: 213 
response.release_conn() 214 return True 215 216 def untar(self): 217 t = tarfile.open(self.package_location, "r") 218 t.extractall(self.destination) 219 220 def full_url(self): 221 return "{root_url}/{core_agent_full_name}.tgz".format( 222 root_url=self.root_url(), core_agent_full_name=self.core_agent_full_name 223 ) 224 225 def root_url(self): 226 return scout_config.value("download_url") 227 228 229 class CoreAgentManifest(object): 230 def __init__(self, path): 231 self.manifest_path = path 232 self.bin_name = None 233 self.bin_version = None 234 self.sha256 = None 235 self.valid = False 236 try: 237 self.parse() 238 # noqa for this issue: https://github.com/PyCQA/flake8-bugbear/issues/110 239 except (ValueError, TypeError, OSError, IOError) as exc: # noqa: B014 240 logger.debug("Error parsing Core Agent Manifest", exc_info=exc) 241 242 def parse(self): 243 logger.debug("Parsing Core Agent manifest path: %s", self.manifest_path) 244 with open(self.manifest_path) as manifest_file: 245 self.raw = manifest_file.read() 246 self.json = json.loads(self.raw) 247 self.version = self.json["version"] 248 self.bin_version = self.json["core_agent_version"] 249 self.bin_name = self.json["core_agent_binary"] 250 self.sha256 = self.json["core_agent_binary_sha256"] 251 self.valid = True 252 logger.debug("Core Agent manifest json: %s", self.json) 253 254 def is_valid(self): 255 return self.valid 256 257 258 def sha256_digest(filename, block_size=65536): 259 try: 260 sha256 = hashlib.sha256() 261 with open(filename, "rb") as f: 262 for block in iter(lambda: f.read(block_size), b""): 263 sha256.update(block) 264 return sha256.hexdigest() 265 except OSError as exc: 266 logger.debug("Error on digest", exc_info=exc) 267 return None 268 [end of src/scout_apm/core/core_agent_manager.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/scout_apm/core/core_agent_manager.py b/src/scout_apm/core/core_agent_manager.py
--- a/src/scout_apm/core/core_agent_manager.py
+++ b/src/scout_apm/core/core_agent_manager.py
@@ -8,7 +8,6 @@
 import subprocess
 import tarfile
 import time
-import warnings
 
 from urllib3.exceptions import HTTPError
 
@@ -89,15 +88,7 @@
     def log_level(self):
         # Old deprecated name "log_level"
         log_level = scout_config.value("log_level")
-        if log_level is not None:
-            warnings.warn(
-                "The config name 'log_level' is deprecated - "
-                + "please use the new name 'core_agent_log_level' instead. "
-                + "This might be configured in your environment variables or "
-                + "framework settings as SCOUT_LOG_LEVEL.",
-                DeprecationWarning,
-            )
-        else:
+        if log_level is None:
             log_level = scout_config.value("core_agent_log_level")
         return ["--log-level", log_level]
 
{"golden_diff": "diff --git a/src/scout_apm/core/core_agent_manager.py b/src/scout_apm/core/core_agent_manager.py\n--- a/src/scout_apm/core/core_agent_manager.py\n+++ b/src/scout_apm/core/core_agent_manager.py\n@@ -8,7 +8,6 @@\n import subprocess\n import tarfile\n import time\n-import warnings\n \n from urllib3.exceptions import HTTPError\n \n@@ -89,15 +88,7 @@\n def log_level(self):\n # Old deprecated name \"log_level\"\n log_level = scout_config.value(\"log_level\")\n- if log_level is not None:\n- warnings.warn(\n- \"The config name 'log_level' is deprecated - \"\n- + \"please use the new name 'core_agent_log_level' instead. \"\n- + \"This might be configured in your environment variables or \"\n- + \"framework settings as SCOUT_LOG_LEVEL.\",\n- DeprecationWarning,\n- )\n- else:\n+ if log_level is None:\n log_level = scout_config.value(\"core_agent_log_level\")\n return [\"--log-level\", log_level]\n", "issue": "Remove SCOUT_LOG_LEVEL deprecation warning\nThe Heroku addon sets this environment variable automatically, and it can't vary based on language, so we shouldn't emit a deprecation warning (on Heroku only?) there since there's nothing users can do about it.\n", "before_files": [{"content": "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport hashlib\nimport json\nimport logging\nimport os\nimport subprocess\nimport tarfile\nimport time\nimport warnings\n\nfrom urllib3.exceptions import HTTPError\n\nfrom scout_apm.compat import urllib3_cert_pool_manager\nfrom scout_apm.core.config import scout_config\n\nlogger = logging.getLogger(__name__)\n\n\nclass CoreAgentManager(object):\n def __init__(self):\n self.core_agent_bin_path = None\n self.core_agent_bin_version = None\n self.core_agent_dir = \"{}/{}\".format(\n scout_config.value(\"core_agent_dir\"),\n scout_config.value(\"core_agent_full_name\"),\n )\n self.downloader = CoreAgentDownloader(\n self.core_agent_dir, scout_config.value(\"core_agent_full_name\")\n )\n\n def launch(self):\n if not scout_config.value(\"core_agent_launch\"):\n logger.debug(\n \"Not attempting to launch Core Agent \"\n \"due to 'core_agent_launch' setting.\"\n )\n return False\n\n if not self.verify():\n if not scout_config.value(\"core_agent_download\"):\n logger.debug(\n \"Not attempting to download Core Agent due \"\n \"to 'core_agent_download' setting.\"\n )\n return False\n\n self.download()\n\n if not self.verify():\n logger.debug(\"Failed to verify Core Agent. Not launching Core Agent.\")\n return False\n\n return self.run()\n\n def download(self):\n self.downloader.download()\n\n def run(self):\n try:\n subprocess.check_call(\n (\n self.agent_binary()\n + self.daemonize_flag()\n + self.log_level()\n + self.log_file()\n + self.config_file()\n + self.socket_path()\n ),\n close_fds=True,\n )\n except Exception:\n # TODO detect failure of launch properly\n logger.exception(\"Error running Core Agent\")\n return False\n return True\n\n def agent_binary(self):\n return [self.core_agent_bin_path, \"start\"]\n\n def daemonize_flag(self):\n return [\"--daemonize\", \"true\"]\n\n def socket_path(self):\n socket_path = scout_config.value(\"socket_path\")\n return [\"--socket\", socket_path]\n\n def log_level(self):\n # Old deprecated name \"log_level\"\n log_level = scout_config.value(\"log_level\")\n if log_level is not None:\n warnings.warn(\n \"The config name 'log_level' is deprecated - \"\n + \"please use the new name 'core_agent_log_level' instead. 
\"\n + \"This might be configured in your environment variables or \"\n + \"framework settings as SCOUT_LOG_LEVEL.\",\n DeprecationWarning,\n )\n else:\n log_level = scout_config.value(\"core_agent_log_level\")\n return [\"--log-level\", log_level]\n\n def log_file(self):\n path = scout_config.value(\"log_file\")\n if path is not None:\n return [\"--log-file\", path]\n else:\n return []\n\n def config_file(self):\n path = scout_config.value(\"config_file\")\n if path is not None:\n return [\"--config-file\", path]\n else:\n return []\n\n def verify(self):\n manifest = CoreAgentManifest(self.core_agent_dir + \"/manifest.json\")\n if not manifest.is_valid():\n logger.debug(\n \"Core Agent verification failed: CoreAgentManifest is not valid.\"\n )\n self.core_agent_bin_path = None\n self.core_agent_bin_version = None\n return False\n\n bin_path = self.core_agent_dir + \"/\" + manifest.bin_name\n if sha256_digest(bin_path) == manifest.sha256:\n self.core_agent_bin_path = bin_path\n self.core_agent_bin_version = manifest.bin_version\n return True\n else:\n logger.debug(\"Core Agent verification failed: SHA mismatch.\")\n self.core_agent_bin_path = None\n self.core_agent_bin_version = None\n return False\n\n\nclass CoreAgentDownloader(object):\n def __init__(self, download_destination, core_agent_full_name):\n self.stale_download_secs = 120\n self.destination = download_destination\n self.core_agent_full_name = core_agent_full_name\n self.package_location = self.destination + \"/{}.tgz\".format(\n self.core_agent_full_name\n )\n self.download_lock_path = self.destination + \"/download.lock\"\n self.download_lock_fd = None\n\n def download(self):\n self.create_core_agent_dir()\n self.obtain_download_lock()\n if self.download_lock_fd is not None:\n try:\n downloaded = self.download_package()\n if downloaded:\n self.untar()\n except (OSError, HTTPError):\n logger.exception(\"Exception raised while downloading Core Agent\")\n finally:\n self.release_download_lock()\n\n def create_core_agent_dir(self):\n try:\n os.makedirs(self.destination, scout_config.core_agent_permissions())\n except OSError:\n pass\n\n def obtain_download_lock(self):\n self.clean_stale_download_lock()\n try:\n self.download_lock_fd = os.open(\n self.download_lock_path,\n os.O_RDWR | os.O_CREAT | os.O_EXCL | os.O_NONBLOCK,\n )\n except OSError as exc:\n logger.debug(\n \"Could not obtain download lock on %s\",\n self.download_lock_path,\n exc_info=exc,\n )\n self.download_lock_fd = None\n\n def clean_stale_download_lock(self):\n try:\n delta = time.time() - os.stat(self.download_lock_path).st_ctime\n if delta > self.stale_download_secs:\n logger.debug(\"Clearing stale download lock file.\")\n os.unlink(self.download_lock_path)\n except OSError:\n pass\n\n def release_download_lock(self):\n if self.download_lock_fd is not None:\n os.unlink(self.download_lock_path)\n os.close(self.download_lock_fd)\n\n def download_package(self):\n full_url = self.full_url()\n logger.debug(\"Downloading: %s to %s\", full_url, self.package_location)\n http = urllib3_cert_pool_manager()\n response = http.request(\n \"GET\", full_url, preload_content=False, timeout=10.0, retries=3\n )\n try:\n if response.status != 200:\n return False\n with open(self.package_location, \"wb\") as fp:\n for chunk in response.stream():\n fp.write(chunk)\n finally:\n response.release_conn()\n return True\n\n def untar(self):\n t = tarfile.open(self.package_location, \"r\")\n t.extractall(self.destination)\n\n def full_url(self):\n return 
\"{root_url}/{core_agent_full_name}.tgz\".format(\n root_url=self.root_url(), core_agent_full_name=self.core_agent_full_name\n )\n\n def root_url(self):\n return scout_config.value(\"download_url\")\n\n\nclass CoreAgentManifest(object):\n def __init__(self, path):\n self.manifest_path = path\n self.bin_name = None\n self.bin_version = None\n self.sha256 = None\n self.valid = False\n try:\n self.parse()\n # noqa for this issue: https://github.com/PyCQA/flake8-bugbear/issues/110\n except (ValueError, TypeError, OSError, IOError) as exc: # noqa: B014\n logger.debug(\"Error parsing Core Agent Manifest\", exc_info=exc)\n\n def parse(self):\n logger.debug(\"Parsing Core Agent manifest path: %s\", self.manifest_path)\n with open(self.manifest_path) as manifest_file:\n self.raw = manifest_file.read()\n self.json = json.loads(self.raw)\n self.version = self.json[\"version\"]\n self.bin_version = self.json[\"core_agent_version\"]\n self.bin_name = self.json[\"core_agent_binary\"]\n self.sha256 = self.json[\"core_agent_binary_sha256\"]\n self.valid = True\n logger.debug(\"Core Agent manifest json: %s\", self.json)\n\n def is_valid(self):\n return self.valid\n\n\ndef sha256_digest(filename, block_size=65536):\n try:\n sha256 = hashlib.sha256()\n with open(filename, \"rb\") as f:\n for block in iter(lambda: f.read(block_size), b\"\"):\n sha256.update(block)\n return sha256.hexdigest()\n except OSError as exc:\n logger.debug(\"Error on digest\", exc_info=exc)\n return None\n", "path": "src/scout_apm/core/core_agent_manager.py"}]}
3,127
239
gh_patches_debug_629
rasdani/github-patches
git_diff
zulip__zulip-8684
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> lint rules: Prevent `return undefined;` We should sweep the code to replace `return undefined;` with `return;`, and then make a lint rule for it, either via eslint (if they support that) or by making a custom rule. </issue> <code> [start of version.py] 1 ZULIP_VERSION = "1.7.1+git" 2 3 # Bump the minor PROVISION_VERSION to indicate that folks should provision 4 # only when going from an old version of the code to a newer version. Bump 5 # the major version to indicate that folks should provision in both 6 # directions. 7 8 # Typically, adding a dependency only requires a minor version bump, and 9 # removing a dependency requires a major version bump. 10 11 PROVISION_VERSION = '15.9' 12 [end of version.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/version.py b/version.py
--- a/version.py
+++ b/version.py
@@ -8,4 +8,4 @@
 # Typically, adding a dependency only requires a minor version bump, and
 # removing a dependency requires a major version bump.
 
-PROVISION_VERSION = '15.9'
+PROVISION_VERSION = '15.10'
{"golden_diff": "diff --git a/version.py b/version.py\n--- a/version.py\n+++ b/version.py\n@@ -8,4 +8,4 @@\n # Typically, adding a dependency only requires a minor version bump, and\n # removing a dependency requires a major version bump.\n \n-PROVISION_VERSION = '15.9'\n+PROVISION_VERSION = '15.10'\n", "issue": "lint rules: Prevent `return undefined;`\nWe should sweep the code to replace `return undefined;` with `return;`, and then make a lint rule for it, either via eslint (if they support that) or by making a custom rule.\n", "before_files": [{"content": "ZULIP_VERSION = \"1.7.1+git\"\n\n# Bump the minor PROVISION_VERSION to indicate that folks should provision\n# only when going from an old version of the code to a newer version. Bump\n# the major version to indicate that folks should provision in both\n# directions.\n\n# Typically, adding a dependency only requires a minor version bump, and\n# removing a dependency requires a major version bump.\n\nPROVISION_VERSION = '15.9'\n", "path": "version.py"}]}
697
79
gh_patches_debug_3601
rasdani/github-patches
git_diff
cornellius-gp__gpytorch-1647
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Slow convergence of MultiTask regressor and unreliable results. Hello everyone, Currently I am trying to learn a model to predict multiple real-valued properties of a cellular image. I have a baseline that uses CNN (e.g. resnet) as feature extractor and FC head as predictor. I would like to try to use GP as predictor and I implemented some simple network following multitask tutorial and gpshot repo ( https://github.com/BayesWatch/deep-kernel-transfer/blob/master/methods/gpshot_regression.py ). Simplified code of GP layer and the main model. ``` class GPBasedModel(torch.nn.Module): def __init__(self, model_name, likelihood, out_features=10): super(GPBasedModel, self).__init__() self.body, feature_dim = create_body(...) train_feats = torch.FloatTensor(torch.zeros(32, feature_dim)).cuda() train_y = torch.FloatTensor(torch.zeros(32,out_features)).cuda() self.gp_layer = BatchIndependentMultitaskGPModel(train_feats, train_y, likelihood, out_dim) self.feature_norm = torch.nn.Sequential( torch.nn.BatchNorm1d(input_dim) ) def forward(self, batch): features = self.body(batch) features = features.view(features.size(0), -1) features = self.feature_norm(features) #z-score features self.gp_layer.set_train_data(inputs=features) res = self.gp_layer(features) return res class BatchIndependentMultitaskGPModel(gpytorch.models.ExactGP): def __init__(self, train_x, train_y, likelihood, out_dim): super().__init__(train_x, train_y, likelihood) self.mean_module = gpytorch.means.ConstantMean(batch_shape=torch.Size([out_dim])) self.covar_module = gpytorch.kernels.ScaleKernel( gpytorch.kernels.RBFKernel(batch_shape=torch.Size([out_dim])), batch_shape=torch.Size([out_dim]) ) def forward(self, x): mean_x = self.mean_module(x) covar_x = self.covar_module(x) return gpytorch.distributions.MultitaskMultivariateNormal.from_batch_mvn( gpytorch.distributions.MultivariateNormal(mean_x, covar_x) ) likelihood = gpytorch.likelihoods.MultitaskGaussianLikelihood(num_tasks=10) mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, model.gp_layer) optimizer = torch.optim.Adam([ {'params': model.body.parameters(), 'lr': 1e-3}, {'params': model.meta_head.parameters(), 'lr': 1e-3}, {'params': model.gp_layer.hyperparameters(), 'lr': 1e-1}, ], lr=1e-3) ``` likelihood defined as MultitaskGaussian and loss function as ExactMarginalLogLikelihood. However, results look really bad: convergence is painfully slow comparing to the model with FC head & results are much worse (distribution of predicted values does not look alike real ones) + MLL is relatively high (~1000-2000). I have tried to z-score both target & features (add BN for features) but it didnt change results a lot: despite MLL dropped to ~45 but it stays around this value during training. So my question is kinda simple: is smth wrong with the data, my code or GP model is not suitable for that case? Thanks in advance! </issue> <code> [start of gpytorch/mlls/exact_marginal_log_likelihood.py] 1 #!/usr/bin/env python3 2 3 from ..distributions import MultivariateNormal 4 from ..likelihoods import _GaussianLikelihoodBase 5 from .marginal_log_likelihood import MarginalLogLikelihood 6 7 8 class ExactMarginalLogLikelihood(MarginalLogLikelihood): 9 """ 10 The exact marginal log likelihood (MLL) for an exact Gaussian process with a 11 Gaussian likelihood. 12 13 .. 
note:: 14 This module will not work with anything other than a :obj:`~gpytorch.likelihoods.GaussianLikelihood` 15 and a :obj:`~gpytorch.models.ExactGP`. It also cannot be used in conjunction with 16 stochastic optimization. 17 18 :param ~gpytorch.likelihoods.GaussianLikelihood likelihood: The Gaussian likelihood for the model 19 :param ~gpytorch.models.ExactGP model: The exact GP model 20 21 Example: 22 >>> # model is a gpytorch.models.ExactGP 23 >>> # likelihood is a gpytorch.likelihoods.Likelihood 24 >>> mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, model) 25 >>> 26 >>> output = model(train_x) 27 >>> loss = -mll(output, train_y) 28 >>> loss.backward() 29 """ 30 31 def __init__(self, likelihood, model): 32 if not isinstance(likelihood, _GaussianLikelihoodBase): 33 raise RuntimeError("Likelihood must be Gaussian for exact inference") 34 super(ExactMarginalLogLikelihood, self).__init__(likelihood, model) 35 36 def _add_other_terms(self, res, params): 37 # Add additional terms (SGPR / learned inducing points, heteroskedastic likelihood models) 38 for added_loss_term in self.model.added_loss_terms(): 39 res = res.add(added_loss_term.loss(*params)) 40 41 # Add log probs of priors on the (functions of) parameters 42 for name, module, prior, closure, _ in self.named_priors(): 43 res.add_(prior.log_prob(closure(module)).sum()) 44 45 return res 46 47 def forward(self, function_dist, target, *params): 48 r""" 49 Computes the MLL given :math:`p(\mathbf f)` and :math:`\mathbf y`. 50 51 :param ~gpytorch.distributions.MultivariateNormal function_dist: :math:`p(\mathbf f)` 52 the outputs of the latent function (the :obj:`gpytorch.models.ExactGP`) 53 :param torch.Tensor target: :math:`\mathbf y` The target values 54 :rtype: torch.Tensor 55 :return: Exact MLL. Output shape corresponds to batch shape of the model/input data. 56 """ 57 if not isinstance(function_dist, MultivariateNormal): 58 raise RuntimeError("ExactMarginalLogLikelihood can only operate on Gaussian random variables") 59 60 # Get the log prob of the marginal distribution 61 output = self.likelihood(function_dist, *params) 62 res = output.log_prob(target) 63 res = self._add_other_terms(res, params) 64 65 # Scale by the amount of data we have 66 num_data = target.size(-1) 67 return res.div_(num_data) 68 69 def pyro_factor(self, output, target, *params): 70 import pyro 71 72 mll = target.size(-1) * self(output, target, *params) 73 pyro.factor("gp_mll", mll) 74 return mll 75 [end of gpytorch/mlls/exact_marginal_log_likelihood.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/gpytorch/mlls/exact_marginal_log_likelihood.py b/gpytorch/mlls/exact_marginal_log_likelihood.py --- a/gpytorch/mlls/exact_marginal_log_likelihood.py +++ b/gpytorch/mlls/exact_marginal_log_likelihood.py @@ -63,7 +63,7 @@ res = self._add_other_terms(res, params) # Scale by the amount of data we have - num_data = target.size(-1) + num_data = function_dist.event_shape.numel() return res.div_(num_data) def pyro_factor(self, output, target, *params):
{"golden_diff": "diff --git a/gpytorch/mlls/exact_marginal_log_likelihood.py b/gpytorch/mlls/exact_marginal_log_likelihood.py\n--- a/gpytorch/mlls/exact_marginal_log_likelihood.py\n+++ b/gpytorch/mlls/exact_marginal_log_likelihood.py\n@@ -63,7 +63,7 @@\n res = self._add_other_terms(res, params)\n \n # Scale by the amount of data we have\n- num_data = target.size(-1)\n+ num_data = function_dist.event_shape.numel()\n return res.div_(num_data)\n \n def pyro_factor(self, output, target, *params):\n", "issue": "Slow convergence of MultiTask regressor and unreliable results.\nHello everyone,\r\n\r\nCurrently I am trying to learn a model to predict multiple real-valued properties of a cellular image. I have a baseline that uses CNN (e.g. resnet) as feature extractor and FC head as predictor.\r\nI would like to try to use GP as predictor and I implemented some simple network following multitask tutorial and gpshot repo ( https://github.com/BayesWatch/deep-kernel-transfer/blob/master/methods/gpshot_regression.py ). \r\n\r\nSimplified code of GP layer and the main model.\r\n\r\n```\r\nclass GPBasedModel(torch.nn.Module):\r\n def __init__(self, model_name, likelihood, out_features=10):\r\n super(GPBasedModel, self).__init__()\r\n \r\n self.body, feature_dim = create_body(...)\r\n\r\n train_feats = torch.FloatTensor(torch.zeros(32, feature_dim)).cuda()\r\n train_y = torch.FloatTensor(torch.zeros(32,out_features)).cuda()\r\n \r\n self.gp_layer = BatchIndependentMultitaskGPModel(train_feats, train_y, likelihood, out_dim)\r\n \r\n self.feature_norm = torch.nn.Sequential(\r\n torch.nn.BatchNorm1d(input_dim)\r\n )\r\n \r\n def forward(self, batch):\r\n features = self.body(batch)\r\n features = features.view(features.size(0), -1)\r\n\r\n features = self.feature_norm(features) #z-score features\r\n \r\n self.gp_layer.set_train_data(inputs=features)\r\n res = self.gp_layer(features)\r\n \r\n return res \r\n\r\nclass BatchIndependentMultitaskGPModel(gpytorch.models.ExactGP):\r\n def __init__(self, train_x, train_y, likelihood, out_dim):\r\n super().__init__(train_x, train_y, likelihood)\r\n self.mean_module = gpytorch.means.ConstantMean(batch_shape=torch.Size([out_dim]))\r\n self.covar_module = gpytorch.kernels.ScaleKernel(\r\n gpytorch.kernels.RBFKernel(batch_shape=torch.Size([out_dim])),\r\n batch_shape=torch.Size([out_dim])\r\n )\r\n\r\n def forward(self, x):\r\n mean_x = self.mean_module(x)\r\n covar_x = self.covar_module(x)\r\n return gpytorch.distributions.MultitaskMultivariateNormal.from_batch_mvn(\r\n gpytorch.distributions.MultivariateNormal(mean_x, covar_x)\r\n )\r\n\r\nlikelihood = gpytorch.likelihoods.MultitaskGaussianLikelihood(num_tasks=10)\r\nmll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, model.gp_layer)\r\n\r\noptimizer = torch.optim.Adam([\r\n {'params': model.body.parameters(), 'lr': 1e-3},\r\n {'params': model.meta_head.parameters(), 'lr': 1e-3},\r\n {'params': model.gp_layer.hyperparameters(), 'lr': 1e-1},\r\n ], lr=1e-3)\r\n\r\n```\r\n\r\nlikelihood defined as MultitaskGaussian and loss function as ExactMarginalLogLikelihood. 
\r\n\r\nHowever, results look really bad: convergence is painfully slow comparing to the model with FC head & results are much worse (distribution of predicted values does not look alike real ones) + MLL is relatively high (~1000-2000).\r\n\r\nI have tried to z-score both target & features (add BN for features) but it didnt change results a lot: despite MLL dropped to ~45 but it stays around this value during training.\r\n\r\nSo my question is kinda simple: is smth wrong with the data, my code or GP model is not suitable for that case? \r\n\r\nThanks in advance!\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nfrom ..distributions import MultivariateNormal\nfrom ..likelihoods import _GaussianLikelihoodBase\nfrom .marginal_log_likelihood import MarginalLogLikelihood\n\n\nclass ExactMarginalLogLikelihood(MarginalLogLikelihood):\n \"\"\"\n The exact marginal log likelihood (MLL) for an exact Gaussian process with a\n Gaussian likelihood.\n\n .. note::\n This module will not work with anything other than a :obj:`~gpytorch.likelihoods.GaussianLikelihood`\n and a :obj:`~gpytorch.models.ExactGP`. It also cannot be used in conjunction with\n stochastic optimization.\n\n :param ~gpytorch.likelihoods.GaussianLikelihood likelihood: The Gaussian likelihood for the model\n :param ~gpytorch.models.ExactGP model: The exact GP model\n\n Example:\n >>> # model is a gpytorch.models.ExactGP\n >>> # likelihood is a gpytorch.likelihoods.Likelihood\n >>> mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, model)\n >>>\n >>> output = model(train_x)\n >>> loss = -mll(output, train_y)\n >>> loss.backward()\n \"\"\"\n\n def __init__(self, likelihood, model):\n if not isinstance(likelihood, _GaussianLikelihoodBase):\n raise RuntimeError(\"Likelihood must be Gaussian for exact inference\")\n super(ExactMarginalLogLikelihood, self).__init__(likelihood, model)\n\n def _add_other_terms(self, res, params):\n # Add additional terms (SGPR / learned inducing points, heteroskedastic likelihood models)\n for added_loss_term in self.model.added_loss_terms():\n res = res.add(added_loss_term.loss(*params))\n\n # Add log probs of priors on the (functions of) parameters\n for name, module, prior, closure, _ in self.named_priors():\n res.add_(prior.log_prob(closure(module)).sum())\n\n return res\n\n def forward(self, function_dist, target, *params):\n r\"\"\"\n Computes the MLL given :math:`p(\\mathbf f)` and :math:`\\mathbf y`.\n\n :param ~gpytorch.distributions.MultivariateNormal function_dist: :math:`p(\\mathbf f)`\n the outputs of the latent function (the :obj:`gpytorch.models.ExactGP`)\n :param torch.Tensor target: :math:`\\mathbf y` The target values\n :rtype: torch.Tensor\n :return: Exact MLL. Output shape corresponds to batch shape of the model/input data.\n \"\"\"\n if not isinstance(function_dist, MultivariateNormal):\n raise RuntimeError(\"ExactMarginalLogLikelihood can only operate on Gaussian random variables\")\n\n # Get the log prob of the marginal distribution\n output = self.likelihood(function_dist, *params)\n res = output.log_prob(target)\n res = self._add_other_terms(res, params)\n\n # Scale by the amount of data we have\n num_data = target.size(-1)\n return res.div_(num_data)\n\n def pyro_factor(self, output, target, *params):\n import pyro\n\n mll = target.size(-1) * self(output, target, *params)\n pyro.factor(\"gp_mll\", mll)\n return mll\n", "path": "gpytorch/mlls/exact_marginal_log_likelihood.py"}]}
2180
149
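A hedged side note on the one-line gpytorch fix above: for a multitask marginal over n training points and t tasks the event shape is (n_points, n_tasks), so the old divisor `target.size(-1)` scaled the exact MLL by the task count alone, while `function_dist.event_shape.numel()` scales it by every observed value. A minimal sketch, assuming 32 points and 10 tasks to mirror the issue report:

```python
import torch

# Assumed shapes mirroring the issue report: 32 training points, 10 tasks.
n_points, n_tasks = 32, 10
target = torch.zeros(n_points, n_tasks)

old_divisor = target.size(-1)                          # 10 -> scales by the task count only
new_divisor = torch.Size([n_points, n_tasks]).numel()  # 320 -> scales by all observed values

print(old_divisor, new_divisor)
```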
gh_patches_debug_37974
rasdani/github-patches
git_diff
modin-project__modin-6956
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> FEAT: Support sqlalchemy connections in read_sql by converting them to strings We can use the trick that we use [here](https://github.com/modin-project/modin/blob/01c529cf06cfaf412b5725f41c81a5f914b44b95/modin/core/io/sql/sql_dispatcher.py#L152) for `to_sql` to support reading from sqlalchemy connections in `read_sql`. Currently, for the distributed read, we [require](https://github.com/modin-project/modin/blob/01c529cf06cfaf412b5725f41c81a5f914b44b95/modin/core/io/sql/sql_dispatcher.py#L64) the user to supply a connection string or a `ModinDatabaseConnection` object that usually contains their credentials. Otherwise we default to pandas. </issue> <code> [start of modin/core/io/sql/sql_dispatcher.py] 1 # Licensed to Modin Development Team under one or more contributor license agreements. 2 # See the NOTICE file distributed with this work for additional information regarding 3 # copyright ownership. The Modin Development Team licenses this file to you under the 4 # Apache License, Version 2.0 (the "License"); you may not use this file except in 5 # compliance with the License. You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software distributed under 10 # the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF 11 # ANY KIND, either express or implied. See the License for the specific language 12 # governing permissions and limitations under the License. 13 14 """ 15 Module houses `SQLDispatcher` class. 16 17 `SQLDispatcher` contains utils for handling SQL queries or database tables, 18 inherits util functions for handling files from `FileDispatcher` class and can be 19 used as base class for dipatchers of SQL queries. 20 """ 21 22 import math 23 24 import numpy as np 25 import pandas 26 27 from modin.config import NPartitions, ReadSqlEngine 28 from modin.core.io.file_dispatcher import FileDispatcher 29 from modin.db_conn import ModinDatabaseConnection 30 31 32 class SQLDispatcher(FileDispatcher): 33 """Class handles utils for reading SQL queries or database tables.""" 34 35 @classmethod 36 def _read(cls, sql, con, index_col=None, **kwargs): 37 """ 38 Read a SQL query or database table into a query compiler. 39 40 Parameters 41 ---------- 42 sql : str or SQLAlchemy Selectable (select or text object) 43 SQL query to be executed or a table name. 44 con : SQLAlchemy connectable, str, sqlite3 connection, or ModinDatabaseConnection 45 Connection object to database. 46 index_col : str or list of str, optional 47 Column(s) to set as index(MultiIndex). 48 **kwargs : dict 49 Parameters to pass into `pandas.read_sql` function. 50 51 Returns 52 ------- 53 BaseQueryCompiler 54 Query compiler with imported data for further processing. 55 """ 56 if isinstance(con, str): 57 con = ModinDatabaseConnection("sqlalchemy", con) 58 if not isinstance(con, ModinDatabaseConnection): 59 return cls.single_worker_read( 60 sql, 61 con=con, 62 index_col=index_col, 63 read_sql_engine=ReadSqlEngine.get(), 64 reason="To use the parallel implementation of `read_sql`, pass either " 65 + "the SQL connection string or a ModinDatabaseConnection " 66 + "with the arguments required to make a connection, instead " 67 + f"of {type(con)}. 
For documentation on the ModinDatabaseConnection, see " 68 + "https://modin.readthedocs.io/en/latest/supported_apis/io_supported.html#connecting-to-a-database-for-read-sql", 69 **kwargs, 70 ) 71 row_count_query = con.row_count_query(sql) 72 connection_for_pandas = con.get_connection() 73 colum_names_query = con.column_names_query(sql) 74 row_cnt = pandas.read_sql(row_count_query, connection_for_pandas).squeeze() 75 cols_names_df = pandas.read_sql( 76 colum_names_query, connection_for_pandas, index_col=index_col 77 ) 78 cols_names = cols_names_df.columns 79 num_partitions = NPartitions.get() 80 partition_ids = [None] * num_partitions 81 index_ids = [None] * num_partitions 82 dtypes_ids = [None] * num_partitions 83 limit = math.ceil(row_cnt / num_partitions) 84 for part in range(num_partitions): 85 offset = part * limit 86 query = con.partition_query(sql, limit, offset) 87 *partition_ids[part], index_ids[part], dtypes_ids[part] = cls.deploy( 88 func=cls.parse, 89 f_kwargs={ 90 "num_splits": num_partitions, 91 "sql": query, 92 "con": con, 93 "index_col": index_col, 94 "read_sql_engine": ReadSqlEngine.get(), 95 **kwargs, 96 }, 97 num_returns=num_partitions + 2, 98 ) 99 partition_ids[part] = [ 100 cls.frame_partition_cls(obj) for obj in partition_ids[part] 101 ] 102 if index_col is None: # sum all lens returned from partitions 103 index_lens = cls.materialize(index_ids) 104 new_index = pandas.RangeIndex(sum(index_lens)) 105 else: # concat index returned from partitions 106 index_lst = [ 107 x for part_index in cls.materialize(index_ids) for x in part_index 108 ] 109 new_index = pandas.Index(index_lst).set_names(index_col) 110 new_frame = cls.frame_cls(np.array(partition_ids), new_index, cols_names) 111 new_frame.synchronize_labels(axis=0) 112 return cls.query_compiler_cls(new_frame) 113 114 @classmethod 115 def _is_supported_sqlalchemy_object(cls, obj): # noqa: GL08 116 supported = None 117 try: 118 import sqlalchemy as sa 119 120 supported = isinstance(obj, (sa.engine.Engine, sa.engine.Connection)) 121 except ImportError: 122 supported = False 123 return supported 124 125 @classmethod 126 def write(cls, qc, **kwargs): 127 """ 128 Write records stored in the `qc` to a SQL database. 129 130 Parameters 131 ---------- 132 qc : BaseQueryCompiler 133 The query compiler of the Modin dataframe that we want to run ``to_sql`` on. 134 **kwargs : dict 135 Parameters for ``pandas.to_sql(**kwargs)``. 136 """ 137 # we first insert an empty DF in order to create the full table in the database 138 # This also helps to validate the input against pandas 139 # we would like to_sql() to complete only when all rows have been inserted into the database 140 # since the mapping operation is non-blocking, each partition will return an empty DF 141 # so at the end, the blocking operation will be this empty DF to_pandas 142 143 if not isinstance( 144 kwargs["con"], str 145 ) and not cls._is_supported_sqlalchemy_object(kwargs["con"]): 146 return cls.base_io.to_sql(qc, **kwargs) 147 148 # In the case that we are given a SQLAlchemy Connection or Engine, the objects 149 # are not pickleable. We have to convert it to the URL string and connect from 150 # each of the workers. 
151 if cls._is_supported_sqlalchemy_object(kwargs["con"]): 152 kwargs["con"] = kwargs["con"].engine.url.render_as_string( 153 hide_password=False 154 ) 155 156 empty_df = qc.getitem_row_array([0]).to_pandas().head(0) 157 empty_df.to_sql(**kwargs) 158 # so each partition will append its respective DF 159 kwargs["if_exists"] = "append" 160 columns = qc.columns 161 162 def func(df): # pragma: no cover 163 """ 164 Override column names in the wrapped dataframe and convert it to SQL. 165 166 Notes 167 ----- 168 This function returns an empty ``pandas.DataFrame`` because ``apply_full_axis`` 169 expects a Frame object as a result of operation (and ``to_sql`` has no dataframe result). 170 """ 171 df.columns = columns 172 df.to_sql(**kwargs) 173 return pandas.DataFrame() 174 175 # Ensure that the metadata is synchronized 176 qc._modin_frame._propagate_index_objs(axis=None) 177 result = qc._modin_frame.apply_full_axis(1, func, new_index=[], new_columns=[]) 178 cls.materialize( 179 [part.list_of_blocks[0] for row in result._partitions for part in row] 180 ) 181 [end of modin/core/io/sql/sql_dispatcher.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/modin/core/io/sql/sql_dispatcher.py b/modin/core/io/sql/sql_dispatcher.py --- a/modin/core/io/sql/sql_dispatcher.py +++ b/modin/core/io/sql/sql_dispatcher.py @@ -32,6 +32,17 @@ class SQLDispatcher(FileDispatcher): """Class handles utils for reading SQL queries or database tables.""" + @classmethod + def _is_supported_sqlalchemy_object(cls, obj): # noqa: GL08 + supported = None + try: + import sqlalchemy as sa + + supported = isinstance(obj, (sa.engine.Engine, sa.engine.Connection)) + except ImportError: + supported = False + return supported + @classmethod def _read(cls, sql, con, index_col=None, **kwargs): """ @@ -55,6 +66,12 @@ """ if isinstance(con, str): con = ModinDatabaseConnection("sqlalchemy", con) + + if cls._is_supported_sqlalchemy_object(con): + con = ModinDatabaseConnection( + "sqlalchemy", con.engine.url.render_as_string(hide_password=False) + ) + if not isinstance(con, ModinDatabaseConnection): return cls.single_worker_read( sql, @@ -62,7 +79,7 @@ index_col=index_col, read_sql_engine=ReadSqlEngine.get(), reason="To use the parallel implementation of `read_sql`, pass either " - + "the SQL connection string or a ModinDatabaseConnection " + + "a SQLAlchemy connectable, the SQL connection string, or a ModinDatabaseConnection " + "with the arguments required to make a connection, instead " + f"of {type(con)}. For documentation on the ModinDatabaseConnection, see " + "https://modin.readthedocs.io/en/latest/supported_apis/io_supported.html#connecting-to-a-database-for-read-sql", @@ -111,17 +128,6 @@ new_frame.synchronize_labels(axis=0) return cls.query_compiler_cls(new_frame) - @classmethod - def _is_supported_sqlalchemy_object(cls, obj): # noqa: GL08 - supported = None - try: - import sqlalchemy as sa - - supported = isinstance(obj, (sa.engine.Engine, sa.engine.Connection)) - except ImportError: - supported = False - return supported - @classmethod def write(cls, qc, **kwargs): """
{"golden_diff": "diff --git a/modin/core/io/sql/sql_dispatcher.py b/modin/core/io/sql/sql_dispatcher.py\n--- a/modin/core/io/sql/sql_dispatcher.py\n+++ b/modin/core/io/sql/sql_dispatcher.py\n@@ -32,6 +32,17 @@\n class SQLDispatcher(FileDispatcher):\n \"\"\"Class handles utils for reading SQL queries or database tables.\"\"\"\n \n+ @classmethod\n+ def _is_supported_sqlalchemy_object(cls, obj): # noqa: GL08\n+ supported = None\n+ try:\n+ import sqlalchemy as sa\n+\n+ supported = isinstance(obj, (sa.engine.Engine, sa.engine.Connection))\n+ except ImportError:\n+ supported = False\n+ return supported\n+\n @classmethod\n def _read(cls, sql, con, index_col=None, **kwargs):\n \"\"\"\n@@ -55,6 +66,12 @@\n \"\"\"\n if isinstance(con, str):\n con = ModinDatabaseConnection(\"sqlalchemy\", con)\n+\n+ if cls._is_supported_sqlalchemy_object(con):\n+ con = ModinDatabaseConnection(\n+ \"sqlalchemy\", con.engine.url.render_as_string(hide_password=False)\n+ )\n+\n if not isinstance(con, ModinDatabaseConnection):\n return cls.single_worker_read(\n sql,\n@@ -62,7 +79,7 @@\n index_col=index_col,\n read_sql_engine=ReadSqlEngine.get(),\n reason=\"To use the parallel implementation of `read_sql`, pass either \"\n- + \"the SQL connection string or a ModinDatabaseConnection \"\n+ + \"a SQLAlchemy connectable, the SQL connection string, or a ModinDatabaseConnection \"\n + \"with the arguments required to make a connection, instead \"\n + f\"of {type(con)}. For documentation on the ModinDatabaseConnection, see \"\n + \"https://modin.readthedocs.io/en/latest/supported_apis/io_supported.html#connecting-to-a-database-for-read-sql\",\n@@ -111,17 +128,6 @@\n new_frame.synchronize_labels(axis=0)\n return cls.query_compiler_cls(new_frame)\n \n- @classmethod\n- def _is_supported_sqlalchemy_object(cls, obj): # noqa: GL08\n- supported = None\n- try:\n- import sqlalchemy as sa\n-\n- supported = isinstance(obj, (sa.engine.Engine, sa.engine.Connection))\n- except ImportError:\n- supported = False\n- return supported\n-\n @classmethod\n def write(cls, qc, **kwargs):\n \"\"\"\n", "issue": "FEAT: Support sqlalchemy connections in read_sql by converting them to strings\nWe can use the trick that we use [here](https://github.com/modin-project/modin/blob/01c529cf06cfaf412b5725f41c81a5f914b44b95/modin/core/io/sql/sql_dispatcher.py#L152) for `to_sql` to support reading from sqlalchemy connections in `read_sql`. Currently, for the distributed read, we [require](https://github.com/modin-project/modin/blob/01c529cf06cfaf412b5725f41c81a5f914b44b95/modin/core/io/sql/sql_dispatcher.py#L64) the user to supply a connection string or a `ModinDatabaseConnection` object that usually contains their credentials. Otherwise we default to pandas.\n", "before_files": [{"content": "# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n# compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under\n# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. 
See the License for the specific language\n# governing permissions and limitations under the License.\n\n\"\"\"\nModule houses `SQLDispatcher` class.\n\n`SQLDispatcher` contains utils for handling SQL queries or database tables,\ninherits util functions for handling files from `FileDispatcher` class and can be\nused as base class for dipatchers of SQL queries.\n\"\"\"\n\nimport math\n\nimport numpy as np\nimport pandas\n\nfrom modin.config import NPartitions, ReadSqlEngine\nfrom modin.core.io.file_dispatcher import FileDispatcher\nfrom modin.db_conn import ModinDatabaseConnection\n\n\nclass SQLDispatcher(FileDispatcher):\n \"\"\"Class handles utils for reading SQL queries or database tables.\"\"\"\n\n @classmethod\n def _read(cls, sql, con, index_col=None, **kwargs):\n \"\"\"\n Read a SQL query or database table into a query compiler.\n\n Parameters\n ----------\n sql : str or SQLAlchemy Selectable (select or text object)\n SQL query to be executed or a table name.\n con : SQLAlchemy connectable, str, sqlite3 connection, or ModinDatabaseConnection\n Connection object to database.\n index_col : str or list of str, optional\n Column(s) to set as index(MultiIndex).\n **kwargs : dict\n Parameters to pass into `pandas.read_sql` function.\n\n Returns\n -------\n BaseQueryCompiler\n Query compiler with imported data for further processing.\n \"\"\"\n if isinstance(con, str):\n con = ModinDatabaseConnection(\"sqlalchemy\", con)\n if not isinstance(con, ModinDatabaseConnection):\n return cls.single_worker_read(\n sql,\n con=con,\n index_col=index_col,\n read_sql_engine=ReadSqlEngine.get(),\n reason=\"To use the parallel implementation of `read_sql`, pass either \"\n + \"the SQL connection string or a ModinDatabaseConnection \"\n + \"with the arguments required to make a connection, instead \"\n + f\"of {type(con)}. 
For documentation on the ModinDatabaseConnection, see \"\n + \"https://modin.readthedocs.io/en/latest/supported_apis/io_supported.html#connecting-to-a-database-for-read-sql\",\n **kwargs,\n )\n row_count_query = con.row_count_query(sql)\n connection_for_pandas = con.get_connection()\n colum_names_query = con.column_names_query(sql)\n row_cnt = pandas.read_sql(row_count_query, connection_for_pandas).squeeze()\n cols_names_df = pandas.read_sql(\n colum_names_query, connection_for_pandas, index_col=index_col\n )\n cols_names = cols_names_df.columns\n num_partitions = NPartitions.get()\n partition_ids = [None] * num_partitions\n index_ids = [None] * num_partitions\n dtypes_ids = [None] * num_partitions\n limit = math.ceil(row_cnt / num_partitions)\n for part in range(num_partitions):\n offset = part * limit\n query = con.partition_query(sql, limit, offset)\n *partition_ids[part], index_ids[part], dtypes_ids[part] = cls.deploy(\n func=cls.parse,\n f_kwargs={\n \"num_splits\": num_partitions,\n \"sql\": query,\n \"con\": con,\n \"index_col\": index_col,\n \"read_sql_engine\": ReadSqlEngine.get(),\n **kwargs,\n },\n num_returns=num_partitions + 2,\n )\n partition_ids[part] = [\n cls.frame_partition_cls(obj) for obj in partition_ids[part]\n ]\n if index_col is None: # sum all lens returned from partitions\n index_lens = cls.materialize(index_ids)\n new_index = pandas.RangeIndex(sum(index_lens))\n else: # concat index returned from partitions\n index_lst = [\n x for part_index in cls.materialize(index_ids) for x in part_index\n ]\n new_index = pandas.Index(index_lst).set_names(index_col)\n new_frame = cls.frame_cls(np.array(partition_ids), new_index, cols_names)\n new_frame.synchronize_labels(axis=0)\n return cls.query_compiler_cls(new_frame)\n\n @classmethod\n def _is_supported_sqlalchemy_object(cls, obj): # noqa: GL08\n supported = None\n try:\n import sqlalchemy as sa\n\n supported = isinstance(obj, (sa.engine.Engine, sa.engine.Connection))\n except ImportError:\n supported = False\n return supported\n\n @classmethod\n def write(cls, qc, **kwargs):\n \"\"\"\n Write records stored in the `qc` to a SQL database.\n\n Parameters\n ----------\n qc : BaseQueryCompiler\n The query compiler of the Modin dataframe that we want to run ``to_sql`` on.\n **kwargs : dict\n Parameters for ``pandas.to_sql(**kwargs)``.\n \"\"\"\n # we first insert an empty DF in order to create the full table in the database\n # This also helps to validate the input against pandas\n # we would like to_sql() to complete only when all rows have been inserted into the database\n # since the mapping operation is non-blocking, each partition will return an empty DF\n # so at the end, the blocking operation will be this empty DF to_pandas\n\n if not isinstance(\n kwargs[\"con\"], str\n ) and not cls._is_supported_sqlalchemy_object(kwargs[\"con\"]):\n return cls.base_io.to_sql(qc, **kwargs)\n\n # In the case that we are given a SQLAlchemy Connection or Engine, the objects\n # are not pickleable. 
We have to convert it to the URL string and connect from\n # each of the workers.\n if cls._is_supported_sqlalchemy_object(kwargs[\"con\"]):\n kwargs[\"con\"] = kwargs[\"con\"].engine.url.render_as_string(\n hide_password=False\n )\n\n empty_df = qc.getitem_row_array([0]).to_pandas().head(0)\n empty_df.to_sql(**kwargs)\n # so each partition will append its respective DF\n kwargs[\"if_exists\"] = \"append\"\n columns = qc.columns\n\n def func(df): # pragma: no cover\n \"\"\"\n Override column names in the wrapped dataframe and convert it to SQL.\n\n Notes\n -----\n This function returns an empty ``pandas.DataFrame`` because ``apply_full_axis``\n expects a Frame object as a result of operation (and ``to_sql`` has no dataframe result).\n \"\"\"\n df.columns = columns\n df.to_sql(**kwargs)\n return pandas.DataFrame()\n\n # Ensure that the metadata is synchronized\n qc._modin_frame._propagate_index_objs(axis=None)\n result = qc._modin_frame.apply_full_axis(1, func, new_index=[], new_columns=[])\n cls.materialize(\n [part.list_of_blocks[0] for row in result._partitions for part in row]\n )\n", "path": "modin/core/io/sql/sql_dispatcher.py"}]}
2785
549
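A minimal usage sketch of what the modin patch above enables, assuming a local SQLite file `example.db` and a `users` table created on the spot: the SQLAlchemy engine is passed straight to `read_sql`, and internally it is rendered to a URL string and wrapped in a `ModinDatabaseConnection` so the read can still be partitioned rather than falling back to pandas.

```python
import pandas
import sqlalchemy as sa
import modin.pandas as pd

# Assumed local SQLite database and table, created here only for illustration.
engine = sa.create_engine("sqlite:///example.db")
pandas.DataFrame({"id": [1, 2], "name": ["a", "b"]}).to_sql(
    "users", engine, if_exists="replace", index=False
)

# With the patch, the engine itself is an accepted `con` value for the distributed path.
df = pd.read_sql("SELECT * FROM users", con=engine)
print(df)
```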
gh_patches_debug_51665
rasdani/github-patches
git_diff
nilearn__nilearn-2960
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Variable defined multiple times This code from [`nilearn/_utils/numpy_conversions.py`](https://github.com/nilearn/nilearn/blob/ac1a934/nilearn/_utils/numpy_conversions.py#L106-L107) sets `ret` twice: ```python ret = np.array(arr, copy=True) ret = _asarray(arr, dtype=dtype, order=order) ``` Perhaps the intent was:: ```python ret = np.array(arr, copy=True) ret = _asarray(ret, dtype=dtype, order=order) ``` </issue> <code> [start of nilearn/_utils/numpy_conversions.py] 1 """ 2 Validation and conversion utilities for numpy. 3 """ 4 # Author: Gael Varoquaux, Alexandre Abraham, Philippe Gervais 5 # License: simplified BSD 6 7 import csv 8 import numpy as np 9 10 11 def _asarray(arr, dtype=None, order=None): 12 # np.asarray does not take "K" and "A" orders in version 1.3.0 13 if order in ("K", "A", None): 14 if (arr.itemsize == 1 and dtype in (bool, np.bool_)) \ 15 or (arr.dtype in (bool, np.bool_) and 16 np.dtype(dtype).itemsize == 1): 17 ret = arr.view(dtype=dtype) 18 else: 19 ret = np.asarray(arr, dtype=dtype) 20 else: 21 if (((arr.itemsize == 1 and dtype in (bool, np.bool)) or 22 (arr.dtype in (bool, np.bool_) and np.dtype(dtype).itemsize == 1)) 23 and (order == "F" and arr.flags["F_CONTIGUOUS"] 24 or order == "C" and arr.flags["C_CONTIGUOUS"])): 25 ret = arr.view(dtype=dtype) 26 else: 27 ret = np.asarray(arr, dtype=dtype, order=order) 28 29 return ret 30 31 32 def as_ndarray(arr, copy=False, dtype=None, order='K'): 33 """Starting with an arbitrary array, convert to numpy.ndarray. 34 35 In the case of a memmap array, a copy is automatically made to break the 36 link with the underlying file (whatever the value of the "copy" keyword). 37 38 The purpose of this function is mainly to get rid of memmap objects, but 39 it can be used for other purposes. In particular, combining copying and 40 casting can lead to performance improvements in some cases, by avoiding 41 unnecessary copies. 42 43 If not specified, input array order is preserved, in all cases, even when 44 a copy is requested. 45 46 Caveat: this function does not copy during bool to/from 1-byte dtype 47 conversions. This can lead to some surprising results in some rare cases. 48 Example: 49 50 a = numpy.asarray([0, 1, 2], dtype=numpy.int8) 51 b = as_ndarray(a, dtype=bool) # array([False, True, True], dtype=bool) 52 c = as_ndarray(b, dtype=numpy.int8) # array([0, 1, 2], dtype=numpy.int8) 53 54 The usually expected result for the last line would be array([0, 1, 1]) 55 because True evaluates to 1. Since there is no copy made here, the original 56 array is recovered. 57 58 Parameters 59 ---------- 60 arr: array-like 61 input array. Any value accepted by numpy.asarray is valid. 62 63 copy: bool 64 if True, force a copy of the array. Always True when arr is a memmap. 65 66 dtype: any numpy dtype 67 dtype of the returned array. Performing copy and type conversion at the 68 same time can in some cases avoid an additional copy. 69 70 order: string 71 gives the order of the returned array. 72 Valid values are: "C", "F", "A", "K", None. 73 default is "K". See ndarray.copy() for more information. 74 75 Returns 76 ------- 77 ret: numpy.ndarray 78 Numpy array containing the same data as arr, always of class 79 numpy.ndarray, and with no link to any underlying file. 80 """ 81 # This function should work on numpy 1.3 82 # in this version, astype() and copy() have no "order" keyword. 
83 # and asarray() does not accept the "K" and "A" values for order. 84 85 # numpy.asarray never copies a subclass of numpy.ndarray (even for 86 # memmaps) when dtype is unchanged. 87 # .astype() always copies 88 89 if order not in ("C", "F", "A", "K", None): 90 raise ValueError("Invalid value for 'order': %s" % str(order)) 91 92 if isinstance(arr, np.memmap): 93 if dtype is None: 94 if order in ("K", "A", None): 95 ret = np.array(np.asarray(arr), copy=True) 96 else: 97 ret = np.array(np.asarray(arr), copy=True, order=order) 98 else: 99 if order in ("K", "A", None): 100 # always copy (even when dtype does not change) 101 ret = np.asarray(arr).astype(dtype) 102 else: 103 # First load data from disk without changing order 104 # Changing order while reading through a memmap is incredibly 105 # inefficient. 106 ret = np.array(arr, copy=True) 107 ret = _asarray(arr, dtype=dtype, order=order) 108 109 elif isinstance(arr, np.ndarray): 110 ret = _asarray(arr, dtype=dtype, order=order) 111 # In the present cas, np.may_share_memory result is always reliable. 112 if np.may_share_memory(ret, arr) and copy: 113 # order-preserving copy 114 if ret.flags["F_CONTIGUOUS"]: 115 ret = ret.T.copy().T 116 else: 117 ret = ret.copy() 118 119 elif isinstance(arr, (list, tuple)): 120 if order in ("A", "K"): 121 ret = np.asarray(arr, dtype=dtype) 122 else: 123 ret = np.asarray(arr, dtype=dtype, order=order) 124 125 else: 126 raise ValueError("Type not handled: %s" % arr.__class__) 127 128 return ret 129 130 131 def csv_to_array(csv_path, delimiters=' \t,;', **kwargs): 132 """Read a CSV file by trying to guess its delimiter 133 134 Parameters 135 ---------- 136 csv_path: string 137 Path of the CSV file to load. 138 139 delimiters: string 140 Each character of the delimiters string is a potential delimiters for 141 the CSV file. 142 143 kwargs: keyword arguments 144 The additional keyword arguments are passed to numpy.genfromtxt when 145 loading the CSV. 146 147 Returns 148 ------- 149 array: numpy.ndarray 150 An array containing the data loaded from the CSV file. 151 """ 152 if not isinstance(csv_path, str): 153 raise TypeError('CSV must be a file path. Got a CSV of type: %s' % 154 type(csv_path)) 155 156 try: 157 # First, we try genfromtxt which works in most cases. 158 array = np.genfromtxt(csv_path, loose=False, **kwargs) 159 except ValueError: 160 # There was an error during the conversion to numpy array, probably 161 # because the delimiter is wrong. 162 # In that case, we try to guess the delimiter. 163 try: 164 with open(csv_path, 'r') as csv_file: 165 dialect = csv.Sniffer().sniff(csv_file.readline(), delimiters) 166 except csv.Error as e: 167 raise TypeError( 168 'Could not read CSV file [%s]: %s' % (csv_path, e.args[0])) 169 170 array = np.genfromtxt(csv_path, delimiter=dialect.delimiter, **kwargs) 171 172 return array 173 [end of nilearn/_utils/numpy_conversions.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/nilearn/_utils/numpy_conversions.py b/nilearn/_utils/numpy_conversions.py --- a/nilearn/_utils/numpy_conversions.py +++ b/nilearn/_utils/numpy_conversions.py @@ -104,7 +104,7 @@ # Changing order while reading through a memmap is incredibly # inefficient. ret = np.array(arr, copy=True) - ret = _asarray(arr, dtype=dtype, order=order) + ret = _asarray(ret, dtype=dtype, order=order) elif isinstance(arr, np.ndarray): ret = _asarray(arr, dtype=dtype, order=order)
{"golden_diff": "diff --git a/nilearn/_utils/numpy_conversions.py b/nilearn/_utils/numpy_conversions.py\n--- a/nilearn/_utils/numpy_conversions.py\n+++ b/nilearn/_utils/numpy_conversions.py\n@@ -104,7 +104,7 @@\n # Changing order while reading through a memmap is incredibly\n # inefficient.\n ret = np.array(arr, copy=True)\n- ret = _asarray(arr, dtype=dtype, order=order)\n+ ret = _asarray(ret, dtype=dtype, order=order)\n \n elif isinstance(arr, np.ndarray):\n ret = _asarray(arr, dtype=dtype, order=order)\n", "issue": "Variable defined multiple times\nThis code from [`nilearn/_utils/numpy_conversions.py`](https://github.com/nilearn/nilearn/blob/ac1a934/nilearn/_utils/numpy_conversions.py#L106-L107) sets `ret` twice:\r\n```python\r\n ret = np.array(arr, copy=True)\r\n ret = _asarray(arr, dtype=dtype, order=order)\r\n```\r\nPerhaps the intent was::\r\n```python\r\n ret = np.array(arr, copy=True)\r\n ret = _asarray(ret, dtype=dtype, order=order)\r\n```\r\n\n", "before_files": [{"content": "\"\"\"\nValidation and conversion utilities for numpy.\n\"\"\"\n# Author: Gael Varoquaux, Alexandre Abraham, Philippe Gervais\n# License: simplified BSD\n\nimport csv\nimport numpy as np\n\n\ndef _asarray(arr, dtype=None, order=None):\n # np.asarray does not take \"K\" and \"A\" orders in version 1.3.0\n if order in (\"K\", \"A\", None):\n if (arr.itemsize == 1 and dtype in (bool, np.bool_)) \\\n or (arr.dtype in (bool, np.bool_) and\n np.dtype(dtype).itemsize == 1):\n ret = arr.view(dtype=dtype)\n else:\n ret = np.asarray(arr, dtype=dtype)\n else:\n if (((arr.itemsize == 1 and dtype in (bool, np.bool)) or\n (arr.dtype in (bool, np.bool_) and np.dtype(dtype).itemsize == 1))\n and (order == \"F\" and arr.flags[\"F_CONTIGUOUS\"]\n or order == \"C\" and arr.flags[\"C_CONTIGUOUS\"])):\n ret = arr.view(dtype=dtype)\n else:\n ret = np.asarray(arr, dtype=dtype, order=order)\n\n return ret\n\n\ndef as_ndarray(arr, copy=False, dtype=None, order='K'):\n \"\"\"Starting with an arbitrary array, convert to numpy.ndarray.\n\n In the case of a memmap array, a copy is automatically made to break the\n link with the underlying file (whatever the value of the \"copy\" keyword).\n\n The purpose of this function is mainly to get rid of memmap objects, but\n it can be used for other purposes. In particular, combining copying and\n casting can lead to performance improvements in some cases, by avoiding\n unnecessary copies.\n\n If not specified, input array order is preserved, in all cases, even when\n a copy is requested.\n\n Caveat: this function does not copy during bool to/from 1-byte dtype\n conversions. This can lead to some surprising results in some rare cases.\n Example:\n\n a = numpy.asarray([0, 1, 2], dtype=numpy.int8)\n b = as_ndarray(a, dtype=bool) # array([False, True, True], dtype=bool)\n c = as_ndarray(b, dtype=numpy.int8) # array([0, 1, 2], dtype=numpy.int8)\n\n The usually expected result for the last line would be array([0, 1, 1])\n because True evaluates to 1. Since there is no copy made here, the original\n array is recovered.\n\n Parameters\n ----------\n arr: array-like\n input array. Any value accepted by numpy.asarray is valid.\n\n copy: bool\n if True, force a copy of the array. Always True when arr is a memmap.\n\n dtype: any numpy dtype\n dtype of the returned array. Performing copy and type conversion at the\n same time can in some cases avoid an additional copy.\n\n order: string\n gives the order of the returned array.\n Valid values are: \"C\", \"F\", \"A\", \"K\", None.\n default is \"K\". 
See ndarray.copy() for more information.\n\n Returns\n -------\n ret: numpy.ndarray\n Numpy array containing the same data as arr, always of class\n numpy.ndarray, and with no link to any underlying file.\n \"\"\"\n # This function should work on numpy 1.3\n # in this version, astype() and copy() have no \"order\" keyword.\n # and asarray() does not accept the \"K\" and \"A\" values for order.\n\n # numpy.asarray never copies a subclass of numpy.ndarray (even for\n # memmaps) when dtype is unchanged.\n # .astype() always copies\n\n if order not in (\"C\", \"F\", \"A\", \"K\", None):\n raise ValueError(\"Invalid value for 'order': %s\" % str(order))\n\n if isinstance(arr, np.memmap):\n if dtype is None:\n if order in (\"K\", \"A\", None):\n ret = np.array(np.asarray(arr), copy=True)\n else:\n ret = np.array(np.asarray(arr), copy=True, order=order)\n else:\n if order in (\"K\", \"A\", None):\n # always copy (even when dtype does not change)\n ret = np.asarray(arr).astype(dtype)\n else:\n # First load data from disk without changing order\n # Changing order while reading through a memmap is incredibly\n # inefficient.\n ret = np.array(arr, copy=True)\n ret = _asarray(arr, dtype=dtype, order=order)\n\n elif isinstance(arr, np.ndarray):\n ret = _asarray(arr, dtype=dtype, order=order)\n # In the present cas, np.may_share_memory result is always reliable.\n if np.may_share_memory(ret, arr) and copy:\n # order-preserving copy\n if ret.flags[\"F_CONTIGUOUS\"]:\n ret = ret.T.copy().T\n else:\n ret = ret.copy()\n\n elif isinstance(arr, (list, tuple)):\n if order in (\"A\", \"K\"):\n ret = np.asarray(arr, dtype=dtype)\n else:\n ret = np.asarray(arr, dtype=dtype, order=order)\n\n else:\n raise ValueError(\"Type not handled: %s\" % arr.__class__)\n\n return ret\n\n\ndef csv_to_array(csv_path, delimiters=' \\t,;', **kwargs):\n \"\"\"Read a CSV file by trying to guess its delimiter\n\n Parameters\n ----------\n csv_path: string\n Path of the CSV file to load.\n\n delimiters: string\n Each character of the delimiters string is a potential delimiters for\n the CSV file.\n\n kwargs: keyword arguments\n The additional keyword arguments are passed to numpy.genfromtxt when\n loading the CSV.\n\n Returns\n -------\n array: numpy.ndarray\n An array containing the data loaded from the CSV file.\n \"\"\"\n if not isinstance(csv_path, str):\n raise TypeError('CSV must be a file path. Got a CSV of type: %s' %\n type(csv_path))\n\n try:\n # First, we try genfromtxt which works in most cases.\n array = np.genfromtxt(csv_path, loose=False, **kwargs)\n except ValueError:\n # There was an error during the conversion to numpy array, probably\n # because the delimiter is wrong.\n # In that case, we try to guess the delimiter.\n try:\n with open(csv_path, 'r') as csv_file:\n dialect = csv.Sniffer().sniff(csv_file.readline(), delimiters)\n except csv.Error as e:\n raise TypeError(\n 'Could not read CSV file [%s]: %s' % (csv_path, e.args[0]))\n\n array = np.genfromtxt(csv_path, delimiter=dialect.delimiter, **kwargs)\n\n return array\n", "path": "nilearn/_utils/numpy_conversions.py"}]}
2620
152
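A short sketch of the corrected memmap branch from the nilearn patch above: the point of the one-character change is that the dtype/order conversion must act on the in-memory copy (`ret`), not be re-read through the memmap (`arr`). The file name `example.dat` and the shapes are assumptions chosen only to make the snippet self-contained.

```python
import numpy as np

# Create a small memory-mapped array so the example is self-contained.
mm = np.memmap("example.dat", dtype=np.float64, mode="w+", shape=(100, 10))

# Corrected sequence: copy out of the memmap first, then convert that copy.
ret = np.array(mm, copy=True)                       # breaks the link to the on-disk file
ret = np.asarray(ret, dtype=np.float32, order="F")  # conversion applies to the copy, not the memmap

print(type(ret).__name__, ret.dtype, ret.flags["F_CONTIGUOUS"])
```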
gh_patches_debug_20921
rasdani/github-patches
git_diff
StackStorm__st2-4512
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Matching action alias for 'pack install xxx' returns an error ##### SUMMARY Noticed by `@ravi` on Slack. When running `st2 action-alias match 'pack install vsphere'` multiple action-aliases are matched, causing an error. This is confusing in two ways: 1) Running that action-alias should not be ambiguous 2) The help message for `st2 action-alias match` says it returns a list of matching aliases, but instead it either returns one or it errors. ##### ISSUE TYPE - Bug Report ##### STACKSTORM VERSION ```shell $ st2 --version st2 2.10.1, on Python 2.7.5 ``` ##### OS / ENVIRONMENT / INSTALL METHOD ```shell # OS $ cat /etc/redhat-release Red Hat Enterprise Linux Server release 7.6 (Maipo) # Install method puppet-st2 ``` ##### STEPS TO REPRODUCE ```shell $ st2 action-alias match 'pack install vsphere' ``` ##### EXPECTED RESULTS ```shell $ st2 action-alias match 'pack install vsphere' +--------------+-----------------------------------+ | name | description | +--------------+-----------------------------------+ | pack_install | Install/upgrade StackStorm packs. | +--------------+-----------------------------------+ ``` ##### ACTUAL RESULTS ```shell $ st2 action-alias match 'pack install vsphere' ERROR: 400 Client Error: Bad Request MESSAGE: Command 'pack install vsphere' matched more than 1 pattern for url: http://127.0.0.1:9101/v1/actionalias/match ``` This is also confusing because `st2 action-alias match --help` says it should return a list of matching aliases, when instead it either returns a single alias or it errors out if multiple are found. ```shell $ st2 action-alias match --help usage: st2 action-alias match [-h] [-t TOKEN] [--api-key API_KEY] [-j] [-y] [-a ATTR [ATTR ...]] [-w WIDTH [WIDTH ...]] command Get the list of action aliases that match the command text. ``` ##### INVESTIGATION It appears that the action-alias `packs.pack_install` has redundant patterns defined: https://github.com/StackStorm/st2/blob/master/contrib/packs/aliases/pack_install.yaml#L7-L12 I think the simplest fix is to remove the redundant pattern and change the display text for the one that is left behind. </issue> <code> [start of st2client/st2client/commands/action_alias.py] 1 # Licensed to the StackStorm, Inc ('StackStorm') under one or more 2 # contributor license agreements. See the NOTICE file distributed with 3 # this work for additional information regarding copyright ownership. 4 # The ASF licenses this file to You under the Apache License, Version 2.0 5 # (the "License"); you may not use this file except in compliance with 6 # the License. You may obtain a copy of the License at 7 # 8 # http://www.apache.org/licenses/LICENSE-2.0 9 # 10 # Unless required by applicable law or agreed to in writing, software 11 # distributed under the License is distributed on an "AS IS" BASIS, 12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 # See the License for the specific language governing permissions and 14 # limitations under the License. 
15 16 from __future__ import absolute_import 17 18 from st2client.models import core 19 from st2client.models.action_alias import ActionAlias 20 from st2client.models.action_alias import ActionAliasMatch 21 from st2client.commands import resource 22 from st2client.formatters import table 23 24 25 __all__ = [ 26 'ActionAliasBranch', 27 'ActionAliasMatchCommand', 28 'ActionAliasExecuteCommand' 29 ] 30 31 32 class ActionAliasBranch(resource.ResourceBranch): 33 def __init__(self, description, app, subparsers, parent_parser=None): 34 super(ActionAliasBranch, self).__init__( 35 ActionAlias, description, app, subparsers, 36 parent_parser=parent_parser, read_only=False, 37 commands={ 38 'list': ActionAliasListCommand, 39 'get': ActionAliasGetCommand 40 }) 41 42 self.commands['match'] = ActionAliasMatchCommand( 43 self.resource, self.app, self.subparsers, 44 add_help=True) 45 self.commands['execute'] = ActionAliasExecuteCommand( 46 self.resource, self.app, self.subparsers, 47 add_help=True) 48 49 50 class ActionAliasListCommand(resource.ContentPackResourceListCommand): 51 display_attributes = ['ref', 'pack', 'description', 'enabled'] 52 53 54 class ActionAliasGetCommand(resource.ContentPackResourceGetCommand): 55 display_attributes = ['all'] 56 attribute_display_order = ['id', 'ref', 'pack', 'name', 'description', 57 'enabled', 'action_ref', 'formats'] 58 59 60 class ActionAliasMatchCommand(resource.ResourceCommand): 61 display_attributes = ['name', 'description'] 62 63 def __init__(self, resource, *args, **kwargs): 64 super(ActionAliasMatchCommand, self).__init__( 65 resource, 'match', 66 'Get the list of %s that match the command text.' % 67 resource.get_plural_display_name().lower(), 68 *args, **kwargs) 69 70 self.parser.add_argument('match_text', 71 metavar='command', 72 help=('Get the list of %s that match the command text.' % 73 resource.get_display_name().lower())) 74 self.parser.add_argument('-a', '--attr', nargs='+', 75 default=self.display_attributes, 76 help=('List of attributes to include in the ' 77 'output. "all" will return all ' 78 'attributes.')) 79 self.parser.add_argument('-w', '--width', nargs='+', type=int, 80 default=None, 81 help=('Set the width of columns in output.')) 82 83 @resource.add_auth_token_to_kwargs_from_cli 84 def run(self, args, **kwargs): 85 alias_match = ActionAliasMatch() 86 alias_match.command = args.match_text 87 88 match, _ = self.manager.match(alias_match, **kwargs) 89 return [match] 90 91 def run_and_print(self, args, **kwargs): 92 instances = self.run(args, **kwargs) 93 self.print_output(instances, table.MultiColumnTable, 94 attributes=args.attr, widths=args.width, 95 json=args.json, yaml=args.yaml) 96 97 98 class ActionAliasExecuteCommand(resource.ResourceCommand): 99 display_attributes = ['name'] 100 101 def __init__(self, resource, *args, **kwargs): 102 super(ActionAliasExecuteCommand, self).__init__( 103 resource, 'execute', 104 ('Execute the command text by finding a matching %s.' % 105 resource.get_display_name().lower()), *args, **kwargs) 106 107 self.parser.add_argument('command_text', 108 metavar='command', 109 help=('Execute the command text by finding a matching %s.' 
% 110 resource.get_display_name().lower())) 111 self.parser.add_argument('-u', '--user', type=str, default=None, 112 help='User under which to run the action (admins only).') 113 114 @resource.add_auth_token_to_kwargs_from_cli 115 def run(self, args, **kwargs): 116 payload = core.Resource() 117 payload.command = args.command_text 118 payload.user = args.user 119 payload.source_channel = 'cli' 120 121 alias_execution_mgr = self.app.client.managers['ActionAliasExecution'] 122 execution = alias_execution_mgr.match_and_execute(payload) 123 return execution 124 125 def run_and_print(self, args, **kwargs): 126 execution = self.run(args, **kwargs) 127 print("Matching Action-alias: '%s'" % execution.actionalias['ref']) 128 print("To get the results, execute:\n st2 execution get %s" % 129 (execution.execution['id'])) 130 [end of st2client/st2client/commands/action_alias.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/st2client/st2client/commands/action_alias.py b/st2client/st2client/commands/action_alias.py --- a/st2client/st2client/commands/action_alias.py +++ b/st2client/st2client/commands/action_alias.py @@ -63,13 +63,13 @@ def __init__(self, resource, *args, **kwargs): super(ActionAliasMatchCommand, self).__init__( resource, 'match', - 'Get the list of %s that match the command text.' % - resource.get_plural_display_name().lower(), + 'Get the %s that match the command text.' % + resource.get_display_name().lower(), *args, **kwargs) self.parser.add_argument('match_text', metavar='command', - help=('Get the list of %s that match the command text.' % + help=('Get the %s that match the command text.' % resource.get_display_name().lower())) self.parser.add_argument('-a', '--attr', nargs='+', default=self.display_attributes,
{"golden_diff": "diff --git a/st2client/st2client/commands/action_alias.py b/st2client/st2client/commands/action_alias.py\n--- a/st2client/st2client/commands/action_alias.py\n+++ b/st2client/st2client/commands/action_alias.py\n@@ -63,13 +63,13 @@\n def __init__(self, resource, *args, **kwargs):\n super(ActionAliasMatchCommand, self).__init__(\n resource, 'match',\n- 'Get the list of %s that match the command text.' %\n- resource.get_plural_display_name().lower(),\n+ 'Get the %s that match the command text.' %\n+ resource.get_display_name().lower(),\n *args, **kwargs)\n \n self.parser.add_argument('match_text',\n metavar='command',\n- help=('Get the list of %s that match the command text.' %\n+ help=('Get the %s that match the command text.' %\n resource.get_display_name().lower()))\n self.parser.add_argument('-a', '--attr', nargs='+',\n default=self.display_attributes,\n", "issue": "Matching action alias for 'pack install xxx' returns an error\n##### SUMMARY\r\n\r\nNoticed by `@ravi` on Slack.\r\n\r\nWhen running `st2 action-alias match 'pack install vsphere'` multiple action-aliases are matched, causing an error.\r\n\r\nThis is confusing in two ways: \r\n1) Running that action-alias should not be ambiguous\r\n2) The help message for `st2 action-alias match` says it returns a list of matching aliases, but instead it either returns one or it errors.\r\n\r\n##### ISSUE TYPE\r\n - Bug Report\r\n\r\n##### STACKSTORM VERSION\r\n```shell\r\n$ st2 --version\r\nst2 2.10.1, on Python 2.7.5\r\n```\r\n\r\n##### OS / ENVIRONMENT / INSTALL METHOD\r\n```shell\r\n# OS\r\n$ cat /etc/redhat-release \r\nRed Hat Enterprise Linux Server release 7.6 (Maipo)\r\n\r\n# Install method\r\npuppet-st2\r\n```\r\n\r\n##### STEPS TO REPRODUCE\r\n```shell\r\n$ st2 action-alias match 'pack install vsphere'\r\n```\r\n\r\n##### EXPECTED RESULTS\r\n```shell\r\n$ st2 action-alias match 'pack install vsphere'\r\n+--------------+-----------------------------------+\r\n| name | description |\r\n+--------------+-----------------------------------+\r\n| pack_install | Install/upgrade StackStorm packs. |\r\n+--------------+-----------------------------------+\r\n```\r\n\r\n##### ACTUAL RESULTS\r\n```shell\r\n$ st2 action-alias match 'pack install vsphere'\r\nERROR: 400 Client Error: Bad Request\r\nMESSAGE: Command 'pack install vsphere' matched more than 1 pattern for url: http://127.0.0.1:9101/v1/actionalias/match\r\n```\r\n\r\nThis is also confusing because `st2 action-alias match --help` says it should return a list of matching aliases, when instead it either returns a single alias or it errors out if multiple are found.\r\n\r\n```shell\r\n$ st2 action-alias match --help\r\nusage: st2 action-alias match [-h] [-t TOKEN] [--api-key API_KEY] [-j] [-y]\r\n [-a ATTR [ATTR ...]] [-w WIDTH [WIDTH ...]]\r\n command\r\n\r\nGet the list of action aliases that match the command text.\r\n```\r\n\r\n##### INVESTIGATION\r\n\r\nIt appears that the action-alias `packs.pack_install` has redundant patterns defined: https://github.com/StackStorm/st2/blob/master/contrib/packs/aliases/pack_install.yaml#L7-L12\r\n\r\nI think the simplest fix is to remove the redundant pattern and change the display text for the one that is left behind.\n", "before_files": [{"content": "# Licensed to the StackStorm, Inc ('StackStorm') under one or more\n# contributor license agreements. 
See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\n\nfrom st2client.models import core\nfrom st2client.models.action_alias import ActionAlias\nfrom st2client.models.action_alias import ActionAliasMatch\nfrom st2client.commands import resource\nfrom st2client.formatters import table\n\n\n__all__ = [\n 'ActionAliasBranch',\n 'ActionAliasMatchCommand',\n 'ActionAliasExecuteCommand'\n]\n\n\nclass ActionAliasBranch(resource.ResourceBranch):\n def __init__(self, description, app, subparsers, parent_parser=None):\n super(ActionAliasBranch, self).__init__(\n ActionAlias, description, app, subparsers,\n parent_parser=parent_parser, read_only=False,\n commands={\n 'list': ActionAliasListCommand,\n 'get': ActionAliasGetCommand\n })\n\n self.commands['match'] = ActionAliasMatchCommand(\n self.resource, self.app, self.subparsers,\n add_help=True)\n self.commands['execute'] = ActionAliasExecuteCommand(\n self.resource, self.app, self.subparsers,\n add_help=True)\n\n\nclass ActionAliasListCommand(resource.ContentPackResourceListCommand):\n display_attributes = ['ref', 'pack', 'description', 'enabled']\n\n\nclass ActionAliasGetCommand(resource.ContentPackResourceGetCommand):\n display_attributes = ['all']\n attribute_display_order = ['id', 'ref', 'pack', 'name', 'description',\n 'enabled', 'action_ref', 'formats']\n\n\nclass ActionAliasMatchCommand(resource.ResourceCommand):\n display_attributes = ['name', 'description']\n\n def __init__(self, resource, *args, **kwargs):\n super(ActionAliasMatchCommand, self).__init__(\n resource, 'match',\n 'Get the list of %s that match the command text.' %\n resource.get_plural_display_name().lower(),\n *args, **kwargs)\n\n self.parser.add_argument('match_text',\n metavar='command',\n help=('Get the list of %s that match the command text.' %\n resource.get_display_name().lower()))\n self.parser.add_argument('-a', '--attr', nargs='+',\n default=self.display_attributes,\n help=('List of attributes to include in the '\n 'output. 
\"all\" will return all '\n 'attributes.'))\n self.parser.add_argument('-w', '--width', nargs='+', type=int,\n default=None,\n help=('Set the width of columns in output.'))\n\n @resource.add_auth_token_to_kwargs_from_cli\n def run(self, args, **kwargs):\n alias_match = ActionAliasMatch()\n alias_match.command = args.match_text\n\n match, _ = self.manager.match(alias_match, **kwargs)\n return [match]\n\n def run_and_print(self, args, **kwargs):\n instances = self.run(args, **kwargs)\n self.print_output(instances, table.MultiColumnTable,\n attributes=args.attr, widths=args.width,\n json=args.json, yaml=args.yaml)\n\n\nclass ActionAliasExecuteCommand(resource.ResourceCommand):\n display_attributes = ['name']\n\n def __init__(self, resource, *args, **kwargs):\n super(ActionAliasExecuteCommand, self).__init__(\n resource, 'execute',\n ('Execute the command text by finding a matching %s.' %\n resource.get_display_name().lower()), *args, **kwargs)\n\n self.parser.add_argument('command_text',\n metavar='command',\n help=('Execute the command text by finding a matching %s.' %\n resource.get_display_name().lower()))\n self.parser.add_argument('-u', '--user', type=str, default=None,\n help='User under which to run the action (admins only).')\n\n @resource.add_auth_token_to_kwargs_from_cli\n def run(self, args, **kwargs):\n payload = core.Resource()\n payload.command = args.command_text\n payload.user = args.user\n payload.source_channel = 'cli'\n\n alias_execution_mgr = self.app.client.managers['ActionAliasExecution']\n execution = alias_execution_mgr.match_and_execute(payload)\n return execution\n\n def run_and_print(self, args, **kwargs):\n execution = self.run(args, **kwargs)\n print(\"Matching Action-alias: '%s'\" % execution.actionalias['ref'])\n print(\"To get the results, execute:\\n st2 execution get %s\" %\n (execution.execution['id']))\n", "path": "st2client/st2client/commands/action_alias.py"}]}
2,473
233
gh_patches_debug_13766
rasdani/github-patches
git_diff
cobbler__cobbler-3396
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [Backport] autoinstall_templates are installed into /var/lib/cobbler/templates ### Original feature issue - PR: #2590 ### Target release - [ ] release33 - [x] release32 - [ ] release30 ### Reason Stabilization of Cobbler 3.2.x in Fedora Ecosystem. </issue> <code> [start of cobbler/actions/sync.py] 1 """ 2 Builds out filesystem trees/data based on the object tree. 3 This is the code behind 'cobbler sync'. 4 5 Copyright 2006-2009, Red Hat, Inc and Others 6 Michael DeHaan <michael.dehaan AT gmail> 7 8 This program is free software; you can redistribute it and/or modify 9 it under the terms of the GNU General Public License as published by 10 the Free Software Foundation; either version 2 of the License, or 11 (at your option) any later version. 12 13 This program is distributed in the hope that it will be useful, 14 but WITHOUT ANY WARRANTY; without even the implied warranty of 15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 GNU General Public License for more details. 17 18 You should have received a copy of the GNU General Public License 19 along with this program; if not, write to the Free Software 20 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 21 02110-1301 USA 22 """ 23 24 import glob 25 import os 26 import time 27 28 from cobbler.cexceptions import CX 29 from cobbler import clogger 30 from cobbler import templar 31 from cobbler import tftpgen 32 from cobbler import utils 33 34 35 class CobblerSync: 36 """ 37 Handles conversion of internal state to the tftpboot tree layout 38 """ 39 40 def __init__(self, collection_mgr, verbose: bool = True, dhcp=None, dns=None, logger=None, tftpd=None): 41 """ 42 Constructor 43 44 :param collection_mgr: The collection manager instance which holds all information about cobbler. 45 :param verbose: Whether to log the actions performed in this module verbose or not. 46 :param dhcp: The DHCP manager which can update the DHCP config. 47 :param dns: The DNS manager which can update the DNS config. 48 :param logger: The logger to audit all action with. 49 :param tftpd: The TFTP manager which can update the TFTP config. 50 """ 51 self.logger = logger 52 if logger is None: 53 self.logger = clogger.Logger() 54 55 self.verbose = verbose 56 self.collection_mgr = collection_mgr 57 self.api = collection_mgr.api 58 self.distros = collection_mgr.distros() 59 self.profiles = collection_mgr.profiles() 60 self.systems = collection_mgr.systems() 61 self.settings = collection_mgr.settings() 62 self.repos = collection_mgr.repos() 63 self.templar = templar.Templar(collection_mgr, self.logger) 64 self.tftpgen = tftpgen.TFTPGen(collection_mgr, self.logger) 65 self.dns = dns 66 self.dhcp = dhcp 67 self.tftpd = tftpd 68 self.bootloc = self.settings.tftpboot_location 69 self.tftpgen.verbose = verbose 70 self.dns.verbose = verbose 71 self.dhcp.verbose = verbose 72 73 self.pxelinux_dir = os.path.join(self.bootloc, "pxelinux.cfg") 74 self.grub_dir = os.path.join(self.bootloc, "grub") 75 self.images_dir = os.path.join(self.bootloc, "images") 76 self.yaboot_bin_dir = os.path.join(self.bootloc, "ppc") 77 self.yaboot_cfg_dir = os.path.join(self.bootloc, "etc") 78 self.rendered_dir = os.path.join(self.settings.webdir, "rendered") 79 80 def run(self): 81 """ 82 Syncs the current configuration file with the config tree. 
83 Using the ``Check().run_`` functions previously is recommended 84 """ 85 if not os.path.exists(self.bootloc): 86 utils.die(self.logger, "cannot find directory: %s" % self.bootloc) 87 88 self.logger.info("running pre-sync triggers") 89 90 # run pre-triggers... 91 utils.run_triggers(self.api, None, "/var/lib/cobbler/triggers/sync/pre/*") 92 93 self.distros = self.collection_mgr.distros() 94 self.profiles = self.collection_mgr.profiles() 95 self.systems = self.collection_mgr.systems() 96 self.settings = self.collection_mgr.settings() 97 self.repos = self.collection_mgr.repos() 98 99 # execute the core of the sync operation 100 self.logger.info("cleaning trees") 101 self.clean_trees() 102 103 # Have the tftpd module handle copying bootloaders, distros, images, and all_system_files 104 self.tftpd.sync(self.verbose) 105 # Copy distros to the webdir 106 # Adding in the exception handling to not blow up if files have been moved (or the path references an NFS 107 # directory that's no longer mounted) 108 for d in self.distros: 109 try: 110 self.logger.info("copying files for distro: %s" % d.name) 111 self.tftpgen.copy_single_distro_files(d, self.settings.webdir, True) 112 self.tftpgen.write_templates(d, write_file=True) 113 except CX as e: 114 self.logger.error(e.value) 115 116 # make the default pxe menu anyway... 117 self.tftpgen.make_pxe_menu() 118 119 if self.settings.manage_dhcp: 120 self.write_dhcp() 121 if self.settings.manage_dns: 122 self.logger.info("rendering DNS files") 123 self.dns.regen_hosts() 124 self.dns.write_dns_files() 125 126 if self.settings.manage_tftpd: 127 # copy in boot_files 128 self.tftpd.write_boot_files() 129 130 self.logger.info("cleaning link caches") 131 self.clean_link_cache() 132 133 if self.settings.manage_rsync: 134 self.logger.info("rendering Rsync files") 135 self.rsync_gen() 136 137 # run post-triggers 138 self.logger.info("running post-sync triggers") 139 utils.run_triggers(self.api, None, "/var/lib/cobbler/triggers/sync/post/*", logger=self.logger) 140 utils.run_triggers(self.api, None, "/var/lib/cobbler/triggers/change/*", logger=self.logger) 141 142 def make_tftpboot(self): 143 """ 144 Make directories for tftpboot images 145 """ 146 if not os.path.exists(self.pxelinux_dir): 147 utils.mkdir(self.pxelinux_dir, logger=self.logger) 148 if not os.path.exists(self.grub_dir): 149 utils.mkdir(self.grub_dir, logger=self.logger) 150 grub_images_link = os.path.join(self.grub_dir, "images") 151 if not os.path.exists(grub_images_link): 152 os.symlink("../images", grub_images_link) 153 if not os.path.exists(self.images_dir): 154 utils.mkdir(self.images_dir, logger=self.logger) 155 if not os.path.exists(self.rendered_dir): 156 utils.mkdir(self.rendered_dir, logger=self.logger) 157 if not os.path.exists(self.yaboot_bin_dir): 158 utils.mkdir(self.yaboot_bin_dir, logger=self.logger) 159 if not os.path.exists(self.yaboot_cfg_dir): 160 utils.mkdir(self.yaboot_cfg_dir, logger=self.logger) 161 162 def clean_trees(self): 163 """ 164 Delete any previously built pxelinux.cfg tree and virt tree info and then create directories. 165 166 Note: for SELinux reasons, some information goes in ``/tftpboot``, some in ``/var/www/cobbler`` and some must be 167 duplicated in both. This is because PXE needs tftp, and automatic installation and Virt operations need http. 168 Only the kernel and initrd images are duplicated, which is unfortunate, though SELinux won't let me give them 169 two contexts, so symlinks are not a solution. *Otherwise* duplication is minimal. 
170 """ 171 172 # clean out parts of webdir and all of /tftpboot/images and /tftpboot/pxelinux.cfg 173 for x in os.listdir(self.settings.webdir): 174 path = os.path.join(self.settings.webdir, x) 175 if os.path.isfile(path): 176 if not x.endswith(".py"): 177 utils.rmfile(path, logger=self.logger) 178 if os.path.isdir(path): 179 if x not in self.settings.webdir_whitelist: 180 # delete directories that shouldn't exist 181 utils.rmtree(path, logger=self.logger) 182 if x in ["autoinstall_templates", "autoinstall_templates_sys", "images", "systems", "distros", "profiles", "repo_profile", "repo_system", "rendered"]: 183 # clean out directory contents 184 utils.rmtree_contents(path, logger=self.logger) 185 # 186 self.make_tftpboot() 187 utils.rmtree_contents(self.pxelinux_dir, logger=self.logger) 188 utils.rmtree_contents(self.grub_dir, logger=self.logger) 189 utils.rmtree_contents(self.images_dir, logger=self.logger) 190 utils.rmtree_contents(self.yaboot_bin_dir, logger=self.logger) 191 utils.rmtree_contents(self.yaboot_cfg_dir, logger=self.logger) 192 utils.rmtree_contents(self.rendered_dir, logger=self.logger) 193 194 def write_dhcp(self): 195 """ 196 Write all files which are associated to DHCP. 197 """ 198 self.logger.info("rendering DHCP files") 199 self.dhcp.write_dhcp_file() 200 self.dhcp.regen_ethers() 201 202 def sync_dhcp(self): 203 """ 204 This calls write_dhcp and restarts the DHCP server. 205 """ 206 if self.settings.manage_dhcp: 207 self.write_dhcp() 208 self.dhcp.sync_dhcp() 209 210 def clean_link_cache(self): 211 """ 212 All files which are linked into the cache will be deleted so the cache can be rebuild. 213 """ 214 for dirtree in [os.path.join(self.bootloc, 'images'), self.settings.webdir]: 215 cachedir = os.path.join(dirtree, '.link_cache') 216 if os.path.isdir(cachedir): 217 cmd = "find %s -maxdepth 1 -type f -links 1 -exec rm -f '{}' ';'" % cachedir 218 utils.subprocess_call(self.logger, cmd) 219 220 def rsync_gen(self): 221 """ 222 Generate rsync modules of all repositories and distributions 223 """ 224 template_file = "/etc/cobbler/rsync.template" 225 226 try: 227 template = open(template_file, "r") 228 except: 229 raise CX("error reading template %s" % template_file) 230 231 template_data = "" 232 template_data = template.read() 233 template.close() 234 235 distros = [] 236 237 for link in glob.glob(os.path.join(self.settings.webdir, 'links', '*')): 238 distro = {} 239 distro["path"] = os.path.realpath(link) 240 distro["name"] = os.path.basename(link) 241 distros.append(distro) 242 243 repos = [repo.name for repo in self.api.repos() 244 if os.path.isdir(os.path.join(self.settings.webdir, "repo_mirror", repo.name))] 245 246 metadata = { 247 "date": time.asctime(time.gmtime()), 248 "cobbler_server": self.settings.server, 249 "distros": distros, 250 "repos": repos, 251 "webdir": self.settings.webdir 252 } 253 254 self.templar.render(template_data, metadata, "/etc/rsyncd.conf") 255 [end of cobbler/actions/sync.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/cobbler/actions/sync.py b/cobbler/actions/sync.py --- a/cobbler/actions/sync.py +++ b/cobbler/actions/sync.py @@ -179,7 +179,7 @@ if x not in self.settings.webdir_whitelist: # delete directories that shouldn't exist utils.rmtree(path, logger=self.logger) - if x in ["autoinstall_templates", "autoinstall_templates_sys", "images", "systems", "distros", "profiles", "repo_profile", "repo_system", "rendered"]: + if x in ["templates", "images", "systems", "distros", "profiles", "repo_profile", "repo_system", "rendered"]: # clean out directory contents utils.rmtree_contents(path, logger=self.logger) #
{"golden_diff": "diff --git a/cobbler/actions/sync.py b/cobbler/actions/sync.py\n--- a/cobbler/actions/sync.py\n+++ b/cobbler/actions/sync.py\n@@ -179,7 +179,7 @@\n if x not in self.settings.webdir_whitelist:\n # delete directories that shouldn't exist\n utils.rmtree(path, logger=self.logger)\n- if x in [\"autoinstall_templates\", \"autoinstall_templates_sys\", \"images\", \"systems\", \"distros\", \"profiles\", \"repo_profile\", \"repo_system\", \"rendered\"]:\n+ if x in [\"templates\", \"images\", \"systems\", \"distros\", \"profiles\", \"repo_profile\", \"repo_system\", \"rendered\"]:\n # clean out directory contents\n utils.rmtree_contents(path, logger=self.logger)\n #\n", "issue": "[Backport] autoinstall_templates are installed into /var/lib/cobbler/templates\n### Original feature issue\r\n\r\n- PR: #2590\r\n\r\n### Target release\r\n\r\n- [ ] release33\r\n- [x] release32\r\n- [ ] release30\r\n\r\n### Reason\r\n\r\nStabilization of Cobbler 3.2.x in Fedora Ecosystem.\r\n\n", "before_files": [{"content": "\"\"\"\nBuilds out filesystem trees/data based on the object tree.\nThis is the code behind 'cobbler sync'.\n\nCopyright 2006-2009, Red Hat, Inc and Others\nMichael DeHaan <michael.dehaan AT gmail>\n\nThis program is free software; you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation; either version 2 of the License, or\n(at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program; if not, write to the Free Software\nFoundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA\n02110-1301 USA\n\"\"\"\n\nimport glob\nimport os\nimport time\n\nfrom cobbler.cexceptions import CX\nfrom cobbler import clogger\nfrom cobbler import templar\nfrom cobbler import tftpgen\nfrom cobbler import utils\n\n\nclass CobblerSync:\n \"\"\"\n Handles conversion of internal state to the tftpboot tree layout\n \"\"\"\n\n def __init__(self, collection_mgr, verbose: bool = True, dhcp=None, dns=None, logger=None, tftpd=None):\n \"\"\"\n Constructor\n\n :param collection_mgr: The collection manager instance which holds all information about cobbler.\n :param verbose: Whether to log the actions performed in this module verbose or not.\n :param dhcp: The DHCP manager which can update the DHCP config.\n :param dns: The DNS manager which can update the DNS config.\n :param logger: The logger to audit all action with.\n :param tftpd: The TFTP manager which can update the TFTP config.\n \"\"\"\n self.logger = logger\n if logger is None:\n self.logger = clogger.Logger()\n\n self.verbose = verbose\n self.collection_mgr = collection_mgr\n self.api = collection_mgr.api\n self.distros = collection_mgr.distros()\n self.profiles = collection_mgr.profiles()\n self.systems = collection_mgr.systems()\n self.settings = collection_mgr.settings()\n self.repos = collection_mgr.repos()\n self.templar = templar.Templar(collection_mgr, self.logger)\n self.tftpgen = tftpgen.TFTPGen(collection_mgr, self.logger)\n self.dns = dns\n self.dhcp = dhcp\n self.tftpd = tftpd\n self.bootloc = self.settings.tftpboot_location\n self.tftpgen.verbose = verbose\n self.dns.verbose = verbose\n self.dhcp.verbose = verbose\n\n self.pxelinux_dir = 
os.path.join(self.bootloc, \"pxelinux.cfg\")\n self.grub_dir = os.path.join(self.bootloc, \"grub\")\n self.images_dir = os.path.join(self.bootloc, \"images\")\n self.yaboot_bin_dir = os.path.join(self.bootloc, \"ppc\")\n self.yaboot_cfg_dir = os.path.join(self.bootloc, \"etc\")\n self.rendered_dir = os.path.join(self.settings.webdir, \"rendered\")\n\n def run(self):\n \"\"\"\n Syncs the current configuration file with the config tree.\n Using the ``Check().run_`` functions previously is recommended\n \"\"\"\n if not os.path.exists(self.bootloc):\n utils.die(self.logger, \"cannot find directory: %s\" % self.bootloc)\n\n self.logger.info(\"running pre-sync triggers\")\n\n # run pre-triggers...\n utils.run_triggers(self.api, None, \"/var/lib/cobbler/triggers/sync/pre/*\")\n\n self.distros = self.collection_mgr.distros()\n self.profiles = self.collection_mgr.profiles()\n self.systems = self.collection_mgr.systems()\n self.settings = self.collection_mgr.settings()\n self.repos = self.collection_mgr.repos()\n\n # execute the core of the sync operation\n self.logger.info(\"cleaning trees\")\n self.clean_trees()\n\n # Have the tftpd module handle copying bootloaders, distros, images, and all_system_files\n self.tftpd.sync(self.verbose)\n # Copy distros to the webdir\n # Adding in the exception handling to not blow up if files have been moved (or the path references an NFS\n # directory that's no longer mounted)\n for d in self.distros:\n try:\n self.logger.info(\"copying files for distro: %s\" % d.name)\n self.tftpgen.copy_single_distro_files(d, self.settings.webdir, True)\n self.tftpgen.write_templates(d, write_file=True)\n except CX as e:\n self.logger.error(e.value)\n\n # make the default pxe menu anyway...\n self.tftpgen.make_pxe_menu()\n\n if self.settings.manage_dhcp:\n self.write_dhcp()\n if self.settings.manage_dns:\n self.logger.info(\"rendering DNS files\")\n self.dns.regen_hosts()\n self.dns.write_dns_files()\n\n if self.settings.manage_tftpd:\n # copy in boot_files\n self.tftpd.write_boot_files()\n\n self.logger.info(\"cleaning link caches\")\n self.clean_link_cache()\n\n if self.settings.manage_rsync:\n self.logger.info(\"rendering Rsync files\")\n self.rsync_gen()\n\n # run post-triggers\n self.logger.info(\"running post-sync triggers\")\n utils.run_triggers(self.api, None, \"/var/lib/cobbler/triggers/sync/post/*\", logger=self.logger)\n utils.run_triggers(self.api, None, \"/var/lib/cobbler/triggers/change/*\", logger=self.logger)\n\n def make_tftpboot(self):\n \"\"\"\n Make directories for tftpboot images\n \"\"\"\n if not os.path.exists(self.pxelinux_dir):\n utils.mkdir(self.pxelinux_dir, logger=self.logger)\n if not os.path.exists(self.grub_dir):\n utils.mkdir(self.grub_dir, logger=self.logger)\n grub_images_link = os.path.join(self.grub_dir, \"images\")\n if not os.path.exists(grub_images_link):\n os.symlink(\"../images\", grub_images_link)\n if not os.path.exists(self.images_dir):\n utils.mkdir(self.images_dir, logger=self.logger)\n if not os.path.exists(self.rendered_dir):\n utils.mkdir(self.rendered_dir, logger=self.logger)\n if not os.path.exists(self.yaboot_bin_dir):\n utils.mkdir(self.yaboot_bin_dir, logger=self.logger)\n if not os.path.exists(self.yaboot_cfg_dir):\n utils.mkdir(self.yaboot_cfg_dir, logger=self.logger)\n\n def clean_trees(self):\n \"\"\"\n Delete any previously built pxelinux.cfg tree and virt tree info and then create directories.\n\n Note: for SELinux reasons, some information goes in ``/tftpboot``, some in ``/var/www/cobbler`` and some must be\n 
duplicated in both. This is because PXE needs tftp, and automatic installation and Virt operations need http.\n Only the kernel and initrd images are duplicated, which is unfortunate, though SELinux won't let me give them\n two contexts, so symlinks are not a solution. *Otherwise* duplication is minimal.\n \"\"\"\n\n # clean out parts of webdir and all of /tftpboot/images and /tftpboot/pxelinux.cfg\n for x in os.listdir(self.settings.webdir):\n path = os.path.join(self.settings.webdir, x)\n if os.path.isfile(path):\n if not x.endswith(\".py\"):\n utils.rmfile(path, logger=self.logger)\n if os.path.isdir(path):\n if x not in self.settings.webdir_whitelist:\n # delete directories that shouldn't exist\n utils.rmtree(path, logger=self.logger)\n if x in [\"autoinstall_templates\", \"autoinstall_templates_sys\", \"images\", \"systems\", \"distros\", \"profiles\", \"repo_profile\", \"repo_system\", \"rendered\"]:\n # clean out directory contents\n utils.rmtree_contents(path, logger=self.logger)\n #\n self.make_tftpboot()\n utils.rmtree_contents(self.pxelinux_dir, logger=self.logger)\n utils.rmtree_contents(self.grub_dir, logger=self.logger)\n utils.rmtree_contents(self.images_dir, logger=self.logger)\n utils.rmtree_contents(self.yaboot_bin_dir, logger=self.logger)\n utils.rmtree_contents(self.yaboot_cfg_dir, logger=self.logger)\n utils.rmtree_contents(self.rendered_dir, logger=self.logger)\n\n def write_dhcp(self):\n \"\"\"\n Write all files which are associated to DHCP.\n \"\"\"\n self.logger.info(\"rendering DHCP files\")\n self.dhcp.write_dhcp_file()\n self.dhcp.regen_ethers()\n\n def sync_dhcp(self):\n \"\"\"\n This calls write_dhcp and restarts the DHCP server.\n \"\"\"\n if self.settings.manage_dhcp:\n self.write_dhcp()\n self.dhcp.sync_dhcp()\n\n def clean_link_cache(self):\n \"\"\"\n All files which are linked into the cache will be deleted so the cache can be rebuild.\n \"\"\"\n for dirtree in [os.path.join(self.bootloc, 'images'), self.settings.webdir]:\n cachedir = os.path.join(dirtree, '.link_cache')\n if os.path.isdir(cachedir):\n cmd = \"find %s -maxdepth 1 -type f -links 1 -exec rm -f '{}' ';'\" % cachedir\n utils.subprocess_call(self.logger, cmd)\n\n def rsync_gen(self):\n \"\"\"\n Generate rsync modules of all repositories and distributions\n \"\"\"\n template_file = \"/etc/cobbler/rsync.template\"\n\n try:\n template = open(template_file, \"r\")\n except:\n raise CX(\"error reading template %s\" % template_file)\n\n template_data = \"\"\n template_data = template.read()\n template.close()\n\n distros = []\n\n for link in glob.glob(os.path.join(self.settings.webdir, 'links', '*')):\n distro = {}\n distro[\"path\"] = os.path.realpath(link)\n distro[\"name\"] = os.path.basename(link)\n distros.append(distro)\n\n repos = [repo.name for repo in self.api.repos()\n if os.path.isdir(os.path.join(self.settings.webdir, \"repo_mirror\", repo.name))]\n\n metadata = {\n \"date\": time.asctime(time.gmtime()),\n \"cobbler_server\": self.settings.server,\n \"distros\": distros,\n \"repos\": repos,\n \"webdir\": self.settings.webdir\n }\n\n self.templar.render(template_data, metadata, \"/etc/rsyncd.conf\")\n", "path": "cobbler/actions/sync.py"}]}
3,621
177
gh_patches_debug_21806
rasdani/github-patches
git_diff
psychopy__psychopy-4624
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> 404 errors on some of the page in the HELP menus Version used - psychopy: 2021.2.3 https://www.psychopy.org/builder/builder.html https://www.psychopy.org/api/api.html </issue> <code> [start of psychopy/app/urls.py] 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 4 """A central location to store information about urls 5 """ 6 urls = dict() 7 8 # links based on string names 9 urls['builder'] = "https://www.psychopy.org/builder/builder.html" 10 urls['builder.loops'] = "https://www.psychopy.org/builder/flow.html#loops" 11 # NB. builder components get their urls defined by the component code 12 # (so a custom component can have a url) 13 14 urls['downloads'] = "https://github.com/psychopy/psychopy/releases" 15 urls['changelog'] = "https://www.psychopy.org/changelog.html" 16 17 general = "https://www.psychopy.org/general/" 18 urls['prefs'] = general + "prefs.html" 19 urls['prefs.general'] = general + "prefs.html#general-settings" 20 urls['prefs.app'] = general + "prefs.html#application-settings" 21 urls['prefs.coder'] = general + "prefs.html#coder-settings" 22 urls['prefs.builder'] = general + "prefs.html#builder-settings" 23 urls['prefs.connections'] = general + "prefs.html#connection-settings" 24 25 # links keyed by wxIDs (e.g. menu item IDs) 26 urls['psychopyHome'] = "https://www.psychopy.org/" 27 urls['psychopyReference'] = "https://www.psychopy.org/api/api.html" 28 urls['coderTutorial'] = "https://www.psychopy.org/coder/tutorial1.html" 29 urls['builderHelp'] = urls['builder'] 30 urls['builderDemos'] = "http://code.google.com/p/psychopy/downloads/list?can=2&q=demos" 31 urls['projsAbout'] = "https://www.psychopy.org/general/projects.html" 32 [end of psychopy/app/urls.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/psychopy/app/urls.py b/psychopy/app/urls.py --- a/psychopy/app/urls.py +++ b/psychopy/app/urls.py @@ -6,7 +6,7 @@ urls = dict() # links based on string names -urls['builder'] = "https://www.psychopy.org/builder/builder.html" +urls['builder'] = "https://www.psychopy.org/builder" urls['builder.loops'] = "https://www.psychopy.org/builder/flow.html#loops" # NB. builder components get their urls defined by the component code # (so a custom component can have a url) @@ -24,7 +24,7 @@ # links keyed by wxIDs (e.g. menu item IDs) urls['psychopyHome'] = "https://www.psychopy.org/" -urls['psychopyReference'] = "https://www.psychopy.org/api/api.html" +urls['psychopyReference'] = "https://www.psychopy.org/api" urls['coderTutorial'] = "https://www.psychopy.org/coder/tutorial1.html" urls['builderHelp'] = urls['builder'] urls['builderDemos'] = "http://code.google.com/p/psychopy/downloads/list?can=2&q=demos"
{"golden_diff": "diff --git a/psychopy/app/urls.py b/psychopy/app/urls.py\n--- a/psychopy/app/urls.py\n+++ b/psychopy/app/urls.py\n@@ -6,7 +6,7 @@\n urls = dict()\n \n # links based on string names\n-urls['builder'] = \"https://www.psychopy.org/builder/builder.html\"\n+urls['builder'] = \"https://www.psychopy.org/builder\"\n urls['builder.loops'] = \"https://www.psychopy.org/builder/flow.html#loops\"\n # NB. builder components get their urls defined by the component code\n # (so a custom component can have a url)\n@@ -24,7 +24,7 @@\n \n # links keyed by wxIDs (e.g. menu item IDs)\n urls['psychopyHome'] = \"https://www.psychopy.org/\"\n-urls['psychopyReference'] = \"https://www.psychopy.org/api/api.html\"\n+urls['psychopyReference'] = \"https://www.psychopy.org/api\"\n urls['coderTutorial'] = \"https://www.psychopy.org/coder/tutorial1.html\"\n urls['builderHelp'] = urls['builder']\n urls['builderDemos'] = \"http://code.google.com/p/psychopy/downloads/list?can=2&q=demos\"\n", "issue": "404 errors on some of the page in the HELP menus\nVersion used\r\n- psychopy: 2021.2.3\r\n\r\nhttps://www.psychopy.org/builder/builder.html\r\nhttps://www.psychopy.org/api/api.html\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"A central location to store information about urls\n\"\"\"\nurls = dict()\n\n# links based on string names\nurls['builder'] = \"https://www.psychopy.org/builder/builder.html\"\nurls['builder.loops'] = \"https://www.psychopy.org/builder/flow.html#loops\"\n# NB. builder components get their urls defined by the component code\n# (so a custom component can have a url)\n\nurls['downloads'] = \"https://github.com/psychopy/psychopy/releases\"\nurls['changelog'] = \"https://www.psychopy.org/changelog.html\"\n\ngeneral = \"https://www.psychopy.org/general/\"\nurls['prefs'] = general + \"prefs.html\"\nurls['prefs.general'] = general + \"prefs.html#general-settings\"\nurls['prefs.app'] = general + \"prefs.html#application-settings\"\nurls['prefs.coder'] = general + \"prefs.html#coder-settings\"\nurls['prefs.builder'] = general + \"prefs.html#builder-settings\"\nurls['prefs.connections'] = general + \"prefs.html#connection-settings\"\n\n# links keyed by wxIDs (e.g. menu item IDs)\nurls['psychopyHome'] = \"https://www.psychopy.org/\"\nurls['psychopyReference'] = \"https://www.psychopy.org/api/api.html\"\nurls['coderTutorial'] = \"https://www.psychopy.org/coder/tutorial1.html\"\nurls['builderHelp'] = urls['builder']\nurls['builderDemos'] = \"http://code.google.com/p/psychopy/downloads/list?can=2&q=demos\"\nurls['projsAbout'] = \"https://www.psychopy.org/general/projects.html\"\n", "path": "psychopy/app/urls.py"}]}
1,016
285
gh_patches_debug_43307
rasdani/github-patches
git_diff
crytic__slither-447
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add "now" to the timestamp detector https://github.com/crytic/slither/blob/7cb6cf4870036f780088fa7dfec83ae3220322e2/slither/detectors/operations/block_timestamp.py#L39-L44 This could also warns about the use of `now` </issue> <code> [start of slither/detectors/operations/block_timestamp.py] 1 """ 2 Module detecting dangerous use of block.timestamp 3 4 """ 5 from slither.core.declarations import Function 6 from slither.analyses.data_dependency.data_dependency import is_tainted, is_dependent 7 from slither.core.declarations.solidity_variables import (SolidityFunction, 8 SolidityVariableComposed) 9 from slither.detectors.abstract_detector import (AbstractDetector, 10 DetectorClassification) 11 from slither.slithir.operations import Binary, BinaryType 12 13 14 class Timestamp(AbstractDetector): 15 """ 16 """ 17 18 ARGUMENT = 'timestamp' 19 HELP = 'Dangerous usage of `block.timestamp`' 20 IMPACT = DetectorClassification.LOW 21 CONFIDENCE = DetectorClassification.MEDIUM 22 23 WIKI = 'https://github.com/crytic/slither/wiki/Detector-Documentation#block-timestamp' 24 25 26 WIKI_TITLE = 'Block timestamp' 27 WIKI_DESCRIPTION = 'Dangerous usage of `block.timestamp`. `block.timestamp` can be manipulated by miners.' 28 WIKI_EXPLOIT_SCENARIO = '''"Bob's contract relies on `block.timestamp` for its randomness. Eve is a miner and manipulates `block.timestamp` to exploit Bob's contract.''' 29 WIKI_RECOMMENDATION = 'Avoid relying on `block.timestamp`.' 30 31 def timestamp(self, func): 32 """ 33 """ 34 35 ret = set() 36 for node in func.nodes: 37 if node.contains_require_or_assert(): 38 for var in node.variables_read: 39 if is_dependent(var, SolidityVariableComposed('block.timestamp'), func.contract): 40 ret.add(node) 41 for ir in node.irs: 42 if isinstance(ir, Binary) and BinaryType.return_bool(ir.type): 43 for var in ir.read: 44 if is_dependent(var, SolidityVariableComposed('block.timestamp'), func.contract): 45 ret.add(node) 46 return list(ret) 47 48 49 def detect_dangerous_timestamp(self, contract): 50 """ 51 Args: 52 contract (Contract) 53 Returns: 54 list((Function), (list (Node))) 55 """ 56 ret = [] 57 for f in [f for f in contract.functions if f.contract_declarer == contract]: 58 nodes = self.timestamp(f) 59 if nodes: 60 ret.append((f, nodes)) 61 return ret 62 63 def _detect(self): 64 """ 65 """ 66 results = [] 67 68 for c in self.contracts: 69 dangerous_timestamp = self.detect_dangerous_timestamp(c) 70 for (func, nodes) in dangerous_timestamp: 71 72 info = [func, " uses timestamp for comparisons\n"] 73 74 info += ['\tDangerous comparisons:\n'] 75 for node in nodes: 76 info += ['\t- ', node, '\n'] 77 78 res = self.generate_result(info) 79 80 results.append(res) 81 82 return results 83 [end of slither/detectors/operations/block_timestamp.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/slither/detectors/operations/block_timestamp.py b/slither/detectors/operations/block_timestamp.py --- a/slither/detectors/operations/block_timestamp.py +++ b/slither/detectors/operations/block_timestamp.py @@ -2,15 +2,51 @@ Module detecting dangerous use of block.timestamp """ -from slither.core.declarations import Function -from slither.analyses.data_dependency.data_dependency import is_tainted, is_dependent -from slither.core.declarations.solidity_variables import (SolidityFunction, - SolidityVariableComposed) +from typing import List, Tuple + +from slither.analyses.data_dependency.data_dependency import is_dependent +from slither.core.cfg.node import Node +from slither.core.declarations import Function, Contract +from slither.core.declarations.solidity_variables import (SolidityVariableComposed, SolidityVariable) from slither.detectors.abstract_detector import (AbstractDetector, DetectorClassification) from slither.slithir.operations import Binary, BinaryType +def _timestamp(func: Function) -> List[Node]: + ret = set() + for node in func.nodes: + if node.contains_require_or_assert(): + for var in node.variables_read: + if is_dependent(var, SolidityVariableComposed('block.timestamp'), func.contract): + ret.add(node) + if is_dependent(var, SolidityVariable('now'), func.contract): + ret.add(node) + for ir in node.irs: + if isinstance(ir, Binary) and BinaryType.return_bool(ir.type): + for var in ir.read: + if is_dependent(var, SolidityVariableComposed('block.timestamp'), func.contract): + ret.add(node) + if is_dependent(var, SolidityVariable('now'), func.contract): + ret.add(node) + return list(ret) + + +def _detect_dangerous_timestamp(contract: Contract) -> List[Tuple[Function, List[Node]]]: + """ + Args: + contract (Contract) + Returns: + list((Function), (list (Node))) + """ + ret = [] + for f in [f for f in contract.functions if f.contract_declarer == contract]: + nodes = _timestamp(f) + if nodes: + ret.append((f, nodes)) + return ret + + class Timestamp(AbstractDetector): """ """ @@ -22,51 +58,18 @@ WIKI = 'https://github.com/crytic/slither/wiki/Detector-Documentation#block-timestamp' - WIKI_TITLE = 'Block timestamp' WIKI_DESCRIPTION = 'Dangerous usage of `block.timestamp`. `block.timestamp` can be manipulated by miners.' WIKI_EXPLOIT_SCENARIO = '''"Bob's contract relies on `block.timestamp` for its randomness. Eve is a miner and manipulates `block.timestamp` to exploit Bob's contract.''' WIKI_RECOMMENDATION = 'Avoid relying on `block.timestamp`.' 
- def timestamp(self, func): - """ - """ - - ret = set() - for node in func.nodes: - if node.contains_require_or_assert(): - for var in node.variables_read: - if is_dependent(var, SolidityVariableComposed('block.timestamp'), func.contract): - ret.add(node) - for ir in node.irs: - if isinstance(ir, Binary) and BinaryType.return_bool(ir.type): - for var in ir.read: - if is_dependent(var, SolidityVariableComposed('block.timestamp'), func.contract): - ret.add(node) - return list(ret) - - - def detect_dangerous_timestamp(self, contract): - """ - Args: - contract (Contract) - Returns: - list((Function), (list (Node))) - """ - ret = [] - for f in [f for f in contract.functions if f.contract_declarer == contract]: - nodes = self.timestamp(f) - if nodes: - ret.append((f, nodes)) - return ret - def _detect(self): """ """ results = [] for c in self.contracts: - dangerous_timestamp = self.detect_dangerous_timestamp(c) + dangerous_timestamp = _detect_dangerous_timestamp(c) for (func, nodes) in dangerous_timestamp: info = [func, " uses timestamp for comparisons\n"]
{"golden_diff": "diff --git a/slither/detectors/operations/block_timestamp.py b/slither/detectors/operations/block_timestamp.py\n--- a/slither/detectors/operations/block_timestamp.py\n+++ b/slither/detectors/operations/block_timestamp.py\n@@ -2,15 +2,51 @@\n Module detecting dangerous use of block.timestamp\n \n \"\"\"\n-from slither.core.declarations import Function\n-from slither.analyses.data_dependency.data_dependency import is_tainted, is_dependent\n-from slither.core.declarations.solidity_variables import (SolidityFunction,\n- SolidityVariableComposed)\n+from typing import List, Tuple\n+\n+from slither.analyses.data_dependency.data_dependency import is_dependent\n+from slither.core.cfg.node import Node\n+from slither.core.declarations import Function, Contract\n+from slither.core.declarations.solidity_variables import (SolidityVariableComposed, SolidityVariable)\n from slither.detectors.abstract_detector import (AbstractDetector,\n DetectorClassification)\n from slither.slithir.operations import Binary, BinaryType\n \n \n+def _timestamp(func: Function) -> List[Node]:\n+ ret = set()\n+ for node in func.nodes:\n+ if node.contains_require_or_assert():\n+ for var in node.variables_read:\n+ if is_dependent(var, SolidityVariableComposed('block.timestamp'), func.contract):\n+ ret.add(node)\n+ if is_dependent(var, SolidityVariable('now'), func.contract):\n+ ret.add(node)\n+ for ir in node.irs:\n+ if isinstance(ir, Binary) and BinaryType.return_bool(ir.type):\n+ for var in ir.read:\n+ if is_dependent(var, SolidityVariableComposed('block.timestamp'), func.contract):\n+ ret.add(node)\n+ if is_dependent(var, SolidityVariable('now'), func.contract):\n+ ret.add(node)\n+ return list(ret)\n+\n+\n+def _detect_dangerous_timestamp(contract: Contract) -> List[Tuple[Function, List[Node]]]:\n+ \"\"\"\n+ Args:\n+ contract (Contract)\n+ Returns:\n+ list((Function), (list (Node)))\n+ \"\"\"\n+ ret = []\n+ for f in [f for f in contract.functions if f.contract_declarer == contract]:\n+ nodes = _timestamp(f)\n+ if nodes:\n+ ret.append((f, nodes))\n+ return ret\n+\n+\n class Timestamp(AbstractDetector):\n \"\"\"\n \"\"\"\n@@ -22,51 +58,18 @@\n \n WIKI = 'https://github.com/crytic/slither/wiki/Detector-Documentation#block-timestamp'\n \n-\n WIKI_TITLE = 'Block timestamp'\n WIKI_DESCRIPTION = 'Dangerous usage of `block.timestamp`. `block.timestamp` can be manipulated by miners.'\n WIKI_EXPLOIT_SCENARIO = '''\"Bob's contract relies on `block.timestamp` for its randomness. 
Eve is a miner and manipulates `block.timestamp` to exploit Bob's contract.'''\n WIKI_RECOMMENDATION = 'Avoid relying on `block.timestamp`.'\n \n- def timestamp(self, func):\n- \"\"\"\n- \"\"\"\n-\n- ret = set()\n- for node in func.nodes:\n- if node.contains_require_or_assert():\n- for var in node.variables_read:\n- if is_dependent(var, SolidityVariableComposed('block.timestamp'), func.contract):\n- ret.add(node)\n- for ir in node.irs:\n- if isinstance(ir, Binary) and BinaryType.return_bool(ir.type):\n- for var in ir.read:\n- if is_dependent(var, SolidityVariableComposed('block.timestamp'), func.contract):\n- ret.add(node)\n- return list(ret)\n-\n-\n- def detect_dangerous_timestamp(self, contract):\n- \"\"\"\n- Args:\n- contract (Contract)\n- Returns:\n- list((Function), (list (Node)))\n- \"\"\"\n- ret = []\n- for f in [f for f in contract.functions if f.contract_declarer == contract]:\n- nodes = self.timestamp(f)\n- if nodes:\n- ret.append((f, nodes))\n- return ret\n-\n def _detect(self):\n \"\"\"\n \"\"\"\n results = []\n \n for c in self.contracts:\n- dangerous_timestamp = self.detect_dangerous_timestamp(c)\n+ dangerous_timestamp = _detect_dangerous_timestamp(c)\n for (func, nodes) in dangerous_timestamp:\n \n info = [func, \" uses timestamp for comparisons\\n\"]\n", "issue": "Add \"now\" to the timestamp detector\nhttps://github.com/crytic/slither/blob/7cb6cf4870036f780088fa7dfec83ae3220322e2/slither/detectors/operations/block_timestamp.py#L39-L44\r\n\r\nThis could also warns about the use of `now`\n", "before_files": [{"content": "\"\"\"\n Module detecting dangerous use of block.timestamp\n\n\"\"\"\nfrom slither.core.declarations import Function\nfrom slither.analyses.data_dependency.data_dependency import is_tainted, is_dependent\nfrom slither.core.declarations.solidity_variables import (SolidityFunction,\n SolidityVariableComposed)\nfrom slither.detectors.abstract_detector import (AbstractDetector,\n DetectorClassification)\nfrom slither.slithir.operations import Binary, BinaryType\n\n\nclass Timestamp(AbstractDetector):\n \"\"\"\n \"\"\"\n\n ARGUMENT = 'timestamp'\n HELP = 'Dangerous usage of `block.timestamp`'\n IMPACT = DetectorClassification.LOW\n CONFIDENCE = DetectorClassification.MEDIUM\n\n WIKI = 'https://github.com/crytic/slither/wiki/Detector-Documentation#block-timestamp'\n\n\n WIKI_TITLE = 'Block timestamp'\n WIKI_DESCRIPTION = 'Dangerous usage of `block.timestamp`. `block.timestamp` can be manipulated by miners.'\n WIKI_EXPLOIT_SCENARIO = '''\"Bob's contract relies on `block.timestamp` for its randomness. 
Eve is a miner and manipulates `block.timestamp` to exploit Bob's contract.'''\n WIKI_RECOMMENDATION = 'Avoid relying on `block.timestamp`.'\n\n def timestamp(self, func):\n \"\"\"\n \"\"\"\n\n ret = set()\n for node in func.nodes:\n if node.contains_require_or_assert():\n for var in node.variables_read:\n if is_dependent(var, SolidityVariableComposed('block.timestamp'), func.contract):\n ret.add(node)\n for ir in node.irs:\n if isinstance(ir, Binary) and BinaryType.return_bool(ir.type):\n for var in ir.read:\n if is_dependent(var, SolidityVariableComposed('block.timestamp'), func.contract):\n ret.add(node)\n return list(ret)\n\n\n def detect_dangerous_timestamp(self, contract):\n \"\"\"\n Args:\n contract (Contract)\n Returns:\n list((Function), (list (Node)))\n \"\"\"\n ret = []\n for f in [f for f in contract.functions if f.contract_declarer == contract]:\n nodes = self.timestamp(f)\n if nodes:\n ret.append((f, nodes))\n return ret\n\n def _detect(self):\n \"\"\"\n \"\"\"\n results = []\n\n for c in self.contracts:\n dangerous_timestamp = self.detect_dangerous_timestamp(c)\n for (func, nodes) in dangerous_timestamp:\n\n info = [func, \" uses timestamp for comparisons\\n\"]\n\n info += ['\\tDangerous comparisons:\\n']\n for node in nodes:\n info += ['\\t- ', node, '\\n']\n\n res = self.generate_result(info)\n\n results.append(res)\n\n return results\n", "path": "slither/detectors/operations/block_timestamp.py"}]}
1,374
979
gh_patches_debug_38581
rasdani/github-patches
git_diff
kartoza__prj.app-217
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Crash on listing current sponsors When opening the sponsors view (with some sponsors and sponsor periods created) we get a crash. http://changelog.inasafe.org/en/qgis/sponsor/list/ Sentry info: http://sentry.kartoza.com/kartoza/projecta-live/group/5848/ Relevant code. ``` def current_sponsor(self): today = datetime.datetime.now().replace(tzinfo=utc) end = self.end_date.replace(tzinfo=utc) # <-- offending line if end < today: return False else: return True ``` </issue> <code> [start of django_project/changes/models/sponsorship_period.py] 1 __author__ = 'rischan' 2 3 import string 4 import random 5 from django.utils import timezone 6 from django.core.urlresolvers import reverse 7 from django.utils.text import slugify 8 from core.settings.contrib import STOP_WORDS 9 from django.db import models 10 from django.utils.translation import ugettext_lazy as _ 11 from django.contrib.auth.models import User 12 13 14 class ApprovedSponsorshipPeriodManager(models.Manager): 15 """Custom sponsor manager that shows only approved records.""" 16 17 def get_queryset(self): 18 """Query set generator""" 19 return super( 20 ApprovedSponsorshipPeriodManager, self).get_queryset().filter( 21 approved=True) 22 23 24 class UnapprovedSponsorshipPeriodManager(models.Manager): 25 """Custom sponsor manager that shows only unapproved records.""" 26 27 def get_queryset(self): 28 """Query set generator""" 29 return super( 30 UnapprovedSponsorshipPeriodManager, self).get_queryset().filter( 31 approved=False) 32 33 34 class SponsorshipPeriod(models.Model): 35 """A sponsorship period model e.g. gui, backend, web site etc.""" 36 37 start_date = models.DateField( 38 _("Start date"), 39 help_text='Start date of sponsorship period', 40 default=timezone.now) 41 42 end_date = models.DateField( 43 _("End date"), 44 help_text='End date of sponsorship period', 45 default=timezone.now) 46 47 approved = models.BooleanField( 48 help_text=_( 49 'Whether this sponsorship period has been approved for use by ' 50 'the project owner.'), 51 default=False 52 ) 53 54 author = models.ForeignKey(User) 55 slug = models.SlugField() 56 project = models.ForeignKey('base.Project') 57 objects = models.Manager() 58 approved_objects = ApprovedSponsorshipPeriodManager() 59 unapproved_objects = UnapprovedSponsorshipPeriodManager() 60 sponsor = models.ForeignKey( 61 'Sponsor', 62 help_text='Input the sponsor name', 63 ) 64 sponsorshiplevel = models.ForeignKey( 65 'SponsorshipLevel', 66 help_text='This level take from Sponsorship Level, ' 67 'you can add it by using Sponsorship Level menu', 68 ) 69 # noinspection PyClassicStyleClass 70 71 class Meta: 72 """Meta options for the sponsor class.""" 73 unique_together = ( 74 ('project', 'slug') 75 ) 76 app_label = 'changes' 77 ordering = ['start_date'] 78 79 def save(self, *args, **kwargs): 80 81 if not self.pk: 82 name = self.slug_generator() 83 words = name.split() 84 filtered_words = [t for t in words if t.lower() not in STOP_WORDS] 85 new_list = ' '.join(filtered_words) 86 self.slug = slugify(new_list)[:50] 87 super(SponsorshipPeriod, self).save(*args, **kwargs) 88 89 def slug_generator(self, size=6, chars=string.ascii_lowercase): 90 return ''.join(random.choice(chars) for _ in range(size)) 91 92 def __unicode__(self): 93 return u'%s - %s : %s' % ( 94 self.start_date, 95 self.end_date 96 ) 97 98 def get_absolute_url(self): 99 return reverse('sponsorshipperiod-detail', kwargs={ 100 'slug': self.slug, 101 
'project_slug': self.project.slug 102 }) 103 104 def current_sponsor(self): 105 today = timezone.now() 106 end = self.end_date 107 if end < today: 108 return False 109 else: 110 return True 111 [end of django_project/changes/models/sponsorship_period.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/django_project/changes/models/sponsorship_period.py b/django_project/changes/models/sponsorship_period.py --- a/django_project/changes/models/sponsorship_period.py +++ b/django_project/changes/models/sponsorship_period.py @@ -1,15 +1,19 @@ -__author__ = 'rischan' +# coding=utf-8 import string import random +import datetime from django.utils import timezone from django.core.urlresolvers import reverse from django.utils.text import slugify +# noinspection PyPackageRequirements from core.settings.contrib import STOP_WORDS from django.db import models from django.utils.translation import ugettext_lazy as _ from django.contrib.auth.models import User +__author__ = 'rischan' + class ApprovedSponsorshipPeriodManager(models.Manager): """Custom sponsor manager that shows only approved records.""" @@ -57,14 +61,16 @@ objects = models.Manager() approved_objects = ApprovedSponsorshipPeriodManager() unapproved_objects = UnapprovedSponsorshipPeriodManager() + # noinspection PyUnresolvedReferences sponsor = models.ForeignKey( - 'Sponsor', - help_text='Input the sponsor name', + 'Sponsor', + help_text='Input the sponsor name', ) + # noinspection PyUnresolvedReferences sponsorshiplevel = models.ForeignKey( - 'SponsorshipLevel', - help_text='This level take from Sponsorship Level, ' - 'you can add it by using Sponsorship Level menu', + 'SponsorshipLevel', + help_text='This level take from Sponsorship Level, ' + 'you can add it by using Sponsorship Level menu', ) # noinspection PyClassicStyleClass @@ -86,11 +92,13 @@ self.slug = slugify(new_list)[:50] super(SponsorshipPeriod, self).save(*args, **kwargs) - def slug_generator(self, size=6, chars=string.ascii_lowercase): + @staticmethod + def slug_generator(size=6, chars=string.ascii_lowercase): return ''.join(random.choice(chars) for _ in range(size)) def __unicode__(self): return u'%s - %s : %s' % ( + self.sponsor.name, self.start_date, self.end_date ) @@ -102,7 +110,7 @@ }) def current_sponsor(self): - today = timezone.now() + today = datetime.datetime.now().date() end = self.end_date if end < today: return False
{"golden_diff": "diff --git a/django_project/changes/models/sponsorship_period.py b/django_project/changes/models/sponsorship_period.py\n--- a/django_project/changes/models/sponsorship_period.py\n+++ b/django_project/changes/models/sponsorship_period.py\n@@ -1,15 +1,19 @@\n-__author__ = 'rischan'\n+# coding=utf-8\n \n import string\n import random\n+import datetime\n from django.utils import timezone\n from django.core.urlresolvers import reverse\n from django.utils.text import slugify\n+# noinspection PyPackageRequirements\n from core.settings.contrib import STOP_WORDS\n from django.db import models\n from django.utils.translation import ugettext_lazy as _\n from django.contrib.auth.models import User\n \n+__author__ = 'rischan'\n+\n \n class ApprovedSponsorshipPeriodManager(models.Manager):\n \"\"\"Custom sponsor manager that shows only approved records.\"\"\"\n@@ -57,14 +61,16 @@\n objects = models.Manager()\n approved_objects = ApprovedSponsorshipPeriodManager()\n unapproved_objects = UnapprovedSponsorshipPeriodManager()\n+ # noinspection PyUnresolvedReferences\n sponsor = models.ForeignKey(\n- 'Sponsor',\n- help_text='Input the sponsor name',\n+ 'Sponsor',\n+ help_text='Input the sponsor name',\n )\n+ # noinspection PyUnresolvedReferences\n sponsorshiplevel = models.ForeignKey(\n- 'SponsorshipLevel',\n- help_text='This level take from Sponsorship Level, '\n- 'you can add it by using Sponsorship Level menu',\n+ 'SponsorshipLevel',\n+ help_text='This level take from Sponsorship Level, '\n+ 'you can add it by using Sponsorship Level menu',\n )\n # noinspection PyClassicStyleClass\n \n@@ -86,11 +92,13 @@\n self.slug = slugify(new_list)[:50]\n super(SponsorshipPeriod, self).save(*args, **kwargs)\n \n- def slug_generator(self, size=6, chars=string.ascii_lowercase):\n+ @staticmethod\n+ def slug_generator(size=6, chars=string.ascii_lowercase):\n return ''.join(random.choice(chars) for _ in range(size))\n \n def __unicode__(self):\n return u'%s - %s : %s' % (\n+ self.sponsor.name,\n self.start_date,\n self.end_date\n )\n@@ -102,7 +110,7 @@\n })\n \n def current_sponsor(self):\n- today = timezone.now()\n+ today = datetime.datetime.now().date()\n end = self.end_date\n if end < today:\n return False\n", "issue": "Crash on listing current sponsors\nWhen opening the sponsors view (with some sponsors and sponsor periods created) we get a crash.\n\nhttp://changelog.inasafe.org/en/qgis/sponsor/list/\n\nSentry info:\n\nhttp://sentry.kartoza.com/kartoza/projecta-live/group/5848/\n\nRelevant code.\n\n```\n def current_sponsor(self):\n today = datetime.datetime.now().replace(tzinfo=utc)\n end = self.end_date.replace(tzinfo=utc) # <-- offending line\n if end < today:\n return False\n else:\n return True\n```\n\n", "before_files": [{"content": "__author__ = 'rischan'\n\nimport string\nimport random\nfrom django.utils import timezone\nfrom django.core.urlresolvers import reverse\nfrom django.utils.text import slugify\nfrom core.settings.contrib import STOP_WORDS\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.contrib.auth.models import User\n\n\nclass ApprovedSponsorshipPeriodManager(models.Manager):\n \"\"\"Custom sponsor manager that shows only approved records.\"\"\"\n\n def get_queryset(self):\n \"\"\"Query set generator\"\"\"\n return super(\n ApprovedSponsorshipPeriodManager, self).get_queryset().filter(\n approved=True)\n\n\nclass UnapprovedSponsorshipPeriodManager(models.Manager):\n \"\"\"Custom sponsor manager that shows only unapproved 
records.\"\"\"\n\n def get_queryset(self):\n \"\"\"Query set generator\"\"\"\n return super(\n UnapprovedSponsorshipPeriodManager, self).get_queryset().filter(\n approved=False)\n\n\nclass SponsorshipPeriod(models.Model):\n \"\"\"A sponsorship period model e.g. gui, backend, web site etc.\"\"\"\n\n start_date = models.DateField(\n _(\"Start date\"),\n help_text='Start date of sponsorship period',\n default=timezone.now)\n\n end_date = models.DateField(\n _(\"End date\"),\n help_text='End date of sponsorship period',\n default=timezone.now)\n\n approved = models.BooleanField(\n help_text=_(\n 'Whether this sponsorship period has been approved for use by '\n 'the project owner.'),\n default=False\n )\n\n author = models.ForeignKey(User)\n slug = models.SlugField()\n project = models.ForeignKey('base.Project')\n objects = models.Manager()\n approved_objects = ApprovedSponsorshipPeriodManager()\n unapproved_objects = UnapprovedSponsorshipPeriodManager()\n sponsor = models.ForeignKey(\n 'Sponsor',\n help_text='Input the sponsor name',\n )\n sponsorshiplevel = models.ForeignKey(\n 'SponsorshipLevel',\n help_text='This level take from Sponsorship Level, '\n 'you can add it by using Sponsorship Level menu',\n )\n # noinspection PyClassicStyleClass\n\n class Meta:\n \"\"\"Meta options for the sponsor class.\"\"\"\n unique_together = (\n ('project', 'slug')\n )\n app_label = 'changes'\n ordering = ['start_date']\n\n def save(self, *args, **kwargs):\n\n if not self.pk:\n name = self.slug_generator()\n words = name.split()\n filtered_words = [t for t in words if t.lower() not in STOP_WORDS]\n new_list = ' '.join(filtered_words)\n self.slug = slugify(new_list)[:50]\n super(SponsorshipPeriod, self).save(*args, **kwargs)\n\n def slug_generator(self, size=6, chars=string.ascii_lowercase):\n return ''.join(random.choice(chars) for _ in range(size))\n\n def __unicode__(self):\n return u'%s - %s : %s' % (\n self.start_date,\n self.end_date\n )\n\n def get_absolute_url(self):\n return reverse('sponsorshipperiod-detail', kwargs={\n 'slug': self.slug,\n 'project_slug': self.project.slug\n })\n\n def current_sponsor(self):\n today = timezone.now()\n end = self.end_date\n if end < today:\n return False\n else:\n return True\n", "path": "django_project/changes/models/sponsorship_period.py"}]}
1,613
574
gh_patches_debug_5592
rasdani/github-patches
git_diff
MongoEngine__mongoengine-1862
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Name of text index Is it possible to set name of text index? I have nearly 10 fields which I want to use in text index and I can't, because limited by length of index name (see: https://docs.mongodb.com/v3.2/reference/limits/#Index-Name-Length) Also, I don't want to use Wildcard index (btw, is it possible in mongoengine?) Thanks! </issue> <code> [start of mongoengine/context_managers.py] 1 from contextlib import contextmanager 2 from pymongo.write_concern import WriteConcern 3 from mongoengine.common import _import_class 4 from mongoengine.connection import DEFAULT_CONNECTION_NAME, get_db 5 6 7 __all__ = ('switch_db', 'switch_collection', 'no_dereference', 8 'no_sub_classes', 'query_counter', 'set_write_concern') 9 10 11 class switch_db(object): 12 """switch_db alias context manager. 13 14 Example :: 15 16 # Register connections 17 register_connection('default', 'mongoenginetest') 18 register_connection('testdb-1', 'mongoenginetest2') 19 20 class Group(Document): 21 name = StringField() 22 23 Group(name='test').save() # Saves in the default db 24 25 with switch_db(Group, 'testdb-1') as Group: 26 Group(name='hello testdb!').save() # Saves in testdb-1 27 """ 28 29 def __init__(self, cls, db_alias): 30 """Construct the switch_db context manager 31 32 :param cls: the class to change the registered db 33 :param db_alias: the name of the specific database to use 34 """ 35 self.cls = cls 36 self.collection = cls._get_collection() 37 self.db_alias = db_alias 38 self.ori_db_alias = cls._meta.get('db_alias', DEFAULT_CONNECTION_NAME) 39 40 def __enter__(self): 41 """Change the db_alias and clear the cached collection.""" 42 self.cls._meta['db_alias'] = self.db_alias 43 self.cls._collection = None 44 return self.cls 45 46 def __exit__(self, t, value, traceback): 47 """Reset the db_alias and collection.""" 48 self.cls._meta['db_alias'] = self.ori_db_alias 49 self.cls._collection = self.collection 50 51 52 class switch_collection(object): 53 """switch_collection alias context manager. 54 55 Example :: 56 57 class Group(Document): 58 name = StringField() 59 60 Group(name='test').save() # Saves in the default db 61 62 with switch_collection(Group, 'group1') as Group: 63 Group(name='hello testdb!').save() # Saves in group1 collection 64 """ 65 66 def __init__(self, cls, collection_name): 67 """Construct the switch_collection context manager. 68 69 :param cls: the class to change the registered db 70 :param collection_name: the name of the collection to use 71 """ 72 self.cls = cls 73 self.ori_collection = cls._get_collection() 74 self.ori_get_collection_name = cls._get_collection_name 75 self.collection_name = collection_name 76 77 def __enter__(self): 78 """Change the _get_collection_name and clear the cached collection.""" 79 80 @classmethod 81 def _get_collection_name(cls): 82 return self.collection_name 83 84 self.cls._get_collection_name = _get_collection_name 85 self.cls._collection = None 86 return self.cls 87 88 def __exit__(self, t, value, traceback): 89 """Reset the collection.""" 90 self.cls._collection = self.ori_collection 91 self.cls._get_collection_name = self.ori_get_collection_name 92 93 94 class no_dereference(object): 95 """no_dereference context manager. 
96 97 Turns off all dereferencing in Documents for the duration of the context 98 manager:: 99 100 with no_dereference(Group) as Group: 101 Group.objects.find() 102 """ 103 104 def __init__(self, cls): 105 """Construct the no_dereference context manager. 106 107 :param cls: the class to turn dereferencing off on 108 """ 109 self.cls = cls 110 111 ReferenceField = _import_class('ReferenceField') 112 GenericReferenceField = _import_class('GenericReferenceField') 113 ComplexBaseField = _import_class('ComplexBaseField') 114 115 self.deref_fields = [k for k, v in self.cls._fields.iteritems() 116 if isinstance(v, (ReferenceField, 117 GenericReferenceField, 118 ComplexBaseField))] 119 120 def __enter__(self): 121 """Change the objects default and _auto_dereference values.""" 122 for field in self.deref_fields: 123 self.cls._fields[field]._auto_dereference = False 124 return self.cls 125 126 def __exit__(self, t, value, traceback): 127 """Reset the default and _auto_dereference values.""" 128 for field in self.deref_fields: 129 self.cls._fields[field]._auto_dereference = True 130 return self.cls 131 132 133 class no_sub_classes(object): 134 """no_sub_classes context manager. 135 136 Only returns instances of this class and no sub (inherited) classes:: 137 138 with no_sub_classes(Group) as Group: 139 Group.objects.find() 140 """ 141 142 def __init__(self, cls): 143 """Construct the no_sub_classes context manager. 144 145 :param cls: the class to turn querying sub classes on 146 """ 147 self.cls = cls 148 149 def __enter__(self): 150 """Change the objects default and _auto_dereference values.""" 151 self.cls._all_subclasses = self.cls._subclasses 152 self.cls._subclasses = (self.cls,) 153 return self.cls 154 155 def __exit__(self, t, value, traceback): 156 """Reset the default and _auto_dereference values.""" 157 self.cls._subclasses = self.cls._all_subclasses 158 delattr(self.cls, '_all_subclasses') 159 return self.cls 160 161 162 class query_counter(object): 163 """Query_counter context manager to get the number of queries.""" 164 165 def __init__(self): 166 """Construct the query_counter.""" 167 self.counter = 0 168 self.db = get_db() 169 170 def __enter__(self): 171 """On every with block we need to drop the profile collection.""" 172 self.db.set_profiling_level(0) 173 self.db.system.profile.drop() 174 self.db.set_profiling_level(2) 175 return self 176 177 def __exit__(self, t, value, traceback): 178 """Reset the profiling level.""" 179 self.db.set_profiling_level(0) 180 181 def __eq__(self, value): 182 """== Compare querycounter.""" 183 counter = self._get_count() 184 return value == counter 185 186 def __ne__(self, value): 187 """!= Compare querycounter.""" 188 return not self.__eq__(value) 189 190 def __lt__(self, value): 191 """< Compare querycounter.""" 192 return self._get_count() < value 193 194 def __le__(self, value): 195 """<= Compare querycounter.""" 196 return self._get_count() <= value 197 198 def __gt__(self, value): 199 """> Compare querycounter.""" 200 return self._get_count() > value 201 202 def __ge__(self, value): 203 """>= Compare querycounter.""" 204 return self._get_count() >= value 205 206 def __int__(self): 207 """int representation.""" 208 return self._get_count() 209 210 def __repr__(self): 211 """repr query_counter as the number of queries.""" 212 return u"%s" % self._get_count() 213 214 def _get_count(self): 215 """Get the number of queries.""" 216 ignore_query = {'ns': {'$ne': '%s.system.indexes' % self.db.name}} 217 count = 
self.db.system.profile.find(ignore_query).count() - self.counter 218 self.counter += 1 219 return count 220 221 222 @contextmanager 223 def set_write_concern(collection, write_concerns): 224 combined_concerns = dict(collection.write_concern.document.items()) 225 combined_concerns.update(write_concerns) 226 yield collection.with_options(write_concern=WriteConcern(**combined_concerns)) 227 [end of mongoengine/context_managers.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mongoengine/context_managers.py b/mongoengine/context_managers.py --- a/mongoengine/context_managers.py +++ b/mongoengine/context_managers.py @@ -215,7 +215,7 @@ """Get the number of queries.""" ignore_query = {'ns': {'$ne': '%s.system.indexes' % self.db.name}} count = self.db.system.profile.find(ignore_query).count() - self.counter - self.counter += 1 + self.counter += 1 # Account for the query we just fired return count
{"golden_diff": "diff --git a/mongoengine/context_managers.py b/mongoengine/context_managers.py\n--- a/mongoengine/context_managers.py\n+++ b/mongoengine/context_managers.py\n@@ -215,7 +215,7 @@\n \"\"\"Get the number of queries.\"\"\"\n ignore_query = {'ns': {'$ne': '%s.system.indexes' % self.db.name}}\n count = self.db.system.profile.find(ignore_query).count() - self.counter\n- self.counter += 1\n+ self.counter += 1 # Account for the query we just fired\n return count\n", "issue": "Name of text index\nIs it possible to set name of text index?\r\n\r\nI have nearly 10 fields which I want to use in text index and I can't, because limited by length of index name (see: https://docs.mongodb.com/v3.2/reference/limits/#Index-Name-Length)\r\n\r\nAlso, I don't want to use Wildcard index (btw, is it possible in mongoengine?)\r\n\r\nThanks!\n", "before_files": [{"content": "from contextlib import contextmanager\nfrom pymongo.write_concern import WriteConcern\nfrom mongoengine.common import _import_class\nfrom mongoengine.connection import DEFAULT_CONNECTION_NAME, get_db\n\n\n__all__ = ('switch_db', 'switch_collection', 'no_dereference',\n 'no_sub_classes', 'query_counter', 'set_write_concern')\n\n\nclass switch_db(object):\n \"\"\"switch_db alias context manager.\n\n Example ::\n\n # Register connections\n register_connection('default', 'mongoenginetest')\n register_connection('testdb-1', 'mongoenginetest2')\n\n class Group(Document):\n name = StringField()\n\n Group(name='test').save() # Saves in the default db\n\n with switch_db(Group, 'testdb-1') as Group:\n Group(name='hello testdb!').save() # Saves in testdb-1\n \"\"\"\n\n def __init__(self, cls, db_alias):\n \"\"\"Construct the switch_db context manager\n\n :param cls: the class to change the registered db\n :param db_alias: the name of the specific database to use\n \"\"\"\n self.cls = cls\n self.collection = cls._get_collection()\n self.db_alias = db_alias\n self.ori_db_alias = cls._meta.get('db_alias', DEFAULT_CONNECTION_NAME)\n\n def __enter__(self):\n \"\"\"Change the db_alias and clear the cached collection.\"\"\"\n self.cls._meta['db_alias'] = self.db_alias\n self.cls._collection = None\n return self.cls\n\n def __exit__(self, t, value, traceback):\n \"\"\"Reset the db_alias and collection.\"\"\"\n self.cls._meta['db_alias'] = self.ori_db_alias\n self.cls._collection = self.collection\n\n\nclass switch_collection(object):\n \"\"\"switch_collection alias context manager.\n\n Example ::\n\n class Group(Document):\n name = StringField()\n\n Group(name='test').save() # Saves in the default db\n\n with switch_collection(Group, 'group1') as Group:\n Group(name='hello testdb!').save() # Saves in group1 collection\n \"\"\"\n\n def __init__(self, cls, collection_name):\n \"\"\"Construct the switch_collection context manager.\n\n :param cls: the class to change the registered db\n :param collection_name: the name of the collection to use\n \"\"\"\n self.cls = cls\n self.ori_collection = cls._get_collection()\n self.ori_get_collection_name = cls._get_collection_name\n self.collection_name = collection_name\n\n def __enter__(self):\n \"\"\"Change the _get_collection_name and clear the cached collection.\"\"\"\n\n @classmethod\n def _get_collection_name(cls):\n return self.collection_name\n\n self.cls._get_collection_name = _get_collection_name\n self.cls._collection = None\n return self.cls\n\n def __exit__(self, t, value, traceback):\n \"\"\"Reset the collection.\"\"\"\n self.cls._collection = self.ori_collection\n self.cls._get_collection_name = 
self.ori_get_collection_name\n\n\nclass no_dereference(object):\n \"\"\"no_dereference context manager.\n\n Turns off all dereferencing in Documents for the duration of the context\n manager::\n\n with no_dereference(Group) as Group:\n Group.objects.find()\n \"\"\"\n\n def __init__(self, cls):\n \"\"\"Construct the no_dereference context manager.\n\n :param cls: the class to turn dereferencing off on\n \"\"\"\n self.cls = cls\n\n ReferenceField = _import_class('ReferenceField')\n GenericReferenceField = _import_class('GenericReferenceField')\n ComplexBaseField = _import_class('ComplexBaseField')\n\n self.deref_fields = [k for k, v in self.cls._fields.iteritems()\n if isinstance(v, (ReferenceField,\n GenericReferenceField,\n ComplexBaseField))]\n\n def __enter__(self):\n \"\"\"Change the objects default and _auto_dereference values.\"\"\"\n for field in self.deref_fields:\n self.cls._fields[field]._auto_dereference = False\n return self.cls\n\n def __exit__(self, t, value, traceback):\n \"\"\"Reset the default and _auto_dereference values.\"\"\"\n for field in self.deref_fields:\n self.cls._fields[field]._auto_dereference = True\n return self.cls\n\n\nclass no_sub_classes(object):\n \"\"\"no_sub_classes context manager.\n\n Only returns instances of this class and no sub (inherited) classes::\n\n with no_sub_classes(Group) as Group:\n Group.objects.find()\n \"\"\"\n\n def __init__(self, cls):\n \"\"\"Construct the no_sub_classes context manager.\n\n :param cls: the class to turn querying sub classes on\n \"\"\"\n self.cls = cls\n\n def __enter__(self):\n \"\"\"Change the objects default and _auto_dereference values.\"\"\"\n self.cls._all_subclasses = self.cls._subclasses\n self.cls._subclasses = (self.cls,)\n return self.cls\n\n def __exit__(self, t, value, traceback):\n \"\"\"Reset the default and _auto_dereference values.\"\"\"\n self.cls._subclasses = self.cls._all_subclasses\n delattr(self.cls, '_all_subclasses')\n return self.cls\n\n\nclass query_counter(object):\n \"\"\"Query_counter context manager to get the number of queries.\"\"\"\n\n def __init__(self):\n \"\"\"Construct the query_counter.\"\"\"\n self.counter = 0\n self.db = get_db()\n\n def __enter__(self):\n \"\"\"On every with block we need to drop the profile collection.\"\"\"\n self.db.set_profiling_level(0)\n self.db.system.profile.drop()\n self.db.set_profiling_level(2)\n return self\n\n def __exit__(self, t, value, traceback):\n \"\"\"Reset the profiling level.\"\"\"\n self.db.set_profiling_level(0)\n\n def __eq__(self, value):\n \"\"\"== Compare querycounter.\"\"\"\n counter = self._get_count()\n return value == counter\n\n def __ne__(self, value):\n \"\"\"!= Compare querycounter.\"\"\"\n return not self.__eq__(value)\n\n def __lt__(self, value):\n \"\"\"< Compare querycounter.\"\"\"\n return self._get_count() < value\n\n def __le__(self, value):\n \"\"\"<= Compare querycounter.\"\"\"\n return self._get_count() <= value\n\n def __gt__(self, value):\n \"\"\"> Compare querycounter.\"\"\"\n return self._get_count() > value\n\n def __ge__(self, value):\n \"\"\">= Compare querycounter.\"\"\"\n return self._get_count() >= value\n\n def __int__(self):\n \"\"\"int representation.\"\"\"\n return self._get_count()\n\n def __repr__(self):\n \"\"\"repr query_counter as the number of queries.\"\"\"\n return u\"%s\" % self._get_count()\n\n def _get_count(self):\n \"\"\"Get the number of queries.\"\"\"\n ignore_query = {'ns': {'$ne': '%s.system.indexes' % self.db.name}}\n count = self.db.system.profile.find(ignore_query).count() - 
self.counter\n self.counter += 1\n return count\n\n\n@contextmanager\ndef set_write_concern(collection, write_concerns):\n combined_concerns = dict(collection.write_concern.document.items())\n combined_concerns.update(write_concerns)\n yield collection.with_options(write_concern=WriteConcern(**combined_concerns))\n", "path": "mongoengine/context_managers.py"}]}
2,831
130
gh_patches_debug_29501
rasdani/github-patches
git_diff
adap__flower-1347
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> sklearn-logreg-mnist example is outdated ### Describe the bug The sklearn-logreg-mnist example is outdated and does not work with flower 1.0.0. I will make a pull request to fix this. ### Steps/Code to Reproduce sh ./run.sh ### Expected Results The example should run with no errors, both on the client and on the server. ### Actual Results Number of errors arise when the script runs. </issue> <code> [start of examples/sklearn-logreg-mnist/server.py] 1 import flwr as fl 2 import utils 3 from sklearn.metrics import log_loss 4 from sklearn.linear_model import LogisticRegression 5 from typing import Dict 6 7 8 def fit_round(server_round: int) -> Dict: 9 """Send round number to client.""" 10 return {"server_round": server_round} 11 12 13 def get_evaluate_fn(model: LogisticRegression): 14 """Return an evaluation function for server-side evaluation.""" 15 16 # Load test data here to avoid the overhead of doing it in `evaluate` itself 17 _, (X_test, y_test) = utils.load_mnist() 18 19 # The `evaluate` function will be called after every round 20 def evaluate(parameters: fl.common.Weights): 21 # Update model with the latest parameters 22 utils.set_model_params(model, parameters) 23 loss = log_loss(y_test, model.predict_proba(X_test)) 24 accuracy = model.score(X_test, y_test) 25 return loss, {"accuracy": accuracy} 26 27 return evaluate 28 29 30 # Start Flower server for five rounds of federated learning 31 if __name__ == "__main__": 32 model = LogisticRegression() 33 utils.set_initial_params(model) 34 strategy = fl.server.strategy.FedAvg( 35 min_available_clients=2, 36 evaluate_fn=get_evaluate_fn(model), 37 on_fit_config_fn=fit_round, 38 ) 39 fl.server.start_server( 40 server_address="0.0.0.0:8080", 41 strategy=strategy, 42 config={"num_rounds": 5}, 43 ) 44 [end of examples/sklearn-logreg-mnist/server.py] [start of examples/sklearn-logreg-mnist/client.py] 1 import warnings 2 import flwr as fl 3 import numpy as np 4 5 from sklearn.linear_model import LogisticRegression 6 from sklearn.metrics import log_loss 7 8 import utils 9 10 if __name__ == "__main__": 11 # Load MNIST dataset from https://www.openml.org/d/554 12 (X_train, y_train), (X_test, y_test) = utils.load_mnist() 13 14 # Split train set into 10 partitions and randomly use one for training. 
15 partition_id = np.random.choice(10) 16 (X_train, y_train) = utils.partition(X_train, y_train, 10)[partition_id] 17 18 # Create LogisticRegression Model 19 model = LogisticRegression( 20 penalty="l2", 21 max_iter=1, # local epoch 22 warm_start=True, # prevent refreshing weights when fitting 23 ) 24 25 # Setting initial parameters, akin to model.compile for keras models 26 utils.set_initial_params(model) 27 28 # Define Flower client 29 class MnistClient(fl.client.NumPyClient): 30 def get_parameters(self): # type: ignore 31 return utils.get_model_parameters(model) 32 33 def fit(self, parameters, config): # type: ignore 34 utils.set_model_params(model, parameters) 35 # Ignore convergence failure due to low local epochs 36 with warnings.catch_warnings(): 37 warnings.simplefilter("ignore") 38 model.fit(X_train, y_train) 39 print(f"Training finished for round {config['server_round']}") 40 return utils.get_model_parameters(model), len(X_train), {} 41 42 def evaluate(self, parameters, config): # type: ignore 43 utils.set_model_params(model, parameters) 44 loss = log_loss(y_test, model.predict_proba(X_test)) 45 accuracy = model.score(X_test, y_test) 46 return loss, len(X_test), {"accuracy": accuracy} 47 48 # Start Flower client 49 fl.client.start_numpy_client("0.0.0.0:8080", client=MnistClient()) 50 [end of examples/sklearn-logreg-mnist/client.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/examples/sklearn-logreg-mnist/client.py b/examples/sklearn-logreg-mnist/client.py --- a/examples/sklearn-logreg-mnist/client.py +++ b/examples/sklearn-logreg-mnist/client.py @@ -27,7 +27,7 @@ # Define Flower client class MnistClient(fl.client.NumPyClient): - def get_parameters(self): # type: ignore + def get_parameters(self, config): # type: ignore return utils.get_model_parameters(model) def fit(self, parameters, config): # type: ignore @@ -46,4 +46,4 @@ return loss, len(X_test), {"accuracy": accuracy} # Start Flower client - fl.client.start_numpy_client("0.0.0.0:8080", client=MnistClient()) + fl.client.start_numpy_client(server_address="0.0.0.0:8080", client=MnistClient()) diff --git a/examples/sklearn-logreg-mnist/server.py b/examples/sklearn-logreg-mnist/server.py --- a/examples/sklearn-logreg-mnist/server.py +++ b/examples/sklearn-logreg-mnist/server.py @@ -17,7 +17,7 @@ _, (X_test, y_test) = utils.load_mnist() # The `evaluate` function will be called after every round - def evaluate(parameters: fl.common.Weights): + def evaluate(server_round, parameters: fl.common.NDArrays, config): # Update model with the latest parameters utils.set_model_params(model, parameters) loss = log_loss(y_test, model.predict_proba(X_test)) @@ -39,5 +39,5 @@ fl.server.start_server( server_address="0.0.0.0:8080", strategy=strategy, - config={"num_rounds": 5}, + config=fl.server.ServerConfig(num_rounds=5), )
{"golden_diff": "diff --git a/examples/sklearn-logreg-mnist/client.py b/examples/sklearn-logreg-mnist/client.py\n--- a/examples/sklearn-logreg-mnist/client.py\n+++ b/examples/sklearn-logreg-mnist/client.py\n@@ -27,7 +27,7 @@\n \n # Define Flower client\n class MnistClient(fl.client.NumPyClient):\n- def get_parameters(self): # type: ignore\n+ def get_parameters(self, config): # type: ignore\n return utils.get_model_parameters(model)\n \n def fit(self, parameters, config): # type: ignore\n@@ -46,4 +46,4 @@\n return loss, len(X_test), {\"accuracy\": accuracy}\n \n # Start Flower client\n- fl.client.start_numpy_client(\"0.0.0.0:8080\", client=MnistClient())\n+ fl.client.start_numpy_client(server_address=\"0.0.0.0:8080\", client=MnistClient())\ndiff --git a/examples/sklearn-logreg-mnist/server.py b/examples/sklearn-logreg-mnist/server.py\n--- a/examples/sklearn-logreg-mnist/server.py\n+++ b/examples/sklearn-logreg-mnist/server.py\n@@ -17,7 +17,7 @@\n _, (X_test, y_test) = utils.load_mnist()\n \n # The `evaluate` function will be called after every round\n- def evaluate(parameters: fl.common.Weights):\n+ def evaluate(server_round, parameters: fl.common.NDArrays, config):\n # Update model with the latest parameters\n utils.set_model_params(model, parameters)\n loss = log_loss(y_test, model.predict_proba(X_test))\n@@ -39,5 +39,5 @@\n fl.server.start_server(\n server_address=\"0.0.0.0:8080\",\n strategy=strategy,\n- config={\"num_rounds\": 5},\n+ config=fl.server.ServerConfig(num_rounds=5),\n )\n", "issue": "sklearn-logreg-mnist example is outdated\n### Describe the bug\n\nThe sklearn-logreg-mnist example is outdated and does not work with flower 1.0.0. I will make a pull request to fix this.\n\n### Steps/Code to Reproduce\n\nsh ./run.sh\n\n### Expected Results\n\nThe example should run with no errors, both on the client and on the server.\n\n### Actual Results\n\nNumber of errors arise when the script runs.\n", "before_files": [{"content": "import flwr as fl\nimport utils\nfrom sklearn.metrics import log_loss\nfrom sklearn.linear_model import LogisticRegression\nfrom typing import Dict\n\n\ndef fit_round(server_round: int) -> Dict:\n \"\"\"Send round number to client.\"\"\"\n return {\"server_round\": server_round}\n\n\ndef get_evaluate_fn(model: LogisticRegression):\n \"\"\"Return an evaluation function for server-side evaluation.\"\"\"\n\n # Load test data here to avoid the overhead of doing it in `evaluate` itself\n _, (X_test, y_test) = utils.load_mnist()\n\n # The `evaluate` function will be called after every round\n def evaluate(parameters: fl.common.Weights):\n # Update model with the latest parameters\n utils.set_model_params(model, parameters)\n loss = log_loss(y_test, model.predict_proba(X_test))\n accuracy = model.score(X_test, y_test)\n return loss, {\"accuracy\": accuracy}\n\n return evaluate\n\n\n# Start Flower server for five rounds of federated learning\nif __name__ == \"__main__\":\n model = LogisticRegression()\n utils.set_initial_params(model)\n strategy = fl.server.strategy.FedAvg(\n min_available_clients=2,\n evaluate_fn=get_evaluate_fn(model),\n on_fit_config_fn=fit_round,\n )\n fl.server.start_server(\n server_address=\"0.0.0.0:8080\",\n strategy=strategy,\n config={\"num_rounds\": 5},\n )\n", "path": "examples/sklearn-logreg-mnist/server.py"}, {"content": "import warnings\nimport flwr as fl\nimport numpy as np\n\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import log_loss\n\nimport utils\n\nif __name__ == \"__main__\":\n # Load MNIST dataset 
from https://www.openml.org/d/554\n (X_train, y_train), (X_test, y_test) = utils.load_mnist()\n\n # Split train set into 10 partitions and randomly use one for training.\n partition_id = np.random.choice(10)\n (X_train, y_train) = utils.partition(X_train, y_train, 10)[partition_id]\n\n # Create LogisticRegression Model\n model = LogisticRegression(\n penalty=\"l2\",\n max_iter=1, # local epoch\n warm_start=True, # prevent refreshing weights when fitting\n )\n\n # Setting initial parameters, akin to model.compile for keras models\n utils.set_initial_params(model)\n\n # Define Flower client\n class MnistClient(fl.client.NumPyClient):\n def get_parameters(self): # type: ignore\n return utils.get_model_parameters(model)\n\n def fit(self, parameters, config): # type: ignore\n utils.set_model_params(model, parameters)\n # Ignore convergence failure due to low local epochs\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n model.fit(X_train, y_train)\n print(f\"Training finished for round {config['server_round']}\")\n return utils.get_model_parameters(model), len(X_train), {}\n\n def evaluate(self, parameters, config): # type: ignore\n utils.set_model_params(model, parameters)\n loss = log_loss(y_test, model.predict_proba(X_test))\n accuracy = model.score(X_test, y_test)\n return loss, len(X_test), {\"accuracy\": accuracy}\n\n # Start Flower client\n fl.client.start_numpy_client(\"0.0.0.0:8080\", client=MnistClient())\n", "path": "examples/sklearn-logreg-mnist/client.py"}]}
1,570
441
gh_patches_debug_13768
rasdani/github-patches
git_diff
liqd__a4-meinberlin-4915
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Export-Button Problem in Modul "Brainstorming (with map)" on Prod, Stage and Dev **URL:** https://meinberlin-dev.liqd.net/dashboard/projects/multimodul-test-merkmalkategorie/basic/ **user:** initiator, admin **expected behaviour:** as I user I want to export all Ideas and Comments in all moduls which have Ideas/Proposals **behaviour:** In the Modules "Brainstorming/Brainstorming with Map" I cannot see the Excel-Export-Button and therefore not export the ideas/comments in my project. **important screensize:** - **device & browser:** - **Comment/Question:** Every other modul with proposals/ideas has the excel-export-button. There's a workaround when recreating the right URL, I can get to the Excel-Export page. In this case: https://meinberlin-dev.liqd.net/dashboard/modules/brainstorming-mit-karte-7/export/mapidea/ <img width="311" alt="Bildschirm­foto 2023-02-03 um 10 50 25" src="https://user-images.githubusercontent.com/113608720/216568760-5075d601-eb68-44f1-9209-a3b547d994f9.png"> Screenshot? </issue> <code> [start of meinberlin/apps/mapideas/dashboard.py] 1 from django.urls import reverse 2 from django.utils.translation import gettext_lazy as _ 3 4 from adhocracy4.dashboard import DashboardComponent 5 from adhocracy4.dashboard import components 6 7 from . import exports 8 from . import views 9 10 11 class ExportMapIdeaComponent(DashboardComponent): 12 identifier = "mapidea_export" 13 weight = 50 14 label = _("Export Excel") 15 16 def is_effective(self, module): 17 return ( 18 module.blueprint_type == "MIC" 19 and not module.project.is_draft 20 and not module.is_draft 21 ) 22 23 def get_progress(self, module): 24 return 0, 0 25 26 def get_base_url(self, module): 27 return reverse( 28 "a4dashboard:mapidea-export-module", 29 kwargs={ 30 "module_slug": module.slug, 31 }, 32 ) 33 34 def get_urls(self): 35 return [ 36 ( 37 r"^modules/(?P<module_slug>[-\w_]+)/export/mapidea/$", 38 views.MapIdeaDashboardExportView.as_view(component=self), 39 "mapidea-export-module", 40 ), 41 ( 42 r"^modules/(?P<module_slug>[-\w_]+)/export/mapidea/ideas/$", 43 exports.MapIdeaExportView.as_view(), 44 "mapidea-export", 45 ), 46 ( 47 r"^modules/(?P<module_slug>[-\w_]+)/export/mapidea/comments/$", 48 exports.MapIdeaCommentExportView.as_view(), 49 "mapidea-comment-export", 50 ), 51 ] 52 53 54 components.register_module(ExportMapIdeaComponent()) 55 [end of meinberlin/apps/mapideas/dashboard.py] [start of meinberlin/apps/ideas/dashboard.py] 1 from django.urls import reverse 2 from django.utils.translation import gettext_lazy as _ 3 4 from adhocracy4.dashboard import DashboardComponent 5 from adhocracy4.dashboard import components 6 7 from . import exports 8 from . 
import views 9 10 11 class ExportIdeaComponent(DashboardComponent): 12 identifier = "idea_export" 13 weight = 50 14 label = _("Export Excel") 15 16 def is_effective(self, module): 17 return ( 18 module.blueprint_type == "IC" 19 and not module.project.is_draft 20 and not module.is_draft 21 ) 22 23 def get_progress(self, module): 24 return 0, 0 25 26 def get_base_url(self, module): 27 return reverse( 28 "a4dashboard:idea-export-module", 29 kwargs={ 30 "module_slug": module.slug, 31 }, 32 ) 33 34 def get_urls(self): 35 return [ 36 ( 37 r"^modules/(?P<module_slug>[-\w_]+)/export/idea/$", 38 views.IdeaDashboardExportView.as_view(component=self), 39 "idea-export-module", 40 ), 41 ( 42 r"^modules/(?P<module_slug>[-\w_]+)/export/idea/ideas/$", 43 exports.IdeaExportView.as_view(), 44 "idea-export", 45 ), 46 ( 47 r"^modules/(?P<module_slug>[-\w_]+)/export/idea/comments/$", 48 exports.IdeaCommentExportView.as_view(), 49 "idea-comment-export", 50 ), 51 ] 52 53 54 components.register_module(ExportIdeaComponent()) 55 [end of meinberlin/apps/ideas/dashboard.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/meinberlin/apps/ideas/dashboard.py b/meinberlin/apps/ideas/dashboard.py --- a/meinberlin/apps/ideas/dashboard.py +++ b/meinberlin/apps/ideas/dashboard.py @@ -15,7 +15,7 @@ def is_effective(self, module): return ( - module.blueprint_type == "IC" + module.blueprint_type in ["IC", "BS"] and not module.project.is_draft and not module.is_draft ) diff --git a/meinberlin/apps/mapideas/dashboard.py b/meinberlin/apps/mapideas/dashboard.py --- a/meinberlin/apps/mapideas/dashboard.py +++ b/meinberlin/apps/mapideas/dashboard.py @@ -15,7 +15,7 @@ def is_effective(self, module): return ( - module.blueprint_type == "MIC" + module.blueprint_type in ["MIC", "MBS"] and not module.project.is_draft and not module.is_draft )
{"golden_diff": "diff --git a/meinberlin/apps/ideas/dashboard.py b/meinberlin/apps/ideas/dashboard.py\n--- a/meinberlin/apps/ideas/dashboard.py\n+++ b/meinberlin/apps/ideas/dashboard.py\n@@ -15,7 +15,7 @@\n \n def is_effective(self, module):\n return (\n- module.blueprint_type == \"IC\"\n+ module.blueprint_type in [\"IC\", \"BS\"]\n and not module.project.is_draft\n and not module.is_draft\n )\ndiff --git a/meinberlin/apps/mapideas/dashboard.py b/meinberlin/apps/mapideas/dashboard.py\n--- a/meinberlin/apps/mapideas/dashboard.py\n+++ b/meinberlin/apps/mapideas/dashboard.py\n@@ -15,7 +15,7 @@\n \n def is_effective(self, module):\n return (\n- module.blueprint_type == \"MIC\"\n+ module.blueprint_type in [\"MIC\", \"MBS\"]\n and not module.project.is_draft\n and not module.is_draft\n )\n", "issue": "Export-Button Problem in Modul \"Brainstorming (with map)\" on Prod, Stage and Dev\n**URL:** https://meinberlin-dev.liqd.net/dashboard/projects/multimodul-test-merkmalkategorie/basic/\r\n**user:** initiator, admin\r\n**expected behaviour:** as I user I want to export all Ideas and Comments in all moduls which have Ideas/Proposals\r\n**behaviour:** In the Modules \"Brainstorming/Brainstorming with Map\" I cannot see the Excel-Export-Button and therefore not export the ideas/comments in my project. \r\n**important screensize:** - \r\n**device & browser:** - \r\n**Comment/Question:** Every other modul with proposals/ideas has the excel-export-button. There's a workaround when recreating the right URL, I can get to the Excel-Export page. In this case: https://meinberlin-dev.liqd.net/dashboard/modules/brainstorming-mit-karte-7/export/mapidea/\r\n<img width=\"311\" alt=\"Bildschirm\u00adfoto 2023-02-03 um 10 50 25\" src=\"https://user-images.githubusercontent.com/113608720/216568760-5075d601-eb68-44f1-9209-a3b547d994f9.png\">\r\n\r\n\r\nScreenshot?\r\n\n", "before_files": [{"content": "from django.urls import reverse\nfrom django.utils.translation import gettext_lazy as _\n\nfrom adhocracy4.dashboard import DashboardComponent\nfrom adhocracy4.dashboard import components\n\nfrom . import exports\nfrom . import views\n\n\nclass ExportMapIdeaComponent(DashboardComponent):\n identifier = \"mapidea_export\"\n weight = 50\n label = _(\"Export Excel\")\n\n def is_effective(self, module):\n return (\n module.blueprint_type == \"MIC\"\n and not module.project.is_draft\n and not module.is_draft\n )\n\n def get_progress(self, module):\n return 0, 0\n\n def get_base_url(self, module):\n return reverse(\n \"a4dashboard:mapidea-export-module\",\n kwargs={\n \"module_slug\": module.slug,\n },\n )\n\n def get_urls(self):\n return [\n (\n r\"^modules/(?P<module_slug>[-\\w_]+)/export/mapidea/$\",\n views.MapIdeaDashboardExportView.as_view(component=self),\n \"mapidea-export-module\",\n ),\n (\n r\"^modules/(?P<module_slug>[-\\w_]+)/export/mapidea/ideas/$\",\n exports.MapIdeaExportView.as_view(),\n \"mapidea-export\",\n ),\n (\n r\"^modules/(?P<module_slug>[-\\w_]+)/export/mapidea/comments/$\",\n exports.MapIdeaCommentExportView.as_view(),\n \"mapidea-comment-export\",\n ),\n ]\n\n\ncomponents.register_module(ExportMapIdeaComponent())\n", "path": "meinberlin/apps/mapideas/dashboard.py"}, {"content": "from django.urls import reverse\nfrom django.utils.translation import gettext_lazy as _\n\nfrom adhocracy4.dashboard import DashboardComponent\nfrom adhocracy4.dashboard import components\n\nfrom . import exports\nfrom . 
import views\n\n\nclass ExportIdeaComponent(DashboardComponent):\n identifier = \"idea_export\"\n weight = 50\n label = _(\"Export Excel\")\n\n def is_effective(self, module):\n return (\n module.blueprint_type == \"IC\"\n and not module.project.is_draft\n and not module.is_draft\n )\n\n def get_progress(self, module):\n return 0, 0\n\n def get_base_url(self, module):\n return reverse(\n \"a4dashboard:idea-export-module\",\n kwargs={\n \"module_slug\": module.slug,\n },\n )\n\n def get_urls(self):\n return [\n (\n r\"^modules/(?P<module_slug>[-\\w_]+)/export/idea/$\",\n views.IdeaDashboardExportView.as_view(component=self),\n \"idea-export-module\",\n ),\n (\n r\"^modules/(?P<module_slug>[-\\w_]+)/export/idea/ideas/$\",\n exports.IdeaExportView.as_view(),\n \"idea-export\",\n ),\n (\n r\"^modules/(?P<module_slug>[-\\w_]+)/export/idea/comments/$\",\n exports.IdeaCommentExportView.as_view(),\n \"idea-comment-export\",\n ),\n ]\n\n\ncomponents.register_module(ExportIdeaComponent())\n", "path": "meinberlin/apps/ideas/dashboard.py"}]}
1,727
227
gh_patches_debug_9528
rasdani/github-patches
git_diff
ivy-llc__ivy-23319
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> empty_strided </issue> <code> [start of ivy/functional/frontends/torch/creation_ops.py] 1 # local 2 import ivy 3 from ivy.functional.frontends.torch.func_wrapper import ( 4 to_ivy_arrays_and_back, 5 to_ivy_shape, 6 ) 7 from ivy.func_wrapper import with_unsupported_dtypes 8 import ivy.functional.frontends.torch as torch_frontend 9 10 11 @to_ivy_arrays_and_back 12 @with_unsupported_dtypes({"2.0.1 and below": ("float16",)}, "torch") 13 def arange( 14 start=0, 15 end=None, 16 step=1, 17 *, 18 out=None, 19 dtype=None, 20 layout=None, 21 device=None, 22 requires_grad=False, 23 ): 24 return ivy.arange(start, end, step, dtype=dtype, device=device, out=out) 25 26 27 @to_ivy_arrays_and_back 28 def as_strided(input, size, stride, storage_offset=None): 29 ind = ivy.array([0], dtype=ivy.int64) 30 for i, (size_i, stride_i) in enumerate(zip(size, stride)): 31 r_size = [1] * len(stride) 32 r_size[i] = -1 33 ind = ind + ivy.reshape(ivy.arange(size_i), r_size) * stride_i 34 if storage_offset: 35 ind = ind + storage_offset 36 # in case the input is a non-contiguous native array, 37 # the return will differ from torch.as_strided 38 if ivy.is_ivy_array(input) and input.base is not None: 39 return ivy.gather(ivy.flatten(input.base), ind) 40 return ivy.gather(ivy.flatten(input), ind) 41 42 43 @to_ivy_arrays_and_back 44 def as_tensor( 45 data, 46 *, 47 dtype=None, 48 device=None, 49 ): 50 if dtype is None: 51 if isinstance(data, int): 52 dtype = ivy.int64 53 elif isinstance(data, float): 54 dtype = torch_frontend.get_default_dtype() 55 elif isinstance(data, (list, tuple)): 56 if all(isinstance(d, int) for d in data): 57 dtype = ivy.int64 58 else: 59 dtype = torch_frontend.get_default_dtype() 60 return ivy.asarray(data, dtype=dtype, device=device) 61 62 63 @to_ivy_arrays_and_back 64 def asarray( 65 obj, 66 *, 67 dtype=None, 68 device=None, 69 copy=None, 70 ): 71 return ivy.asarray(obj, copy=copy, dtype=dtype, device=device) 72 73 74 @to_ivy_arrays_and_back 75 def empty( 76 *args, 77 size=None, 78 out=None, 79 dtype=None, 80 layout=None, 81 device=None, 82 requires_grad=False, 83 pin_memory=False, 84 memory_format=None, 85 ): 86 if args and size: 87 raise TypeError("empty() got multiple values for argument 'shape'") 88 if size is None: 89 size = args[0] if isinstance(args[0], (tuple, list, ivy.Shape)) else args 90 return ivy.empty(shape=size, dtype=dtype, device=device, out=out) 91 92 93 @to_ivy_arrays_and_back 94 def empty_like( 95 input, 96 *, 97 dtype=None, 98 layout=None, 99 device=None, 100 requires_grad=False, 101 memory_format=None, 102 ): 103 ret = ivy.empty_like(input, dtype=dtype, device=device) 104 return ret 105 106 107 @to_ivy_arrays_and_back 108 def eye( 109 n, m=None, *, out=None, dtype=None, layout=None, device=None, requires_grad=False 110 ): 111 return ivy.eye(n, m, dtype=dtype, device=device, out=out) 112 113 114 @to_ivy_arrays_and_back 115 def from_dlpack(ext_tensor): 116 return ivy.from_dlpack(ext_tensor) 117 118 119 @to_ivy_arrays_and_back 120 def from_numpy(data, /): 121 return ivy.asarray(data, dtype=ivy.dtype(data)) 122 123 124 @to_ivy_arrays_and_back 125 def frombuffer( 126 buffer, 127 *, 128 dtype, 129 count=-1, 130 offset=0, 131 requires_grad=False, 132 ): 133 return ivy.frombuffer(buffer, dtype=dtype, count=count, offset=offset) 134 135 136 @to_ivy_arrays_and_back 137 def full( 138 size, 139 fill_value, 140 *, 141 out=None, 142 dtype=None, 143 layout=None, 144 device=None, 145 
requires_grad=None, 146 ): 147 ret = ivy.full(size, fill_value, dtype=dtype, device=device, out=out) 148 return ret 149 150 151 @to_ivy_arrays_and_back 152 def full_like( 153 input, 154 fill_value, 155 *, 156 dtype=None, 157 layout=None, 158 device=None, 159 requires_grad=False, 160 memory_format=None, 161 ): 162 fill_value = ivy.to_scalar(fill_value) 163 return ivy.full_like(input, fill_value, dtype=dtype, device=device) 164 165 166 @to_ivy_arrays_and_back 167 def heaviside(input, values, *, out=None): 168 return ivy.heaviside(input, values, out=out) 169 170 171 @to_ivy_arrays_and_back 172 @with_unsupported_dtypes({"2.0.1 and below": ("float16",)}, "torch") 173 def linspace( 174 start, 175 end, 176 steps, 177 *, 178 out=None, 179 dtype=None, 180 device=None, 181 layout=None, 182 requires_grad=False, 183 ): 184 ret = ivy.linspace(start, end, num=steps, dtype=dtype, device=device, out=out) 185 return ret 186 187 188 @to_ivy_arrays_and_back 189 @with_unsupported_dtypes({"2.0.1 and below": ("float16",)}, "torch") 190 def logspace( 191 start, 192 end, 193 steps, 194 *, 195 base=10.0, 196 out=None, 197 dtype=None, 198 layout=None, 199 device=None, 200 requires_grad=False, 201 ): 202 ret = ivy.logspace( 203 start, end, num=steps, base=base, dtype=dtype, device=device, out=out 204 ) 205 return ret 206 207 208 @to_ivy_shape 209 @to_ivy_arrays_and_back 210 def ones(*args, size=None, out=None, dtype=None, device=None, requires_grad=False): 211 if args and size: 212 raise TypeError("ones() got multiple values for argument 'shape'") 213 if size is None: 214 size = args[0] if isinstance(args[0], (tuple, list, ivy.Shape)) else args 215 return ivy.ones(shape=size, dtype=dtype, device=device, out=out) 216 217 218 @to_ivy_arrays_and_back 219 def ones_like_v_0p3p0_to_0p3p1(input, out=None): 220 return ivy.ones_like(input, out=None) 221 222 223 @to_ivy_arrays_and_back 224 def ones_like_v_0p4p0_and_above( 225 input, 226 *, 227 dtype=None, 228 layout=None, 229 device=None, 230 requires_grad=False, 231 memory_format=None, 232 ): 233 ret = ivy.ones_like(input, dtype=dtype, device=device) 234 return ret 235 236 237 @to_ivy_arrays_and_back 238 @with_unsupported_dtypes({"2.0.1 and below": ("float16",)}, "torch") 239 def range( 240 *args, 241 dtype=None, 242 layout=None, 243 device=None, 244 requires_grad=False, 245 ): 246 if len(args) == 1: 247 end = args[0] 248 start = 0 249 step = 1 250 elif len(args) == 2: 251 end = args[1] 252 start = args[0] 253 step = 1 254 elif len(args) == 3: 255 start, end, step = args 256 else: 257 ivy.utils.assertions.check_true( 258 len(args) == 1 or len(args) == 3, 259 "only 1 or 3 positional arguments are supported", 260 ) 261 range_vec = [] 262 elem = start 263 while 1: 264 range_vec = range_vec + [elem] 265 elem += step 266 if start == end: 267 break 268 if start < end: 269 if elem > end: 270 break 271 else: 272 if elem < end: 273 break 274 return ivy.array(range_vec, dtype=dtype, device=device) 275 276 277 @to_ivy_arrays_and_back 278 def tensor( 279 data, 280 *, 281 dtype=None, 282 device=None, 283 requires_grad=False, 284 pin_memory=False, 285 ): 286 return ivy.array(data, dtype=dtype, device=device) 287 288 289 @to_ivy_shape 290 @to_ivy_arrays_and_back 291 def zeros(*args, size=None, out=None, dtype=None, device=None, requires_grad=False): 292 if args and size: 293 raise TypeError("zeros() got multiple values for argument 'shape'") 294 if size is None: 295 size = args[0] if isinstance(args[0], (tuple, list, ivy.Shape)) else args 296 return ivy.zeros(shape=size, dtype=dtype, 
device=device, out=out) 297 298 299 @to_ivy_arrays_and_back 300 def zeros_like( 301 input, 302 *, 303 dtype=None, 304 layout=None, 305 device=None, 306 requires_grad=False, 307 memory_format=None, 308 ): 309 ret = ivy.zeros_like(input, dtype=dtype, device=device) 310 return ret 311 [end of ivy/functional/frontends/torch/creation_ops.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ivy/functional/frontends/torch/creation_ops.py b/ivy/functional/frontends/torch/creation_ops.py --- a/ivy/functional/frontends/torch/creation_ops.py +++ b/ivy/functional/frontends/torch/creation_ops.py @@ -93,6 +93,24 @@ return ret +@to_ivy_arrays_and_back +def empty_strided( + size, + stride, + *, + dtype=None, + layout=None, + device=None, + requires_grad=False, + pin_memory=False, +): + max_offsets = [(s - 1) * st for s, st in zip(size, stride)] + items = sum(max_offsets) + 1 + empty_array = empty(items, dtype=dtype, device=device) + strided_array = as_strided(empty_array, size, stride) + return strided_array + + @to_ivy_arrays_and_back def eye( n, m=None, *, out=None, dtype=None, layout=None, device=None, requires_grad=False
{"golden_diff": "diff --git a/ivy/functional/frontends/torch/creation_ops.py b/ivy/functional/frontends/torch/creation_ops.py\n--- a/ivy/functional/frontends/torch/creation_ops.py\n+++ b/ivy/functional/frontends/torch/creation_ops.py\n@@ -93,6 +93,24 @@\n return ret\n \n \n+@to_ivy_arrays_and_back\n+def empty_strided(\n+ size,\n+ stride,\n+ *,\n+ dtype=None,\n+ layout=None,\n+ device=None,\n+ requires_grad=False,\n+ pin_memory=False,\n+):\n+ max_offsets = [(s - 1) * st for s, st in zip(size, stride)]\n+ items = sum(max_offsets) + 1\n+ empty_array = empty(items, dtype=dtype, device=device)\n+ strided_array = as_strided(empty_array, size, stride)\n+ return strided_array\n+\n+\n @to_ivy_arrays_and_back\n def eye(\n n, m=None, *, out=None, dtype=None, layout=None, device=None, requires_grad=False\n", "issue": "empty_strided\n\n", "before_files": [{"content": "# local\nimport ivy\nfrom ivy.functional.frontends.torch.func_wrapper import (\n to_ivy_arrays_and_back,\n to_ivy_shape,\n)\nfrom ivy.func_wrapper import with_unsupported_dtypes\nimport ivy.functional.frontends.torch as torch_frontend\n\n\n@to_ivy_arrays_and_back\n@with_unsupported_dtypes({\"2.0.1 and below\": (\"float16\",)}, \"torch\")\ndef arange(\n start=0,\n end=None,\n step=1,\n *,\n out=None,\n dtype=None,\n layout=None,\n device=None,\n requires_grad=False,\n):\n return ivy.arange(start, end, step, dtype=dtype, device=device, out=out)\n\n\n@to_ivy_arrays_and_back\ndef as_strided(input, size, stride, storage_offset=None):\n ind = ivy.array([0], dtype=ivy.int64)\n for i, (size_i, stride_i) in enumerate(zip(size, stride)):\n r_size = [1] * len(stride)\n r_size[i] = -1\n ind = ind + ivy.reshape(ivy.arange(size_i), r_size) * stride_i\n if storage_offset:\n ind = ind + storage_offset\n # in case the input is a non-contiguous native array,\n # the return will differ from torch.as_strided\n if ivy.is_ivy_array(input) and input.base is not None:\n return ivy.gather(ivy.flatten(input.base), ind)\n return ivy.gather(ivy.flatten(input), ind)\n\n\n@to_ivy_arrays_and_back\ndef as_tensor(\n data,\n *,\n dtype=None,\n device=None,\n):\n if dtype is None:\n if isinstance(data, int):\n dtype = ivy.int64\n elif isinstance(data, float):\n dtype = torch_frontend.get_default_dtype()\n elif isinstance(data, (list, tuple)):\n if all(isinstance(d, int) for d in data):\n dtype = ivy.int64\n else:\n dtype = torch_frontend.get_default_dtype()\n return ivy.asarray(data, dtype=dtype, device=device)\n\n\n@to_ivy_arrays_and_back\ndef asarray(\n obj,\n *,\n dtype=None,\n device=None,\n copy=None,\n):\n return ivy.asarray(obj, copy=copy, dtype=dtype, device=device)\n\n\n@to_ivy_arrays_and_back\ndef empty(\n *args,\n size=None,\n out=None,\n dtype=None,\n layout=None,\n device=None,\n requires_grad=False,\n pin_memory=False,\n memory_format=None,\n):\n if args and size:\n raise TypeError(\"empty() got multiple values for argument 'shape'\")\n if size is None:\n size = args[0] if isinstance(args[0], (tuple, list, ivy.Shape)) else args\n return ivy.empty(shape=size, dtype=dtype, device=device, out=out)\n\n\n@to_ivy_arrays_and_back\ndef empty_like(\n input,\n *,\n dtype=None,\n layout=None,\n device=None,\n requires_grad=False,\n memory_format=None,\n):\n ret = ivy.empty_like(input, dtype=dtype, device=device)\n return ret\n\n\n@to_ivy_arrays_and_back\ndef eye(\n n, m=None, *, out=None, dtype=None, layout=None, device=None, requires_grad=False\n):\n return ivy.eye(n, m, dtype=dtype, device=device, out=out)\n\n\n@to_ivy_arrays_and_back\ndef from_dlpack(ext_tensor):\n 
return ivy.from_dlpack(ext_tensor)\n\n\n@to_ivy_arrays_and_back\ndef from_numpy(data, /):\n return ivy.asarray(data, dtype=ivy.dtype(data))\n\n\n@to_ivy_arrays_and_back\ndef frombuffer(\n buffer,\n *,\n dtype,\n count=-1,\n offset=0,\n requires_grad=False,\n):\n return ivy.frombuffer(buffer, dtype=dtype, count=count, offset=offset)\n\n\n@to_ivy_arrays_and_back\ndef full(\n size,\n fill_value,\n *,\n out=None,\n dtype=None,\n layout=None,\n device=None,\n requires_grad=None,\n):\n ret = ivy.full(size, fill_value, dtype=dtype, device=device, out=out)\n return ret\n\n\n@to_ivy_arrays_and_back\ndef full_like(\n input,\n fill_value,\n *,\n dtype=None,\n layout=None,\n device=None,\n requires_grad=False,\n memory_format=None,\n):\n fill_value = ivy.to_scalar(fill_value)\n return ivy.full_like(input, fill_value, dtype=dtype, device=device)\n\n\n@to_ivy_arrays_and_back\ndef heaviside(input, values, *, out=None):\n return ivy.heaviside(input, values, out=out)\n\n\n@to_ivy_arrays_and_back\n@with_unsupported_dtypes({\"2.0.1 and below\": (\"float16\",)}, \"torch\")\ndef linspace(\n start,\n end,\n steps,\n *,\n out=None,\n dtype=None,\n device=None,\n layout=None,\n requires_grad=False,\n):\n ret = ivy.linspace(start, end, num=steps, dtype=dtype, device=device, out=out)\n return ret\n\n\n@to_ivy_arrays_and_back\n@with_unsupported_dtypes({\"2.0.1 and below\": (\"float16\",)}, \"torch\")\ndef logspace(\n start,\n end,\n steps,\n *,\n base=10.0,\n out=None,\n dtype=None,\n layout=None,\n device=None,\n requires_grad=False,\n):\n ret = ivy.logspace(\n start, end, num=steps, base=base, dtype=dtype, device=device, out=out\n )\n return ret\n\n\n@to_ivy_shape\n@to_ivy_arrays_and_back\ndef ones(*args, size=None, out=None, dtype=None, device=None, requires_grad=False):\n if args and size:\n raise TypeError(\"ones() got multiple values for argument 'shape'\")\n if size is None:\n size = args[0] if isinstance(args[0], (tuple, list, ivy.Shape)) else args\n return ivy.ones(shape=size, dtype=dtype, device=device, out=out)\n\n\n@to_ivy_arrays_and_back\ndef ones_like_v_0p3p0_to_0p3p1(input, out=None):\n return ivy.ones_like(input, out=None)\n\n\n@to_ivy_arrays_and_back\ndef ones_like_v_0p4p0_and_above(\n input,\n *,\n dtype=None,\n layout=None,\n device=None,\n requires_grad=False,\n memory_format=None,\n):\n ret = ivy.ones_like(input, dtype=dtype, device=device)\n return ret\n\n\n@to_ivy_arrays_and_back\n@with_unsupported_dtypes({\"2.0.1 and below\": (\"float16\",)}, \"torch\")\ndef range(\n *args,\n dtype=None,\n layout=None,\n device=None,\n requires_grad=False,\n):\n if len(args) == 1:\n end = args[0]\n start = 0\n step = 1\n elif len(args) == 2:\n end = args[1]\n start = args[0]\n step = 1\n elif len(args) == 3:\n start, end, step = args\n else:\n ivy.utils.assertions.check_true(\n len(args) == 1 or len(args) == 3,\n \"only 1 or 3 positional arguments are supported\",\n )\n range_vec = []\n elem = start\n while 1:\n range_vec = range_vec + [elem]\n elem += step\n if start == end:\n break\n if start < end:\n if elem > end:\n break\n else:\n if elem < end:\n break\n return ivy.array(range_vec, dtype=dtype, device=device)\n\n\n@to_ivy_arrays_and_back\ndef tensor(\n data,\n *,\n dtype=None,\n device=None,\n requires_grad=False,\n pin_memory=False,\n):\n return ivy.array(data, dtype=dtype, device=device)\n\n\n@to_ivy_shape\n@to_ivy_arrays_and_back\ndef zeros(*args, size=None, out=None, dtype=None, device=None, requires_grad=False):\n if args and size:\n raise TypeError(\"zeros() got multiple values for argument 
'shape'\")\n if size is None:\n size = args[0] if isinstance(args[0], (tuple, list, ivy.Shape)) else args\n return ivy.zeros(shape=size, dtype=dtype, device=device, out=out)\n\n\n@to_ivy_arrays_and_back\ndef zeros_like(\n input,\n *,\n dtype=None,\n layout=None,\n device=None,\n requires_grad=False,\n memory_format=None,\n):\n ret = ivy.zeros_like(input, dtype=dtype, device=device)\n return ret\n", "path": "ivy/functional/frontends/torch/creation_ops.py"}]}
num_tokens_prompt: 3,334
num_tokens_diff: 242
problem_id: gh_patches_debug_52881
source: rasdani/github-patches
task_type: git_diff
in_source_id: getsentry__sentry-55707
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Unable to edit WHEN conditions from issue alert ### Environment SaaS (https://sentry.io/) ### Steps to Reproduce 1. Create an issue alert with a few WHEN conditions 2. Save it 3. Go to the Alert details page 4. Click on Edit rule 5. Delete all the WHEN conditions 6. Click on Save 7. When you're back to the Alert details page, the WHEN conditions are still there, and the "Updated alert rule" message appears ### Expected Result The users should be able to edit the alert rules ### Actual Result The alert rule stays the same after editing ### Product Area Alerts ### Link _No response_ ### DSN _No response_ ### Version _No response_ </issue> <code> [start of src/sentry/mediators/project_rules/updater.py] 1 from django.db import router 2 from rest_framework.request import Request 3 4 from sentry.mediators.mediator import Mediator 5 from sentry.mediators.param import Param 6 from sentry.models import Actor, Project, Rule 7 8 9 class Updater(Mediator): 10 rule = Param(Rule) 11 name = Param(str, required=False) 12 owner = Param(int, required=False) 13 environment = Param(int, required=False) 14 project = Param(Project) 15 action_match = Param(str, required=False) 16 filter_match = Param(str, required=False) 17 actions = Param(list, required=False) 18 conditions = Param(list, required=False) 19 frequency = Param(int, required=False) 20 request = Param(Request, required=False) 21 using = router.db_for_write(Project) 22 23 def call(self): 24 self._update_name() 25 self._update_owner() 26 self._update_environment() 27 self._update_project() 28 self._update_actions() 29 self._update_action_match() 30 self._update_filter_match() 31 self._update_conditions() 32 self._update_frequency() 33 self.rule.save() 34 return self.rule 35 36 def _update_name(self): 37 if self.name: 38 self.rule.label = self.name 39 40 def _update_owner(self) -> None: 41 self.rule.owner = Actor.objects.get(id=self.owner) if self.owner else None 42 43 def _update_environment(self): 44 self.rule.environment_id = self.environment 45 46 def _update_project(self): 47 if self.project: 48 self.rule.project = self.project 49 50 def _update_actions(self): 51 if self.actions: 52 self.rule.data["actions"] = self.actions 53 54 def _update_action_match(self): 55 if self.action_match: 56 self.rule.data["action_match"] = self.action_match 57 58 def _update_filter_match(self): 59 if self.filter_match: 60 self.rule.data["filter_match"] = self.filter_match 61 62 def _update_conditions(self): 63 if self.conditions: 64 self.rule.data["conditions"] = self.conditions 65 66 def _update_frequency(self): 67 if self.frequency: 68 self.rule.data["frequency"] = self.frequency 69 [end of src/sentry/mediators/project_rules/updater.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/sentry/mediators/project_rules/updater.py b/src/sentry/mediators/project_rules/updater.py --- a/src/sentry/mediators/project_rules/updater.py +++ b/src/sentry/mediators/project_rules/updater.py @@ -60,8 +60,7 @@ self.rule.data["filter_match"] = self.filter_match def _update_conditions(self): - if self.conditions: - self.rule.data["conditions"] = self.conditions + self.rule.data["conditions"] = self.conditions or [] def _update_frequency(self): if self.frequency:
{"golden_diff": "diff --git a/src/sentry/mediators/project_rules/updater.py b/src/sentry/mediators/project_rules/updater.py\n--- a/src/sentry/mediators/project_rules/updater.py\n+++ b/src/sentry/mediators/project_rules/updater.py\n@@ -60,8 +60,7 @@\n self.rule.data[\"filter_match\"] = self.filter_match\n \n def _update_conditions(self):\n- if self.conditions:\n- self.rule.data[\"conditions\"] = self.conditions\n+ self.rule.data[\"conditions\"] = self.conditions or []\n \n def _update_frequency(self):\n if self.frequency:\n", "issue": "Unable to edit WHEN conditions from issue alert\n### Environment\n\nSaaS (https://sentry.io/)\n\n### Steps to Reproduce\n\n1. Create an issue alert with a few WHEN conditions\r\n2. Save it\r\n3. Go to the Alert details page\r\n4. Click on Edit rule\r\n5. Delete all the WHEN conditions\r\n6. Click on Save\r\n7. When you're back to the Alert details page, the WHEN conditions are still there, and the \"Updated alert rule\" message appears\n\n### Expected Result\n\nThe users should be able to edit the alert rules\n\n### Actual Result\n\nThe alert rule stays the same after editing\n\n### Product Area\n\nAlerts\n\n### Link\n\n_No response_\n\n### DSN\n\n_No response_\n\n### Version\n\n_No response_\n", "before_files": [{"content": "from django.db import router\nfrom rest_framework.request import Request\n\nfrom sentry.mediators.mediator import Mediator\nfrom sentry.mediators.param import Param\nfrom sentry.models import Actor, Project, Rule\n\n\nclass Updater(Mediator):\n rule = Param(Rule)\n name = Param(str, required=False)\n owner = Param(int, required=False)\n environment = Param(int, required=False)\n project = Param(Project)\n action_match = Param(str, required=False)\n filter_match = Param(str, required=False)\n actions = Param(list, required=False)\n conditions = Param(list, required=False)\n frequency = Param(int, required=False)\n request = Param(Request, required=False)\n using = router.db_for_write(Project)\n\n def call(self):\n self._update_name()\n self._update_owner()\n self._update_environment()\n self._update_project()\n self._update_actions()\n self._update_action_match()\n self._update_filter_match()\n self._update_conditions()\n self._update_frequency()\n self.rule.save()\n return self.rule\n\n def _update_name(self):\n if self.name:\n self.rule.label = self.name\n\n def _update_owner(self) -> None:\n self.rule.owner = Actor.objects.get(id=self.owner) if self.owner else None\n\n def _update_environment(self):\n self.rule.environment_id = self.environment\n\n def _update_project(self):\n if self.project:\n self.rule.project = self.project\n\n def _update_actions(self):\n if self.actions:\n self.rule.data[\"actions\"] = self.actions\n\n def _update_action_match(self):\n if self.action_match:\n self.rule.data[\"action_match\"] = self.action_match\n\n def _update_filter_match(self):\n if self.filter_match:\n self.rule.data[\"filter_match\"] = self.filter_match\n\n def _update_conditions(self):\n if self.conditions:\n self.rule.data[\"conditions\"] = self.conditions\n\n def _update_frequency(self):\n if self.frequency:\n self.rule.data[\"frequency\"] = self.frequency\n", "path": "src/sentry/mediators/project_rules/updater.py"}]}
num_tokens_prompt: 1,282
num_tokens_diff: 132
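The golden diff for this row amounts to replacing a truthiness guard in `_update_conditions` with an unconditional assignment. A minimal sketch of why that matters — `rule_data` is a plain dict standing in for the real `Rule.data`, and the function names are illustrative, not Sentry's actual code:

```python
# Hypothetical stand-in for Rule.data; not the real Sentry model.
rule_data = {"conditions": [{"id": "sentry.rules.conditions.first_seen_event"}]}

def update_conditions_old(data, conditions):
    # Buggy behavior: an empty list is falsy, so deleting every
    # WHEN condition leaves the stored conditions untouched.
    if conditions:
        data["conditions"] = conditions

def update_conditions_fixed(data, conditions):
    # Patched behavior: always assign, falling back to an empty list.
    data["conditions"] = conditions or []

update_conditions_old(rule_data, [])
print(rule_data["conditions"])    # old condition survives -> the reported bug

update_conditions_fixed(rule_data, [])
print(rule_data["conditions"])    # [] -> conditions are actually removed
```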
problem_id: gh_patches_debug_36777
source: rasdani/github-patches
task_type: git_diff
in_source_id: tensorflow__addons-769
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> WeightNormalization with RNNs: shape issue **System information** - OS Platform and Distribution (e.g., Linux Ubuntu 16.04): Google Colab - TensorFlow version and how it was installed (source or binary): 2.0.0 binary - TensorFlow-Addons version and how it was installed (source or binary): 0.6.0 binary - Python version: 3.6.8 - Is GPU used? (yes/no): yes **Describe the bug** WeightNormalization layer wrapper cannot be used with RNNs if the input sequence has undetermined length. See code for errors. **Code to reproduce the issue** ``` import tensorflow as tf import tensorflow_addons as tfa n_features = 3 seq_length = None rnn_units = 4 input_layer = tf.keras.layers.Input(shape=(seq_length, n_features)) rnn_layer = tf.keras.layers.SimpleRNN(rnn_units) dense_layer = tf.keras.layers.Dense(1) wn_rnn_layer = tfa.layers.WeightNormalization(rnn_layer) wn_model = tf.keras.models.Sequential(layers=(input_layer, wn_rnn_layer, dense_layer)) ``` yields ``` ValueError: as_list() is not defined on an unknown TensorShape. ``` Note that: 1. The same code without using `WeightNormalization` runs. 2. Interestingly, adding the lines ``` batch_size = 1 input_layer = tf.keras.layers.Input(batch_shape=(batch_size, seq_length, n_features)) rnn_layer = tf.keras.layers.SimpleRNN(rnn_units, return_sequences=True) dense_layer = tf.keras.layers.Dense(1) wn_rnn_layer = tfa.layers.WeightNormalization(rnn_layer) wn_model = tf.keras.models.Sequential(layers=(input_layer, wn_rnn_layer, dense_layer)) ``` gives ``` IndexError: list assignment index out of range ``` instead. **Other info / logs** </issue> <code> [start of tensorflow_addons/layers/wrappers.py] 1 # Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 # ============================================================================= 15 from __future__ import absolute_import 16 from __future__ import division 17 from __future__ import print_function 18 19 import tensorflow as tf 20 21 22 @tf.keras.utils.register_keras_serializable(package='Addons') 23 class WeightNormalization(tf.keras.layers.Wrapper): 24 """This wrapper reparameterizes a layer by decoupling the weight's 25 magnitude and direction. 26 27 This speeds up convergence by improving the 28 conditioning of the optimization problem. 29 Weight Normalization: A Simple Reparameterization to Accelerate 30 Training of Deep Neural Networks: https://arxiv.org/abs/1602.07868 31 Tim Salimans, Diederik P. Kingma (2016) 32 WeightNormalization wrapper works for keras and tf layers. 
33 ```python 34 net = WeightNormalization( 35 tf.keras.layers.Conv2D(2, 2, activation='relu'), 36 input_shape=(32, 32, 3), 37 data_init=True)(x) 38 net = WeightNormalization( 39 tf.keras.layers.Conv2D(16, 5, activation='relu'), 40 data_init=True)(net) 41 net = WeightNormalization( 42 tf.keras.layers.Dense(120, activation='relu'), 43 data_init=True)(net) 44 net = WeightNormalization( 45 tf.keras.layers.Dense(n_classes), 46 data_init=True)(net) 47 ``` 48 Arguments: 49 layer: a layer instance. 50 data_init: If `True` use data dependent variable initialization 51 Raises: 52 ValueError: If not initialized with a `Layer` instance. 53 ValueError: If `Layer` does not contain a `kernel` of weights 54 NotImplementedError: If `data_init` is True and running graph execution 55 """ 56 57 def __init__(self, layer, data_init=True, **kwargs): 58 super(WeightNormalization, self).__init__(layer, **kwargs) 59 self.data_init = data_init 60 self._track_trackable(layer, name='layer') 61 62 def build(self, input_shape): 63 """Build `Layer`""" 64 input_shape = tf.TensorShape(input_shape).as_list() 65 self.input_spec = tf.keras.layers.InputSpec( 66 shape=[None] + input_shape[1:]) 67 68 if not self.layer.built: 69 self.layer.build(input_shape) 70 71 if not hasattr(self.layer, 'kernel'): 72 raise ValueError('`WeightNormalization` must wrap a layer that' 73 ' contains a `kernel` for weights') 74 75 # The kernel's filter or unit dimension is -1 76 self.layer_depth = int(self.layer.kernel.shape[-1]) 77 self.kernel_norm_axes = list(range(self.layer.kernel.shape.rank - 1)) 78 79 self.g = self.add_weight( 80 name='g', 81 shape=(self.layer_depth,), 82 initializer='ones', 83 dtype=self.layer.kernel.dtype, 84 trainable=True) 85 self.v = self.layer.kernel 86 87 self._initialized = self.add_weight( 88 name='initialized', 89 shape=None, 90 initializer='zeros', 91 dtype=tf.dtypes.bool, 92 trainable=False) 93 94 if self.data_init: 95 # Used for data initialization in self._data_dep_init. 96 with tf.name_scope('data_dep_init'): 97 layer_config = tf.keras.layers.serialize(self.layer) 98 layer_config['config']['trainable'] = False 99 self._naked_clone_layer = tf.keras.layers.deserialize( 100 layer_config) 101 self._naked_clone_layer.build(input_shape) 102 self._naked_clone_layer.set_weights(self.layer.get_weights()) 103 self._naked_clone_layer.activation = None 104 105 self.built = True 106 107 def call(self, inputs): 108 """Call `Layer`""" 109 110 def _do_nothing(): 111 return tf.identity(self.g) 112 113 def _update_weights(): 114 # Ensure we read `self.g` after _update_weights. 115 with tf.control_dependencies(self._initialize_weights(inputs)): 116 return tf.identity(self.g) 117 118 g = tf.cond(self._initialized, _do_nothing, _update_weights) 119 120 with tf.name_scope('compute_weights'): 121 # Replace kernel by normalized weight variable. 122 self.layer.kernel = tf.nn.l2_normalize( 123 self.v, axis=self.kernel_norm_axes) * g 124 125 # Ensure we calculate result after updating kernel. 126 update_kernel = tf.identity(self.layer.kernel) 127 with tf.control_dependencies([update_kernel]): 128 outputs = self.layer(inputs) 129 return outputs 130 131 def compute_output_shape(self, input_shape): 132 return tf.TensorShape( 133 self.layer.compute_output_shape(input_shape).as_list()) 134 135 def _initialize_weights(self, inputs): 136 """Initialize weight g. 137 138 The initial value of g could either from the initial value in v, 139 or by the input value if self.data_init is True. 
140 """ 141 with tf.control_dependencies([ 142 tf.debugging.assert_equal( # pylint: disable=bad-continuation 143 self._initialized, 144 False, 145 message='The layer has been initialized.') 146 ]): 147 if self.data_init: 148 assign_tensors = self._data_dep_init(inputs) 149 else: 150 assign_tensors = self._init_norm() 151 assign_tensors.append(self._initialized.assign(True)) 152 return assign_tensors 153 154 def _init_norm(self): 155 """Set the weight g with the norm of the weight vector.""" 156 with tf.name_scope('init_norm'): 157 v_flat = tf.reshape(self.v, [-1, self.layer_depth]) 158 v_norm = tf.linalg.norm(v_flat, axis=0) 159 g_tensor = self.g.assign(tf.reshape(v_norm, (self.layer_depth,))) 160 return [g_tensor] 161 162 def _data_dep_init(self, inputs): 163 """Data dependent initialization.""" 164 with tf.name_scope('data_dep_init'): 165 # Generate data dependent init values 166 x_init = self._naked_clone_layer(inputs) 167 data_norm_axes = list(range(x_init.shape.rank - 1)) 168 m_init, v_init = tf.nn.moments(x_init, data_norm_axes) 169 scale_init = 1. / tf.math.sqrt(v_init + 1e-10) 170 171 # Assign data dependent init values 172 g_tensor = self.g.assign(self.g * scale_init) 173 if hasattr(self.layer, 'bias') and self.layer.bias is not None: 174 bias_tensor = self.layer.bias.assign(-m_init * scale_init) 175 return [g_tensor, bias_tensor] 176 else: 177 return [g_tensor] 178 179 def get_config(self): 180 config = {'data_init': self.data_init} 181 base_config = super(WeightNormalization, self).get_config() 182 return dict(list(base_config.items()) + list(config.items())) 183 [end of tensorflow_addons/layers/wrappers.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/tensorflow_addons/layers/wrappers.py b/tensorflow_addons/layers/wrappers.py --- a/tensorflow_addons/layers/wrappers.py +++ b/tensorflow_addons/layers/wrappers.py @@ -58,31 +58,34 @@ super(WeightNormalization, self).__init__(layer, **kwargs) self.data_init = data_init self._track_trackable(layer, name='layer') + self.is_rnn = isinstance(self.layer, tf.keras.layers.RNN) def build(self, input_shape): """Build `Layer`""" - input_shape = tf.TensorShape(input_shape).as_list() + input_shape = tf.TensorShape(input_shape) self.input_spec = tf.keras.layers.InputSpec( shape=[None] + input_shape[1:]) if not self.layer.built: self.layer.build(input_shape) - if not hasattr(self.layer, 'kernel'): + kernel_layer = self.layer.cell if self.is_rnn else self.layer + + if not hasattr(kernel_layer, 'kernel'): raise ValueError('`WeightNormalization` must wrap a layer that' ' contains a `kernel` for weights') # The kernel's filter or unit dimension is -1 - self.layer_depth = int(self.layer.kernel.shape[-1]) - self.kernel_norm_axes = list(range(self.layer.kernel.shape.rank - 1)) + self.layer_depth = int(kernel_layer.kernel.shape[-1]) + self.kernel_norm_axes = list(range(kernel_layer.kernel.shape.rank - 1)) self.g = self.add_weight( name='g', shape=(self.layer_depth,), initializer='ones', - dtype=self.layer.kernel.dtype, + dtype=kernel_layer.kernel.dtype, trainable=True) - self.v = self.layer.kernel + self.v = kernel_layer.kernel self._initialized = self.add_weight( name='initialized', @@ -100,7 +103,10 @@ layer_config) self._naked_clone_layer.build(input_shape) self._naked_clone_layer.set_weights(self.layer.get_weights()) - self._naked_clone_layer.activation = None + if self.is_rnn: + self._naked_clone_layer.cell.activation = None + else: + self._naked_clone_layer.activation = None self.built = True
{"golden_diff": "diff --git a/tensorflow_addons/layers/wrappers.py b/tensorflow_addons/layers/wrappers.py\n--- a/tensorflow_addons/layers/wrappers.py\n+++ b/tensorflow_addons/layers/wrappers.py\n@@ -58,31 +58,34 @@\n super(WeightNormalization, self).__init__(layer, **kwargs)\n self.data_init = data_init\n self._track_trackable(layer, name='layer')\n+ self.is_rnn = isinstance(self.layer, tf.keras.layers.RNN)\n \n def build(self, input_shape):\n \"\"\"Build `Layer`\"\"\"\n- input_shape = tf.TensorShape(input_shape).as_list()\n+ input_shape = tf.TensorShape(input_shape)\n self.input_spec = tf.keras.layers.InputSpec(\n shape=[None] + input_shape[1:])\n \n if not self.layer.built:\n self.layer.build(input_shape)\n \n- if not hasattr(self.layer, 'kernel'):\n+ kernel_layer = self.layer.cell if self.is_rnn else self.layer\n+\n+ if not hasattr(kernel_layer, 'kernel'):\n raise ValueError('`WeightNormalization` must wrap a layer that'\n ' contains a `kernel` for weights')\n \n # The kernel's filter or unit dimension is -1\n- self.layer_depth = int(self.layer.kernel.shape[-1])\n- self.kernel_norm_axes = list(range(self.layer.kernel.shape.rank - 1))\n+ self.layer_depth = int(kernel_layer.kernel.shape[-1])\n+ self.kernel_norm_axes = list(range(kernel_layer.kernel.shape.rank - 1))\n \n self.g = self.add_weight(\n name='g',\n shape=(self.layer_depth,),\n initializer='ones',\n- dtype=self.layer.kernel.dtype,\n+ dtype=kernel_layer.kernel.dtype,\n trainable=True)\n- self.v = self.layer.kernel\n+ self.v = kernel_layer.kernel\n \n self._initialized = self.add_weight(\n name='initialized',\n@@ -100,7 +103,10 @@\n layer_config)\n self._naked_clone_layer.build(input_shape)\n self._naked_clone_layer.set_weights(self.layer.get_weights())\n- self._naked_clone_layer.activation = None\n+ if self.is_rnn:\n+ self._naked_clone_layer.cell.activation = None\n+ else:\n+ self._naked_clone_layer.activation = None\n \n self.built = True\n", "issue": "WeightNormalization with RNNs: shape issue\n**System information**\r\n- OS Platform and Distribution (e.g., Linux Ubuntu 16.04): Google Colab\r\n- TensorFlow version and how it was installed (source or binary): 2.0.0 binary\r\n- TensorFlow-Addons version and how it was installed (source or binary): 0.6.0 binary\r\n- Python version: 3.6.8\r\n- Is GPU used? (yes/no): yes\r\n\r\n**Describe the bug**\r\n\r\nWeightNormalization layer wrapper cannot be used with RNNs if the input sequence has undetermined length. See code for errors.\r\n\r\n**Code to reproduce the issue**\r\n\r\n```\r\nimport tensorflow as tf\r\nimport tensorflow_addons as tfa\r\n\r\nn_features = 3\r\nseq_length = None\r\nrnn_units = 4\r\n\r\ninput_layer = tf.keras.layers.Input(shape=(seq_length, n_features))\r\nrnn_layer = tf.keras.layers.SimpleRNN(rnn_units)\r\ndense_layer = tf.keras.layers.Dense(1)\r\nwn_rnn_layer = tfa.layers.WeightNormalization(rnn_layer)\r\nwn_model = tf.keras.models.Sequential(layers=(input_layer, wn_rnn_layer, dense_layer))\r\n```\r\nyields\r\n```\r\nValueError: as_list() is not defined on an unknown TensorShape.\r\n```\r\n\r\nNote that:\r\n1. The same code without using `WeightNormalization` runs.\r\n2. 
Interestingly, adding the lines\r\n```\r\nbatch_size = 1\r\ninput_layer = tf.keras.layers.Input(batch_shape=(batch_size, seq_length, n_features))\r\nrnn_layer = tf.keras.layers.SimpleRNN(rnn_units, return_sequences=True)\r\ndense_layer = tf.keras.layers.Dense(1)\r\nwn_rnn_layer = tfa.layers.WeightNormalization(rnn_layer)\r\nwn_model = tf.keras.models.Sequential(layers=(input_layer, wn_rnn_layer, dense_layer))\r\n```\r\ngives\r\n```\r\nIndexError: list assignment index out of range\r\n```\r\ninstead.\r\n\r\n**Other info / logs**\r\n\n", "before_files": [{"content": "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\n\[email protected]_keras_serializable(package='Addons')\nclass WeightNormalization(tf.keras.layers.Wrapper):\n \"\"\"This wrapper reparameterizes a layer by decoupling the weight's\n magnitude and direction.\n\n This speeds up convergence by improving the\n conditioning of the optimization problem.\n Weight Normalization: A Simple Reparameterization to Accelerate\n Training of Deep Neural Networks: https://arxiv.org/abs/1602.07868\n Tim Salimans, Diederik P. 
Kingma (2016)\n WeightNormalization wrapper works for keras and tf layers.\n ```python\n net = WeightNormalization(\n tf.keras.layers.Conv2D(2, 2, activation='relu'),\n input_shape=(32, 32, 3),\n data_init=True)(x)\n net = WeightNormalization(\n tf.keras.layers.Conv2D(16, 5, activation='relu'),\n data_init=True)(net)\n net = WeightNormalization(\n tf.keras.layers.Dense(120, activation='relu'),\n data_init=True)(net)\n net = WeightNormalization(\n tf.keras.layers.Dense(n_classes),\n data_init=True)(net)\n ```\n Arguments:\n layer: a layer instance.\n data_init: If `True` use data dependent variable initialization\n Raises:\n ValueError: If not initialized with a `Layer` instance.\n ValueError: If `Layer` does not contain a `kernel` of weights\n NotImplementedError: If `data_init` is True and running graph execution\n \"\"\"\n\n def __init__(self, layer, data_init=True, **kwargs):\n super(WeightNormalization, self).__init__(layer, **kwargs)\n self.data_init = data_init\n self._track_trackable(layer, name='layer')\n\n def build(self, input_shape):\n \"\"\"Build `Layer`\"\"\"\n input_shape = tf.TensorShape(input_shape).as_list()\n self.input_spec = tf.keras.layers.InputSpec(\n shape=[None] + input_shape[1:])\n\n if not self.layer.built:\n self.layer.build(input_shape)\n\n if not hasattr(self.layer, 'kernel'):\n raise ValueError('`WeightNormalization` must wrap a layer that'\n ' contains a `kernel` for weights')\n\n # The kernel's filter or unit dimension is -1\n self.layer_depth = int(self.layer.kernel.shape[-1])\n self.kernel_norm_axes = list(range(self.layer.kernel.shape.rank - 1))\n\n self.g = self.add_weight(\n name='g',\n shape=(self.layer_depth,),\n initializer='ones',\n dtype=self.layer.kernel.dtype,\n trainable=True)\n self.v = self.layer.kernel\n\n self._initialized = self.add_weight(\n name='initialized',\n shape=None,\n initializer='zeros',\n dtype=tf.dtypes.bool,\n trainable=False)\n\n if self.data_init:\n # Used for data initialization in self._data_dep_init.\n with tf.name_scope('data_dep_init'):\n layer_config = tf.keras.layers.serialize(self.layer)\n layer_config['config']['trainable'] = False\n self._naked_clone_layer = tf.keras.layers.deserialize(\n layer_config)\n self._naked_clone_layer.build(input_shape)\n self._naked_clone_layer.set_weights(self.layer.get_weights())\n self._naked_clone_layer.activation = None\n\n self.built = True\n\n def call(self, inputs):\n \"\"\"Call `Layer`\"\"\"\n\n def _do_nothing():\n return tf.identity(self.g)\n\n def _update_weights():\n # Ensure we read `self.g` after _update_weights.\n with tf.control_dependencies(self._initialize_weights(inputs)):\n return tf.identity(self.g)\n\n g = tf.cond(self._initialized, _do_nothing, _update_weights)\n\n with tf.name_scope('compute_weights'):\n # Replace kernel by normalized weight variable.\n self.layer.kernel = tf.nn.l2_normalize(\n self.v, axis=self.kernel_norm_axes) * g\n\n # Ensure we calculate result after updating kernel.\n update_kernel = tf.identity(self.layer.kernel)\n with tf.control_dependencies([update_kernel]):\n outputs = self.layer(inputs)\n return outputs\n\n def compute_output_shape(self, input_shape):\n return tf.TensorShape(\n self.layer.compute_output_shape(input_shape).as_list())\n\n def _initialize_weights(self, inputs):\n \"\"\"Initialize weight g.\n\n The initial value of g could either from the initial value in v,\n or by the input value if self.data_init is True.\n \"\"\"\n with tf.control_dependencies([\n tf.debugging.assert_equal( # pylint: disable=bad-continuation\n 
self._initialized,\n False,\n message='The layer has been initialized.')\n ]):\n if self.data_init:\n assign_tensors = self._data_dep_init(inputs)\n else:\n assign_tensors = self._init_norm()\n assign_tensors.append(self._initialized.assign(True))\n return assign_tensors\n\n def _init_norm(self):\n \"\"\"Set the weight g with the norm of the weight vector.\"\"\"\n with tf.name_scope('init_norm'):\n v_flat = tf.reshape(self.v, [-1, self.layer_depth])\n v_norm = tf.linalg.norm(v_flat, axis=0)\n g_tensor = self.g.assign(tf.reshape(v_norm, (self.layer_depth,)))\n return [g_tensor]\n\n def _data_dep_init(self, inputs):\n \"\"\"Data dependent initialization.\"\"\"\n with tf.name_scope('data_dep_init'):\n # Generate data dependent init values\n x_init = self._naked_clone_layer(inputs)\n data_norm_axes = list(range(x_init.shape.rank - 1))\n m_init, v_init = tf.nn.moments(x_init, data_norm_axes)\n scale_init = 1. / tf.math.sqrt(v_init + 1e-10)\n\n # Assign data dependent init values\n g_tensor = self.g.assign(self.g * scale_init)\n if hasattr(self.layer, 'bias') and self.layer.bias is not None:\n bias_tensor = self.layer.bias.assign(-m_init * scale_init)\n return [g_tensor, bias_tensor]\n else:\n return [g_tensor]\n\n def get_config(self):\n config = {'data_init': self.data_init}\n base_config = super(WeightNormalization, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n", "path": "tensorflow_addons/layers/wrappers.py"}]}
num_tokens_prompt: 2,930
num_tokens_diff: 520
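The patch in this row does two things: it stops forcing the (possibly unknown) input `TensorShape` through `.as_list()`, and it reads the `kernel` from `layer.cell` when the wrapped layer is an RNN. A small sketch of the second point, assuming a working TensorFlow 2.x install — this is not the addons wrapper itself, just the lookup rule the fix relies on:

```python
import tensorflow as tf

rnn = tf.keras.layers.SimpleRNN(4)
rnn.build((None, None, 3))        # unknown sequence length is fine here
dense = tf.keras.layers.Dense(4)
dense.build((None, 3))

for layer in (rnn, dense):
    # Core idea of the fix: RNN layers keep their kernel on the wrapped cell,
    # so the wrapper must look one level deeper than for e.g. Dense/Conv layers.
    is_rnn = isinstance(layer, tf.keras.layers.RNN)
    kernel_owner = layer.cell if is_rnn else layer
    print(type(layer).__name__, kernel_owner.kernel.shape)
```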
problem_id: gh_patches_debug_4967
source: rasdani/github-patches
task_type: git_diff
in_source_id: aws__aws-cli-1894
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> aws cloudformation create-change-set with 'template-url' broken ``` $ aws --region eu-west-1 cloudformation create-change-set --change-set-name test --stack-name autobuild --template-url https://s3-eu-west-1.amazonaws.com/BUCKET/TEMPLATE.json --parameters ... --capabilities CAPABILITY_IAM Error parsing parameter '--template-url': Unable to retrieve https://s3-eu-west-1.amazonaws.com/BUCKET/TEMPLATE.json: received non 200 status code of 403 ``` The bucket is not public, and access is controlled via IAM. </issue> <code> [start of awscli/paramfile.py] 1 # Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"). You 4 # may not use this file except in compliance with the License. A copy of 5 # the License is located at 6 # 7 # http://aws.amazon.com/apache2.0/ 8 # 9 # or in the "license" file accompanying this file. This file is 10 # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF 11 # ANY KIND, either express or implied. See the License for the specific 12 # language governing permissions and limitations under the License. 13 import logging 14 import os 15 16 from botocore.vendored import requests 17 from awscli.compat import six 18 19 from awscli.compat import compat_open 20 21 22 logger = logging.getLogger(__name__) 23 24 # These are special cased arguments that do _not_ get the 25 # special param file processing. This is typically because it 26 # refers to an actual URI of some sort and we don't want to actually 27 # download the content (i.e TemplateURL in cloudformation). 28 PARAMFILE_DISABLED = set([ 29 'apigateway.put-integration.uri', 30 'cloudformation.create-stack.template-url', 31 'cloudformation.update-stack.template-url', 32 'cloudformation.validate-template.template-url', 33 'cloudformation.estimate-template-cost.template-url', 34 35 'cloudformation.create-stack.stack-policy-url', 36 'cloudformation.update-stack.stack-policy-url', 37 'cloudformation.set-stack-policy.stack-policy-url', 38 39 'cloudformation.update-stack.stack-policy-during-update-url', 40 # We will want to change the event name to ``s3`` as opposed to 41 # custom in the near future along with ``s3`` to ``s3api``. 42 'custom.cp.website-redirect', 43 'custom.mv.website-redirect', 44 'custom.sync.website-redirect', 45 46 'iam.create-open-id-connect-provider.url', 47 48 'machinelearning.predict.predict-endpoint', 49 50 'sqs.add-permission.queue-url', 51 'sqs.change-message-visibility.queue-url', 52 'sqs.change-message-visibility-batch.queue-url', 53 'sqs.delete-message.queue-url', 54 'sqs.delete-message-batch.queue-url', 55 'sqs.delete-queue.queue-url', 56 'sqs.get-queue-attributes.queue-url', 57 'sqs.list-dead-letter-source-queues.queue-url', 58 'sqs.receive-message.queue-url', 59 'sqs.remove-permission.queue-url', 60 'sqs.send-message.queue-url', 61 'sqs.send-message-batch.queue-url', 62 'sqs.set-queue-attributes.queue-url', 63 'sqs.purge-queue.queue-url', 64 65 's3.copy-object.website-redirect-location', 66 's3.create-multipart-upload.website-redirect-location', 67 's3.put-object.website-redirect-location', 68 69 # Double check that this has been renamed! 70 'sns.subscribe.notification-endpoint', 71 ]) 72 73 74 class ResourceLoadingError(Exception): 75 pass 76 77 78 def get_paramfile(path): 79 """Load parameter based on a resource URI. 
80 81 It is possible to pass parameters to operations by referring 82 to files or URI's. If such a reference is detected, this 83 function attempts to retrieve the data from the file or URI 84 and returns it. If there are any errors or if the ``path`` 85 does not appear to refer to a file or URI, a ``None`` is 86 returned. 87 88 :type path: str 89 :param path: The resource URI, e.g. file://foo.txt. This value 90 may also be a non resource URI, in which case ``None`` is returned. 91 92 :return: The loaded value associated with the resource URI. 93 If the provided ``path`` is not a resource URI, then a 94 value of ``None`` is returned. 95 96 """ 97 data = None 98 if isinstance(path, six.string_types): 99 for prefix, function_spec in PREFIX_MAP.items(): 100 if path.startswith(prefix): 101 function, kwargs = function_spec 102 data = function(prefix, path, **kwargs) 103 return data 104 105 106 def get_file(prefix, path, mode): 107 file_path = os.path.expandvars(os.path.expanduser(path[len(prefix):])) 108 try: 109 with compat_open(file_path, mode) as f: 110 return f.read() 111 except UnicodeDecodeError: 112 raise ResourceLoadingError( 113 'Unable to load paramfile (%s), text contents could ' 114 'not be decoded. If this is a binary file, please use the ' 115 'fileb:// prefix instead of the file:// prefix.' % file_path) 116 except (OSError, IOError) as e: 117 raise ResourceLoadingError('Unable to load paramfile %s: %s' % ( 118 path, e)) 119 120 121 def get_uri(prefix, uri): 122 try: 123 r = requests.get(uri) 124 if r.status_code == 200: 125 return r.text 126 else: 127 raise ResourceLoadingError( 128 "received non 200 status code of %s" % ( 129 r.status_code)) 130 except Exception as e: 131 raise ResourceLoadingError('Unable to retrieve %s: %s' % (uri, e)) 132 133 134 PREFIX_MAP = { 135 'file://': (get_file, {'mode': 'r'}), 136 'fileb://': (get_file, {'mode': 'rb'}), 137 'http://': (get_uri, {}), 138 'https://': (get_uri, {}), 139 } 140 [end of awscli/paramfile.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/awscli/paramfile.py b/awscli/paramfile.py --- a/awscli/paramfile.py +++ b/awscli/paramfile.py @@ -29,6 +29,7 @@ 'apigateway.put-integration.uri', 'cloudformation.create-stack.template-url', 'cloudformation.update-stack.template-url', + 'cloudformation.create-change-set.template-url', 'cloudformation.validate-template.template-url', 'cloudformation.estimate-template-cost.template-url',
{"golden_diff": "diff --git a/awscli/paramfile.py b/awscli/paramfile.py\n--- a/awscli/paramfile.py\n+++ b/awscli/paramfile.py\n@@ -29,6 +29,7 @@\n 'apigateway.put-integration.uri',\n 'cloudformation.create-stack.template-url',\n 'cloudformation.update-stack.template-url',\n+ 'cloudformation.create-change-set.template-url',\n 'cloudformation.validate-template.template-url',\n 'cloudformation.estimate-template-cost.template-url',\n", "issue": "aws cloudformation create-change-set with 'template-url' broken\n```\n$ aws --region eu-west-1 cloudformation create-change-set --change-set-name test --stack-name autobuild --template-url https://s3-eu-west-1.amazonaws.com/BUCKET/TEMPLATE.json --parameters ... --capabilities CAPABILITY_IAM\n\nError parsing parameter '--template-url': Unable to retrieve https://s3-eu-west-1.amazonaws.com/BUCKET/TEMPLATE.json: received non 200 status code of 403\n```\n\nThe bucket is not public, and access is controlled via IAM.\n\n", "before_files": [{"content": "# Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\nimport logging\nimport os\n\nfrom botocore.vendored import requests\nfrom awscli.compat import six\n\nfrom awscli.compat import compat_open\n\n\nlogger = logging.getLogger(__name__)\n\n# These are special cased arguments that do _not_ get the\n# special param file processing. 
This is typically because it\n# refers to an actual URI of some sort and we don't want to actually\n# download the content (i.e TemplateURL in cloudformation).\nPARAMFILE_DISABLED = set([\n 'apigateway.put-integration.uri',\n 'cloudformation.create-stack.template-url',\n 'cloudformation.update-stack.template-url',\n 'cloudformation.validate-template.template-url',\n 'cloudformation.estimate-template-cost.template-url',\n\n 'cloudformation.create-stack.stack-policy-url',\n 'cloudformation.update-stack.stack-policy-url',\n 'cloudformation.set-stack-policy.stack-policy-url',\n\n 'cloudformation.update-stack.stack-policy-during-update-url',\n # We will want to change the event name to ``s3`` as opposed to\n # custom in the near future along with ``s3`` to ``s3api``.\n 'custom.cp.website-redirect',\n 'custom.mv.website-redirect',\n 'custom.sync.website-redirect',\n\n 'iam.create-open-id-connect-provider.url',\n\n 'machinelearning.predict.predict-endpoint',\n\n 'sqs.add-permission.queue-url',\n 'sqs.change-message-visibility.queue-url',\n 'sqs.change-message-visibility-batch.queue-url',\n 'sqs.delete-message.queue-url',\n 'sqs.delete-message-batch.queue-url',\n 'sqs.delete-queue.queue-url',\n 'sqs.get-queue-attributes.queue-url',\n 'sqs.list-dead-letter-source-queues.queue-url',\n 'sqs.receive-message.queue-url',\n 'sqs.remove-permission.queue-url',\n 'sqs.send-message.queue-url',\n 'sqs.send-message-batch.queue-url',\n 'sqs.set-queue-attributes.queue-url',\n 'sqs.purge-queue.queue-url',\n\n 's3.copy-object.website-redirect-location',\n 's3.create-multipart-upload.website-redirect-location',\n 's3.put-object.website-redirect-location',\n\n # Double check that this has been renamed!\n 'sns.subscribe.notification-endpoint',\n])\n\n\nclass ResourceLoadingError(Exception):\n pass\n\n\ndef get_paramfile(path):\n \"\"\"Load parameter based on a resource URI.\n\n It is possible to pass parameters to operations by referring\n to files or URI's. If such a reference is detected, this\n function attempts to retrieve the data from the file or URI\n and returns it. If there are any errors or if the ``path``\n does not appear to refer to a file or URI, a ``None`` is\n returned.\n\n :type path: str\n :param path: The resource URI, e.g. file://foo.txt. This value\n may also be a non resource URI, in which case ``None`` is returned.\n\n :return: The loaded value associated with the resource URI.\n If the provided ``path`` is not a resource URI, then a\n value of ``None`` is returned.\n\n \"\"\"\n data = None\n if isinstance(path, six.string_types):\n for prefix, function_spec in PREFIX_MAP.items():\n if path.startswith(prefix):\n function, kwargs = function_spec\n data = function(prefix, path, **kwargs)\n return data\n\n\ndef get_file(prefix, path, mode):\n file_path = os.path.expandvars(os.path.expanduser(path[len(prefix):]))\n try:\n with compat_open(file_path, mode) as f:\n return f.read()\n except UnicodeDecodeError:\n raise ResourceLoadingError(\n 'Unable to load paramfile (%s), text contents could '\n 'not be decoded. If this is a binary file, please use the '\n 'fileb:// prefix instead of the file:// prefix.' 
% file_path)\n except (OSError, IOError) as e:\n raise ResourceLoadingError('Unable to load paramfile %s: %s' % (\n path, e))\n\n\ndef get_uri(prefix, uri):\n try:\n r = requests.get(uri)\n if r.status_code == 200:\n return r.text\n else:\n raise ResourceLoadingError(\n \"received non 200 status code of %s\" % (\n r.status_code))\n except Exception as e:\n raise ResourceLoadingError('Unable to retrieve %s: %s' % (uri, e))\n\n\nPREFIX_MAP = {\n 'file://': (get_file, {'mode': 'r'}),\n 'fileb://': (get_file, {'mode': 'rb'}),\n 'http://': (get_uri, {}),\n 'https://': (get_uri, {}),\n}\n", "path": "awscli/paramfile.py"}]}
num_tokens_prompt: 2,158
num_tokens_diff: 107
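The one-line fix above registers `create-change-set`'s `template-url` in the same opt-out set the other CloudFormation commands already use, so the CLI passes the S3 URL through instead of trying to download it. A simplified sketch of how such an opt-out lookup behaves — `should_expand` is a stand-in for illustration, not actual awscli code; only the dotted-key format and set entries come from the source:

```python
# Dotted keys follow the service.operation.parameter pattern from awscli/paramfile.py.
PARAMFILE_DISABLED = {
    'cloudformation.create-stack.template-url',
    'cloudformation.update-stack.template-url',
    'cloudformation.create-change-set.template-url',   # the entry the patch adds
}

def should_expand(service, operation, param):
    """Return True if an http(s):// value should be fetched and inlined."""
    return f"{service}.{operation}.{param}" not in PARAMFILE_DISABLED

# Before the patch the first lookup returned True, so the CLI tried to GET the
# private S3 URL and failed with a 403; after the patch the URL is left untouched.
print(should_expand('cloudformation', 'create-change-set', 'template-url'))  # False
print(should_expand('cloudformation', 'create-stack', 'template-url'))       # False
print(should_expand('cloudformation', 'describe-stacks', 'stack-name'))      # True
```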
problem_id: gh_patches_debug_17454
source: rasdani/github-patches
task_type: git_diff
in_source_id: pyg-team__pytorch_geometric-7391
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> `CaptumExplainer` cannot be called multiple times in a row without creating an error ### 🐛 Describe the bug Trying to call an instance of `CaptumExplainer` twice raises an error. To replicate, change https://github.com/pyg-team/pytorch_geometric/blob/dfd32668aea953c8bb56f97364d8e028f267bde6/examples/explain/captum_explainer.py#L60 to ```python explanation = explainer(data.x, data.edge_index, index=node_index) explanation_2 = explanation = explainer(data.x, data.edge_index, index=node_index+1) ``` which will raise ``` Traceback (most recent call last): File ".../pytorch_geometric/examples/explain/captum_explainer.py", line 62, in <module> explanation_2 = explainer(data.x, data.edge_index, index=11) File ".../pytorch_geometric/torch_geometric/explain/explainer.py", line 198, in __call__ explanation = self.algorithm( File ".../pytorch_geometric/env/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl return forward_call(*args, **kwargs) File ".../torch_geometric/explain/algorithm/captum_explainer.py", line 153, in forward self.attribution_method = self.attribution_method(captum_model) TypeError: 'IntegratedGradients' object is not callable ``` This is because on the second call, the `CaptumExplainer` tries to recreates an attribution method from `captum`: https://github.com/pyg-team/pytorch_geometric/blob/dfd32668aea953c8bb56f97364d8e028f267bde6/torch_geometric/explain/algorithm/captum_explainer.py#L153 ### Environment * PyG version: (from source, latest commit to date on master) * PyTorch version: 2.0.0 </issue> <code> [start of torch_geometric/explain/algorithm/captum_explainer.py] 1 import inspect 2 import logging 3 import warnings 4 from typing import Any, Dict, Optional, Union 5 6 import torch 7 from torch import Tensor 8 9 from torch_geometric.explain import Explanation, HeteroExplanation 10 from torch_geometric.explain.algorithm import ExplainerAlgorithm 11 from torch_geometric.explain.algorithm.captum import ( 12 CaptumHeteroModel, 13 CaptumModel, 14 MaskLevelType, 15 convert_captum_output, 16 to_captum_input, 17 ) 18 from torch_geometric.explain.config import MaskType, ModelMode 19 from torch_geometric.typing import EdgeType, NodeType 20 21 22 class CaptumExplainer(ExplainerAlgorithm): 23 """A `Captum <https://captum.ai>`__-based explainer for identifying compact 24 subgraph structures and node features that play a crucial role in the 25 predictions made by a GNN. 26 27 This explainer algorithm uses :captum:`null` `Captum <https://captum.ai/>`_ 28 to compute attributions. 29 30 Currently, the following attribution methods are supported: 31 32 * :class:`captum.attr.IntegratedGradients` 33 * :class:`captum.attr.Saliency` 34 * :class:`captum.attr.InputXGradient` 35 * :class:`captum.attr.Deconvolution` 36 * :class:`captum.attr.ShapleyValueSampling` 37 * :class:`captum.attr.GuidedBackprop` 38 39 Args: 40 attribution_method (Attribution or str): The Captum attribution method 41 to use. Can be a string or a :class:`captum.attr` method. 42 **kwargs: Additional arguments for the Captum attribution method. 43 """ 44 SUPPORTED_METHODS = [ # TODO: Add support for more methods. 
45 'IntegratedGradients', 46 'Saliency', 47 'InputXGradient', 48 'Deconvolution', 49 'ShapleyValueSampling', 50 'GuidedBackprop', 51 ] 52 53 def __init__( 54 self, 55 attribution_method: Union[str, Any], 56 **kwargs, 57 ): 58 super().__init__() 59 60 import captum.attr # noqa 61 62 if isinstance(attribution_method, str): 63 self.attribution_method = getattr( 64 captum.attr, 65 attribution_method, 66 ) 67 else: 68 self.attribution_method = attribution_method 69 70 if not self._is_supported_attribution_method(): 71 raise ValueError(f"{self.__class__.__name__} does not support " 72 f"attribution method " 73 f"{self.attribution_method.__name__}") 74 75 if kwargs.get('internal_batch_size', 1) != 1: 76 warnings.warn("Overriding 'internal_batch_size' to 1") 77 78 if 'internal_batch_size' in self._get_attribute_parameters(): 79 kwargs['internal_batch_size'] = 1 80 81 self.kwargs = kwargs 82 83 def _get_mask_type(self) -> MaskLevelType: 84 r"""Based on the explainer config, return the mask type.""" 85 node_mask_type = self.explainer_config.node_mask_type 86 edge_mask_type = self.explainer_config.edge_mask_type 87 if node_mask_type is not None and edge_mask_type is not None: 88 mask_type = MaskLevelType.node_and_edge 89 elif node_mask_type is not None: 90 mask_type = MaskLevelType.node 91 elif edge_mask_type is not None: 92 mask_type = MaskLevelType.edge 93 else: 94 raise ValueError("Neither node mask type nor " 95 "edge mask type is specified.") 96 return mask_type 97 98 def _get_attribute_parameters(self) -> Dict[str, Any]: 99 r"""Returns the attribute arguments.""" 100 signature = inspect.signature(self.attribution_method.attribute) 101 return signature.parameters 102 103 def _needs_baseline(self) -> bool: 104 r"""Checks if the method needs a baseline.""" 105 parameters = self._get_attribute_parameters() 106 if 'baselines' in parameters: 107 param = parameters['baselines'] 108 if param.default is inspect.Parameter.empty: 109 return True 110 return False 111 112 def _is_supported_attribution_method(self) -> bool: 113 r"""Returns :obj:`True` if `self.attribution_method` is supported.""" 114 # This is redundant for now since all supported methods need a baseline 115 if self._needs_baseline(): 116 return False 117 elif self.attribution_method.__name__ in self.SUPPORTED_METHODS: 118 return True 119 return False 120 121 def forward( 122 self, 123 model: torch.nn.Module, 124 x: Union[Tensor, Dict[NodeType, Tensor]], 125 edge_index: Union[Tensor, Dict[EdgeType, Tensor]], 126 *, 127 target: Tensor, 128 index: Optional[Union[int, Tensor]] = None, 129 **kwargs, 130 ) -> Union[Explanation, HeteroExplanation]: 131 132 mask_type = self._get_mask_type() 133 134 inputs, add_forward_args = to_captum_input( 135 x, 136 edge_index, 137 mask_type, 138 *kwargs.values(), 139 ) 140 141 if isinstance(x, dict): 142 metadata = (list(x.keys()), list(edge_index.keys())) 143 captum_model = CaptumHeteroModel( 144 model, 145 mask_type, 146 index, 147 metadata, 148 ) 149 else: 150 metadata = None 151 captum_model = CaptumModel(model, mask_type, index) 152 153 self.attribution_method = self.attribution_method(captum_model) 154 155 # In captum, the target is the index for which 156 # the attribution is computed. 
157 if self.model_config.mode == ModelMode.regression: 158 target = None 159 else: 160 target = target[index] 161 162 attributions = self.attribution_method.attribute( 163 inputs=inputs, 164 target=target, 165 additional_forward_args=add_forward_args, 166 **self.kwargs, 167 ) 168 169 node_mask, edge_mask = convert_captum_output( 170 attributions, 171 mask_type, 172 metadata, 173 ) 174 175 if not isinstance(x, dict): 176 return Explanation(node_mask=node_mask, edge_mask=edge_mask) 177 178 explanation = HeteroExplanation() 179 explanation.set_value_dict('node_mask', node_mask) 180 explanation.set_value_dict('edge_mask', edge_mask) 181 return explanation 182 183 def supports(self) -> bool: 184 node_mask_type = self.explainer_config.node_mask_type 185 if node_mask_type not in [None, MaskType.attributes]: 186 logging.error(f"'{self.__class__.__name__}' only supports " 187 f"'node_mask_type' None or 'attributes' " 188 f"(got '{node_mask_type.value}')") 189 return False 190 191 # TODO (ramona): Confirm that output type is valid. 192 return True 193 [end of torch_geometric/explain/algorithm/captum_explainer.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/torch_geometric/explain/algorithm/captum_explainer.py b/torch_geometric/explain/algorithm/captum_explainer.py --- a/torch_geometric/explain/algorithm/captum_explainer.py +++ b/torch_geometric/explain/algorithm/captum_explainer.py @@ -150,7 +150,7 @@ metadata = None captum_model = CaptumModel(model, mask_type, index) - self.attribution_method = self.attribution_method(captum_model) + attribution_method = self.attribution_method(captum_model) # In captum, the target is the index for which # the attribution is computed. @@ -159,7 +159,7 @@ else: target = target[index] - attributions = self.attribution_method.attribute( + attributions = attribution_method.attribute( inputs=inputs, target=target, additional_forward_args=add_forward_args,
{"golden_diff": "diff --git a/torch_geometric/explain/algorithm/captum_explainer.py b/torch_geometric/explain/algorithm/captum_explainer.py\n--- a/torch_geometric/explain/algorithm/captum_explainer.py\n+++ b/torch_geometric/explain/algorithm/captum_explainer.py\n@@ -150,7 +150,7 @@\n metadata = None\n captum_model = CaptumModel(model, mask_type, index)\n \n- self.attribution_method = self.attribution_method(captum_model)\n+ attribution_method = self.attribution_method(captum_model)\n \n # In captum, the target is the index for which\n # the attribution is computed.\n@@ -159,7 +159,7 @@\n else:\n target = target[index]\n \n- attributions = self.attribution_method.attribute(\n+ attributions = attribution_method.attribute(\n inputs=inputs,\n target=target,\n additional_forward_args=add_forward_args,\n", "issue": "`CaptumExplainer` cannot be called multiple times in a row without creating an error\n### \ud83d\udc1b Describe the bug\n\nTrying to call an instance of `CaptumExplainer` twice raises an error.\r\n\r\n\r\nTo replicate, change https://github.com/pyg-team/pytorch_geometric/blob/dfd32668aea953c8bb56f97364d8e028f267bde6/examples/explain/captum_explainer.py#L60\r\n\r\nto \r\n\r\n```python\r\nexplanation = explainer(data.x, data.edge_index, index=node_index)\r\nexplanation_2 = explanation = explainer(data.x, data.edge_index, index=node_index+1)\r\n```\r\n\r\nwhich will raise\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \".../pytorch_geometric/examples/explain/captum_explainer.py\", line 62, in <module>\r\n explanation_2 = explainer(data.x, data.edge_index, index=11)\r\n File \".../pytorch_geometric/torch_geometric/explain/explainer.py\", line 198, in __call__\r\n explanation = self.algorithm(\r\n File \".../pytorch_geometric/env/lib/python3.10/site-packages/torch/nn/modules/module.py\", line 1501, in _call_impl\r\n return forward_call(*args, **kwargs)\r\n File \".../torch_geometric/explain/algorithm/captum_explainer.py\", line 153, in forward\r\n self.attribution_method = self.attribution_method(captum_model)\r\nTypeError: 'IntegratedGradients' object is not callable\r\n```\r\n\r\nThis is because on the second call, the `CaptumExplainer` tries to recreates an attribution method from `captum`:\r\n\r\nhttps://github.com/pyg-team/pytorch_geometric/blob/dfd32668aea953c8bb56f97364d8e028f267bde6/torch_geometric/explain/algorithm/captum_explainer.py#L153 \r\n\n\n### Environment\n\n* PyG version: (from source, latest commit to date on master)\r\n* PyTorch version: 2.0.0\n", "before_files": [{"content": "import inspect\nimport logging\nimport warnings\nfrom typing import Any, Dict, Optional, Union\n\nimport torch\nfrom torch import Tensor\n\nfrom torch_geometric.explain import Explanation, HeteroExplanation\nfrom torch_geometric.explain.algorithm import ExplainerAlgorithm\nfrom torch_geometric.explain.algorithm.captum import (\n CaptumHeteroModel,\n CaptumModel,\n MaskLevelType,\n convert_captum_output,\n to_captum_input,\n)\nfrom torch_geometric.explain.config import MaskType, ModelMode\nfrom torch_geometric.typing import EdgeType, NodeType\n\n\nclass CaptumExplainer(ExplainerAlgorithm):\n \"\"\"A `Captum <https://captum.ai>`__-based explainer for identifying compact\n subgraph structures and node features that play a crucial role in the\n predictions made by a GNN.\n\n This explainer algorithm uses :captum:`null` `Captum <https://captum.ai/>`_\n to compute attributions.\n\n Currently, the following attribution methods are supported:\n\n * :class:`captum.attr.IntegratedGradients`\n * 
:class:`captum.attr.Saliency`\n * :class:`captum.attr.InputXGradient`\n * :class:`captum.attr.Deconvolution`\n * :class:`captum.attr.ShapleyValueSampling`\n * :class:`captum.attr.GuidedBackprop`\n\n Args:\n attribution_method (Attribution or str): The Captum attribution method\n to use. Can be a string or a :class:`captum.attr` method.\n **kwargs: Additional arguments for the Captum attribution method.\n \"\"\"\n SUPPORTED_METHODS = [ # TODO: Add support for more methods.\n 'IntegratedGradients',\n 'Saliency',\n 'InputXGradient',\n 'Deconvolution',\n 'ShapleyValueSampling',\n 'GuidedBackprop',\n ]\n\n def __init__(\n self,\n attribution_method: Union[str, Any],\n **kwargs,\n ):\n super().__init__()\n\n import captum.attr # noqa\n\n if isinstance(attribution_method, str):\n self.attribution_method = getattr(\n captum.attr,\n attribution_method,\n )\n else:\n self.attribution_method = attribution_method\n\n if not self._is_supported_attribution_method():\n raise ValueError(f\"{self.__class__.__name__} does not support \"\n f\"attribution method \"\n f\"{self.attribution_method.__name__}\")\n\n if kwargs.get('internal_batch_size', 1) != 1:\n warnings.warn(\"Overriding 'internal_batch_size' to 1\")\n\n if 'internal_batch_size' in self._get_attribute_parameters():\n kwargs['internal_batch_size'] = 1\n\n self.kwargs = kwargs\n\n def _get_mask_type(self) -> MaskLevelType:\n r\"\"\"Based on the explainer config, return the mask type.\"\"\"\n node_mask_type = self.explainer_config.node_mask_type\n edge_mask_type = self.explainer_config.edge_mask_type\n if node_mask_type is not None and edge_mask_type is not None:\n mask_type = MaskLevelType.node_and_edge\n elif node_mask_type is not None:\n mask_type = MaskLevelType.node\n elif edge_mask_type is not None:\n mask_type = MaskLevelType.edge\n else:\n raise ValueError(\"Neither node mask type nor \"\n \"edge mask type is specified.\")\n return mask_type\n\n def _get_attribute_parameters(self) -> Dict[str, Any]:\n r\"\"\"Returns the attribute arguments.\"\"\"\n signature = inspect.signature(self.attribution_method.attribute)\n return signature.parameters\n\n def _needs_baseline(self) -> bool:\n r\"\"\"Checks if the method needs a baseline.\"\"\"\n parameters = self._get_attribute_parameters()\n if 'baselines' in parameters:\n param = parameters['baselines']\n if param.default is inspect.Parameter.empty:\n return True\n return False\n\n def _is_supported_attribution_method(self) -> bool:\n r\"\"\"Returns :obj:`True` if `self.attribution_method` is supported.\"\"\"\n # This is redundant for now since all supported methods need a baseline\n if self._needs_baseline():\n return False\n elif self.attribution_method.__name__ in self.SUPPORTED_METHODS:\n return True\n return False\n\n def forward(\n self,\n model: torch.nn.Module,\n x: Union[Tensor, Dict[NodeType, Tensor]],\n edge_index: Union[Tensor, Dict[EdgeType, Tensor]],\n *,\n target: Tensor,\n index: Optional[Union[int, Tensor]] = None,\n **kwargs,\n ) -> Union[Explanation, HeteroExplanation]:\n\n mask_type = self._get_mask_type()\n\n inputs, add_forward_args = to_captum_input(\n x,\n edge_index,\n mask_type,\n *kwargs.values(),\n )\n\n if isinstance(x, dict):\n metadata = (list(x.keys()), list(edge_index.keys()))\n captum_model = CaptumHeteroModel(\n model,\n mask_type,\n index,\n metadata,\n )\n else:\n metadata = None\n captum_model = CaptumModel(model, mask_type, index)\n\n self.attribution_method = self.attribution_method(captum_model)\n\n # In captum, the target is the index for which\n # the 
attribution is computed.\n if self.model_config.mode == ModelMode.regression:\n target = None\n else:\n target = target[index]\n\n attributions = self.attribution_method.attribute(\n inputs=inputs,\n target=target,\n additional_forward_args=add_forward_args,\n **self.kwargs,\n )\n\n node_mask, edge_mask = convert_captum_output(\n attributions,\n mask_type,\n metadata,\n )\n\n if not isinstance(x, dict):\n return Explanation(node_mask=node_mask, edge_mask=edge_mask)\n\n explanation = HeteroExplanation()\n explanation.set_value_dict('node_mask', node_mask)\n explanation.set_value_dict('edge_mask', edge_mask)\n return explanation\n\n def supports(self) -> bool:\n node_mask_type = self.explainer_config.node_mask_type\n if node_mask_type not in [None, MaskType.attributes]:\n logging.error(f\"'{self.__class__.__name__}' only supports \"\n f\"'node_mask_type' None or 'attributes' \"\n f\"(got '{node_mask_type.value}')\")\n return False\n\n # TODO (ramona): Confirm that output type is valid.\n return True\n", "path": "torch_geometric/explain/algorithm/captum_explainer.py"}]}
num_tokens_prompt: 2,926
num_tokens_diff: 220
problem_id: gh_patches_debug_8246
source: rasdani/github-patches
task_type: git_diff
in_source_id: searxng__searxng-3135
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Custom Links in the Footer <!-- PLEASE FILL THESE FIELDS, IT REALLY HELPS THE MAINTAINERS OF SearXNG --> **Is your feature request related to a problem? Please describe.** No. <!-- A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] --> **Describe the solution you'd like** Support for custom footer links. Currently, all that can be set are links to git repos, project-related pages, and instance owner mailto:. I would like to link back to a status page for the things that I host, and any other arbitrary thing. Preferably this would be a key(link name) -> value(url) map somewhere in the config. <!-- A clear and concise description of what you want to happen. --> **Describe alternatives you've considered** Editing the templates manually. <!-- A clear and concise description of any alternative solutions or features you've considered. --> **Additional context** <!-- Add any other context or screenshots about the feature request here. --> </issue> <code> [start of searx/settings_defaults.py] 1 # SPDX-License-Identifier: AGPL-3.0-or-later 2 # lint: pylint 3 """Implementation of the default settings. 4 5 """ 6 7 import typing 8 import numbers 9 import errno 10 import os 11 import logging 12 from base64 import b64decode 13 from os.path import dirname, abspath 14 15 from .sxng_locales import sxng_locales 16 17 searx_dir = abspath(dirname(__file__)) 18 19 logger = logging.getLogger('searx') 20 OUTPUT_FORMATS = ['html', 'csv', 'json', 'rss'] 21 SXNG_LOCALE_TAGS = ['all', 'auto'] + list(l[0] for l in sxng_locales) 22 SIMPLE_STYLE = ('auto', 'light', 'dark') 23 CATEGORIES_AS_TABS = { 24 'general': {}, 25 'images': {}, 26 'videos': {}, 27 'news': {}, 28 'map': {}, 29 'music': {}, 30 'it': {}, 31 'science': {}, 32 'files': {}, 33 'social media': {}, 34 } 35 STR_TO_BOOL = { 36 '0': False, 37 'false': False, 38 'off': False, 39 '1': True, 40 'true': True, 41 'on': True, 42 } 43 _UNDEFINED = object() 44 45 46 class SettingsValue: 47 """Check and update a setting value""" 48 49 def __init__( 50 self, 51 type_definition: typing.Union[None, typing.Any, typing.Tuple[typing.Any]] = None, 52 default: typing.Any = None, 53 environ_name: str = None, 54 ): 55 self.type_definition = ( 56 type_definition if type_definition is None or isinstance(type_definition, tuple) else (type_definition,) 57 ) 58 self.default = default 59 self.environ_name = environ_name 60 61 @property 62 def type_definition_repr(self): 63 types_str = [t.__name__ if isinstance(t, type) else repr(t) for t in self.type_definition] 64 return ', '.join(types_str) 65 66 def check_type_definition(self, value: typing.Any) -> None: 67 if value in self.type_definition: 68 return 69 type_list = tuple(t for t in self.type_definition if isinstance(t, type)) 70 if not isinstance(value, type_list): 71 raise ValueError('The value has to be one of these types/values: {}'.format(self.type_definition_repr)) 72 73 def __call__(self, value: typing.Any) -> typing.Any: 74 if value == _UNDEFINED: 75 value = self.default 76 # override existing value with environ 77 if self.environ_name and self.environ_name in os.environ: 78 value = os.environ[self.environ_name] 79 if self.type_definition == (bool,): 80 value = STR_TO_BOOL[value.lower()] 81 82 self.check_type_definition(value) 83 return value 84 85 86 class SettingSublistValue(SettingsValue): 87 """Check the value is a sublist of type definition.""" 88 89 def 
check_type_definition(self, value: typing.Any) -> typing.Any: 90 if not isinstance(value, list): 91 raise ValueError('The value has to a list') 92 for item in value: 93 if not item in self.type_definition[0]: 94 raise ValueError('{} not in {}'.format(item, self.type_definition)) 95 96 97 class SettingsDirectoryValue(SettingsValue): 98 """Check and update a setting value that is a directory path""" 99 100 def check_type_definition(self, value: typing.Any) -> typing.Any: 101 super().check_type_definition(value) 102 if not os.path.isdir(value): 103 raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), value) 104 105 def __call__(self, value: typing.Any) -> typing.Any: 106 if value == '': 107 value = self.default 108 return super().__call__(value) 109 110 111 class SettingsBytesValue(SettingsValue): 112 """str are base64 decoded""" 113 114 def __call__(self, value: typing.Any) -> typing.Any: 115 if isinstance(value, str): 116 value = b64decode(value) 117 return super().__call__(value) 118 119 120 def apply_schema(settings, schema, path_list): 121 error = False 122 for key, value in schema.items(): 123 if isinstance(value, SettingsValue): 124 try: 125 settings[key] = value(settings.get(key, _UNDEFINED)) 126 except Exception as e: # pylint: disable=broad-except 127 # don't stop now: check other values 128 logger.error('%s: %s', '.'.join([*path_list, key]), e) 129 error = True 130 elif isinstance(value, dict): 131 error = error or apply_schema(settings.setdefault(key, {}), schema[key], [*path_list, key]) 132 else: 133 settings.setdefault(key, value) 134 if len(path_list) == 0 and error: 135 raise ValueError('Invalid settings.yml') 136 return error 137 138 139 SCHEMA = { 140 'general': { 141 'debug': SettingsValue(bool, False, 'SEARXNG_DEBUG'), 142 'instance_name': SettingsValue(str, 'SearXNG'), 143 'privacypolicy_url': SettingsValue((None, False, str), None), 144 'contact_url': SettingsValue((None, False, str), None), 145 'donation_url': SettingsValue((bool, str), "https://docs.searxng.org/donate.html"), 146 'enable_metrics': SettingsValue(bool, True), 147 }, 148 'brand': { 149 'issue_url': SettingsValue(str, 'https://github.com/searxng/searxng/issues'), 150 'new_issue_url': SettingsValue(str, 'https://github.com/searxng/searxng/issues/new'), 151 'docs_url': SettingsValue(str, 'https://docs.searxng.org'), 152 'public_instances': SettingsValue((False, str), 'https://searx.space'), 153 'wiki_url': SettingsValue(str, 'https://github.com/searxng/searxng/wiki'), 154 }, 155 'search': { 156 'safe_search': SettingsValue((0, 1, 2), 0), 157 'autocomplete': SettingsValue(str, ''), 158 'autocomplete_min': SettingsValue(int, 4), 159 'default_lang': SettingsValue(tuple(SXNG_LOCALE_TAGS + ['']), ''), 160 'languages': SettingSublistValue(SXNG_LOCALE_TAGS, SXNG_LOCALE_TAGS), 161 'ban_time_on_fail': SettingsValue(numbers.Real, 5), 162 'max_ban_time_on_fail': SettingsValue(numbers.Real, 120), 163 'suspended_times': { 164 'SearxEngineAccessDenied': SettingsValue(numbers.Real, 86400), 165 'SearxEngineCaptcha': SettingsValue(numbers.Real, 86400), 166 'SearxEngineTooManyRequests': SettingsValue(numbers.Real, 3600), 167 'cf_SearxEngineCaptcha': SettingsValue(numbers.Real, 1296000), 168 'cf_SearxEngineAccessDenied': SettingsValue(numbers.Real, 86400), 169 'recaptcha_SearxEngineCaptcha': SettingsValue(numbers.Real, 604800), 170 }, 171 'formats': SettingsValue(list, OUTPUT_FORMATS), 172 'max_page': SettingsValue(int, 0), 173 }, 174 'server': { 175 'port': SettingsValue((int, str), 8888, 'SEARXNG_PORT'), 176 
'bind_address': SettingsValue(str, '127.0.0.1', 'SEARXNG_BIND_ADDRESS'), 177 'limiter': SettingsValue(bool, False), 178 'public_instance': SettingsValue(bool, False), 179 'secret_key': SettingsValue(str, environ_name='SEARXNG_SECRET'), 180 'base_url': SettingsValue((False, str), False, 'SEARXNG_BASE_URL'), 181 'image_proxy': SettingsValue(bool, False), 182 'http_protocol_version': SettingsValue(('1.0', '1.1'), '1.0'), 183 'method': SettingsValue(('POST', 'GET'), 'POST'), 184 'default_http_headers': SettingsValue(dict, {}), 185 }, 186 'redis': { 187 'url': SettingsValue((None, False, str), False, 'SEARXNG_REDIS_URL'), 188 }, 189 'ui': { 190 'static_path': SettingsDirectoryValue(str, os.path.join(searx_dir, 'static')), 191 'static_use_hash': SettingsValue(bool, False), 192 'templates_path': SettingsDirectoryValue(str, os.path.join(searx_dir, 'templates')), 193 'default_theme': SettingsValue(str, 'simple'), 194 'default_locale': SettingsValue(str, ''), 195 'theme_args': { 196 'simple_style': SettingsValue(SIMPLE_STYLE, 'auto'), 197 }, 198 'center_alignment': SettingsValue(bool, False), 199 'results_on_new_tab': SettingsValue(bool, False), 200 'advanced_search': SettingsValue(bool, False), 201 'query_in_title': SettingsValue(bool, False), 202 'infinite_scroll': SettingsValue(bool, False), 203 'cache_url': SettingsValue(str, 'https://web.archive.org/web/'), 204 'search_on_category_select': SettingsValue(bool, True), 205 'hotkeys': SettingsValue(('default', 'vim'), 'default'), 206 }, 207 'preferences': { 208 'lock': SettingsValue(list, []), 209 }, 210 'outgoing': { 211 'useragent_suffix': SettingsValue(str, ''), 212 'request_timeout': SettingsValue(numbers.Real, 3.0), 213 'enable_http2': SettingsValue(bool, True), 214 'verify': SettingsValue((bool, str), True), 215 'max_request_timeout': SettingsValue((None, numbers.Real), None), 216 'pool_connections': SettingsValue(int, 100), 217 'pool_maxsize': SettingsValue(int, 10), 218 'keepalive_expiry': SettingsValue(numbers.Real, 5.0), 219 # default maximum redirect 220 # from https://github.com/psf/requests/blob/8c211a96cdbe9fe320d63d9e1ae15c5c07e179f8/requests/models.py#L55 221 'max_redirects': SettingsValue(int, 30), 222 'retries': SettingsValue(int, 0), 223 'proxies': SettingsValue((None, str, dict), None), 224 'source_ips': SettingsValue((None, str, list), None), 225 # Tor configuration 226 'using_tor_proxy': SettingsValue(bool, False), 227 'extra_proxy_timeout': SettingsValue(int, 0), 228 'networks': {}, 229 }, 230 'result_proxy': { 231 'url': SettingsValue((None, str), None), 232 'key': SettingsBytesValue((None, bytes), None), 233 'proxify_results': SettingsValue(bool, False), 234 }, 235 'plugins': SettingsValue(list, []), 236 'enabled_plugins': SettingsValue((None, list), None), 237 'checker': { 238 'off_when_debug': SettingsValue(bool, True, None), 239 'scheduling': SettingsValue((None, dict), None, None), 240 }, 241 'categories_as_tabs': SettingsValue(dict, CATEGORIES_AS_TABS), 242 'engines': SettingsValue(list, []), 243 'doi_resolvers': {}, 244 } 245 246 247 def settings_set_defaults(settings): 248 apply_schema(settings, SCHEMA, []) 249 return settings 250 [end of searx/settings_defaults.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/searx/settings_defaults.py b/searx/settings_defaults.py --- a/searx/settings_defaults.py +++ b/searx/settings_defaults.py @@ -151,6 +151,7 @@ 'docs_url': SettingsValue(str, 'https://docs.searxng.org'), 'public_instances': SettingsValue((False, str), 'https://searx.space'), 'wiki_url': SettingsValue(str, 'https://github.com/searxng/searxng/wiki'), + 'custom': SettingsValue(dict, {'links': {}}), }, 'search': { 'safe_search': SettingsValue((0, 1, 2), 0),
{"golden_diff": "diff --git a/searx/settings_defaults.py b/searx/settings_defaults.py\n--- a/searx/settings_defaults.py\n+++ b/searx/settings_defaults.py\n@@ -151,6 +151,7 @@\n 'docs_url': SettingsValue(str, 'https://docs.searxng.org'),\n 'public_instances': SettingsValue((False, str), 'https://searx.space'),\n 'wiki_url': SettingsValue(str, 'https://github.com/searxng/searxng/wiki'),\n+ 'custom': SettingsValue(dict, {'links': {}}),\n },\n 'search': {\n 'safe_search': SettingsValue((0, 1, 2), 0),\n", "issue": "Custom Links in the Footer\n<!-- PLEASE FILL THESE FIELDS, IT REALLY HELPS THE MAINTAINERS OF SearXNG -->\r\n\r\n**Is your feature request related to a problem? Please describe.**\r\n\r\nNo.\r\n<!-- A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] -->\r\n\r\n**Describe the solution you'd like**\r\n\r\nSupport for custom footer links. Currently, all that can be set are links to git repos, project-related pages, and instance owner mailto:. I would like to link back to a status page for the things that I host, and any other arbitrary thing. Preferably this would be a key(link name) -> value(url) map somewhere in the config.\r\n<!-- A clear and concise description of what you want to happen. -->\r\n\r\n**Describe alternatives you've considered**\r\n\r\nEditing the templates manually.\r\n<!-- A clear and concise description of any alternative solutions or features you've considered. -->\r\n\r\n**Additional context**\r\n\r\n<!-- Add any other context or screenshots about the feature request here. -->\r\n\n", "before_files": [{"content": "# SPDX-License-Identifier: AGPL-3.0-or-later\n# lint: pylint\n\"\"\"Implementation of the default settings.\n\n\"\"\"\n\nimport typing\nimport numbers\nimport errno\nimport os\nimport logging\nfrom base64 import b64decode\nfrom os.path import dirname, abspath\n\nfrom .sxng_locales import sxng_locales\n\nsearx_dir = abspath(dirname(__file__))\n\nlogger = logging.getLogger('searx')\nOUTPUT_FORMATS = ['html', 'csv', 'json', 'rss']\nSXNG_LOCALE_TAGS = ['all', 'auto'] + list(l[0] for l in sxng_locales)\nSIMPLE_STYLE = ('auto', 'light', 'dark')\nCATEGORIES_AS_TABS = {\n 'general': {},\n 'images': {},\n 'videos': {},\n 'news': {},\n 'map': {},\n 'music': {},\n 'it': {},\n 'science': {},\n 'files': {},\n 'social media': {},\n}\nSTR_TO_BOOL = {\n '0': False,\n 'false': False,\n 'off': False,\n '1': True,\n 'true': True,\n 'on': True,\n}\n_UNDEFINED = object()\n\n\nclass SettingsValue:\n \"\"\"Check and update a setting value\"\"\"\n\n def __init__(\n self,\n type_definition: typing.Union[None, typing.Any, typing.Tuple[typing.Any]] = None,\n default: typing.Any = None,\n environ_name: str = None,\n ):\n self.type_definition = (\n type_definition if type_definition is None or isinstance(type_definition, tuple) else (type_definition,)\n )\n self.default = default\n self.environ_name = environ_name\n\n @property\n def type_definition_repr(self):\n types_str = [t.__name__ if isinstance(t, type) else repr(t) for t in self.type_definition]\n return ', '.join(types_str)\n\n def check_type_definition(self, value: typing.Any) -> None:\n if value in self.type_definition:\n return\n type_list = tuple(t for t in self.type_definition if isinstance(t, type))\n if not isinstance(value, type_list):\n raise ValueError('The value has to be one of these types/values: {}'.format(self.type_definition_repr))\n\n def __call__(self, value: typing.Any) -> typing.Any:\n if value == _UNDEFINED:\n value = self.default\n # override existing value with 
environ\n if self.environ_name and self.environ_name in os.environ:\n value = os.environ[self.environ_name]\n if self.type_definition == (bool,):\n value = STR_TO_BOOL[value.lower()]\n\n self.check_type_definition(value)\n return value\n\n\nclass SettingSublistValue(SettingsValue):\n \"\"\"Check the value is a sublist of type definition.\"\"\"\n\n def check_type_definition(self, value: typing.Any) -> typing.Any:\n if not isinstance(value, list):\n raise ValueError('The value has to a list')\n for item in value:\n if not item in self.type_definition[0]:\n raise ValueError('{} not in {}'.format(item, self.type_definition))\n\n\nclass SettingsDirectoryValue(SettingsValue):\n \"\"\"Check and update a setting value that is a directory path\"\"\"\n\n def check_type_definition(self, value: typing.Any) -> typing.Any:\n super().check_type_definition(value)\n if not os.path.isdir(value):\n raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), value)\n\n def __call__(self, value: typing.Any) -> typing.Any:\n if value == '':\n value = self.default\n return super().__call__(value)\n\n\nclass SettingsBytesValue(SettingsValue):\n \"\"\"str are base64 decoded\"\"\"\n\n def __call__(self, value: typing.Any) -> typing.Any:\n if isinstance(value, str):\n value = b64decode(value)\n return super().__call__(value)\n\n\ndef apply_schema(settings, schema, path_list):\n error = False\n for key, value in schema.items():\n if isinstance(value, SettingsValue):\n try:\n settings[key] = value(settings.get(key, _UNDEFINED))\n except Exception as e: # pylint: disable=broad-except\n # don't stop now: check other values\n logger.error('%s: %s', '.'.join([*path_list, key]), e)\n error = True\n elif isinstance(value, dict):\n error = error or apply_schema(settings.setdefault(key, {}), schema[key], [*path_list, key])\n else:\n settings.setdefault(key, value)\n if len(path_list) == 0 and error:\n raise ValueError('Invalid settings.yml')\n return error\n\n\nSCHEMA = {\n 'general': {\n 'debug': SettingsValue(bool, False, 'SEARXNG_DEBUG'),\n 'instance_name': SettingsValue(str, 'SearXNG'),\n 'privacypolicy_url': SettingsValue((None, False, str), None),\n 'contact_url': SettingsValue((None, False, str), None),\n 'donation_url': SettingsValue((bool, str), \"https://docs.searxng.org/donate.html\"),\n 'enable_metrics': SettingsValue(bool, True),\n },\n 'brand': {\n 'issue_url': SettingsValue(str, 'https://github.com/searxng/searxng/issues'),\n 'new_issue_url': SettingsValue(str, 'https://github.com/searxng/searxng/issues/new'),\n 'docs_url': SettingsValue(str, 'https://docs.searxng.org'),\n 'public_instances': SettingsValue((False, str), 'https://searx.space'),\n 'wiki_url': SettingsValue(str, 'https://github.com/searxng/searxng/wiki'),\n },\n 'search': {\n 'safe_search': SettingsValue((0, 1, 2), 0),\n 'autocomplete': SettingsValue(str, ''),\n 'autocomplete_min': SettingsValue(int, 4),\n 'default_lang': SettingsValue(tuple(SXNG_LOCALE_TAGS + ['']), ''),\n 'languages': SettingSublistValue(SXNG_LOCALE_TAGS, SXNG_LOCALE_TAGS),\n 'ban_time_on_fail': SettingsValue(numbers.Real, 5),\n 'max_ban_time_on_fail': SettingsValue(numbers.Real, 120),\n 'suspended_times': {\n 'SearxEngineAccessDenied': SettingsValue(numbers.Real, 86400),\n 'SearxEngineCaptcha': SettingsValue(numbers.Real, 86400),\n 'SearxEngineTooManyRequests': SettingsValue(numbers.Real, 3600),\n 'cf_SearxEngineCaptcha': SettingsValue(numbers.Real, 1296000),\n 'cf_SearxEngineAccessDenied': SettingsValue(numbers.Real, 86400),\n 'recaptcha_SearxEngineCaptcha': 
SettingsValue(numbers.Real, 604800),\n },\n 'formats': SettingsValue(list, OUTPUT_FORMATS),\n 'max_page': SettingsValue(int, 0),\n },\n 'server': {\n 'port': SettingsValue((int, str), 8888, 'SEARXNG_PORT'),\n 'bind_address': SettingsValue(str, '127.0.0.1', 'SEARXNG_BIND_ADDRESS'),\n 'limiter': SettingsValue(bool, False),\n 'public_instance': SettingsValue(bool, False),\n 'secret_key': SettingsValue(str, environ_name='SEARXNG_SECRET'),\n 'base_url': SettingsValue((False, str), False, 'SEARXNG_BASE_URL'),\n 'image_proxy': SettingsValue(bool, False),\n 'http_protocol_version': SettingsValue(('1.0', '1.1'), '1.0'),\n 'method': SettingsValue(('POST', 'GET'), 'POST'),\n 'default_http_headers': SettingsValue(dict, {}),\n },\n 'redis': {\n 'url': SettingsValue((None, False, str), False, 'SEARXNG_REDIS_URL'),\n },\n 'ui': {\n 'static_path': SettingsDirectoryValue(str, os.path.join(searx_dir, 'static')),\n 'static_use_hash': SettingsValue(bool, False),\n 'templates_path': SettingsDirectoryValue(str, os.path.join(searx_dir, 'templates')),\n 'default_theme': SettingsValue(str, 'simple'),\n 'default_locale': SettingsValue(str, ''),\n 'theme_args': {\n 'simple_style': SettingsValue(SIMPLE_STYLE, 'auto'),\n },\n 'center_alignment': SettingsValue(bool, False),\n 'results_on_new_tab': SettingsValue(bool, False),\n 'advanced_search': SettingsValue(bool, False),\n 'query_in_title': SettingsValue(bool, False),\n 'infinite_scroll': SettingsValue(bool, False),\n 'cache_url': SettingsValue(str, 'https://web.archive.org/web/'),\n 'search_on_category_select': SettingsValue(bool, True),\n 'hotkeys': SettingsValue(('default', 'vim'), 'default'),\n },\n 'preferences': {\n 'lock': SettingsValue(list, []),\n },\n 'outgoing': {\n 'useragent_suffix': SettingsValue(str, ''),\n 'request_timeout': SettingsValue(numbers.Real, 3.0),\n 'enable_http2': SettingsValue(bool, True),\n 'verify': SettingsValue((bool, str), True),\n 'max_request_timeout': SettingsValue((None, numbers.Real), None),\n 'pool_connections': SettingsValue(int, 100),\n 'pool_maxsize': SettingsValue(int, 10),\n 'keepalive_expiry': SettingsValue(numbers.Real, 5.0),\n # default maximum redirect\n # from https://github.com/psf/requests/blob/8c211a96cdbe9fe320d63d9e1ae15c5c07e179f8/requests/models.py#L55\n 'max_redirects': SettingsValue(int, 30),\n 'retries': SettingsValue(int, 0),\n 'proxies': SettingsValue((None, str, dict), None),\n 'source_ips': SettingsValue((None, str, list), None),\n # Tor configuration\n 'using_tor_proxy': SettingsValue(bool, False),\n 'extra_proxy_timeout': SettingsValue(int, 0),\n 'networks': {},\n },\n 'result_proxy': {\n 'url': SettingsValue((None, str), None),\n 'key': SettingsBytesValue((None, bytes), None),\n 'proxify_results': SettingsValue(bool, False),\n },\n 'plugins': SettingsValue(list, []),\n 'enabled_plugins': SettingsValue((None, list), None),\n 'checker': {\n 'off_when_debug': SettingsValue(bool, True, None),\n 'scheduling': SettingsValue((None, dict), None, None),\n },\n 'categories_as_tabs': SettingsValue(dict, CATEGORIES_AS_TABS),\n 'engines': SettingsValue(list, []),\n 'doi_resolvers': {},\n}\n\n\ndef settings_set_defaults(settings):\n apply_schema(settings, SCHEMA, [])\n return settings\n", "path": "searx/settings_defaults.py"}]}
num_tokens_prompt: 3,803
num_tokens_diff: 154
problem_id: gh_patches_debug_41874
source: rasdani/github-patches
task_type: git_diff
in_source_id: avocado-framework__avocado-4277
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> status server uri should be different from listen nrunner.status_server_uri should not be used to both, server and client. We should have different settings for configuring the client. </issue> <code> [start of avocado/plugins/runner_nrunner.py] 1 # This program is free software; you can redistribute it and/or modify 2 # it under the terms of the GNU General Public License as published by 3 # the Free Software Foundation; either version 2 of the License, or 4 # (at your option) any later version. 5 # 6 # This program is distributed in the hope that it will be useful, 7 # but WITHOUT ANY WARRANTY; without even the implied warranty of 8 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 9 # 10 # See LICENSE for more details. 11 # 12 # Copyright: Red Hat Inc. 2019-2020 13 # Authors: Cleber Rosa <[email protected]> 14 15 """ 16 NRunner based implementation of job compliant runner 17 """ 18 19 import asyncio 20 import json 21 import multiprocessing 22 import os 23 import random 24 from copy import copy 25 26 from avocado.core import nrunner 27 from avocado.core.dispatcher import SpawnerDispatcher 28 from avocado.core.plugin_interfaces import CLI, Init 29 from avocado.core.plugin_interfaces import Runner as RunnerInterface 30 from avocado.core.settings import settings 31 from avocado.core.status.repo import StatusRepo 32 from avocado.core.status.server import StatusServer 33 from avocado.core.task.runtime import RuntimeTask 34 from avocado.core.task.statemachine import TaskStateMachine, Worker 35 from avocado.core.test_id import TestID 36 from avocado.core.teststatus import mapping 37 38 39 class RunnerInit(Init): 40 41 name = 'nrunner' 42 description = 'nrunner initialization' 43 44 def initialize(self): 45 section = 'nrunner' 46 help_msg = 'Shuffle the tasks to be executed' 47 settings.register_option(section=section, 48 key='shuffle', 49 default=False, 50 help_msg=help_msg, 51 key_type=bool) 52 53 help_msg = 'URI for the status server, usually a "HOST:PORT" string' 54 settings.register_option(section=section, 55 key='status_server_uri', 56 default='127.0.0.1:8888', 57 metavar="HOST:PORT", 58 help_msg=help_msg) 59 60 help_msg = ('Number of maximum number tasks running in parallel. You ' 61 'can disable parallel execution by setting this to 1. ' 62 'Defaults to the amount of CPUs on this machine.') 63 settings.register_option(section=section, 64 key='max_parallel_tasks', 65 default=multiprocessing.cpu_count(), 66 key_type=int, 67 help_msg=help_msg) 68 69 help_msg = ("Spawn tasks in a specific spawner. 
Available spawners: " 70 "'process' and 'podman'") 71 settings.register_option(section=section, 72 key="spawner", 73 default='process', 74 help_msg=help_msg) 75 76 77 class RunnerCLI(CLI): 78 79 name = 'nrunner' 80 description = 'nrunner command line options for "run"' 81 82 def configure(self, parser): 83 super(RunnerCLI, self).configure(parser) 84 parser = parser.subcommands.choices.get('run', None) 85 if parser is None: 86 return 87 88 parser = parser.add_argument_group('nrunner specific options') 89 settings.add_argparser_to_option(namespace='nrunner.shuffle', 90 parser=parser, 91 long_arg='--nrunner-shuffle', 92 action='store_true') 93 94 settings.add_argparser_to_option(namespace='nrunner.status_server_uri', 95 parser=parser, 96 long_arg='--nrunner-status-server-uri') 97 98 settings.add_argparser_to_option(namespace='nrunner.max_parallel_tasks', 99 parser=parser, 100 long_arg='--nrunner-max-parallel-tasks') 101 102 settings.add_argparser_to_option(namespace='nrunner.spawner', 103 parser=parser, 104 long_arg='--nrunner-spawner') 105 106 def run(self, config): 107 pass 108 109 110 class Runner(RunnerInterface): 111 112 name = 'nrunner' 113 description = 'nrunner based implementation of job compliant runner' 114 115 def _save_to_file(self, filename, buff, mode='wb'): 116 with open(filename, mode) as fp: 117 fp.write(buff) 118 119 def _populate_task_logdir(self, base_path, task, statuses, debug=False): 120 # We are copying here to avoid printing duplicated information 121 local_statuses = copy(statuses) 122 last = local_statuses[-1] 123 try: 124 stdout = last.pop('stdout') 125 except KeyError: 126 stdout = None 127 try: 128 stderr = last.pop('stderr') 129 except KeyError: 130 stderr = None 131 132 # Create task dir 133 task_path = os.path.join(base_path, task.identifier.str_filesystem) 134 os.makedirs(task_path, exist_ok=True) 135 136 # Save stdout and stderr 137 if stdout is not None: 138 stdout_file = os.path.join(task_path, 'stdout') 139 self._save_to_file(stdout_file, stdout) 140 if stderr is not None: 141 stderr_file = os.path.join(task_path, 'stderr') 142 self._save_to_file(stderr_file, stderr) 143 144 # Save debug 145 if debug: 146 debug = os.path.join(task_path, 'debug') 147 with open(debug, 'w') as fp: 148 json.dump(local_statuses, fp) 149 150 data_file = os.path.join(task_path, 'data') 151 with open(data_file, 'w') as fp: 152 fp.write("{}\n".format(task.output_dir)) 153 154 def _get_all_runtime_tasks(self, test_suite): 155 result = [] 156 no_digits = len(str(len(test_suite))) 157 for index, task in enumerate(test_suite.tests, start=1): 158 task.known_runners = nrunner.RUNNERS_REGISTRY_PYTHON_CLASS 159 # this is all rubbish data 160 if test_suite.name: 161 prefix = "{}-{}".format(test_suite.name, index) 162 else: 163 prefix = index 164 test_id = TestID(prefix, 165 task.runnable.uri, 166 None, 167 no_digits) 168 task.identifier = test_id 169 result.append(RuntimeTask(task)) 170 return result 171 172 def _start_status_server(self, status_server_uri): 173 # pylint: disable=W0201 174 self.status_repo = StatusRepo() 175 # pylint: disable=W0201 176 self.status_server = StatusServer(status_server_uri, 177 self.status_repo) 178 asyncio.ensure_future(self.status_server.serve_forever()) 179 180 async def _update_status(self, job): 181 tasks_by_id = {str(runtime_task.task.identifier): runtime_task.task 182 for runtime_task in self.tasks} 183 while True: 184 try: 185 (task_id, status, _) = self.status_repo.status_journal_summary.pop(0) 186 187 except IndexError: 188 await 
asyncio.sleep(0.05) 189 continue 190 191 task = tasks_by_id.get(task_id) 192 early_state = {'name': task.identifier, 193 'job_logdir': job.logdir, 194 'job_unique_id': job.unique_id} 195 if status == 'started': 196 job.result.start_test(early_state) 197 job.result_events_dispatcher.map_method('start_test', 198 job.result, 199 early_state) 200 elif status == 'finished': 201 this_task_data = self.status_repo.get_task_data(task_id) 202 last_task_status = this_task_data[-1] 203 test_state = {'status': last_task_status.get('result').upper()} 204 test_state.update(early_state) 205 206 time_start = this_task_data[0]['time'] 207 time_end = last_task_status['time'] 208 time_elapsed = time_end - time_start 209 test_state['time_start'] = time_start 210 test_state['time_end'] = time_end 211 test_state['time_elapsed'] = time_elapsed 212 213 # fake log dir, needed by some result plugins such as HTML 214 test_state['logdir'] = '' 215 216 base_path = os.path.join(job.logdir, 'test-results') 217 self._populate_task_logdir(base_path, 218 task, 219 this_task_data, 220 job.config.get('core.debug')) 221 222 job.result.check_test(test_state) 223 job.result_events_dispatcher.map_method('end_test', 224 job.result, 225 test_state) 226 227 if not mapping[test_state['status']]: 228 self.summary.add("FAIL") 229 230 def run_suite(self, job, test_suite): 231 # pylint: disable=W0201 232 self.summary = set() 233 234 test_suite.tests, _ = nrunner.check_tasks_requirements(test_suite.tests) 235 job.result.tests_total = test_suite.size # no support for variants yet 236 237 status_server_uri = test_suite.config.get('nrunner.status_server_uri') 238 self._start_status_server(status_server_uri) 239 240 # pylint: disable=W0201 241 self.tasks = self._get_all_runtime_tasks(test_suite) 242 if test_suite.config.get('nrunner.shuffle'): 243 random.shuffle(self.tasks) 244 tsm = TaskStateMachine(self.tasks) 245 spawner_name = test_suite.config.get('nrunner.spawner') 246 spawner = SpawnerDispatcher(test_suite.config)[spawner_name].obj 247 max_running = test_suite.config.get('nrunner.max_parallel_tasks') 248 workers = [Worker(tsm, spawner, max_running=max_running).run() 249 for _ in range(max_running)] 250 asyncio.ensure_future(self._update_status(job)) 251 loop = asyncio.get_event_loop() 252 try: 253 loop.run_until_complete(asyncio.wait_for(asyncio.gather(*workers), 254 job.timeout or None)) 255 except (KeyboardInterrupt, asyncio.TimeoutError): 256 self.summary.add("INTERRUPTED") 257 258 # Wait until all messages may have been processed by the 259 # status_updater. This should be replaced by a mechanism 260 # that only waits if there are missing status messages to 261 # be processed, and, only for a given amount of time. 262 # Tests with non received status will always show as SKIP 263 # because of result reconciliation. 264 loop.run_until_complete(asyncio.sleep(0.05)) 265 266 job.result.end_tests() 267 return self.summary 268 [end of avocado/plugins/runner_nrunner.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/avocado/plugins/runner_nrunner.py b/avocado/plugins/runner_nrunner.py --- a/avocado/plugins/runner_nrunner.py +++ b/avocado/plugins/runner_nrunner.py @@ -50,7 +50,17 @@ help_msg=help_msg, key_type=bool) - help_msg = 'URI for the status server, usually a "HOST:PORT" string' + help_msg = ('URI for listing the status server. Usually ' + 'a "HOST:PORT" string') + settings.register_option(section=section, + key='status_server_listen', + default='127.0.0.1:8888', + metavar="HOST:PORT", + help_msg=help_msg) + + help_msg = ('URI for connecting to the status server, usually ' + 'a "HOST:PORT" string. Use this if your status server ' + 'is in another host, or different port') settings.register_option(section=section, key='status_server_uri', default='127.0.0.1:8888', @@ -91,17 +101,16 @@ long_arg='--nrunner-shuffle', action='store_true') - settings.add_argparser_to_option(namespace='nrunner.status_server_uri', - parser=parser, - long_arg='--nrunner-status-server-uri') - - settings.add_argparser_to_option(namespace='nrunner.max_parallel_tasks', - parser=parser, - long_arg='--nrunner-max-parallel-tasks') + # namespace mapping + ns = {'nrunner.status_server_listen': '--nrunner-status-server-listen', + 'nrunner.status_server_uri': '--nrunner-status-server-uri', + 'nrunner.max_parallel_tasks': '--nrunner-max-parallel-tasks', + 'nrunner.spawner': '--nrunner-spawner'} - settings.add_argparser_to_option(namespace='nrunner.spawner', - parser=parser, - long_arg='--nrunner-spawner') + for k, v in ns.items(): + settings.add_argparser_to_option(namespace=k, + parser=parser, + long_arg=v) def run(self, config): pass @@ -169,11 +178,11 @@ result.append(RuntimeTask(task)) return result - def _start_status_server(self, status_server_uri): + def _start_status_server(self, status_server_listen): # pylint: disable=W0201 self.status_repo = StatusRepo() # pylint: disable=W0201 - self.status_server = StatusServer(status_server_uri, + self.status_server = StatusServer(status_server_listen, self.status_repo) asyncio.ensure_future(self.status_server.serve_forever()) @@ -234,8 +243,8 @@ test_suite.tests, _ = nrunner.check_tasks_requirements(test_suite.tests) job.result.tests_total = test_suite.size # no support for variants yet - status_server_uri = test_suite.config.get('nrunner.status_server_uri') - self._start_status_server(status_server_uri) + listen = test_suite.config.get('nrunner.status_server_listen') + self._start_status_server(listen) # pylint: disable=W0201 self.tasks = self._get_all_runtime_tasks(test_suite)
{"golden_diff": "diff --git a/avocado/plugins/runner_nrunner.py b/avocado/plugins/runner_nrunner.py\n--- a/avocado/plugins/runner_nrunner.py\n+++ b/avocado/plugins/runner_nrunner.py\n@@ -50,7 +50,17 @@\n help_msg=help_msg,\n key_type=bool)\n \n- help_msg = 'URI for the status server, usually a \"HOST:PORT\" string'\n+ help_msg = ('URI for listing the status server. Usually '\n+ 'a \"HOST:PORT\" string')\n+ settings.register_option(section=section,\n+ key='status_server_listen',\n+ default='127.0.0.1:8888',\n+ metavar=\"HOST:PORT\",\n+ help_msg=help_msg)\n+\n+ help_msg = ('URI for connecting to the status server, usually '\n+ 'a \"HOST:PORT\" string. Use this if your status server '\n+ 'is in another host, or different port')\n settings.register_option(section=section,\n key='status_server_uri',\n default='127.0.0.1:8888',\n@@ -91,17 +101,16 @@\n long_arg='--nrunner-shuffle',\n action='store_true')\n \n- settings.add_argparser_to_option(namespace='nrunner.status_server_uri',\n- parser=parser,\n- long_arg='--nrunner-status-server-uri')\n-\n- settings.add_argparser_to_option(namespace='nrunner.max_parallel_tasks',\n- parser=parser,\n- long_arg='--nrunner-max-parallel-tasks')\n+ # namespace mapping\n+ ns = {'nrunner.status_server_listen': '--nrunner-status-server-listen',\n+ 'nrunner.status_server_uri': '--nrunner-status-server-uri',\n+ 'nrunner.max_parallel_tasks': '--nrunner-max-parallel-tasks',\n+ 'nrunner.spawner': '--nrunner-spawner'}\n \n- settings.add_argparser_to_option(namespace='nrunner.spawner',\n- parser=parser,\n- long_arg='--nrunner-spawner')\n+ for k, v in ns.items():\n+ settings.add_argparser_to_option(namespace=k,\n+ parser=parser,\n+ long_arg=v)\n \n def run(self, config):\n pass\n@@ -169,11 +178,11 @@\n result.append(RuntimeTask(task))\n return result\n \n- def _start_status_server(self, status_server_uri):\n+ def _start_status_server(self, status_server_listen):\n # pylint: disable=W0201\n self.status_repo = StatusRepo()\n # pylint: disable=W0201\n- self.status_server = StatusServer(status_server_uri,\n+ self.status_server = StatusServer(status_server_listen,\n self.status_repo)\n asyncio.ensure_future(self.status_server.serve_forever())\n \n@@ -234,8 +243,8 @@\n test_suite.tests, _ = nrunner.check_tasks_requirements(test_suite.tests)\n job.result.tests_total = test_suite.size # no support for variants yet\n \n- status_server_uri = test_suite.config.get('nrunner.status_server_uri')\n- self._start_status_server(status_server_uri)\n+ listen = test_suite.config.get('nrunner.status_server_listen')\n+ self._start_status_server(listen)\n \n # pylint: disable=W0201\n self.tasks = self._get_all_runtime_tasks(test_suite)\n", "issue": "status server uri should be different from listen\nnrunner.status_server_uri should not be used to both, server and client. We should have different settings for configuring the client.\n", "before_files": [{"content": "# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n#\n# See LICENSE for more details.\n#\n# Copyright: Red Hat Inc. 
2019-2020\n# Authors: Cleber Rosa <[email protected]>\n\n\"\"\"\nNRunner based implementation of job compliant runner\n\"\"\"\n\nimport asyncio\nimport json\nimport multiprocessing\nimport os\nimport random\nfrom copy import copy\n\nfrom avocado.core import nrunner\nfrom avocado.core.dispatcher import SpawnerDispatcher\nfrom avocado.core.plugin_interfaces import CLI, Init\nfrom avocado.core.plugin_interfaces import Runner as RunnerInterface\nfrom avocado.core.settings import settings\nfrom avocado.core.status.repo import StatusRepo\nfrom avocado.core.status.server import StatusServer\nfrom avocado.core.task.runtime import RuntimeTask\nfrom avocado.core.task.statemachine import TaskStateMachine, Worker\nfrom avocado.core.test_id import TestID\nfrom avocado.core.teststatus import mapping\n\n\nclass RunnerInit(Init):\n\n name = 'nrunner'\n description = 'nrunner initialization'\n\n def initialize(self):\n section = 'nrunner'\n help_msg = 'Shuffle the tasks to be executed'\n settings.register_option(section=section,\n key='shuffle',\n default=False,\n help_msg=help_msg,\n key_type=bool)\n\n help_msg = 'URI for the status server, usually a \"HOST:PORT\" string'\n settings.register_option(section=section,\n key='status_server_uri',\n default='127.0.0.1:8888',\n metavar=\"HOST:PORT\",\n help_msg=help_msg)\n\n help_msg = ('Number of maximum number tasks running in parallel. You '\n 'can disable parallel execution by setting this to 1. '\n 'Defaults to the amount of CPUs on this machine.')\n settings.register_option(section=section,\n key='max_parallel_tasks',\n default=multiprocessing.cpu_count(),\n key_type=int,\n help_msg=help_msg)\n\n help_msg = (\"Spawn tasks in a specific spawner. Available spawners: \"\n \"'process' and 'podman'\")\n settings.register_option(section=section,\n key=\"spawner\",\n default='process',\n help_msg=help_msg)\n\n\nclass RunnerCLI(CLI):\n\n name = 'nrunner'\n description = 'nrunner command line options for \"run\"'\n\n def configure(self, parser):\n super(RunnerCLI, self).configure(parser)\n parser = parser.subcommands.choices.get('run', None)\n if parser is None:\n return\n\n parser = parser.add_argument_group('nrunner specific options')\n settings.add_argparser_to_option(namespace='nrunner.shuffle',\n parser=parser,\n long_arg='--nrunner-shuffle',\n action='store_true')\n\n settings.add_argparser_to_option(namespace='nrunner.status_server_uri',\n parser=parser,\n long_arg='--nrunner-status-server-uri')\n\n settings.add_argparser_to_option(namespace='nrunner.max_parallel_tasks',\n parser=parser,\n long_arg='--nrunner-max-parallel-tasks')\n\n settings.add_argparser_to_option(namespace='nrunner.spawner',\n parser=parser,\n long_arg='--nrunner-spawner')\n\n def run(self, config):\n pass\n\n\nclass Runner(RunnerInterface):\n\n name = 'nrunner'\n description = 'nrunner based implementation of job compliant runner'\n\n def _save_to_file(self, filename, buff, mode='wb'):\n with open(filename, mode) as fp:\n fp.write(buff)\n\n def _populate_task_logdir(self, base_path, task, statuses, debug=False):\n # We are copying here to avoid printing duplicated information\n local_statuses = copy(statuses)\n last = local_statuses[-1]\n try:\n stdout = last.pop('stdout')\n except KeyError:\n stdout = None\n try:\n stderr = last.pop('stderr')\n except KeyError:\n stderr = None\n\n # Create task dir\n task_path = os.path.join(base_path, task.identifier.str_filesystem)\n os.makedirs(task_path, exist_ok=True)\n\n # Save stdout and stderr\n if stdout is not None:\n stdout_file = 
os.path.join(task_path, 'stdout')\n self._save_to_file(stdout_file, stdout)\n if stderr is not None:\n stderr_file = os.path.join(task_path, 'stderr')\n self._save_to_file(stderr_file, stderr)\n\n # Save debug\n if debug:\n debug = os.path.join(task_path, 'debug')\n with open(debug, 'w') as fp:\n json.dump(local_statuses, fp)\n\n data_file = os.path.join(task_path, 'data')\n with open(data_file, 'w') as fp:\n fp.write(\"{}\\n\".format(task.output_dir))\n\n def _get_all_runtime_tasks(self, test_suite):\n result = []\n no_digits = len(str(len(test_suite)))\n for index, task in enumerate(test_suite.tests, start=1):\n task.known_runners = nrunner.RUNNERS_REGISTRY_PYTHON_CLASS\n # this is all rubbish data\n if test_suite.name:\n prefix = \"{}-{}\".format(test_suite.name, index)\n else:\n prefix = index\n test_id = TestID(prefix,\n task.runnable.uri,\n None,\n no_digits)\n task.identifier = test_id\n result.append(RuntimeTask(task))\n return result\n\n def _start_status_server(self, status_server_uri):\n # pylint: disable=W0201\n self.status_repo = StatusRepo()\n # pylint: disable=W0201\n self.status_server = StatusServer(status_server_uri,\n self.status_repo)\n asyncio.ensure_future(self.status_server.serve_forever())\n\n async def _update_status(self, job):\n tasks_by_id = {str(runtime_task.task.identifier): runtime_task.task\n for runtime_task in self.tasks}\n while True:\n try:\n (task_id, status, _) = self.status_repo.status_journal_summary.pop(0)\n\n except IndexError:\n await asyncio.sleep(0.05)\n continue\n\n task = tasks_by_id.get(task_id)\n early_state = {'name': task.identifier,\n 'job_logdir': job.logdir,\n 'job_unique_id': job.unique_id}\n if status == 'started':\n job.result.start_test(early_state)\n job.result_events_dispatcher.map_method('start_test',\n job.result,\n early_state)\n elif status == 'finished':\n this_task_data = self.status_repo.get_task_data(task_id)\n last_task_status = this_task_data[-1]\n test_state = {'status': last_task_status.get('result').upper()}\n test_state.update(early_state)\n\n time_start = this_task_data[0]['time']\n time_end = last_task_status['time']\n time_elapsed = time_end - time_start\n test_state['time_start'] = time_start\n test_state['time_end'] = time_end\n test_state['time_elapsed'] = time_elapsed\n\n # fake log dir, needed by some result plugins such as HTML\n test_state['logdir'] = ''\n\n base_path = os.path.join(job.logdir, 'test-results')\n self._populate_task_logdir(base_path,\n task,\n this_task_data,\n job.config.get('core.debug'))\n\n job.result.check_test(test_state)\n job.result_events_dispatcher.map_method('end_test',\n job.result,\n test_state)\n\n if not mapping[test_state['status']]:\n self.summary.add(\"FAIL\")\n\n def run_suite(self, job, test_suite):\n # pylint: disable=W0201\n self.summary = set()\n\n test_suite.tests, _ = nrunner.check_tasks_requirements(test_suite.tests)\n job.result.tests_total = test_suite.size # no support for variants yet\n\n status_server_uri = test_suite.config.get('nrunner.status_server_uri')\n self._start_status_server(status_server_uri)\n\n # pylint: disable=W0201\n self.tasks = self._get_all_runtime_tasks(test_suite)\n if test_suite.config.get('nrunner.shuffle'):\n random.shuffle(self.tasks)\n tsm = TaskStateMachine(self.tasks)\n spawner_name = test_suite.config.get('nrunner.spawner')\n spawner = SpawnerDispatcher(test_suite.config)[spawner_name].obj\n max_running = test_suite.config.get('nrunner.max_parallel_tasks')\n workers = [Worker(tsm, spawner, max_running=max_running).run()\n for _ in 
range(max_running)]\n asyncio.ensure_future(self._update_status(job))\n loop = asyncio.get_event_loop()\n try:\n loop.run_until_complete(asyncio.wait_for(asyncio.gather(*workers),\n job.timeout or None))\n except (KeyboardInterrupt, asyncio.TimeoutError):\n self.summary.add(\"INTERRUPTED\")\n\n # Wait until all messages may have been processed by the\n # status_updater. This should be replaced by a mechanism\n # that only waits if there are missing status messages to\n # be processed, and, only for a given amount of time.\n # Tests with non received status will always show as SKIP\n # because of result reconciliation.\n loop.run_until_complete(asyncio.sleep(0.05))\n\n job.result.end_tests()\n return self.summary\n", "path": "avocado/plugins/runner_nrunner.py"}]}
num_tokens_prompt: 3,348
num_tokens_diff: 756
problem_id: gh_patches_debug_5986
source: rasdani/github-patches
task_type: git_diff
in_source_id: comic__grand-challenge.org-1845
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Not setting `USER` directive in processor Dockerfile fails silently Building a processor image without setting `USER` directive in Dockerfile will result in the container never being marked as ready and triggers a (silent) error in sentry: https://sentry.io/organizations/grand-challenge/issues/2396054397/?project=303639&query=is%3Aunresolved It should fail properly returning a validation error to the user indicating that the `USER` directive should be set in the Dockerfile according to docker best practices. </issue> <code> [start of app/grandchallenge/components/tasks.py] 1 import json 2 import tarfile 3 import uuid 4 from datetime import timedelta 5 from typing import Dict 6 7 from billiard.exceptions import SoftTimeLimitExceeded, TimeLimitExceeded 8 from celery import shared_task 9 from django.apps import apps 10 from django.conf import settings 11 from django.core.exceptions import ValidationError 12 from django.core.files import File 13 from django.db import OperationalError 14 from django.db.models import DateTimeField, ExpressionWrapper, F 15 from django.utils.timezone import now 16 17 from grandchallenge.components.backends.docker import ComponentException 18 from grandchallenge.components.emails import send_invalid_dockerfile_email 19 from grandchallenge.jqfileupload.widgets.uploader import StagedAjaxFile 20 21 22 @shared_task() 23 def validate_docker_image(*, pk: uuid.UUID, app_label: str, model_name: str): 24 model = apps.get_model(app_label=app_label, model_name=model_name) 25 26 instance = model.objects.get(pk=pk) 27 28 if not instance.image: 29 # Create the image from the staged file 30 uploaded_image = StagedAjaxFile(instance.staged_image_uuid) 31 with uploaded_image.open() as f: 32 instance.image.save(uploaded_image.name, File(f)) 33 34 try: 35 image_sha256 = _validate_docker_image_manifest( 36 model=model, instance=instance 37 ) 38 except ValidationError: 39 send_invalid_dockerfile_email(container_image=instance) 40 raise 41 42 model.objects.filter(pk=instance.pk).update( 43 image_sha256=f"sha256:{image_sha256}", ready=True 44 ) 45 46 47 def _validate_docker_image_manifest(*, model, instance) -> str: 48 manifest = _extract_docker_image_file( 49 model=model, instance=instance, filename="manifest.json" 50 ) 51 manifest = json.loads(manifest) 52 53 if len(manifest) != 1: 54 model.objects.filter(pk=instance.pk).update( 55 status=( 56 f"The container image file should only have 1 image. " 57 f"This file contains {len(manifest)}." 58 ) 59 ) 60 raise ValidationError("Invalid Dockerfile") 61 62 image_sha256 = manifest[0]["Config"][:64] 63 64 config = _extract_docker_image_file( 65 model=model, instance=instance, filename=f"{image_sha256}.json" 66 ) 67 config = json.loads(config) 68 69 if str(config["config"]["User"].lower()) in ["", "root", "0"]: 70 model.objects.filter(pk=instance.pk).update( 71 status=( 72 "The container runs as root. 
Please add a user, group and " 73 "USER instruction to your Dockerfile, rebuild, test and " 74 "upload the container again, see " 75 "https://docs.docker.com/develop/develop-images/dockerfile_best-practices/#user" 76 ) 77 ) 78 raise ValidationError("Invalid Dockerfile") 79 80 return image_sha256 81 82 83 def _extract_docker_image_file(*, model, instance, filename: str): 84 """Extract a file from the root of a tarball.""" 85 try: 86 with instance.image.open(mode="rb") as im, tarfile.open( 87 fileobj=im, mode="r" 88 ) as t: 89 member = dict(zip(t.getnames(), t.getmembers()))[filename] 90 file = t.extractfile(member).read() 91 return file 92 except (KeyError, tarfile.ReadError): 93 model.objects.filter(pk=instance.pk).update( 94 status=( 95 f"{filename} not found at the root of the container image " 96 f"file. Was this created with docker save?" 97 ) 98 ) 99 raise ValidationError("Invalid Dockerfile") 100 101 102 def retry_if_dropped(func): 103 """ 104 Retry a function that relies on an open database connection. 105 106 Use this decorator when you have a long running task as sometimes the db 107 connection will drop. 108 """ 109 110 def wrapper(*args, **kwargs): 111 n_tries = 0 112 max_tries = 2 113 err = None 114 115 while n_tries < max_tries: 116 n_tries += 1 117 118 try: 119 return func(*args, **kwargs) 120 except OperationalError as e: 121 err = e 122 123 # This needs to be a local import 124 from django.db import connection 125 126 connection.close() 127 128 raise err 129 130 return wrapper 131 132 133 @retry_if_dropped 134 def get_model_instance(*, pk, app_label, model_name): 135 model = apps.get_model(app_label=app_label, model_name=model_name) 136 return model.objects.get(pk=pk) 137 138 139 @shared_task 140 def execute_job( 141 *_, job_pk: uuid.UUID, job_app_label: str, job_model_name: str 142 ) -> None: 143 Job = apps.get_model( # noqa: N806 144 app_label=job_app_label, model_name=job_model_name 145 ) 146 job = Job.objects.get(pk=job_pk) 147 148 if job.status in [job.PENDING, job.RETRY]: 149 job.update_status(status=job.STARTED) 150 else: 151 raise RuntimeError("Job is not set to be executed.") 152 153 if not job.container.ready: 154 msg = f"Method {job.container.pk} was not ready to be used." 
155 job.update_status(status=job.FAILURE, error_message=msg) 156 raise RuntimeError(msg) 157 try: 158 with job.executor_cls( 159 job_id=str(job.pk), 160 job_class=Job, 161 input_files=job.input_files, 162 output_interfaces=job.output_interfaces, 163 exec_image=job.container.image, 164 exec_image_sha256=job.container.image_sha256, 165 memory_limit=job.container.requires_memory_gb, 166 ) as ev: 167 # This call is potentially very long 168 ev.execute() 169 except ComponentException as e: 170 job = get_model_instance( 171 pk=job_pk, app_label=job_app_label, model_name=job_model_name 172 ) 173 job.update_status( 174 status=job.FAILURE, 175 stdout=ev.stdout, 176 stderr=ev.stderr, 177 error_message=str(e), 178 ) 179 except (SoftTimeLimitExceeded, TimeLimitExceeded): 180 job = get_model_instance( 181 pk=job_pk, app_label=job_app_label, model_name=job_model_name 182 ) 183 job.update_status( 184 status=job.FAILURE, 185 stdout=ev.stdout, 186 stderr=ev.stderr, 187 error_message="Time limit exceeded.", 188 ) 189 except Exception: 190 job = get_model_instance( 191 pk=job_pk, app_label=job_app_label, model_name=job_model_name 192 ) 193 job.update_status( 194 status=job.FAILURE, 195 stdout=ev.stdout, 196 stderr=ev.stderr, 197 error_message="An unexpected error occurred.", 198 ) 199 raise 200 else: 201 job = get_model_instance( 202 pk=job_pk, app_label=job_app_label, model_name=job_model_name 203 ) 204 job.update_status( 205 status=job.SUCCESS, stdout=ev.stdout, stderr=ev.stderr 206 ) 207 208 209 @shared_task 210 def mark_long_running_jobs_failed( 211 *, app_label: str, model_name: str, extra_filters: Dict[str, str] = None 212 ): 213 """ 214 Mark jobs that have been started but did not finish (maybe due to 215 an unrecoverable hardware error). It will mark tasks FAILED that have the 216 status STARTED after 1.2x the task limit (which is different for each 217 queue), so, this must be scheduled on the same queue that the execute_job 218 task is run for this app_label and model_name. 219 220 If the task is still running on Celery then it will still be able to 221 report as passed later. 222 """ 223 Job = apps.get_model( # noqa: N806 224 app_label=app_label, model_name=model_name 225 ) 226 227 jobs_to_mark = Job.objects.filter( 228 started_at__lt=now() 229 - 1.2 * timedelta(seconds=settings.CELERY_TASK_TIME_LIMIT), 230 status=Job.STARTED, 231 ) 232 233 if extra_filters: 234 jobs_to_mark = jobs_to_mark.filter(**extra_filters) 235 236 for j in jobs_to_mark: 237 j.update_status( 238 status=Job.FAILURE, error_message="Time limit exceeded." 
239 ) 240 241 return [j.pk for j in jobs_to_mark] 242 243 244 @shared_task 245 def start_service(*, pk: uuid.UUID, app_label: str, model_name: str): 246 session = get_model_instance( 247 pk=pk, app_label=app_label, model_name=model_name 248 ) 249 session.start() 250 251 252 @shared_task 253 def stop_service(*, pk: uuid.UUID, app_label: str, model_name: str): 254 session = get_model_instance( 255 pk=pk, app_label=app_label, model_name=model_name 256 ) 257 session.stop() 258 259 260 @shared_task 261 def stop_expired_services(*, app_label: str, model_name: str, region: str): 262 model = apps.get_model(app_label=app_label, model_name=model_name) 263 264 services_to_stop = ( 265 model.objects.annotate( 266 expires=ExpressionWrapper( 267 F("created") + F("maximum_duration"), 268 output_field=DateTimeField(), 269 ) 270 ) 271 .filter(expires__lt=now(), region=region) 272 .exclude(status=model.STOPPED) 273 ) 274 275 for service in services_to_stop: 276 service.stop() 277 278 return [str(s) for s in services_to_stop] 279 [end of app/grandchallenge/components/tasks.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/app/grandchallenge/components/tasks.py b/app/grandchallenge/components/tasks.py --- a/app/grandchallenge/components/tasks.py +++ b/app/grandchallenge/components/tasks.py @@ -66,7 +66,9 @@ ) config = json.loads(config) - if str(config["config"]["User"].lower()) in ["", "root", "0"]: + if "User" not in config["config"] or str( + config["config"]["User"].lower() + ) in ["", "root", "0"]: model.objects.filter(pk=instance.pk).update( status=( "The container runs as root. Please add a user, group and "
{"golden_diff": "diff --git a/app/grandchallenge/components/tasks.py b/app/grandchallenge/components/tasks.py\n--- a/app/grandchallenge/components/tasks.py\n+++ b/app/grandchallenge/components/tasks.py\n@@ -66,7 +66,9 @@\n )\n config = json.loads(config)\n \n- if str(config[\"config\"][\"User\"].lower()) in [\"\", \"root\", \"0\"]:\n+ if \"User\" not in config[\"config\"] or str(\n+ config[\"config\"][\"User\"].lower()\n+ ) in [\"\", \"root\", \"0\"]:\n model.objects.filter(pk=instance.pk).update(\n status=(\n \"The container runs as root. Please add a user, group and \"\n", "issue": "Not setting `USER` directive in processor Dockerfile fails silently\nBuilding a processor image without setting `USER` directive in Dockerfile will result in the container never being marked as ready and triggers a (silent) error in sentry: \r\nhttps://sentry.io/organizations/grand-challenge/issues/2396054397/?project=303639&query=is%3Aunresolved\r\n\r\nIt should fail properly returning a validation error to the user indicating that the `USER` directive should be set in the Dockerfile according to docker best practices.\r\n\n", "before_files": [{"content": "import json\nimport tarfile\nimport uuid\nfrom datetime import timedelta\nfrom typing import Dict\n\nfrom billiard.exceptions import SoftTimeLimitExceeded, TimeLimitExceeded\nfrom celery import shared_task\nfrom django.apps import apps\nfrom django.conf import settings\nfrom django.core.exceptions import ValidationError\nfrom django.core.files import File\nfrom django.db import OperationalError\nfrom django.db.models import DateTimeField, ExpressionWrapper, F\nfrom django.utils.timezone import now\n\nfrom grandchallenge.components.backends.docker import ComponentException\nfrom grandchallenge.components.emails import send_invalid_dockerfile_email\nfrom grandchallenge.jqfileupload.widgets.uploader import StagedAjaxFile\n\n\n@shared_task()\ndef validate_docker_image(*, pk: uuid.UUID, app_label: str, model_name: str):\n model = apps.get_model(app_label=app_label, model_name=model_name)\n\n instance = model.objects.get(pk=pk)\n\n if not instance.image:\n # Create the image from the staged file\n uploaded_image = StagedAjaxFile(instance.staged_image_uuid)\n with uploaded_image.open() as f:\n instance.image.save(uploaded_image.name, File(f))\n\n try:\n image_sha256 = _validate_docker_image_manifest(\n model=model, instance=instance\n )\n except ValidationError:\n send_invalid_dockerfile_email(container_image=instance)\n raise\n\n model.objects.filter(pk=instance.pk).update(\n image_sha256=f\"sha256:{image_sha256}\", ready=True\n )\n\n\ndef _validate_docker_image_manifest(*, model, instance) -> str:\n manifest = _extract_docker_image_file(\n model=model, instance=instance, filename=\"manifest.json\"\n )\n manifest = json.loads(manifest)\n\n if len(manifest) != 1:\n model.objects.filter(pk=instance.pk).update(\n status=(\n f\"The container image file should only have 1 image. \"\n f\"This file contains {len(manifest)}.\"\n )\n )\n raise ValidationError(\"Invalid Dockerfile\")\n\n image_sha256 = manifest[0][\"Config\"][:64]\n\n config = _extract_docker_image_file(\n model=model, instance=instance, filename=f\"{image_sha256}.json\"\n )\n config = json.loads(config)\n\n if str(config[\"config\"][\"User\"].lower()) in [\"\", \"root\", \"0\"]:\n model.objects.filter(pk=instance.pk).update(\n status=(\n \"The container runs as root. 
Please add a user, group and \"\n \"USER instruction to your Dockerfile, rebuild, test and \"\n \"upload the container again, see \"\n \"https://docs.docker.com/develop/develop-images/dockerfile_best-practices/#user\"\n )\n )\n raise ValidationError(\"Invalid Dockerfile\")\n\n return image_sha256\n\n\ndef _extract_docker_image_file(*, model, instance, filename: str):\n \"\"\"Extract a file from the root of a tarball.\"\"\"\n try:\n with instance.image.open(mode=\"rb\") as im, tarfile.open(\n fileobj=im, mode=\"r\"\n ) as t:\n member = dict(zip(t.getnames(), t.getmembers()))[filename]\n file = t.extractfile(member).read()\n return file\n except (KeyError, tarfile.ReadError):\n model.objects.filter(pk=instance.pk).update(\n status=(\n f\"{filename} not found at the root of the container image \"\n f\"file. Was this created with docker save?\"\n )\n )\n raise ValidationError(\"Invalid Dockerfile\")\n\n\ndef retry_if_dropped(func):\n \"\"\"\n Retry a function that relies on an open database connection.\n\n Use this decorator when you have a long running task as sometimes the db\n connection will drop.\n \"\"\"\n\n def wrapper(*args, **kwargs):\n n_tries = 0\n max_tries = 2\n err = None\n\n while n_tries < max_tries:\n n_tries += 1\n\n try:\n return func(*args, **kwargs)\n except OperationalError as e:\n err = e\n\n # This needs to be a local import\n from django.db import connection\n\n connection.close()\n\n raise err\n\n return wrapper\n\n\n@retry_if_dropped\ndef get_model_instance(*, pk, app_label, model_name):\n model = apps.get_model(app_label=app_label, model_name=model_name)\n return model.objects.get(pk=pk)\n\n\n@shared_task\ndef execute_job(\n *_, job_pk: uuid.UUID, job_app_label: str, job_model_name: str\n) -> None:\n Job = apps.get_model( # noqa: N806\n app_label=job_app_label, model_name=job_model_name\n )\n job = Job.objects.get(pk=job_pk)\n\n if job.status in [job.PENDING, job.RETRY]:\n job.update_status(status=job.STARTED)\n else:\n raise RuntimeError(\"Job is not set to be executed.\")\n\n if not job.container.ready:\n msg = f\"Method {job.container.pk} was not ready to be used.\"\n job.update_status(status=job.FAILURE, error_message=msg)\n raise RuntimeError(msg)\n try:\n with job.executor_cls(\n job_id=str(job.pk),\n job_class=Job,\n input_files=job.input_files,\n output_interfaces=job.output_interfaces,\n exec_image=job.container.image,\n exec_image_sha256=job.container.image_sha256,\n memory_limit=job.container.requires_memory_gb,\n ) as ev:\n # This call is potentially very long\n ev.execute()\n except ComponentException as e:\n job = get_model_instance(\n pk=job_pk, app_label=job_app_label, model_name=job_model_name\n )\n job.update_status(\n status=job.FAILURE,\n stdout=ev.stdout,\n stderr=ev.stderr,\n error_message=str(e),\n )\n except (SoftTimeLimitExceeded, TimeLimitExceeded):\n job = get_model_instance(\n pk=job_pk, app_label=job_app_label, model_name=job_model_name\n )\n job.update_status(\n status=job.FAILURE,\n stdout=ev.stdout,\n stderr=ev.stderr,\n error_message=\"Time limit exceeded.\",\n )\n except Exception:\n job = get_model_instance(\n pk=job_pk, app_label=job_app_label, model_name=job_model_name\n )\n job.update_status(\n status=job.FAILURE,\n stdout=ev.stdout,\n stderr=ev.stderr,\n error_message=\"An unexpected error occurred.\",\n )\n raise\n else:\n job = get_model_instance(\n pk=job_pk, app_label=job_app_label, model_name=job_model_name\n )\n job.update_status(\n status=job.SUCCESS, stdout=ev.stdout, stderr=ev.stderr\n )\n\n\n@shared_task\ndef 
mark_long_running_jobs_failed(\n *, app_label: str, model_name: str, extra_filters: Dict[str, str] = None\n):\n \"\"\"\n Mark jobs that have been started but did not finish (maybe due to\n an unrecoverable hardware error). It will mark tasks FAILED that have the\n status STARTED after 1.2x the task limit (which is different for each\n queue), so, this must be scheduled on the same queue that the execute_job\n task is run for this app_label and model_name.\n\n If the task is still running on Celery then it will still be able to\n report as passed later.\n \"\"\"\n Job = apps.get_model( # noqa: N806\n app_label=app_label, model_name=model_name\n )\n\n jobs_to_mark = Job.objects.filter(\n started_at__lt=now()\n - 1.2 * timedelta(seconds=settings.CELERY_TASK_TIME_LIMIT),\n status=Job.STARTED,\n )\n\n if extra_filters:\n jobs_to_mark = jobs_to_mark.filter(**extra_filters)\n\n for j in jobs_to_mark:\n j.update_status(\n status=Job.FAILURE, error_message=\"Time limit exceeded.\"\n )\n\n return [j.pk for j in jobs_to_mark]\n\n\n@shared_task\ndef start_service(*, pk: uuid.UUID, app_label: str, model_name: str):\n session = get_model_instance(\n pk=pk, app_label=app_label, model_name=model_name\n )\n session.start()\n\n\n@shared_task\ndef stop_service(*, pk: uuid.UUID, app_label: str, model_name: str):\n session = get_model_instance(\n pk=pk, app_label=app_label, model_name=model_name\n )\n session.stop()\n\n\n@shared_task\ndef stop_expired_services(*, app_label: str, model_name: str, region: str):\n model = apps.get_model(app_label=app_label, model_name=model_name)\n\n services_to_stop = (\n model.objects.annotate(\n expires=ExpressionWrapper(\n F(\"created\") + F(\"maximum_duration\"),\n output_field=DateTimeField(),\n )\n )\n .filter(expires__lt=now(), region=region)\n .exclude(status=model.STOPPED)\n )\n\n for service in services_to_stop:\n service.stop()\n\n return [str(s) for s in services_to_stop]\n", "path": "app/grandchallenge/components/tasks.py"}]}
3,417
150
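Editor's note on the Grand Challenge patch above: the fix widens the root check so that an image config with no `User` key at all is treated as running as root, instead of raising an unhandled `KeyError` (which appears to be the silent Sentry error the issue describes). A minimal standalone sketch of that guard, assuming only the nested layout of a Docker image-config JSON; the helper name and sample values are illustrative, not project code:

```python
def runs_as_root(config: dict) -> bool:
    # A missing "User" key means the image defaults to root, so it is treated
    # the same as an explicit "", "root" or "0" rather than raising a KeyError.
    user = config.get("config", {}).get("User", "")
    return str(user).lower() in ("", "root", "0")


assert runs_as_root({"config": {}})                      # no USER directive at all
assert runs_as_root({"config": {"User": "root"}})        # explicit root user
assert not runs_as_root({"config": {"User": "1001:1001"}})
```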
gh_patches_debug_31714
rasdani/github-patches
git_diff
open-telemetry__opentelemetry-python-3306
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Prometheus exporter should convert non-monotonic sums to gauges The [current implementation](https://github.com/open-telemetry/opentelemetry-python/blob/main/exporter/opentelemetry-exporter-prometheus/src/opentelemetry/exporter/prometheus/__init__.py#L255) of Sum export in the prometheus exporter does not differentiate between monotonic and non-monotonic sums. The [prometheus compatibility spec for sums](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/compatibility/prometheus_and_openmetrics.md#sums) says: `If the aggregation temporality is cumulative and the sum is non-monotonic, it MUST be converted to a Prometheus Gauge.`. </issue> <code> [start of exporter/opentelemetry-exporter-prometheus/src/opentelemetry/exporter/prometheus/__init__.py] 1 # Copyright The OpenTelemetry Authors 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 """ 16 This library allows export of metrics data to `Prometheus <https://prometheus.io/>`_. 17 18 Usage 19 ----- 20 21 The **OpenTelemetry Prometheus Exporter** allows export of `OpenTelemetry`_ 22 metrics to `Prometheus`_. 23 24 25 .. _Prometheus: https://prometheus.io/ 26 .. _OpenTelemetry: https://github.com/open-telemetry/opentelemetry-python/ 27 28 .. code:: python 29 30 from prometheus_client import start_http_server 31 32 from opentelemetry.exporter.prometheus import PrometheusMetricReader 33 from opentelemetry.metrics import get_meter_provider, set_meter_provider 34 from opentelemetry.sdk.metrics import MeterProvider 35 36 # Start Prometheus client 37 start_http_server(port=8000, addr="localhost") 38 39 # Exporter to export metrics to Prometheus 40 prefix = "MyAppPrefix" 41 reader = PrometheusMetricReader(prefix) 42 43 # Meter is responsible for creating and recording metrics 44 set_meter_provider(MeterProvider(metric_readers=[reader])) 45 meter = get_meter_provider().get_meter("myapp", "0.1.2") 46 47 counter = meter.create_counter( 48 "requests", 49 "requests", 50 "number of requests", 51 ) 52 53 # Labels are used to identify key-values that are associated with a specific 54 # metric that you want to record. 
These are useful for pre-aggregation and can 55 # be used to store custom dimensions pertaining to a metric 56 labels = {"environment": "staging"} 57 58 counter.add(25, labels) 59 input("Press any key to exit...") 60 61 API 62 --- 63 """ 64 65 from collections import deque 66 from itertools import chain 67 from json import dumps 68 from logging import getLogger 69 from re import IGNORECASE, UNICODE, compile 70 from typing import Dict, Sequence, Tuple, Union 71 72 from prometheus_client.core import ( 73 REGISTRY, 74 CounterMetricFamily, 75 GaugeMetricFamily, 76 HistogramMetricFamily, 77 InfoMetricFamily, 78 ) 79 from prometheus_client.core import Metric as PrometheusMetric 80 81 from opentelemetry.sdk.metrics import Counter 82 from opentelemetry.sdk.metrics import Histogram as HistogramInstrument 83 from opentelemetry.sdk.metrics import ( 84 ObservableCounter, 85 ObservableGauge, 86 ObservableUpDownCounter, 87 UpDownCounter, 88 ) 89 from opentelemetry.sdk.metrics.export import ( 90 AggregationTemporality, 91 Gauge, 92 Histogram, 93 HistogramDataPoint, 94 MetricReader, 95 MetricsData, 96 Sum, 97 ) 98 99 _logger = getLogger(__name__) 100 101 _TARGET_INFO_NAME = "target" 102 _TARGET_INFO_DESCRIPTION = "Target metadata" 103 104 105 def _convert_buckets( 106 bucket_counts: Sequence[int], explicit_bounds: Sequence[float] 107 ) -> Sequence[Tuple[str, int]]: 108 buckets = [] 109 total_count = 0 110 for upper_bound, count in zip( 111 chain(explicit_bounds, ["+Inf"]), 112 bucket_counts, 113 ): 114 total_count += count 115 buckets.append((f"{upper_bound}", total_count)) 116 117 return buckets 118 119 120 class PrometheusMetricReader(MetricReader): 121 """Prometheus metric exporter for OpenTelemetry.""" 122 123 def __init__(self, disable_target_info: bool = False) -> None: 124 super().__init__( 125 preferred_temporality={ 126 Counter: AggregationTemporality.CUMULATIVE, 127 UpDownCounter: AggregationTemporality.CUMULATIVE, 128 HistogramInstrument: AggregationTemporality.CUMULATIVE, 129 ObservableCounter: AggregationTemporality.CUMULATIVE, 130 ObservableUpDownCounter: AggregationTemporality.CUMULATIVE, 131 ObservableGauge: AggregationTemporality.CUMULATIVE, 132 } 133 ) 134 self._collector = _CustomCollector(disable_target_info) 135 REGISTRY.register(self._collector) 136 self._collector._callback = self.collect 137 138 def _receive_metrics( 139 self, 140 metrics_data: MetricsData, 141 timeout_millis: float = 10_000, 142 **kwargs, 143 ) -> None: 144 if metrics_data is None: 145 return 146 self._collector.add_metrics_data(metrics_data) 147 148 def shutdown(self, timeout_millis: float = 30_000, **kwargs) -> None: 149 REGISTRY.unregister(self._collector) 150 151 152 class _CustomCollector: 153 """_CustomCollector represents the Prometheus Collector object 154 155 See more: 156 https://github.com/prometheus/client_python#custom-collectors 157 """ 158 159 def __init__(self, disable_target_info: bool = False): 160 self._callback = None 161 self._metrics_datas = deque() 162 self._non_letters_digits_underscore_re = compile( 163 r"[^\w]", UNICODE | IGNORECASE 164 ) 165 self._disable_target_info = disable_target_info 166 self._target_info = None 167 168 def add_metrics_data(self, metrics_data: MetricsData) -> None: 169 """Add metrics to Prometheus data""" 170 self._metrics_datas.append(metrics_data) 171 172 def collect(self) -> None: 173 """Collect fetches the metrics from OpenTelemetry 174 and delivers them as Prometheus Metrics. 
175 Collect is invoked every time a ``prometheus.Gatherer`` is run 176 for example when the HTTP endpoint is invoked by Prometheus. 177 """ 178 if self._callback is not None: 179 self._callback() 180 181 metric_family_id_metric_family = {} 182 183 if len(self._metrics_datas): 184 if not self._disable_target_info: 185 if self._target_info is None: 186 attributes = {} 187 for res in self._metrics_datas[0].resource_metrics: 188 attributes = {**attributes, **res.resource.attributes} 189 190 self._target_info = self._create_info_metric( 191 _TARGET_INFO_NAME, _TARGET_INFO_DESCRIPTION, attributes 192 ) 193 metric_family_id_metric_family[ 194 _TARGET_INFO_NAME 195 ] = self._target_info 196 197 while self._metrics_datas: 198 self._translate_to_prometheus( 199 self._metrics_datas.popleft(), metric_family_id_metric_family 200 ) 201 202 if metric_family_id_metric_family: 203 for metric_family in metric_family_id_metric_family.values(): 204 yield metric_family 205 206 # pylint: disable=too-many-locals,too-many-branches 207 def _translate_to_prometheus( 208 self, 209 metrics_data: MetricsData, 210 metric_family_id_metric_family: Dict[str, PrometheusMetric], 211 ): 212 metrics = [] 213 214 for resource_metrics in metrics_data.resource_metrics: 215 for scope_metrics in resource_metrics.scope_metrics: 216 for metric in scope_metrics.metrics: 217 metrics.append(metric) 218 219 for metric in metrics: 220 label_valuess = [] 221 values = [] 222 223 pre_metric_family_ids = [] 224 225 metric_name = "" 226 metric_name += self._sanitize(metric.name) 227 228 metric_description = metric.description or "" 229 230 for number_data_point in metric.data.data_points: 231 label_keys = [] 232 label_values = [] 233 234 for key, value in number_data_point.attributes.items(): 235 label_keys.append(self._sanitize(key)) 236 label_values.append(self._check_value(value)) 237 238 pre_metric_family_ids.append( 239 "|".join( 240 [ 241 metric_name, 242 metric_description, 243 "%".join(label_keys), 244 metric.unit, 245 ] 246 ) 247 ) 248 249 label_valuess.append(label_values) 250 if isinstance(number_data_point, HistogramDataPoint): 251 values.append( 252 { 253 "bucket_counts": number_data_point.bucket_counts, 254 "explicit_bounds": ( 255 number_data_point.explicit_bounds 256 ), 257 "sum": number_data_point.sum, 258 } 259 ) 260 else: 261 values.append(number_data_point.value) 262 263 for pre_metric_family_id, label_values, value in zip( 264 pre_metric_family_ids, label_valuess, values 265 ): 266 if isinstance(metric.data, Sum): 267 268 metric_family_id = "|".join( 269 [pre_metric_family_id, CounterMetricFamily.__name__] 270 ) 271 272 if metric_family_id not in metric_family_id_metric_family: 273 metric_family_id_metric_family[ 274 metric_family_id 275 ] = CounterMetricFamily( 276 name=metric_name, 277 documentation=metric_description, 278 labels=label_keys, 279 unit=metric.unit, 280 ) 281 metric_family_id_metric_family[ 282 metric_family_id 283 ].add_metric(labels=label_values, value=value) 284 elif isinstance(metric.data, Gauge): 285 286 metric_family_id = "|".join( 287 [pre_metric_family_id, GaugeMetricFamily.__name__] 288 ) 289 290 if ( 291 metric_family_id 292 not in metric_family_id_metric_family.keys() 293 ): 294 metric_family_id_metric_family[ 295 metric_family_id 296 ] = GaugeMetricFamily( 297 name=metric_name, 298 documentation=metric_description, 299 labels=label_keys, 300 unit=metric.unit, 301 ) 302 metric_family_id_metric_family[ 303 metric_family_id 304 ].add_metric(labels=label_values, value=value) 305 elif 
isinstance(metric.data, Histogram): 306 307 metric_family_id = "|".join( 308 [pre_metric_family_id, HistogramMetricFamily.__name__] 309 ) 310 311 if ( 312 metric_family_id 313 not in metric_family_id_metric_family.keys() 314 ): 315 metric_family_id_metric_family[ 316 metric_family_id 317 ] = HistogramMetricFamily( 318 name=metric_name, 319 documentation=metric_description, 320 labels=label_keys, 321 unit=metric.unit, 322 ) 323 metric_family_id_metric_family[ 324 metric_family_id 325 ].add_metric( 326 labels=label_values, 327 buckets=_convert_buckets( 328 value["bucket_counts"], value["explicit_bounds"] 329 ), 330 sum_value=value["sum"], 331 ) 332 else: 333 _logger.warning( 334 "Unsupported metric data. %s", type(metric.data) 335 ) 336 337 def _sanitize(self, key: str) -> str: 338 """sanitize the given metric name or label according to Prometheus rule. 339 Replace all characters other than [A-Za-z0-9_] with '_'. 340 """ 341 return self._non_letters_digits_underscore_re.sub("_", key) 342 343 # pylint: disable=no-self-use 344 def _check_value(self, value: Union[int, float, str, Sequence]) -> str: 345 """Check the label value and return is appropriate representation""" 346 if not isinstance(value, str): 347 return dumps(value, default=str) 348 return str(value) 349 350 def _create_info_metric( 351 self, name: str, description: str, attributes: Dict[str, str] 352 ) -> InfoMetricFamily: 353 """Create an Info Metric Family with list of attributes""" 354 info = InfoMetricFamily(name, description, labels=attributes) 355 info.add_metric(labels=list(attributes.keys()), value=attributes) 356 return info 357 [end of exporter/opentelemetry-exporter-prometheus/src/opentelemetry/exporter/prometheus/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/exporter/opentelemetry-exporter-prometheus/src/opentelemetry/exporter/prometheus/__init__.py b/exporter/opentelemetry-exporter-prometheus/src/opentelemetry/exporter/prometheus/__init__.py --- a/exporter/opentelemetry-exporter-prometheus/src/opentelemetry/exporter/prometheus/__init__.py +++ b/exporter/opentelemetry-exporter-prometheus/src/opentelemetry/exporter/prometheus/__init__.py @@ -263,7 +263,25 @@ for pre_metric_family_id, label_values, value in zip( pre_metric_family_ids, label_valuess, values ): - if isinstance(metric.data, Sum): + is_non_monotonic_sum = ( + isinstance(metric.data, Sum) + and metric.data.is_monotonic is False + ) + is_cumulative = ( + isinstance(metric.data, Sum) + and metric.data.aggregation_temporality + == AggregationTemporality.CUMULATIVE + ) + + # The prometheus compatibility spec for sums says: If the aggregation temporality is cumulative and the sum is non-monotonic, it MUST be converted to a Prometheus Gauge. + should_convert_sum_to_gauge = ( + is_non_monotonic_sum and is_cumulative + ) + + if ( + isinstance(metric.data, Sum) + and not should_convert_sum_to_gauge + ): metric_family_id = "|".join( [pre_metric_family_id, CounterMetricFamily.__name__] @@ -281,7 +299,10 @@ metric_family_id_metric_family[ metric_family_id ].add_metric(labels=label_values, value=value) - elif isinstance(metric.data, Gauge): + elif ( + isinstance(metric.data, Gauge) + or should_convert_sum_to_gauge + ): metric_family_id = "|".join( [pre_metric_family_id, GaugeMetricFamily.__name__]
{"golden_diff": "diff --git a/exporter/opentelemetry-exporter-prometheus/src/opentelemetry/exporter/prometheus/__init__.py b/exporter/opentelemetry-exporter-prometheus/src/opentelemetry/exporter/prometheus/__init__.py\n--- a/exporter/opentelemetry-exporter-prometheus/src/opentelemetry/exporter/prometheus/__init__.py\n+++ b/exporter/opentelemetry-exporter-prometheus/src/opentelemetry/exporter/prometheus/__init__.py\n@@ -263,7 +263,25 @@\n for pre_metric_family_id, label_values, value in zip(\n pre_metric_family_ids, label_valuess, values\n ):\n- if isinstance(metric.data, Sum):\n+ is_non_monotonic_sum = (\n+ isinstance(metric.data, Sum)\n+ and metric.data.is_monotonic is False\n+ )\n+ is_cumulative = (\n+ isinstance(metric.data, Sum)\n+ and metric.data.aggregation_temporality\n+ == AggregationTemporality.CUMULATIVE\n+ )\n+\n+ # The prometheus compatibility spec for sums says: If the aggregation temporality is cumulative and the sum is non-monotonic, it MUST be converted to a Prometheus Gauge.\n+ should_convert_sum_to_gauge = (\n+ is_non_monotonic_sum and is_cumulative\n+ )\n+\n+ if (\n+ isinstance(metric.data, Sum)\n+ and not should_convert_sum_to_gauge\n+ ):\n \n metric_family_id = \"|\".join(\n [pre_metric_family_id, CounterMetricFamily.__name__]\n@@ -281,7 +299,10 @@\n metric_family_id_metric_family[\n metric_family_id\n ].add_metric(labels=label_values, value=value)\n- elif isinstance(metric.data, Gauge):\n+ elif (\n+ isinstance(metric.data, Gauge)\n+ or should_convert_sum_to_gauge\n+ ):\n \n metric_family_id = \"|\".join(\n [pre_metric_family_id, GaugeMetricFamily.__name__]\n", "issue": "Prometheus exporter should convert non-monotonic sums to gauges\nThe [current implementation](https://github.com/open-telemetry/opentelemetry-python/blob/main/exporter/opentelemetry-exporter-prometheus/src/opentelemetry/exporter/prometheus/__init__.py#L255) of Sum export in the prometheus exporter does not differentiate between monotonic and non-monotonic sums.\r\n\r\nThe [prometheus compatibility spec for sums](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/compatibility/prometheus_and_openmetrics.md#sums) says: `If the aggregation temporality is cumulative and the sum is non-monotonic, it MUST be converted to a Prometheus Gauge.`.\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThis library allows export of metrics data to `Prometheus <https://prometheus.io/>`_.\n\nUsage\n-----\n\nThe **OpenTelemetry Prometheus Exporter** allows export of `OpenTelemetry`_\nmetrics to `Prometheus`_.\n\n\n.. _Prometheus: https://prometheus.io/\n.. _OpenTelemetry: https://github.com/open-telemetry/opentelemetry-python/\n\n.. 
code:: python\n\n from prometheus_client import start_http_server\n\n from opentelemetry.exporter.prometheus import PrometheusMetricReader\n from opentelemetry.metrics import get_meter_provider, set_meter_provider\n from opentelemetry.sdk.metrics import MeterProvider\n\n # Start Prometheus client\n start_http_server(port=8000, addr=\"localhost\")\n\n # Exporter to export metrics to Prometheus\n prefix = \"MyAppPrefix\"\n reader = PrometheusMetricReader(prefix)\n\n # Meter is responsible for creating and recording metrics\n set_meter_provider(MeterProvider(metric_readers=[reader]))\n meter = get_meter_provider().get_meter(\"myapp\", \"0.1.2\")\n\n counter = meter.create_counter(\n \"requests\",\n \"requests\",\n \"number of requests\",\n )\n\n # Labels are used to identify key-values that are associated with a specific\n # metric that you want to record. These are useful for pre-aggregation and can\n # be used to store custom dimensions pertaining to a metric\n labels = {\"environment\": \"staging\"}\n\n counter.add(25, labels)\n input(\"Press any key to exit...\")\n\nAPI\n---\n\"\"\"\n\nfrom collections import deque\nfrom itertools import chain\nfrom json import dumps\nfrom logging import getLogger\nfrom re import IGNORECASE, UNICODE, compile\nfrom typing import Dict, Sequence, Tuple, Union\n\nfrom prometheus_client.core import (\n REGISTRY,\n CounterMetricFamily,\n GaugeMetricFamily,\n HistogramMetricFamily,\n InfoMetricFamily,\n)\nfrom prometheus_client.core import Metric as PrometheusMetric\n\nfrom opentelemetry.sdk.metrics import Counter\nfrom opentelemetry.sdk.metrics import Histogram as HistogramInstrument\nfrom opentelemetry.sdk.metrics import (\n ObservableCounter,\n ObservableGauge,\n ObservableUpDownCounter,\n UpDownCounter,\n)\nfrom opentelemetry.sdk.metrics.export import (\n AggregationTemporality,\n Gauge,\n Histogram,\n HistogramDataPoint,\n MetricReader,\n MetricsData,\n Sum,\n)\n\n_logger = getLogger(__name__)\n\n_TARGET_INFO_NAME = \"target\"\n_TARGET_INFO_DESCRIPTION = \"Target metadata\"\n\n\ndef _convert_buckets(\n bucket_counts: Sequence[int], explicit_bounds: Sequence[float]\n) -> Sequence[Tuple[str, int]]:\n buckets = []\n total_count = 0\n for upper_bound, count in zip(\n chain(explicit_bounds, [\"+Inf\"]),\n bucket_counts,\n ):\n total_count += count\n buckets.append((f\"{upper_bound}\", total_count))\n\n return buckets\n\n\nclass PrometheusMetricReader(MetricReader):\n \"\"\"Prometheus metric exporter for OpenTelemetry.\"\"\"\n\n def __init__(self, disable_target_info: bool = False) -> None:\n super().__init__(\n preferred_temporality={\n Counter: AggregationTemporality.CUMULATIVE,\n UpDownCounter: AggregationTemporality.CUMULATIVE,\n HistogramInstrument: AggregationTemporality.CUMULATIVE,\n ObservableCounter: AggregationTemporality.CUMULATIVE,\n ObservableUpDownCounter: AggregationTemporality.CUMULATIVE,\n ObservableGauge: AggregationTemporality.CUMULATIVE,\n }\n )\n self._collector = _CustomCollector(disable_target_info)\n REGISTRY.register(self._collector)\n self._collector._callback = self.collect\n\n def _receive_metrics(\n self,\n metrics_data: MetricsData,\n timeout_millis: float = 10_000,\n **kwargs,\n ) -> None:\n if metrics_data is None:\n return\n self._collector.add_metrics_data(metrics_data)\n\n def shutdown(self, timeout_millis: float = 30_000, **kwargs) -> None:\n REGISTRY.unregister(self._collector)\n\n\nclass _CustomCollector:\n \"\"\"_CustomCollector represents the Prometheus Collector object\n\n See more:\n 
https://github.com/prometheus/client_python#custom-collectors\n \"\"\"\n\n def __init__(self, disable_target_info: bool = False):\n self._callback = None\n self._metrics_datas = deque()\n self._non_letters_digits_underscore_re = compile(\n r\"[^\\w]\", UNICODE | IGNORECASE\n )\n self._disable_target_info = disable_target_info\n self._target_info = None\n\n def add_metrics_data(self, metrics_data: MetricsData) -> None:\n \"\"\"Add metrics to Prometheus data\"\"\"\n self._metrics_datas.append(metrics_data)\n\n def collect(self) -> None:\n \"\"\"Collect fetches the metrics from OpenTelemetry\n and delivers them as Prometheus Metrics.\n Collect is invoked every time a ``prometheus.Gatherer`` is run\n for example when the HTTP endpoint is invoked by Prometheus.\n \"\"\"\n if self._callback is not None:\n self._callback()\n\n metric_family_id_metric_family = {}\n\n if len(self._metrics_datas):\n if not self._disable_target_info:\n if self._target_info is None:\n attributes = {}\n for res in self._metrics_datas[0].resource_metrics:\n attributes = {**attributes, **res.resource.attributes}\n\n self._target_info = self._create_info_metric(\n _TARGET_INFO_NAME, _TARGET_INFO_DESCRIPTION, attributes\n )\n metric_family_id_metric_family[\n _TARGET_INFO_NAME\n ] = self._target_info\n\n while self._metrics_datas:\n self._translate_to_prometheus(\n self._metrics_datas.popleft(), metric_family_id_metric_family\n )\n\n if metric_family_id_metric_family:\n for metric_family in metric_family_id_metric_family.values():\n yield metric_family\n\n # pylint: disable=too-many-locals,too-many-branches\n def _translate_to_prometheus(\n self,\n metrics_data: MetricsData,\n metric_family_id_metric_family: Dict[str, PrometheusMetric],\n ):\n metrics = []\n\n for resource_metrics in metrics_data.resource_metrics:\n for scope_metrics in resource_metrics.scope_metrics:\n for metric in scope_metrics.metrics:\n metrics.append(metric)\n\n for metric in metrics:\n label_valuess = []\n values = []\n\n pre_metric_family_ids = []\n\n metric_name = \"\"\n metric_name += self._sanitize(metric.name)\n\n metric_description = metric.description or \"\"\n\n for number_data_point in metric.data.data_points:\n label_keys = []\n label_values = []\n\n for key, value in number_data_point.attributes.items():\n label_keys.append(self._sanitize(key))\n label_values.append(self._check_value(value))\n\n pre_metric_family_ids.append(\n \"|\".join(\n [\n metric_name,\n metric_description,\n \"%\".join(label_keys),\n metric.unit,\n ]\n )\n )\n\n label_valuess.append(label_values)\n if isinstance(number_data_point, HistogramDataPoint):\n values.append(\n {\n \"bucket_counts\": number_data_point.bucket_counts,\n \"explicit_bounds\": (\n number_data_point.explicit_bounds\n ),\n \"sum\": number_data_point.sum,\n }\n )\n else:\n values.append(number_data_point.value)\n\n for pre_metric_family_id, label_values, value in zip(\n pre_metric_family_ids, label_valuess, values\n ):\n if isinstance(metric.data, Sum):\n\n metric_family_id = \"|\".join(\n [pre_metric_family_id, CounterMetricFamily.__name__]\n )\n\n if metric_family_id not in metric_family_id_metric_family:\n metric_family_id_metric_family[\n metric_family_id\n ] = CounterMetricFamily(\n name=metric_name,\n documentation=metric_description,\n labels=label_keys,\n unit=metric.unit,\n )\n metric_family_id_metric_family[\n metric_family_id\n ].add_metric(labels=label_values, value=value)\n elif isinstance(metric.data, Gauge):\n\n metric_family_id = \"|\".join(\n [pre_metric_family_id, 
GaugeMetricFamily.__name__]\n )\n\n if (\n metric_family_id\n not in metric_family_id_metric_family.keys()\n ):\n metric_family_id_metric_family[\n metric_family_id\n ] = GaugeMetricFamily(\n name=metric_name,\n documentation=metric_description,\n labels=label_keys,\n unit=metric.unit,\n )\n metric_family_id_metric_family[\n metric_family_id\n ].add_metric(labels=label_values, value=value)\n elif isinstance(metric.data, Histogram):\n\n metric_family_id = \"|\".join(\n [pre_metric_family_id, HistogramMetricFamily.__name__]\n )\n\n if (\n metric_family_id\n not in metric_family_id_metric_family.keys()\n ):\n metric_family_id_metric_family[\n metric_family_id\n ] = HistogramMetricFamily(\n name=metric_name,\n documentation=metric_description,\n labels=label_keys,\n unit=metric.unit,\n )\n metric_family_id_metric_family[\n metric_family_id\n ].add_metric(\n labels=label_values,\n buckets=_convert_buckets(\n value[\"bucket_counts\"], value[\"explicit_bounds\"]\n ),\n sum_value=value[\"sum\"],\n )\n else:\n _logger.warning(\n \"Unsupported metric data. %s\", type(metric.data)\n )\n\n def _sanitize(self, key: str) -> str:\n \"\"\"sanitize the given metric name or label according to Prometheus rule.\n Replace all characters other than [A-Za-z0-9_] with '_'.\n \"\"\"\n return self._non_letters_digits_underscore_re.sub(\"_\", key)\n\n # pylint: disable=no-self-use\n def _check_value(self, value: Union[int, float, str, Sequence]) -> str:\n \"\"\"Check the label value and return is appropriate representation\"\"\"\n if not isinstance(value, str):\n return dumps(value, default=str)\n return str(value)\n\n def _create_info_metric(\n self, name: str, description: str, attributes: Dict[str, str]\n ) -> InfoMetricFamily:\n \"\"\"Create an Info Metric Family with list of attributes\"\"\"\n info = InfoMetricFamily(name, description, labels=attributes)\n info.add_metric(labels=list(attributes.keys()), value=attributes)\n return info\n", "path": "exporter/opentelemetry-exporter-prometheus/src/opentelemetry/exporter/prometheus/__init__.py"}]}
4,048
434
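Editor's note on the Prometheus exporter patch above: the added branch amounts to a small predicate over the SDK's `Sum` data type. A sketch of that decision in isolation — the `Sum`, `Gauge` and `AggregationTemporality` names come from the imports already shown in the exporter, while the helper function and its return strings are purely illustrative:

```python
from opentelemetry.sdk.metrics.export import AggregationTemporality, Gauge, Sum


def prometheus_family_for(data) -> str:
    """Pick which Prometheus metric family a metric's data should map to."""
    if isinstance(data, Sum):
        cumulative = (
            data.aggregation_temporality == AggregationTemporality.CUMULATIVE
        )
        # Compatibility spec: a cumulative, non-monotonic sum (e.g. from an
        # UpDownCounter) must be exported as a Gauge rather than a Counter.
        if cumulative and not data.is_monotonic:
            return "GaugeMetricFamily"
        return "CounterMetricFamily"
    if isinstance(data, Gauge):
        return "GaugeMetricFamily"
    return "unsupported"
```

Monotonic sums keep mapping to counter families, so ordinary `Counter` instruments are unaffected by the change.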
gh_patches_debug_26754
rasdani/github-patches
git_diff
searxng__searxng-917
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Switch versioning format since SearXNG is rolling release <!-- PLEASE FILL THESE FIELDS, IT REALLY HELPS THE MAINTAINERS OF SearXNG --> **Is your feature request related to a problem? Please describe.** Right now we use the shortened git commit SHA as version. This can be hard for people to know which version an instance is running on. **Describe the solution you'd like** Instad of displaying version 1.0.0-commit_sha we should do a version like this for example: `2022-02-20-1` This way its more straight forward what version and instance is running and how old this version is. The `1.0.0` is not really needed in rolling release IMO. **Describe alternatives you've considered** Use the shortened commit SHA as version but still drop the `1.0.0`. **Additional context** <!-- Add any other context or screenshots about the feature request here. --> </issue> <code> [start of searx/version.py] 1 # SPDX-License-Identifier: AGPL-3.0-or-later 2 # lint: pylint 3 # pylint: disable=,missing-module-docstring,missing-class-docstring 4 5 import re 6 import os 7 import shlex 8 import subprocess 9 import logging 10 11 # fallback values 12 # if there is searx.version_frozen module, and it is not possible to get the git tag 13 VERSION_STRING = "1.0.0" 14 VERSION_TAG = "1.0.0" 15 GIT_URL = "unknow" 16 GIT_BRANCH = "unknow" 17 18 logger = logging.getLogger("searx") 19 20 SUBPROCESS_RUN_ENV = { 21 "PATH": os.environ["PATH"], 22 "LC_ALL": "C", 23 "LANGUAGE": "", 24 } 25 26 27 def subprocess_run(args, **kwargs): 28 """Call :py:func:`subprocess.run` and return (striped) stdout. If returncode is 29 non-zero, raise a :py:func:`subprocess.CalledProcessError`. 30 """ 31 if not isinstance(args, (list, tuple)): 32 args = shlex.split(args) 33 34 kwargs["env"] = kwargs.get("env", SUBPROCESS_RUN_ENV) 35 kwargs["encoding"] = kwargs.get("encoding", "utf-8") 36 kwargs["stdout"] = subprocess.PIPE 37 kwargs["stderr"] = subprocess.PIPE 38 # raise CalledProcessError if returncode is non-zero 39 kwargs["check"] = True 40 proc = subprocess.run(args, **kwargs) # pylint: disable=subprocess-run-check 41 return proc.stdout.strip() 42 43 44 def get_git_url_and_branch(): 45 try: 46 ref = subprocess_run("git rev-parse --abbrev-ref @{upstream}") 47 except subprocess.CalledProcessError: 48 ref = subprocess_run("git rev-parse --abbrev-ref master@{upstream}") 49 origin, git_branch = ref.split("/", 1) 50 git_url = subprocess_run(["git", "remote", "get-url", origin]) 51 52 # get https:// url from git@ url 53 if git_url.startswith("git@"): 54 git_url = git_url.replace(":", "/", 2).replace("git@", "https://", 1) 55 if git_url.endswith(".git"): 56 git_url = git_url.replace(".git", "", 1) 57 58 return git_url, git_branch 59 60 61 def get_git_version(): 62 try: 63 tag = subprocess_run("git describe HEAD") 64 # a. HEAD is on tag name, example: tag = "v1.0.1" 65 # b. 
HEAD is not a tag name, example "<tag>-<distance>-g<commit>" 66 tag_version, tag_distance, tag_commit = (tag.split("-") + ["", ""])[:3] 67 if re.match(r"v[0-9]+\.[0-9]+\.[0-9]+", tag_version): 68 # tag_version "v1.0.0" becomes "1.0.0" (without the v) 69 # other patterns are kept untouched 70 tag_version = tag_version[1:] 71 # remove "g" prefix from tag_commit 72 if tag_commit and tag_commit[0] == "g": 73 tag_commit = tag_commit[1:] 74 # set git_version to "1.0.0-590-0686e274" or '1.0.0' 75 git_version = "-".join(filter(bool, [tag_version, tag_distance, tag_commit])) 76 except subprocess.CalledProcessError: 77 # fall back to "YYYY.MM.DD.Hash" if there is no tag at all 78 git_version = subprocess_run(r"git show -s --format='%as-%h'") 79 # PEP 440: replace - with . 80 tag_version = git_version = git_version.replace("-", ".") 81 82 # add "-dirty" suffix if there are uncommited changes except searx/settings.yml 83 try: 84 subprocess_run("git diff --quiet -- . ':!searx/settings.yml' ':!utils/brand.env'") 85 except subprocess.CalledProcessError as e: 86 if e.returncode == 1: 87 git_version += "-dirty" 88 else: 89 logger.warning('"%s" returns an unexpected return code %i', e.returncode, e.cmd) 90 return git_version, tag_version 91 92 93 try: 94 from searx.version_frozen import VERSION_STRING, VERSION_TAG, GIT_URL, GIT_BRANCH 95 except ImportError: 96 try: 97 try: 98 VERSION_STRING, VERSION_TAG = get_git_version() 99 except subprocess.CalledProcessError as ex: 100 logger.error("Error while getting the version: %s", ex.stderr) 101 try: 102 GIT_URL, GIT_BRANCH = get_git_url_and_branch() 103 except subprocess.CalledProcessError as ex: 104 logger.error("Error while getting the git URL & branch: %s", ex.stderr) 105 except FileNotFoundError as ex: 106 logger.error("%s is not found, fallback to the default version", ex.filename) 107 108 109 logger.info("version: %s", VERSION_STRING) 110 111 if __name__ == "__main__": 112 import sys 113 114 if len(sys.argv) >= 2 and sys.argv[1] == "freeze": 115 # freeze the version (to create an archive outside a git repository) 116 python_code = f"""# SPDX-License-Identifier: AGPL-3.0-or-later 117 # this file is generated automatically by searx/version.py 118 119 VERSION_STRING = "{VERSION_STRING}" 120 VERSION_TAG = "{VERSION_TAG}" 121 GIT_URL = "{GIT_URL}" 122 GIT_BRANCH = "{GIT_BRANCH}" 123 """ 124 with open(os.path.join(os.path.dirname(__file__), "version_frozen.py"), "w", encoding="utf8") as f: 125 f.write(python_code) 126 print(f"{f.name} created") 127 else: 128 # output shell code to set the variables 129 # usage: eval "$(python -m searx.version)" 130 shell_code = f""" 131 VERSION_STRING="{VERSION_STRING}" 132 VERSION_TAG="{VERSION_TAG}" 133 GIT_URL="{GIT_URL}" 134 GIT_BRANCH="{GIT_BRANCH}" 135 """ 136 print(shell_code) 137 [end of searx/version.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/searx/version.py b/searx/version.py --- a/searx/version.py +++ b/searx/version.py @@ -2,7 +2,6 @@ # lint: pylint # pylint: disable=,missing-module-docstring,missing-class-docstring -import re import os import shlex import subprocess @@ -59,25 +58,8 @@ def get_git_version(): - try: - tag = subprocess_run("git describe HEAD") - # a. HEAD is on tag name, example: tag = "v1.0.1" - # b. HEAD is not a tag name, example "<tag>-<distance>-g<commit>" - tag_version, tag_distance, tag_commit = (tag.split("-") + ["", ""])[:3] - if re.match(r"v[0-9]+\.[0-9]+\.[0-9]+", tag_version): - # tag_version "v1.0.0" becomes "1.0.0" (without the v) - # other patterns are kept untouched - tag_version = tag_version[1:] - # remove "g" prefix from tag_commit - if tag_commit and tag_commit[0] == "g": - tag_commit = tag_commit[1:] - # set git_version to "1.0.0-590-0686e274" or '1.0.0' - git_version = "-".join(filter(bool, [tag_version, tag_distance, tag_commit])) - except subprocess.CalledProcessError: - # fall back to "YYYY.MM.DD.Hash" if there is no tag at all - git_version = subprocess_run(r"git show -s --format='%as-%h'") - # PEP 440: replace - with . - tag_version = git_version = git_version.replace("-", ".") + git_commit_date_hash = subprocess_run(r"git show -s --format='%cs-%h'").replace("-", ".", 2) + tag_version = git_version = git_commit_date_hash # add "-dirty" suffix if there are uncommited changes except searx/settings.yml try:
{"golden_diff": "diff --git a/searx/version.py b/searx/version.py\n--- a/searx/version.py\n+++ b/searx/version.py\n@@ -2,7 +2,6 @@\n # lint: pylint\n # pylint: disable=,missing-module-docstring,missing-class-docstring\n \n-import re\n import os\n import shlex\n import subprocess\n@@ -59,25 +58,8 @@\n \n \n def get_git_version():\n- try:\n- tag = subprocess_run(\"git describe HEAD\")\n- # a. HEAD is on tag name, example: tag = \"v1.0.1\"\n- # b. HEAD is not a tag name, example \"<tag>-<distance>-g<commit>\"\n- tag_version, tag_distance, tag_commit = (tag.split(\"-\") + [\"\", \"\"])[:3]\n- if re.match(r\"v[0-9]+\\.[0-9]+\\.[0-9]+\", tag_version):\n- # tag_version \"v1.0.0\" becomes \"1.0.0\" (without the v)\n- # other patterns are kept untouched\n- tag_version = tag_version[1:]\n- # remove \"g\" prefix from tag_commit\n- if tag_commit and tag_commit[0] == \"g\":\n- tag_commit = tag_commit[1:]\n- # set git_version to \"1.0.0-590-0686e274\" or '1.0.0'\n- git_version = \"-\".join(filter(bool, [tag_version, tag_distance, tag_commit]))\n- except subprocess.CalledProcessError:\n- # fall back to \"YYYY.MM.DD.Hash\" if there is no tag at all\n- git_version = subprocess_run(r\"git show -s --format='%as-%h'\")\n- # PEP 440: replace - with .\n- tag_version = git_version = git_version.replace(\"-\", \".\")\n+ git_commit_date_hash = subprocess_run(r\"git show -s --format='%cs-%h'\").replace(\"-\", \".\", 2)\n+ tag_version = git_version = git_commit_date_hash\n \n # add \"-dirty\" suffix if there are uncommited changes except searx/settings.yml\n try:\n", "issue": "Switch versioning format since SearXNG is rolling release\n<!-- PLEASE FILL THESE FIELDS, IT REALLY HELPS THE MAINTAINERS OF SearXNG -->\r\n\r\n**Is your feature request related to a problem? Please describe.**\r\nRight now we use the shortened git commit SHA as version. This can be hard for people to know which version an instance is running on.\r\n\r\n**Describe the solution you'd like**\r\nInstad of displaying version 1.0.0-commit_sha we should do a version like this for example: `2022-02-20-1` This way its more straight forward what version and instance is running and how old this version is. The `1.0.0` is not really needed in rolling release IMO.\r\n\r\n**Describe alternatives you've considered**\r\nUse the shortened commit SHA as version but still drop the `1.0.0`.\r\n\r\n**Additional context**\r\n<!-- Add any other context or screenshots about the feature request here. -->\r\n\n", "before_files": [{"content": "# SPDX-License-Identifier: AGPL-3.0-or-later\n# lint: pylint\n# pylint: disable=,missing-module-docstring,missing-class-docstring\n\nimport re\nimport os\nimport shlex\nimport subprocess\nimport logging\n\n# fallback values\n# if there is searx.version_frozen module, and it is not possible to get the git tag\nVERSION_STRING = \"1.0.0\"\nVERSION_TAG = \"1.0.0\"\nGIT_URL = \"unknow\"\nGIT_BRANCH = \"unknow\"\n\nlogger = logging.getLogger(\"searx\")\n\nSUBPROCESS_RUN_ENV = {\n \"PATH\": os.environ[\"PATH\"],\n \"LC_ALL\": \"C\",\n \"LANGUAGE\": \"\",\n}\n\n\ndef subprocess_run(args, **kwargs):\n \"\"\"Call :py:func:`subprocess.run` and return (striped) stdout. 
If returncode is\n non-zero, raise a :py:func:`subprocess.CalledProcessError`.\n \"\"\"\n if not isinstance(args, (list, tuple)):\n args = shlex.split(args)\n\n kwargs[\"env\"] = kwargs.get(\"env\", SUBPROCESS_RUN_ENV)\n kwargs[\"encoding\"] = kwargs.get(\"encoding\", \"utf-8\")\n kwargs[\"stdout\"] = subprocess.PIPE\n kwargs[\"stderr\"] = subprocess.PIPE\n # raise CalledProcessError if returncode is non-zero\n kwargs[\"check\"] = True\n proc = subprocess.run(args, **kwargs) # pylint: disable=subprocess-run-check\n return proc.stdout.strip()\n\n\ndef get_git_url_and_branch():\n try:\n ref = subprocess_run(\"git rev-parse --abbrev-ref @{upstream}\")\n except subprocess.CalledProcessError:\n ref = subprocess_run(\"git rev-parse --abbrev-ref master@{upstream}\")\n origin, git_branch = ref.split(\"/\", 1)\n git_url = subprocess_run([\"git\", \"remote\", \"get-url\", origin])\n\n # get https:// url from git@ url\n if git_url.startswith(\"git@\"):\n git_url = git_url.replace(\":\", \"/\", 2).replace(\"git@\", \"https://\", 1)\n if git_url.endswith(\".git\"):\n git_url = git_url.replace(\".git\", \"\", 1)\n\n return git_url, git_branch\n\n\ndef get_git_version():\n try:\n tag = subprocess_run(\"git describe HEAD\")\n # a. HEAD is on tag name, example: tag = \"v1.0.1\"\n # b. HEAD is not a tag name, example \"<tag>-<distance>-g<commit>\"\n tag_version, tag_distance, tag_commit = (tag.split(\"-\") + [\"\", \"\"])[:3]\n if re.match(r\"v[0-9]+\\.[0-9]+\\.[0-9]+\", tag_version):\n # tag_version \"v1.0.0\" becomes \"1.0.0\" (without the v)\n # other patterns are kept untouched\n tag_version = tag_version[1:]\n # remove \"g\" prefix from tag_commit\n if tag_commit and tag_commit[0] == \"g\":\n tag_commit = tag_commit[1:]\n # set git_version to \"1.0.0-590-0686e274\" or '1.0.0'\n git_version = \"-\".join(filter(bool, [tag_version, tag_distance, tag_commit]))\n except subprocess.CalledProcessError:\n # fall back to \"YYYY.MM.DD.Hash\" if there is no tag at all\n git_version = subprocess_run(r\"git show -s --format='%as-%h'\")\n # PEP 440: replace - with .\n tag_version = git_version = git_version.replace(\"-\", \".\")\n\n # add \"-dirty\" suffix if there are uncommited changes except searx/settings.yml\n try:\n subprocess_run(\"git diff --quiet -- . 
':!searx/settings.yml' ':!utils/brand.env'\")\n except subprocess.CalledProcessError as e:\n if e.returncode == 1:\n git_version += \"-dirty\"\n else:\n logger.warning('\"%s\" returns an unexpected return code %i', e.returncode, e.cmd)\n return git_version, tag_version\n\n\ntry:\n from searx.version_frozen import VERSION_STRING, VERSION_TAG, GIT_URL, GIT_BRANCH\nexcept ImportError:\n try:\n try:\n VERSION_STRING, VERSION_TAG = get_git_version()\n except subprocess.CalledProcessError as ex:\n logger.error(\"Error while getting the version: %s\", ex.stderr)\n try:\n GIT_URL, GIT_BRANCH = get_git_url_and_branch()\n except subprocess.CalledProcessError as ex:\n logger.error(\"Error while getting the git URL & branch: %s\", ex.stderr)\n except FileNotFoundError as ex:\n logger.error(\"%s is not found, fallback to the default version\", ex.filename)\n\n\nlogger.info(\"version: %s\", VERSION_STRING)\n\nif __name__ == \"__main__\":\n import sys\n\n if len(sys.argv) >= 2 and sys.argv[1] == \"freeze\":\n # freeze the version (to create an archive outside a git repository)\n python_code = f\"\"\"# SPDX-License-Identifier: AGPL-3.0-or-later\n# this file is generated automatically by searx/version.py\n\nVERSION_STRING = \"{VERSION_STRING}\"\nVERSION_TAG = \"{VERSION_TAG}\"\nGIT_URL = \"{GIT_URL}\"\nGIT_BRANCH = \"{GIT_BRANCH}\"\n\"\"\"\n with open(os.path.join(os.path.dirname(__file__), \"version_frozen.py\"), \"w\", encoding=\"utf8\") as f:\n f.write(python_code)\n print(f\"{f.name} created\")\n else:\n # output shell code to set the variables\n # usage: eval \"$(python -m searx.version)\"\n shell_code = f\"\"\"\nVERSION_STRING=\"{VERSION_STRING}\"\nVERSION_TAG=\"{VERSION_TAG}\"\nGIT_URL=\"{GIT_URL}\"\nGIT_BRANCH=\"{GIT_BRANCH}\"\n\"\"\"\n print(shell_code)\n", "path": "searx/version.py"}]}
2,328
489
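Editor's note on the SearXNG patch above: the new rolling-release version string is simply the committer date plus abbreviated hash reported by `git show`, with the two date dashes turned into dots. A standalone sketch of that derivation, using plain `subprocess` in place of the module's `subprocess_run` helper (the function name below is hypothetical):

```python
import subprocess


def git_date_hash_version() -> str:
    # %cs is the committer date as YYYY-MM-DD, %h the abbreviated commit hash.
    out = subprocess.check_output(
        ["git", "show", "-s", "--format=%cs-%h"], encoding="utf-8"
    ).strip()
    # e.g. "2022-02-20-0686e27" -> "2022.02.20-0686e27"; only the first two
    # dashes change, matching the patch's `.replace("-", ".", 2)`.
    return out.replace("-", ".", 2)
```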
gh_patches_debug_28625
rasdani/github-patches
git_diff
ray-project__ray-1094
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Ray fails to compile from master branch with Python 3 only Relevant log output: ``` + git clone https://github.com/ray-project/catapult.git /tmp/pip-_r2ylpv0-build/src/thirdparty/catapult Cloning into '/tmp/pip-_r2ylpv0-build/src/thirdparty/catapult'... + break + pushd /tmp/pip-_r2ylpv0-build/src/thirdparty/catapult /tmp/pip-_r2ylpv0-build/src/thirdparty/catapult /tmp/pip-_r2ylpv0-build/python + git checkout 18cd334755701cf0c3b90b7172126c686d2eb787 Note: checking out '18cd334755701cf0c3b90b7172126c686d2eb787'. You are in 'detached HEAD' state. You can look around, make experimental changes and commit them, and you can discard any commits you make in this state without impacting any branches by performing another checkout. If you want to create a new branch to retain commits you create, you may do so (now or later) by using -b with the checkout command again. Example: git checkout -b <new-branch-name> HEAD is now at 18cd334... Allow timeline scroll-to-zoom without holding ALT + popd /tmp/pip-_r2ylpv0-build/python + [[ ! -f /tmp/pip-_r2ylpv0-build/src/thirdparty/../../python/ray/core/src/catapult_files/index.html ]] + python2 /tmp/pip-_r2ylpv0-build/src/thirdparty/catapult/tracing/bin/vulcanize_trace_viewer --config chrome --output /tmp/pip-_r2ylpv0-build/src/thirdparty/../../python/ray/core/src/catapult_files/trace_viewer_full.html /tmp/pip-_r2ylpv0-build/src/thirdparty/build_ui.sh: line 52: python2: command not found Traceback (most recent call last): File "<string>", line 1, in <module> File "/tmp/pip-_r2ylpv0-build/python/setup.py", line 99, in <module> license="Apache 2.0") File "/usr/local/lib/python3.6/distutils/core.py", line 148, in setup dist.run_commands() File "/usr/local/lib/python3.6/distutils/dist.py", line 955, in run_commands self.run_command(cmd) File "/usr/local/lib/python3.6/distutils/dist.py", line 974, in run_command cmd_obj.run() File "/usr/local/lib/python3.6/site-packages/setuptools/command/install.py", line 61, in run return orig.install.run(self) File "/usr/local/lib/python3.6/distutils/command/install.py", line 545, in run self.run_command('build') File "/usr/local/lib/python3.6/distutils/cmd.py", line 313, in run_command self.distribution.run_command(command) File "/usr/local/lib/python3.6/distutils/dist.py", line 974, in run_command cmd_obj.run() File "/usr/local/lib/python3.6/distutils/command/build.py", line 135, in run self.run_command(cmd_name) File "/usr/local/lib/python3.6/distutils/cmd.py", line 313, in run_command self.distribution.run_command(command) File "/usr/local/lib/python3.6/distutils/dist.py", line 974, in run_command cmd_obj.run() File "/tmp/pip-_r2ylpv0-build/python/setup.py", line 38, in run subprocess.check_call(["../build.sh", sys.executable]) File "/usr/local/lib/python3.6/subprocess.py", line 291, in check_call raise CalledProcessError(retcode, cmd) subprocess.CalledProcessError: Command '['../build.sh', '/usr/local/bin/python3.6']' returned non-zero exit status 127. 
``` </issue> <code> [start of python/setup.py] 1 from __future__ import absolute_import 2 from __future__ import division 3 from __future__ import print_function 4 5 import os 6 import shutil 7 import subprocess 8 import sys 9 10 from setuptools import setup, find_packages, Distribution 11 import setuptools.command.build_ext as _build_ext 12 13 # Ideally, we could include these files by putting them in a 14 # MANIFEST.in or using the package_data argument to setup, but the 15 # MANIFEST.in gets applied at the very beginning when setup.py runs 16 # before these files have been created, so we have to move the files 17 # manually. 18 ray_files = [ 19 "ray/core/src/common/thirdparty/redis/src/redis-server", 20 "ray/core/src/common/redis_module/libray_redis_module.so", 21 "ray/core/src/plasma/plasma_store", 22 "ray/core/src/plasma/plasma_manager", 23 "ray/core/src/local_scheduler/local_scheduler", 24 "ray/core/src/local_scheduler/liblocal_scheduler_library.so", 25 "ray/core/src/global_scheduler/global_scheduler", 26 "ray/core/src/catapult_files/index.html", 27 "ray/core/src/catapult_files/trace_viewer_full.html", 28 "ray/WebUI.ipynb" 29 ] 30 31 32 class build_ext(_build_ext.build_ext): 33 def run(self): 34 # Note: We are passing in sys.executable so that we use the same 35 # version of Python to build pyarrow inside the build.sh script. Note 36 # that certain flags will not be passed along such as --user or sudo. 37 # TODO(rkn): Fix this. 38 subprocess.check_call(["../build.sh", sys.executable]) 39 40 # We also need to install pyarrow along with Ray, so make sure that the 41 # relevant non-Python pyarrow files get copied. 42 pyarrow_files = [ 43 os.path.join("ray/pyarrow_files/pyarrow", filename) 44 for filename in os.listdir("./ray/pyarrow_files/pyarrow") 45 if not os.path.isdir(os.path.join("ray/pyarrow_files/pyarrow", 46 filename))] 47 48 files_to_include = ray_files + pyarrow_files 49 50 for filename in files_to_include: 51 self.move_file(filename) 52 # Copy over the autogenerated flatbuffer Python bindings. 53 generated_python_directory = "ray/core/generated" 54 for filename in os.listdir(generated_python_directory): 55 if filename[-3:] == ".py": 56 self.move_file(os.path.join(generated_python_directory, 57 filename)) 58 59 def move_file(self, filename): 60 # TODO(rkn): This feels very brittle. It may not handle all cases. See 61 # https://github.com/apache/arrow/blob/master/python/setup.py for an 62 # example. 63 source = filename 64 destination = os.path.join(self.build_lib, filename) 65 # Create the target directory if it doesn't already exist. 66 parent_directory = os.path.dirname(destination) 67 if not os.path.exists(parent_directory): 68 os.makedirs(parent_directory) 69 print("Copying {} to {}.".format(source, destination)) 70 shutil.copy(source, destination) 71 72 73 class BinaryDistribution(Distribution): 74 def has_ext_modules(self): 75 return True 76 77 78 setup(name="ray", 79 version="0.2.1", 80 packages=find_packages(), 81 cmdclass={"build_ext": build_ext}, 82 # The BinaryDistribution argument triggers build_ext. 83 distclass=BinaryDistribution, 84 install_requires=["numpy", 85 "funcsigs", 86 "click", 87 "colorama", 88 "psutil", 89 "pytest", 90 "redis", 91 "cloudpickle >= 0.2.2", 92 # The six module is required by pyarrow. 
93 "six >= 1.0.0", 94 "flatbuffers"], 95 setup_requires=["cython >= 0.23"], 96 entry_points={"console_scripts": ["ray=ray.scripts.scripts:main"]}, 97 include_package_data=True, 98 zip_safe=False, 99 license="Apache 2.0") 100 [end of python/setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/python/setup.py b/python/setup.py --- a/python/setup.py +++ b/python/setup.py @@ -23,11 +23,23 @@ "ray/core/src/local_scheduler/local_scheduler", "ray/core/src/local_scheduler/liblocal_scheduler_library.so", "ray/core/src/global_scheduler/global_scheduler", - "ray/core/src/catapult_files/index.html", - "ray/core/src/catapult_files/trace_viewer_full.html", "ray/WebUI.ipynb" ] +optional_ray_files = [] + +ray_ui_files = [ + "ray/core/src/catapult_files/index.html", + "ray/core/src/catapult_files/trace_viewer_full.html" +] + +# The UI files are mandatory if the INCLUDE_UI environment variable equals 1. +# Otherwise, they are optional. +if "INCLUDE_UI" in os.environ and os.environ["INCLUDE_UI"] == "1": + ray_files += ray_ui_files +else: + optional_ray_files += ray_ui_files + class build_ext(_build_ext.build_ext): def run(self): @@ -56,6 +68,14 @@ self.move_file(os.path.join(generated_python_directory, filename)) + # Try to copy over the optional files. + for filename in optional_ray_files: + try: + self.move_file(filename) + except Exception as e: + print("Failed to copy optional file {}. This is ok." + .format(filename)) + def move_file(self, filename): # TODO(rkn): This feels very brittle. It may not handle all cases. See # https://github.com/apache/arrow/blob/master/python/setup.py for an
{"golden_diff": "diff --git a/python/setup.py b/python/setup.py\n--- a/python/setup.py\n+++ b/python/setup.py\n@@ -23,11 +23,23 @@\n \"ray/core/src/local_scheduler/local_scheduler\",\n \"ray/core/src/local_scheduler/liblocal_scheduler_library.so\",\n \"ray/core/src/global_scheduler/global_scheduler\",\n- \"ray/core/src/catapult_files/index.html\",\n- \"ray/core/src/catapult_files/trace_viewer_full.html\",\n \"ray/WebUI.ipynb\"\n ]\n \n+optional_ray_files = []\n+\n+ray_ui_files = [\n+ \"ray/core/src/catapult_files/index.html\",\n+ \"ray/core/src/catapult_files/trace_viewer_full.html\"\n+]\n+\n+# The UI files are mandatory if the INCLUDE_UI environment variable equals 1.\n+# Otherwise, they are optional.\n+if \"INCLUDE_UI\" in os.environ and os.environ[\"INCLUDE_UI\"] == \"1\":\n+ ray_files += ray_ui_files\n+else:\n+ optional_ray_files += ray_ui_files\n+\n \n class build_ext(_build_ext.build_ext):\n def run(self):\n@@ -56,6 +68,14 @@\n self.move_file(os.path.join(generated_python_directory,\n filename))\n \n+ # Try to copy over the optional files.\n+ for filename in optional_ray_files:\n+ try:\n+ self.move_file(filename)\n+ except Exception as e:\n+ print(\"Failed to copy optional file {}. This is ok.\"\n+ .format(filename))\n+\n def move_file(self, filename):\n # TODO(rkn): This feels very brittle. It may not handle all cases. See\n # https://github.com/apache/arrow/blob/master/python/setup.py for an\n", "issue": "Ray fails to compile from master branch with Python 3 only\nRelevant log output:\r\n\r\n```\r\n + git clone https://github.com/ray-project/catapult.git /tmp/pip-_r2ylpv0-build/src/thirdparty/catapult\r\n Cloning into '/tmp/pip-_r2ylpv0-build/src/thirdparty/catapult'...\r\n + break\r\n + pushd /tmp/pip-_r2ylpv0-build/src/thirdparty/catapult\r\n /tmp/pip-_r2ylpv0-build/src/thirdparty/catapult /tmp/pip-_r2ylpv0-build/python\r\n + git checkout 18cd334755701cf0c3b90b7172126c686d2eb787\r\n Note: checking out '18cd334755701cf0c3b90b7172126c686d2eb787'.\r\n \r\n You are in 'detached HEAD' state. You can look around, make experimental\r\n changes and commit them, and you can discard any commits you make in this\r\n state without impacting any branches by performing another checkout.\r\n \r\n If you want to create a new branch to retain commits you create, you may\r\n do so (now or later) by using -b with the checkout command again. Example:\r\n \r\n git checkout -b <new-branch-name>\r\n \r\n HEAD is now at 18cd334... Allow timeline scroll-to-zoom without holding ALT\r\n + popd\r\n /tmp/pip-_r2ylpv0-build/python\r\n + [[ ! 
-f /tmp/pip-_r2ylpv0-build/src/thirdparty/../../python/ray/core/src/catapult_files/index.html ]]\r\n + python2 /tmp/pip-_r2ylpv0-build/src/thirdparty/catapult/tracing/bin/vulcanize_trace_viewer --config chrome --output /tmp/pip-_r2ylpv0-build/src/thirdparty/../../python/ray/core/src/catapult_files/trace_viewer_full.html\r\n /tmp/pip-_r2ylpv0-build/src/thirdparty/build_ui.sh: line 52: python2: command not found\r\n Traceback (most recent call last):\r\n File \"<string>\", line 1, in <module>\r\n File \"/tmp/pip-_r2ylpv0-build/python/setup.py\", line 99, in <module>\r\n license=\"Apache 2.0\")\r\n File \"/usr/local/lib/python3.6/distutils/core.py\", line 148, in setup\r\n dist.run_commands()\r\n File \"/usr/local/lib/python3.6/distutils/dist.py\", line 955, in run_commands\r\n self.run_command(cmd)\r\n File \"/usr/local/lib/python3.6/distutils/dist.py\", line 974, in run_command\r\n cmd_obj.run()\r\n File \"/usr/local/lib/python3.6/site-packages/setuptools/command/install.py\", line 61, in run\r\n return orig.install.run(self)\r\n File \"/usr/local/lib/python3.6/distutils/command/install.py\", line 545, in run\r\n self.run_command('build')\r\n File \"/usr/local/lib/python3.6/distutils/cmd.py\", line 313, in run_command\r\n self.distribution.run_command(command)\r\n File \"/usr/local/lib/python3.6/distutils/dist.py\", line 974, in run_command\r\n cmd_obj.run()\r\n File \"/usr/local/lib/python3.6/distutils/command/build.py\", line 135, in run\r\n self.run_command(cmd_name)\r\n File \"/usr/local/lib/python3.6/distutils/cmd.py\", line 313, in run_command\r\n self.distribution.run_command(command)\r\n File \"/usr/local/lib/python3.6/distutils/dist.py\", line 974, in run_command\r\n cmd_obj.run()\r\n File \"/tmp/pip-_r2ylpv0-build/python/setup.py\", line 38, in run\r\n subprocess.check_call([\"../build.sh\", sys.executable])\r\n File \"/usr/local/lib/python3.6/subprocess.py\", line 291, in check_call\r\n raise CalledProcessError(retcode, cmd)\r\n subprocess.CalledProcessError: Command '['../build.sh', '/usr/local/bin/python3.6']' returned non-zero exit status 127.\r\n```\n", "before_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport shutil\nimport subprocess\nimport sys\n\nfrom setuptools import setup, find_packages, Distribution\nimport setuptools.command.build_ext as _build_ext\n\n# Ideally, we could include these files by putting them in a\n# MANIFEST.in or using the package_data argument to setup, but the\n# MANIFEST.in gets applied at the very beginning when setup.py runs\n# before these files have been created, so we have to move the files\n# manually.\nray_files = [\n \"ray/core/src/common/thirdparty/redis/src/redis-server\",\n \"ray/core/src/common/redis_module/libray_redis_module.so\",\n \"ray/core/src/plasma/plasma_store\",\n \"ray/core/src/plasma/plasma_manager\",\n \"ray/core/src/local_scheduler/local_scheduler\",\n \"ray/core/src/local_scheduler/liblocal_scheduler_library.so\",\n \"ray/core/src/global_scheduler/global_scheduler\",\n \"ray/core/src/catapult_files/index.html\",\n \"ray/core/src/catapult_files/trace_viewer_full.html\",\n \"ray/WebUI.ipynb\"\n]\n\n\nclass build_ext(_build_ext.build_ext):\n def run(self):\n # Note: We are passing in sys.executable so that we use the same\n # version of Python to build pyarrow inside the build.sh script. 
Note\n # that certain flags will not be passed along such as --user or sudo.\n # TODO(rkn): Fix this.\n subprocess.check_call([\"../build.sh\", sys.executable])\n\n # We also need to install pyarrow along with Ray, so make sure that the\n # relevant non-Python pyarrow files get copied.\n pyarrow_files = [\n os.path.join(\"ray/pyarrow_files/pyarrow\", filename)\n for filename in os.listdir(\"./ray/pyarrow_files/pyarrow\")\n if not os.path.isdir(os.path.join(\"ray/pyarrow_files/pyarrow\",\n filename))]\n\n files_to_include = ray_files + pyarrow_files\n\n for filename in files_to_include:\n self.move_file(filename)\n # Copy over the autogenerated flatbuffer Python bindings.\n generated_python_directory = \"ray/core/generated\"\n for filename in os.listdir(generated_python_directory):\n if filename[-3:] == \".py\":\n self.move_file(os.path.join(generated_python_directory,\n filename))\n\n def move_file(self, filename):\n # TODO(rkn): This feels very brittle. It may not handle all cases. See\n # https://github.com/apache/arrow/blob/master/python/setup.py for an\n # example.\n source = filename\n destination = os.path.join(self.build_lib, filename)\n # Create the target directory if it doesn't already exist.\n parent_directory = os.path.dirname(destination)\n if not os.path.exists(parent_directory):\n os.makedirs(parent_directory)\n print(\"Copying {} to {}.\".format(source, destination))\n shutil.copy(source, destination)\n\n\nclass BinaryDistribution(Distribution):\n def has_ext_modules(self):\n return True\n\n\nsetup(name=\"ray\",\n version=\"0.2.1\",\n packages=find_packages(),\n cmdclass={\"build_ext\": build_ext},\n # The BinaryDistribution argument triggers build_ext.\n distclass=BinaryDistribution,\n install_requires=[\"numpy\",\n \"funcsigs\",\n \"click\",\n \"colorama\",\n \"psutil\",\n \"pytest\",\n \"redis\",\n \"cloudpickle >= 0.2.2\",\n # The six module is required by pyarrow.\n \"six >= 1.0.0\",\n \"flatbuffers\"],\n setup_requires=[\"cython >= 0.23\"],\n entry_points={\"console_scripts\": [\"ray=ray.scripts.scripts:main\"]},\n include_package_data=True,\n zip_safe=False,\n license=\"Apache 2.0\")\n", "path": "python/setup.py"}]}
2,518
366
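The record above resolves the Python-3-only build failure by making the catapult UI assets optional instead of requiring a `python2` binary at install time. The snippet below is a condensed restatement of the gating logic from that golden diff, included only because the unified diff inside the record is hard to read; the file lists are abridged and `os.environ.get` is used as an equivalent shorthand for the membership-plus-equality check in the patch.

```python
import os

# Abridged: only the UI-related entries from setup.py are shown here.
ray_files = ["ray/WebUI.ipynb"]
optional_ray_files = []
ray_ui_files = [
    "ray/core/src/catapult_files/index.html",
    "ray/core/src/catapult_files/trace_viewer_full.html",
]

# UI files are mandatory only when INCLUDE_UI=1; otherwise they become
# best-effort optional copies, so a Python-3-only host can still build.
if os.environ.get("INCLUDE_UI") == "1":
    ray_files += ray_ui_files
else:
    optional_ray_files += ray_ui_files
```

With this split, a build that wants the timeline UI opts in by exporting `INCLUDE_UI=1` before installing, and every other build treats those two catapult files as optional copies that may simply be missing.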
gh_patches_debug_15050
rasdani/github-patches
git_diff
ansible__awx-8136
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Support `!include` and `!import` constructors in `awx import -f yaml` command <!-- Issues are for **concrete, actionable bugs and feature requests** only - if you're just asking for debugging help or technical support, please use: - http://webchat.freenode.net/?channels=ansible-awx - https://groups.google.com/forum/#!forum/awx-project We have to limit this because of limited volunteer time to respond to issues! --> ##### ISSUE TYPE - Feature Idea ##### SUMMARY <!-- Briefly describe the problem or desired enhancement. --> Support `!import` and `!include` with `awx import -f yaml` command. This is useful when working with a large number of AWX resources. This allows breaking config into smaller files, and having a top-level file like this: ```yaml # resources.yaml projects: - !include projects/project1.yaml - !include projects/project2.yaml job_templates: - !include jobs/job1.yaml # .... ``` Which allows managing resources in individual files (e.g. using a friendly directory structure), and still being able to import with `awx import -f yaml < resources.yaml`. #### SUGGESTED IMPLEMENTATION Use `awxkit.yaml_file.Loader` instead of `yaml.safe_load` in https://github.com/ansible/awx/blob/devel/awxkit/awxkit/cli/resource.py#L143 </issue> <code> [start of awxkit/awxkit/cli/resource.py] 1 import yaml 2 import json 3 import os 4 5 from awxkit import api, config 6 from awxkit.exceptions import ImportExportError 7 from awxkit.utils import to_str 8 from awxkit.api.pages import Page 9 from awxkit.api.pages.api import EXPORTABLE_RESOURCES 10 from awxkit.cli.format import FORMATTERS, format_response, add_authentication_arguments 11 from awxkit.cli.utils import CustomRegistryMeta, cprint 12 13 14 CONTROL_RESOURCES = ['ping', 'config', 'me', 'metrics'] 15 16 DEPRECATED_RESOURCES = { 17 'ad_hoc_commands': 'ad_hoc', 18 'applications': 'application', 19 'credentials': 'credential', 20 'credential_types': 'credential_type', 21 'groups': 'group', 22 'hosts': 'host', 23 'instances': 'instance', 24 'instance_groups': 'instance_group', 25 'inventory': 'inventories', 26 'inventory_scripts': 'inventory_script', 27 'inventory_sources': 'inventory_source', 28 'inventory_updates': 'inventory_update', 29 'jobs': 'job', 30 'job_templates': 'job_template', 31 'labels': 'label', 32 'workflow_job_template_nodes': 'node', 33 'notification_templates': 'notification_template', 34 'organizations': 'organization', 35 'projects': 'project', 36 'project_updates': 'project_update', 37 'roles': 'role', 38 'schedules': 'schedule', 39 'settings': 'setting', 40 'teams': 'team', 41 'workflow_job_templates': 'workflow', 42 'workflow_jobs': 'workflow_job', 43 'users': 'user' 44 } 45 DEPRECATED_RESOURCES_REVERSE = dict( 46 (v, k) for k, v in DEPRECATED_RESOURCES.items() 47 ) 48 49 50 class CustomCommand(metaclass=CustomRegistryMeta): 51 """Base class for implementing custom commands. 52 53 Custom commands represent static code which should run - they are 54 responsible for returning and formatting their own output (which may or may 55 not be JSON/YAML). 56 """ 57 58 help_text = '' 59 60 @property 61 def name(self): 62 raise NotImplementedError() 63 64 def handle(self, client, parser): 65 """To be implemented by subclasses. 
66 Should return a dictionary that is JSON serializable 67 """ 68 raise NotImplementedError() 69 70 71 class Login(CustomCommand): 72 name = 'login' 73 help_text = 'authenticate and retrieve an OAuth2 token' 74 75 def print_help(self, parser): 76 add_authentication_arguments(parser, os.environ) 77 parser.print_help() 78 79 def handle(self, client, parser): 80 auth = parser.add_argument_group('OAuth2.0 Options') 81 auth.add_argument('--description', help='description of the generated OAuth2.0 token', metavar='TEXT') 82 auth.add_argument('--conf.client_id', metavar='TEXT') 83 auth.add_argument('--conf.client_secret', metavar='TEXT') 84 auth.add_argument( 85 '--conf.scope', choices=['read', 'write'], default='write' 86 ) 87 if client.help: 88 self.print_help(parser) 89 raise SystemExit() 90 parsed = parser.parse_known_args()[0] 91 kwargs = { 92 'client_id': getattr(parsed, 'conf.client_id', None), 93 'client_secret': getattr(parsed, 'conf.client_secret', None), 94 'scope': getattr(parsed, 'conf.scope', None), 95 } 96 if getattr(parsed, 'description', None): 97 kwargs['description'] = parsed.description 98 try: 99 token = api.Api().get_oauth2_token(**kwargs) 100 except Exception as e: 101 self.print_help(parser) 102 cprint( 103 'Error retrieving an OAuth2.0 token ({}).'.format(e.__class__), 104 'red' 105 ) 106 else: 107 fmt = client.get_config('format') 108 if fmt == 'human': 109 print('export TOWER_OAUTH_TOKEN={}'.format(token)) 110 else: 111 print(to_str(FORMATTERS[fmt]({'token': token}, '.')).strip()) 112 113 114 class Config(CustomCommand): 115 name = 'config' 116 help_text = 'print current configuration values' 117 118 def handle(self, client, parser): 119 if client.help: 120 parser.print_help() 121 raise SystemExit() 122 return { 123 'base_url': config.base_url, 124 'token': client.get_config('token'), 125 'use_sessions': config.use_sessions, 126 'credentials': config.credentials, 127 } 128 129 130 class Import(CustomCommand): 131 name = 'import' 132 help_text = 'import resources into Tower' 133 134 def handle(self, client, parser): 135 if client.help: 136 parser.print_help() 137 raise SystemExit() 138 139 fmt = client.get_config('format') 140 if fmt == 'json': 141 data = json.load(client.stdin) 142 elif fmt == 'yaml': 143 data = yaml.safe_load(client.stdin) 144 else: 145 raise ImportExportError("Unsupported format for Import: " + fmt) 146 147 client.authenticate() 148 client.v2.import_assets(data) 149 150 return {} 151 152 153 class Export(CustomCommand): 154 name = 'export' 155 help_text = 'export resources from Tower' 156 157 def extend_parser(self, parser): 158 resources = parser.add_argument_group('resources') 159 160 for resource in EXPORTABLE_RESOURCES: 161 # This parsing pattern will result in 3 different possible outcomes: 162 # 1) the resource flag is not used at all, which will result in the attr being None 163 # 2) the resource flag is used with no argument, which will result in the attr being '' 164 # 3) the resource flag is used with an argument, and the attr will be that argument's value 165 resources.add_argument('--{}'.format(resource), nargs='?', const='') 166 167 def handle(self, client, parser): 168 self.extend_parser(parser) 169 170 if client.help: 171 parser.print_help() 172 raise SystemExit() 173 174 parsed = parser.parse_known_args()[0] 175 kwargs = {resource: getattr(parsed, resource, None) for resource in EXPORTABLE_RESOURCES} 176 177 client.authenticate() 178 return client.v2.export_assets(**kwargs) 179 180 181 def parse_resource(client, skip_deprecated=False): 
182 subparsers = client.parser.add_subparsers( 183 dest='resource', 184 metavar='resource', 185 ) 186 187 # check if the user is running a custom command 188 for command in CustomCommand.__subclasses__(): 189 client.subparsers[command.name] = subparsers.add_parser( 190 command.name, help=command.help_text 191 ) 192 193 if hasattr(client, 'v2'): 194 for k in client.v2.json.keys(): 195 if k in ('dashboard',): 196 # the Dashboard API is deprecated and not supported 197 continue 198 199 # argparse aliases are *only* supported in Python3 (not 2.7) 200 kwargs = {} 201 if not skip_deprecated: 202 if k in DEPRECATED_RESOURCES: 203 kwargs['aliases'] = [DEPRECATED_RESOURCES[k]] 204 205 client.subparsers[k] = subparsers.add_parser( 206 k, help='', **kwargs 207 ) 208 209 resource = client.parser.parse_known_args()[0].resource 210 if resource in DEPRECATED_RESOURCES.values(): 211 client.argv[ 212 client.argv.index(resource) 213 ] = DEPRECATED_RESOURCES_REVERSE[resource] 214 resource = DEPRECATED_RESOURCES_REVERSE[resource] 215 216 if resource in CustomCommand.registry: 217 parser = client.subparsers[resource] 218 command = CustomCommand.registry[resource]() 219 response = command.handle(client, parser) 220 if response: 221 _filter = client.get_config('filter') 222 if ( 223 resource == 'config' and 224 client.get_config('format') == 'human' 225 ): 226 response = { 227 'count': len(response), 228 'results': [ 229 {'key': k, 'value': v} 230 for k, v in response.items() 231 ] 232 } 233 _filter = 'key, value' 234 try: 235 connection = client.root.connection 236 except AttributeError: 237 connection = None 238 formatted = format_response( 239 Page.from_json(response, connection=connection), 240 fmt=client.get_config('format'), 241 filter=_filter 242 ) 243 print(formatted) 244 raise SystemExit() 245 else: 246 return resource 247 248 249 def is_control_resource(resource): 250 # special root level resources that don't don't represent database 251 # entities that follow the list/detail semantic 252 return resource in CONTROL_RESOURCES 253 [end of awxkit/awxkit/cli/resource.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/awxkit/awxkit/cli/resource.py b/awxkit/awxkit/cli/resource.py --- a/awxkit/awxkit/cli/resource.py +++ b/awxkit/awxkit/cli/resource.py @@ -2,7 +2,7 @@ import json import os -from awxkit import api, config +from awxkit import api, config, yaml_file from awxkit.exceptions import ImportExportError from awxkit.utils import to_str from awxkit.api.pages import Page @@ -140,7 +140,7 @@ if fmt == 'json': data = json.load(client.stdin) elif fmt == 'yaml': - data = yaml.safe_load(client.stdin) + data = yaml.load(client.stdin, Loader=yaml_file.Loader) else: raise ImportExportError("Unsupported format for Import: " + fmt)
{"golden_diff": "diff --git a/awxkit/awxkit/cli/resource.py b/awxkit/awxkit/cli/resource.py\n--- a/awxkit/awxkit/cli/resource.py\n+++ b/awxkit/awxkit/cli/resource.py\n@@ -2,7 +2,7 @@\n import json\n import os\n \n-from awxkit import api, config\n+from awxkit import api, config, yaml_file\n from awxkit.exceptions import ImportExportError\n from awxkit.utils import to_str\n from awxkit.api.pages import Page\n@@ -140,7 +140,7 @@\n if fmt == 'json':\n data = json.load(client.stdin)\n elif fmt == 'yaml':\n- data = yaml.safe_load(client.stdin)\n+ data = yaml.load(client.stdin, Loader=yaml_file.Loader)\n else:\n raise ImportExportError(\"Unsupported format for Import: \" + fmt)\n", "issue": "Support `!include` and `!import` constructors in `awx import -f yaml` command\n<!-- Issues are for **concrete, actionable bugs and feature requests** only - if you're just asking for debugging help or technical support, please use:\r\n\r\n- http://webchat.freenode.net/?channels=ansible-awx\r\n- https://groups.google.com/forum/#!forum/awx-project\r\n\r\nWe have to limit this because of limited volunteer time to respond to issues! -->\r\n\r\n##### ISSUE TYPE\r\n - Feature Idea\r\n\r\n##### SUMMARY\r\n<!-- Briefly describe the problem or desired enhancement. -->\r\n\r\nSupport `!import` and `!include` with `awx import -f yaml` command. This is useful when working with a large number of AWX resources. This allows breaking config into smaller files, and having a top-level file like this:\r\n\r\n```yaml\r\n# resources.yaml\r\nprojects:\r\n- !include projects/project1.yaml\r\n- !include projects/project2.yaml\r\njob_templates:\r\n- !include jobs/job1.yaml\r\n\r\n# ....\r\n```\r\n\r\nWhich allows managing resources in individual files (e.g. using a friendly directory structure), and still being able to import with `awx import -f yaml < resources.yaml`.\r\n\r\n#### SUGGESTED IMPLEMENTATION\r\n\r\nUse `awxkit.yaml_file.Loader` instead of `yaml.safe_load` in https://github.com/ansible/awx/blob/devel/awxkit/awxkit/cli/resource.py#L143\r\n\r\n\n", "before_files": [{"content": "import yaml\nimport json\nimport os\n\nfrom awxkit import api, config\nfrom awxkit.exceptions import ImportExportError\nfrom awxkit.utils import to_str\nfrom awxkit.api.pages import Page\nfrom awxkit.api.pages.api import EXPORTABLE_RESOURCES\nfrom awxkit.cli.format import FORMATTERS, format_response, add_authentication_arguments\nfrom awxkit.cli.utils import CustomRegistryMeta, cprint\n\n\nCONTROL_RESOURCES = ['ping', 'config', 'me', 'metrics']\n\nDEPRECATED_RESOURCES = {\n 'ad_hoc_commands': 'ad_hoc',\n 'applications': 'application',\n 'credentials': 'credential',\n 'credential_types': 'credential_type',\n 'groups': 'group',\n 'hosts': 'host',\n 'instances': 'instance',\n 'instance_groups': 'instance_group',\n 'inventory': 'inventories',\n 'inventory_scripts': 'inventory_script',\n 'inventory_sources': 'inventory_source',\n 'inventory_updates': 'inventory_update',\n 'jobs': 'job',\n 'job_templates': 'job_template',\n 'labels': 'label',\n 'workflow_job_template_nodes': 'node',\n 'notification_templates': 'notification_template',\n 'organizations': 'organization',\n 'projects': 'project',\n 'project_updates': 'project_update',\n 'roles': 'role',\n 'schedules': 'schedule',\n 'settings': 'setting',\n 'teams': 'team',\n 'workflow_job_templates': 'workflow',\n 'workflow_jobs': 'workflow_job',\n 'users': 'user'\n}\nDEPRECATED_RESOURCES_REVERSE = dict(\n (v, k) for k, v in DEPRECATED_RESOURCES.items()\n)\n\n\nclass 
CustomCommand(metaclass=CustomRegistryMeta):\n \"\"\"Base class for implementing custom commands.\n\n Custom commands represent static code which should run - they are\n responsible for returning and formatting their own output (which may or may\n not be JSON/YAML).\n \"\"\"\n\n help_text = ''\n\n @property\n def name(self):\n raise NotImplementedError()\n\n def handle(self, client, parser):\n \"\"\"To be implemented by subclasses.\n Should return a dictionary that is JSON serializable\n \"\"\"\n raise NotImplementedError()\n\n\nclass Login(CustomCommand):\n name = 'login'\n help_text = 'authenticate and retrieve an OAuth2 token'\n\n def print_help(self, parser):\n add_authentication_arguments(parser, os.environ)\n parser.print_help()\n\n def handle(self, client, parser):\n auth = parser.add_argument_group('OAuth2.0 Options')\n auth.add_argument('--description', help='description of the generated OAuth2.0 token', metavar='TEXT')\n auth.add_argument('--conf.client_id', metavar='TEXT')\n auth.add_argument('--conf.client_secret', metavar='TEXT')\n auth.add_argument(\n '--conf.scope', choices=['read', 'write'], default='write'\n )\n if client.help:\n self.print_help(parser)\n raise SystemExit()\n parsed = parser.parse_known_args()[0]\n kwargs = {\n 'client_id': getattr(parsed, 'conf.client_id', None),\n 'client_secret': getattr(parsed, 'conf.client_secret', None),\n 'scope': getattr(parsed, 'conf.scope', None),\n }\n if getattr(parsed, 'description', None):\n kwargs['description'] = parsed.description\n try:\n token = api.Api().get_oauth2_token(**kwargs)\n except Exception as e:\n self.print_help(parser)\n cprint(\n 'Error retrieving an OAuth2.0 token ({}).'.format(e.__class__),\n 'red'\n )\n else:\n fmt = client.get_config('format')\n if fmt == 'human':\n print('export TOWER_OAUTH_TOKEN={}'.format(token))\n else:\n print(to_str(FORMATTERS[fmt]({'token': token}, '.')).strip())\n\n\nclass Config(CustomCommand):\n name = 'config'\n help_text = 'print current configuration values'\n\n def handle(self, client, parser):\n if client.help:\n parser.print_help()\n raise SystemExit()\n return {\n 'base_url': config.base_url,\n 'token': client.get_config('token'),\n 'use_sessions': config.use_sessions,\n 'credentials': config.credentials,\n }\n\n\nclass Import(CustomCommand):\n name = 'import'\n help_text = 'import resources into Tower'\n\n def handle(self, client, parser):\n if client.help:\n parser.print_help()\n raise SystemExit()\n\n fmt = client.get_config('format')\n if fmt == 'json':\n data = json.load(client.stdin)\n elif fmt == 'yaml':\n data = yaml.safe_load(client.stdin)\n else:\n raise ImportExportError(\"Unsupported format for Import: \" + fmt)\n\n client.authenticate()\n client.v2.import_assets(data)\n\n return {}\n\n\nclass Export(CustomCommand):\n name = 'export'\n help_text = 'export resources from Tower'\n\n def extend_parser(self, parser):\n resources = parser.add_argument_group('resources')\n\n for resource in EXPORTABLE_RESOURCES:\n # This parsing pattern will result in 3 different possible outcomes:\n # 1) the resource flag is not used at all, which will result in the attr being None\n # 2) the resource flag is used with no argument, which will result in the attr being ''\n # 3) the resource flag is used with an argument, and the attr will be that argument's value\n resources.add_argument('--{}'.format(resource), nargs='?', const='')\n\n def handle(self, client, parser):\n self.extend_parser(parser)\n\n if client.help:\n parser.print_help()\n raise SystemExit()\n\n parsed = 
parser.parse_known_args()[0]\n kwargs = {resource: getattr(parsed, resource, None) for resource in EXPORTABLE_RESOURCES}\n\n client.authenticate()\n return client.v2.export_assets(**kwargs)\n\n\ndef parse_resource(client, skip_deprecated=False):\n subparsers = client.parser.add_subparsers(\n dest='resource',\n metavar='resource',\n )\n\n # check if the user is running a custom command\n for command in CustomCommand.__subclasses__():\n client.subparsers[command.name] = subparsers.add_parser(\n command.name, help=command.help_text\n )\n\n if hasattr(client, 'v2'):\n for k in client.v2.json.keys():\n if k in ('dashboard',):\n # the Dashboard API is deprecated and not supported\n continue\n\n # argparse aliases are *only* supported in Python3 (not 2.7)\n kwargs = {}\n if not skip_deprecated:\n if k in DEPRECATED_RESOURCES:\n kwargs['aliases'] = [DEPRECATED_RESOURCES[k]]\n\n client.subparsers[k] = subparsers.add_parser(\n k, help='', **kwargs\n )\n\n resource = client.parser.parse_known_args()[0].resource\n if resource in DEPRECATED_RESOURCES.values():\n client.argv[\n client.argv.index(resource)\n ] = DEPRECATED_RESOURCES_REVERSE[resource]\n resource = DEPRECATED_RESOURCES_REVERSE[resource]\n\n if resource in CustomCommand.registry:\n parser = client.subparsers[resource]\n command = CustomCommand.registry[resource]()\n response = command.handle(client, parser)\n if response:\n _filter = client.get_config('filter')\n if (\n resource == 'config' and\n client.get_config('format') == 'human'\n ):\n response = {\n 'count': len(response),\n 'results': [\n {'key': k, 'value': v}\n for k, v in response.items()\n ]\n }\n _filter = 'key, value'\n try:\n connection = client.root.connection\n except AttributeError:\n connection = None\n formatted = format_response(\n Page.from_json(response, connection=connection),\n fmt=client.get_config('format'),\n filter=_filter\n )\n print(formatted)\n raise SystemExit()\n else:\n return resource\n\n\ndef is_control_resource(resource):\n # special root level resources that don't don't represent database\n # entities that follow the list/detail semantic\n return resource in CONTROL_RESOURCES\n", "path": "awxkit/awxkit/cli/resource.py"}]}
3,259
201
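The request in this record is for `!include` and `!import` tags when importing YAML, and the golden diff above addresses it by loading stdin with `yaml.load(client.stdin, Loader=yaml_file.Loader)` instead of `yaml.safe_load`. For readers unfamiliar with custom YAML constructors, the sketch below shows one common way such an include-aware loader is built. It is an illustration only, not the actual `awxkit.yaml_file.Loader` implementation: the class name, helper name, and relative-path handling are assumptions made for the example.

```python
import os
import yaml


class IncludeLoader(yaml.SafeLoader):
    """Illustrative include-aware loader; not the real awxkit.yaml_file.Loader."""

    def __init__(self, stream):
        # Resolve relative includes against the directory of the top-level file.
        self._root = os.path.dirname(getattr(stream, "name", "."))
        super().__init__(stream)


def _include(loader, node):
    # Parse the referenced file and splice its parsed content in place of the tag.
    path = os.path.join(loader._root, loader.construct_scalar(node))
    with open(path) as f:
        return yaml.load(f, IncludeLoader)


IncludeLoader.add_constructor("!include", _include)
IncludeLoader.add_constructor("!import", _include)

# Usage mirroring the issue's resources.yaml example:
# with open("resources.yaml") as f:
#     data = yaml.load(f, Loader=IncludeLoader)
```

Registering the constructors on a `SafeLoader` subclass leaves the default `yaml.safe_load` behaviour untouched for every other caller, which is presumably why the fix swaps the loader only inside the `import` command.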
gh_patches_debug_19284
rasdani/github-patches
git_diff
openai__evals-1395
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> `Failed to open: ../registry/data/social_iqa/few_shot.jsonl` with custom registry ### Describe the bug I have created a new eval within a custom registry and installed evals from pypi (using a recent checkout from github). Calling `oaievals` works fine if I do not use few_show samples, but it does not if you include few shot examples. As can be seen in the error thread below, the issue is caused by calling `self._prefix_registry_path(self.few_shot_jsonl)` (see [_prefix_registry_path](https://github.com/openai/evals/blob/4b7a66bd45f06156656e021e170e7574f6cde3f5/evals/eval.py#L159C6-L159C6)) which transforms the path string into a `Path` object. Then, `open_by_file_pattern` ([here](https://github.com/openai/evals/blob/4b7a66bd45f06156656e021e170e7574f6cde3f5/evals/data.py#L47)) still treats it as a string, unaware of the fact that it is actually a `Path` ### To Reproduce 1. Create a new eval with few-shot samples. 2. Call `oaieval`, for instance: ``` oaieval text-ada-001 social_iqa_few_shot --registry_path=../registry ``` 3. You'll get an error thread as follows: ``` [2023-11-03 14:40:52,931] [registry.py:254] Loading registry from /home/lorenzo/venv/recog-LLM_capabilities/lib/python3.9/site-packages/evals/registry/evals [2023-11-03 14:40:54,382] [registry.py:254] Loading registry from /home/lorenzo/.evals/evals [2023-11-03 14:40:54,382] [registry.py:254] Loading registry from ../registry/evals [2023-11-03 14:40:55,007] [oaieval.py:189] Run started: 2311031440552KUYNQGH [2023-11-03 14:40:55,010] [data.py:90] Fetching ../registry/data/social_iqa/few_shot.jsonl Traceback (most recent call last): File "/home/lorenzo/venv/recog-LLM_capabilities/lib/python3.9/site-packages/evals/data.py", line 54, in open_by_file_pattern if filename.endswith(".gz"): AttributeError: 'PosixPath' object has no attribute 'endswith' The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/home/lorenzo/venv/recog-LLM_capabilities/bin/oaieval", line 8, in <module> sys.exit(main()) File "/home/lorenzo/venv/recog-LLM_capabilities/lib/python3.9/site-packages/evals/cli/oaieval.py", line 274, in main run(args) File "/home/lorenzo/venv/recog-LLM_capabilities/lib/python3.9/site-packages/evals/cli/oaieval.py", line 215, in run eval: Eval = eval_class( File "/home/lorenzo/venv/recog-LLM_capabilities/lib/python3.9/site-packages/evals/elsuite/basic/match.py", line 28, in __init__ self.few_shot = evals.get_jsonl(self._prefix_registry_path(self.few_shot_jsonl)) File "/home/lorenzo/venv/recog-LLM_capabilities/lib/python3.9/site-packages/evals/data.py", line 129, in get_jsonl return _get_jsonl_file(path) File "/home/lorenzo/venv/recog-LLM_capabilities/lib/python3.9/site-packages/evals/data.py", line 91, in _get_jsonl_file with open_by_file_pattern(path, mode="r") as f: File "/home/lorenzo/venv/recog-LLM_capabilities/lib/python3.9/site-packages/evals/data.py", line 75, in open_by_file_pattern raise RuntimeError(f"Failed to open: {filename}") from e RuntimeError: Failed to open: ../registry/data/social_iqa/few_shot.jsonl ``` ### Code snippets _No response_ ### OS Ubuntu 20.04 ### Python version python 3.9 ### Library version git+https://github.com/openai/evals.git@dd96814dd96bd64f3098afca8dc873aa8d8ce4c8 </issue> <code> [start of evals/data.py] 1 """ 2 This file defines utilities for working with data and files of various types. 
3 """ 4 import csv 5 import dataclasses 6 import gzip 7 import itertools 8 import json 9 import logging 10 import os 11 import urllib 12 from collections.abc import Iterator 13 from functools import partial 14 from pathlib import Path 15 from typing import Any, List, Optional, Sequence, Text, Union 16 17 import blobfile as bf 18 import lz4.frame 19 import pydantic 20 import pyzstd 21 22 logger = logging.getLogger(__name__) 23 24 25 def gzip_open(filename: str, mode: str = "rb", openhook: Any = open) -> gzip.GzipFile: 26 """Wrap the given openhook in gzip.""" 27 if mode and "b" not in mode: 28 mode += "b" 29 30 return gzip.GzipFile(fileobj=openhook(filename, mode), mode=mode) 31 32 33 def lz4_open(filename: str, mode: str = "rb", openhook: Any = open) -> lz4.frame.LZ4FrameFile: 34 if mode and "b" not in mode: 35 mode += "b" 36 37 return lz4.frame.LZ4FrameFile(openhook(filename, mode), mode=mode) 38 39 40 def zstd_open(filename: str, mode: str = "rb", openhook: Any = open) -> pyzstd.ZstdFile: 41 if mode and "b" not in mode: 42 mode += "b" 43 44 return pyzstd.ZstdFile(openhook(filename, mode), mode=mode) 45 46 47 def open_by_file_pattern(filename: str, mode: str = "r", **kwargs: Any) -> Any: 48 """Can read/write to files on gcs/local with or without gzipping. If file 49 is stored on gcs, streams with blobfile. Otherwise use vanilla python open. If 50 filename endswith gz, then zip/unzip contents on the fly (note that gcs paths and 51 gzip are compatible)""" 52 open_fn = partial(bf.BlobFile, **kwargs) 53 try: 54 if filename.endswith(".gz"): 55 return gzip_open(filename, openhook=open_fn, mode=mode) 56 elif filename.endswith(".lz4"): 57 return lz4_open(filename, openhook=open_fn, mode=mode) 58 elif filename.endswith(".zst"): 59 return zstd_open(filename, openhook=open_fn, mode=mode) 60 else: 61 scheme = urllib.parse.urlparse(filename).scheme 62 if (not os.path.exists(filename)) and (scheme == "" or scheme == "file"): 63 return open_fn( 64 os.path.join( 65 os.path.dirname(os.path.abspath(__file__)), 66 "registry", 67 "data", 68 filename, 69 ), 70 mode=mode, 71 ) 72 else: 73 return open_fn(filename, mode=mode) 74 except Exception as e: 75 raise RuntimeError(f"Failed to open: {filename}") from e 76 77 78 def _decode_json(line, path, line_number): 79 try: 80 return json.loads(line) 81 except json.JSONDecodeError as e: 82 custom_error_message = ( 83 f"Error parsing JSON on line {line_number}: {e.msg} at {path}:{line_number}:{e.colno}" 84 ) 85 logger.error(custom_error_message) 86 raise ValueError(custom_error_message) from None 87 88 89 def _get_jsonl_file(path): 90 logger.info(f"Fetching {path}") 91 with open_by_file_pattern(path, mode="r") as f: 92 return [_decode_json(line, path, i + 1) for i, line in enumerate(f)] 93 94 95 def _get_json_file(path): 96 logger.info(f"Fetching {path}") 97 with open_by_file_pattern(path, mode="r") as f: 98 return json.loads(f.read()) 99 100 101 def _stream_jsonl_file(path) -> Iterator: 102 logger.info(f"Streaming {path}") 103 with bf.BlobFile(path, "r", streaming=True) as f: 104 for line in f: 105 yield json.loads(line) 106 107 108 def get_lines(path) -> list[dict]: 109 """ 110 Get a list of lines from a file. 111 """ 112 with open_by_file_pattern(path, mode="r") as f: 113 return f.readlines() 114 115 116 def get_jsonl(path: str) -> list[dict]: 117 """ 118 Extract json lines from the given path. 119 If the path is a directory, look in subpaths recursively. 120 121 Return all lines from all jsonl files as a single list. 
122 """ 123 if bf.isdir(path): 124 result = [] 125 for filename in bf.listdir(path): 126 if filename.endswith(".jsonl"): 127 result += get_jsonl(os.path.join(path, filename)) 128 return result 129 return _get_jsonl_file(path) 130 131 132 def get_jsonls(paths: Sequence[str], line_limit=None) -> list[dict]: 133 return list(iter_jsonls(paths, line_limit)) 134 135 136 def get_json(path) -> dict: 137 if bf.isdir(path): 138 raise ValueError("Path is a directory, only files are supported") 139 return _get_json_file(path) 140 141 142 def iter_jsonls(paths: Union[str, list[str]], line_limit=None) -> Iterator[dict]: 143 """ 144 For each path in the input, iterate over the jsonl files in that path. 145 Look in subdirectories recursively. 146 147 Use an iterator to conserve memory. 148 """ 149 if type(paths) == str: 150 paths = [paths] 151 152 def _iter(): 153 for path in paths: 154 if bf.isdir(path): 155 for filename in bf.listdir(path): 156 if filename.endswith(".jsonl"): 157 yield from iter_jsonls([os.path.join(path, filename)]) 158 else: 159 yield from _stream_jsonl_file(path) 160 161 return itertools.islice(_iter(), line_limit) 162 163 164 def get_csv(path, fieldnames=None): 165 with bf.BlobFile(path, "r", cache_dir="/tmp/bf_cache", streaming=False) as f: 166 reader = csv.DictReader(f, fieldnames=fieldnames) 167 return [row for row in reader] 168 169 170 def _to_py_types(o: Any, exclude_keys: List[Text]) -> Any: 171 if isinstance(o, dict): 172 return { 173 k: _to_py_types(v, exclude_keys=exclude_keys) 174 for k, v in o.items() 175 if k not in exclude_keys 176 } 177 178 if isinstance(o, list): 179 return [_to_py_types(v, exclude_keys=exclude_keys) for v in o] 180 181 if isinstance(o, Path): 182 return o.as_posix() 183 184 if dataclasses.is_dataclass(o): 185 return _to_py_types(dataclasses.asdict(o), exclude_keys=exclude_keys) 186 187 # pydantic data classes 188 if isinstance(o, pydantic.BaseModel): 189 return { 190 k: _to_py_types(v, exclude_keys=exclude_keys) 191 for k, v in json.loads(o.json()).items() 192 if k not in exclude_keys 193 } 194 195 return o 196 197 198 class EnhancedJSONEncoder(json.JSONEncoder): 199 def __init__(self, exclude_keys: Optional[List[Text]] = None, **kwargs: Any): 200 super().__init__(**kwargs) 201 self.exclude_keys = exclude_keys if exclude_keys else [] 202 203 def default(self, o: Any) -> str: 204 return _to_py_types(o, self.exclude_keys) 205 206 207 def jsondumps(o: Any, ensure_ascii: bool = False, **kwargs: Any) -> str: 208 # The JSONEncoder class's .default method is only applied to dictionary values, 209 # not keys. In order to exclude keys from the output of this jsondumps method 210 # we need to exclude them outside the encoder. 211 if isinstance(o, dict) and "exclude_keys" in kwargs: 212 for key in kwargs["exclude_keys"]: 213 del o[key] 214 return json.dumps(o, cls=EnhancedJSONEncoder, ensure_ascii=ensure_ascii, **kwargs) 215 216 217 def jsondump(o: Any, fp: Any, ensure_ascii: bool = False, **kwargs: Any) -> None: 218 json.dump(o, fp, cls=EnhancedJSONEncoder, ensure_ascii=ensure_ascii, **kwargs) 219 220 221 def jsonloads(s: str, **kwargs: Any) -> Any: 222 return json.loads(s, **kwargs) 223 224 225 def jsonload(fp: Any, **kwargs: Any) -> Any: 226 return json.load(fp, **kwargs) 227 [end of evals/data.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/evals/data.py b/evals/data.py --- a/evals/data.py +++ b/evals/data.py @@ -43,13 +43,16 @@ return pyzstd.ZstdFile(openhook(filename, mode), mode=mode) - -def open_by_file_pattern(filename: str, mode: str = "r", **kwargs: Any) -> Any: +def open_by_file_pattern(filename: Union[str, Path], mode: str = "r", **kwargs: Any) -> Any: """Can read/write to files on gcs/local with or without gzipping. If file is stored on gcs, streams with blobfile. Otherwise use vanilla python open. If filename endswith gz, then zip/unzip contents on the fly (note that gcs paths and gzip are compatible)""" open_fn = partial(bf.BlobFile, **kwargs) + + if isinstance(filename, Path): + filename = filename.as_posix() + try: if filename.endswith(".gz"): return gzip_open(filename, openhook=open_fn, mode=mode)
{"golden_diff": "diff --git a/evals/data.py b/evals/data.py\n--- a/evals/data.py\n+++ b/evals/data.py\n@@ -43,13 +43,16 @@\n \n return pyzstd.ZstdFile(openhook(filename, mode), mode=mode)\n \n-\n-def open_by_file_pattern(filename: str, mode: str = \"r\", **kwargs: Any) -> Any:\n+def open_by_file_pattern(filename: Union[str, Path], mode: str = \"r\", **kwargs: Any) -> Any:\n \"\"\"Can read/write to files on gcs/local with or without gzipping. If file\n is stored on gcs, streams with blobfile. Otherwise use vanilla python open. If\n filename endswith gz, then zip/unzip contents on the fly (note that gcs paths and\n gzip are compatible)\"\"\"\n open_fn = partial(bf.BlobFile, **kwargs)\n+\n+ if isinstance(filename, Path):\n+ filename = filename.as_posix()\n+\n try:\n if filename.endswith(\".gz\"):\n return gzip_open(filename, openhook=open_fn, mode=mode)\n", "issue": "`Failed to open: ../registry/data/social_iqa/few_shot.jsonl` with custom registry\n### Describe the bug\n\nI have created a new eval within a custom registry and installed evals from pypi (using a recent checkout from github). Calling `oaievals` works fine if I do not use few_show samples, but it does not if you include few shot examples. \r\n\r\nAs can be seen in the error thread below, the issue is caused by calling `self._prefix_registry_path(self.few_shot_jsonl)` (see [_prefix_registry_path](https://github.com/openai/evals/blob/4b7a66bd45f06156656e021e170e7574f6cde3f5/evals/eval.py#L159C6-L159C6)) which transforms the path string into a `Path` object. Then, `open_by_file_pattern` ([here](https://github.com/openai/evals/blob/4b7a66bd45f06156656e021e170e7574f6cde3f5/evals/data.py#L47)) still treats it as a string, unaware of the fact that it is actually a `Path`\n\n### To Reproduce\n\n1. Create a new eval with few-shot samples. \r\n2. Call `oaieval`, for instance: \r\n```\r\noaieval text-ada-001 social_iqa_few_shot --registry_path=../registry\r\n```\r\n3. 
You'll get an error thread as follows: \r\n```\r\n[2023-11-03 14:40:52,931] [registry.py:254] Loading registry from /home/lorenzo/venv/recog-LLM_capabilities/lib/python3.9/site-packages/evals/registry/evals\r\n[2023-11-03 14:40:54,382] [registry.py:254] Loading registry from /home/lorenzo/.evals/evals\r\n[2023-11-03 14:40:54,382] [registry.py:254] Loading registry from ../registry/evals\r\n[2023-11-03 14:40:55,007] [oaieval.py:189] \u001b[1;35mRun started: 2311031440552KUYNQGH\u001b[0m\r\n[2023-11-03 14:40:55,010] [data.py:90] Fetching ../registry/data/social_iqa/few_shot.jsonl\r\nTraceback (most recent call last):\r\n File \"/home/lorenzo/venv/recog-LLM_capabilities/lib/python3.9/site-packages/evals/data.py\", line 54, in open_by_file_pattern\r\n if filename.endswith(\".gz\"):\r\nAttributeError: 'PosixPath' object has no attribute 'endswith'\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File \"/home/lorenzo/venv/recog-LLM_capabilities/bin/oaieval\", line 8, in <module>\r\n sys.exit(main())\r\n File \"/home/lorenzo/venv/recog-LLM_capabilities/lib/python3.9/site-packages/evals/cli/oaieval.py\", line 274, in main\r\n run(args)\r\n File \"/home/lorenzo/venv/recog-LLM_capabilities/lib/python3.9/site-packages/evals/cli/oaieval.py\", line 215, in run\r\n eval: Eval = eval_class(\r\n File \"/home/lorenzo/venv/recog-LLM_capabilities/lib/python3.9/site-packages/evals/elsuite/basic/match.py\", line 28, in __init__\r\n self.few_shot = evals.get_jsonl(self._prefix_registry_path(self.few_shot_jsonl))\r\n File \"/home/lorenzo/venv/recog-LLM_capabilities/lib/python3.9/site-packages/evals/data.py\", line 129, in get_jsonl\r\n return _get_jsonl_file(path)\r\n File \"/home/lorenzo/venv/recog-LLM_capabilities/lib/python3.9/site-packages/evals/data.py\", line 91, in _get_jsonl_file\r\n with open_by_file_pattern(path, mode=\"r\") as f:\r\n File \"/home/lorenzo/venv/recog-LLM_capabilities/lib/python3.9/site-packages/evals/data.py\", line 75, in open_by_file_pattern\r\n raise RuntimeError(f\"Failed to open: {filename}\") from e\r\nRuntimeError: Failed to open: ../registry/data/social_iqa/few_shot.jsonl\r\n```\r\n\r\n\n\n### Code snippets\n\n_No response_\n\n### OS\n\nUbuntu 20.04\n\n### Python version\n\npython 3.9\n\n### Library version\n\ngit+https://github.com/openai/evals.git@dd96814dd96bd64f3098afca8dc873aa8d8ce4c8\n", "before_files": [{"content": "\"\"\"\nThis file defines utilities for working with data and files of various types.\n\"\"\"\nimport csv\nimport dataclasses\nimport gzip\nimport itertools\nimport json\nimport logging\nimport os\nimport urllib\nfrom collections.abc import Iterator\nfrom functools import partial\nfrom pathlib import Path\nfrom typing import Any, List, Optional, Sequence, Text, Union\n\nimport blobfile as bf\nimport lz4.frame\nimport pydantic\nimport pyzstd\n\nlogger = logging.getLogger(__name__)\n\n\ndef gzip_open(filename: str, mode: str = \"rb\", openhook: Any = open) -> gzip.GzipFile:\n \"\"\"Wrap the given openhook in gzip.\"\"\"\n if mode and \"b\" not in mode:\n mode += \"b\"\n\n return gzip.GzipFile(fileobj=openhook(filename, mode), mode=mode)\n\n\ndef lz4_open(filename: str, mode: str = \"rb\", openhook: Any = open) -> lz4.frame.LZ4FrameFile:\n if mode and \"b\" not in mode:\n mode += \"b\"\n\n return lz4.frame.LZ4FrameFile(openhook(filename, mode), mode=mode)\n\n\ndef zstd_open(filename: str, mode: str = \"rb\", openhook: Any = open) -> pyzstd.ZstdFile:\n if mode and \"b\" not in mode:\n mode += 
\"b\"\n\n return pyzstd.ZstdFile(openhook(filename, mode), mode=mode)\n\n\ndef open_by_file_pattern(filename: str, mode: str = \"r\", **kwargs: Any) -> Any:\n \"\"\"Can read/write to files on gcs/local with or without gzipping. If file\n is stored on gcs, streams with blobfile. Otherwise use vanilla python open. If\n filename endswith gz, then zip/unzip contents on the fly (note that gcs paths and\n gzip are compatible)\"\"\"\n open_fn = partial(bf.BlobFile, **kwargs)\n try:\n if filename.endswith(\".gz\"):\n return gzip_open(filename, openhook=open_fn, mode=mode)\n elif filename.endswith(\".lz4\"):\n return lz4_open(filename, openhook=open_fn, mode=mode)\n elif filename.endswith(\".zst\"):\n return zstd_open(filename, openhook=open_fn, mode=mode)\n else:\n scheme = urllib.parse.urlparse(filename).scheme\n if (not os.path.exists(filename)) and (scheme == \"\" or scheme == \"file\"):\n return open_fn(\n os.path.join(\n os.path.dirname(os.path.abspath(__file__)),\n \"registry\",\n \"data\",\n filename,\n ),\n mode=mode,\n )\n else:\n return open_fn(filename, mode=mode)\n except Exception as e:\n raise RuntimeError(f\"Failed to open: {filename}\") from e\n\n\ndef _decode_json(line, path, line_number):\n try:\n return json.loads(line)\n except json.JSONDecodeError as e:\n custom_error_message = (\n f\"Error parsing JSON on line {line_number}: {e.msg} at {path}:{line_number}:{e.colno}\"\n )\n logger.error(custom_error_message)\n raise ValueError(custom_error_message) from None\n\n\ndef _get_jsonl_file(path):\n logger.info(f\"Fetching {path}\")\n with open_by_file_pattern(path, mode=\"r\") as f:\n return [_decode_json(line, path, i + 1) for i, line in enumerate(f)]\n\n\ndef _get_json_file(path):\n logger.info(f\"Fetching {path}\")\n with open_by_file_pattern(path, mode=\"r\") as f:\n return json.loads(f.read())\n\n\ndef _stream_jsonl_file(path) -> Iterator:\n logger.info(f\"Streaming {path}\")\n with bf.BlobFile(path, \"r\", streaming=True) as f:\n for line in f:\n yield json.loads(line)\n\n\ndef get_lines(path) -> list[dict]:\n \"\"\"\n Get a list of lines from a file.\n \"\"\"\n with open_by_file_pattern(path, mode=\"r\") as f:\n return f.readlines()\n\n\ndef get_jsonl(path: str) -> list[dict]:\n \"\"\"\n Extract json lines from the given path.\n If the path is a directory, look in subpaths recursively.\n\n Return all lines from all jsonl files as a single list.\n \"\"\"\n if bf.isdir(path):\n result = []\n for filename in bf.listdir(path):\n if filename.endswith(\".jsonl\"):\n result += get_jsonl(os.path.join(path, filename))\n return result\n return _get_jsonl_file(path)\n\n\ndef get_jsonls(paths: Sequence[str], line_limit=None) -> list[dict]:\n return list(iter_jsonls(paths, line_limit))\n\n\ndef get_json(path) -> dict:\n if bf.isdir(path):\n raise ValueError(\"Path is a directory, only files are supported\")\n return _get_json_file(path)\n\n\ndef iter_jsonls(paths: Union[str, list[str]], line_limit=None) -> Iterator[dict]:\n \"\"\"\n For each path in the input, iterate over the jsonl files in that path.\n Look in subdirectories recursively.\n\n Use an iterator to conserve memory.\n \"\"\"\n if type(paths) == str:\n paths = [paths]\n\n def _iter():\n for path in paths:\n if bf.isdir(path):\n for filename in bf.listdir(path):\n if filename.endswith(\".jsonl\"):\n yield from iter_jsonls([os.path.join(path, filename)])\n else:\n yield from _stream_jsonl_file(path)\n\n return itertools.islice(_iter(), line_limit)\n\n\ndef get_csv(path, fieldnames=None):\n with bf.BlobFile(path, \"r\", 
cache_dir=\"/tmp/bf_cache\", streaming=False) as f:\n reader = csv.DictReader(f, fieldnames=fieldnames)\n return [row for row in reader]\n\n\ndef _to_py_types(o: Any, exclude_keys: List[Text]) -> Any:\n if isinstance(o, dict):\n return {\n k: _to_py_types(v, exclude_keys=exclude_keys)\n for k, v in o.items()\n if k not in exclude_keys\n }\n\n if isinstance(o, list):\n return [_to_py_types(v, exclude_keys=exclude_keys) for v in o]\n\n if isinstance(o, Path):\n return o.as_posix()\n\n if dataclasses.is_dataclass(o):\n return _to_py_types(dataclasses.asdict(o), exclude_keys=exclude_keys)\n\n # pydantic data classes\n if isinstance(o, pydantic.BaseModel):\n return {\n k: _to_py_types(v, exclude_keys=exclude_keys)\n for k, v in json.loads(o.json()).items()\n if k not in exclude_keys\n }\n\n return o\n\n\nclass EnhancedJSONEncoder(json.JSONEncoder):\n def __init__(self, exclude_keys: Optional[List[Text]] = None, **kwargs: Any):\n super().__init__(**kwargs)\n self.exclude_keys = exclude_keys if exclude_keys else []\n\n def default(self, o: Any) -> str:\n return _to_py_types(o, self.exclude_keys)\n\n\ndef jsondumps(o: Any, ensure_ascii: bool = False, **kwargs: Any) -> str:\n # The JSONEncoder class's .default method is only applied to dictionary values,\n # not keys. In order to exclude keys from the output of this jsondumps method\n # we need to exclude them outside the encoder.\n if isinstance(o, dict) and \"exclude_keys\" in kwargs:\n for key in kwargs[\"exclude_keys\"]:\n del o[key]\n return json.dumps(o, cls=EnhancedJSONEncoder, ensure_ascii=ensure_ascii, **kwargs)\n\n\ndef jsondump(o: Any, fp: Any, ensure_ascii: bool = False, **kwargs: Any) -> None:\n json.dump(o, fp, cls=EnhancedJSONEncoder, ensure_ascii=ensure_ascii, **kwargs)\n\n\ndef jsonloads(s: str, **kwargs: Any) -> Any:\n return json.loads(s, **kwargs)\n\n\ndef jsonload(fp: Any, **kwargs: Any) -> Any:\n return json.load(fp, **kwargs)\n", "path": "evals/data.py"}]}
4,061
245
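The failure in this record comes from a `pathlib.Path` reaching `open_by_file_pattern`, which assumes a `str` and calls `.endswith`. The golden diff above fixes it by widening the parameter type and normalizing with `as_posix()` at the top of the function. The standalone snippet below only illustrates that boundary normalization; the helper name is invented for the example.

```python
from pathlib import Path
from typing import Union


def normalize_filename(filename: Union[str, Path]) -> str:
    # Accept either type at the boundary so later suffix checks
    # such as filename.endswith(".gz") keep working unchanged.
    return filename.as_posix() if isinstance(filename, Path) else filename


assert normalize_filename(
    Path("../registry/data/social_iqa/few_shot.jsonl")
).endswith(".jsonl")
```

Normalizing once at the entry point means none of the `.gz`/`.lz4`/`.zst` branches or the `urllib.parse.urlparse` call further down needs to know which type the caller passed.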
gh_patches_debug_28651
rasdani/github-patches
git_diff
mathesar-foundation__mathesar-1808
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Reflection no longer runs on a schedule ## Description <!-- A clear and concise description of what the bug is. --> With some recent performance improvements, we introduced a known regression in that we no longer reflect DB objects on some schedule under all circumstances. In particular, if you add a table (or tables) via some other client, it will never appear in the UI unless you've also done some other DDL operation _through_ the UI. ## Expected behavior <!-- A clear and concise description of what you expected to happen. --> It should be possible to reflect DB objects when needed. </issue> <code> [start of mathesar/urls.py] 1 from django.urls import include, path, re_path 2 from rest_framework_nested import routers 3 4 from mathesar import views 5 from mathesar.api.db import viewsets as db_viewsets 6 from mathesar.api.ui import viewsets as ui_viewsets 7 8 db_router = routers.DefaultRouter() 9 db_router.register(r'tables', db_viewsets.TableViewSet, basename='table') 10 db_router.register(r'queries', db_viewsets.QueryViewSet, basename='query') 11 db_router.register(r'links', db_viewsets.LinkViewSet, basename='links') 12 db_router.register(r'schemas', db_viewsets.SchemaViewSet, basename='schema') 13 db_router.register(r'databases', db_viewsets.DatabaseViewSet, basename='database') 14 db_router.register(r'data_files', db_viewsets.DataFileViewSet, basename='data-file') 15 16 db_table_router = routers.NestedSimpleRouter(db_router, r'tables', lookup='table') 17 db_table_router.register(r'records', db_viewsets.RecordViewSet, basename='table-record') 18 db_table_router.register(r'settings', db_viewsets.TableSettingsViewSet, basename='table-setting') 19 db_table_router.register(r'columns', db_viewsets.ColumnViewSet, basename='table-column') 20 db_table_router.register(r'constraints', db_viewsets.ConstraintViewSet, basename='table-constraint') 21 22 ui_router = routers.DefaultRouter() 23 ui_router.register(r'databases', ui_viewsets.DatabaseViewSet, basename='database') 24 ui_router.register(r'users', ui_viewsets.UserViewSet, basename='user') 25 ui_router.register(r'database_roles', ui_viewsets.DatabaseRoleViewSet, basename='database_role') 26 ui_router.register(r'schema_roles', ui_viewsets.SchemaRoleViewSet, basename='schema_role') 27 28 urlpatterns = [ 29 path('api/db/v0/', include(db_router.urls)), 30 path('api/db/v0/', include(db_table_router.urls)), 31 path('api/ui/v0/', include(ui_router.urls)), 32 path('auth/', include('django.contrib.auth.urls')), 33 34 path('', views.home, name='home'), 35 path('<db_name>/', views.schemas, name='schemas'), 36 re_path( 37 r'^(?P<db_name>\w+)/(?P<schema_id>\w+)/', 38 views.schema_home, 39 name='schema_home' 40 ), 41 ] 42 [end of mathesar/urls.py] [start of mathesar/views.py] 1 from django.shortcuts import render, redirect, get_object_or_404 2 from django.contrib.auth.decorators import login_required 3 4 from mathesar.models.base import Database, Schema, Table 5 from mathesar.api.serializers.databases import DatabaseSerializer, TypeSerializer 6 from mathesar.api.serializers.schemas import SchemaSerializer 7 from mathesar.api.serializers.tables import TableSerializer 8 from mathesar.api.serializers.queries import QuerySerializer 9 from mathesar.database.types import UIType 10 from mathesar.models.query import UIQuery 11 12 13 def get_schema_list(request, database): 14 schema_serializer = SchemaSerializer( 15 Schema.objects.filter(database=database), 
16 many=True, 17 context={'request': request} 18 ) 19 return schema_serializer.data 20 21 22 def get_database_list(request): 23 database_serializer = DatabaseSerializer( 24 Database.objects.all(), 25 many=True, 26 context={'request': request} 27 ) 28 return database_serializer.data 29 30 31 def get_table_list(request, schema): 32 if schema is None: 33 return [] 34 table_serializer = TableSerializer( 35 Table.objects.filter(schema=schema), 36 many=True, 37 context={'request': request} 38 ) 39 return table_serializer.data 40 41 42 def get_queries_list(request, schema): 43 if schema is None: 44 return [] 45 query_serializer = QuerySerializer( 46 UIQuery.objects.filter(base_table__schema=schema), 47 many=True, 48 context={'request': request} 49 ) 50 return query_serializer.data 51 52 53 def get_ui_type_list(request, database): 54 if database is None: 55 return [] 56 type_serializer = TypeSerializer( 57 UIType, 58 many=True, 59 context={'request': request} 60 ) 61 return type_serializer.data 62 63 64 def get_common_data(request, database, schema=None): 65 return { 66 'current_db': database.name if database else None, 67 'current_schema': schema.id if schema else None, 68 'schemas': get_schema_list(request, database), 69 'databases': get_database_list(request), 70 'tables': get_table_list(request, schema), 71 'queries': get_queries_list(request, schema), 72 'abstract_types': get_ui_type_list(request, database) 73 } 74 75 76 def get_current_database(request, db_name): 77 # if there's a DB name passed in, try to retrieve the database, or return a 404 error. 78 if db_name is not None: 79 return get_object_or_404(Database, name=db_name) 80 else: 81 try: 82 # Try to get the first database available 83 return Database.objects.order_by('id').first() 84 except Database.DoesNotExist: 85 return None 86 87 88 def get_current_schema(request, schema_id, database): 89 # if there's a schema ID passed in, try to retrieve the schema, or return a 404 error. 90 if schema_id is not None: 91 return get_object_or_404(Schema, id=schema_id) 92 else: 93 try: 94 # Try to get the first schema in the DB 95 return Schema.objects.filter(database=database).order_by('id').first() 96 except Schema.DoesNotExist: 97 return None 98 99 100 def render_schema(request, database, schema): 101 # if there's no schema available, redirect to the schemas page. 102 if not schema: 103 return redirect('schemas', db_name=database.name) 104 else: 105 # We are redirecting so that the correct URL is passed to the frontend. 106 return redirect('schema_home', db_name=database.name, schema_id=schema.id) 107 108 109 @login_required 110 def home(request): 111 database = get_current_database(request, None) 112 return redirect('schemas', db_name=database.name) 113 114 115 @login_required 116 def schema_home(request, db_name, schema_id, **kwargs): 117 database = get_current_database(request, db_name) 118 schema = get_current_schema(request, schema_id, database) 119 return render(request, 'mathesar/index.html', { 120 'common_data': get_common_data(request, database, schema) 121 }) 122 123 124 @login_required 125 def schemas(request, db_name): 126 database = get_current_database(request, db_name) 127 return render(request, 'mathesar/index.html', { 128 'common_data': get_common_data(request, database, None) 129 }) 130 [end of mathesar/views.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mathesar/urls.py b/mathesar/urls.py --- a/mathesar/urls.py +++ b/mathesar/urls.py @@ -29,6 +29,7 @@ path('api/db/v0/', include(db_router.urls)), path('api/db/v0/', include(db_table_router.urls)), path('api/ui/v0/', include(ui_router.urls)), + path('api/ui/v0/reflect/', views.reflect_all, name='reflect_all'), path('auth/', include('django.contrib.auth.urls')), path('', views.home, name='home'), diff --git a/mathesar/views.py b/mathesar/views.py --- a/mathesar/views.py +++ b/mathesar/views.py @@ -1,6 +1,10 @@ from django.shortcuts import render, redirect, get_object_or_404 from django.contrib.auth.decorators import login_required +from rest_framework import status +from rest_framework.decorators import api_view +from rest_framework.response import Response +from mathesar.state import reset_reflection from mathesar.models.base import Database, Schema, Table from mathesar.api.serializers.databases import DatabaseSerializer, TypeSerializer from mathesar.api.serializers.schemas import SchemaSerializer @@ -106,6 +110,13 @@ return redirect('schema_home', db_name=database.name, schema_id=schema.id) +@login_required +@api_view(['POST']) +def reflect_all(_): + reset_reflection() + return Response(status=status.HTTP_200_OK) + + @login_required def home(request): database = get_current_database(request, None)
{"golden_diff": "diff --git a/mathesar/urls.py b/mathesar/urls.py\n--- a/mathesar/urls.py\n+++ b/mathesar/urls.py\n@@ -29,6 +29,7 @@\n path('api/db/v0/', include(db_router.urls)),\n path('api/db/v0/', include(db_table_router.urls)),\n path('api/ui/v0/', include(ui_router.urls)),\n+ path('api/ui/v0/reflect/', views.reflect_all, name='reflect_all'),\n path('auth/', include('django.contrib.auth.urls')),\n \n path('', views.home, name='home'),\ndiff --git a/mathesar/views.py b/mathesar/views.py\n--- a/mathesar/views.py\n+++ b/mathesar/views.py\n@@ -1,6 +1,10 @@\n from django.shortcuts import render, redirect, get_object_or_404\n from django.contrib.auth.decorators import login_required\n+from rest_framework import status\n+from rest_framework.decorators import api_view\n+from rest_framework.response import Response\n \n+from mathesar.state import reset_reflection\n from mathesar.models.base import Database, Schema, Table\n from mathesar.api.serializers.databases import DatabaseSerializer, TypeSerializer\n from mathesar.api.serializers.schemas import SchemaSerializer\n@@ -106,6 +110,13 @@\n return redirect('schema_home', db_name=database.name, schema_id=schema.id)\n \n \n+@login_required\n+@api_view(['POST'])\n+def reflect_all(_):\n+ reset_reflection()\n+ return Response(status=status.HTTP_200_OK)\n+\n+\n @login_required\n def home(request):\n database = get_current_database(request, None)\n", "issue": "Reflection no longer runs on a schedule\n## Description\r\n<!-- A clear and concise description of what the bug is. -->\r\n\r\nWith some recent performance improvements, we introduced a known regression in that we no longer reflect DB objects on some schedule under all circumstances.\r\n\r\nIn particular, if you add a table (or tables) via some other client, it will never appear in the UI unless you've also done some other DDL operation _through_ the UI.\r\n\r\n## Expected behavior\r\n<!-- A clear and concise description of what you expected to happen. 
-->\r\n\r\nIt should be possible to reflect DB objects when needed.\n", "before_files": [{"content": "from django.urls import include, path, re_path\nfrom rest_framework_nested import routers\n\nfrom mathesar import views\nfrom mathesar.api.db import viewsets as db_viewsets\nfrom mathesar.api.ui import viewsets as ui_viewsets\n\ndb_router = routers.DefaultRouter()\ndb_router.register(r'tables', db_viewsets.TableViewSet, basename='table')\ndb_router.register(r'queries', db_viewsets.QueryViewSet, basename='query')\ndb_router.register(r'links', db_viewsets.LinkViewSet, basename='links')\ndb_router.register(r'schemas', db_viewsets.SchemaViewSet, basename='schema')\ndb_router.register(r'databases', db_viewsets.DatabaseViewSet, basename='database')\ndb_router.register(r'data_files', db_viewsets.DataFileViewSet, basename='data-file')\n\ndb_table_router = routers.NestedSimpleRouter(db_router, r'tables', lookup='table')\ndb_table_router.register(r'records', db_viewsets.RecordViewSet, basename='table-record')\ndb_table_router.register(r'settings', db_viewsets.TableSettingsViewSet, basename='table-setting')\ndb_table_router.register(r'columns', db_viewsets.ColumnViewSet, basename='table-column')\ndb_table_router.register(r'constraints', db_viewsets.ConstraintViewSet, basename='table-constraint')\n\nui_router = routers.DefaultRouter()\nui_router.register(r'databases', ui_viewsets.DatabaseViewSet, basename='database')\nui_router.register(r'users', ui_viewsets.UserViewSet, basename='user')\nui_router.register(r'database_roles', ui_viewsets.DatabaseRoleViewSet, basename='database_role')\nui_router.register(r'schema_roles', ui_viewsets.SchemaRoleViewSet, basename='schema_role')\n\nurlpatterns = [\n path('api/db/v0/', include(db_router.urls)),\n path('api/db/v0/', include(db_table_router.urls)),\n path('api/ui/v0/', include(ui_router.urls)),\n path('auth/', include('django.contrib.auth.urls')),\n\n path('', views.home, name='home'),\n path('<db_name>/', views.schemas, name='schemas'),\n re_path(\n r'^(?P<db_name>\\w+)/(?P<schema_id>\\w+)/',\n views.schema_home,\n name='schema_home'\n ),\n]\n", "path": "mathesar/urls.py"}, {"content": "from django.shortcuts import render, redirect, get_object_or_404\nfrom django.contrib.auth.decorators import login_required\n\nfrom mathesar.models.base import Database, Schema, Table\nfrom mathesar.api.serializers.databases import DatabaseSerializer, TypeSerializer\nfrom mathesar.api.serializers.schemas import SchemaSerializer\nfrom mathesar.api.serializers.tables import TableSerializer\nfrom mathesar.api.serializers.queries import QuerySerializer\nfrom mathesar.database.types import UIType\nfrom mathesar.models.query import UIQuery\n\n\ndef get_schema_list(request, database):\n schema_serializer = SchemaSerializer(\n Schema.objects.filter(database=database),\n many=True,\n context={'request': request}\n )\n return schema_serializer.data\n\n\ndef get_database_list(request):\n database_serializer = DatabaseSerializer(\n Database.objects.all(),\n many=True,\n context={'request': request}\n )\n return database_serializer.data\n\n\ndef get_table_list(request, schema):\n if schema is None:\n return []\n table_serializer = TableSerializer(\n Table.objects.filter(schema=schema),\n many=True,\n context={'request': request}\n )\n return table_serializer.data\n\n\ndef get_queries_list(request, schema):\n if schema is None:\n return []\n query_serializer = QuerySerializer(\n UIQuery.objects.filter(base_table__schema=schema),\n many=True,\n context={'request': request}\n )\n return 
query_serializer.data\n\n\ndef get_ui_type_list(request, database):\n if database is None:\n return []\n type_serializer = TypeSerializer(\n UIType,\n many=True,\n context={'request': request}\n )\n return type_serializer.data\n\n\ndef get_common_data(request, database, schema=None):\n return {\n 'current_db': database.name if database else None,\n 'current_schema': schema.id if schema else None,\n 'schemas': get_schema_list(request, database),\n 'databases': get_database_list(request),\n 'tables': get_table_list(request, schema),\n 'queries': get_queries_list(request, schema),\n 'abstract_types': get_ui_type_list(request, database)\n }\n\n\ndef get_current_database(request, db_name):\n # if there's a DB name passed in, try to retrieve the database, or return a 404 error.\n if db_name is not None:\n return get_object_or_404(Database, name=db_name)\n else:\n try:\n # Try to get the first database available\n return Database.objects.order_by('id').first()\n except Database.DoesNotExist:\n return None\n\n\ndef get_current_schema(request, schema_id, database):\n # if there's a schema ID passed in, try to retrieve the schema, or return a 404 error.\n if schema_id is not None:\n return get_object_or_404(Schema, id=schema_id)\n else:\n try:\n # Try to get the first schema in the DB\n return Schema.objects.filter(database=database).order_by('id').first()\n except Schema.DoesNotExist:\n return None\n\n\ndef render_schema(request, database, schema):\n # if there's no schema available, redirect to the schemas page.\n if not schema:\n return redirect('schemas', db_name=database.name)\n else:\n # We are redirecting so that the correct URL is passed to the frontend.\n return redirect('schema_home', db_name=database.name, schema_id=schema.id)\n\n\n@login_required\ndef home(request):\n database = get_current_database(request, None)\n return redirect('schemas', db_name=database.name)\n\n\n@login_required\ndef schema_home(request, db_name, schema_id, **kwargs):\n database = get_current_database(request, db_name)\n schema = get_current_schema(request, schema_id, database)\n return render(request, 'mathesar/index.html', {\n 'common_data': get_common_data(request, database, schema)\n })\n\n\n@login_required\ndef schemas(request, db_name):\n database = get_current_database(request, db_name)\n return render(request, 'mathesar/index.html', {\n 'common_data': get_common_data(request, database, None)\n })\n", "path": "mathesar/views.py"}]}
2,382
356
gh_patches_debug_46346
rasdani/github-patches
git_diff
vaexio__vaex-1113
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [BUG-REPORT]Overflowing memory using a custom selection in df.count() in Vaex 4.0.0a5 Using `vaex: 4.0.0a5` I am having the issue that my memory is overflowing when I do a count of a selection that combines a `df.select()` selection with an expression. The MWE below crashes for me. The machine that I am running this on has 251G memory, reaches 100% of this and then crashes. I am pretty sure that the bug arises from combining the named selection and the `(x>y)` expression. ``` import vaex import numpy as np x,y,z = np.random.normal(size=(3,10_000_000)) df = vaex.from_arrays(x=x,y=y,z=z) df.select('x*y<z*z',name='tmp') df.count(selection='(tmp)&(x>y)') ``` </issue> <code> [start of packages/vaex-core/vaex/scopes.py] 1 from __future__ import division, print_function 2 import logging 3 import numpy as np 4 import pyarrow as pa 5 import vaex.array_types 6 import vaex.arrow.numpy_dispatch 7 8 9 from .utils import (_ensure_strings_from_expressions, 10 _ensure_string_from_expression, 11 _ensure_list, 12 _is_limit, 13 _isnumber, 14 _issequence, 15 _is_string, 16 _parse_reduction, 17 _parse_n, 18 _normalize_selection_name, 19 _normalize, 20 _parse_f, 21 _expand, 22 _expand_shape, 23 _expand_limits, 24 as_flat_float, 25 as_flat_array, 26 _split_and_combine_mask) 27 from .expression import expression_namespace 28 from vaex.arrow.numpy_dispatch import wrap, unwrap 29 import vaex.expression 30 31 logger = logging.getLogger('vaex.scopes') 32 33 34 class ScopeBase(object): 35 def get(self, attr, default=None): # otherwise pdb crashes during pytest 36 if attr == "__tracebackhide__": 37 return False 38 return default 39 40 41 class UnitScope(ScopeBase): 42 def __init__(self, df, value=None): 43 self.df = df 44 self.value = value 45 46 def __getitem__(self, variable): 47 import astropy.units 48 if variable in self.df.units: 49 unit = self.df.units[variable] 50 return (self.value * unit) if self.value is not None else unit 51 elif variable in self.df.virtual_columns: 52 return eval(self.df.virtual_columns[variable], expression_namespace, self) 53 elif variable in self.df.variables: 54 return astropy.units.dimensionless_unscaled # TODO units for variables? 
55 else: 56 raise KeyError("unkown variable %s" % variable) 57 58 59 class _BlockScope(ScopeBase): 60 def __init__(self, df, i1, i2, mask=None, **variables): 61 """ 62 63 :param DataFrameLocal DataFrame: the *local* DataFrame 64 :param i1: start index 65 :param i2: end index 66 :param values: 67 :return: 68 """ 69 self.df = df 70 self.i1 = int(i1) 71 self.i2 = int(i2) 72 self.variables = variables 73 self.values = dict(self.variables) 74 self.buffers = {} 75 self.mask = mask if mask is not None else None 76 77 def move(self, i1, i2): 78 length_new = i2 - i1 79 length_old = self.i2 - self.i1 80 if length_new > length_old: # old buffers are too small, discard them 81 self.buffers = {} 82 else: 83 for name in list(self.buffers.keys()): 84 self.buffers[name] = self.buffers[name][:length_new] 85 self.i1 = int(i1) 86 self.i2 = int(i2) 87 self.values = dict(self.variables) 88 89 def __contains__(self, name): # otherwise pdb crashes during pytest 90 return name in self.buffers # not sure this should also include varibles, columns and virtual columns 91 92 def _ensure_buffer(self, column): 93 if column not in self.buffers: 94 logger.debug("creating column for: %s", column) 95 self.buffers[column] = np.zeros(self.i2 - self.i1) 96 97 def evaluate(self, expression, out=None): 98 if isinstance(expression, vaex.expression.Expression): 99 expression = expression.expression 100 try: 101 # logger.debug("try avoid evaluating: %s", expression) 102 result = self[expression] 103 except KeyError: 104 # logger.debug("no luck, eval: %s", expression) 105 # result = ne.evaluate(expression, local_dict=self, out=out) 106 # logger.debug("in eval") 107 # eval("def f(") 108 result = eval(expression, expression_namespace, self) 109 self.values[expression] = result 110 # if out is not None: 111 # out[:] = result 112 # result = out 113 # logger.debug("out eval") 114 # logger.debug("done with eval of %s", expression) 115 result = unwrap(result) 116 return result 117 118 def __getitem__(self, variable): 119 # logger.debug("get " + variable) 120 # return self.df.columns[variable][self.i1:self.i2] 121 if variable == 'df': 122 return self # to support df['no!identifier'] 123 if variable in expression_namespace: 124 return expression_namespace[variable] 125 try: 126 if variable in self.values: 127 return self.values[variable] 128 elif variable in self.df.columns: 129 offset = self.df._index_start 130 # if self.df._needs_copy(variable): 131 # self._ensure_buffer(variable) 132 # self.values[variable] = self.buffers[variable] = self.df.columns[variable][self.i1:self.i2].astype(np.float64) 133 # Previously we casted anything to .astype(np.float64), this led to rounding off of int64, when exporting 134 # self.values[variable] = self.df.columns[variable][offset+self.i1:offset+self.i2][:] 135 # else: 136 values = self.df.columns[variable][offset+self.i1:offset+self.i2] 137 if self.mask is not None: 138 # TODO: we may want to put this in array_types 139 if isinstance(values, (pa.Array, pa.ChunkedArray)): 140 values = values.filter(vaex.array_types.to_arrow(self.mask)) 141 else: 142 values = values[self.mask] 143 values = wrap(values) 144 self.values[variable] = values 145 elif variable in list(self.df.virtual_columns.keys()): 146 expression = self.df.virtual_columns[variable] 147 if isinstance(expression, dict): 148 function = expression['function'] 149 arguments = [self.evaluate(k) for k in expression['arguments']] 150 self.values[variable] = function(*arguments) 151 else: 152 # self._ensure_buffer(variable) 153 values = 
self.evaluate(expression) 154 values = wrap(values) 155 self.values[variable] = values 156 # self.values[variable] = self.buffers[variable] 157 elif variable in self.df.functions: 158 f = self.df.functions[variable].f 159 return vaex.arrow.numpy_dispatch.autowrapper(f) 160 if variable not in self.values: 161 raise KeyError("Unknown variables or column: %r" % (variable,)) 162 163 return self.values[variable] 164 except: 165 # logger.exception("error in evaluating: %r" % variable) 166 raise 167 168 169 class _BlockScopeSelection(ScopeBase): 170 def __init__(self, df, i1, i2, selection=None, cache=False, filter_mask=None): 171 self.df = df 172 self.i1 = i1 173 self.i2 = i2 174 self.selection = selection 175 self.store_in_cache = cache 176 self.filter_mask = filter_mask 177 178 def evaluate(self, expression): 179 if expression is True: 180 expression = "default" 181 try: 182 expression = _ensure_string_from_expression(expression) 183 result = eval(expression, expression_namespace, self) 184 except: 185 import traceback as tb 186 tb.print_stack() 187 raise 188 result = unwrap(result) 189 return result 190 191 def __contains__(self, name): # otherwise pdb crashes during pytest 192 return False 193 194 def __getitem__(self, variable): 195 if variable == "__tracebackhide__": # required for tracebacks 196 return False 197 # logger.debug("getitem for selection: %s", variable) 198 try: 199 selection = self.selection 200 if selection is None and self.df.has_selection(variable): 201 selection = self.df.get_selection(variable) 202 # logger.debug("selection for %r: %s %r", variable, selection, self.df.selection_histories) 203 key = (self.i1, self.i2) 204 if selection: 205 assert variable in self.df._selection_masks, "%s mask not found" % (variable, ) 206 cache = self.df._selection_mask_caches[variable] 207 # logger.debug("selection cache: %r" % cache) 208 full_mask = self.df._selection_masks[variable] 209 selection_in_cache, mask = cache.get(key, (None, None)) 210 211 # logger.debug("mask for %r is %r", variable, mask) 212 if selection_in_cache == selection: 213 if self.filter_mask is not None: 214 return mask[self.filter_mask] 215 return mask 216 # logger.debug("was not cached") 217 if variable in self.df.variables: 218 return self.df.variables[variable] 219 mask_values = selection.evaluate(self.df, variable, self.i1, self.i2, self.filter_mask) 220 221 # get a view on a subset of the mask 222 sub_mask = full_mask.view(self.i1, self.i2) 223 sub_mask_array = np.asarray(sub_mask) 224 # and update it 225 if self.filter_mask is not None: # if we have a mask, the selection we evaluated is also filtered 226 sub_mask_array[:] = 0 227 sub_mask_array[:][self.filter_mask] = mask_values 228 else: 229 sub_mask_array[:] = mask_values 230 # logger.debug("put selection in mask with key %r" % (key,)) 231 if self.store_in_cache: 232 cache[key] = selection, sub_mask_array 233 # cache[key] = selection, mask_values 234 if self.filter_mask is not None: 235 return sub_mask_array[self.filter_mask] 236 else: 237 return sub_mask_array 238 # return mask_values 239 else: 240 offset = self.df._index_start 241 if variable in expression_namespace: 242 return expression_namespace[variable] 243 elif variable in self.df.columns: 244 values = self.df.columns[variable][offset+self.i1:offset+self.i2] 245 # TODO: we may want to put this in array_types 246 if self.filter_mask is not None: 247 if isinstance(values, (pa.Array, pa.ChunkedArray)): 248 values = values.filter(vaex.array_types.to_arrow(self.filter_mask)) 249 else: 250 values = 
values[self.filter_mask] 251 values = wrap(values) 252 return values 253 elif variable in self.df.variables: 254 return self.df.variables[variable] 255 elif variable in self.df.virtual_columns: 256 expression = self.df.virtual_columns[variable] 257 # self._ensure_buffer(variable) 258 if expression == variable: 259 raise ValueError(f'Recursion protection: virtual column {variable} refers to itself') 260 values = self.evaluate(expression) # , out=self.buffers[variable]) 261 values = wrap(values) 262 return values 263 elif variable in self.df.functions: 264 f = self.df.functions[variable].f 265 return vaex.arrow.numpy_dispatch.autowrapper(f) 266 raise KeyError("Unknown variables or column: %r" % (variable,)) 267 except: 268 import traceback as tb 269 tb.print_exc() 270 logger.exception("error in evaluating: %r" % variable) 271 raise 272 273 [end of packages/vaex-core/vaex/scopes.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/packages/vaex-core/vaex/scopes.py b/packages/vaex-core/vaex/scopes.py --- a/packages/vaex-core/vaex/scopes.py +++ b/packages/vaex-core/vaex/scopes.py @@ -106,7 +106,7 @@ # logger.debug("in eval") # eval("def f(") result = eval(expression, expression_namespace, self) - self.values[expression] = result + self.values[expression] = wrap(result) # if out is not None: # out[:] = result # result = out @@ -140,19 +140,17 @@ values = values.filter(vaex.array_types.to_arrow(self.mask)) else: values = values[self.mask] - values = wrap(values) - self.values[variable] = values + self.values[variable] = wrap(values) elif variable in list(self.df.virtual_columns.keys()): expression = self.df.virtual_columns[variable] if isinstance(expression, dict): function = expression['function'] arguments = [self.evaluate(k) for k in expression['arguments']] - self.values[variable] = function(*arguments) + self.values[variable] = wrap(function(*arguments)) else: # self._ensure_buffer(variable) values = self.evaluate(expression) - values = wrap(values) - self.values[variable] = values + self.values[variable] = wrap(values) # self.values[variable] = self.buffers[variable] elif variable in self.df.functions: f = self.df.functions[variable].f @@ -211,11 +209,11 @@ # logger.debug("mask for %r is %r", variable, mask) if selection_in_cache == selection: if self.filter_mask is not None: - return mask[self.filter_mask] - return mask + return wrap(mask[self.filter_mask]) + return wrap(mask) # logger.debug("was not cached") if variable in self.df.variables: - return self.df.variables[variable] + return wrap(self.df.variables[variable]) mask_values = selection.evaluate(self.df, variable, self.i1, self.i2, self.filter_mask) # get a view on a subset of the mask @@ -232,14 +230,14 @@ cache[key] = selection, sub_mask_array # cache[key] = selection, mask_values if self.filter_mask is not None: - return sub_mask_array[self.filter_mask] + return wrap(sub_mask_array[self.filter_mask]) else: - return sub_mask_array + return wrap(sub_mask_array) # return mask_values else: offset = self.df._index_start if variable in expression_namespace: - return expression_namespace[variable] + return wrap(expression_namespace[variable]) elif variable in self.df.columns: values = self.df.columns[variable][offset+self.i1:offset+self.i2] # TODO: we may want to put this in array_types @@ -248,8 +246,7 @@ values = values.filter(vaex.array_types.to_arrow(self.filter_mask)) else: values = values[self.filter_mask] - values = wrap(values) - return values + return wrap(values) elif variable in self.df.variables: return self.df.variables[variable] elif variable in self.df.virtual_columns: @@ -258,8 +255,7 @@ if expression == variable: raise ValueError(f'Recursion protection: virtual column {variable} refers to itself') values = self.evaluate(expression) # , out=self.buffers[variable]) - values = wrap(values) - return values + return wrap(values) elif variable in self.df.functions: f = self.df.functions[variable].f return vaex.arrow.numpy_dispatch.autowrapper(f)
{"golden_diff": "diff --git a/packages/vaex-core/vaex/scopes.py b/packages/vaex-core/vaex/scopes.py\n--- a/packages/vaex-core/vaex/scopes.py\n+++ b/packages/vaex-core/vaex/scopes.py\n@@ -106,7 +106,7 @@\n # logger.debug(\"in eval\")\n # eval(\"def f(\")\n result = eval(expression, expression_namespace, self)\n- self.values[expression] = result\n+ self.values[expression] = wrap(result)\n # if out is not None:\n # out[:] = result\n # result = out\n@@ -140,19 +140,17 @@\n values = values.filter(vaex.array_types.to_arrow(self.mask))\n else:\n values = values[self.mask]\n- values = wrap(values)\n- self.values[variable] = values\n+ self.values[variable] = wrap(values)\n elif variable in list(self.df.virtual_columns.keys()):\n expression = self.df.virtual_columns[variable]\n if isinstance(expression, dict):\n function = expression['function']\n arguments = [self.evaluate(k) for k in expression['arguments']]\n- self.values[variable] = function(*arguments)\n+ self.values[variable] = wrap(function(*arguments))\n else:\n # self._ensure_buffer(variable)\n values = self.evaluate(expression)\n- values = wrap(values)\n- self.values[variable] = values\n+ self.values[variable] = wrap(values)\n # self.values[variable] = self.buffers[variable]\n elif variable in self.df.functions:\n f = self.df.functions[variable].f\n@@ -211,11 +209,11 @@\n # logger.debug(\"mask for %r is %r\", variable, mask)\n if selection_in_cache == selection:\n if self.filter_mask is not None:\n- return mask[self.filter_mask]\n- return mask\n+ return wrap(mask[self.filter_mask])\n+ return wrap(mask)\n # logger.debug(\"was not cached\")\n if variable in self.df.variables:\n- return self.df.variables[variable]\n+ return wrap(self.df.variables[variable])\n mask_values = selection.evaluate(self.df, variable, self.i1, self.i2, self.filter_mask)\n \n # get a view on a subset of the mask\n@@ -232,14 +230,14 @@\n cache[key] = selection, sub_mask_array\n # cache[key] = selection, mask_values\n if self.filter_mask is not None:\n- return sub_mask_array[self.filter_mask]\n+ return wrap(sub_mask_array[self.filter_mask])\n else:\n- return sub_mask_array\n+ return wrap(sub_mask_array)\n # return mask_values\n else:\n offset = self.df._index_start\n if variable in expression_namespace:\n- return expression_namespace[variable]\n+ return wrap(expression_namespace[variable])\n elif variable in self.df.columns:\n values = self.df.columns[variable][offset+self.i1:offset+self.i2]\n # TODO: we may want to put this in array_types\n@@ -248,8 +246,7 @@\n values = values.filter(vaex.array_types.to_arrow(self.filter_mask))\n else:\n values = values[self.filter_mask]\n- values = wrap(values)\n- return values\n+ return wrap(values)\n elif variable in self.df.variables:\n return self.df.variables[variable]\n elif variable in self.df.virtual_columns:\n@@ -258,8 +255,7 @@\n if expression == variable:\n raise ValueError(f'Recursion protection: virtual column {variable} refers to itself')\n values = self.evaluate(expression) # , out=self.buffers[variable])\n- values = wrap(values)\n- return values\n+ return wrap(values)\n elif variable in self.df.functions:\n f = self.df.functions[variable].f\n return vaex.arrow.numpy_dispatch.autowrapper(f)\n", "issue": "[BUG-REPORT]Overflowing memory using a custom selection in df.count() in Vaex 4.0.0a5\nUsing `vaex: 4.0.0a5` I am having the issue that my memory is overflowing when I do a count of a selection that combines a `df.select()` selection with an expression. The MWE below crashes for me. 
The machine that I am running this on has 251G memory, reaches 100% of this and then crashes.\r\n\r\nI am pretty sure that the bug arises from combining the named selection and the `(x>y)` expression.\r\n\r\n```\r\nimport vaex\r\nimport numpy as np\r\n\r\nx,y,z = np.random.normal(size=(3,10_000_000))\r\n\r\ndf = vaex.from_arrays(x=x,y=y,z=z)\r\n\r\ndf.select('x*y<z*z',name='tmp')\r\n\r\ndf.count(selection='(tmp)&(x>y)')\r\n```\n", "before_files": [{"content": "from __future__ import division, print_function\nimport logging\nimport numpy as np\nimport pyarrow as pa\nimport vaex.array_types\nimport vaex.arrow.numpy_dispatch\n\n\nfrom .utils import (_ensure_strings_from_expressions,\n _ensure_string_from_expression,\n _ensure_list,\n _is_limit,\n _isnumber,\n _issequence,\n _is_string,\n _parse_reduction,\n _parse_n,\n _normalize_selection_name,\n _normalize,\n _parse_f,\n _expand,\n _expand_shape,\n _expand_limits,\n as_flat_float,\n as_flat_array,\n _split_and_combine_mask)\nfrom .expression import expression_namespace\nfrom vaex.arrow.numpy_dispatch import wrap, unwrap\nimport vaex.expression\n\nlogger = logging.getLogger('vaex.scopes')\n\n\nclass ScopeBase(object):\n def get(self, attr, default=None): # otherwise pdb crashes during pytest\n if attr == \"__tracebackhide__\":\n return False\n return default\n\n\nclass UnitScope(ScopeBase):\n def __init__(self, df, value=None):\n self.df = df\n self.value = value\n\n def __getitem__(self, variable):\n import astropy.units\n if variable in self.df.units:\n unit = self.df.units[variable]\n return (self.value * unit) if self.value is not None else unit\n elif variable in self.df.virtual_columns:\n return eval(self.df.virtual_columns[variable], expression_namespace, self)\n elif variable in self.df.variables:\n return astropy.units.dimensionless_unscaled # TODO units for variables?\n else:\n raise KeyError(\"unkown variable %s\" % variable)\n\n\nclass _BlockScope(ScopeBase):\n def __init__(self, df, i1, i2, mask=None, **variables):\n \"\"\"\n\n :param DataFrameLocal DataFrame: the *local* DataFrame\n :param i1: start index\n :param i2: end index\n :param values:\n :return:\n \"\"\"\n self.df = df\n self.i1 = int(i1)\n self.i2 = int(i2)\n self.variables = variables\n self.values = dict(self.variables)\n self.buffers = {}\n self.mask = mask if mask is not None else None\n\n def move(self, i1, i2):\n length_new = i2 - i1\n length_old = self.i2 - self.i1\n if length_new > length_old: # old buffers are too small, discard them\n self.buffers = {}\n else:\n for name in list(self.buffers.keys()):\n self.buffers[name] = self.buffers[name][:length_new]\n self.i1 = int(i1)\n self.i2 = int(i2)\n self.values = dict(self.variables)\n\n def __contains__(self, name): # otherwise pdb crashes during pytest\n return name in self.buffers # not sure this should also include varibles, columns and virtual columns\n\n def _ensure_buffer(self, column):\n if column not in self.buffers:\n logger.debug(\"creating column for: %s\", column)\n self.buffers[column] = np.zeros(self.i2 - self.i1)\n\n def evaluate(self, expression, out=None):\n if isinstance(expression, vaex.expression.Expression):\n expression = expression.expression\n try:\n # logger.debug(\"try avoid evaluating: %s\", expression)\n result = self[expression]\n except KeyError:\n # logger.debug(\"no luck, eval: %s\", expression)\n # result = ne.evaluate(expression, local_dict=self, out=out)\n # logger.debug(\"in eval\")\n # eval(\"def f(\")\n result = eval(expression, expression_namespace, self)\n 
self.values[expression] = result\n # if out is not None:\n # out[:] = result\n # result = out\n # logger.debug(\"out eval\")\n # logger.debug(\"done with eval of %s\", expression)\n result = unwrap(result)\n return result\n\n def __getitem__(self, variable):\n # logger.debug(\"get \" + variable)\n # return self.df.columns[variable][self.i1:self.i2]\n if variable == 'df':\n return self # to support df['no!identifier']\n if variable in expression_namespace:\n return expression_namespace[variable]\n try:\n if variable in self.values:\n return self.values[variable]\n elif variable in self.df.columns:\n offset = self.df._index_start\n # if self.df._needs_copy(variable):\n # self._ensure_buffer(variable)\n # self.values[variable] = self.buffers[variable] = self.df.columns[variable][self.i1:self.i2].astype(np.float64)\n # Previously we casted anything to .astype(np.float64), this led to rounding off of int64, when exporting\n # self.values[variable] = self.df.columns[variable][offset+self.i1:offset+self.i2][:]\n # else:\n values = self.df.columns[variable][offset+self.i1:offset+self.i2]\n if self.mask is not None:\n # TODO: we may want to put this in array_types\n if isinstance(values, (pa.Array, pa.ChunkedArray)):\n values = values.filter(vaex.array_types.to_arrow(self.mask))\n else:\n values = values[self.mask]\n values = wrap(values)\n self.values[variable] = values\n elif variable in list(self.df.virtual_columns.keys()):\n expression = self.df.virtual_columns[variable]\n if isinstance(expression, dict):\n function = expression['function']\n arguments = [self.evaluate(k) for k in expression['arguments']]\n self.values[variable] = function(*arguments)\n else:\n # self._ensure_buffer(variable)\n values = self.evaluate(expression)\n values = wrap(values)\n self.values[variable] = values\n # self.values[variable] = self.buffers[variable]\n elif variable in self.df.functions:\n f = self.df.functions[variable].f\n return vaex.arrow.numpy_dispatch.autowrapper(f)\n if variable not in self.values:\n raise KeyError(\"Unknown variables or column: %r\" % (variable,))\n\n return self.values[variable]\n except:\n # logger.exception(\"error in evaluating: %r\" % variable)\n raise\n\n\nclass _BlockScopeSelection(ScopeBase):\n def __init__(self, df, i1, i2, selection=None, cache=False, filter_mask=None):\n self.df = df\n self.i1 = i1\n self.i2 = i2\n self.selection = selection\n self.store_in_cache = cache\n self.filter_mask = filter_mask\n\n def evaluate(self, expression):\n if expression is True:\n expression = \"default\"\n try:\n expression = _ensure_string_from_expression(expression)\n result = eval(expression, expression_namespace, self)\n except:\n import traceback as tb\n tb.print_stack()\n raise\n result = unwrap(result)\n return result\n\n def __contains__(self, name): # otherwise pdb crashes during pytest\n return False\n\n def __getitem__(self, variable):\n if variable == \"__tracebackhide__\": # required for tracebacks\n return False\n # logger.debug(\"getitem for selection: %s\", variable)\n try:\n selection = self.selection\n if selection is None and self.df.has_selection(variable):\n selection = self.df.get_selection(variable)\n # logger.debug(\"selection for %r: %s %r\", variable, selection, self.df.selection_histories)\n key = (self.i1, self.i2)\n if selection:\n assert variable in self.df._selection_masks, \"%s mask not found\" % (variable, )\n cache = self.df._selection_mask_caches[variable]\n # logger.debug(\"selection cache: %r\" % cache)\n full_mask = self.df._selection_masks[variable]\n 
selection_in_cache, mask = cache.get(key, (None, None))\n\n # logger.debug(\"mask for %r is %r\", variable, mask)\n if selection_in_cache == selection:\n if self.filter_mask is not None:\n return mask[self.filter_mask]\n return mask\n # logger.debug(\"was not cached\")\n if variable in self.df.variables:\n return self.df.variables[variable]\n mask_values = selection.evaluate(self.df, variable, self.i1, self.i2, self.filter_mask)\n \n # get a view on a subset of the mask\n sub_mask = full_mask.view(self.i1, self.i2)\n sub_mask_array = np.asarray(sub_mask)\n # and update it\n if self.filter_mask is not None: # if we have a mask, the selection we evaluated is also filtered\n sub_mask_array[:] = 0\n sub_mask_array[:][self.filter_mask] = mask_values\n else:\n sub_mask_array[:] = mask_values\n # logger.debug(\"put selection in mask with key %r\" % (key,))\n if self.store_in_cache:\n cache[key] = selection, sub_mask_array\n # cache[key] = selection, mask_values\n if self.filter_mask is not None:\n return sub_mask_array[self.filter_mask]\n else:\n return sub_mask_array\n # return mask_values\n else:\n offset = self.df._index_start\n if variable in expression_namespace:\n return expression_namespace[variable]\n elif variable in self.df.columns:\n values = self.df.columns[variable][offset+self.i1:offset+self.i2]\n # TODO: we may want to put this in array_types\n if self.filter_mask is not None:\n if isinstance(values, (pa.Array, pa.ChunkedArray)):\n values = values.filter(vaex.array_types.to_arrow(self.filter_mask))\n else:\n values = values[self.filter_mask]\n values = wrap(values)\n return values\n elif variable in self.df.variables:\n return self.df.variables[variable]\n elif variable in self.df.virtual_columns:\n expression = self.df.virtual_columns[variable]\n # self._ensure_buffer(variable)\n if expression == variable:\n raise ValueError(f'Recursion protection: virtual column {variable} refers to itself')\n values = self.evaluate(expression) # , out=self.buffers[variable])\n values = wrap(values)\n return values\n elif variable in self.df.functions:\n f = self.df.functions[variable].f\n return vaex.arrow.numpy_dispatch.autowrapper(f)\n raise KeyError(\"Unknown variables or column: %r\" % (variable,))\n except:\n import traceback as tb\n tb.print_exc()\n logger.exception(\"error in evaluating: %r\" % variable)\n raise\n\n", "path": "packages/vaex-core/vaex/scopes.py"}]}
3,777
869
gh_patches_debug_38585
rasdani/github-patches
git_diff
PaddlePaddle__models-2067
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> ocr_recognition 评估、预测错误 1、ocr_recognition CTC model eval.py有函数缺少参数: <img width="909" alt="e8ec154b99d031a872f8f222e1088e92" src="https://user-images.githubusercontent.com/46314656/53880163-453f1e00-404b-11e9-85c7-8dd84fe81020.png"> 2、预测时CTC和attention模型的报错: <img width="987" alt="93b4cc1a5a0309688355e42a33bddad7" src="https://user-images.githubusercontent.com/46314656/53880294-9e0eb680-404b-11e9-9619-47c6bdaff889.png"> </issue> <code> [start of PaddleCV/ocr_recognition/infer.py] 1 from __future__ import print_function 2 import paddle.fluid as fluid 3 from utility import add_arguments, print_arguments, to_lodtensor, get_ctc_feeder_data, get_attention_feeder_for_infer 4 import paddle.fluid.profiler as profiler 5 from crnn_ctc_model import ctc_infer 6 from attention_model import attention_infer 7 import numpy as np 8 import data_reader 9 import argparse 10 import functools 11 import os 12 import time 13 14 parser = argparse.ArgumentParser(description=__doc__) 15 add_arg = functools.partial(add_arguments, argparser=parser) 16 # yapf: disable 17 add_arg('model', str, "crnn_ctc", "Which type of network to be used. 'crnn_ctc' or 'attention'") 18 add_arg('model_path', str, None, "The model path to be used for inference.") 19 add_arg('input_images_dir', str, None, "The directory of images.") 20 add_arg('input_images_list', str, None, "The list file of images.") 21 add_arg('dict', str, None, "The dictionary. The result of inference will be index sequence if the dictionary was None.") 22 add_arg('use_gpu', bool, True, "Whether use GPU to infer.") 23 add_arg('iterations', int, 0, "The number of iterations. Zero or less means whole test set. More than 0 means the test set might be looped until # of iterations is reached.") 24 add_arg('profile', bool, False, "Whether to use profiling.") 25 add_arg('skip_batch_num', int, 0, "The number of first minibatches to skip as warm-up for better performance test.") 26 add_arg('batch_size', int, 1, "The minibatch size.") 27 # yapf: enable 28 29 30 def inference(args): 31 """OCR inference""" 32 if args.model == "crnn_ctc": 33 infer = ctc_infer 34 get_feeder_data = get_ctc_feeder_data 35 else: 36 infer = attention_infer 37 get_feeder_data = get_attention_feeder_for_infer 38 eos = 1 39 sos = 0 40 num_classes = data_reader.num_classes() 41 data_shape = data_reader.data_shape() 42 # define network 43 images = fluid.layers.data(name='pixel', shape=data_shape, dtype='float32') 44 ids = infer(images, num_classes, use_cudnn=True if args.use_gpu else False) 45 # data reader 46 infer_reader = data_reader.inference( 47 batch_size=args.batch_size, 48 infer_images_dir=args.input_images_dir, 49 infer_list_file=args.input_images_list, 50 cycle=True if args.iterations > 0 else False, 51 model=args.model) 52 # prepare environment 53 place = fluid.CPUPlace() 54 if args.use_gpu: 55 place = fluid.CUDAPlace(0) 56 57 exe = fluid.Executor(place) 58 exe.run(fluid.default_startup_program()) 59 60 # load dictionary 61 dict_map = None 62 if args.dict is not None and os.path.isfile(args.dict): 63 dict_map = {} 64 with open(args.dict) as dict_file: 65 for i, word in enumerate(dict_file): 66 dict_map[i] = word.strip() 67 print("Loaded dict from %s" % args.dict) 68 69 # load init model 70 model_dir = args.model_path 71 model_file_name = None 72 if not os.path.isdir(args.model_path): 73 model_dir = os.path.dirname(args.model_path) 74 model_file_name = os.path.basename(args.model_path) 75 
fluid.io.load_params(exe, dirname=model_dir, filename=model_file_name) 76 print("Init model from: %s." % args.model_path) 77 78 batch_times = [] 79 iters = 0 80 for data in infer_reader(): 81 feed_dict = get_feeder_data(data, place, need_label=False) 82 if args.iterations > 0 and iters == args.iterations + args.skip_batch_num: 83 break 84 if iters < args.skip_batch_num: 85 print("Warm-up itaration") 86 if iters == args.skip_batch_num: 87 profiler.reset_profiler() 88 89 start = time.time() 90 result = exe.run(fluid.default_main_program(), 91 feed=feed_dict, 92 fetch_list=[ids], 93 return_numpy=False) 94 indexes = prune(np.array(result[0]).flatten(), 0, 1) 95 batch_time = time.time() - start 96 fps = args.batch_size / batch_time 97 batch_times.append(batch_time) 98 if dict_map is not None: 99 print("Iteration %d, latency: %.5f s, fps: %f, result: %s" % ( 100 iters, 101 batch_time, 102 fps, 103 [dict_map[index] for index in indexes], )) 104 else: 105 print("Iteration %d, latency: %.5f s, fps: %f, result: %s" % ( 106 iters, 107 batch_time, 108 fps, 109 indexes, )) 110 111 iters += 1 112 113 latencies = batch_times[args.skip_batch_num:] 114 latency_avg = np.average(latencies) 115 latency_pc99 = np.percentile(latencies, 99) 116 fpses = np.divide(args.batch_size, latencies) 117 fps_avg = np.average(fpses) 118 fps_pc99 = np.percentile(fpses, 1) 119 120 # Benchmark output 121 print('\nTotal examples (incl. warm-up): %d' % (iters * args.batch_size)) 122 print('average latency: %.5f s, 99pc latency: %.5f s' % (latency_avg, 123 latency_pc99)) 124 print('average fps: %.5f, fps for 99pc latency: %.5f' % (fps_avg, fps_pc99)) 125 126 127 def prune(words, sos, eos): 128 """Remove unused tokens in prediction result.""" 129 start_index = 0 130 end_index = len(words) 131 if sos in words: 132 start_index = np.where(words == sos)[0][0] + 1 133 if eos in words: 134 end_index = np.where(words == eos)[0][0] 135 return words[start_index:end_index] 136 137 138 def main(): 139 args = parser.parse_args() 140 print_arguments(args) 141 if args.profile: 142 if args.use_gpu: 143 with profiler.cuda_profiler("cuda_profiler.txt", 'csv') as nvprof: 144 inference(args) 145 else: 146 with profiler.profiler("CPU", sorted_key='total') as cpuprof: 147 inference(args) 148 else: 149 inference(args) 150 151 152 if __name__ == "__main__": 153 main() 154 [end of PaddleCV/ocr_recognition/infer.py] [start of PaddleCV/ocr_recognition/utility.py] 1 """Contains common utility functions.""" 2 # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. 3 # 4 #Licensed under the Apache License, Version 2.0 (the "License"); 5 #you may not use this file except in compliance with the License. 6 #You may obtain a copy of the License at 7 # 8 # http://www.apache.org/licenses/LICENSE-2.0 9 # 10 #Unless required by applicable law or agreed to in writing, software 11 #distributed under the License is distributed on an "AS IS" BASIS, 12 #WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 #See the License for the specific language governing permissions and 14 #limitations under the License. 15 16 from __future__ import absolute_import 17 from __future__ import division 18 from __future__ import print_function 19 import distutils.util 20 import numpy as np 21 from paddle.fluid import core 22 import paddle.fluid as fluid 23 import six 24 25 26 def print_arguments(args): 27 """Print argparse's arguments. 28 29 Usage: 30 31 .. 
code-block:: python 32 33 parser = argparse.ArgumentParser() 34 parser.add_argument("name", default="Jonh", type=str, help="User name.") 35 args = parser.parse_args() 36 print_arguments(args) 37 38 :param args: Input argparse.Namespace for printing. 39 :type args: argparse.Namespace 40 """ 41 print("----------- Configuration Arguments -----------") 42 for arg, value in sorted(six.iteritems(vars(args))): 43 print("%s: %s" % (arg, value)) 44 print("------------------------------------------------") 45 46 47 def add_arguments(argname, type, default, help, argparser, **kwargs): 48 """Add argparse's argument. 49 50 Usage: 51 52 .. code-block:: python 53 54 parser = argparse.ArgumentParser() 55 add_argument("name", str, "Jonh", "User name.", parser) 56 args = parser.parse_args() 57 """ 58 type = distutils.util.strtobool if type == bool else type 59 argparser.add_argument( 60 "--" + argname, 61 default=default, 62 type=type, 63 help=help + ' Default: %(default)s.', 64 **kwargs) 65 66 67 def to_lodtensor(data, place): 68 seq_lens = [len(seq) for seq in data] 69 cur_len = 0 70 lod = [cur_len] 71 for l in seq_lens: 72 cur_len += l 73 lod.append(cur_len) 74 flattened_data = np.concatenate(data, axis=0).astype("int32") 75 flattened_data = flattened_data.reshape([len(flattened_data), 1]) 76 res = core.LoDTensor() 77 res.set(flattened_data, place) 78 res.set_lod([lod]) 79 return res 80 81 82 def get_ctc_feeder_data(data, place, need_label=True): 83 pixel_tensor = core.LoDTensor() 84 pixel_data = None 85 pixel_data = np.concatenate( 86 list(map(lambda x: x[0][np.newaxis, :], data)), axis=0).astype("float32") 87 pixel_tensor.set(pixel_data, place) 88 label_tensor = to_lodtensor(list(map(lambda x: x[1], data)), place) 89 if need_label: 90 return {"pixel": pixel_tensor, "label": label_tensor} 91 else: 92 return {"pixel": pixel_tensor} 93 94 95 def get_attention_feeder_data(data, place, need_label=True): 96 pixel_tensor = core.LoDTensor() 97 pixel_data = None 98 pixel_data = np.concatenate( 99 list(map(lambda x: x[0][np.newaxis, :], data)), axis=0).astype("float32") 100 pixel_tensor.set(pixel_data, place) 101 label_in_tensor = to_lodtensor(list(map(lambda x: x[1], data)), place) 102 label_out_tensor = to_lodtensor(list(map(lambda x: x[2], data)), place) 103 if need_label: 104 return { 105 "pixel": pixel_tensor, 106 "label_in": label_in_tensor, 107 "label_out": label_out_tensor 108 } 109 else: 110 return {"pixel": pixel_tensor} 111 112 113 def get_attention_feeder_for_infer(data, place): 114 batch_size = len(data) 115 init_ids_data = np.array([0 for _ in range(batch_size)], dtype='int64') 116 init_scores_data = np.array( 117 [1. 
for _ in range(batch_size)], dtype='float32') 118 init_ids_data = init_ids_data.reshape((batch_size, 1)) 119 init_scores_data = init_scores_data.reshape((batch_size, 1)) 120 init_recursive_seq_lens = [1] * batch_size 121 init_recursive_seq_lens = [init_recursive_seq_lens, init_recursive_seq_lens] 122 init_ids = fluid.create_lod_tensor(init_ids_data, init_recursive_seq_lens, 123 place) 124 init_scores = fluid.create_lod_tensor(init_scores_data, 125 init_recursive_seq_lens, place) 126 127 pixel_tensor = core.LoDTensor() 128 pixel_data = None 129 pixel_data = np.concatenate( 130 list(map(lambda x: x[0][np.newaxis, :], data)), axis=0).astype("float32") 131 pixel_tensor.set(pixel_data, place) 132 return { 133 "pixel": pixel_tensor, 134 "init_ids": init_ids, 135 "init_scores": init_scores 136 } 137 [end of PaddleCV/ocr_recognition/utility.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/PaddleCV/ocr_recognition/infer.py b/PaddleCV/ocr_recognition/infer.py --- a/PaddleCV/ocr_recognition/infer.py +++ b/PaddleCV/ocr_recognition/infer.py @@ -31,7 +31,7 @@ """OCR inference""" if args.model == "crnn_ctc": infer = ctc_infer - get_feeder_data = get_ctc_feeder_data + get_feeder_data = get_ctc_feeder_for_infer else: infer = attention_infer get_feeder_data = get_attention_feeder_for_infer @@ -78,7 +78,7 @@ batch_times = [] iters = 0 for data in infer_reader(): - feed_dict = get_feeder_data(data, place, need_label=False) + feed_dict = get_feeder_data(data, place) if args.iterations > 0 and iters == args.iterations + args.skip_batch_num: break if iters < args.skip_batch_num: diff --git a/PaddleCV/ocr_recognition/utility.py b/PaddleCV/ocr_recognition/utility.py --- a/PaddleCV/ocr_recognition/utility.py +++ b/PaddleCV/ocr_recognition/utility.py @@ -83,7 +83,8 @@ pixel_tensor = core.LoDTensor() pixel_data = None pixel_data = np.concatenate( - list(map(lambda x: x[0][np.newaxis, :], data)), axis=0).astype("float32") + list(map(lambda x: x[0][np.newaxis, :], data)), + axis=0).astype("float32") pixel_tensor.set(pixel_data, place) label_tensor = to_lodtensor(list(map(lambda x: x[1], data)), place) if need_label: @@ -92,11 +93,16 @@ return {"pixel": pixel_tensor} +def get_ctc_feeder_for_infer(data, place): + return get_ctc_feeder_data(data, place, need_label=False) + + def get_attention_feeder_data(data, place, need_label=True): pixel_tensor = core.LoDTensor() pixel_data = None pixel_data = np.concatenate( - list(map(lambda x: x[0][np.newaxis, :], data)), axis=0).astype("float32") + list(map(lambda x: x[0][np.newaxis, :], data)), + axis=0).astype("float32") pixel_tensor.set(pixel_data, place) label_in_tensor = to_lodtensor(list(map(lambda x: x[1], data)), place) label_out_tensor = to_lodtensor(list(map(lambda x: x[2], data)), place) @@ -127,7 +133,8 @@ pixel_tensor = core.LoDTensor() pixel_data = None pixel_data = np.concatenate( - list(map(lambda x: x[0][np.newaxis, :], data)), axis=0).astype("float32") + list(map(lambda x: x[0][np.newaxis, :], data)), + axis=0).astype("float32") pixel_tensor.set(pixel_data, place) return { "pixel": pixel_tensor,
{"golden_diff": "diff --git a/PaddleCV/ocr_recognition/infer.py b/PaddleCV/ocr_recognition/infer.py\n--- a/PaddleCV/ocr_recognition/infer.py\n+++ b/PaddleCV/ocr_recognition/infer.py\n@@ -31,7 +31,7 @@\n \"\"\"OCR inference\"\"\"\n if args.model == \"crnn_ctc\":\n infer = ctc_infer\n- get_feeder_data = get_ctc_feeder_data\n+ get_feeder_data = get_ctc_feeder_for_infer\n else:\n infer = attention_infer\n get_feeder_data = get_attention_feeder_for_infer\n@@ -78,7 +78,7 @@\n batch_times = []\n iters = 0\n for data in infer_reader():\n- feed_dict = get_feeder_data(data, place, need_label=False)\n+ feed_dict = get_feeder_data(data, place)\n if args.iterations > 0 and iters == args.iterations + args.skip_batch_num:\n break\n if iters < args.skip_batch_num:\ndiff --git a/PaddleCV/ocr_recognition/utility.py b/PaddleCV/ocr_recognition/utility.py\n--- a/PaddleCV/ocr_recognition/utility.py\n+++ b/PaddleCV/ocr_recognition/utility.py\n@@ -83,7 +83,8 @@\n pixel_tensor = core.LoDTensor()\n pixel_data = None\n pixel_data = np.concatenate(\n- list(map(lambda x: x[0][np.newaxis, :], data)), axis=0).astype(\"float32\")\n+ list(map(lambda x: x[0][np.newaxis, :], data)),\n+ axis=0).astype(\"float32\")\n pixel_tensor.set(pixel_data, place)\n label_tensor = to_lodtensor(list(map(lambda x: x[1], data)), place)\n if need_label:\n@@ -92,11 +93,16 @@\n return {\"pixel\": pixel_tensor}\n \n \n+def get_ctc_feeder_for_infer(data, place):\n+ return get_ctc_feeder_data(data, place, need_label=False)\n+\n+\n def get_attention_feeder_data(data, place, need_label=True):\n pixel_tensor = core.LoDTensor()\n pixel_data = None\n pixel_data = np.concatenate(\n- list(map(lambda x: x[0][np.newaxis, :], data)), axis=0).astype(\"float32\")\n+ list(map(lambda x: x[0][np.newaxis, :], data)),\n+ axis=0).astype(\"float32\")\n pixel_tensor.set(pixel_data, place)\n label_in_tensor = to_lodtensor(list(map(lambda x: x[1], data)), place)\n label_out_tensor = to_lodtensor(list(map(lambda x: x[2], data)), place)\n@@ -127,7 +133,8 @@\n pixel_tensor = core.LoDTensor()\n pixel_data = None\n pixel_data = np.concatenate(\n- list(map(lambda x: x[0][np.newaxis, :], data)), axis=0).astype(\"float32\")\n+ list(map(lambda x: x[0][np.newaxis, :], data)),\n+ axis=0).astype(\"float32\")\n pixel_tensor.set(pixel_data, place)\n return {\n \"pixel\": pixel_tensor,\n", "issue": "ocr_recognition \u8bc4\u4f30\u3001\u9884\u6d4b\u9519\u8bef\n1\u3001ocr_recognition CTC model eval.py\u6709\u51fd\u6570\u7f3a\u5c11\u53c2\u6570\uff1a\r\n<img width=\"909\" alt=\"e8ec154b99d031a872f8f222e1088e92\" src=\"https://user-images.githubusercontent.com/46314656/53880163-453f1e00-404b-11e9-85c7-8dd84fe81020.png\">\r\n2\u3001\u9884\u6d4b\u65f6CTC\u548cattention\u6a21\u578b\u7684\u62a5\u9519\uff1a\r\n<img width=\"987\" alt=\"93b4cc1a5a0309688355e42a33bddad7\" src=\"https://user-images.githubusercontent.com/46314656/53880294-9e0eb680-404b-11e9-9619-47c6bdaff889.png\">\r\n\n", "before_files": [{"content": "from __future__ import print_function\nimport paddle.fluid as fluid\nfrom utility import add_arguments, print_arguments, to_lodtensor, get_ctc_feeder_data, get_attention_feeder_for_infer\nimport paddle.fluid.profiler as profiler\nfrom crnn_ctc_model import ctc_infer\nfrom attention_model import attention_infer\nimport numpy as np\nimport data_reader\nimport argparse\nimport functools\nimport os\nimport time\n\nparser = argparse.ArgumentParser(description=__doc__)\nadd_arg = functools.partial(add_arguments, argparser=parser)\n# yapf: disable\nadd_arg('model', str, \"crnn_ctc\", 
\"Which type of network to be used. 'crnn_ctc' or 'attention'\")\nadd_arg('model_path', str, None, \"The model path to be used for inference.\")\nadd_arg('input_images_dir', str, None, \"The directory of images.\")\nadd_arg('input_images_list', str, None, \"The list file of images.\")\nadd_arg('dict', str, None, \"The dictionary. The result of inference will be index sequence if the dictionary was None.\")\nadd_arg('use_gpu', bool, True, \"Whether use GPU to infer.\")\nadd_arg('iterations', int, 0, \"The number of iterations. Zero or less means whole test set. More than 0 means the test set might be looped until # of iterations is reached.\")\nadd_arg('profile', bool, False, \"Whether to use profiling.\")\nadd_arg('skip_batch_num', int, 0, \"The number of first minibatches to skip as warm-up for better performance test.\")\nadd_arg('batch_size', int, 1, \"The minibatch size.\")\n# yapf: enable\n\n\ndef inference(args):\n \"\"\"OCR inference\"\"\"\n if args.model == \"crnn_ctc\":\n infer = ctc_infer\n get_feeder_data = get_ctc_feeder_data\n else:\n infer = attention_infer\n get_feeder_data = get_attention_feeder_for_infer\n eos = 1\n sos = 0\n num_classes = data_reader.num_classes()\n data_shape = data_reader.data_shape()\n # define network\n images = fluid.layers.data(name='pixel', shape=data_shape, dtype='float32')\n ids = infer(images, num_classes, use_cudnn=True if args.use_gpu else False)\n # data reader\n infer_reader = data_reader.inference(\n batch_size=args.batch_size,\n infer_images_dir=args.input_images_dir,\n infer_list_file=args.input_images_list,\n cycle=True if args.iterations > 0 else False,\n model=args.model)\n # prepare environment\n place = fluid.CPUPlace()\n if args.use_gpu:\n place = fluid.CUDAPlace(0)\n\n exe = fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n\n # load dictionary\n dict_map = None\n if args.dict is not None and os.path.isfile(args.dict):\n dict_map = {}\n with open(args.dict) as dict_file:\n for i, word in enumerate(dict_file):\n dict_map[i] = word.strip()\n print(\"Loaded dict from %s\" % args.dict)\n\n # load init model\n model_dir = args.model_path\n model_file_name = None\n if not os.path.isdir(args.model_path):\n model_dir = os.path.dirname(args.model_path)\n model_file_name = os.path.basename(args.model_path)\n fluid.io.load_params(exe, dirname=model_dir, filename=model_file_name)\n print(\"Init model from: %s.\" % args.model_path)\n\n batch_times = []\n iters = 0\n for data in infer_reader():\n feed_dict = get_feeder_data(data, place, need_label=False)\n if args.iterations > 0 and iters == args.iterations + args.skip_batch_num:\n break\n if iters < args.skip_batch_num:\n print(\"Warm-up itaration\")\n if iters == args.skip_batch_num:\n profiler.reset_profiler()\n\n start = time.time()\n result = exe.run(fluid.default_main_program(),\n feed=feed_dict,\n fetch_list=[ids],\n return_numpy=False)\n indexes = prune(np.array(result[0]).flatten(), 0, 1)\n batch_time = time.time() - start\n fps = args.batch_size / batch_time\n batch_times.append(batch_time)\n if dict_map is not None:\n print(\"Iteration %d, latency: %.5f s, fps: %f, result: %s\" % (\n iters,\n batch_time,\n fps,\n [dict_map[index] for index in indexes], ))\n else:\n print(\"Iteration %d, latency: %.5f s, fps: %f, result: %s\" % (\n iters,\n batch_time,\n fps,\n indexes, ))\n\n iters += 1\n\n latencies = batch_times[args.skip_batch_num:]\n latency_avg = np.average(latencies)\n latency_pc99 = np.percentile(latencies, 99)\n fpses = np.divide(args.batch_size, latencies)\n 
fps_avg = np.average(fpses)\n fps_pc99 = np.percentile(fpses, 1)\n\n # Benchmark output\n print('\\nTotal examples (incl. warm-up): %d' % (iters * args.batch_size))\n print('average latency: %.5f s, 99pc latency: %.5f s' % (latency_avg,\n latency_pc99))\n print('average fps: %.5f, fps for 99pc latency: %.5f' % (fps_avg, fps_pc99))\n\n\ndef prune(words, sos, eos):\n \"\"\"Remove unused tokens in prediction result.\"\"\"\n start_index = 0\n end_index = len(words)\n if sos in words:\n start_index = np.where(words == sos)[0][0] + 1\n if eos in words:\n end_index = np.where(words == eos)[0][0]\n return words[start_index:end_index]\n\n\ndef main():\n args = parser.parse_args()\n print_arguments(args)\n if args.profile:\n if args.use_gpu:\n with profiler.cuda_profiler(\"cuda_profiler.txt\", 'csv') as nvprof:\n inference(args)\n else:\n with profiler.profiler(\"CPU\", sorted_key='total') as cpuprof:\n inference(args)\n else:\n inference(args)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "PaddleCV/ocr_recognition/infer.py"}, {"content": "\"\"\"Contains common utility functions.\"\"\"\n# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.\n#\n#Licensed under the Apache License, Version 2.0 (the \"License\");\n#you may not use this file except in compliance with the License.\n#You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n#Unless required by applicable law or agreed to in writing, software\n#distributed under the License is distributed on an \"AS IS\" BASIS,\n#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#See the License for the specific language governing permissions and\n#limitations under the License.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport distutils.util\nimport numpy as np\nfrom paddle.fluid import core\nimport paddle.fluid as fluid\nimport six\n\n\ndef print_arguments(args):\n \"\"\"Print argparse's arguments.\n\n Usage:\n\n .. code-block:: python\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"name\", default=\"Jonh\", type=str, help=\"User name.\")\n args = parser.parse_args()\n print_arguments(args)\n\n :param args: Input argparse.Namespace for printing.\n :type args: argparse.Namespace\n \"\"\"\n print(\"----------- Configuration Arguments -----------\")\n for arg, value in sorted(six.iteritems(vars(args))):\n print(\"%s: %s\" % (arg, value))\n print(\"------------------------------------------------\")\n\n\ndef add_arguments(argname, type, default, help, argparser, **kwargs):\n \"\"\"Add argparse's argument.\n\n Usage:\n\n .. 
code-block:: python\n\n parser = argparse.ArgumentParser()\n add_argument(\"name\", str, \"Jonh\", \"User name.\", parser)\n args = parser.parse_args()\n \"\"\"\n type = distutils.util.strtobool if type == bool else type\n argparser.add_argument(\n \"--\" + argname,\n default=default,\n type=type,\n help=help + ' Default: %(default)s.',\n **kwargs)\n\n\ndef to_lodtensor(data, place):\n seq_lens = [len(seq) for seq in data]\n cur_len = 0\n lod = [cur_len]\n for l in seq_lens:\n cur_len += l\n lod.append(cur_len)\n flattened_data = np.concatenate(data, axis=0).astype(\"int32\")\n flattened_data = flattened_data.reshape([len(flattened_data), 1])\n res = core.LoDTensor()\n res.set(flattened_data, place)\n res.set_lod([lod])\n return res\n\n\ndef get_ctc_feeder_data(data, place, need_label=True):\n pixel_tensor = core.LoDTensor()\n pixel_data = None\n pixel_data = np.concatenate(\n list(map(lambda x: x[0][np.newaxis, :], data)), axis=0).astype(\"float32\")\n pixel_tensor.set(pixel_data, place)\n label_tensor = to_lodtensor(list(map(lambda x: x[1], data)), place)\n if need_label:\n return {\"pixel\": pixel_tensor, \"label\": label_tensor}\n else:\n return {\"pixel\": pixel_tensor}\n\n\ndef get_attention_feeder_data(data, place, need_label=True):\n pixel_tensor = core.LoDTensor()\n pixel_data = None\n pixel_data = np.concatenate(\n list(map(lambda x: x[0][np.newaxis, :], data)), axis=0).astype(\"float32\")\n pixel_tensor.set(pixel_data, place)\n label_in_tensor = to_lodtensor(list(map(lambda x: x[1], data)), place)\n label_out_tensor = to_lodtensor(list(map(lambda x: x[2], data)), place)\n if need_label:\n return {\n \"pixel\": pixel_tensor,\n \"label_in\": label_in_tensor,\n \"label_out\": label_out_tensor\n }\n else:\n return {\"pixel\": pixel_tensor}\n\n\ndef get_attention_feeder_for_infer(data, place):\n batch_size = len(data)\n init_ids_data = np.array([0 for _ in range(batch_size)], dtype='int64')\n init_scores_data = np.array(\n [1. for _ in range(batch_size)], dtype='float32')\n init_ids_data = init_ids_data.reshape((batch_size, 1))\n init_scores_data = init_scores_data.reshape((batch_size, 1))\n init_recursive_seq_lens = [1] * batch_size\n init_recursive_seq_lens = [init_recursive_seq_lens, init_recursive_seq_lens]\n init_ids = fluid.create_lod_tensor(init_ids_data, init_recursive_seq_lens,\n place)\n init_scores = fluid.create_lod_tensor(init_scores_data,\n init_recursive_seq_lens, place)\n\n pixel_tensor = core.LoDTensor()\n pixel_data = None\n pixel_data = np.concatenate(\n list(map(lambda x: x[0][np.newaxis, :], data)), axis=0).astype(\"float32\")\n pixel_tensor.set(pixel_data, place)\n return {\n \"pixel\": pixel_tensor,\n \"init_ids\": init_ids,\n \"init_scores\": init_scores\n }\n", "path": "PaddleCV/ocr_recognition/utility.py"}]}
4,028
722
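A minimal, self-contained sketch of the pattern behind the PaddleCV diff above: the CTC path gains a `get_ctc_feeder_for_infer` wrapper so both model types expose the same `(data, place)` feeder signature at inference time. The stand-in feeders below are hypothetical simplifications (the real ones build LoDTensors from the batch); only the wrapper and dispatch shape mirror the diff.

```python
# Stand-in for utility.get_ctc_feeder_data: training needs labels, inference does not.
def get_ctc_feeder_data(data, place, need_label=True):
    feed = {"pixel": [sample[0] for sample in data]}
    if need_label:
        feed["label"] = [sample[1] for sample in data]
    return feed


# The wrapper added by the diff: CTC inference now matches the attention path.
def get_ctc_feeder_for_infer(data, place):
    return get_ctc_feeder_data(data, place, need_label=False)


# Stand-in for the existing attention inference feeder (already takes (data, place)).
def get_attention_feeder_for_infer(data, place):
    return {"pixel": [sample[0] for sample in data]}


def pick_feeder(model_name):
    # infer.py can now call feeder(data, place) without passing need_label itself.
    return get_ctc_feeder_for_infer if model_name == "crnn_ctc" else get_attention_feeder_for_infer


if __name__ == "__main__":
    batch = [([0.1, 0.2], [3, 4]), ([0.3, 0.4], [5])]
    print(pick_feeder("crnn_ctc")(batch, place=None))   # no "label" key at inference time
    print(pick_feeder("attention")(batch, place=None))
```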
gh_patches_debug_13410
rasdani/github-patches
git_diff
plotly__plotly.py-958
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Not all dependecies are listed Would it be possible to add pandas (and numpy) to the list of dependencies? They're both imported in various places but not listed. This occasionally causes problems for packages importing plotly, since `pip` may not install all dependencies. Xref: https://github.com/deeptools/deepTools/issues/669 Not all dependecies are listed Would it be possible to add pandas (and numpy) to the list of dependencies? They're both imported in various places but not listed. This occasionally causes problems for packages importing plotly, since `pip` may not install all dependencies. Xref: https://github.com/deeptools/deepTools/issues/669 </issue> <code> [start of plotly/figure_factory/__init__.py] 1 from __future__ import absolute_import 2 3 # Require that numpy exists for figure_factory 4 import numpy 5 6 from plotly.figure_factory._2d_density import create_2d_density 7 from plotly.figure_factory._annotated_heatmap import create_annotated_heatmap 8 from plotly.figure_factory._bullet import create_bullet 9 from plotly.figure_factory._candlestick import create_candlestick 10 from plotly.figure_factory._dendrogram import create_dendrogram 11 from plotly.figure_factory._distplot import create_distplot 12 from plotly.figure_factory._facet_grid import create_facet_grid 13 from plotly.figure_factory._gantt import create_gantt 14 from plotly.figure_factory._ohlc import create_ohlc 15 from plotly.figure_factory._quiver import create_quiver 16 from plotly.figure_factory._scatterplot import create_scatterplotmatrix 17 from plotly.figure_factory._streamline import create_streamline 18 from plotly.figure_factory._table import create_table 19 from plotly.figure_factory._trisurf import create_trisurf 20 from plotly.figure_factory._violin import create_violin 21 from plotly.figure_factory._county_choropleth import create_choropleth [end of plotly/figure_factory/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/plotly/figure_factory/__init__.py b/plotly/figure_factory/__init__.py --- a/plotly/figure_factory/__init__.py +++ b/plotly/figure_factory/__init__.py @@ -1,5 +1,7 @@ from __future__ import absolute_import +from plotly import optional_imports + # Require that numpy exists for figure_factory import numpy @@ -18,4 +20,5 @@ from plotly.figure_factory._table import create_table from plotly.figure_factory._trisurf import create_trisurf from plotly.figure_factory._violin import create_violin -from plotly.figure_factory._county_choropleth import create_choropleth \ No newline at end of file +if optional_imports.get_module('pandas') is not None: + from plotly.figure_factory._county_choropleth import create_choropleth
{"golden_diff": "diff --git a/plotly/figure_factory/__init__.py b/plotly/figure_factory/__init__.py\n--- a/plotly/figure_factory/__init__.py\n+++ b/plotly/figure_factory/__init__.py\n@@ -1,5 +1,7 @@\n from __future__ import absolute_import\n \n+from plotly import optional_imports\n+\n # Require that numpy exists for figure_factory\n import numpy\n \n@@ -18,4 +20,5 @@\n from plotly.figure_factory._table import create_table\n from plotly.figure_factory._trisurf import create_trisurf\n from plotly.figure_factory._violin import create_violin\n-from plotly.figure_factory._county_choropleth import create_choropleth\n\\ No newline at end of file\n+if optional_imports.get_module('pandas') is not None:\n+ from plotly.figure_factory._county_choropleth import create_choropleth\n", "issue": "Not all dependecies are listed\nWould it be possible to add pandas (and numpy) to the list of dependencies? They're both imported in various places but not listed. This occasionally causes problems for packages importing plotly, since `pip` may not install all dependencies.\r\n\r\nXref: https://github.com/deeptools/deepTools/issues/669\nNot all dependecies are listed\nWould it be possible to add pandas (and numpy) to the list of dependencies? They're both imported in various places but not listed. This occasionally causes problems for packages importing plotly, since `pip` may not install all dependencies.\r\n\r\nXref: https://github.com/deeptools/deepTools/issues/669\n", "before_files": [{"content": "from __future__ import absolute_import\n\n# Require that numpy exists for figure_factory\nimport numpy\n\nfrom plotly.figure_factory._2d_density import create_2d_density\nfrom plotly.figure_factory._annotated_heatmap import create_annotated_heatmap\nfrom plotly.figure_factory._bullet import create_bullet\nfrom plotly.figure_factory._candlestick import create_candlestick\nfrom plotly.figure_factory._dendrogram import create_dendrogram\nfrom plotly.figure_factory._distplot import create_distplot\nfrom plotly.figure_factory._facet_grid import create_facet_grid\nfrom plotly.figure_factory._gantt import create_gantt\nfrom plotly.figure_factory._ohlc import create_ohlc\nfrom plotly.figure_factory._quiver import create_quiver\nfrom plotly.figure_factory._scatterplot import create_scatterplotmatrix\nfrom plotly.figure_factory._streamline import create_streamline\nfrom plotly.figure_factory._table import create_table\nfrom plotly.figure_factory._trisurf import create_trisurf\nfrom plotly.figure_factory._violin import create_violin\nfrom plotly.figure_factory._county_choropleth import create_choropleth", "path": "plotly/figure_factory/__init__.py"}]}
977
205
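The plotly fix above gates a pandas-dependent import behind `plotly.optional_imports.get_module`. Below is a generic sketch of that optional-dependency guard using `importlib` instead of plotly's helper; the printed messages are illustrative stand-ins, not plotly's actual behaviour.

```python
import importlib


def get_module(name):
    """Return the imported module, or None if it is not installed (no hard failure)."""
    try:
        return importlib.import_module(name)
    except ImportError:
        return None


# Pattern from the diff: only expose features whose import itself requires pandas.
if get_module("pandas") is not None:
    print("pandas available: safe to import the pandas-only figure factories")
else:
    print("pandas missing: skip those imports so the package still imports cleanly")
```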
gh_patches_debug_21813
rasdani/github-patches
git_diff
openstates__openstates-scrapers-2507
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> PR failing since at least 2018-09-06 PR has been failing since 2018-09-06 Based on automated runs it appears that PR has not run successfully in 3 days (2018-09-06). ``` 00:38:11 ERROR pupa: cannot resolve pseudo id to Person: ~{"name": "Carlos J. Rodr\u00edguez Mateo"} 00:38:11 ERROR pupa: cannot resolve pseudo id to Person: ~{"name": "Roberto Rivera Ruiz De Porras"} 00:38:11 ERROR pupa: cannot resolve pseudo id to Person: ~{"name": "Guillermo Miranda Rivera"} 00:38:11 ERROR pupa: cannot resolve pseudo id to Person: ~{"name": "Antonio L. Soto Torres"} 00:38:11 ERROR pupa: cannot resolve pseudo id to Person: ~{"name": "Jos\u00e9 J. P\u00e9rez Cordero"} 00:38:11 ERROR pupa: cannot resolve pseudo id to Person: ~{"name": "Jos\u00e9 F. Aponte Hern\u00e1ndez"} 00:38:12 ERROR pupa: cannot resolve pseudo id to Person: ~{"name": "Nelson Del Valle Col\u00f3n"} 00:38:12 ERROR pupa: cannot resolve pseudo id to Person: ~{"name": "Evelyn V\u00e1zquez Nieves"} 00:38:12 ERROR pupa: cannot resolve pseudo id to Person: ~{"name": "Mar\u00eda de Lourdes Ramos Rivera"} 00:38:12 ERROR pupa: cannot resolve pseudo id to Person: ~{"name": "Migdalia Padilla Alvelo"} 00:38:12 ERROR pupa: cannot resolve pseudo id to Person: ~{"name": "Jes\u00fas Manuel Ortiz Gonz\u00e1lez"} 00:38:12 ERROR pupa: cannot resolve pseudo id to Person: ~{"name": "Jos\u00e9 L. Dalmau Santiago"} 00:38:12 ERROR pupa: cannot resolve pseudo id to Person: ~{"name": "Mar\u00eda M. Charbonier Laureano"} 00:38:12 ERROR pupa: cannot resolve pseudo id to Person: ~{"name": "Juan O. Morales Rodr\u00edguez"} 00:38:12 ERROR pupa: cannot resolve pseudo id to Person: ~{"name": "Angel J. Mart\u00ednez Santiago"} 00:38:12 ERROR pupa: cannot resolve pseudo id to Person: ~{"name": "Abel Nazario Qui\u00f1ones"} 00:38:12 ERROR pupa: cannot resolve pseudo id to Person: ~{"name": "Axel (Chino) Roque Gracia"} 00:38:12 ERROR pupa: cannot resolve pseudo id to Person: ~{"name": "Jorge Navarro Su\u00e1rez"} loaded Open States pupa settings... pr (scrape, import) people: {} committees: {} bills: {} import jurisdictions... import organizations... import people... import posts... import memberships... 
Traceback (most recent call last): File "/opt/openstates/venv-pupa//bin/pupa", line 11, in <module> load_entry_point('pupa', 'console_scripts', 'pupa')() File "/opt/openstates/venv-pupa/src/pupa/pupa/cli/__main__.py", line 68, in main subcommands[args.subcommand].handle(args, other) File "/opt/openstates/venv-pupa/src/pupa/pupa/cli/commands/update.py", line 260, in handle return self.do_handle(args, other, juris) File "/opt/openstates/venv-pupa/src/pupa/pupa/cli/commands/update.py", line 307, in do_handle report['import'] = self.do_import(juris, args) File "/opt/openstates/venv-pupa/src/pupa/pupa/cli/commands/update.py", line 211, in do_import report.update(membership_importer.import_directory(datadir)) File "/opt/openstates/venv-pupa/src/pupa/pupa/importers/base.py", line 196, in import_directory return self.import_data(json_stream()) File "/opt/openstates/venv-pupa/src/pupa/pupa/importers/base.py", line 233, in import_data obj_id, what = self.import_item(data) File "/opt/openstates/venv-pupa/src/pupa/pupa/importers/base.py", line 254, in import_item data = self.prepare_for_db(data) File "/opt/openstates/venv-pupa/src/pupa/pupa/importers/memberships.py", line 50, in prepare_for_db data['post_id'] = self.post_importer.resolve_json_id(data['post_id']) File "/opt/openstates/venv-pupa/src/pupa/pupa/importers/base.py", line 171, in resolve_json_id raise UnresolvedIdError(errmsg) pupa.exceptions.UnresolvedIdError: cannot resolve pseudo id to Post: ~{"label": "At-Large", "organization__classification": "lower"} ``` Visit http://bobsled.openstates.org for more info. </issue> <code> [start of openstates/pr/__init__.py] 1 from pupa.scrape import Jurisdiction, Organization 2 from .people import PRPersonScraper 3 from .committees import PRCommitteeScraper 4 from .bills import PRBillScraper 5 6 settings = dict(SCRAPELIB_TIMEOUT=300) 7 8 9 class PuertoRico(Jurisdiction): 10 division_id = "ocd-division/country:us/territory:pr" 11 classification = "government" 12 name = "Puerto Rico" 13 url = "http://www.oslpr.org/" 14 scrapers = { 15 'people': PRPersonScraper, 16 'committees': PRCommitteeScraper, 17 'bills': PRBillScraper, 18 } 19 legislative_sessions = [ 20 { 21 "_scraped_name": "2009-2012", 22 "identifier": "2009-2012", 23 "name": "2009-2012 Session" 24 }, 25 { 26 "_scraped_name": "2013-2016", 27 "identifier": "2013-2016", 28 "name": "2013-2016 Session" 29 }, 30 { 31 "_scraped_name": "2017-2020", 32 "identifier": "2017-2020", 33 "name": "2017-2020 Session", 34 "start_date": "2017-01-02", 35 "end_date": "2021-01-01", 36 } 37 ] 38 ignored_scraped_sessions = [ 39 "2005-2008", 40 "2001-2004", 41 "1997-2000", 42 "1993-1996" 43 ] 44 45 def get_organizations(self): 46 legislature_name = "Legislative Assembly of Puerto Rico" 47 lower_chamber_name = "House" 48 lower_title = "Senator" 49 upper_chamber_name = "Senate" 50 upper_title = "Senator" 51 52 legislature = Organization(name=legislature_name, 53 classification="legislature") 54 upper = Organization(upper_chamber_name, classification='upper', 55 parent_id=legislature._id) 56 lower = Organization(lower_chamber_name, classification='lower', 57 parent_id=legislature._id) 58 59 # 8 districts w/ 2 members, + 11 at larg 60 for i, d in enumerate(('I', 'II', 'III', 'IV', 'V', 'VI', 'VII', 'VIII')): 61 upper.add_post(label=d, role=upper_title, 62 division_id='{}/sldu:{}'.format(self.division_id, i + 1)) 63 64 # lower house is 40 seats, + 11 at large 65 for n in range(1, 41): 66 lower.add_post( 67 label=str(n), role=lower_title, 68 
division_id='{}/sldl:{}'.format(self.division_id, n)) 69 70 # TODO: add at large 71 72 yield Organization(name='Office of the Governor', classification='executive') 73 yield legislature 74 yield upper 75 yield lower 76 77 def get_session_list(self): 78 from openstates.utils import url_xpath 79 # this URL should work even for future sessions 80 return url_xpath('http://www.oslpr.org/legislatura/tl2013/buscar_2013.asp', 81 '//select[@name="URL"]/option/text()') 82 [end of openstates/pr/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/openstates/pr/__init__.py b/openstates/pr/__init__.py --- a/openstates/pr/__init__.py +++ b/openstates/pr/__init__.py @@ -60,14 +60,16 @@ for i, d in enumerate(('I', 'II', 'III', 'IV', 'V', 'VI', 'VII', 'VIII')): upper.add_post(label=d, role=upper_title, division_id='{}/sldu:{}'.format(self.division_id, i + 1)) + upper.add_post(label='At-Large', role=upper_title, + division_id='{}/sldu'.format(self.division_id)) # lower house is 40 seats, + 11 at large for n in range(1, 41): lower.add_post( label=str(n), role=lower_title, division_id='{}/sldl:{}'.format(self.division_id, n)) - - # TODO: add at large + lower.add_post(label='At-Large', role=lower_title, + division_id='{}/sldl'.format(self.division_id)) yield Organization(name='Office of the Governor', classification='executive') yield legislature
{"golden_diff": "diff --git a/openstates/pr/__init__.py b/openstates/pr/__init__.py\n--- a/openstates/pr/__init__.py\n+++ b/openstates/pr/__init__.py\n@@ -60,14 +60,16 @@\n for i, d in enumerate(('I', 'II', 'III', 'IV', 'V', 'VI', 'VII', 'VIII')):\n upper.add_post(label=d, role=upper_title,\n division_id='{}/sldu:{}'.format(self.division_id, i + 1))\n+ upper.add_post(label='At-Large', role=upper_title,\n+ division_id='{}/sldu'.format(self.division_id))\n \n # lower house is 40 seats, + 11 at large\n for n in range(1, 41):\n lower.add_post(\n label=str(n), role=lower_title,\n division_id='{}/sldl:{}'.format(self.division_id, n))\n-\n- # TODO: add at large\n+ lower.add_post(label='At-Large', role=lower_title,\n+ division_id='{}/sldl'.format(self.division_id))\n \n yield Organization(name='Office of the Governor', classification='executive')\n yield legislature\n", "issue": "PR failing since at least 2018-09-06\nPR has been failing since 2018-09-06\n\nBased on automated runs it appears that PR has not run successfully in 3 days (2018-09-06).\n\n\n```\n 00:38:11 ERROR pupa: cannot resolve pseudo id to Person: ~{\"name\": \"Carlos J. Rodr\\u00edguez Mateo\"}\n00:38:11 ERROR pupa: cannot resolve pseudo id to Person: ~{\"name\": \"Roberto Rivera Ruiz De Porras\"}\n00:38:11 ERROR pupa: cannot resolve pseudo id to Person: ~{\"name\": \"Guillermo Miranda Rivera\"}\n00:38:11 ERROR pupa: cannot resolve pseudo id to Person: ~{\"name\": \"Antonio L. Soto Torres\"}\n00:38:11 ERROR pupa: cannot resolve pseudo id to Person: ~{\"name\": \"Jos\\u00e9 J. P\\u00e9rez Cordero\"}\n00:38:11 ERROR pupa: cannot resolve pseudo id to Person: ~{\"name\": \"Jos\\u00e9 F. Aponte Hern\\u00e1ndez\"}\n00:38:12 ERROR pupa: cannot resolve pseudo id to Person: ~{\"name\": \"Nelson Del Valle Col\\u00f3n\"}\n00:38:12 ERROR pupa: cannot resolve pseudo id to Person: ~{\"name\": \"Evelyn V\\u00e1zquez Nieves\"}\n00:38:12 ERROR pupa: cannot resolve pseudo id to Person: ~{\"name\": \"Mar\\u00eda de Lourdes Ramos Rivera\"}\n00:38:12 ERROR pupa: cannot resolve pseudo id to Person: ~{\"name\": \"Migdalia Padilla Alvelo\"}\n00:38:12 ERROR pupa: cannot resolve pseudo id to Person: ~{\"name\": \"Jes\\u00fas Manuel Ortiz Gonz\\u00e1lez\"}\n00:38:12 ERROR pupa: cannot resolve pseudo id to Person: ~{\"name\": \"Jos\\u00e9 L. Dalmau Santiago\"}\n00:38:12 ERROR pupa: cannot resolve pseudo id to Person: ~{\"name\": \"Mar\\u00eda M. Charbonier Laureano\"}\n00:38:12 ERROR pupa: cannot resolve pseudo id to Person: ~{\"name\": \"Juan O. Morales Rodr\\u00edguez\"}\n00:38:12 ERROR pupa: cannot resolve pseudo id to Person: ~{\"name\": \"Angel J. 
Mart\\u00ednez Santiago\"}\n00:38:12 ERROR pupa: cannot resolve pseudo id to Person: ~{\"name\": \"Abel Nazario Qui\\u00f1ones\"}\n00:38:12 ERROR pupa: cannot resolve pseudo id to Person: ~{\"name\": \"Axel (Chino) Roque Gracia\"}\n00:38:12 ERROR pupa: cannot resolve pseudo id to Person: ~{\"name\": \"Jorge Navarro Su\\u00e1rez\"}\nloaded Open States pupa settings...\npr (scrape, import)\n people: {}\n committees: {}\n bills: {}\nimport jurisdictions...\nimport organizations...\nimport people...\nimport posts...\nimport memberships...\nTraceback (most recent call last):\n File \"/opt/openstates/venv-pupa//bin/pupa\", line 11, in <module>\n load_entry_point('pupa', 'console_scripts', 'pupa')()\n File \"/opt/openstates/venv-pupa/src/pupa/pupa/cli/__main__.py\", line 68, in main\n subcommands[args.subcommand].handle(args, other)\n File \"/opt/openstates/venv-pupa/src/pupa/pupa/cli/commands/update.py\", line 260, in handle\n return self.do_handle(args, other, juris)\n File \"/opt/openstates/venv-pupa/src/pupa/pupa/cli/commands/update.py\", line 307, in do_handle\n report['import'] = self.do_import(juris, args)\n File \"/opt/openstates/venv-pupa/src/pupa/pupa/cli/commands/update.py\", line 211, in do_import\n report.update(membership_importer.import_directory(datadir))\n File \"/opt/openstates/venv-pupa/src/pupa/pupa/importers/base.py\", line 196, in import_directory\n return self.import_data(json_stream())\n File \"/opt/openstates/venv-pupa/src/pupa/pupa/importers/base.py\", line 233, in import_data\n obj_id, what = self.import_item(data)\n File \"/opt/openstates/venv-pupa/src/pupa/pupa/importers/base.py\", line 254, in import_item\n data = self.prepare_for_db(data)\n File \"/opt/openstates/venv-pupa/src/pupa/pupa/importers/memberships.py\", line 50, in prepare_for_db\n data['post_id'] = self.post_importer.resolve_json_id(data['post_id'])\n File \"/opt/openstates/venv-pupa/src/pupa/pupa/importers/base.py\", line 171, in resolve_json_id\n raise UnresolvedIdError(errmsg)\npupa.exceptions.UnresolvedIdError: cannot resolve pseudo id to Post: ~{\"label\": \"At-Large\", \"organization__classification\": \"lower\"}\n```\n\nVisit http://bobsled.openstates.org for more info.\n\n", "before_files": [{"content": "from pupa.scrape import Jurisdiction, Organization\nfrom .people import PRPersonScraper\nfrom .committees import PRCommitteeScraper\nfrom .bills import PRBillScraper\n\nsettings = dict(SCRAPELIB_TIMEOUT=300)\n\n\nclass PuertoRico(Jurisdiction):\n division_id = \"ocd-division/country:us/territory:pr\"\n classification = \"government\"\n name = \"Puerto Rico\"\n url = \"http://www.oslpr.org/\"\n scrapers = {\n 'people': PRPersonScraper,\n 'committees': PRCommitteeScraper,\n 'bills': PRBillScraper,\n }\n legislative_sessions = [\n {\n \"_scraped_name\": \"2009-2012\",\n \"identifier\": \"2009-2012\",\n \"name\": \"2009-2012 Session\"\n },\n {\n \"_scraped_name\": \"2013-2016\",\n \"identifier\": \"2013-2016\",\n \"name\": \"2013-2016 Session\"\n },\n {\n \"_scraped_name\": \"2017-2020\",\n \"identifier\": \"2017-2020\",\n \"name\": \"2017-2020 Session\",\n \"start_date\": \"2017-01-02\",\n \"end_date\": \"2021-01-01\",\n }\n ]\n ignored_scraped_sessions = [\n \"2005-2008\",\n \"2001-2004\",\n \"1997-2000\",\n \"1993-1996\"\n ]\n\n def get_organizations(self):\n legislature_name = \"Legislative Assembly of Puerto Rico\"\n lower_chamber_name = \"House\"\n lower_title = \"Senator\"\n upper_chamber_name = \"Senate\"\n upper_title = \"Senator\"\n\n legislature = Organization(name=legislature_name,\n 
classification=\"legislature\")\n upper = Organization(upper_chamber_name, classification='upper',\n parent_id=legislature._id)\n lower = Organization(lower_chamber_name, classification='lower',\n parent_id=legislature._id)\n\n # 8 districts w/ 2 members, + 11 at larg\n for i, d in enumerate(('I', 'II', 'III', 'IV', 'V', 'VI', 'VII', 'VIII')):\n upper.add_post(label=d, role=upper_title,\n division_id='{}/sldu:{}'.format(self.division_id, i + 1))\n\n # lower house is 40 seats, + 11 at large\n for n in range(1, 41):\n lower.add_post(\n label=str(n), role=lower_title,\n division_id='{}/sldl:{}'.format(self.division_id, n))\n\n # TODO: add at large\n\n yield Organization(name='Office of the Governor', classification='executive')\n yield legislature\n yield upper\n yield lower\n\n def get_session_list(self):\n from openstates.utils import url_xpath\n # this URL should work even for future sessions\n return url_xpath('http://www.oslpr.org/legislatura/tl2013/buscar_2013.asp',\n '//select[@name=\"URL\"]/option/text()')\n", "path": "openstates/pr/__init__.py"}]}
2,721
277
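The openstates failure above is an unresolved pseudo id: pupa can only match `~{"label": "At-Large", ...}` if a Post with that label exists on the chamber, which is exactly what the diff adds. The toy resolver below (not pupa's real importer) shows why the extra `add_post` calls make the error go away.

```python
# Posts registered by get_organizations() after the diff: district seats
# plus an "At-Large" post per chamber.
posts = (
    {("upper", d) for d in ("I", "II", "III", "IV", "V", "VI", "VII", "VIII")}
    | {("lower", str(n)) for n in range(1, 41)}
    | {("upper", "At-Large"), ("lower", "At-Large")}
)


def resolve_post(pseudo_id):
    key = (pseudo_id["organization__classification"], pseudo_id["label"])
    if key not in posts:
        raise KeyError(f"cannot resolve pseudo id to Post: {pseudo_id}")
    return key


print(resolve_post({"label": "At-Large", "organization__classification": "lower"}))
```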
gh_patches_debug_40053
rasdani/github-patches
git_diff
getmoto__moto-242
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Multiple region support We'd like to support multiple regions. For example, an SNS instance could pass messages to SQS instances located in different regions. </issue> <code> [start of moto/ec2/responses/vpcs.py] 1 from __future__ import unicode_literals 2 from jinja2 import Template 3 4 from moto.core.responses import BaseResponse 5 from moto.ec2.models import ec2_backend 6 from moto.ec2.utils import filters_from_querystring, vpc_ids_from_querystring 7 8 9 class VPCs(BaseResponse): 10 def create_vpc(self): 11 cidr_block = self.querystring.get('CidrBlock')[0] 12 vpc = ec2_backend.create_vpc(cidr_block) 13 template = Template(CREATE_VPC_RESPONSE) 14 return template.render(vpc=vpc) 15 16 def delete_vpc(self): 17 vpc_id = self.querystring.get('VpcId')[0] 18 vpc = ec2_backend.delete_vpc(vpc_id) 19 template = Template(DELETE_VPC_RESPONSE) 20 return template.render(vpc=vpc) 21 22 def describe_vpcs(self): 23 vpc_ids = vpc_ids_from_querystring(self.querystring) 24 filters = filters_from_querystring(self.querystring) 25 vpcs = ec2_backend.get_all_vpcs(vpc_ids=vpc_ids, filters=filters) 26 template = Template(DESCRIBE_VPCS_RESPONSE) 27 return template.render(vpcs=vpcs) 28 29 30 CREATE_VPC_RESPONSE = """ 31 <CreateVpcResponse xmlns="http://ec2.amazonaws.com/doc/2012-12-01/"> 32 <requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId> 33 <vpc> 34 <vpcId>{{ vpc.id }}</vpcId> 35 <state>pending</state> 36 <cidrBlock>{{ vpc.cidr_block }}</cidrBlock> 37 <dhcpOptionsId>dopt-1a2b3c4d2</dhcpOptionsId> 38 <instanceTenancy>default</instanceTenancy> 39 <tagSet> 40 {% for tag in vpc.get_tags() %} 41 <item> 42 <resourceId>{{ tag.resource_id }}</resourceId> 43 <resourceType>{{ tag.resource_type }}</resourceType> 44 <key>{{ tag.key }}</key> 45 <value>{{ tag.value }}</value> 46 </item> 47 {% endfor %} 48 </tagSet> 49 </vpc> 50 </CreateVpcResponse>""" 51 52 DESCRIBE_VPCS_RESPONSE = """ 53 <DescribeVpcsResponse xmlns="http://ec2.amazonaws.com/doc/2012-12-01/"> 54 <requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId> 55 <vpcSet> 56 {% for vpc in vpcs %} 57 <item> 58 <vpcId>{{ vpc.id }}</vpcId> 59 <state>{{ vpc.state }}</state> 60 <cidrBlock>{{ vpc.cidr_block }}</cidrBlock> 61 <dhcpOptionsId>dopt-7a8b9c2d</dhcpOptionsId> 62 <instanceTenancy>default</instanceTenancy> 63 <tagSet> 64 {% for tag in vpc.get_tags() %} 65 <item> 66 <resourceId>{{ tag.resource_id }}</resourceId> 67 <resourceType>{{ tag.resource_type }}</resourceType> 68 <key>{{ tag.key }}</key> 69 <value>{{ tag.value }}</value> 70 </item> 71 {% endfor %} 72 </tagSet> 73 </item> 74 {% endfor %} 75 </vpcSet> 76 </DescribeVpcsResponse>""" 77 78 DELETE_VPC_RESPONSE = """ 79 <DeleteVpcResponse xmlns="http://ec2.amazonaws.com/doc/2012-12-01/"> 80 <requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId> 81 <return>true</return> 82 </DeleteVpcResponse> 83 """ 84 [end of moto/ec2/responses/vpcs.py] [start of moto/ec2/responses/subnets.py] 1 from __future__ import unicode_literals 2 from jinja2 import Template 3 4 from moto.core.responses import BaseResponse 5 from moto.ec2.models import ec2_backend 6 from moto.ec2.utils import filters_from_querystring 7 8 9 class Subnets(BaseResponse): 10 def create_subnet(self): 11 vpc_id = self.querystring.get('VpcId')[0] 12 cidr_block = self.querystring.get('CidrBlock')[0] 13 subnet = ec2_backend.create_subnet(vpc_id, cidr_block) 14 template = Template(CREATE_SUBNET_RESPONSE) 15 return template.render(subnet=subnet) 16 
17 def delete_subnet(self): 18 subnet_id = self.querystring.get('SubnetId')[0] 19 subnet = ec2_backend.delete_subnet(subnet_id) 20 template = Template(DELETE_SUBNET_RESPONSE) 21 return template.render(subnet=subnet) 22 23 def describe_subnets(self): 24 filters = filters_from_querystring(self.querystring) 25 subnets = ec2_backend.get_all_subnets(filters) 26 template = Template(DESCRIBE_SUBNETS_RESPONSE) 27 return template.render(subnets=subnets) 28 29 30 CREATE_SUBNET_RESPONSE = """ 31 <CreateSubnetResponse xmlns="http://ec2.amazonaws.com/doc/2012-12-01/"> 32 <requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId> 33 <subnet> 34 <subnetId>{{ subnet.id }}</subnetId> 35 <state>pending</state> 36 <vpcId>{{ subnet.vpc_id }}</vpcId> 37 <cidrBlock>{{ subnet.cidr_block }}</cidrBlock> 38 <availableIpAddressCount>251</availableIpAddressCount> 39 <availabilityZone>us-east-1a</availabilityZone> 40 <tagSet> 41 {% for tag in subnet.get_tags() %} 42 <item> 43 <resourceId>{{ tag.resource_id }}</resourceId> 44 <resourceType>{{ tag.resource_type }}</resourceType> 45 <key>{{ tag.key }}</key> 46 <value>{{ tag.value }}</value> 47 </item> 48 {% endfor %} 49 </tagSet> 50 </subnet> 51 </CreateSubnetResponse>""" 52 53 DELETE_SUBNET_RESPONSE = """ 54 <DeleteSubnetResponse xmlns="http://ec2.amazonaws.com/doc/2012-12-01/"> 55 <requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId> 56 <return>true</return> 57 </DeleteSubnetResponse>""" 58 59 DESCRIBE_SUBNETS_RESPONSE = """ 60 <DescribeSubnetsResponse xmlns="http://ec2.amazonaws.com/doc/2012-12-01/"> 61 <requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId> 62 <subnetSet> 63 {% for subnet in subnets %} 64 <item> 65 <subnetId>{{ subnet.id }}</subnetId> 66 <state>available</state> 67 <vpcId>{{ subnet.vpc_id }}</vpcId> 68 <cidrBlock>{{ subnet.cidr_block }}</cidrBlock> 69 <availableIpAddressCount>251</availableIpAddressCount> 70 <availabilityZone>us-east-1a</availabilityZone> 71 <tagSet> 72 {% for tag in subnet.get_tags() %} 73 <item> 74 <resourceId>{{ tag.resource_id }}</resourceId> 75 <resourceType>{{ tag.resource_type }}</resourceType> 76 <key>{{ tag.key }}</key> 77 <value>{{ tag.value }}</value> 78 </item> 79 {% endfor %} 80 </tagSet> 81 </item> 82 {% endfor %} 83 </subnetSet> 84 </DescribeSubnetsResponse>""" 85 [end of moto/ec2/responses/subnets.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/moto/ec2/responses/subnets.py b/moto/ec2/responses/subnets.py --- a/moto/ec2/responses/subnets.py +++ b/moto/ec2/responses/subnets.py @@ -2,7 +2,6 @@ from jinja2 import Template from moto.core.responses import BaseResponse -from moto.ec2.models import ec2_backend from moto.ec2.utils import filters_from_querystring @@ -10,19 +9,19 @@ def create_subnet(self): vpc_id = self.querystring.get('VpcId')[0] cidr_block = self.querystring.get('CidrBlock')[0] - subnet = ec2_backend.create_subnet(vpc_id, cidr_block) + subnet = self.ec2_backend.create_subnet(vpc_id, cidr_block) template = Template(CREATE_SUBNET_RESPONSE) return template.render(subnet=subnet) def delete_subnet(self): subnet_id = self.querystring.get('SubnetId')[0] - subnet = ec2_backend.delete_subnet(subnet_id) + subnet = self.ec2_backend.delete_subnet(subnet_id) template = Template(DELETE_SUBNET_RESPONSE) return template.render(subnet=subnet) def describe_subnets(self): filters = filters_from_querystring(self.querystring) - subnets = ec2_backend.get_all_subnets(filters) + subnets = self.ec2_backend.get_all_subnets(filters) template = Template(DESCRIBE_SUBNETS_RESPONSE) return template.render(subnets=subnets) diff --git a/moto/ec2/responses/vpcs.py b/moto/ec2/responses/vpcs.py --- a/moto/ec2/responses/vpcs.py +++ b/moto/ec2/responses/vpcs.py @@ -2,27 +2,26 @@ from jinja2 import Template from moto.core.responses import BaseResponse -from moto.ec2.models import ec2_backend from moto.ec2.utils import filters_from_querystring, vpc_ids_from_querystring class VPCs(BaseResponse): def create_vpc(self): cidr_block = self.querystring.get('CidrBlock')[0] - vpc = ec2_backend.create_vpc(cidr_block) + vpc = self.ec2_backend.create_vpc(cidr_block) template = Template(CREATE_VPC_RESPONSE) return template.render(vpc=vpc) def delete_vpc(self): vpc_id = self.querystring.get('VpcId')[0] - vpc = ec2_backend.delete_vpc(vpc_id) + vpc = self.ec2_backend.delete_vpc(vpc_id) template = Template(DELETE_VPC_RESPONSE) return template.render(vpc=vpc) def describe_vpcs(self): vpc_ids = vpc_ids_from_querystring(self.querystring) filters = filters_from_querystring(self.querystring) - vpcs = ec2_backend.get_all_vpcs(vpc_ids=vpc_ids, filters=filters) + vpcs = self.ec2_backend.get_all_vpcs(vpc_ids=vpc_ids, filters=filters) template = Template(DESCRIBE_VPCS_RESPONSE) return template.render(vpcs=vpcs)
{"golden_diff": "diff --git a/moto/ec2/responses/subnets.py b/moto/ec2/responses/subnets.py\n--- a/moto/ec2/responses/subnets.py\n+++ b/moto/ec2/responses/subnets.py\n@@ -2,7 +2,6 @@\n from jinja2 import Template\n \n from moto.core.responses import BaseResponse\n-from moto.ec2.models import ec2_backend\n from moto.ec2.utils import filters_from_querystring\n \n \n@@ -10,19 +9,19 @@\n def create_subnet(self):\n vpc_id = self.querystring.get('VpcId')[0]\n cidr_block = self.querystring.get('CidrBlock')[0]\n- subnet = ec2_backend.create_subnet(vpc_id, cidr_block)\n+ subnet = self.ec2_backend.create_subnet(vpc_id, cidr_block)\n template = Template(CREATE_SUBNET_RESPONSE)\n return template.render(subnet=subnet)\n \n def delete_subnet(self):\n subnet_id = self.querystring.get('SubnetId')[0]\n- subnet = ec2_backend.delete_subnet(subnet_id)\n+ subnet = self.ec2_backend.delete_subnet(subnet_id)\n template = Template(DELETE_SUBNET_RESPONSE)\n return template.render(subnet=subnet)\n \n def describe_subnets(self):\n filters = filters_from_querystring(self.querystring)\n- subnets = ec2_backend.get_all_subnets(filters)\n+ subnets = self.ec2_backend.get_all_subnets(filters)\n template = Template(DESCRIBE_SUBNETS_RESPONSE)\n return template.render(subnets=subnets)\n \ndiff --git a/moto/ec2/responses/vpcs.py b/moto/ec2/responses/vpcs.py\n--- a/moto/ec2/responses/vpcs.py\n+++ b/moto/ec2/responses/vpcs.py\n@@ -2,27 +2,26 @@\n from jinja2 import Template\n \n from moto.core.responses import BaseResponse\n-from moto.ec2.models import ec2_backend\n from moto.ec2.utils import filters_from_querystring, vpc_ids_from_querystring\n \n \n class VPCs(BaseResponse):\n def create_vpc(self):\n cidr_block = self.querystring.get('CidrBlock')[0]\n- vpc = ec2_backend.create_vpc(cidr_block)\n+ vpc = self.ec2_backend.create_vpc(cidr_block)\n template = Template(CREATE_VPC_RESPONSE)\n return template.render(vpc=vpc)\n \n def delete_vpc(self):\n vpc_id = self.querystring.get('VpcId')[0]\n- vpc = ec2_backend.delete_vpc(vpc_id)\n+ vpc = self.ec2_backend.delete_vpc(vpc_id)\n template = Template(DELETE_VPC_RESPONSE)\n return template.render(vpc=vpc)\n \n def describe_vpcs(self):\n vpc_ids = vpc_ids_from_querystring(self.querystring)\n filters = filters_from_querystring(self.querystring)\n- vpcs = ec2_backend.get_all_vpcs(vpc_ids=vpc_ids, filters=filters)\n+ vpcs = self.ec2_backend.get_all_vpcs(vpc_ids=vpc_ids, filters=filters)\n template = Template(DESCRIBE_VPCS_RESPONSE)\n return template.render(vpcs=vpcs)\n", "issue": "Multiple region support\nWe'd like to support multiple regions. 
For example, an SNS instance could pass messages to SQS instances located in different regions.\n\n", "before_files": [{"content": "from __future__ import unicode_literals\nfrom jinja2 import Template\n\nfrom moto.core.responses import BaseResponse\nfrom moto.ec2.models import ec2_backend\nfrom moto.ec2.utils import filters_from_querystring, vpc_ids_from_querystring\n\n\nclass VPCs(BaseResponse):\n def create_vpc(self):\n cidr_block = self.querystring.get('CidrBlock')[0]\n vpc = ec2_backend.create_vpc(cidr_block)\n template = Template(CREATE_VPC_RESPONSE)\n return template.render(vpc=vpc)\n\n def delete_vpc(self):\n vpc_id = self.querystring.get('VpcId')[0]\n vpc = ec2_backend.delete_vpc(vpc_id)\n template = Template(DELETE_VPC_RESPONSE)\n return template.render(vpc=vpc)\n\n def describe_vpcs(self):\n vpc_ids = vpc_ids_from_querystring(self.querystring)\n filters = filters_from_querystring(self.querystring)\n vpcs = ec2_backend.get_all_vpcs(vpc_ids=vpc_ids, filters=filters)\n template = Template(DESCRIBE_VPCS_RESPONSE)\n return template.render(vpcs=vpcs)\n\n\nCREATE_VPC_RESPONSE = \"\"\"\n<CreateVpcResponse xmlns=\"http://ec2.amazonaws.com/doc/2012-12-01/\">\n <requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>\n <vpc>\n <vpcId>{{ vpc.id }}</vpcId>\n <state>pending</state>\n <cidrBlock>{{ vpc.cidr_block }}</cidrBlock>\n <dhcpOptionsId>dopt-1a2b3c4d2</dhcpOptionsId>\n <instanceTenancy>default</instanceTenancy>\n <tagSet>\n {% for tag in vpc.get_tags() %}\n <item>\n <resourceId>{{ tag.resource_id }}</resourceId>\n <resourceType>{{ tag.resource_type }}</resourceType>\n <key>{{ tag.key }}</key>\n <value>{{ tag.value }}</value>\n </item>\n {% endfor %}\n </tagSet>\n </vpc>\n</CreateVpcResponse>\"\"\"\n\nDESCRIBE_VPCS_RESPONSE = \"\"\"\n<DescribeVpcsResponse xmlns=\"http://ec2.amazonaws.com/doc/2012-12-01/\">\n <requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>\n <vpcSet>\n {% for vpc in vpcs %}\n <item>\n <vpcId>{{ vpc.id }}</vpcId>\n <state>{{ vpc.state }}</state>\n <cidrBlock>{{ vpc.cidr_block }}</cidrBlock>\n <dhcpOptionsId>dopt-7a8b9c2d</dhcpOptionsId>\n <instanceTenancy>default</instanceTenancy>\n <tagSet>\n {% for tag in vpc.get_tags() %}\n <item>\n <resourceId>{{ tag.resource_id }}</resourceId>\n <resourceType>{{ tag.resource_type }}</resourceType>\n <key>{{ tag.key }}</key>\n <value>{{ tag.value }}</value>\n </item>\n {% endfor %}\n </tagSet>\n </item>\n {% endfor %}\n </vpcSet>\n</DescribeVpcsResponse>\"\"\"\n\nDELETE_VPC_RESPONSE = \"\"\"\n<DeleteVpcResponse xmlns=\"http://ec2.amazonaws.com/doc/2012-12-01/\">\n <requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>\n <return>true</return>\n</DeleteVpcResponse>\n\"\"\"\n", "path": "moto/ec2/responses/vpcs.py"}, {"content": "from __future__ import unicode_literals\nfrom jinja2 import Template\n\nfrom moto.core.responses import BaseResponse\nfrom moto.ec2.models import ec2_backend\nfrom moto.ec2.utils import filters_from_querystring\n\n\nclass Subnets(BaseResponse):\n def create_subnet(self):\n vpc_id = self.querystring.get('VpcId')[0]\n cidr_block = self.querystring.get('CidrBlock')[0]\n subnet = ec2_backend.create_subnet(vpc_id, cidr_block)\n template = Template(CREATE_SUBNET_RESPONSE)\n return template.render(subnet=subnet)\n\n def delete_subnet(self):\n subnet_id = self.querystring.get('SubnetId')[0]\n subnet = ec2_backend.delete_subnet(subnet_id)\n template = Template(DELETE_SUBNET_RESPONSE)\n return template.render(subnet=subnet)\n\n def describe_subnets(self):\n filters = 
filters_from_querystring(self.querystring)\n subnets = ec2_backend.get_all_subnets(filters)\n template = Template(DESCRIBE_SUBNETS_RESPONSE)\n return template.render(subnets=subnets)\n\n\nCREATE_SUBNET_RESPONSE = \"\"\"\n<CreateSubnetResponse xmlns=\"http://ec2.amazonaws.com/doc/2012-12-01/\">\n <requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>\n <subnet>\n <subnetId>{{ subnet.id }}</subnetId>\n <state>pending</state>\n <vpcId>{{ subnet.vpc_id }}</vpcId>\n <cidrBlock>{{ subnet.cidr_block }}</cidrBlock>\n <availableIpAddressCount>251</availableIpAddressCount>\n <availabilityZone>us-east-1a</availabilityZone>\n <tagSet>\n {% for tag in subnet.get_tags() %}\n <item>\n <resourceId>{{ tag.resource_id }}</resourceId>\n <resourceType>{{ tag.resource_type }}</resourceType>\n <key>{{ tag.key }}</key>\n <value>{{ tag.value }}</value>\n </item>\n {% endfor %}\n </tagSet>\n </subnet>\n</CreateSubnetResponse>\"\"\"\n\nDELETE_SUBNET_RESPONSE = \"\"\"\n<DeleteSubnetResponse xmlns=\"http://ec2.amazonaws.com/doc/2012-12-01/\">\n <requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>\n <return>true</return>\n</DeleteSubnetResponse>\"\"\"\n\nDESCRIBE_SUBNETS_RESPONSE = \"\"\"\n<DescribeSubnetsResponse xmlns=\"http://ec2.amazonaws.com/doc/2012-12-01/\">\n <requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>\n <subnetSet>\n {% for subnet in subnets %}\n <item>\n <subnetId>{{ subnet.id }}</subnetId>\n <state>available</state>\n <vpcId>{{ subnet.vpc_id }}</vpcId>\n <cidrBlock>{{ subnet.cidr_block }}</cidrBlock>\n <availableIpAddressCount>251</availableIpAddressCount>\n <availabilityZone>us-east-1a</availabilityZone>\n <tagSet>\n {% for tag in subnet.get_tags() %}\n <item>\n <resourceId>{{ tag.resource_id }}</resourceId>\n <resourceType>{{ tag.resource_type }}</resourceType>\n <key>{{ tag.key }}</key>\n <value>{{ tag.value }}</value>\n </item>\n {% endfor %}\n </tagSet>\n </item>\n {% endfor %}\n </subnetSet>\n</DescribeSubnetsResponse>\"\"\"\n", "path": "moto/ec2/responses/subnets.py"}]}
2,575
701
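The moto diff above swaps the module-level `ec2_backend` import for `self.ec2_backend` on each response class, which is the hook that lets a request resolve a region-specific backend. Here is a generic sketch of that idea with hypothetical classes; it is not moto's actual backend registry.

```python
class FakeEC2Backend:
    """Per-region state holder (stand-in for moto's real backend objects)."""

    def __init__(self, region):
        self.region = region
        self.vpcs = {}


_backends = {}


def backend_for(region):
    # One isolated backend instance per region name.
    return _backends.setdefault(region, FakeEC2Backend(region))


class BaseResponse:
    def __init__(self, region):
        self.region = region

    @property
    def ec2_backend(self):
        # Resolved per request instead of imported once at module level,
        # so two regions never share state.
        return backend_for(self.region)


print(BaseResponse("us-east-1").ec2_backend is BaseResponse("eu-west-1").ec2_backend)  # False
```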
gh_patches_debug_23590
rasdani/github-patches
git_diff
cupy__cupy-7176
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Cupyx.profiler benchmark implicitly converts from micro to milliseconds depending on object methods. ### Description When reading the official CuPY documentation, a note on benchmarking says that it is important to use the built-in ```cupyx.profiler``` benchmark tool. However, this function returns a type ```_PerfCaseResult```. This object has several, very sparsely documented methods. If one calls ```print()``` on the entire object, the object is printed to console and runs are reported in microseconds. This is fairly standard for benchmarking. However, what is also extremely important in benchmarking is that the units stay consistent. If one wishes to access the ```cpu_times``` or ```gpu_times``` directly from the object, an array containing the run-times is printed containing the run-times in MILLIseconds. There are no warnings or documentation of this implicit conversion, as the official documentation is: ``` cpu_times Returns an array of CPU times of size n_repeat. gpu_times Returns an array of GPU times of size n_repeat. ``` ### To Reproduce ```py import cupy as cp import sys from helpers import * from cupyx.profiler import benchmark def generate_array(size, work_units): #Split this function up for timing the work only rand = cp.random.default_rng() #This is the fast way of creating large arrays with cp x = rand.integers(0, 100_000, (size, work_units)) #Create array return x def minmax(size, work_units): x_min = cp.min(arr) x_max = cp.max(arr) y = (arr-x_min)/(x_max-x_min) size = len(y) #Force execution return size if __name__ == '__main__': sz = int(sys.argv[1]) wk = int(sys.argv[2]) arr = generate_array(sz, wk) y = benchmark(minmax, (sz, wk), n_repeat=1, n_warmup=0) print(y.cpu_times) print(y) ``` ### Installation _No response_ ### Environment ``` OS : Linux-5.15.0-52-generic-x86_64-with-glibc2.35 Python Version : 3.9.13 CuPy Version : 10.6.0 CuPy Platform : NVIDIA CUDA NumPy Version : 1.22.4 SciPy Version : 1.9.1 Cython Build Version : 0.29.30 Cython Runtime Version : None CUDA Root : /home/joachim/anaconda3/envs/rps nvcc PATH : None CUDA Build Version : 11020 CUDA Driver Version : 11070 CUDA Runtime Version : 11050 cuBLAS Version : (available) cuFFT Version : 10600 cuRAND Version : 10207 cuSOLVER Version : (11, 3, 2) cuSPARSE Version : (available) NVRTC Version : (11, 5) Thrust Version : 101000 CUB Build Version : 101000 Jitify Build Version : 87d8ccd cuDNN Build Version : None cuDNN Version : None NCCL Build Version : 21212 NCCL Runtime Version : 21403 cuTENSOR Version : None cuSPARSELt Build Version : None Device 0 Name : NVIDIA GeForce GTX 1660 SUPER Device 0 Compute Capability : 75 Device 0 PCI Bus ID : 0000:01:00.0 ``` ### Additional Information Printed to console: python3 minmax.py 100 100 [0.00204923] minmax : CPU: 2049.225 us GPU-0: 2057.504 us </issue> <code> [start of cupyx/profiler/_time.py] 1 import math as _math 2 import time as _time 3 4 import numpy as _numpy 5 6 import cupy as _cupy 7 from cupy_backends.cuda.api import runtime 8 9 10 class _PerfCaseResult: 11 """ An obscure object encompassing timing results recorded by 12 :func:`~cupyx.profiler.benchmark`. Simple statistics can be obtained by 13 converting an instance of this class to a string. 14 15 .. warning:: 16 This API is currently experimental and subject to change in future 17 releases. 
18 19 """ 20 21 def __init__(self, name, ts, devices): 22 assert ts.ndim == 2 23 assert ts.shape[0] == len(devices) + 1 24 assert ts.shape[1] > 0 25 self.name = name 26 self._ts = ts 27 self._devices = devices 28 29 def __repr__(self) -> str: 30 """ Returns a string representation of the object. 31 32 Returns: 33 str: A string representation of the object. 34 """ 35 return self.to_str(show_gpu=True) 36 37 @property 38 def cpu_times(self): 39 """ Returns an array of CPU times of size ``n_repeat``. """ 40 return self._ts[0] 41 42 @property 43 def gpu_times(self): 44 """ Returns an array of GPU times of size ``n_repeat``. """ 45 return self._ts[1:] 46 47 @staticmethod 48 def _to_str_per_item(device_name, t): 49 assert t.ndim == 1 50 assert t.size > 0 51 t_us = t * 1e6 52 53 s = ' {}:{:9.03f} us'.format(device_name, t_us.mean()) 54 if t.size > 1: 55 s += ' +/-{:6.03f} (min:{:9.03f} / max:{:9.03f}) us'.format( 56 t_us.std(), t_us.min(), t_us.max()) 57 return s 58 59 def to_str(self, show_gpu=False): 60 results = [self._to_str_per_item('CPU', self._ts[0])] 61 if show_gpu: 62 for i, d in enumerate(self._devices): 63 results.append( 64 self._to_str_per_item('GPU-{}'.format(d), 65 self._ts[1 + i])) 66 return '{:<20s}:{}'.format(self.name, ' '.join(results)) 67 68 def __str__(self): 69 return self.to_str(show_gpu=True) 70 71 72 def benchmark( 73 func, args=(), kwargs={}, n_repeat=10000, *, 74 name=None, n_warmup=10, max_duration=_math.inf, devices=None): 75 """ Timing utility for measuring time spent by both CPU and GPU. 76 77 This function is a very convenient helper for setting up a timing test. The 78 GPU time is properly recorded by synchronizing internal streams. As a 79 result, to time a multi-GPU function all participating devices must be 80 passed as the ``devices`` argument so that this helper knows which devices 81 to record. A simple example is given as follows: 82 83 .. code-block:: py 84 85 import cupy as cp 86 from cupyx.profiler import benchmark 87 88 def f(a, b): 89 return 3 * cp.sin(-a) * b 90 91 a = 0.5 - cp.random.random((100,)) 92 b = cp.random.random((100,)) 93 print(benchmark(f, (a, b), n_repeat=1000)) 94 95 96 Args: 97 func (callable): a callable object to be timed. 98 args (tuple): positional argumens to be passed to the callable. 99 kwargs (dict): keyword arguments to be passed to the callable. 100 n_repeat (int): number of times the callable is called. Increasing 101 this value would improve the collected statistics at the cost 102 of longer test time. 103 name (str): the function name to be reported. If not given, the 104 callable's ``__name__`` attribute is used. 105 n_warmup (int): number of times the callable is called. The warm-up 106 runs are not timed. 107 max_duration (float): the maximum time (in seconds) that the entire 108 test can use. If the taken time is longer than this limit, the test 109 is stopped and the statistics collected up to the breakpoint is 110 reported. 111 devices (tuple): a tuple of device IDs (int) that will be timed during 112 the timing test. If not given, the current device is used. 113 114 Returns: 115 :class:`~cupyx.profiler._time._PerfCaseResult`: 116 an object collecting all test results. 
117 118 """ 119 120 if name is None: 121 name = func.__name__ 122 123 if devices is None: 124 devices = (_cupy.cuda.get_device_id(),) 125 126 if not callable(func): 127 raise ValueError('`func` should be a callable object.') 128 if not isinstance(args, tuple): 129 raise ValueError('`args` should be of tuple type.') 130 if not isinstance(kwargs, dict): 131 raise ValueError('`kwargs` should be of dict type.') 132 if not isinstance(n_repeat, int): 133 raise ValueError('`n_repeat` should be an integer.') 134 if not isinstance(name, str): 135 raise ValueError('`name` should be a string.') 136 if not isinstance(n_warmup, int): 137 raise ValueError('`n_warmup` should be an integer.') 138 if not _numpy.isreal(max_duration): 139 raise ValueError('`max_duration` should be given in seconds') 140 if not isinstance(devices, tuple): 141 raise ValueError('`devices` should be of tuple type') 142 143 return _repeat( 144 func, args, kwargs, n_repeat, name, n_warmup, max_duration, devices) 145 146 147 def _repeat( 148 func, args, kwargs, n_repeat, name, n_warmup, max_duration, devices): 149 150 events_1 = [] 151 events_2 = [] 152 153 for i in devices: 154 prev_device = runtime.getDevice() 155 try: 156 runtime.setDevice(i) 157 events_1.append(_cupy.cuda.stream.Event()) 158 events_2.append(_cupy.cuda.stream.Event()) 159 finally: 160 runtime.setDevice(prev_device) 161 162 ev1 = _cupy.cuda.stream.Event() 163 ev2 = _cupy.cuda.stream.Event() 164 165 for i in range(n_warmup): 166 func(*args, **kwargs) 167 168 for event, device in zip(events_1, devices): 169 prev_device = runtime.getDevice() 170 try: 171 runtime.setDevice(device) 172 event.record() 173 finally: 174 runtime.setDevice(prev_device) 175 event.synchronize() 176 177 cpu_times = [] 178 gpu_times = [[] for i in events_1] 179 duration = 0 180 for i in range(n_repeat): 181 for event, device in zip(events_1, devices): 182 prev_device = runtime.getDevice() 183 try: 184 runtime.setDevice(device) 185 event.record() 186 finally: 187 runtime.setDevice(prev_device) 188 189 t1 = _time.perf_counter() 190 191 func(*args, **kwargs) 192 193 t2 = _time.perf_counter() 194 cpu_time = t2 - t1 195 cpu_times.append(cpu_time) 196 197 for event, device in zip(events_2, devices): 198 prev_device = runtime.getDevice() 199 try: 200 runtime.setDevice(device) 201 event.record() 202 finally: 203 runtime.setDevice(prev_device) 204 for event, device in zip(events_2, devices): 205 prev_device = runtime.getDevice() 206 try: 207 runtime.setDevice(device) 208 event.synchronize() 209 finally: 210 runtime.setDevice(prev_device) 211 for i, (ev1, ev2) in enumerate(zip(events_1, events_2)): 212 gpu_time = _cupy.cuda.get_elapsed_time(ev1, ev2) * 1e-3 213 gpu_times[i].append(gpu_time) 214 215 duration += _time.perf_counter() - t1 216 if duration > max_duration: 217 break 218 219 ts = _numpy.asarray([cpu_times] + gpu_times, dtype=_numpy.float64) 220 return _PerfCaseResult(name, ts, devices=devices) 221 [end of cupyx/profiler/_time.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/cupyx/profiler/_time.py b/cupyx/profiler/_time.py --- a/cupyx/profiler/_time.py +++ b/cupyx/profiler/_time.py @@ -35,13 +35,23 @@ return self.to_str(show_gpu=True) @property - def cpu_times(self): - """ Returns an array of CPU times of size ``n_repeat``. """ + def cpu_times(self) -> _numpy.ndarray: + """A :class:`numpy.ndarray` of shape ``(n_repeat,)``, holding times spent + on CPU in milliseconds. + + These values are delta of the host-side performance counter + (:func:`time.perf_counter`) between each repeat step. + """ return self._ts[0] @property - def gpu_times(self): - """ Returns an array of GPU times of size ``n_repeat``. """ + def gpu_times(self) -> _numpy.ndarray: + """A :class:`numpy.ndarray` of shape ``(len(devices), n_repeat)``, + holding times spent on GPU in milliseconds. + + These values are measured using ``cudaEventElapsedTime`` with events + recoreded before/after each repeat step. + """ return self._ts[1:] @staticmethod @@ -159,9 +169,6 @@ finally: runtime.setDevice(prev_device) - ev1 = _cupy.cuda.stream.Event() - ev2 = _cupy.cuda.stream.Event() - for i in range(n_warmup): func(*args, **kwargs)
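A quick way to see the relationship between the printed summary and the raw arrays is to apply the same `t_us = t * 1e6` scaling that `_to_str_per_item` uses before formatting. The snippet below is a minimal sketch, not part of the patch; it assumes a CUDA-capable machine with CuPy installed, and the toy kernel `f`, array sizes, and repeat counts are arbitrary choices for illustration.

```python
# Minimal sketch: compare the raw timing arrays against the printed summary
# by reusing the same 1e6 scaling that _to_str_per_item applies.
import cupy as cp
from cupyx.profiler import benchmark

def f(a, b):
    return 3 * cp.sin(-a) * b

a = 0.5 - cp.random.random((100,))
b = cp.random.random((100,))

result = benchmark(f, (a, b), n_repeat=100, n_warmup=10)

print(result)                                # summary line, reported in microseconds
print(result.cpu_times.mean() * 1e6)         # matches the CPU figure in the summary
print(result.gpu_times.mean(axis=1) * 1e6)   # one mean per recorded device
```

Scaled this way, the raw values line up with the summary output, which makes the unit mismatch described in the report easy to spot.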
{"golden_diff": "diff --git a/cupyx/profiler/_time.py b/cupyx/profiler/_time.py\n--- a/cupyx/profiler/_time.py\n+++ b/cupyx/profiler/_time.py\n@@ -35,13 +35,23 @@\n return self.to_str(show_gpu=True)\n \n @property\n- def cpu_times(self):\n- \"\"\" Returns an array of CPU times of size ``n_repeat``. \"\"\"\n+ def cpu_times(self) -> _numpy.ndarray:\n+ \"\"\"A :class:`numpy.ndarray` of shape ``(n_repeat,)``, holding times spent\n+ on CPU in milliseconds.\n+\n+ These values are delta of the host-side performance counter\n+ (:func:`time.perf_counter`) between each repeat step.\n+ \"\"\"\n return self._ts[0]\n \n @property\n- def gpu_times(self):\n- \"\"\" Returns an array of GPU times of size ``n_repeat``. \"\"\"\n+ def gpu_times(self) -> _numpy.ndarray:\n+ \"\"\"A :class:`numpy.ndarray` of shape ``(len(devices), n_repeat)``,\n+ holding times spent on GPU in milliseconds.\n+\n+ These values are measured using ``cudaEventElapsedTime`` with events\n+ recoreded before/after each repeat step.\n+ \"\"\"\n return self._ts[1:]\n \n @staticmethod\n@@ -159,9 +169,6 @@\n finally:\n runtime.setDevice(prev_device)\n \n- ev1 = _cupy.cuda.stream.Event()\n- ev2 = _cupy.cuda.stream.Event()\n-\n for i in range(n_warmup):\n func(*args, **kwargs)\n", "issue": "Cupyx.profiler benchmark implicitly converts from micro to milliseconds depending on object methods.\n### Description\r\n\r\nWhen reading the official CuPY documentation, a note on benchmarking says that it is important to use the built-in ```cupyx.profiler``` benchmark tool. However, this function returns a type ```_PerfCaseResult```. This object has several, very sparsely documented methods. If one calls ```print()``` on the entire object, the object is printed to console and runs are reported in microseconds. This is fairly standard for benchmarking. However, what is also extremely important in benchmarking is that the units stay consistent. If one wishes to access the ```cpu_times``` or ```gpu_times``` directly from the object, an array containing the run-times is printed containing the run-times in MILLIseconds. 
There are no warnings or documentation of this implicit conversion, as the official documentation is:\r\n\r\n```\r\ncpu_times\r\n\r\n Returns an array of CPU times of size n_repeat.\r\n\r\ngpu_times\r\n\r\n Returns an array of GPU times of size n_repeat.\r\n```\r\n\r\n### To Reproduce\r\n\r\n```py\r\nimport cupy as cp\r\nimport sys\r\nfrom helpers import *\r\nfrom cupyx.profiler import benchmark\r\n\r\ndef generate_array(size, work_units): #Split this function up for timing the work only\r\n rand = cp.random.default_rng() #This is the fast way of creating large arrays with cp\r\n x = rand.integers(0, 100_000, (size, work_units)) #Create array\r\n return x \r\n\r\n\r\ndef minmax(size, work_units):\r\n x_min = cp.min(arr)\r\n x_max = cp.max(arr)\r\n y = (arr-x_min)/(x_max-x_min)\r\n size = len(y) #Force execution\r\n return size\r\n \r\nif __name__ == '__main__':\r\n sz = int(sys.argv[1])\r\n wk = int(sys.argv[2])\r\n arr = generate_array(sz, wk) \r\n y = benchmark(minmax, (sz, wk), n_repeat=1, n_warmup=0)\r\n \r\n print(y.cpu_times)\r\n print(y)\r\n```\r\n\r\n\r\n### Installation\r\n\r\n_No response_\r\n\r\n### Environment\r\n\r\n```\r\nOS : Linux-5.15.0-52-generic-x86_64-with-glibc2.35\r\nPython Version : 3.9.13\r\nCuPy Version : 10.6.0\r\nCuPy Platform : NVIDIA CUDA\r\nNumPy Version : 1.22.4\r\nSciPy Version : 1.9.1\r\nCython Build Version : 0.29.30\r\nCython Runtime Version : None\r\nCUDA Root : /home/joachim/anaconda3/envs/rps\r\nnvcc PATH : None\r\nCUDA Build Version : 11020\r\nCUDA Driver Version : 11070\r\nCUDA Runtime Version : 11050\r\ncuBLAS Version : (available)\r\ncuFFT Version : 10600\r\ncuRAND Version : 10207\r\ncuSOLVER Version : (11, 3, 2)\r\ncuSPARSE Version : (available)\r\nNVRTC Version : (11, 5)\r\nThrust Version : 101000\r\nCUB Build Version : 101000\r\nJitify Build Version : 87d8ccd\r\ncuDNN Build Version : None\r\ncuDNN Version : None\r\nNCCL Build Version : 21212\r\nNCCL Runtime Version : 21403\r\ncuTENSOR Version : None\r\ncuSPARSELt Build Version : None\r\nDevice 0 Name : NVIDIA GeForce GTX 1660 SUPER\r\nDevice 0 Compute Capability : 75\r\nDevice 0 PCI Bus ID : 0000:01:00.0\r\n```\r\n\r\n\r\n### Additional Information\r\n\r\nPrinted to console:\r\n\r\npython3 minmax.py 100 100\r\n[0.00204923]\r\nminmax : CPU: 2049.225 us GPU-0: 2057.504 us\n", "before_files": [{"content": "import math as _math\nimport time as _time\n\nimport numpy as _numpy\n\nimport cupy as _cupy\nfrom cupy_backends.cuda.api import runtime\n\n\nclass _PerfCaseResult:\n \"\"\" An obscure object encompassing timing results recorded by\n :func:`~cupyx.profiler.benchmark`. Simple statistics can be obtained by\n converting an instance of this class to a string.\n\n .. warning::\n This API is currently experimental and subject to change in future\n releases.\n\n \"\"\"\n\n def __init__(self, name, ts, devices):\n assert ts.ndim == 2\n assert ts.shape[0] == len(devices) + 1\n assert ts.shape[1] > 0\n self.name = name\n self._ts = ts\n self._devices = devices\n\n def __repr__(self) -> str:\n \"\"\" Returns a string representation of the object.\n\n Returns:\n str: A string representation of the object.\n \"\"\"\n return self.to_str(show_gpu=True)\n\n @property\n def cpu_times(self):\n \"\"\" Returns an array of CPU times of size ``n_repeat``. \"\"\"\n return self._ts[0]\n\n @property\n def gpu_times(self):\n \"\"\" Returns an array of GPU times of size ``n_repeat``. 
\"\"\"\n return self._ts[1:]\n\n @staticmethod\n def _to_str_per_item(device_name, t):\n assert t.ndim == 1\n assert t.size > 0\n t_us = t * 1e6\n\n s = ' {}:{:9.03f} us'.format(device_name, t_us.mean())\n if t.size > 1:\n s += ' +/-{:6.03f} (min:{:9.03f} / max:{:9.03f}) us'.format(\n t_us.std(), t_us.min(), t_us.max())\n return s\n\n def to_str(self, show_gpu=False):\n results = [self._to_str_per_item('CPU', self._ts[0])]\n if show_gpu:\n for i, d in enumerate(self._devices):\n results.append(\n self._to_str_per_item('GPU-{}'.format(d),\n self._ts[1 + i]))\n return '{:<20s}:{}'.format(self.name, ' '.join(results))\n\n def __str__(self):\n return self.to_str(show_gpu=True)\n\n\ndef benchmark(\n func, args=(), kwargs={}, n_repeat=10000, *,\n name=None, n_warmup=10, max_duration=_math.inf, devices=None):\n \"\"\" Timing utility for measuring time spent by both CPU and GPU.\n\n This function is a very convenient helper for setting up a timing test. The\n GPU time is properly recorded by synchronizing internal streams. As a\n result, to time a multi-GPU function all participating devices must be\n passed as the ``devices`` argument so that this helper knows which devices\n to record. A simple example is given as follows:\n\n .. code-block:: py\n\n import cupy as cp\n from cupyx.profiler import benchmark\n\n def f(a, b):\n return 3 * cp.sin(-a) * b\n\n a = 0.5 - cp.random.random((100,))\n b = cp.random.random((100,))\n print(benchmark(f, (a, b), n_repeat=1000))\n\n\n Args:\n func (callable): a callable object to be timed.\n args (tuple): positional argumens to be passed to the callable.\n kwargs (dict): keyword arguments to be passed to the callable.\n n_repeat (int): number of times the callable is called. Increasing\n this value would improve the collected statistics at the cost\n of longer test time.\n name (str): the function name to be reported. If not given, the\n callable's ``__name__`` attribute is used.\n n_warmup (int): number of times the callable is called. The warm-up\n runs are not timed.\n max_duration (float): the maximum time (in seconds) that the entire\n test can use. If the taken time is longer than this limit, the test\n is stopped and the statistics collected up to the breakpoint is\n reported.\n devices (tuple): a tuple of device IDs (int) that will be timed during\n the timing test. 
If not given, the current device is used.\n\n Returns:\n :class:`~cupyx.profiler._time._PerfCaseResult`:\n an object collecting all test results.\n\n \"\"\"\n\n if name is None:\n name = func.__name__\n\n if devices is None:\n devices = (_cupy.cuda.get_device_id(),)\n\n if not callable(func):\n raise ValueError('`func` should be a callable object.')\n if not isinstance(args, tuple):\n raise ValueError('`args` should be of tuple type.')\n if not isinstance(kwargs, dict):\n raise ValueError('`kwargs` should be of dict type.')\n if not isinstance(n_repeat, int):\n raise ValueError('`n_repeat` should be an integer.')\n if not isinstance(name, str):\n raise ValueError('`name` should be a string.')\n if not isinstance(n_warmup, int):\n raise ValueError('`n_warmup` should be an integer.')\n if not _numpy.isreal(max_duration):\n raise ValueError('`max_duration` should be given in seconds')\n if not isinstance(devices, tuple):\n raise ValueError('`devices` should be of tuple type')\n\n return _repeat(\n func, args, kwargs, n_repeat, name, n_warmup, max_duration, devices)\n\n\ndef _repeat(\n func, args, kwargs, n_repeat, name, n_warmup, max_duration, devices):\n\n events_1 = []\n events_2 = []\n\n for i in devices:\n prev_device = runtime.getDevice()\n try:\n runtime.setDevice(i)\n events_1.append(_cupy.cuda.stream.Event())\n events_2.append(_cupy.cuda.stream.Event())\n finally:\n runtime.setDevice(prev_device)\n\n ev1 = _cupy.cuda.stream.Event()\n ev2 = _cupy.cuda.stream.Event()\n\n for i in range(n_warmup):\n func(*args, **kwargs)\n\n for event, device in zip(events_1, devices):\n prev_device = runtime.getDevice()\n try:\n runtime.setDevice(device)\n event.record()\n finally:\n runtime.setDevice(prev_device)\n event.synchronize()\n\n cpu_times = []\n gpu_times = [[] for i in events_1]\n duration = 0\n for i in range(n_repeat):\n for event, device in zip(events_1, devices):\n prev_device = runtime.getDevice()\n try:\n runtime.setDevice(device)\n event.record()\n finally:\n runtime.setDevice(prev_device)\n\n t1 = _time.perf_counter()\n\n func(*args, **kwargs)\n\n t2 = _time.perf_counter()\n cpu_time = t2 - t1\n cpu_times.append(cpu_time)\n\n for event, device in zip(events_2, devices):\n prev_device = runtime.getDevice()\n try:\n runtime.setDevice(device)\n event.record()\n finally:\n runtime.setDevice(prev_device)\n for event, device in zip(events_2, devices):\n prev_device = runtime.getDevice()\n try:\n runtime.setDevice(device)\n event.synchronize()\n finally:\n runtime.setDevice(prev_device)\n for i, (ev1, ev2) in enumerate(zip(events_1, events_2)):\n gpu_time = _cupy.cuda.get_elapsed_time(ev1, ev2) * 1e-3\n gpu_times[i].append(gpu_time)\n\n duration += _time.perf_counter() - t1\n if duration > max_duration:\n break\n\n ts = _numpy.asarray([cpu_times] + gpu_times, dtype=_numpy.float64)\n return _PerfCaseResult(name, ts, devices=devices)\n", "path": "cupyx/profiler/_time.py"}]}
3,795
360
gh_patches_debug_28031
rasdani/github-patches
git_diff
sonic-net__sonic-mgmt-2921
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [dualtor] Missing Loopback2 interface in device minigraph <!-- If you are reporting a new issue, make sure that we do not have any duplicates already open. You can ensure this by searching the issue list for this repository. If there is a duplicate, please close your issue and add a comment to the existing issue instead. If you suspect your issue is a bug, please edit your issue description to include the BUG REPORT INFORMATION shown below. If you fail to provide this information within 7 days, we cannot debug your issue and will close it. We will, however, reopen it if you later provide the information. For more information about reporting issues, see https://github.com/Azure/SONiC/wiki#report-issues --------------------------------------------------- GENERAL SUPPORT INFORMATION --------------------------------------------------- The GitHub issue tracker is for bug reports and feature requests. General support can be found at the following locations: - SONiC Support Forums - https://groups.google.com/forum/#!forum/sonicproject --------------------------------------------------- BUG REPORT INFORMATION --------------------------------------------------- Use the commands below to provide key information from your environment: You do NOT have to include this information if this is a FEATURE REQUEST --> **Description** Minigraph generated for Dual ToR devices does not have `Loopback2` interface. The `linkmrgd` waits on `Loopback2` interface to perform the probe and to indicate subsequent switchover tasks. Further, APP DB is missing *MUX* keys. <!-- Briefly describe the problem you are having in a few paragraphs. --> **Steps to reproduce the issue:** 1. Manual switchover using CLI in dual TOR on standby TOR. 2. The switchover fails to happen. 3. **Describe the results you received:** ToR switchover failed. **Describe the results you expected:** Switchover should work. APP DB should contain *MUX* keys. **Additional information you deem important:** <!-- software/ASIC/Hardware Flatform version and info --> **Output of `show version`:** ``` (paste your output here) ``` **Attach debug file `sudo generate_dump`:** ``` (paste your output here) ``` </issue> <code> [start of ansible/library/dual_tor_facts.py] 1 from collections import defaultdict 2 class DualTorParser: 3 4 def __init__(self, hostname, testbed_facts, host_vars, vm_config, port_alias, vlan_intfs): 5 self.hostname = hostname 6 self.testbed_facts = testbed_facts 7 self.host_vars = host_vars 8 self.vm_config = vm_config 9 self.port_alias = port_alias 10 self.vlan_intfs = vlan_intfs 11 self.dual_tor_facts = {} 12 13 def parse_neighbor_tor(self): 14 ''' 15 Parses information about the other ToR in a dual ToR pair 16 ''' 17 neighbor = {} 18 neighbor['hostname'] = [dut for dut in self.testbed_facts['duts'] if dut != self.hostname][0] 19 neighbor['ip'] = self.host_vars[neighbor['hostname']]['ansible_host'] 20 neighbor['hwsku'] = self.host_vars[neighbor['hostname']]['hwsku'] 21 22 self.dual_tor_facts['neighbor'] = neighbor 23 24 def parse_tor_position(self): 25 ''' 26 Determines the position ('U' for upper and 'L' for lower) of the ToR. 
27 28 The upper ToR is always the first ToR listed in the testbed file 29 ''' 30 self.dual_tor_facts['positions'] = {'upper': self.testbed_facts['duts'][0], 'lower': self.testbed_facts['duts'][1]} 31 32 def parse_loopback_ips(self): 33 ''' 34 Parses the IPv4 and IPv6 loopback IPs for the DUTs 35 36 Similar to `parse_tor_position`, the ToR which comes first alphabetically is always assigned the first IP 37 ''' 38 39 loopback_ips = defaultdict(dict) 40 41 ipv4_loopbacks = sorted(self.vm_config['DUT']['loopback']['ipv4']) 42 ipv6_loopbacks = sorted(self.vm_config['DUT']['loopback']['ipv6']) 43 44 for i, dut in enumerate(sorted(self.testbed_facts['duts'])): 45 loopback_ips[dut]['ipv4'] = ipv4_loopbacks[i] 46 loopback_ips[dut]['ipv6'] = ipv6_loopbacks[i] 47 48 self.dual_tor_facts['loopback'] = loopback_ips 49 50 def generate_cable_names(self): 51 cables = [] 52 53 for server_num, dut_intf in enumerate(self.vlan_intfs): 54 name = '{}-Servers{}-SC'.format(self.hostname, server_num) 55 cable = {"hostname": name, "dut_intf": dut_intf} 56 cables.append(cable) 57 58 self.dual_tor_facts['cables'] = cables 59 60 def get_dual_tor_facts(self): 61 ''' 62 Gathers facts related to a dual ToR configuration 63 ''' 64 if 'dualtor' in self.testbed_facts['topo']: 65 self.parse_neighbor_tor() 66 self.parse_tor_position() 67 self.generate_cable_names() 68 self.parse_loopback_ips() 69 70 return self.dual_tor_facts 71 72 73 def main(): 74 module = AnsibleModule( 75 argument_spec=dict( 76 hostname=dict(required=True, default=None, type='str'), 77 testbed_facts=dict(required=True, default=None, type='dict'), 78 hostvars=dict(required=True, default=None, type='dict'), 79 vm_config=dict(required=True, default=None, type='dict'), 80 port_alias=dict(required=True, default=None, type='list'), 81 vlan_intfs=dict(required=True, default=None, type='list') 82 ), 83 supports_check_mode=True 84 ) 85 m_args = module.params 86 # testbed_facts ={u'comment': u'Dual-TOR testbed', u'conf-name': u'vms-kvm-dual-t0', u'ptf_ip': u'10.250.0.109', u'ptf_netmask': u'255.255.255.0', u'ptf_ipv6': u'fec0::ffff:afa:9', u'vm_base': u'VM0108', u'server': u'server_1', u'topo': u'dualtor', u'group-name': u'vms6-4', u'ptf': u'ptf-04', u'duts_map': {u'vlab-06': 1, u'vlab-05': 0}, u'ptf_netmask_v6': u'ffff:ffff:ffff:ffff::', u'ptf_image_name': u'docker-ptf', u'duts': [u'vlab-05', u'vlab-06']} 87 hostname = m_args['hostname'] 88 testbed_facts = m_args['testbed_facts'] 89 host_vars = m_args['hostvars'] 90 vm_config = m_args['vm_config'] 91 port_alias = m_args['port_alias'] 92 vlan_intfs = m_args['vlan_intfs'] 93 try: 94 dual_tor_parser = DualTorParser(hostname, testbed_facts, host_vars, vm_config, port_alias, vlan_intfs) 95 module.exit_json(ansible_facts={'dual_tor_facts': dual_tor_parser.get_dual_tor_facts()}) 96 except Exception as e: 97 module.fail_json(msg=traceback.format_exc()) 98 99 from ansible.module_utils.basic import * 100 if __name__== "__main__": 101 main() 102 [end of ansible/library/dual_tor_facts.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ansible/library/dual_tor_facts.py b/ansible/library/dual_tor_facts.py --- a/ansible/library/dual_tor_facts.py +++ b/ansible/library/dual_tor_facts.py @@ -33,19 +33,26 @@ ''' Parses the IPv4 and IPv6 loopback IPs for the DUTs - Similar to `parse_tor_position`, the ToR which comes first alphabetically is always assigned the first IP + Similar to `parse_tor_position`, the ToR which comes first in the testbed file is always assigned the first IP ''' loopback_ips = defaultdict(dict) - - ipv4_loopbacks = sorted(self.vm_config['DUT']['loopback']['ipv4']) - ipv6_loopbacks = sorted(self.vm_config['DUT']['loopback']['ipv6']) - - for i, dut in enumerate(sorted(self.testbed_facts['duts'])): - loopback_ips[dut]['ipv4'] = ipv4_loopbacks[i] - loopback_ips[dut]['ipv6'] = ipv6_loopbacks[i] - - self.dual_tor_facts['loopback'] = loopback_ips + addl_loopback_ips = defaultdict(dict) + + for dut_num, dut in enumerate(self.testbed_facts['duts']): + loopback_ips[dut]['ipv4'] = self.vm_config['DUT']['loopback']['ipv4'][dut_num] + loopback_ips[dut]['ipv6'] = self.vm_config['DUT']['loopback']['ipv6'][dut_num] + + for loopback_num in range(1, 3): # Generate two additional loopback IPs, Loopback1 and Loopback2 + loopback_key = 'loopback{}'.format(loopback_num) + loopback_dict = {} + loopback_dict['ipv4'] = self.vm_config['DUT'][loopback_key]['ipv4'][dut_num] + loopback_dict['ipv6'] = self.vm_config['DUT'][loopback_key]['ipv6'][dut_num] + loopback_dict['host_ip_base_index'] = loopback_num * 2 + addl_loopback_ips[dut][loopback_num] = loopback_dict + + self.dual_tor_facts['loopback'] = loopback_ips + self.dual_tor_facts['addl_loopbacks'] = addl_loopback_ips def generate_cable_names(self): cables = []
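To make the shape of the new `addl_loopbacks` fact concrete, the patched loop can be exercised on a hand-built input. The snippet below is a standalone mirror of the patched `parse_loopback_ips()` body rather than an import of the Ansible module, and every DUT name and IP address in it is fabricated for illustration only.

```python
# Standalone mirror of the patched parse_loopback_ips() logic on fabricated
# inputs, to show the structure of the 'loopback' and 'addl_loopbacks' facts.
from collections import defaultdict
from pprint import pprint

duts = ["vlab-05", "vlab-06"]          # order taken from the testbed file
vm_config = {                          # made-up fragment; only keys the loop reads
    "DUT": {
        "loopback":  {"ipv4": ["10.1.0.32/32", "10.1.0.33/32"],
                      "ipv6": ["fc00:1::32/128", "fc00:1::33/128"]},
        "loopback1": {"ipv4": ["10.1.0.34/32", "10.1.0.35/32"],
                      "ipv6": ["fc00:1::34/128", "fc00:1::35/128"]},
        "loopback2": {"ipv4": ["10.1.0.36/32", "10.1.0.37/32"],
                      "ipv6": ["fc00:1::36/128", "fc00:1::37/128"]},
    }
}

loopback_ips = defaultdict(dict)
addl_loopback_ips = defaultdict(dict)
for dut_num, dut in enumerate(duts):
    loopback_ips[dut]["ipv4"] = vm_config["DUT"]["loopback"]["ipv4"][dut_num]
    loopback_ips[dut]["ipv6"] = vm_config["DUT"]["loopback"]["ipv6"][dut_num]
    for loopback_num in range(1, 3):   # Loopback1 and Loopback2
        key = "loopback{}".format(loopback_num)
        addl_loopback_ips[dut][loopback_num] = {
            "ipv4": vm_config["DUT"][key]["ipv4"][dut_num],
            "ipv6": vm_config["DUT"][key]["ipv6"][dut_num],
            "host_ip_base_index": loopback_num * 2,
        }

pprint(dict(loopback_ips))
pprint(dict(addl_loopback_ips))
```

The intent is for minigraph generation to consume `addl_loopbacks` so each ToR gets the `Loopback2` interface the issue reports as missing.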
{"golden_diff": "diff --git a/ansible/library/dual_tor_facts.py b/ansible/library/dual_tor_facts.py\n--- a/ansible/library/dual_tor_facts.py\n+++ b/ansible/library/dual_tor_facts.py\n@@ -33,19 +33,26 @@\n '''\n Parses the IPv4 and IPv6 loopback IPs for the DUTs\n \n- Similar to `parse_tor_position`, the ToR which comes first alphabetically is always assigned the first IP\n+ Similar to `parse_tor_position`, the ToR which comes first in the testbed file is always assigned the first IP\n '''\n \n loopback_ips = defaultdict(dict)\n-\n- ipv4_loopbacks = sorted(self.vm_config['DUT']['loopback']['ipv4'])\n- ipv6_loopbacks = sorted(self.vm_config['DUT']['loopback']['ipv6'])\n-\n- for i, dut in enumerate(sorted(self.testbed_facts['duts'])):\n- loopback_ips[dut]['ipv4'] = ipv4_loopbacks[i]\n- loopback_ips[dut]['ipv6'] = ipv6_loopbacks[i] \n-\n- self.dual_tor_facts['loopback'] = loopback_ips \n+ addl_loopback_ips = defaultdict(dict)\n+\n+ for dut_num, dut in enumerate(self.testbed_facts['duts']):\n+ loopback_ips[dut]['ipv4'] = self.vm_config['DUT']['loopback']['ipv4'][dut_num]\n+ loopback_ips[dut]['ipv6'] = self.vm_config['DUT']['loopback']['ipv6'][dut_num] \n+\n+ for loopback_num in range(1, 3): # Generate two additional loopback IPs, Loopback1 and Loopback2\n+ loopback_key = 'loopback{}'.format(loopback_num)\n+ loopback_dict = {}\n+ loopback_dict['ipv4'] = self.vm_config['DUT'][loopback_key]['ipv4'][dut_num]\n+ loopback_dict['ipv6'] = self.vm_config['DUT'][loopback_key]['ipv6'][dut_num]\n+ loopback_dict['host_ip_base_index'] = loopback_num * 2\n+ addl_loopback_ips[dut][loopback_num] = loopback_dict\n+\n+ self.dual_tor_facts['loopback'] = loopback_ips \n+ self.dual_tor_facts['addl_loopbacks'] = addl_loopback_ips\n \n def generate_cable_names(self):\n cables = []\n", "issue": "[dualtor] Missing Loopback2 interface in device minigraph\n<!--\r\nIf you are reporting a new issue, make sure that we do not have any duplicates\r\nalready open. You can ensure this by searching the issue list for this\r\nrepository. If there is a duplicate, please close your issue and add a comment\r\nto the existing issue instead.\r\n\r\nIf you suspect your issue is a bug, please edit your issue description to\r\ninclude the BUG REPORT INFORMATION shown below. If you fail to provide this\r\ninformation within 7 days, we cannot debug your issue and will close it. 
We\r\nwill, however, reopen it if you later provide the information.\r\n\r\nFor more information about reporting issues, see\r\nhttps://github.com/Azure/SONiC/wiki#report-issues\r\n\r\n---------------------------------------------------\r\nGENERAL SUPPORT INFORMATION\r\n---------------------------------------------------\r\n\r\nThe GitHub issue tracker is for bug reports and feature requests.\r\nGeneral support can be found at the following locations:\r\n\r\n- SONiC Support Forums - https://groups.google.com/forum/#!forum/sonicproject\r\n\r\n---------------------------------------------------\r\nBUG REPORT INFORMATION\r\n---------------------------------------------------\r\nUse the commands below to provide key information from your environment:\r\nYou do NOT have to include this information if this is a FEATURE REQUEST\r\n-->\r\n\r\n**Description**\r\nMinigraph generated for Dual ToR devices does not have `Loopback2` interface.\r\nThe `linkmrgd` waits on `Loopback2` interface to perform the probe and to indicate subsequent switchover tasks.\r\nFurther, APP DB is missing *MUX* keys.\r\n\r\n<!--\r\nBriefly describe the problem you are having in a few paragraphs.\r\n-->\r\n\r\n**Steps to reproduce the issue:**\r\n1. Manual switchover using CLI in dual TOR on standby TOR.\r\n2. The switchover fails to happen.\r\n3.\r\n\r\n**Describe the results you received:**\r\nToR switchover failed.\r\n\r\n**Describe the results you expected:**\r\nSwitchover should work. APP DB should contain *MUX* keys.\r\n\r\n**Additional information you deem important:**\r\n<!--\r\nsoftware/ASIC/Hardware Flatform version and info\r\n-->\r\n **Output of `show version`:**\r\n\r\n ```\r\n (paste your output here)\r\n ```\r\n\r\n **Attach debug file `sudo generate_dump`:**\r\n\r\n ```\r\n (paste your output here)\r\n ```\r\n\n", "before_files": [{"content": "from collections import defaultdict\nclass DualTorParser:\n\n def __init__(self, hostname, testbed_facts, host_vars, vm_config, port_alias, vlan_intfs):\n self.hostname = hostname\n self.testbed_facts = testbed_facts\n self.host_vars = host_vars\n self.vm_config = vm_config\n self.port_alias = port_alias\n self.vlan_intfs = vlan_intfs\n self.dual_tor_facts = {}\n\n def parse_neighbor_tor(self):\n '''\n Parses information about the other ToR in a dual ToR pair\n '''\n neighbor = {}\n neighbor['hostname'] = [dut for dut in self.testbed_facts['duts'] if dut != self.hostname][0]\n neighbor['ip'] = self.host_vars[neighbor['hostname']]['ansible_host']\n neighbor['hwsku'] = self.host_vars[neighbor['hostname']]['hwsku']\n\n self.dual_tor_facts['neighbor'] = neighbor\n\n def parse_tor_position(self):\n '''\n Determines the position ('U' for upper and 'L' for lower) of the ToR.\n\n The upper ToR is always the first ToR listed in the testbed file\n '''\n self.dual_tor_facts['positions'] = {'upper': self.testbed_facts['duts'][0], 'lower': self.testbed_facts['duts'][1]}\n\n def parse_loopback_ips(self):\n '''\n Parses the IPv4 and IPv6 loopback IPs for the DUTs\n\n Similar to `parse_tor_position`, the ToR which comes first alphabetically is always assigned the first IP\n '''\n\n loopback_ips = defaultdict(dict)\n\n ipv4_loopbacks = sorted(self.vm_config['DUT']['loopback']['ipv4'])\n ipv6_loopbacks = sorted(self.vm_config['DUT']['loopback']['ipv6'])\n\n for i, dut in enumerate(sorted(self.testbed_facts['duts'])):\n loopback_ips[dut]['ipv4'] = ipv4_loopbacks[i]\n loopback_ips[dut]['ipv6'] = ipv6_loopbacks[i] \n\n self.dual_tor_facts['loopback'] = loopback_ips \n\n def 
generate_cable_names(self):\n cables = []\n\n for server_num, dut_intf in enumerate(self.vlan_intfs):\n name = '{}-Servers{}-SC'.format(self.hostname, server_num)\n cable = {\"hostname\": name, \"dut_intf\": dut_intf}\n cables.append(cable)\n\n self.dual_tor_facts['cables'] = cables\n\n def get_dual_tor_facts(self):\n '''\n Gathers facts related to a dual ToR configuration\n '''\n if 'dualtor' in self.testbed_facts['topo']:\n self.parse_neighbor_tor()\n self.parse_tor_position()\n self.generate_cable_names()\n self.parse_loopback_ips()\n\n return self.dual_tor_facts\n\n\ndef main():\n module = AnsibleModule(\n argument_spec=dict(\n hostname=dict(required=True, default=None, type='str'),\n testbed_facts=dict(required=True, default=None, type='dict'),\n hostvars=dict(required=True, default=None, type='dict'),\n vm_config=dict(required=True, default=None, type='dict'),\n port_alias=dict(required=True, default=None, type='list'),\n vlan_intfs=dict(required=True, default=None, type='list')\n ),\n supports_check_mode=True\n )\n m_args = module.params\n # testbed_facts ={u'comment': u'Dual-TOR testbed', u'conf-name': u'vms-kvm-dual-t0', u'ptf_ip': u'10.250.0.109', u'ptf_netmask': u'255.255.255.0', u'ptf_ipv6': u'fec0::ffff:afa:9', u'vm_base': u'VM0108', u'server': u'server_1', u'topo': u'dualtor', u'group-name': u'vms6-4', u'ptf': u'ptf-04', u'duts_map': {u'vlab-06': 1, u'vlab-05': 0}, u'ptf_netmask_v6': u'ffff:ffff:ffff:ffff::', u'ptf_image_name': u'docker-ptf', u'duts': [u'vlab-05', u'vlab-06']}\n hostname = m_args['hostname']\n testbed_facts = m_args['testbed_facts']\n host_vars = m_args['hostvars']\n vm_config = m_args['vm_config']\n port_alias = m_args['port_alias']\n vlan_intfs = m_args['vlan_intfs']\n try:\n dual_tor_parser = DualTorParser(hostname, testbed_facts, host_vars, vm_config, port_alias, vlan_intfs)\n module.exit_json(ansible_facts={'dual_tor_facts': dual_tor_parser.get_dual_tor_facts()})\n except Exception as e:\n module.fail_json(msg=traceback.format_exc())\n\nfrom ansible.module_utils.basic import *\nif __name__== \"__main__\":\n main()\n", "path": "ansible/library/dual_tor_facts.py"}]}
2,380
557
gh_patches_debug_20290
rasdani/github-patches
git_diff
WeblateOrg__weblate-3602
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Unchanged check should honor placeholders **Is your feature request related to a problem? Please describe.** It is possible to define placeholders using [`placeholder:` flag](https://docs.weblate.org/en/latest/admin/checks.html#customizing-behavior). The [Unchanged translation check](https://docs.weblate.org/en/latest/user/checks.html#unchanged-translation) should deal with those correctly. **Describe the solution you'd like** The check strip custom placeholders from the string similarly like we do with format strings: https://github.com/WeblateOrg/weblate/blob/ddfbd298a31a95229fa4487cfb2a5a4878da7b2b/weblate/checks/same.py#L107-L137 **Additional context** Getting `placeholder:` flag value: https://github.com/WeblateOrg/weblate/blob/256fa9370edc90c2404df6178d1daa002d441e2f/weblate/checks/base.py#L179 Building regexp for placeholders: https://github.com/WeblateOrg/weblate/blob/256fa9370edc90c2404df6178d1daa002d441e2f/weblate/checks/placeholders.py#L57 <bountysource-plugin> --- Want to back this issue? **[Post a bounty on it!](https://www.bountysource.com/issues/88807457-unchanged-check-should-honor-placeholders?utm_campaign=plugin&utm_content=tracker%2F253393&utm_medium=issues&utm_source=github)** We accept bounties via [Bountysource](https://www.bountysource.com/?utm_campaign=plugin&utm_content=tracker%2F253393&utm_medium=issues&utm_source=github). </bountysource-plugin> </issue> <code> [start of weblate/checks/same.py] 1 # 2 # Copyright © 2012 - 2020 Michal Čihař <[email protected]> 3 # 4 # This file is part of Weblate <https://weblate.org/> 5 # 6 # This program is free software: you can redistribute it and/or modify 7 # it under the terms of the GNU General Public License as published by 8 # the Free Software Foundation, either version 3 of the License, or 9 # (at your option) any later version. 10 # 11 # This program is distributed in the hope that it will be useful, 12 # but WITHOUT ANY WARRANTY; without even the implied warranty of 13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 # GNU General Public License for more details. 15 # 16 # You should have received a copy of the GNU General Public License 17 # along with this program. If not, see <https://www.gnu.org/licenses/>. 18 # 19 20 21 import re 22 23 from django.utils.html import strip_tags 24 from django.utils.translation import gettext_lazy as _ 25 26 from weblate.checks.base import TargetCheck 27 from weblate.checks.data import SAME_BLACKLIST 28 from weblate.checks.format import ( 29 C_PRINTF_MATCH, 30 PHP_PRINTF_MATCH, 31 PYTHON_BRACE_MATCH, 32 PYTHON_PRINTF_MATCH, 33 ) 34 from weblate.checks.languages import LANGUAGES 35 from weblate.checks.qt import QT_FORMAT_MATCH, QT_PLURAL_MATCH 36 from weblate.checks.ruby import RUBY_FORMAT_MATCH 37 38 # Email address to ignore 39 EMAIL_RE = re.compile(r"[a-z0-9_.-]+@[a-z0-9_.-]+\.[a-z0-9-]{2,}", re.IGNORECASE) 40 41 URL_RE = re.compile( 42 r"(?:http|ftp)s?://" # http:// or https:// 43 r"(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+" 44 r"(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|" # domain... 45 r"localhost|" # localhost... 46 r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})" # ...or ip 47 r"(?::\d+)?" 
# optional port 48 r"(?:/?|[/?]\S+)$", 49 re.IGNORECASE, 50 ) 51 52 HASH_RE = re.compile(r"#[A-Za-z0-9_-]*") 53 54 DOMAIN_RE = re.compile( 55 r"(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+" 56 r"(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)", 57 re.IGNORECASE, 58 ) 59 60 PATH_RE = re.compile(r"(^|[ ])(/[a-zA-Z0-9=:?._-]+)+") 61 62 TEMPLATE_RE = re.compile(r"{[a-z_-]+}|@[A-Z_]@", re.IGNORECASE) 63 64 RST_MATCH = re.compile(r"(?::(ref|config:option|file|guilabel):`[^`]+`|``[^`]+``)") 65 66 SPLIT_RE = re.compile( 67 r"(?:\&(?:nbsp|rsaquo|lt|gt|amp|ldquo|rdquo|times|quot);|" 68 + r'[() ,.^`"\'\\/_<>!?;:|{}*^@%#&~=+\r\n✓—‑…\[\]0-9-])+', 69 re.IGNORECASE, 70 ) 71 72 EMOJI_RE = re.compile("[\U00002600-\U000027BF]|[\U0001f000-\U0001fffd]") 73 74 # Docbook tags to ignore 75 DB_TAGS = ("screen", "indexterm", "programlisting") 76 77 78 def strip_format(msg, flags): 79 """Remove format strings from the strings. 80 81 These are quite often not changed by translators. 82 """ 83 if "python-format" in flags: 84 regex = PYTHON_PRINTF_MATCH 85 elif "python-brace-format" in flags: 86 regex = PYTHON_BRACE_MATCH 87 elif "php-format" in flags: 88 regex = PHP_PRINTF_MATCH 89 elif "c-format" in flags: 90 regex = C_PRINTF_MATCH 91 elif "qt-format" in flags: 92 regex = QT_FORMAT_MATCH 93 elif "qt-plural-format" in flags: 94 regex = QT_PLURAL_MATCH 95 elif "ruby-format" in flags: 96 regex = RUBY_FORMAT_MATCH 97 elif "rst-text" in flags: 98 regex = RST_MATCH 99 else: 100 return msg 101 stripped = regex.sub("", msg) 102 return stripped 103 104 105 def strip_string(msg, flags): 106 """Strip (usually) not translated parts from the string.""" 107 # Strip HTML markup 108 stripped = strip_tags(msg) 109 110 # Strip format strings 111 stripped = strip_format(stripped, flags) 112 113 # Remove emojis 114 stripped = EMOJI_RE.sub(" ", stripped) 115 116 # Remove email addresses 117 stripped = EMAIL_RE.sub("", stripped) 118 119 # Strip full URLs 120 stripped = URL_RE.sub("", stripped) 121 122 # Strip hash tags / IRC channels 123 stripped = HASH_RE.sub("", stripped) 124 125 # Strip domain names/URLs 126 stripped = DOMAIN_RE.sub("", stripped) 127 128 # Strip file/URL paths 129 stripped = PATH_RE.sub("", stripped) 130 131 # Strip template markup 132 stripped = TEMPLATE_RE.sub("", stripped) 133 134 # Cleanup trailing/leading chars 135 return stripped 136 137 138 def test_word(word): 139 """Test whether word should be ignored.""" 140 return len(word) <= 2 or word in SAME_BLACKLIST or word in LANGUAGES 141 142 143 class SameCheck(TargetCheck): 144 """Check for not translated entries.""" 145 146 check_id = "same" 147 name = _("Unchanged translation") 148 description = _("Source and translation are identical") 149 severity = "warning" 150 151 def should_ignore(self, source, unit): 152 """Check whether given unit should be ignored.""" 153 if "strict-same" in unit.all_flags: 154 return False 155 # Ignore some docbook tags 156 if unit.note.startswith("Tag: ") and unit.note[5:] in DB_TAGS: 157 return True 158 159 # Lower case source 160 lower_source = source.lower() 161 162 # Check special things like 1:4 1/2 or copyright 163 if ( 164 len(source.strip("0123456789:/,.")) <= 1 165 or "(c) copyright" in lower_source 166 or "©" in source 167 ): 168 return True 169 # Strip format strings 170 stripped = strip_string(source, unit.all_flags) 171 172 # Ignore strings which don't contain any string to translate 173 # or just single letter (usually unit or something like that) 174 # or are whole uppercase (abbreviations) 175 if len(stripped) <= 1 or 
stripped.isupper(): 176 return True 177 # Check if we have any word which is not in blacklist 178 # (words which are often same in foreign language) 179 for word in SPLIT_RE.split(stripped.lower()): 180 if not test_word(word): 181 return False 182 return True 183 184 def should_skip(self, unit): 185 if super().should_skip(unit): 186 return True 187 188 source_language = unit.translation.component.project.source_language.base_code 189 190 # Ignore the check for source language, 191 # English variants will have most things not translated 192 # Interlingua is also quite often similar to English 193 if self.is_language(unit, source_language) or ( 194 source_language == "en" and self.is_language(unit, ("en", "ia")) 195 ): 196 return True 197 198 return False 199 200 def check_single(self, source, target, unit): 201 # One letter things are usually labels or decimal/thousand separators 202 if len(source) <= 1 and len(target) <= 1: 203 return False 204 205 # Check for ignoring 206 if self.should_ignore(source, unit): 207 return False 208 209 return source == target 210 [end of weblate/checks/same.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/weblate/checks/same.py b/weblate/checks/same.py --- a/weblate/checks/same.py +++ b/weblate/checks/same.py @@ -140,6 +140,17 @@ return len(word) <= 2 or word in SAME_BLACKLIST or word in LANGUAGES +def strip_placeholders(msg, unit): + + return re.sub( + "|".join( + re.escape(param) for param in unit.all_flags.get_value("placeholders") + ), + "", + msg, + ) + + class SameCheck(TargetCheck): """Check for not translated entries.""" @@ -169,6 +180,10 @@ # Strip format strings stripped = strip_string(source, unit.all_flags) + # Strip placeholder strings + if "placeholders" in unit.all_flags: + stripped = strip_placeholders(stripped, unit) + # Ignore strings which don't contain any string to translate # or just single letter (usually unit or something like that) # or are whole uppercase (abbreviations)
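Because the new helper is just `re.sub` over the escaped placeholder list, its effect can be shown without Weblate's flag/unit machinery. The following is a standalone illustration of the same approach — the placeholder strings and sample message are invented, and no Weblate API is used:

```python
import re

def strip_placeholders(msg, placeholders):
    # Escape each placeholder literally and delete every occurrence,
    # mirroring the approach the patch takes before the "unchanged" test.
    return re.sub("|".join(re.escape(p) for p in placeholders), "", msg)

placeholders = ["%USERNAME%", "{count}"]
source = "%USERNAME% has {count} unread messages"

print(strip_placeholders(source, placeholders))
# " has  unread messages" -- only translatable words remain, so a string that
# is "identical" purely because of its placeholders no longer trips the check.
```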
{"golden_diff": "diff --git a/weblate/checks/same.py b/weblate/checks/same.py\n--- a/weblate/checks/same.py\n+++ b/weblate/checks/same.py\n@@ -140,6 +140,17 @@\n return len(word) <= 2 or word in SAME_BLACKLIST or word in LANGUAGES\n \n \n+def strip_placeholders(msg, unit):\n+\n+ return re.sub(\n+ \"|\".join(\n+ re.escape(param) for param in unit.all_flags.get_value(\"placeholders\")\n+ ),\n+ \"\",\n+ msg,\n+ )\n+\n+\n class SameCheck(TargetCheck):\n \"\"\"Check for not translated entries.\"\"\"\n \n@@ -169,6 +180,10 @@\n # Strip format strings\n stripped = strip_string(source, unit.all_flags)\n \n+ # Strip placeholder strings\n+ if \"placeholders\" in unit.all_flags:\n+ stripped = strip_placeholders(stripped, unit)\n+\n # Ignore strings which don't contain any string to translate\n # or just single letter (usually unit or something like that)\n # or are whole uppercase (abbreviations)\n", "issue": "Unchanged check should honor placeholders\n**Is your feature request related to a problem? Please describe.**\r\nIt is possible to define placeholders using [`placeholder:` flag](https://docs.weblate.org/en/latest/admin/checks.html#customizing-behavior). The [Unchanged translation check](https://docs.weblate.org/en/latest/user/checks.html#unchanged-translation) should deal with those correctly.\r\n\r\n**Describe the solution you'd like**\r\nThe check strip custom placeholders from the string similarly like we do with format strings:\r\n\r\nhttps://github.com/WeblateOrg/weblate/blob/ddfbd298a31a95229fa4487cfb2a5a4878da7b2b/weblate/checks/same.py#L107-L137\r\n\r\n**Additional context**\r\n\r\nGetting `placeholder:` flag value:\r\n\r\nhttps://github.com/WeblateOrg/weblate/blob/256fa9370edc90c2404df6178d1daa002d441e2f/weblate/checks/base.py#L179\r\n\r\nBuilding regexp for placeholders:\r\n\r\nhttps://github.com/WeblateOrg/weblate/blob/256fa9370edc90c2404df6178d1daa002d441e2f/weblate/checks/placeholders.py#L57 \r\n\r\n<bountysource-plugin>\r\n\r\n---\r\nWant to back this issue? **[Post a bounty on it!](https://www.bountysource.com/issues/88807457-unchanged-check-should-honor-placeholders?utm_campaign=plugin&utm_content=tracker%2F253393&utm_medium=issues&utm_source=github)** We accept bounties via [Bountysource](https://www.bountysource.com/?utm_campaign=plugin&utm_content=tracker%2F253393&utm_medium=issues&utm_source=github).\r\n</bountysource-plugin>\n", "before_files": [{"content": "#\n# Copyright \u00a9 2012 - 2020 Michal \u010ciha\u0159 <[email protected]>\n#\n# This file is part of Weblate <https://weblate.org/>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. 
If not, see <https://www.gnu.org/licenses/>.\n#\n\n\nimport re\n\nfrom django.utils.html import strip_tags\nfrom django.utils.translation import gettext_lazy as _\n\nfrom weblate.checks.base import TargetCheck\nfrom weblate.checks.data import SAME_BLACKLIST\nfrom weblate.checks.format import (\n C_PRINTF_MATCH,\n PHP_PRINTF_MATCH,\n PYTHON_BRACE_MATCH,\n PYTHON_PRINTF_MATCH,\n)\nfrom weblate.checks.languages import LANGUAGES\nfrom weblate.checks.qt import QT_FORMAT_MATCH, QT_PLURAL_MATCH\nfrom weblate.checks.ruby import RUBY_FORMAT_MATCH\n\n# Email address to ignore\nEMAIL_RE = re.compile(r\"[a-z0-9_.-]+@[a-z0-9_.-]+\\.[a-z0-9-]{2,}\", re.IGNORECASE)\n\nURL_RE = re.compile(\n r\"(?:http|ftp)s?://\" # http:// or https://\n r\"(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\\.)+\"\n r\"(?:[A-Z]{2,6}\\.?|[A-Z0-9-]{2,}\\.?)|\" # domain...\n r\"localhost|\" # localhost...\n r\"\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3})\" # ...or ip\n r\"(?::\\d+)?\" # optional port\n r\"(?:/?|[/?]\\S+)$\",\n re.IGNORECASE,\n)\n\nHASH_RE = re.compile(r\"#[A-Za-z0-9_-]*\")\n\nDOMAIN_RE = re.compile(\n r\"(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\\.)+\"\n r\"(?:[A-Z]{2,6}\\.?|[A-Z0-9-]{2,}\\.?)\",\n re.IGNORECASE,\n)\n\nPATH_RE = re.compile(r\"(^|[ ])(/[a-zA-Z0-9=:?._-]+)+\")\n\nTEMPLATE_RE = re.compile(r\"{[a-z_-]+}|@[A-Z_]@\", re.IGNORECASE)\n\nRST_MATCH = re.compile(r\"(?::(ref|config:option|file|guilabel):`[^`]+`|``[^`]+``)\")\n\nSPLIT_RE = re.compile(\n r\"(?:\\&(?:nbsp|rsaquo|lt|gt|amp|ldquo|rdquo|times|quot);|\"\n + r'[() ,.^`\"\\'\\\\/_<>!?;:|{}*^@%#&~=+\\r\\n\u2713\u2014\u2011\u2026\\[\\]0-9-])+',\n re.IGNORECASE,\n)\n\nEMOJI_RE = re.compile(\"[\\U00002600-\\U000027BF]|[\\U0001f000-\\U0001fffd]\")\n\n# Docbook tags to ignore\nDB_TAGS = (\"screen\", \"indexterm\", \"programlisting\")\n\n\ndef strip_format(msg, flags):\n \"\"\"Remove format strings from the strings.\n\n These are quite often not changed by translators.\n \"\"\"\n if \"python-format\" in flags:\n regex = PYTHON_PRINTF_MATCH\n elif \"python-brace-format\" in flags:\n regex = PYTHON_BRACE_MATCH\n elif \"php-format\" in flags:\n regex = PHP_PRINTF_MATCH\n elif \"c-format\" in flags:\n regex = C_PRINTF_MATCH\n elif \"qt-format\" in flags:\n regex = QT_FORMAT_MATCH\n elif \"qt-plural-format\" in flags:\n regex = QT_PLURAL_MATCH\n elif \"ruby-format\" in flags:\n regex = RUBY_FORMAT_MATCH\n elif \"rst-text\" in flags:\n regex = RST_MATCH\n else:\n return msg\n stripped = regex.sub(\"\", msg)\n return stripped\n\n\ndef strip_string(msg, flags):\n \"\"\"Strip (usually) not translated parts from the string.\"\"\"\n # Strip HTML markup\n stripped = strip_tags(msg)\n\n # Strip format strings\n stripped = strip_format(stripped, flags)\n\n # Remove emojis\n stripped = EMOJI_RE.sub(\" \", stripped)\n\n # Remove email addresses\n stripped = EMAIL_RE.sub(\"\", stripped)\n\n # Strip full URLs\n stripped = URL_RE.sub(\"\", stripped)\n\n # Strip hash tags / IRC channels\n stripped = HASH_RE.sub(\"\", stripped)\n\n # Strip domain names/URLs\n stripped = DOMAIN_RE.sub(\"\", stripped)\n\n # Strip file/URL paths\n stripped = PATH_RE.sub(\"\", stripped)\n\n # Strip template markup\n stripped = TEMPLATE_RE.sub(\"\", stripped)\n\n # Cleanup trailing/leading chars\n return stripped\n\n\ndef test_word(word):\n \"\"\"Test whether word should be ignored.\"\"\"\n return len(word) <= 2 or word in SAME_BLACKLIST or word in LANGUAGES\n\n\nclass SameCheck(TargetCheck):\n \"\"\"Check for not translated entries.\"\"\"\n\n check_id = \"same\"\n name = _(\"Unchanged translation\")\n 
description = _(\"Source and translation are identical\")\n severity = \"warning\"\n\n def should_ignore(self, source, unit):\n \"\"\"Check whether given unit should be ignored.\"\"\"\n if \"strict-same\" in unit.all_flags:\n return False\n # Ignore some docbook tags\n if unit.note.startswith(\"Tag: \") and unit.note[5:] in DB_TAGS:\n return True\n\n # Lower case source\n lower_source = source.lower()\n\n # Check special things like 1:4 1/2 or copyright\n if (\n len(source.strip(\"0123456789:/,.\")) <= 1\n or \"(c) copyright\" in lower_source\n or \"\u00a9\" in source\n ):\n return True\n # Strip format strings\n stripped = strip_string(source, unit.all_flags)\n\n # Ignore strings which don't contain any string to translate\n # or just single letter (usually unit or something like that)\n # or are whole uppercase (abbreviations)\n if len(stripped) <= 1 or stripped.isupper():\n return True\n # Check if we have any word which is not in blacklist\n # (words which are often same in foreign language)\n for word in SPLIT_RE.split(stripped.lower()):\n if not test_word(word):\n return False\n return True\n\n def should_skip(self, unit):\n if super().should_skip(unit):\n return True\n\n source_language = unit.translation.component.project.source_language.base_code\n\n # Ignore the check for source language,\n # English variants will have most things not translated\n # Interlingua is also quite often similar to English\n if self.is_language(unit, source_language) or (\n source_language == \"en\" and self.is_language(unit, (\"en\", \"ia\"))\n ):\n return True\n\n return False\n\n def check_single(self, source, target, unit):\n # One letter things are usually labels or decimal/thousand separators\n if len(source) <= 1 and len(target) <= 1:\n return False\n\n # Check for ignoring\n if self.should_ignore(source, unit):\n return False\n\n return source == target\n", "path": "weblate/checks/same.py"}]}
3,297
252
gh_patches_debug_5509
rasdani/github-patches
git_diff
mathesar-foundation__mathesar-788
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Install.py script assumes database to exist ## Description The install.py script has a hard check on the hostname to check if we're using the docker created postgres container and only creates the databases if we do. `if DATABASES[database_key]["HOST"] == "db":` In https://github.com/centerofci/mathesar/pull/747, we gave our containers persistent names. The db container's name was set as `mathesar_db`. This leads to all fresh setup of mathesar to fail. ## Expected behavior Following the instructions in readme should work without any error. ## To Reproduce <!-- How can we recreate this bug? Please try to provide a Minimal, Complete, and Verifiable (http://stackoverflow.com/help/mcve) example if code-related. --> * Stop mathesar, if you have it running * Remove your database * Start mathesar and run migrations and install.py script </issue> <code> [start of install.py] 1 """ 2 This script installs functions and types for Mathesar onto the configured DB. 3 """ 4 from config.settings import DATABASES 5 from db import install 6 import sys 7 import getopt 8 9 10 def main(): 11 skip_confirm = False 12 (opts, _) = getopt.getopt(sys.argv[1:], ":s", ["skip-confirm"]) 13 for (opt, value) in opts: 14 if (opt == "-s") or (opt == "--skip-confirm"): 15 skip_confirm = True 16 for database_key in [key for key in DATABASES if key != "default"]: 17 install_on_db_with_key(database_key, skip_confirm) 18 19 20 def install_on_db_with_key(database_key, skip_confirm): 21 if DATABASES[database_key]["HOST"] == "db": 22 # if we're going to install on the docker-created Postgres, we'll 23 # create the DB 24 print("Creating Mathesar DB on docker-created PostgreSQL instance") 25 install.create_mathesar_database( 26 DATABASES[database_key]["NAME"], 27 DATABASES["default"]["USER"], 28 DATABASES["default"]["PASSWORD"], 29 DATABASES["default"]["HOST"], 30 DATABASES["default"]["NAME"], 31 DATABASES["default"]["PORT"], 32 ) 33 print(f"Created DB is {DATABASES['mathesar_tables']['NAME']}") 34 else: 35 # if we're installing anywhere else, we require the DB to exist in 36 # advance. 37 username = DATABASES[database_key]["USER"] 38 password = DATABASES[database_key]["PASSWORD"] 39 host = DATABASES[database_key]["HOST"] 40 db_name = DATABASES[database_key]["NAME"] 41 port = DATABASES[database_key]["PORT"] 42 print("Installing Mathesar DB on preexisting PostgreSQL instance...") 43 if skip_confirm is False: 44 confirmation = input( 45 f"Mathesar will be installed on DB {db_name} at host {host}." 46 "Confirm? (y/n) > " 47 ) 48 if (confirmation.lower() in ["y", "yes"]) or (skip_confirm is True): 49 print("Installing...") 50 install.install_mathesar_on_preexisting_database( 51 username, 52 password, 53 host, 54 db_name, 55 port, 56 ) 57 else: 58 print("Skipping DB with key {database_key}.") 59 60 61 if __name__ == "__main__": 62 main() 63 [end of install.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/install.py b/install.py --- a/install.py +++ b/install.py @@ -18,7 +18,7 @@ def install_on_db_with_key(database_key, skip_confirm): - if DATABASES[database_key]["HOST"] == "db": + if DATABASES[database_key]["HOST"] == "mathesar_db": # if we're going to install on the docker-created Postgres, we'll # create the DB print("Creating Mathesar DB on docker-created PostgreSQL instance")
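The fix keeps a literal hostname comparison, only updated to the renamed container, so another rename of the db service would break installs the same way. A more defensive variant — shown here purely as a sketch, not something the patch or Mathesar currently provides — would read the expected docker hostname from the environment, defaulting to the current container name:

```python
import os

# MATHESAR_DOCKER_DB_HOST is a hypothetical knob invented for this sketch;
# it defaults to the container name introduced by the rename ("mathesar_db").
DOCKER_DB_HOST = os.environ.get("MATHESAR_DOCKER_DB_HOST", "mathesar_db")

def is_docker_created_db(database_key, databases):
    return databases[database_key]["HOST"] == DOCKER_DB_HOST
```

`install_on_db_with_key()` could then call `is_docker_created_db(database_key, DATABASES)` instead of comparing against a hard-coded string.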
{"golden_diff": "diff --git a/install.py b/install.py\n--- a/install.py\n+++ b/install.py\n@@ -18,7 +18,7 @@\n \n \n def install_on_db_with_key(database_key, skip_confirm):\n- if DATABASES[database_key][\"HOST\"] == \"db\":\n+ if DATABASES[database_key][\"HOST\"] == \"mathesar_db\":\n # if we're going to install on the docker-created Postgres, we'll\n # create the DB\n print(\"Creating Mathesar DB on docker-created PostgreSQL instance\")\n", "issue": "Install.py script assumes database to exist\n## Description\r\nThe install.py script has a hard check on the hostname to check if we're using the docker created postgres container and only creates the databases if we do.\r\n\r\n`if DATABASES[database_key][\"HOST\"] == \"db\":`\r\n\r\nIn https://github.com/centerofci/mathesar/pull/747, we gave our containers persistent names. The db container's name was set as `mathesar_db`.\r\n\r\nThis leads to all fresh setup of mathesar to fail.\r\n\r\n## Expected behavior\r\nFollowing the instructions in readme should work without any error.\r\n\r\n## To Reproduce\r\n<!-- How can we recreate this bug? Please try to provide a Minimal, Complete, and Verifiable (http://stackoverflow.com/help/mcve) example if code-related. -->\r\n* Stop mathesar, if you have it running\r\n* Remove your database\r\n* Start mathesar and run migrations and install.py script \n", "before_files": [{"content": "\"\"\"\nThis script installs functions and types for Mathesar onto the configured DB.\n\"\"\"\nfrom config.settings import DATABASES\nfrom db import install\nimport sys\nimport getopt\n\n\ndef main():\n skip_confirm = False\n (opts, _) = getopt.getopt(sys.argv[1:], \":s\", [\"skip-confirm\"])\n for (opt, value) in opts:\n if (opt == \"-s\") or (opt == \"--skip-confirm\"):\n skip_confirm = True\n for database_key in [key for key in DATABASES if key != \"default\"]:\n install_on_db_with_key(database_key, skip_confirm)\n\n\ndef install_on_db_with_key(database_key, skip_confirm):\n if DATABASES[database_key][\"HOST\"] == \"db\":\n # if we're going to install on the docker-created Postgres, we'll\n # create the DB\n print(\"Creating Mathesar DB on docker-created PostgreSQL instance\")\n install.create_mathesar_database(\n DATABASES[database_key][\"NAME\"],\n DATABASES[\"default\"][\"USER\"],\n DATABASES[\"default\"][\"PASSWORD\"],\n DATABASES[\"default\"][\"HOST\"],\n DATABASES[\"default\"][\"NAME\"],\n DATABASES[\"default\"][\"PORT\"],\n )\n print(f\"Created DB is {DATABASES['mathesar_tables']['NAME']}\")\n else:\n # if we're installing anywhere else, we require the DB to exist in\n # advance.\n username = DATABASES[database_key][\"USER\"]\n password = DATABASES[database_key][\"PASSWORD\"]\n host = DATABASES[database_key][\"HOST\"]\n db_name = DATABASES[database_key][\"NAME\"]\n port = DATABASES[database_key][\"PORT\"]\n print(\"Installing Mathesar DB on preexisting PostgreSQL instance...\")\n if skip_confirm is False:\n confirmation = input(\n f\"Mathesar will be installed on DB {db_name} at host {host}.\"\n \"Confirm? (y/n) > \"\n )\n if (confirmation.lower() in [\"y\", \"yes\"]) or (skip_confirm is True):\n print(\"Installing...\")\n install.install_mathesar_on_preexisting_database(\n username,\n password,\n host,\n db_name,\n port,\n )\n else:\n print(\"Skipping DB with key {database_key}.\")\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "install.py"}]}
1,327
114
gh_patches_debug_1109
rasdani/github-patches
git_diff
flairNLP__flair-435
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Cannot install allennlp due to matplotlib dependency conflict Hello, thanks for the great package. I want to play with ELMoEmbeddings, which requires package allennlp, not installed by default with Flair. However, installing latest allennlp fails because it requires matplotlib==2.2.3, while Flair requires >=3.0.0. When I install allennlp==0.7.0 (without the matplotlib requirement), it still fails because of PyTorch conflicts. Makes me wonder what versions I should use that are actually compatible? </issue> <code> [start of setup.py] 1 from setuptools import setup, find_packages 2 3 setup( 4 name='flair', 5 version='0.4.0', 6 description='A very simple framework for state-of-the-art NLP', 7 long_description=open("README.md", encoding='utf-8').read(), 8 long_description_content_type="text/markdown", 9 author='Alan Akbik', 10 author_email='[email protected]', 11 url='https://github.com/zalandoresearch/flair', 12 packages=find_packages(exclude='test'), # same as name 13 license='MIT', 14 install_requires=[ 15 'torch>=1.0.0', 16 'gensim>=3.4.0', 17 'tqdm>=4.26.0', 18 'segtok>=1.5.7', 19 'matplotlib>=3.0.0', 20 'mpld3>=0.3', 21 'sklearn', 22 'sqlitedict>=1.6.0', 23 'deprecated>=1.2.4', 24 'hyperopt>=0.1.1', 25 'pytorch-pretrained-bert>=0.3.0' 26 ], 27 include_package_data=True, 28 python_requires='>=3.6', 29 ) 30 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -16,7 +16,7 @@ 'gensim>=3.4.0', 'tqdm>=4.26.0', 'segtok>=1.5.7', - 'matplotlib>=3.0.0', + 'matplotlib>=2.2.3', 'mpld3>=0.3', 'sklearn', 'sqlitedict>=1.6.0',
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -16,7 +16,7 @@\n 'gensim>=3.4.0',\n 'tqdm>=4.26.0',\n 'segtok>=1.5.7',\n- 'matplotlib>=3.0.0',\n+ 'matplotlib>=2.2.3',\n 'mpld3>=0.3',\n 'sklearn',\n 'sqlitedict>=1.6.0',\n", "issue": "Cannot install allennlp due to matplotlib dependency conflict\nHello, thanks for the great package. I want to play with ELMoEmbeddings, which requires package allennlp, not installed by default with Flair. However, installing latest allennlp fails because it requires matplotlib==2.2.3, while Flair requires >=3.0.0. When I install allennlp==0.7.0 (without the matplotlib requirement), it still fails because of PyTorch conflicts. Makes me wonder what versions I should use that are actually compatible?\r\n\n", "before_files": [{"content": "from setuptools import setup, find_packages\n\nsetup(\n name='flair',\n version='0.4.0',\n description='A very simple framework for state-of-the-art NLP',\n long_description=open(\"README.md\", encoding='utf-8').read(),\n long_description_content_type=\"text/markdown\",\n author='Alan Akbik',\n author_email='[email protected]',\n url='https://github.com/zalandoresearch/flair',\n packages=find_packages(exclude='test'), # same as name\n license='MIT',\n install_requires=[\n 'torch>=1.0.0',\n 'gensim>=3.4.0',\n 'tqdm>=4.26.0',\n 'segtok>=1.5.7',\n 'matplotlib>=3.0.0',\n 'mpld3>=0.3',\n 'sklearn',\n 'sqlitedict>=1.6.0',\n 'deprecated>=1.2.4',\n 'hyperopt>=0.1.1',\n 'pytorch-pretrained-bert>=0.3.0'\n ],\n include_package_data=True,\n python_requires='>=3.6',\n)\n", "path": "setup.py"}]}
962
117
gh_patches_debug_11244
rasdani/github-patches
git_diff
scikit-hep__awkward-1401
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> `with_name` fails for `ak._v2.Record` ### Version of Awkward Array 278dccee ### Description and code to reproduce ```python import awkward._v2 as ak ak.Record({'x': 10.0}, with_name="X") ``` </issue> <code> [start of src/awkward/_v2/record.py] 1 # BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE 2 3 import copy 4 from collections.abc import Iterable 5 6 import awkward as ak 7 from awkward._v2.contents.content import Content 8 9 np = ak.nplike.NumpyMetadata.instance() 10 11 12 class Record: 13 def __init__(self, array, at): 14 if not isinstance(array, ak._v2.contents.recordarray.RecordArray): 15 raise ak._v2._util.error( 16 TypeError(f"Record 'array' must be a RecordArray, not {array!r}") 17 ) 18 if not ak._util.isint(at): 19 raise ak._v2._util.error( 20 TypeError(f"Record 'at' must be an integer, not {array!r}") 21 ) 22 if at < 0 or at >= array.length: 23 raise ak._v2._util.error( 24 ValueError( 25 f"Record 'at' must be >= 0 and < len(array) == {array.length}, not {at}" 26 ) 27 ) 28 else: 29 self._array = array 30 self._at = at 31 32 @property 33 def array(self): 34 return self._array 35 36 @property 37 def at(self): 38 return self._at 39 40 @property 41 def fields(self): 42 return self._array.fields 43 44 @property 45 def is_tuple(self): 46 return self._array.is_tuple 47 48 @property 49 def as_tuple(self): 50 return Record(self._array.as_tuple, self._at) 51 52 @property 53 def contents(self): 54 out = [] 55 for field in self._array.fields: 56 out.append(self._array[field][self._at]) 57 return out 58 59 def content(self, index_or_field): 60 return self._array.content(index_or_field)[self._at] 61 62 def __repr__(self): 63 return self._repr("", "", "") 64 65 def _repr(self, indent, pre, post): 66 out = [indent, pre, "<Record at="] 67 out.append(repr(str(self._at))) 68 out.append(">\n") 69 out.append(self._array._repr(indent + " ", "<array>", "</array>\n")) 70 out.append(indent) 71 out.append("</Record>") 72 out.append(post) 73 return "".join(out) 74 75 def validityerror(self, path="layout.array"): 76 return self._array.validityerror(path) 77 78 @property 79 def parameters(self): 80 return self._array.parameters 81 82 def parameter(self, key): 83 return self._array.parameter(key) 84 85 def purelist_parameter(self, key): 86 return self._array.purelist_parameter(key) 87 88 @property 89 def purelist_isregular(self): 90 return self._array.purelist_isregular 91 92 @property 93 def purelist_depth(self): 94 return 0 95 96 @property 97 def minmax_depth(self): 98 mindepth, maxdepth = self._array.minmax_depth 99 return mindepth - 1, maxdepth - 1 100 101 @property 102 def branch_depth(self): 103 branch, depth = self._array.branch_depth 104 return branch, depth - 1 105 106 def __getitem__(self, where): 107 with ak._v2._util.SlicingErrorContext(self, where): 108 return self._getitem(where) 109 110 def _getitem(self, where): 111 if ak._util.isint(where): 112 raise ak._v2._util.error( 113 IndexError("scalar Record cannot be sliced by an integer") 114 ) 115 116 elif isinstance(where, slice): 117 raise ak._v2._util.error( 118 IndexError("scalar Record cannot be sliced by a range slice (`:`)") 119 ) 120 121 elif ak._util.isstr(where): 122 return self._getitem_field(where) 123 124 elif where is np.newaxis: 125 raise ak._v2._util.error( 126 IndexError("scalar Record cannot be sliced by np.newaxis (`None`)") 127 ) 128 129 elif where is Ellipsis: 130 raise 
ak._v2._util.error( 131 IndexError("scalar Record cannot be sliced by an ellipsis (`...`)") 132 ) 133 134 elif isinstance(where, tuple) and len(where) == 0: 135 return self 136 137 elif isinstance(where, tuple) and len(where) == 1: 138 return self._getitem(where[0]) 139 140 elif isinstance(where, tuple) and ak._util.isstr(where[0]): 141 return self._getitem_field(where[0])._getitem(where[1:]) 142 143 elif isinstance(where, ak.highlevel.Array): 144 raise ak._v2._util.error( 145 IndexError("scalar Record cannot be sliced by an array") 146 ) 147 148 elif isinstance(where, ak.layout.Content): 149 raise ak._v2._util.error( 150 IndexError("scalar Record cannot be sliced by an array") 151 ) 152 153 elif isinstance(where, Content): 154 raise ak._v2._util.error( 155 IndexError("scalar Record cannot be sliced by an array") 156 ) 157 158 elif isinstance(where, Iterable) and all(ak._util.isstr(x) for x in where): 159 return self._getitem_fields(where) 160 161 elif isinstance(where, Iterable): 162 raise ak._v2._util.error( 163 IndexError("scalar Record cannot be sliced by an array") 164 ) 165 166 else: 167 raise ak._v2._util.error( 168 TypeError( 169 "only field name (str) or names (non-tuple iterable of str) " 170 "are valid indices for slicing a scalar record, not\n\n " 171 + repr(where) 172 ) 173 ) 174 175 def _getitem_field(self, where): 176 return self._array._getitem_field(where)._getitem_at(self._at) 177 178 def _getitem_fields(self, where): 179 return self._array._getitem_fields(where)._getitem_at(self._at) 180 181 def packed(self): 182 if self._array.length == 1: 183 return Record(self._array.packed(), self._at) 184 else: 185 return Record(self._array[self._at : self._at + 1].packed(), 0) 186 187 def to_list(self, behavior=None): 188 cls = ak._v2._util.recordclass(self._array, behavior) 189 if cls is not ak._v2.highlevel.Record: 190 return cls(self) 191 192 return self._array[self._at : self._at + 1].to_list(behavior)[0] 193 194 def deep_copy(self): 195 return Record(self._array.deep_copy(), copy.deepcopy(self._at)) 196 [end of src/awkward/_v2/record.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/awkward/_v2/record.py b/src/awkward/_v2/record.py --- a/src/awkward/_v2/record.py +++ b/src/awkward/_v2/record.py @@ -193,3 +193,29 @@ def deep_copy(self): return Record(self._array.deep_copy(), copy.deepcopy(self._at)) + + def recursively_apply( + self, + action, + depth_context=None, + lateral_context=None, + keep_parameters=True, + numpy_to_regular=True, + return_array=True, + function_name=None, + ): + + out = self._array.recursively_apply( + action, + depth_context, + lateral_context, + keep_parameters, + numpy_to_regular, + return_array, + function_name, + ) + + if return_array: + return Record(out, self._at) + else: + return None
{"golden_diff": "diff --git a/src/awkward/_v2/record.py b/src/awkward/_v2/record.py\n--- a/src/awkward/_v2/record.py\n+++ b/src/awkward/_v2/record.py\n@@ -193,3 +193,29 @@\n \n def deep_copy(self):\n return Record(self._array.deep_copy(), copy.deepcopy(self._at))\n+\n+ def recursively_apply(\n+ self,\n+ action,\n+ depth_context=None,\n+ lateral_context=None,\n+ keep_parameters=True,\n+ numpy_to_regular=True,\n+ return_array=True,\n+ function_name=None,\n+ ):\n+\n+ out = self._array.recursively_apply(\n+ action,\n+ depth_context,\n+ lateral_context,\n+ keep_parameters,\n+ numpy_to_regular,\n+ return_array,\n+ function_name,\n+ )\n+\n+ if return_array:\n+ return Record(out, self._at)\n+ else:\n+ return None\n", "issue": "`with_name` fails for `ak._v2.Record`\n### Version of Awkward Array\n\n278dccee\n\n### Description and code to reproduce\n\n```python\r\nimport awkward._v2 as ak\r\n\r\nak.Record({'x': 10.0}, with_name=\"X\")\r\n```\n", "before_files": [{"content": "# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE\n\nimport copy\nfrom collections.abc import Iterable\n\nimport awkward as ak\nfrom awkward._v2.contents.content import Content\n\nnp = ak.nplike.NumpyMetadata.instance()\n\n\nclass Record:\n def __init__(self, array, at):\n if not isinstance(array, ak._v2.contents.recordarray.RecordArray):\n raise ak._v2._util.error(\n TypeError(f\"Record 'array' must be a RecordArray, not {array!r}\")\n )\n if not ak._util.isint(at):\n raise ak._v2._util.error(\n TypeError(f\"Record 'at' must be an integer, not {array!r}\")\n )\n if at < 0 or at >= array.length:\n raise ak._v2._util.error(\n ValueError(\n f\"Record 'at' must be >= 0 and < len(array) == {array.length}, not {at}\"\n )\n )\n else:\n self._array = array\n self._at = at\n\n @property\n def array(self):\n return self._array\n\n @property\n def at(self):\n return self._at\n\n @property\n def fields(self):\n return self._array.fields\n\n @property\n def is_tuple(self):\n return self._array.is_tuple\n\n @property\n def as_tuple(self):\n return Record(self._array.as_tuple, self._at)\n\n @property\n def contents(self):\n out = []\n for field in self._array.fields:\n out.append(self._array[field][self._at])\n return out\n\n def content(self, index_or_field):\n return self._array.content(index_or_field)[self._at]\n\n def __repr__(self):\n return self._repr(\"\", \"\", \"\")\n\n def _repr(self, indent, pre, post):\n out = [indent, pre, \"<Record at=\"]\n out.append(repr(str(self._at)))\n out.append(\">\\n\")\n out.append(self._array._repr(indent + \" \", \"<array>\", \"</array>\\n\"))\n out.append(indent)\n out.append(\"</Record>\")\n out.append(post)\n return \"\".join(out)\n\n def validityerror(self, path=\"layout.array\"):\n return self._array.validityerror(path)\n\n @property\n def parameters(self):\n return self._array.parameters\n\n def parameter(self, key):\n return self._array.parameter(key)\n\n def purelist_parameter(self, key):\n return self._array.purelist_parameter(key)\n\n @property\n def purelist_isregular(self):\n return self._array.purelist_isregular\n\n @property\n def purelist_depth(self):\n return 0\n\n @property\n def minmax_depth(self):\n mindepth, maxdepth = self._array.minmax_depth\n return mindepth - 1, maxdepth - 1\n\n @property\n def branch_depth(self):\n branch, depth = self._array.branch_depth\n return branch, depth - 1\n\n def __getitem__(self, where):\n with ak._v2._util.SlicingErrorContext(self, where):\n return self._getitem(where)\n\n def _getitem(self, where):\n if 
ak._util.isint(where):\n raise ak._v2._util.error(\n IndexError(\"scalar Record cannot be sliced by an integer\")\n )\n\n elif isinstance(where, slice):\n raise ak._v2._util.error(\n IndexError(\"scalar Record cannot be sliced by a range slice (`:`)\")\n )\n\n elif ak._util.isstr(where):\n return self._getitem_field(where)\n\n elif where is np.newaxis:\n raise ak._v2._util.error(\n IndexError(\"scalar Record cannot be sliced by np.newaxis (`None`)\")\n )\n\n elif where is Ellipsis:\n raise ak._v2._util.error(\n IndexError(\"scalar Record cannot be sliced by an ellipsis (`...`)\")\n )\n\n elif isinstance(where, tuple) and len(where) == 0:\n return self\n\n elif isinstance(where, tuple) and len(where) == 1:\n return self._getitem(where[0])\n\n elif isinstance(where, tuple) and ak._util.isstr(where[0]):\n return self._getitem_field(where[0])._getitem(where[1:])\n\n elif isinstance(where, ak.highlevel.Array):\n raise ak._v2._util.error(\n IndexError(\"scalar Record cannot be sliced by an array\")\n )\n\n elif isinstance(where, ak.layout.Content):\n raise ak._v2._util.error(\n IndexError(\"scalar Record cannot be sliced by an array\")\n )\n\n elif isinstance(where, Content):\n raise ak._v2._util.error(\n IndexError(\"scalar Record cannot be sliced by an array\")\n )\n\n elif isinstance(where, Iterable) and all(ak._util.isstr(x) for x in where):\n return self._getitem_fields(where)\n\n elif isinstance(where, Iterable):\n raise ak._v2._util.error(\n IndexError(\"scalar Record cannot be sliced by an array\")\n )\n\n else:\n raise ak._v2._util.error(\n TypeError(\n \"only field name (str) or names (non-tuple iterable of str) \"\n \"are valid indices for slicing a scalar record, not\\n\\n \"\n + repr(where)\n )\n )\n\n def _getitem_field(self, where):\n return self._array._getitem_field(where)._getitem_at(self._at)\n\n def _getitem_fields(self, where):\n return self._array._getitem_fields(where)._getitem_at(self._at)\n\n def packed(self):\n if self._array.length == 1:\n return Record(self._array.packed(), self._at)\n else:\n return Record(self._array[self._at : self._at + 1].packed(), 0)\n\n def to_list(self, behavior=None):\n cls = ak._v2._util.recordclass(self._array, behavior)\n if cls is not ak._v2.highlevel.Record:\n return cls(self)\n\n return self._array[self._at : self._at + 1].to_list(behavior)[0]\n\n def deep_copy(self):\n return Record(self._array.deep_copy(), copy.deepcopy(self._at))\n", "path": "src/awkward/_v2/record.py"}]}
2,476
220
gh_patches_debug_35917
rasdani/github-patches
git_diff
lutris__lutris-1429
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Lutris takes extremely long time to parse very long string keys in registry If user.reg contains very long (we a talking megabytes long here) string key it takes a very long time to start anything in this wineprefix with lutris. With ~15MB key lutris was taking ~30 minutes. It happens before starting wine, in "preparing to start" phase. P.S. If you ask how does this happens - Battletech ny HBS writes some analytics into string key in registry. And it only adds to it, never cleans. </issue> <code> [start of lutris/util/wine/registry.py] 1 import os 2 import re 3 from collections import OrderedDict 4 from datetime import datetime 5 from lutris.util.log import logger 6 from lutris.util import system 7 from lutris.util.wine.wine import WINE_DEFAULT_ARCH 8 9 ( 10 REG_NONE, 11 REG_SZ, 12 REG_EXPAND_SZ, 13 REG_BINARY, 14 REG_DWORD, 15 REG_DWORD_BIG_ENDIAN, 16 REG_LINK, 17 REG_MULTI_SZ, 18 ) = range(8) 19 20 DATA_TYPES = { 21 '"': REG_SZ, 22 'str:"': REG_SZ, 23 'str(2):"': REG_EXPAND_SZ, 24 'str(7):"': REG_MULTI_SZ, 25 "hex": REG_BINARY, 26 "dword": REG_DWORD, 27 } 28 29 30 class WindowsFileTime: 31 """Utility class to deal with Windows FILETIME structures. 32 33 See: https://msdn.microsoft.com/en-us/library/ms724284(v=vs.85).aspx 34 """ 35 36 ticks_per_seconds = 10000000 # 1 tick every 100 nanoseconds 37 epoch_delta = 11644473600 # 3600 * 24 * ((1970 - 1601) * 365 + 89) 38 39 def __init__(self, timestamp=None): 40 self.timestamp = timestamp 41 42 def __repr__(self): 43 return "<{}>: {}".format(self.__class__.__name__, self.timestamp) 44 45 @classmethod 46 def from_hex(cls, hexvalue): 47 timestamp = int(hexvalue, 16) 48 return WindowsFileTime(timestamp) 49 50 def to_hex(self): 51 return "{:x}".format(self.timestamp) 52 53 @classmethod 54 def from_unix_timestamp(cls, timestamp): 55 timestamp = timestamp + cls.epoch_delta 56 timestamp = int(timestamp * cls.ticks_per_seconds) 57 return WindowsFileTime(timestamp) 58 59 def to_unix_timestamp(self): 60 if not self.timestamp: 61 raise ValueError("No timestamp set") 62 unix_ts = self.timestamp / self.ticks_per_seconds 63 unix_ts = unix_ts - self.epoch_delta 64 return unix_ts 65 66 def to_date_time(self): 67 return datetime.fromtimestamp(self.to_unix_timestamp()) 68 69 70 class WineRegistry: 71 version_header = "WINE REGISTRY Version " 72 relative_to_header = ";; All keys relative to " 73 74 def __init__(self, reg_filename=None): 75 self.arch = WINE_DEFAULT_ARCH 76 self.version = 2 77 self.relative_to = "\\\\User\\\\S-1-5-21-0-0-0-1000" 78 self.keys = OrderedDict() 79 self.reg_filename = reg_filename 80 if reg_filename: 81 if not system.path_exists(reg_filename): 82 logger.error("Unexisting registry %s", reg_filename) 83 self.parse_reg_file(reg_filename) 84 85 @property 86 def prefix_path(self): 87 """Return the Wine prefix path (where the .reg files are located)""" 88 if self.reg_filename: 89 return os.path.dirname(self.reg_filename) 90 91 @staticmethod 92 def get_raw_registry(reg_filename): 93 """Return an array of the unprocessed contents of a registry file""" 94 if not system.path_exists(reg_filename): 95 return [] 96 with open(reg_filename, "r") as reg_file: 97 98 try: 99 registry_content = reg_file.readlines() 100 except Exception: # pylint: disable=broad-except 101 logger.exception( 102 "Failed to registry read %s, please send attach this file in a bug report", 103 reg_filename 104 ) 105 registry_content = [] 106 return registry_content 107 
108 def parse_reg_file(self, reg_filename): 109 registry_lines = self.get_raw_registry(reg_filename) 110 current_key = None 111 add_next_to_value = False 112 for line in registry_lines: 113 line = line.rstrip("\n") # Remove trailing newlines 114 115 if line.startswith(self.version_header): 116 self.version = int(line[len(self.version_header):]) 117 continue 118 119 if line.startswith(self.relative_to_header): 120 self.relative_to = line[len(self.relative_to_header):] 121 continue 122 123 if line.startswith("#arch"): 124 self.arch = line.split("=")[1] 125 continue 126 127 if line.startswith("["): 128 current_key = WineRegistryKey(key_def=line) 129 self.keys[current_key.name] = current_key 130 continue 131 132 if current_key: 133 if add_next_to_value: 134 current_key.add_to_last(line) 135 else: 136 current_key.parse(line) 137 add_next_to_value = line.endswith("\\") 138 139 def render(self): 140 content = "{}{}\n".format(self.version_header, self.version) 141 content += "{}{}\n\n".format(self.relative_to_header, self.relative_to) 142 content += "#arch={}\n".format(self.arch) 143 for key in self.keys: 144 content += "\n" 145 content += self.keys[key].render() 146 return content 147 148 def save(self, path=None): 149 """Write the registry to a file""" 150 if not path: 151 path = self.reg_filename 152 if not path: 153 raise OSError("No filename provided") 154 with open(path, "w") as registry_file: 155 registry_file.write(self.render()) 156 157 def query(self, path, subkey): 158 key = self.keys.get(path) 159 if key: 160 return key.get_subkey(subkey) 161 162 def set_value(self, path, subkey, value): 163 key = self.keys.get(path) 164 if not key: 165 key = WineRegistryKey(path=path) 166 self.keys[key.name] = key 167 key.set_subkey(subkey, value) 168 169 def clear_key(self, path): 170 """Removes all subkeys from a key""" 171 key = self.keys.get(path) 172 if not key: 173 return 174 key.subkeys.clear() 175 176 def clear_subkeys(self, path, keys): 177 """Remove some subkeys from a key""" 178 key = self.keys.get(path) 179 if not key: 180 return 181 for subkey in list(key.subkeys.keys()): 182 if subkey not in keys: 183 continue 184 key.subkeys.pop(subkey) 185 186 def get_unix_path(self, windows_path): 187 windows_path = windows_path.replace("\\\\", "/") 188 if not self.prefix_path: 189 return 190 drives_path = os.path.join(self.prefix_path, "dosdevices") 191 if not system.path_exists(drives_path): 192 return 193 letter, relpath = windows_path.split(":", 1) 194 relpath = relpath.strip("/") 195 drive_link = os.path.join(drives_path, letter.lower() + ":") 196 try: 197 drive_path = os.readlink(drive_link) 198 except FileNotFoundError: 199 logger.error("Unable to read link for %s", drive_link) 200 return 201 202 if not os.path.isabs(drive_path): 203 drive_path = os.path.join(drives_path, drive_path) 204 return os.path.join(drive_path, relpath) 205 206 207 class WineRegistryKey: 208 def __init__(self, key_def=None, path=None): 209 210 self.subkeys = OrderedDict() 211 self.metas = OrderedDict() 212 213 if path: 214 # Key is created by path, it's a new key 215 timestamp = datetime.now().timestamp() 216 self.name = path 217 self.raw_name = "[{}]".format(path.replace("/", "\\\\")) 218 self.raw_timestamp = " ".join(str(timestamp).split(".")) 219 220 windows_timestamp = WindowsFileTime.from_unix_timestamp(timestamp) 221 self.metas["time"] = windows_timestamp.to_hex() 222 else: 223 # Existing key loaded from file 224 self.raw_name, self.raw_timestamp = re.split( 225 re.compile(r"(?<=[^\\]\]) "), key_def, maxsplit=1 
226 ) 227 self.name = self.raw_name.replace("\\\\", "/").strip("[]") 228 229 # Parse timestamp either as int or float 230 ts_parts = self.raw_timestamp.strip().split() 231 if len(ts_parts) == 1: 232 self.timestamp = int(ts_parts[0]) 233 else: 234 self.timestamp = float("{}.{}".format(ts_parts[0], ts_parts[1])) 235 236 def __str__(self): 237 return "{0} {1}".format(self.raw_name, self.raw_timestamp) 238 239 def parse(self, line): 240 """Parse a registry line, populating meta and subkeys""" 241 if len(line) < 4: 242 # Line is too short, nothing to parse 243 return 244 245 if line.startswith("#"): 246 self.add_meta(line) 247 elif line.startswith('"'): 248 try: 249 key, value = re.split(re.compile(r"(?<![^\\]\\\")="), line, maxsplit=1) 250 except ValueError as ex: 251 logger.error("Unable to parse line %s", line) 252 logger.exception(ex) 253 return 254 key = key[1:-1] 255 self.subkeys[key] = value 256 elif line.startswith("@"): 257 key, value = line.split("=", 1) 258 self.subkeys["default"] = value 259 260 def add_to_last(self, line): 261 last_subkey = list(self.subkeys.keys())[-1] 262 self.subkeys[last_subkey] += "\n{}".format(line) 263 264 def render(self): 265 """Return the content of the key in the wine .reg format""" 266 content = self.raw_name + " " + self.raw_timestamp + "\n" 267 for key, value in self.metas.items(): 268 if value is None: 269 content += "#{}\n".format(key) 270 else: 271 content += "#{}={}\n".format(key, value) 272 for key, value in self.subkeys.items(): 273 if key == "default": 274 key = "@" 275 else: 276 key = '"{}"'.format(key) 277 content += "{}={}\n".format(key, value) 278 return content 279 280 def render_value(self, value): 281 if isinstance(value, int): 282 return "dword:{:08x}".format(value) 283 if isinstance(value, str): 284 return '"{}"'.format(value) 285 raise NotImplementedError("TODO") 286 287 def add_meta(self, meta_line): 288 if not meta_line.startswith("#"): 289 raise ValueError("Key metas should start with '#'") 290 meta_line = meta_line[1:] 291 parts = meta_line.split("=") 292 if len(parts) == 2: 293 key = parts[0] 294 value = parts[1] 295 elif len(parts) == 1: 296 key = parts[0] 297 value = None 298 else: 299 raise ValueError("Invalid meta line '{}'".format(meta_line)) 300 self.metas[key] = value 301 302 def get_meta(self, name): 303 return self.metas.get(name) 304 305 def set_subkey(self, name, value): 306 self.subkeys[name] = self.render_value(value) 307 308 def get_subkey(self, name): 309 if name not in self.subkeys: 310 return None 311 value = self.subkeys[name] 312 if value.startswith('"') and value.endswith('"'): 313 return value[1:-1] 314 if value.startswith("dword:"): 315 return int(value[6:], 16) 316 raise ValueError("Handle %s" % value) 317 [end of lutris/util/wine/registry.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/lutris/util/wine/registry.py b/lutris/util/wine/registry.py --- a/lutris/util/wine/registry.py +++ b/lutris/util/wine/registry.py @@ -109,32 +109,29 @@ registry_lines = self.get_raw_registry(reg_filename) current_key = None add_next_to_value = False + additional_values = [] for line in registry_lines: - line = line.rstrip("\n") # Remove trailing newlines - - if line.startswith(self.version_header): - self.version = int(line[len(self.version_header):]) - continue - - if line.startswith(self.relative_to_header): - self.relative_to = line[len(self.relative_to_header):] - continue - - if line.startswith("#arch"): - self.arch = line.split("=")[1] - continue + line = line.rstrip("\n") if line.startswith("["): current_key = WineRegistryKey(key_def=line) self.keys[current_key.name] = current_key - continue - - if current_key: + elif current_key: if add_next_to_value: - current_key.add_to_last(line) - else: + additional_values.append(line) + elif not add_next_to_value: + if additional_values: + additional_values = '\n'.join(additional_values) + current_key.add_to_last(additional_values) + additional_values = [] current_key.parse(line) add_next_to_value = line.endswith("\\") + elif line.startswith(self.version_header): + self.version = int(line[len(self.version_header):]) + elif line.startswith(self.relative_to_header): + self.relative_to = line[len(self.relative_to_header):] + elif line.startswith("#arch"): + self.arch = line.split("=")[1] def render(self): content = "{}{}\n".format(self.version_header, self.version) @@ -258,7 +255,7 @@ self.subkeys["default"] = value def add_to_last(self, line): - last_subkey = list(self.subkeys.keys())[-1] + last_subkey = next(reversed(self.subkeys)) self.subkeys[last_subkey] += "\n{}".format(line) def render(self):
{"golden_diff": "diff --git a/lutris/util/wine/registry.py b/lutris/util/wine/registry.py\n--- a/lutris/util/wine/registry.py\n+++ b/lutris/util/wine/registry.py\n@@ -109,32 +109,29 @@\n registry_lines = self.get_raw_registry(reg_filename)\n current_key = None\n add_next_to_value = False\n+ additional_values = []\n for line in registry_lines:\n- line = line.rstrip(\"\\n\") # Remove trailing newlines\n-\n- if line.startswith(self.version_header):\n- self.version = int(line[len(self.version_header):])\n- continue\n-\n- if line.startswith(self.relative_to_header):\n- self.relative_to = line[len(self.relative_to_header):]\n- continue\n-\n- if line.startswith(\"#arch\"):\n- self.arch = line.split(\"=\")[1]\n- continue\n+ line = line.rstrip(\"\\n\")\n \n if line.startswith(\"[\"):\n current_key = WineRegistryKey(key_def=line)\n self.keys[current_key.name] = current_key\n- continue\n-\n- if current_key:\n+ elif current_key:\n if add_next_to_value:\n- current_key.add_to_last(line)\n- else:\n+ additional_values.append(line)\n+ elif not add_next_to_value:\n+ if additional_values:\n+ additional_values = '\\n'.join(additional_values)\n+ current_key.add_to_last(additional_values)\n+ additional_values = []\n current_key.parse(line)\n add_next_to_value = line.endswith(\"\\\\\")\n+ elif line.startswith(self.version_header):\n+ self.version = int(line[len(self.version_header):])\n+ elif line.startswith(self.relative_to_header):\n+ self.relative_to = line[len(self.relative_to_header):]\n+ elif line.startswith(\"#arch\"):\n+ self.arch = line.split(\"=\")[1]\n \n def render(self):\n content = \"{}{}\\n\".format(self.version_header, self.version)\n@@ -258,7 +255,7 @@\n self.subkeys[\"default\"] = value\n \n def add_to_last(self, line):\n- last_subkey = list(self.subkeys.keys())[-1]\n+ last_subkey = next(reversed(self.subkeys))\n self.subkeys[last_subkey] += \"\\n{}\".format(line)\n \n def render(self):\n", "issue": "Lutris takes extremely long time to parse very long string keys in registry\nIf user.reg contains very long (we a talking megabytes long here) string key it takes a very long time to start anything in this wineprefix with lutris. With ~15MB key lutris was taking ~30 minutes. It happens before starting wine, in \"preparing to start\" phase.\r\n\r\nP.S. If you ask how does this happens - Battletech ny HBS writes some analytics into string key in registry. 
And it only adds to it, never cleans.\n", "before_files": [{"content": "import os\nimport re\nfrom collections import OrderedDict\nfrom datetime import datetime\nfrom lutris.util.log import logger\nfrom lutris.util import system\nfrom lutris.util.wine.wine import WINE_DEFAULT_ARCH\n\n(\n REG_NONE,\n REG_SZ,\n REG_EXPAND_SZ,\n REG_BINARY,\n REG_DWORD,\n REG_DWORD_BIG_ENDIAN,\n REG_LINK,\n REG_MULTI_SZ,\n) = range(8)\n\nDATA_TYPES = {\n '\"': REG_SZ,\n 'str:\"': REG_SZ,\n 'str(2):\"': REG_EXPAND_SZ,\n 'str(7):\"': REG_MULTI_SZ,\n \"hex\": REG_BINARY,\n \"dword\": REG_DWORD,\n}\n\n\nclass WindowsFileTime:\n \"\"\"Utility class to deal with Windows FILETIME structures.\n\n See: https://msdn.microsoft.com/en-us/library/ms724284(v=vs.85).aspx\n \"\"\"\n\n ticks_per_seconds = 10000000 # 1 tick every 100 nanoseconds\n epoch_delta = 11644473600 # 3600 * 24 * ((1970 - 1601) * 365 + 89)\n\n def __init__(self, timestamp=None):\n self.timestamp = timestamp\n\n def __repr__(self):\n return \"<{}>: {}\".format(self.__class__.__name__, self.timestamp)\n\n @classmethod\n def from_hex(cls, hexvalue):\n timestamp = int(hexvalue, 16)\n return WindowsFileTime(timestamp)\n\n def to_hex(self):\n return \"{:x}\".format(self.timestamp)\n\n @classmethod\n def from_unix_timestamp(cls, timestamp):\n timestamp = timestamp + cls.epoch_delta\n timestamp = int(timestamp * cls.ticks_per_seconds)\n return WindowsFileTime(timestamp)\n\n def to_unix_timestamp(self):\n if not self.timestamp:\n raise ValueError(\"No timestamp set\")\n unix_ts = self.timestamp / self.ticks_per_seconds\n unix_ts = unix_ts - self.epoch_delta\n return unix_ts\n\n def to_date_time(self):\n return datetime.fromtimestamp(self.to_unix_timestamp())\n\n\nclass WineRegistry:\n version_header = \"WINE REGISTRY Version \"\n relative_to_header = \";; All keys relative to \"\n\n def __init__(self, reg_filename=None):\n self.arch = WINE_DEFAULT_ARCH\n self.version = 2\n self.relative_to = \"\\\\\\\\User\\\\\\\\S-1-5-21-0-0-0-1000\"\n self.keys = OrderedDict()\n self.reg_filename = reg_filename\n if reg_filename:\n if not system.path_exists(reg_filename):\n logger.error(\"Unexisting registry %s\", reg_filename)\n self.parse_reg_file(reg_filename)\n\n @property\n def prefix_path(self):\n \"\"\"Return the Wine prefix path (where the .reg files are located)\"\"\"\n if self.reg_filename:\n return os.path.dirname(self.reg_filename)\n\n @staticmethod\n def get_raw_registry(reg_filename):\n \"\"\"Return an array of the unprocessed contents of a registry file\"\"\"\n if not system.path_exists(reg_filename):\n return []\n with open(reg_filename, \"r\") as reg_file:\n\n try:\n registry_content = reg_file.readlines()\n except Exception: # pylint: disable=broad-except\n logger.exception(\n \"Failed to registry read %s, please send attach this file in a bug report\",\n reg_filename\n )\n registry_content = []\n return registry_content\n\n def parse_reg_file(self, reg_filename):\n registry_lines = self.get_raw_registry(reg_filename)\n current_key = None\n add_next_to_value = False\n for line in registry_lines:\n line = line.rstrip(\"\\n\") # Remove trailing newlines\n\n if line.startswith(self.version_header):\n self.version = int(line[len(self.version_header):])\n continue\n\n if line.startswith(self.relative_to_header):\n self.relative_to = line[len(self.relative_to_header):]\n continue\n\n if line.startswith(\"#arch\"):\n self.arch = line.split(\"=\")[1]\n continue\n\n if line.startswith(\"[\"):\n current_key = WineRegistryKey(key_def=line)\n self.keys[current_key.name] 
= current_key\n continue\n\n if current_key:\n if add_next_to_value:\n current_key.add_to_last(line)\n else:\n current_key.parse(line)\n add_next_to_value = line.endswith(\"\\\\\")\n\n def render(self):\n content = \"{}{}\\n\".format(self.version_header, self.version)\n content += \"{}{}\\n\\n\".format(self.relative_to_header, self.relative_to)\n content += \"#arch={}\\n\".format(self.arch)\n for key in self.keys:\n content += \"\\n\"\n content += self.keys[key].render()\n return content\n\n def save(self, path=None):\n \"\"\"Write the registry to a file\"\"\"\n if not path:\n path = self.reg_filename\n if not path:\n raise OSError(\"No filename provided\")\n with open(path, \"w\") as registry_file:\n registry_file.write(self.render())\n\n def query(self, path, subkey):\n key = self.keys.get(path)\n if key:\n return key.get_subkey(subkey)\n\n def set_value(self, path, subkey, value):\n key = self.keys.get(path)\n if not key:\n key = WineRegistryKey(path=path)\n self.keys[key.name] = key\n key.set_subkey(subkey, value)\n\n def clear_key(self, path):\n \"\"\"Removes all subkeys from a key\"\"\"\n key = self.keys.get(path)\n if not key:\n return\n key.subkeys.clear()\n\n def clear_subkeys(self, path, keys):\n \"\"\"Remove some subkeys from a key\"\"\"\n key = self.keys.get(path)\n if not key:\n return\n for subkey in list(key.subkeys.keys()):\n if subkey not in keys:\n continue\n key.subkeys.pop(subkey)\n\n def get_unix_path(self, windows_path):\n windows_path = windows_path.replace(\"\\\\\\\\\", \"/\")\n if not self.prefix_path:\n return\n drives_path = os.path.join(self.prefix_path, \"dosdevices\")\n if not system.path_exists(drives_path):\n return\n letter, relpath = windows_path.split(\":\", 1)\n relpath = relpath.strip(\"/\")\n drive_link = os.path.join(drives_path, letter.lower() + \":\")\n try:\n drive_path = os.readlink(drive_link)\n except FileNotFoundError:\n logger.error(\"Unable to read link for %s\", drive_link)\n return\n\n if not os.path.isabs(drive_path):\n drive_path = os.path.join(drives_path, drive_path)\n return os.path.join(drive_path, relpath)\n\n\nclass WineRegistryKey:\n def __init__(self, key_def=None, path=None):\n\n self.subkeys = OrderedDict()\n self.metas = OrderedDict()\n\n if path:\n # Key is created by path, it's a new key\n timestamp = datetime.now().timestamp()\n self.name = path\n self.raw_name = \"[{}]\".format(path.replace(\"/\", \"\\\\\\\\\"))\n self.raw_timestamp = \" \".join(str(timestamp).split(\".\"))\n\n windows_timestamp = WindowsFileTime.from_unix_timestamp(timestamp)\n self.metas[\"time\"] = windows_timestamp.to_hex()\n else:\n # Existing key loaded from file\n self.raw_name, self.raw_timestamp = re.split(\n re.compile(r\"(?<=[^\\\\]\\]) \"), key_def, maxsplit=1\n )\n self.name = self.raw_name.replace(\"\\\\\\\\\", \"/\").strip(\"[]\")\n\n # Parse timestamp either as int or float\n ts_parts = self.raw_timestamp.strip().split()\n if len(ts_parts) == 1:\n self.timestamp = int(ts_parts[0])\n else:\n self.timestamp = float(\"{}.{}\".format(ts_parts[0], ts_parts[1]))\n\n def __str__(self):\n return \"{0} {1}\".format(self.raw_name, self.raw_timestamp)\n\n def parse(self, line):\n \"\"\"Parse a registry line, populating meta and subkeys\"\"\"\n if len(line) < 4:\n # Line is too short, nothing to parse\n return\n\n if line.startswith(\"#\"):\n self.add_meta(line)\n elif line.startswith('\"'):\n try:\n key, value = re.split(re.compile(r\"(?<![^\\\\]\\\\\\\")=\"), line, maxsplit=1)\n except ValueError as ex:\n logger.error(\"Unable to parse line %s\", 
line)\n logger.exception(ex)\n return\n key = key[1:-1]\n self.subkeys[key] = value\n elif line.startswith(\"@\"):\n key, value = line.split(\"=\", 1)\n self.subkeys[\"default\"] = value\n\n def add_to_last(self, line):\n last_subkey = list(self.subkeys.keys())[-1]\n self.subkeys[last_subkey] += \"\\n{}\".format(line)\n\n def render(self):\n \"\"\"Return the content of the key in the wine .reg format\"\"\"\n content = self.raw_name + \" \" + self.raw_timestamp + \"\\n\"\n for key, value in self.metas.items():\n if value is None:\n content += \"#{}\\n\".format(key)\n else:\n content += \"#{}={}\\n\".format(key, value)\n for key, value in self.subkeys.items():\n if key == \"default\":\n key = \"@\"\n else:\n key = '\"{}\"'.format(key)\n content += \"{}={}\\n\".format(key, value)\n return content\n\n def render_value(self, value):\n if isinstance(value, int):\n return \"dword:{:08x}\".format(value)\n if isinstance(value, str):\n return '\"{}\"'.format(value)\n raise NotImplementedError(\"TODO\")\n\n def add_meta(self, meta_line):\n if not meta_line.startswith(\"#\"):\n raise ValueError(\"Key metas should start with '#'\")\n meta_line = meta_line[1:]\n parts = meta_line.split(\"=\")\n if len(parts) == 2:\n key = parts[0]\n value = parts[1]\n elif len(parts) == 1:\n key = parts[0]\n value = None\n else:\n raise ValueError(\"Invalid meta line '{}'\".format(meta_line))\n self.metas[key] = value\n\n def get_meta(self, name):\n return self.metas.get(name)\n\n def set_subkey(self, name, value):\n self.subkeys[name] = self.render_value(value)\n\n def get_subkey(self, name):\n if name not in self.subkeys:\n return None\n value = self.subkeys[name]\n if value.startswith('\"') and value.endswith('\"'):\n return value[1:-1]\n if value.startswith(\"dword:\"):\n return int(value[6:], 16)\n raise ValueError(\"Handle %s\" % value)\n", "path": "lutris/util/wine/registry.py"}]}
3,896
509
gh_patches_debug_4937
rasdani/github-patches
git_diff
open-telemetry__opentelemetry-python-621
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Bug in b3 propagation When configuring b3 propagation, if the parent of a Span is an instance of a SpanContext, the following error occurs: `'SpanContext' object has no attribute 'context'` </issue> <code> [start of opentelemetry-sdk/src/opentelemetry/sdk/trace/propagation/b3_format.py] 1 # Copyright The OpenTelemetry Authors 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 import typing 16 17 import opentelemetry.trace as trace 18 from opentelemetry.context import Context 19 from opentelemetry.trace.propagation import ( 20 get_span_from_context, 21 set_span_in_context, 22 ) 23 from opentelemetry.trace.propagation.httptextformat import ( 24 Getter, 25 HTTPTextFormat, 26 HTTPTextFormatT, 27 Setter, 28 ) 29 30 31 class B3Format(HTTPTextFormat): 32 """Propagator for the B3 HTTP header format. 33 34 See: https://github.com/openzipkin/b3-propagation 35 """ 36 37 SINGLE_HEADER_KEY = "b3" 38 TRACE_ID_KEY = "x-b3-traceid" 39 SPAN_ID_KEY = "x-b3-spanid" 40 PARENT_SPAN_ID_KEY = "x-b3-parentspanid" 41 SAMPLED_KEY = "x-b3-sampled" 42 FLAGS_KEY = "x-b3-flags" 43 _SAMPLE_PROPAGATE_VALUES = set(["1", "True", "true", "d"]) 44 45 def extract( 46 self, 47 get_from_carrier: Getter[HTTPTextFormatT], 48 carrier: HTTPTextFormatT, 49 context: typing.Optional[Context] = None, 50 ) -> Context: 51 trace_id = format_trace_id(trace.INVALID_TRACE_ID) 52 span_id = format_span_id(trace.INVALID_SPAN_ID) 53 sampled = "0" 54 flags = None 55 56 single_header = _extract_first_element( 57 get_from_carrier(carrier, self.SINGLE_HEADER_KEY) 58 ) 59 if single_header: 60 # The b3 spec calls for the sampling state to be 61 # "deferred", which is unspecified. This concept does not 62 # translate to SpanContext, so we set it as recorded. 63 sampled = "1" 64 fields = single_header.split("-", 4) 65 66 if len(fields) == 1: 67 sampled = fields[0] 68 elif len(fields) == 2: 69 trace_id, span_id = fields 70 elif len(fields) == 3: 71 trace_id, span_id, sampled = fields 72 elif len(fields) == 4: 73 trace_id, span_id, sampled, _ = fields 74 else: 75 return set_span_in_context(trace.INVALID_SPAN) 76 else: 77 trace_id = ( 78 _extract_first_element( 79 get_from_carrier(carrier, self.TRACE_ID_KEY) 80 ) 81 or trace_id 82 ) 83 span_id = ( 84 _extract_first_element( 85 get_from_carrier(carrier, self.SPAN_ID_KEY) 86 ) 87 or span_id 88 ) 89 sampled = ( 90 _extract_first_element( 91 get_from_carrier(carrier, self.SAMPLED_KEY) 92 ) 93 or sampled 94 ) 95 flags = ( 96 _extract_first_element( 97 get_from_carrier(carrier, self.FLAGS_KEY) 98 ) 99 or flags 100 ) 101 102 options = 0 103 # The b3 spec provides no defined behavior for both sample and 104 # flag values set. Since the setting of at least one implies 105 # the desire for some form of sampling, propagate if either 106 # header is set to allow. 
107 if sampled in self._SAMPLE_PROPAGATE_VALUES or flags == "1": 108 options |= trace.TraceFlags.SAMPLED 109 return set_span_in_context( 110 trace.DefaultSpan( 111 trace.SpanContext( 112 # trace an span ids are encoded in hex, so must be converted 113 trace_id=int(trace_id, 16), 114 span_id=int(span_id, 16), 115 is_remote=True, 116 trace_flags=trace.TraceFlags(options), 117 trace_state=trace.TraceState(), 118 ) 119 ) 120 ) 121 122 def inject( 123 self, 124 set_in_carrier: Setter[HTTPTextFormatT], 125 carrier: HTTPTextFormatT, 126 context: typing.Optional[Context] = None, 127 ) -> None: 128 span = get_span_from_context(context=context) 129 sampled = (trace.TraceFlags.SAMPLED & span.context.trace_flags) != 0 130 set_in_carrier( 131 carrier, self.TRACE_ID_KEY, format_trace_id(span.context.trace_id), 132 ) 133 set_in_carrier( 134 carrier, self.SPAN_ID_KEY, format_span_id(span.context.span_id) 135 ) 136 if span.parent is not None: 137 set_in_carrier( 138 carrier, 139 self.PARENT_SPAN_ID_KEY, 140 format_span_id(span.parent.context.span_id), 141 ) 142 set_in_carrier(carrier, self.SAMPLED_KEY, "1" if sampled else "0") 143 144 145 def format_trace_id(trace_id: int) -> str: 146 """Format the trace id according to b3 specification.""" 147 return format(trace_id, "032x") 148 149 150 def format_span_id(span_id: int) -> str: 151 """Format the span id according to b3 specification.""" 152 return format(span_id, "016x") 153 154 155 def _extract_first_element( 156 items: typing.Iterable[HTTPTextFormatT], 157 ) -> typing.Optional[HTTPTextFormatT]: 158 if items is None: 159 return None 160 return next(iter(items), None) 161 [end of opentelemetry-sdk/src/opentelemetry/sdk/trace/propagation/b3_format.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/trace/propagation/b3_format.py b/opentelemetry-sdk/src/opentelemetry/sdk/trace/propagation/b3_format.py --- a/opentelemetry-sdk/src/opentelemetry/sdk/trace/propagation/b3_format.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/trace/propagation/b3_format.py @@ -137,7 +137,7 @@ set_in_carrier( carrier, self.PARENT_SPAN_ID_KEY, - format_span_id(span.parent.context.span_id), + format_span_id(span.parent.span_id), ) set_in_carrier(carrier, self.SAMPLED_KEY, "1" if sampled else "0")
{"golden_diff": "diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/trace/propagation/b3_format.py b/opentelemetry-sdk/src/opentelemetry/sdk/trace/propagation/b3_format.py\n--- a/opentelemetry-sdk/src/opentelemetry/sdk/trace/propagation/b3_format.py\n+++ b/opentelemetry-sdk/src/opentelemetry/sdk/trace/propagation/b3_format.py\n@@ -137,7 +137,7 @@\n set_in_carrier(\n carrier,\n self.PARENT_SPAN_ID_KEY,\n- format_span_id(span.parent.context.span_id),\n+ format_span_id(span.parent.span_id),\n )\n set_in_carrier(carrier, self.SAMPLED_KEY, \"1\" if sampled else \"0\")\n", "issue": "Bug in b3 propagation\nWhen configuring b3 propagation, if the parent of a Span is an instance of a SpanContext, the following error occurs:\r\n\r\n`'SpanContext' object has no attribute 'context'`\r\n\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport typing\n\nimport opentelemetry.trace as trace\nfrom opentelemetry.context import Context\nfrom opentelemetry.trace.propagation import (\n get_span_from_context,\n set_span_in_context,\n)\nfrom opentelemetry.trace.propagation.httptextformat import (\n Getter,\n HTTPTextFormat,\n HTTPTextFormatT,\n Setter,\n)\n\n\nclass B3Format(HTTPTextFormat):\n \"\"\"Propagator for the B3 HTTP header format.\n\n See: https://github.com/openzipkin/b3-propagation\n \"\"\"\n\n SINGLE_HEADER_KEY = \"b3\"\n TRACE_ID_KEY = \"x-b3-traceid\"\n SPAN_ID_KEY = \"x-b3-spanid\"\n PARENT_SPAN_ID_KEY = \"x-b3-parentspanid\"\n SAMPLED_KEY = \"x-b3-sampled\"\n FLAGS_KEY = \"x-b3-flags\"\n _SAMPLE_PROPAGATE_VALUES = set([\"1\", \"True\", \"true\", \"d\"])\n\n def extract(\n self,\n get_from_carrier: Getter[HTTPTextFormatT],\n carrier: HTTPTextFormatT,\n context: typing.Optional[Context] = None,\n ) -> Context:\n trace_id = format_trace_id(trace.INVALID_TRACE_ID)\n span_id = format_span_id(trace.INVALID_SPAN_ID)\n sampled = \"0\"\n flags = None\n\n single_header = _extract_first_element(\n get_from_carrier(carrier, self.SINGLE_HEADER_KEY)\n )\n if single_header:\n # The b3 spec calls for the sampling state to be\n # \"deferred\", which is unspecified. 
This concept does not\n # translate to SpanContext, so we set it as recorded.\n sampled = \"1\"\n fields = single_header.split(\"-\", 4)\n\n if len(fields) == 1:\n sampled = fields[0]\n elif len(fields) == 2:\n trace_id, span_id = fields\n elif len(fields) == 3:\n trace_id, span_id, sampled = fields\n elif len(fields) == 4:\n trace_id, span_id, sampled, _ = fields\n else:\n return set_span_in_context(trace.INVALID_SPAN)\n else:\n trace_id = (\n _extract_first_element(\n get_from_carrier(carrier, self.TRACE_ID_KEY)\n )\n or trace_id\n )\n span_id = (\n _extract_first_element(\n get_from_carrier(carrier, self.SPAN_ID_KEY)\n )\n or span_id\n )\n sampled = (\n _extract_first_element(\n get_from_carrier(carrier, self.SAMPLED_KEY)\n )\n or sampled\n )\n flags = (\n _extract_first_element(\n get_from_carrier(carrier, self.FLAGS_KEY)\n )\n or flags\n )\n\n options = 0\n # The b3 spec provides no defined behavior for both sample and\n # flag values set. Since the setting of at least one implies\n # the desire for some form of sampling, propagate if either\n # header is set to allow.\n if sampled in self._SAMPLE_PROPAGATE_VALUES or flags == \"1\":\n options |= trace.TraceFlags.SAMPLED\n return set_span_in_context(\n trace.DefaultSpan(\n trace.SpanContext(\n # trace an span ids are encoded in hex, so must be converted\n trace_id=int(trace_id, 16),\n span_id=int(span_id, 16),\n is_remote=True,\n trace_flags=trace.TraceFlags(options),\n trace_state=trace.TraceState(),\n )\n )\n )\n\n def inject(\n self,\n set_in_carrier: Setter[HTTPTextFormatT],\n carrier: HTTPTextFormatT,\n context: typing.Optional[Context] = None,\n ) -> None:\n span = get_span_from_context(context=context)\n sampled = (trace.TraceFlags.SAMPLED & span.context.trace_flags) != 0\n set_in_carrier(\n carrier, self.TRACE_ID_KEY, format_trace_id(span.context.trace_id),\n )\n set_in_carrier(\n carrier, self.SPAN_ID_KEY, format_span_id(span.context.span_id)\n )\n if span.parent is not None:\n set_in_carrier(\n carrier,\n self.PARENT_SPAN_ID_KEY,\n format_span_id(span.parent.context.span_id),\n )\n set_in_carrier(carrier, self.SAMPLED_KEY, \"1\" if sampled else \"0\")\n\n\ndef format_trace_id(trace_id: int) -> str:\n \"\"\"Format the trace id according to b3 specification.\"\"\"\n return format(trace_id, \"032x\")\n\n\ndef format_span_id(span_id: int) -> str:\n \"\"\"Format the span id according to b3 specification.\"\"\"\n return format(span_id, \"016x\")\n\n\ndef _extract_first_element(\n items: typing.Iterable[HTTPTextFormatT],\n) -> typing.Optional[HTTPTextFormatT]:\n if items is None:\n return None\n return next(iter(items), None)\n", "path": "opentelemetry-sdk/src/opentelemetry/sdk/trace/propagation/b3_format.py"}]}
2,197
162
gh_patches_debug_17371
rasdani/github-patches
git_diff
translate__pootle-4635
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Put `Review suggestions` action button before others Let's change order of action buttons and put `Review suggestions` before `Needs translation` to encourage going through suggestion backlog. </issue> <code> [start of pootle/core/browser.py] 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 # 4 # Copyright (C) Pootle contributors. 5 # 6 # This file is a part of the Pootle project. It is distributed under the GPL3 7 # or later license. See the LICENSE file for a copy of the license and the 8 # AUTHORS file for copyright and authorship information. 9 10 from django.utils.translation import ugettext_lazy as _ 11 12 13 HEADING_CHOICES = [ 14 { 15 'id': 'name', 16 'class': 'stats', 17 'display_name': _("Name"), 18 }, 19 { 20 'id': 'priority', 21 'class': 'stats-number sorttable_numeric', 22 'display_name': _("Priority"), 23 }, 24 { 25 'id': 'project', 26 'class': 'stats', 27 'display_name': _("Project"), 28 }, 29 { 30 'id': 'language', 31 'class': 'stats', 32 'display_name': _("Language"), 33 }, 34 { 35 'id': 'progress', 36 'class': 'stats', 37 # Translators: noun. The graphical representation of translation status 38 'display_name': _("Progress"), 39 }, 40 { 41 'id': 'total', 42 'class': 'stats-number sorttable_numeric when-loaded', 43 # Translators: Heading representing the total number of words of a file 44 # or directory 45 'display_name': _("Total"), 46 }, 47 { 48 'id': 'last-updated', 49 'class': 'stats sorttable_numeric when-loaded', 50 'display_name': _("Last updated"), 51 }, 52 { 53 'id': 'need-translation', 54 'class': 'stats-number sorttable_numeric when-loaded', 55 'display_name': _("Need Translation"), 56 }, 57 { 58 'id': 'suggestions', 59 'class': 'stats-number sorttable_numeric when-loaded', 60 # Translators: The number of suggestions pending review 61 'display_name': _("Suggestions"), 62 }, 63 { 64 'id': 'critical', 65 'class': 'stats-number sorttable_numeric when-loaded', 66 'display_name': _("Critical"), 67 }, 68 { 69 'id': 'activity', 70 'class': 'stats sorttable_numeric when-loaded', 71 'display_name': _("Last Activity"), 72 }, 73 ] 74 75 76 def get_table_headings(choices): 77 """Filters the list of available table headings to the given `choices`.""" 78 return filter(lambda x: x['id'] in choices, HEADING_CHOICES) 79 80 81 def make_generic_item(path_obj, **kwargs): 82 """Template variables for each row in the table.""" 83 return { 84 'href': path_obj.get_absolute_url(), 85 'href_all': path_obj.get_translate_url(), 86 'href_todo': path_obj.get_translate_url(state='incomplete', **kwargs), 87 'href_sugg': path_obj.get_translate_url(state='suggestions', **kwargs), 88 'href_critical': path_obj.get_critical_url(**kwargs), 89 'title': path_obj.name, 90 'code': path_obj.code, 91 'is_disabled': getattr(path_obj, 'disabled', False), 92 } 93 94 95 def make_directory_item(directory, **filters): 96 item = make_generic_item(directory, **filters) 97 item.update({ 98 'icon': 'folder', 99 }) 100 return item 101 102 103 def make_store_item(store): 104 item = make_generic_item(store) 105 item.update({ 106 'icon': 'file', 107 }) 108 return item 109 110 111 def get_parent(path_obj): 112 """Retrieves a representation of the parent object. 113 114 :param path_obj: either a `Directory` or Store` instance. 
115 """ 116 parent_dir = path_obj.parent 117 118 if parent_dir.is_project(): 119 return None 120 121 if parent_dir.is_language(): 122 label = _('Back to language') 123 else: 124 label = _('Back to parent folder') 125 126 return { 127 'title': label, 128 'href': parent_dir.get_absolute_url() 129 } 130 131 132 def make_project_item(translation_project): 133 item = make_generic_item(translation_project) 134 item.update({ 135 'icon': 'project', 136 'title': translation_project.project.name, 137 }) 138 return item 139 140 141 def make_language_item(translation_project): 142 item = make_generic_item(translation_project) 143 item.update({ 144 'icon': 'language', 145 'title': translation_project.language.name, 146 }) 147 return item 148 149 150 def make_xlanguage_item(resource_obj): 151 translation_project = resource_obj.translation_project 152 item = make_generic_item(resource_obj) 153 item.update({ 154 'icon': 'language', 155 'code': translation_project.language.code, 156 'title': translation_project.language.name, 157 }) 158 return item 159 160 161 def make_project_list_item(project): 162 item = make_generic_item(project) 163 item.update({ 164 'icon': 'project', 165 'title': project.fullname, 166 }) 167 return item 168 [end of pootle/core/browser.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pootle/core/browser.py b/pootle/core/browser.py --- a/pootle/core/browser.py +++ b/pootle/core/browser.py @@ -50,9 +50,9 @@ 'display_name': _("Last updated"), }, { - 'id': 'need-translation', + 'id': 'critical', 'class': 'stats-number sorttable_numeric when-loaded', - 'display_name': _("Need Translation"), + 'display_name': _("Critical"), }, { 'id': 'suggestions', @@ -61,9 +61,9 @@ 'display_name': _("Suggestions"), }, { - 'id': 'critical', + 'id': 'need-translation', 'class': 'stats-number sorttable_numeric when-loaded', - 'display_name': _("Critical"), + 'display_name': _("Need Translation"), }, { 'id': 'activity',
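The patch only reorders entries in `HEADING_CHOICES`, and that is enough because `get_table_headings()` filters against that constant, so the output order always follows `HEADING_CHOICES` rather than the caller's `choices` list. Below is a minimal standalone sketch of that behaviour; the dicts are trimmed down for illustration and are not the real entries.

```python
# Trimmed-down illustration of the ordering logic in pootle/core/browser.py:
# the result order comes from HEADING_CHOICES, not from the requested ids.
HEADING_CHOICES = [
    {'id': 'critical', 'display_name': 'Critical'},
    {'id': 'suggestions', 'display_name': 'Suggestions'},
    {'id': 'need-translation', 'display_name': 'Need Translation'},
]

def get_table_headings(choices):
    # Same filtering idea as the real function (which uses filter()).
    return [heading for heading in HEADING_CHOICES if heading['id'] in choices]

ids = [h['id'] for h in get_table_headings(['need-translation', 'critical', 'suggestions'])]
print(ids)  # ['critical', 'suggestions', 'need-translation']
```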
{"golden_diff": "diff --git a/pootle/core/browser.py b/pootle/core/browser.py\n--- a/pootle/core/browser.py\n+++ b/pootle/core/browser.py\n@@ -50,9 +50,9 @@\n 'display_name': _(\"Last updated\"),\n },\n {\n- 'id': 'need-translation',\n+ 'id': 'critical',\n 'class': 'stats-number sorttable_numeric when-loaded',\n- 'display_name': _(\"Need Translation\"),\n+ 'display_name': _(\"Critical\"),\n },\n {\n 'id': 'suggestions',\n@@ -61,9 +61,9 @@\n 'display_name': _(\"Suggestions\"),\n },\n {\n- 'id': 'critical',\n+ 'id': 'need-translation',\n 'class': 'stats-number sorttable_numeric when-loaded',\n- 'display_name': _(\"Critical\"),\n+ 'display_name': _(\"Need Translation\"),\n },\n {\n 'id': 'activity',\n", "issue": "Put `Review suggestions` action button before others\nLet's change order of action buttons and put `Review suggestions` before `Needs translation` to encourage going through suggestion backlog.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nfrom django.utils.translation import ugettext_lazy as _\n\n\nHEADING_CHOICES = [\n {\n 'id': 'name',\n 'class': 'stats',\n 'display_name': _(\"Name\"),\n },\n {\n 'id': 'priority',\n 'class': 'stats-number sorttable_numeric',\n 'display_name': _(\"Priority\"),\n },\n {\n 'id': 'project',\n 'class': 'stats',\n 'display_name': _(\"Project\"),\n },\n {\n 'id': 'language',\n 'class': 'stats',\n 'display_name': _(\"Language\"),\n },\n {\n 'id': 'progress',\n 'class': 'stats',\n # Translators: noun. The graphical representation of translation status\n 'display_name': _(\"Progress\"),\n },\n {\n 'id': 'total',\n 'class': 'stats-number sorttable_numeric when-loaded',\n # Translators: Heading representing the total number of words of a file\n # or directory\n 'display_name': _(\"Total\"),\n },\n {\n 'id': 'last-updated',\n 'class': 'stats sorttable_numeric when-loaded',\n 'display_name': _(\"Last updated\"),\n },\n {\n 'id': 'need-translation',\n 'class': 'stats-number sorttable_numeric when-loaded',\n 'display_name': _(\"Need Translation\"),\n },\n {\n 'id': 'suggestions',\n 'class': 'stats-number sorttable_numeric when-loaded',\n # Translators: The number of suggestions pending review\n 'display_name': _(\"Suggestions\"),\n },\n {\n 'id': 'critical',\n 'class': 'stats-number sorttable_numeric when-loaded',\n 'display_name': _(\"Critical\"),\n },\n {\n 'id': 'activity',\n 'class': 'stats sorttable_numeric when-loaded',\n 'display_name': _(\"Last Activity\"),\n },\n]\n\n\ndef get_table_headings(choices):\n \"\"\"Filters the list of available table headings to the given `choices`.\"\"\"\n return filter(lambda x: x['id'] in choices, HEADING_CHOICES)\n\n\ndef make_generic_item(path_obj, **kwargs):\n \"\"\"Template variables for each row in the table.\"\"\"\n return {\n 'href': path_obj.get_absolute_url(),\n 'href_all': path_obj.get_translate_url(),\n 'href_todo': path_obj.get_translate_url(state='incomplete', **kwargs),\n 'href_sugg': path_obj.get_translate_url(state='suggestions', **kwargs),\n 'href_critical': path_obj.get_critical_url(**kwargs),\n 'title': path_obj.name,\n 'code': path_obj.code,\n 'is_disabled': getattr(path_obj, 'disabled', False),\n }\n\n\ndef make_directory_item(directory, **filters):\n item = make_generic_item(directory, **filters)\n 
item.update({\n 'icon': 'folder',\n })\n return item\n\n\ndef make_store_item(store):\n item = make_generic_item(store)\n item.update({\n 'icon': 'file',\n })\n return item\n\n\ndef get_parent(path_obj):\n \"\"\"Retrieves a representation of the parent object.\n\n :param path_obj: either a `Directory` or Store` instance.\n \"\"\"\n parent_dir = path_obj.parent\n\n if parent_dir.is_project():\n return None\n\n if parent_dir.is_language():\n label = _('Back to language')\n else:\n label = _('Back to parent folder')\n\n return {\n 'title': label,\n 'href': parent_dir.get_absolute_url()\n }\n\n\ndef make_project_item(translation_project):\n item = make_generic_item(translation_project)\n item.update({\n 'icon': 'project',\n 'title': translation_project.project.name,\n })\n return item\n\n\ndef make_language_item(translation_project):\n item = make_generic_item(translation_project)\n item.update({\n 'icon': 'language',\n 'title': translation_project.language.name,\n })\n return item\n\n\ndef make_xlanguage_item(resource_obj):\n translation_project = resource_obj.translation_project\n item = make_generic_item(resource_obj)\n item.update({\n 'icon': 'language',\n 'code': translation_project.language.code,\n 'title': translation_project.language.name,\n })\n return item\n\n\ndef make_project_list_item(project):\n item = make_generic_item(project)\n item.update({\n 'icon': 'project',\n 'title': project.fullname,\n })\n return item\n", "path": "pootle/core/browser.py"}]}
1,994
208
gh_patches_debug_23732
rasdani/github-patches
git_diff
scrapy__scrapy-4814
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Test the upcoming pip dependency resolver in CI From [email protected], about pip 20.2: > NOTICE: This release includes the beta of the next-generation dependency resolver. It is significantly stricter and more consistent when it receives incompatible instructions, and reduces support for certain kinds of constraints files, so some workarounds and workflows may break. Please test it with the `--use-feature=2020-resolver` flag. Please see our guide on how to test and migrate, and how to report issues <https://pip.pypa.io/en/latest/user_guide/#changes-to-the-pip-dependency-resolver-in-20-2-2020>. > > The new dependency resolver is *off by default* because it is *not yet ready for everyday use*. </issue> <code> [start of setup.py] 1 from os.path import dirname, join 2 from pkg_resources import parse_version 3 from setuptools import setup, find_packages, __version__ as setuptools_version 4 5 6 with open(join(dirname(__file__), 'scrapy/VERSION'), 'rb') as f: 7 version = f.read().decode('ascii').strip() 8 9 10 def has_environment_marker_platform_impl_support(): 11 """Code extracted from 'pytest/setup.py' 12 https://github.com/pytest-dev/pytest/blob/7538680c/setup.py#L31 13 14 The first known release to support environment marker with range operators 15 it is 18.5, see: 16 https://setuptools.readthedocs.io/en/latest/history.html#id235 17 """ 18 return parse_version(setuptools_version) >= parse_version('18.5') 19 20 21 install_requires = [ 22 'Twisted>=17.9.0', 23 'cryptography>=2.0', 24 'cssselect>=0.9.1', 25 'itemloaders>=1.0.1', 26 'parsel>=1.5.0', 27 'PyDispatcher>=2.0.5', 28 'pyOpenSSL>=16.2.0', 29 'queuelib>=1.4.2', 30 'service_identity>=16.0.0', 31 'w3lib>=1.17.0', 32 'zope.interface>=4.1.3', 33 'protego>=0.1.15', 34 'itemadapter>=0.1.0', 35 ] 36 extras_require = {} 37 38 if has_environment_marker_platform_impl_support(): 39 extras_require[':platform_python_implementation == "CPython"'] = [ 40 'lxml>=3.5.0', 41 ] 42 extras_require[':platform_python_implementation == "PyPy"'] = [ 43 # Earlier lxml versions are affected by 44 # https://foss.heptapod.net/pypy/pypy/-/issues/2498, 45 # which was fixed in Cython 0.26, released on 2017-06-19, and used to 46 # generate the C headers of lxml release tarballs published since then, the 47 # first of which was: 48 'lxml>=4.0.0', 49 'PyPyDispatcher>=2.1.0', 50 ] 51 else: 52 install_requires.append('lxml>=3.5.0') 53 54 55 setup( 56 name='Scrapy', 57 version=version, 58 url='https://scrapy.org', 59 project_urls={ 60 'Documentation': 'https://docs.scrapy.org/', 61 'Source': 'https://github.com/scrapy/scrapy', 62 'Tracker': 'https://github.com/scrapy/scrapy/issues', 63 }, 64 description='A high-level Web Crawling and Web Scraping framework', 65 long_description=open('README.rst').read(), 66 author='Scrapy developers', 67 maintainer='Pablo Hoffman', 68 maintainer_email='[email protected]', 69 license='BSD', 70 packages=find_packages(exclude=('tests', 'tests.*')), 71 include_package_data=True, 72 zip_safe=False, 73 entry_points={ 74 'console_scripts': ['scrapy = scrapy.cmdline:execute'] 75 }, 76 classifiers=[ 77 'Framework :: Scrapy', 78 'Development Status :: 5 - Production/Stable', 79 'Environment :: Console', 80 'Intended Audience :: Developers', 81 'License :: OSI Approved :: BSD License', 82 'Operating System :: OS Independent', 83 'Programming Language :: Python', 84 'Programming Language :: Python :: 3', 85 'Programming Language :: Python :: 
3.6', 86 'Programming Language :: Python :: 3.7', 87 'Programming Language :: Python :: 3.8', 88 'Programming Language :: Python :: Implementation :: CPython', 89 'Programming Language :: Python :: Implementation :: PyPy', 90 'Topic :: Internet :: WWW/HTTP', 91 'Topic :: Software Development :: Libraries :: Application Frameworks', 92 'Topic :: Software Development :: Libraries :: Python Modules', 93 ], 94 python_requires='>=3.6', 95 install_requires=install_requires, 96 extras_require=extras_require, 97 ) 98 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -24,7 +24,6 @@ 'cssselect>=0.9.1', 'itemloaders>=1.0.1', 'parsel>=1.5.0', - 'PyDispatcher>=2.0.5', 'pyOpenSSL>=16.2.0', 'queuelib>=1.4.2', 'service_identity>=16.0.0', @@ -34,11 +33,12 @@ 'itemadapter>=0.1.0', ] extras_require = {} - +cpython_dependencies = [ + 'lxml>=3.5.0', + 'PyDispatcher>=2.0.5', +] if has_environment_marker_platform_impl_support(): - extras_require[':platform_python_implementation == "CPython"'] = [ - 'lxml>=3.5.0', - ] + extras_require[':platform_python_implementation == "CPython"'] = cpython_dependencies extras_require[':platform_python_implementation == "PyPy"'] = [ # Earlier lxml versions are affected by # https://foss.heptapod.net/pypy/pypy/-/issues/2498, @@ -49,7 +49,7 @@ 'PyPyDispatcher>=2.1.0', ] else: - install_requires.append('lxml>=3.5.0') + install_requires.extend(cpython_dependencies) setup(
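The patch groups the CPython-only pins (`lxml`, `PyDispatcher`) behind a `platform_python_implementation` environment marker, presumably so the stricter resolver never tries to force them onto PyPy installs, which get `PyPyDispatcher` instead. The sketch below shows how such a marker is evaluated; it uses the third-party `packaging` library, which is an assumption of this example and not something the `setup.py` above imports.

```python
# Evaluate the same kind of environment marker that the extras_require keys
# above use (':platform_python_implementation == "CPython"').
from packaging.markers import Marker

marker = Marker('platform_python_implementation == "CPython"')

# True on CPython and False on PyPy, which is how pip decides whether the
# lxml/PyDispatcher pins apply to the running interpreter.
print(marker.evaluate())
```

The resolver behaviour itself can then be exercised on both interpreters with the flag quoted in the issue, e.g. `pip install --use-feature=2020-resolver .`.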
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -24,7 +24,6 @@\n 'cssselect>=0.9.1',\n 'itemloaders>=1.0.1',\n 'parsel>=1.5.0',\n- 'PyDispatcher>=2.0.5',\n 'pyOpenSSL>=16.2.0',\n 'queuelib>=1.4.2',\n 'service_identity>=16.0.0',\n@@ -34,11 +33,12 @@\n 'itemadapter>=0.1.0',\n ]\n extras_require = {}\n-\n+cpython_dependencies = [\n+ 'lxml>=3.5.0',\n+ 'PyDispatcher>=2.0.5',\n+]\n if has_environment_marker_platform_impl_support():\n- extras_require[':platform_python_implementation == \"CPython\"'] = [\n- 'lxml>=3.5.0',\n- ]\n+ extras_require[':platform_python_implementation == \"CPython\"'] = cpython_dependencies\n extras_require[':platform_python_implementation == \"PyPy\"'] = [\n # Earlier lxml versions are affected by\n # https://foss.heptapod.net/pypy/pypy/-/issues/2498,\n@@ -49,7 +49,7 @@\n 'PyPyDispatcher>=2.1.0',\n ]\n else:\n- install_requires.append('lxml>=3.5.0')\n+ install_requires.extend(cpython_dependencies)\n \n \n setup(\n", "issue": "Test the upcoming pip dependency resolver in CI\nFrom [email protected], about pip 20.2:\r\n\r\n> NOTICE: This release includes the beta of the next-generation dependency resolver. It is significantly stricter and more consistent when it receives incompatible instructions, and reduces support for certain kinds of constraints files, so some workarounds and workflows may break. Please test it with the `--use-feature=2020-resolver` flag. Please see our guide on how to test and migrate, and how to report issues\r\n<https://pip.pypa.io/en/latest/user_guide/#changes-to-the-pip-dependency-resolver-in-20-2-2020>.\r\n> \r\n> The new dependency resolver is *off by default* because it is *not yet\r\nready for everyday use*.\n", "before_files": [{"content": "from os.path import dirname, join\nfrom pkg_resources import parse_version\nfrom setuptools import setup, find_packages, __version__ as setuptools_version\n\n\nwith open(join(dirname(__file__), 'scrapy/VERSION'), 'rb') as f:\n version = f.read().decode('ascii').strip()\n\n\ndef has_environment_marker_platform_impl_support():\n \"\"\"Code extracted from 'pytest/setup.py'\n https://github.com/pytest-dev/pytest/blob/7538680c/setup.py#L31\n\n The first known release to support environment marker with range operators\n it is 18.5, see:\n https://setuptools.readthedocs.io/en/latest/history.html#id235\n \"\"\"\n return parse_version(setuptools_version) >= parse_version('18.5')\n\n\ninstall_requires = [\n 'Twisted>=17.9.0',\n 'cryptography>=2.0',\n 'cssselect>=0.9.1',\n 'itemloaders>=1.0.1',\n 'parsel>=1.5.0',\n 'PyDispatcher>=2.0.5',\n 'pyOpenSSL>=16.2.0',\n 'queuelib>=1.4.2',\n 'service_identity>=16.0.0',\n 'w3lib>=1.17.0',\n 'zope.interface>=4.1.3',\n 'protego>=0.1.15',\n 'itemadapter>=0.1.0',\n]\nextras_require = {}\n\nif has_environment_marker_platform_impl_support():\n extras_require[':platform_python_implementation == \"CPython\"'] = [\n 'lxml>=3.5.0',\n ]\n extras_require[':platform_python_implementation == \"PyPy\"'] = [\n # Earlier lxml versions are affected by\n # https://foss.heptapod.net/pypy/pypy/-/issues/2498,\n # which was fixed in Cython 0.26, released on 2017-06-19, and used to\n # generate the C headers of lxml release tarballs published since then, the\n # first of which was:\n 'lxml>=4.0.0',\n 'PyPyDispatcher>=2.1.0',\n ]\nelse:\n install_requires.append('lxml>=3.5.0')\n\n\nsetup(\n name='Scrapy',\n version=version,\n url='https://scrapy.org',\n project_urls={\n 'Documentation': 'https://docs.scrapy.org/',\n 'Source': 'https://github.com/scrapy/scrapy',\n 
'Tracker': 'https://github.com/scrapy/scrapy/issues',\n },\n description='A high-level Web Crawling and Web Scraping framework',\n long_description=open('README.rst').read(),\n author='Scrapy developers',\n maintainer='Pablo Hoffman',\n maintainer_email='[email protected]',\n license='BSD',\n packages=find_packages(exclude=('tests', 'tests.*')),\n include_package_data=True,\n zip_safe=False,\n entry_points={\n 'console_scripts': ['scrapy = scrapy.cmdline:execute']\n },\n classifiers=[\n 'Framework :: Scrapy',\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Software Development :: Libraries :: Application Frameworks',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n python_requires='>=3.6',\n install_requires=install_requires,\n extras_require=extras_require,\n)\n", "path": "setup.py"}]}
1,777
345
gh_patches_debug_1705
rasdani/github-patches
git_diff
ManimCommunity__manim-2013
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Improve TransformMatchingAbstractBase to support different types of objects ## Enhancement proposal <!-- Add a clear and concise description of your enhancement proposal. In particular, if your enhancement introduces changes to the API, illustrate them with (fictional) code examples. --> The first line of `TransformMatchingAbstractBase __init__()` method contains an `assert` keyword to make sure the `mobjects` are of the same type. This implementation makes using the class with submobjects of `Tex` or `MathTex` a bit difficult and it just doesn't look right. Here's what I mean: I've made this class: ```py class TransformTest(Scene): def construct(self): text1 = Tex("What's that? ", "the ", "morse code") text2 = Tex("Look! ", "here come dots").next_to(text1, DOWN) self.add(text1, text2[0]) self.wait(0.5) #self.play(TransformMatchingShapes(text1[1:], text2[1])) **This does not works (throws assertion error)** self.play(TransformMatchingShapes(text1[1:], text2[1:])) #**This does** ``` The line that is commented throws an `assertion error` because the first object is a `VGroup` and the second one is a `SingleStringMathTex`. But the line after that does work. So basically that `assert` statement is preventing the user from making certain transforms which are (and should be IMO) possible. If some extra logic is added to `TransformMatchingAbstractBase`, this kind of issue would be solved. Here's another example: ```py class TransformTest(Scene): def construct(self): text3 = Tex("Result", "eleven", " plus", " two", color=RED).next_to(text2, DOWN) text4 = Tex("twelve plus one", color=RED).next_to(text3, DOWN) self.add(text3, text4[0]) self.wait(0.5) #self.play(TransformMatchingShapes(text3[1:], text4)) **This doesn't work** self.play(TransformMatchingShapes(text3[1:], text4[:])) #**This does** self.wait(0.5) ``` Clearly the Transfroms are possible, but I don't think the user should need to add weird syntax that isn't necessary just so the program doesn't spit out an error. ## Additional comments <!-- Add further context that you think might be relevant. --> I'm using `TransformMatchingShapes` here but of course this also affects `TransformMatchingTex` since they share the same parent class. </issue> <code> [start of manim/animation/transform_matching_parts.py] 1 """Animations that try to transform Mobjects while keeping track of identical parts.""" 2 3 __all__ = ["TransformMatchingShapes", "TransformMatchingTex"] 4 5 from typing import TYPE_CHECKING, List, Optional 6 7 import numpy as np 8 9 from .._config import config 10 from ..mobject.mobject import Group, Mobject 11 from ..mobject.opengl_mobject import OpenGLGroup, OpenGLMobject 12 from ..mobject.types.opengl_vectorized_mobject import OpenGLVGroup, OpenGLVMobject 13 from ..mobject.types.vectorized_mobject import VGroup, VMobject 14 from .composition import AnimationGroup 15 from .fading import FadeIn, FadeOut 16 from .transform import FadeTransformPieces, Transform 17 18 if TYPE_CHECKING: 19 from ..scene.scene import Scene 20 21 22 class TransformMatchingAbstractBase(AnimationGroup): 23 """Abstract base class for transformations that keep track of matching parts. 24 25 Subclasses have to implement the two static methods 26 :meth:`~.TransformMatchingAbstractBase.get_mobject_parts` and 27 :meth:`~.TransformMatchingAbstractBase.get_mobject_key`. 
28 29 Basically, this transformation first maps all submobjects returned 30 by the ``get_mobject_parts`` method to certain keys by applying the 31 ``get_mobject_key`` method. Then, submobjects with matching keys 32 are transformed into each other. 33 34 Parameters 35 ---------- 36 mobject 37 The starting :class:`~.Mobject`. 38 target_mobject 39 The target :class:`~.Mobject`. 40 transform_mismatches 41 Controls whether submobjects without a matching key are transformed 42 into each other by using :class:`~.Transform`. Default: ``False``. 43 fade_transform_mismatches 44 Controls whether submobjects without a matching key are transformed 45 into each other by using :class:`~.FadeTransform`. Default: ``False``. 46 key_map 47 Optional. A dictionary mapping keys belonging to some of the starting mobject's 48 submobjects (i.e., the return values of the ``get_mobject_key`` method) 49 to some keys belonging to the target mobject's submobjects that should 50 be transformed although the keys don't match. 51 kwargs 52 All further keyword arguments are passed to the submobject transformations. 53 54 55 Note 56 ---- 57 If neither ``transform_mismatches`` nor ``fade_transform_mismatches`` 58 are set to ``True``, submobjects without matching keys in the starting 59 mobject are faded out in the direction of the unmatched submobjects in 60 the target mobject, and unmatched submobjects in the target mobject 61 are faded in from the direction of the unmatched submobjects in the 62 start mobject. 63 64 """ 65 66 def __init__( 67 self, 68 mobject: "Mobject", 69 target_mobject: "Mobject", 70 transform_mismatches: bool = False, 71 fade_transform_mismatches: bool = False, 72 key_map: Optional[dict] = None, 73 **kwargs 74 ): 75 assert type(mobject) is type(target_mobject) 76 77 if isinstance(mobject, OpenGLVMobject): 78 group_type = OpenGLVGroup 79 elif isinstance(mobject, OpenGLMobject): 80 group_type = OpenGLGroup 81 elif isinstance(mobject, VMobject): 82 group_type = VGroup 83 else: 84 group_type = Group 85 86 source_map = self.get_shape_map(mobject) 87 target_map = self.get_shape_map(target_mobject) 88 89 if key_map is None: 90 key_map = {} 91 92 # Create two mobjects whose submobjects all match each other 93 # according to whatever keys are used for source_map and 94 # target_map 95 transform_source = group_type() 96 transform_target = group_type() 97 kwargs["final_alpha_value"] = 0 98 for key in set(source_map).intersection(target_map): 99 transform_source.add(source_map[key]) 100 transform_target.add(target_map[key]) 101 anims = [Transform(transform_source, transform_target, **kwargs)] 102 # User can manually specify when one part should transform 103 # into another despite not matching by using key_map 104 key_mapped_source = group_type() 105 key_mapped_target = group_type() 106 for key1, key2 in key_map.items(): 107 if key1 in source_map and key2 in target_map: 108 key_mapped_source.add(source_map[key1]) 109 key_mapped_target.add(target_map[key2]) 110 source_map.pop(key1, None) 111 target_map.pop(key2, None) 112 if len(key_mapped_source) > 0: 113 anims.append( 114 FadeTransformPieces(key_mapped_source, key_mapped_target, **kwargs), 115 ) 116 117 fade_source = group_type() 118 fade_target = group_type() 119 for key in set(source_map).difference(target_map): 120 fade_source.add(source_map[key]) 121 for key in set(target_map).difference(source_map): 122 fade_target.add(target_map[key]) 123 124 if transform_mismatches: 125 if "replace_mobject_with_target_in_scene" not in kwargs: 126 
kwargs["replace_mobject_with_target_in_scene"] = True 127 anims.append(Transform(fade_source, fade_target, **kwargs)) 128 elif fade_transform_mismatches: 129 anims.append(FadeTransformPieces(fade_source, fade_target, **kwargs)) 130 else: 131 anims.append(FadeOut(fade_source, target_position=fade_target, **kwargs)) 132 anims.append( 133 FadeIn(fade_target.copy(), target_position=fade_target, **kwargs), 134 ) 135 136 super().__init__(*anims) 137 138 self.to_remove = mobject 139 self.to_add = target_mobject 140 141 def get_shape_map(self, mobject: "Mobject") -> dict: 142 shape_map = {} 143 for sm in self.get_mobject_parts(mobject): 144 key = self.get_mobject_key(sm) 145 if key not in shape_map: 146 if config["renderer"] == "opengl": 147 shape_map[key] = OpenGLVGroup() 148 else: 149 shape_map[key] = VGroup() 150 shape_map[key].add(sm) 151 return shape_map 152 153 def clean_up_from_scene(self, scene: "Scene") -> None: 154 for anim in self.animations: 155 anim.interpolate(0) 156 scene.remove(self.mobject) 157 scene.remove(self.to_remove) 158 scene.add(self.to_add) 159 160 @staticmethod 161 def get_mobject_parts(mobject: "Mobject"): 162 raise NotImplementedError("To be implemented in subclass.") 163 164 @staticmethod 165 def get_mobject_key(mobject: "Mobject"): 166 raise NotImplementedError("To be implemented in subclass.") 167 168 169 class TransformMatchingShapes(TransformMatchingAbstractBase): 170 """An animation trying to transform groups by matching the shape 171 of their submobjects. 172 173 Two submobjects match if the hash of their point coordinates after 174 normalization (i.e., after translation to the origin, fixing the submobject 175 height at 1 unit, and rounding the coordinates to three decimal places) 176 matches. 177 178 See also 179 -------- 180 :class:`~.TransformMatchingAbstractBase` 181 182 Examples 183 -------- 184 185 .. manim:: Anagram 186 187 class Anagram(Scene): 188 def construct(self): 189 src = Text("the morse code") 190 tar = Text("here come dots") 191 self.play(Write(src)) 192 self.wait(0.5) 193 self.play(TransformMatchingShapes(src, tar, path_arc=PI/2)) 194 self.wait(0.5) 195 196 """ 197 198 def __init__( 199 self, 200 mobject: "Mobject", 201 target_mobject: "Mobject", 202 transform_mismatches: bool = False, 203 fade_transform_mismatches: bool = False, 204 key_map: Optional[dict] = None, 205 **kwargs 206 ): 207 super().__init__( 208 mobject, 209 target_mobject, 210 transform_mismatches=transform_mismatches, 211 fade_transform_mismatches=fade_transform_mismatches, 212 key_map=key_map, 213 **kwargs 214 ) 215 216 @staticmethod 217 def get_mobject_parts(mobject: "Mobject") -> List["Mobject"]: 218 return mobject.family_members_with_points() 219 220 @staticmethod 221 def get_mobject_key(mobject: "Mobject") -> int: 222 mobject.save_state() 223 mobject.center() 224 mobject.set_height(1) 225 result = hash(np.round(mobject.points, 3).tobytes()) 226 mobject.restore() 227 return result 228 229 230 class TransformMatchingTex(TransformMatchingAbstractBase): 231 """A transformation trying to transform rendered LaTeX strings. 232 233 Two submobjects match if their ``tex_string`` matches. 234 235 See also 236 -------- 237 :class:`~.TransformMatchingAbstractBase` 238 239 Examples 240 -------- 241 242 .. 
manim:: MatchingEquationParts 243 244 class MatchingEquationParts(Scene): 245 def construct(self): 246 eq1 = MathTex("{{a^2}} + {{b^2}} = {{c^2}}") 247 eq2 = MathTex("{{a^2}} = {{c^2}} - {{b^2}}") 248 self.add(eq1) 249 self.wait(0.5) 250 self.play(TransformMatchingTex(eq1, eq2)) 251 self.wait(0.5) 252 253 """ 254 255 def __init__( 256 self, 257 mobject: "Mobject", 258 target_mobject: "Mobject", 259 transform_mismatches: bool = False, 260 fade_transform_mismatches: bool = False, 261 key_map: Optional[dict] = None, 262 **kwargs 263 ): 264 assert hasattr(mobject, "tex_string") 265 assert hasattr(target_mobject, "tex_string") 266 super().__init__( 267 mobject, 268 target_mobject, 269 transform_mismatches=transform_mismatches, 270 fade_transform_mismatches=fade_transform_mismatches, 271 key_map=key_map, 272 **kwargs 273 ) 274 275 @staticmethod 276 def get_mobject_parts(mobject: "Mobject") -> List["Mobject"]: 277 return mobject.submobjects 278 279 @staticmethod 280 def get_mobject_key(mobject: "Mobject") -> str: 281 return mobject.tex_string 282 [end of manim/animation/transform_matching_parts.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/manim/animation/transform_matching_parts.py b/manim/animation/transform_matching_parts.py --- a/manim/animation/transform_matching_parts.py +++ b/manim/animation/transform_matching_parts.py @@ -72,7 +72,6 @@ key_map: Optional[dict] = None, **kwargs ): - assert type(mobject) is type(target_mobject) if isinstance(mobject, OpenGLVMobject): group_type = OpenGLVGroup
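The whole fix is dropping the `assert type(mobject) is type(target_mobject)` guard, so mixed-type pairs reach the normal matching logic. The scene below is adapted from the issue report with the previously rejected line re-enabled; it assumes a working Manim Community install (including LaTeX for `Tex`) and is only a sketch of what the patch is meant to allow, not a test shipped with the fix.

```python
from manim import DOWN, Scene, Tex, TransformMatchingShapes


class TransformTest(Scene):
    def construct(self):
        text1 = Tex("What's that? ", "the ", "morse code")
        text2 = Tex("Look! ", "here come dots").next_to(text1, DOWN)

        self.add(text1, text2[0])
        self.wait(0.5)
        # text1[1:] is a VGroup while text2[1] is a SingleStringMathTex;
        # before the patch this line raised AssertionError.
        self.play(TransformMatchingShapes(text1[1:], text2[1]))
```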
{"golden_diff": "diff --git a/manim/animation/transform_matching_parts.py b/manim/animation/transform_matching_parts.py\n--- a/manim/animation/transform_matching_parts.py\n+++ b/manim/animation/transform_matching_parts.py\n@@ -72,7 +72,6 @@\n key_map: Optional[dict] = None,\n **kwargs\n ):\n- assert type(mobject) is type(target_mobject)\n \n if isinstance(mobject, OpenGLVMobject):\n group_type = OpenGLVGroup\n", "issue": "Improve TransformMatchingAbstractBase to support different types of objects\n## Enhancement proposal\r\n<!-- Add a clear and concise description of your enhancement proposal. In particular,\r\n if your enhancement introduces changes to the API, illustrate them with\r\n (fictional) code examples. -->\r\n\r\nThe first line of `TransformMatchingAbstractBase __init__()` method contains an `assert` keyword to make sure the `mobjects` are of the same type. This implementation makes using the class with submobjects of `Tex` or `MathTex` a bit difficult and it just doesn't look right. Here's what I mean: \r\n\r\nI've made this class:\r\n\r\n```py\r\nclass TransformTest(Scene):\r\n def construct(self):\r\n text1 = Tex(\"What's that? \", \"the \", \"morse code\")\r\n text2 = Tex(\"Look! \", \"here come dots\").next_to(text1, DOWN)\r\n\r\n self.add(text1, text2[0])\r\n self.wait(0.5)\r\n #self.play(TransformMatchingShapes(text1[1:], text2[1])) **This does not works (throws assertion error)**\r\n self.play(TransformMatchingShapes(text1[1:], text2[1:])) #**This does**\r\n```\r\nThe line that is commented throws an `assertion error` because the first object is a `VGroup` and the second one is a `SingleStringMathTex`. But the line after that does work. So basically that `assert` statement is preventing the user from making certain transforms which are (and should be IMO) possible. If some extra logic is added to `TransformMatchingAbstractBase`, this kind of issue would be solved.\r\n\r\nHere's another example:\r\n\r\n```py\r\nclass TransformTest(Scene):\r\n def construct(self):\r\n\r\n text3 = Tex(\"Result\", \"eleven\", \" plus\", \" two\", color=RED).next_to(text2, DOWN)\r\n text4 = Tex(\"twelve plus one\", color=RED).next_to(text3, DOWN)\r\n\r\n self.add(text3, text4[0])\r\n self.wait(0.5)\r\n #self.play(TransformMatchingShapes(text3[1:], text4)) **This doesn't work**\r\n self.play(TransformMatchingShapes(text3[1:], text4[:])) #**This does**\r\n self.wait(0.5)\r\n```\r\n\r\nClearly the Transfroms are possible, but I don't think the user should need to add weird syntax that isn't necessary just so the program doesn't spit out an error.\r\n\r\n## Additional comments\r\n<!-- Add further context that you think might be relevant. 
-->\r\nI'm using `TransformMatchingShapes` here but of course this also affects `TransformMatchingTex` since they share the same parent class.\r\n\n", "before_files": [{"content": "\"\"\"Animations that try to transform Mobjects while keeping track of identical parts.\"\"\"\n\n__all__ = [\"TransformMatchingShapes\", \"TransformMatchingTex\"]\n\nfrom typing import TYPE_CHECKING, List, Optional\n\nimport numpy as np\n\nfrom .._config import config\nfrom ..mobject.mobject import Group, Mobject\nfrom ..mobject.opengl_mobject import OpenGLGroup, OpenGLMobject\nfrom ..mobject.types.opengl_vectorized_mobject import OpenGLVGroup, OpenGLVMobject\nfrom ..mobject.types.vectorized_mobject import VGroup, VMobject\nfrom .composition import AnimationGroup\nfrom .fading import FadeIn, FadeOut\nfrom .transform import FadeTransformPieces, Transform\n\nif TYPE_CHECKING:\n from ..scene.scene import Scene\n\n\nclass TransformMatchingAbstractBase(AnimationGroup):\n \"\"\"Abstract base class for transformations that keep track of matching parts.\n\n Subclasses have to implement the two static methods\n :meth:`~.TransformMatchingAbstractBase.get_mobject_parts` and\n :meth:`~.TransformMatchingAbstractBase.get_mobject_key`.\n\n Basically, this transformation first maps all submobjects returned\n by the ``get_mobject_parts`` method to certain keys by applying the\n ``get_mobject_key`` method. Then, submobjects with matching keys\n are transformed into each other.\n\n Parameters\n ----------\n mobject\n The starting :class:`~.Mobject`.\n target_mobject\n The target :class:`~.Mobject`.\n transform_mismatches\n Controls whether submobjects without a matching key are transformed\n into each other by using :class:`~.Transform`. Default: ``False``.\n fade_transform_mismatches\n Controls whether submobjects without a matching key are transformed\n into each other by using :class:`~.FadeTransform`. Default: ``False``.\n key_map\n Optional. 
A dictionary mapping keys belonging to some of the starting mobject's\n submobjects (i.e., the return values of the ``get_mobject_key`` method)\n to some keys belonging to the target mobject's submobjects that should\n be transformed although the keys don't match.\n kwargs\n All further keyword arguments are passed to the submobject transformations.\n\n\n Note\n ----\n If neither ``transform_mismatches`` nor ``fade_transform_mismatches``\n are set to ``True``, submobjects without matching keys in the starting\n mobject are faded out in the direction of the unmatched submobjects in\n the target mobject, and unmatched submobjects in the target mobject\n are faded in from the direction of the unmatched submobjects in the\n start mobject.\n\n \"\"\"\n\n def __init__(\n self,\n mobject: \"Mobject\",\n target_mobject: \"Mobject\",\n transform_mismatches: bool = False,\n fade_transform_mismatches: bool = False,\n key_map: Optional[dict] = None,\n **kwargs\n ):\n assert type(mobject) is type(target_mobject)\n\n if isinstance(mobject, OpenGLVMobject):\n group_type = OpenGLVGroup\n elif isinstance(mobject, OpenGLMobject):\n group_type = OpenGLGroup\n elif isinstance(mobject, VMobject):\n group_type = VGroup\n else:\n group_type = Group\n\n source_map = self.get_shape_map(mobject)\n target_map = self.get_shape_map(target_mobject)\n\n if key_map is None:\n key_map = {}\n\n # Create two mobjects whose submobjects all match each other\n # according to whatever keys are used for source_map and\n # target_map\n transform_source = group_type()\n transform_target = group_type()\n kwargs[\"final_alpha_value\"] = 0\n for key in set(source_map).intersection(target_map):\n transform_source.add(source_map[key])\n transform_target.add(target_map[key])\n anims = [Transform(transform_source, transform_target, **kwargs)]\n # User can manually specify when one part should transform\n # into another despite not matching by using key_map\n key_mapped_source = group_type()\n key_mapped_target = group_type()\n for key1, key2 in key_map.items():\n if key1 in source_map and key2 in target_map:\n key_mapped_source.add(source_map[key1])\n key_mapped_target.add(target_map[key2])\n source_map.pop(key1, None)\n target_map.pop(key2, None)\n if len(key_mapped_source) > 0:\n anims.append(\n FadeTransformPieces(key_mapped_source, key_mapped_target, **kwargs),\n )\n\n fade_source = group_type()\n fade_target = group_type()\n for key in set(source_map).difference(target_map):\n fade_source.add(source_map[key])\n for key in set(target_map).difference(source_map):\n fade_target.add(target_map[key])\n\n if transform_mismatches:\n if \"replace_mobject_with_target_in_scene\" not in kwargs:\n kwargs[\"replace_mobject_with_target_in_scene\"] = True\n anims.append(Transform(fade_source, fade_target, **kwargs))\n elif fade_transform_mismatches:\n anims.append(FadeTransformPieces(fade_source, fade_target, **kwargs))\n else:\n anims.append(FadeOut(fade_source, target_position=fade_target, **kwargs))\n anims.append(\n FadeIn(fade_target.copy(), target_position=fade_target, **kwargs),\n )\n\n super().__init__(*anims)\n\n self.to_remove = mobject\n self.to_add = target_mobject\n\n def get_shape_map(self, mobject: \"Mobject\") -> dict:\n shape_map = {}\n for sm in self.get_mobject_parts(mobject):\n key = self.get_mobject_key(sm)\n if key not in shape_map:\n if config[\"renderer\"] == \"opengl\":\n shape_map[key] = OpenGLVGroup()\n else:\n shape_map[key] = VGroup()\n shape_map[key].add(sm)\n return shape_map\n\n def clean_up_from_scene(self, scene: 
\"Scene\") -> None:\n for anim in self.animations:\n anim.interpolate(0)\n scene.remove(self.mobject)\n scene.remove(self.to_remove)\n scene.add(self.to_add)\n\n @staticmethod\n def get_mobject_parts(mobject: \"Mobject\"):\n raise NotImplementedError(\"To be implemented in subclass.\")\n\n @staticmethod\n def get_mobject_key(mobject: \"Mobject\"):\n raise NotImplementedError(\"To be implemented in subclass.\")\n\n\nclass TransformMatchingShapes(TransformMatchingAbstractBase):\n \"\"\"An animation trying to transform groups by matching the shape\n of their submobjects.\n\n Two submobjects match if the hash of their point coordinates after\n normalization (i.e., after translation to the origin, fixing the submobject\n height at 1 unit, and rounding the coordinates to three decimal places)\n matches.\n\n See also\n --------\n :class:`~.TransformMatchingAbstractBase`\n\n Examples\n --------\n\n .. manim:: Anagram\n\n class Anagram(Scene):\n def construct(self):\n src = Text(\"the morse code\")\n tar = Text(\"here come dots\")\n self.play(Write(src))\n self.wait(0.5)\n self.play(TransformMatchingShapes(src, tar, path_arc=PI/2))\n self.wait(0.5)\n\n \"\"\"\n\n def __init__(\n self,\n mobject: \"Mobject\",\n target_mobject: \"Mobject\",\n transform_mismatches: bool = False,\n fade_transform_mismatches: bool = False,\n key_map: Optional[dict] = None,\n **kwargs\n ):\n super().__init__(\n mobject,\n target_mobject,\n transform_mismatches=transform_mismatches,\n fade_transform_mismatches=fade_transform_mismatches,\n key_map=key_map,\n **kwargs\n )\n\n @staticmethod\n def get_mobject_parts(mobject: \"Mobject\") -> List[\"Mobject\"]:\n return mobject.family_members_with_points()\n\n @staticmethod\n def get_mobject_key(mobject: \"Mobject\") -> int:\n mobject.save_state()\n mobject.center()\n mobject.set_height(1)\n result = hash(np.round(mobject.points, 3).tobytes())\n mobject.restore()\n return result\n\n\nclass TransformMatchingTex(TransformMatchingAbstractBase):\n \"\"\"A transformation trying to transform rendered LaTeX strings.\n\n Two submobjects match if their ``tex_string`` matches.\n\n See also\n --------\n :class:`~.TransformMatchingAbstractBase`\n\n Examples\n --------\n\n .. manim:: MatchingEquationParts\n\n class MatchingEquationParts(Scene):\n def construct(self):\n eq1 = MathTex(\"{{a^2}} + {{b^2}} = {{c^2}}\")\n eq2 = MathTex(\"{{a^2}} = {{c^2}} - {{b^2}}\")\n self.add(eq1)\n self.wait(0.5)\n self.play(TransformMatchingTex(eq1, eq2))\n self.wait(0.5)\n\n \"\"\"\n\n def __init__(\n self,\n mobject: \"Mobject\",\n target_mobject: \"Mobject\",\n transform_mismatches: bool = False,\n fade_transform_mismatches: bool = False,\n key_map: Optional[dict] = None,\n **kwargs\n ):\n assert hasattr(mobject, \"tex_string\")\n assert hasattr(target_mobject, \"tex_string\")\n super().__init__(\n mobject,\n target_mobject,\n transform_mismatches=transform_mismatches,\n fade_transform_mismatches=fade_transform_mismatches,\n key_map=key_map,\n **kwargs\n )\n\n @staticmethod\n def get_mobject_parts(mobject: \"Mobject\") -> List[\"Mobject\"]:\n return mobject.submobjects\n\n @staticmethod\n def get_mobject_key(mobject: \"Mobject\") -> str:\n return mobject.tex_string\n", "path": "manim/animation/transform_matching_parts.py"}]}
4,022
109
gh_patches_debug_11321
rasdani/github-patches
git_diff
Kinto__kinto-963
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Don't use /tmp but os.gettempdir ``` >> Issue: [B108:hardcoded_tmp_directory] Probable insecure usage of temp file/directory. Severity: Medium Confidence: Medium Location: kinto/core/__init__.py:66 ``` Don't use /tmp but os.gettempdir ``` >> Issue: [B108:hardcoded_tmp_directory] Probable insecure usage of temp file/directory. Severity: Medium Confidence: Medium Location: kinto/core/__init__.py:66 ``` </issue> <code> [start of kinto/core/__init__.py] 1 """Main entry point 2 """ 3 import pkg_resources 4 5 from cornice import Service as CorniceService 6 from pyramid.settings import aslist 7 8 from kinto.core import errors 9 from kinto.core import events 10 from kinto.core.initialization import ( # NOQA 11 initialize, install_middlewares, 12 load_default_settings) 13 from kinto.core.utils import ( 14 follow_subrequest, current_service, current_resource_name, 15 prefixed_userid, prefixed_principals) 16 from kinto.core.logs import logger 17 18 19 # Module version, as defined in PEP-0396. 20 __version__ = pkg_resources.get_distribution('kinto').version # FIXME? 21 22 23 DEFAULT_SETTINGS = { 24 'backoff': None, 25 'batch_max_requests': 25, 26 'cache_backend': '', 27 'cache_url': '', 28 'cache_pool_size': 25, 29 'cache_prefix': '', 30 'cors_origins': '*', 31 'cors_max_age_seconds': 3600, 32 'eos': None, 33 'eos_message': None, 34 'eos_url': None, 35 'error_info_link': 'https://github.com/Kinto/kinto/issues/', 36 'http_host': None, 37 'http_scheme': None, 38 'id_generator': 'kinto.core.storage.generators.UUID4', 39 'includes': '', 40 'initialization_sequence': ( 41 'kinto.core.initialization.setup_request_bound_data', 42 'kinto.core.initialization.setup_json_serializer', 43 'kinto.core.initialization.setup_logging', 44 'kinto.core.initialization.setup_storage', 45 'kinto.core.initialization.setup_permission', 46 'kinto.core.initialization.setup_cache', 47 'kinto.core.initialization.setup_requests_scheme', 48 'kinto.core.initialization.setup_version_redirection', 49 'kinto.core.initialization.setup_deprecation', 50 'kinto.core.initialization.setup_authentication', 51 'kinto.core.initialization.setup_backoff', 52 'kinto.core.initialization.setup_statsd', 53 'kinto.core.initialization.setup_listeners', 54 'kinto.core.events.setup_transaction_hook', 55 ), 56 'event_listeners': '', 57 'heartbeat_timeout_seconds': 10, 58 'logging_renderer': 'kinto.core.logs.ClassicLogRenderer', 59 'newrelic_config': None, 60 'newrelic_env': 'dev', 61 'paginate_by': None, 62 'permission_backend': '', 63 'permission_url': '', 64 'permission_pool_size': 25, 65 'profiler_dir': '/tmp', 66 'profiler_enabled': False, 67 'project_docs': '', 68 'project_name': '', 69 'project_version': '', 70 'readonly': False, 71 'retry_after_seconds': 30, 72 'statsd_backend': 'kinto.core.statsd', 73 'statsd_prefix': 'kinto.core', 74 'statsd_url': None, 75 'storage_backend': '', 76 'storage_url': '', 77 'storage_max_fetch_size': 10000, 78 'storage_pool_size': 25, 79 'tm.annotate_user': False, # Do annotate transactions with the user-id. 80 'transaction_per_request': True, 81 'userid_hmac_secret': '', 82 'version_json_path': 'version.json', 83 'version_prefix_redirect_enabled': True, 84 'trailing_slash_redirect_enabled': True, 85 'multiauth.groupfinder': 'kinto.core.authorization.groupfinder', 86 'multiauth.policies': 'basicauth', 87 'multiauth.policy.basicauth.use': ('kinto.core.authentication.' 
88 'BasicAuthAuthenticationPolicy'), 89 'multiauth.authorization_policy': ('kinto.core.authorization.' 90 'AuthorizationPolicy') 91 } 92 93 94 class Service(CorniceService): 95 """Subclass of the default cornice service. 96 97 This is useful in order to attach specific behaviours without monkey 98 patching the default cornice service (which would impact other uses of it) 99 """ 100 default_cors_headers = ('Backoff', 'Retry-After', 'Alert', 101 'Content-Length') 102 103 def error_handler(self, request): 104 return errors.json_error_handler(request) 105 106 @classmethod 107 def init_from_settings(cls, settings): 108 cls.cors_origins = tuple(aslist(settings['cors_origins'])) 109 cors_max_age = settings['cors_max_age_seconds'] 110 cls.cors_max_age = int(cors_max_age) if cors_max_age else None 111 112 113 def includeme(config): 114 settings = config.get_settings() 115 116 # Heartbeat registry. 117 config.registry.heartbeats = {} 118 119 # Public settings registry. 120 config.registry.public_settings = {'batch_max_requests', 'readonly'} 121 122 # Directive to declare arbitrary API capabilities. 123 def add_api_capability(config, identifier, description="", url="", **kw): 124 existing = config.registry.api_capabilities.get(identifier) 125 if existing: 126 error_msg = "The '%s' API capability was already registered (%s)." 127 raise ValueError(error_msg % (identifier, existing)) 128 129 capability = dict(description=description, url=url, **kw) 130 config.registry.api_capabilities[identifier] = capability 131 132 config.add_directive('add_api_capability', add_api_capability) 133 config.registry.api_capabilities = {} 134 135 # Resource events helpers. 136 config.add_request_method(events.get_resource_events, 137 name='get_resource_events') 138 config.add_request_method(events.notify_resource_event, 139 name='notify_resource_event') 140 141 # Setup cornice. 142 config.include("cornice") 143 144 # Per-request transaction. 145 config.include("pyramid_tm") 146 147 # Add CORS settings to the base kinto.core Service class. 148 Service.init_from_settings(settings) 149 150 # Setup components. 151 for step in aslist(settings['initialization_sequence']): 152 step_func = config.maybe_dotted(step) 153 step_func(config) 154 155 # Custom helpers. 156 config.add_request_method(follow_subrequest) 157 config.add_request_method(prefixed_userid, property=True) 158 config.add_request_method(prefixed_principals, reify=True) 159 config.add_request_method(lambda r: { 160 'id': r.prefixed_userid, 161 'principals': r.prefixed_principals}, 162 name='get_user_info') 163 config.add_request_method(current_resource_name, reify=True) 164 config.add_request_method(current_service, reify=True) 165 config.commit() 166 167 # Include plugins after init, unlike pyramid includes. 168 includes = aslist(settings['includes']) 169 for app in includes: 170 config.include(app) 171 172 # # Show settings to output. 173 # for key, value in settings.items(): 174 # logger.info('Using %s = %s' % (key, value)) 175 176 # Scan views. 177 config.scan("kinto.core.views") 178 179 # Give sign of life. 180 msg = "Running %(project_name)s %(project_version)s." 181 logger.info(msg % settings) 182 [end of kinto/core/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/kinto/core/__init__.py b/kinto/core/__init__.py --- a/kinto/core/__init__.py +++ b/kinto/core/__init__.py @@ -1,6 +1,7 @@ """Main entry point """ import pkg_resources +import tempfile from cornice import Service as CorniceService from pyramid.settings import aslist @@ -62,7 +63,7 @@ 'permission_backend': '', 'permission_url': '', 'permission_pool_size': 25, - 'profiler_dir': '/tmp', + 'profiler_dir': tempfile.gettempdir(), 'profiler_enabled': False, 'project_docs': '', 'project_name': '',
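The replacement default relies only on the standard library: `tempfile.gettempdir()` resolves the platform's temporary directory (honouring `TMPDIR`/`TEMP`/`TMP`) instead of hard-coding the POSIX-only `/tmp` that the B108 `hardcoded_tmp_directory` warning in the issue flags. A quick standalone sketch follows; the `profile.prof` filename is just an assumed example, not something Kinto writes.

```python
import os
import tempfile

# What the new default evaluates to at import time of kinto.core.
profiler_dir = tempfile.gettempdir()
print(profiler_dir)  # e.g. '/tmp' on Linux, the per-user temp folder on Windows

# Building a path inside it, with an assumed example filename.
print(os.path.join(profiler_dir, 'profile.prof'))
```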
{"golden_diff": "diff --git a/kinto/core/__init__.py b/kinto/core/__init__.py\n--- a/kinto/core/__init__.py\n+++ b/kinto/core/__init__.py\n@@ -1,6 +1,7 @@\n \"\"\"Main entry point\n \"\"\"\n import pkg_resources\n+import tempfile\n \n from cornice import Service as CorniceService\n from pyramid.settings import aslist\n@@ -62,7 +63,7 @@\n 'permission_backend': '',\n 'permission_url': '',\n 'permission_pool_size': 25,\n- 'profiler_dir': '/tmp',\n+ 'profiler_dir': tempfile.gettempdir(),\n 'profiler_enabled': False,\n 'project_docs': '',\n 'project_name': '',\n", "issue": "Don't use /tmp but os.gettempdir\n```\r\n>> Issue: [B108:hardcoded_tmp_directory] Probable insecure usage of temp file/directory.\r\n Severity: Medium Confidence: Medium\r\n Location: kinto/core/__init__.py:66\r\n```\nDon't use /tmp but os.gettempdir\n```\r\n>> Issue: [B108:hardcoded_tmp_directory] Probable insecure usage of temp file/directory.\r\n Severity: Medium Confidence: Medium\r\n Location: kinto/core/__init__.py:66\r\n```\n", "before_files": [{"content": "\"\"\"Main entry point\n\"\"\"\nimport pkg_resources\n\nfrom cornice import Service as CorniceService\nfrom pyramid.settings import aslist\n\nfrom kinto.core import errors\nfrom kinto.core import events\nfrom kinto.core.initialization import ( # NOQA\n initialize, install_middlewares,\n load_default_settings)\nfrom kinto.core.utils import (\n follow_subrequest, current_service, current_resource_name,\n prefixed_userid, prefixed_principals)\nfrom kinto.core.logs import logger\n\n\n# Module version, as defined in PEP-0396.\n__version__ = pkg_resources.get_distribution('kinto').version # FIXME?\n\n\nDEFAULT_SETTINGS = {\n 'backoff': None,\n 'batch_max_requests': 25,\n 'cache_backend': '',\n 'cache_url': '',\n 'cache_pool_size': 25,\n 'cache_prefix': '',\n 'cors_origins': '*',\n 'cors_max_age_seconds': 3600,\n 'eos': None,\n 'eos_message': None,\n 'eos_url': None,\n 'error_info_link': 'https://github.com/Kinto/kinto/issues/',\n 'http_host': None,\n 'http_scheme': None,\n 'id_generator': 'kinto.core.storage.generators.UUID4',\n 'includes': '',\n 'initialization_sequence': (\n 'kinto.core.initialization.setup_request_bound_data',\n 'kinto.core.initialization.setup_json_serializer',\n 'kinto.core.initialization.setup_logging',\n 'kinto.core.initialization.setup_storage',\n 'kinto.core.initialization.setup_permission',\n 'kinto.core.initialization.setup_cache',\n 'kinto.core.initialization.setup_requests_scheme',\n 'kinto.core.initialization.setup_version_redirection',\n 'kinto.core.initialization.setup_deprecation',\n 'kinto.core.initialization.setup_authentication',\n 'kinto.core.initialization.setup_backoff',\n 'kinto.core.initialization.setup_statsd',\n 'kinto.core.initialization.setup_listeners',\n 'kinto.core.events.setup_transaction_hook',\n ),\n 'event_listeners': '',\n 'heartbeat_timeout_seconds': 10,\n 'logging_renderer': 'kinto.core.logs.ClassicLogRenderer',\n 'newrelic_config': None,\n 'newrelic_env': 'dev',\n 'paginate_by': None,\n 'permission_backend': '',\n 'permission_url': '',\n 'permission_pool_size': 25,\n 'profiler_dir': '/tmp',\n 'profiler_enabled': False,\n 'project_docs': '',\n 'project_name': '',\n 'project_version': '',\n 'readonly': False,\n 'retry_after_seconds': 30,\n 'statsd_backend': 'kinto.core.statsd',\n 'statsd_prefix': 'kinto.core',\n 'statsd_url': None,\n 'storage_backend': '',\n 'storage_url': '',\n 'storage_max_fetch_size': 10000,\n 'storage_pool_size': 25,\n 'tm.annotate_user': False, # Do annotate transactions with the user-id.\n 
'transaction_per_request': True,\n 'userid_hmac_secret': '',\n 'version_json_path': 'version.json',\n 'version_prefix_redirect_enabled': True,\n 'trailing_slash_redirect_enabled': True,\n 'multiauth.groupfinder': 'kinto.core.authorization.groupfinder',\n 'multiauth.policies': 'basicauth',\n 'multiauth.policy.basicauth.use': ('kinto.core.authentication.'\n 'BasicAuthAuthenticationPolicy'),\n 'multiauth.authorization_policy': ('kinto.core.authorization.'\n 'AuthorizationPolicy')\n}\n\n\nclass Service(CorniceService):\n \"\"\"Subclass of the default cornice service.\n\n This is useful in order to attach specific behaviours without monkey\n patching the default cornice service (which would impact other uses of it)\n \"\"\"\n default_cors_headers = ('Backoff', 'Retry-After', 'Alert',\n 'Content-Length')\n\n def error_handler(self, request):\n return errors.json_error_handler(request)\n\n @classmethod\n def init_from_settings(cls, settings):\n cls.cors_origins = tuple(aslist(settings['cors_origins']))\n cors_max_age = settings['cors_max_age_seconds']\n cls.cors_max_age = int(cors_max_age) if cors_max_age else None\n\n\ndef includeme(config):\n settings = config.get_settings()\n\n # Heartbeat registry.\n config.registry.heartbeats = {}\n\n # Public settings registry.\n config.registry.public_settings = {'batch_max_requests', 'readonly'}\n\n # Directive to declare arbitrary API capabilities.\n def add_api_capability(config, identifier, description=\"\", url=\"\", **kw):\n existing = config.registry.api_capabilities.get(identifier)\n if existing:\n error_msg = \"The '%s' API capability was already registered (%s).\"\n raise ValueError(error_msg % (identifier, existing))\n\n capability = dict(description=description, url=url, **kw)\n config.registry.api_capabilities[identifier] = capability\n\n config.add_directive('add_api_capability', add_api_capability)\n config.registry.api_capabilities = {}\n\n # Resource events helpers.\n config.add_request_method(events.get_resource_events,\n name='get_resource_events')\n config.add_request_method(events.notify_resource_event,\n name='notify_resource_event')\n\n # Setup cornice.\n config.include(\"cornice\")\n\n # Per-request transaction.\n config.include(\"pyramid_tm\")\n\n # Add CORS settings to the base kinto.core Service class.\n Service.init_from_settings(settings)\n\n # Setup components.\n for step in aslist(settings['initialization_sequence']):\n step_func = config.maybe_dotted(step)\n step_func(config)\n\n # Custom helpers.\n config.add_request_method(follow_subrequest)\n config.add_request_method(prefixed_userid, property=True)\n config.add_request_method(prefixed_principals, reify=True)\n config.add_request_method(lambda r: {\n 'id': r.prefixed_userid,\n 'principals': r.prefixed_principals},\n name='get_user_info')\n config.add_request_method(current_resource_name, reify=True)\n config.add_request_method(current_service, reify=True)\n config.commit()\n\n # Include plugins after init, unlike pyramid includes.\n includes = aslist(settings['includes'])\n for app in includes:\n config.include(app)\n\n # # Show settings to output.\n # for key, value in settings.items():\n # logger.info('Using %s = %s' % (key, value))\n\n # Scan views.\n config.scan(\"kinto.core.views\")\n\n # Give sign of life.\n msg = \"Running %(project_name)s %(project_version)s.\"\n logger.info(msg % settings)\n", "path": "kinto/core/__init__.py"}]}
2,550
157
gh_patches_debug_5803
rasdani/github-patches
git_diff
pre-commit__pre-commit-986
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Error when building RPM When building RPM for Fedora i got this: `*** ERROR: ambiguous python shebang in /usr/lib/python3.7/site-packages/pre_commit/resources/hook-tmpl: #!/usr/bin/env python. Change it to python3 (or python2) explicitly.` Would be great if this fixed. </issue> <code> [start of pre_commit/commands/install_uninstall.py] 1 from __future__ import print_function 2 from __future__ import unicode_literals 3 4 import io 5 import itertools 6 import logging 7 import os.path 8 import sys 9 10 from pre_commit import git 11 from pre_commit import output 12 from pre_commit.clientlib import load_config 13 from pre_commit.repository import all_hooks 14 from pre_commit.repository import install_hook_envs 15 from pre_commit.util import cmd_output 16 from pre_commit.util import make_executable 17 from pre_commit.util import mkdirp 18 from pre_commit.util import resource_text 19 20 21 logger = logging.getLogger(__name__) 22 23 # This is used to identify the hook file we install 24 PRIOR_HASHES = ( 25 '4d9958c90bc262f47553e2c073f14cfe', 26 'd8ee923c46731b42cd95cc869add4062', 27 '49fd668cb42069aa1b6048464be5d395', 28 '79f09a650522a87b0da915d0d983b2de', 29 'e358c9dae00eac5d06b38dfdb1e33a8c', 30 ) 31 CURRENT_HASH = '138fd403232d2ddd5efb44317e38bf03' 32 TEMPLATE_START = '# start templated\n' 33 TEMPLATE_END = '# end templated\n' 34 35 36 def _hook_paths(hook_type): 37 pth = os.path.join(git.get_git_dir(), 'hooks', hook_type) 38 return pth, '{}.legacy'.format(pth) 39 40 41 def is_our_script(filename): 42 if not os.path.exists(filename): # pragma: windows no cover (symlink) 43 return False 44 with io.open(filename) as f: 45 contents = f.read() 46 return any(h in contents for h in (CURRENT_HASH,) + PRIOR_HASHES) 47 48 49 def shebang(): 50 if sys.platform == 'win32': 51 py = 'python' 52 else: 53 # Homebrew/homebrew-core#35825: be more timid about appropriate `PATH` 54 path_choices = [p for p in os.defpath.split(os.pathsep) if p] 55 exe_choices = [ 56 'python{}'.format('.'.join(str(v) for v in sys.version_info[:i])) 57 for i in range(3) 58 ] 59 for path, exe in itertools.product(path_choices, exe_choices): 60 if os.path.exists(os.path.join(path, exe)): 61 py = exe 62 break 63 else: 64 py = 'python' 65 return '#!/usr/bin/env {}'.format(py) 66 67 68 def install( 69 config_file, store, 70 overwrite=False, hooks=False, hook_type='pre-commit', 71 skip_on_missing_conf=False, 72 ): 73 """Install the pre-commit hooks.""" 74 if cmd_output('git', 'config', 'core.hooksPath', retcode=None)[1].strip(): 75 logger.error( 76 'Cowardly refusing to install hooks with `core.hooksPath` set.\n' 77 'hint: `git config --unset-all core.hooksPath`', 78 ) 79 return 1 80 81 hook_path, legacy_path = _hook_paths(hook_type) 82 83 mkdirp(os.path.dirname(hook_path)) 84 85 # If we have an existing hook, move it to pre-commit.legacy 86 if os.path.lexists(hook_path) and not is_our_script(hook_path): 87 os.rename(hook_path, legacy_path) 88 89 # If we specify overwrite, we simply delete the legacy file 90 if overwrite and os.path.exists(legacy_path): 91 os.remove(legacy_path) 92 elif os.path.exists(legacy_path): 93 output.write_line( 94 'Running in migration mode with existing hooks at {}\n' 95 'Use -f to use only pre-commit.'.format(legacy_path), 96 ) 97 98 params = { 99 'CONFIG': config_file, 100 'HOOK_TYPE': hook_type, 101 'INSTALL_PYTHON': sys.executable, 102 'SKIP_ON_MISSING_CONFIG': skip_on_missing_conf, 103 } 104 105 with 
io.open(hook_path, 'w') as hook_file: 106 contents = resource_text('hook-tmpl') 107 before, rest = contents.split(TEMPLATE_START) 108 to_template, after = rest.split(TEMPLATE_END) 109 110 before = before.replace('#!/usr/bin/env python', shebang()) 111 112 hook_file.write(before + TEMPLATE_START) 113 for line in to_template.splitlines(): 114 var = line.split()[0] 115 hook_file.write('{} = {!r}\n'.format(var, params[var])) 116 hook_file.write(TEMPLATE_END + after) 117 make_executable(hook_path) 118 119 output.write_line('pre-commit installed at {}'.format(hook_path)) 120 121 # If they requested we install all of the hooks, do so. 122 if hooks: 123 install_hooks(config_file, store) 124 125 return 0 126 127 128 def install_hooks(config_file, store): 129 install_hook_envs(all_hooks(load_config(config_file), store), store) 130 131 132 def uninstall(hook_type='pre-commit'): 133 """Uninstall the pre-commit hooks.""" 134 hook_path, legacy_path = _hook_paths(hook_type) 135 136 # If our file doesn't exist or it isn't ours, gtfo. 137 if not os.path.exists(hook_path) or not is_our_script(hook_path): 138 return 0 139 140 os.remove(hook_path) 141 output.write_line('{} uninstalled'.format(hook_type)) 142 143 if os.path.exists(legacy_path): 144 os.rename(legacy_path, hook_path) 145 output.write_line('Restored previous hooks to {}'.format(hook_path)) 146 147 return 0 148 [end of pre_commit/commands/install_uninstall.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pre_commit/commands/install_uninstall.py b/pre_commit/commands/install_uninstall.py --- a/pre_commit/commands/install_uninstall.py +++ b/pre_commit/commands/install_uninstall.py @@ -107,7 +107,7 @@ before, rest = contents.split(TEMPLATE_START) to_template, after = rest.split(TEMPLATE_END) - before = before.replace('#!/usr/bin/env python', shebang()) + before = before.replace('#!/usr/bin/env python3', shebang()) hook_file.write(before + TEMPLATE_START) for line in to_template.splitlines():
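The patch above only changes the placeholder string that `shebang()` substitutes into the hook template, so the shipped `hook-tmpl` resource can carry an explicit `#!/usr/bin/env python3` line instead of an ambiguous one. A minimal, self-contained sketch of that substitution — the `shebang()` logic is restated from the module so the snippet runs standalone, and the two-line template string is invented for illustration — might look like:

```python
import itertools
import os
import sys


def shebang():
    """Restatement of the module's shebang() so the snippet is standalone:
    pick the first python/python3/python3.X executable on the default PATH."""
    if sys.platform == 'win32':
        return '#!/usr/bin/env python'
    path_choices = [p for p in os.defpath.split(os.pathsep) if p]
    exe_choices = [
        'python{}'.format('.'.join(str(v) for v in sys.version_info[:i]))
        for i in range(3)  # 'python', 'python3', 'python3.X'
    ]
    for path, exe in itertools.product(path_choices, exe_choices):
        if os.path.exists(os.path.join(path, exe)):
            return '#!/usr/bin/env {}'.format(exe)
    return '#!/usr/bin/env python'


# Invented two-line stand-in for resources/hook-tmpl.
template = '#!/usr/bin/env python3\n# start templated\n'

# After the patch, install() replaces the *python3* placeholder, so the
# resource file on disk never needs to carry a bare `python` shebang.
rendered = template.replace('#!/usr/bin/env python3', shebang())
print(rendered.splitlines()[0])
```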
{"golden_diff": "diff --git a/pre_commit/commands/install_uninstall.py b/pre_commit/commands/install_uninstall.py\n--- a/pre_commit/commands/install_uninstall.py\n+++ b/pre_commit/commands/install_uninstall.py\n@@ -107,7 +107,7 @@\n before, rest = contents.split(TEMPLATE_START)\n to_template, after = rest.split(TEMPLATE_END)\n \n- before = before.replace('#!/usr/bin/env python', shebang())\n+ before = before.replace('#!/usr/bin/env python3', shebang())\n \n hook_file.write(before + TEMPLATE_START)\n for line in to_template.splitlines():\n", "issue": "Error when building RPM\nWhen building RPM for Fedora i got this:\r\n\r\n`*** ERROR: ambiguous python shebang in /usr/lib/python3.7/site-packages/pre_commit/resources/hook-tmpl: #!/usr/bin/env python. Change it to python3 (or python2) explicitly.`\r\n\r\nWould be great if this fixed.\n", "before_files": [{"content": "from __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport io\nimport itertools\nimport logging\nimport os.path\nimport sys\n\nfrom pre_commit import git\nfrom pre_commit import output\nfrom pre_commit.clientlib import load_config\nfrom pre_commit.repository import all_hooks\nfrom pre_commit.repository import install_hook_envs\nfrom pre_commit.util import cmd_output\nfrom pre_commit.util import make_executable\nfrom pre_commit.util import mkdirp\nfrom pre_commit.util import resource_text\n\n\nlogger = logging.getLogger(__name__)\n\n# This is used to identify the hook file we install\nPRIOR_HASHES = (\n '4d9958c90bc262f47553e2c073f14cfe',\n 'd8ee923c46731b42cd95cc869add4062',\n '49fd668cb42069aa1b6048464be5d395',\n '79f09a650522a87b0da915d0d983b2de',\n 'e358c9dae00eac5d06b38dfdb1e33a8c',\n)\nCURRENT_HASH = '138fd403232d2ddd5efb44317e38bf03'\nTEMPLATE_START = '# start templated\\n'\nTEMPLATE_END = '# end templated\\n'\n\n\ndef _hook_paths(hook_type):\n pth = os.path.join(git.get_git_dir(), 'hooks', hook_type)\n return pth, '{}.legacy'.format(pth)\n\n\ndef is_our_script(filename):\n if not os.path.exists(filename): # pragma: windows no cover (symlink)\n return False\n with io.open(filename) as f:\n contents = f.read()\n return any(h in contents for h in (CURRENT_HASH,) + PRIOR_HASHES)\n\n\ndef shebang():\n if sys.platform == 'win32':\n py = 'python'\n else:\n # Homebrew/homebrew-core#35825: be more timid about appropriate `PATH`\n path_choices = [p for p in os.defpath.split(os.pathsep) if p]\n exe_choices = [\n 'python{}'.format('.'.join(str(v) for v in sys.version_info[:i]))\n for i in range(3)\n ]\n for path, exe in itertools.product(path_choices, exe_choices):\n if os.path.exists(os.path.join(path, exe)):\n py = exe\n break\n else:\n py = 'python'\n return '#!/usr/bin/env {}'.format(py)\n\n\ndef install(\n config_file, store,\n overwrite=False, hooks=False, hook_type='pre-commit',\n skip_on_missing_conf=False,\n):\n \"\"\"Install the pre-commit hooks.\"\"\"\n if cmd_output('git', 'config', 'core.hooksPath', retcode=None)[1].strip():\n logger.error(\n 'Cowardly refusing to install hooks with `core.hooksPath` set.\\n'\n 'hint: `git config --unset-all core.hooksPath`',\n )\n return 1\n\n hook_path, legacy_path = _hook_paths(hook_type)\n\n mkdirp(os.path.dirname(hook_path))\n\n # If we have an existing hook, move it to pre-commit.legacy\n if os.path.lexists(hook_path) and not is_our_script(hook_path):\n os.rename(hook_path, legacy_path)\n\n # If we specify overwrite, we simply delete the legacy file\n if overwrite and os.path.exists(legacy_path):\n os.remove(legacy_path)\n elif 
os.path.exists(legacy_path):\n output.write_line(\n 'Running in migration mode with existing hooks at {}\\n'\n 'Use -f to use only pre-commit.'.format(legacy_path),\n )\n\n params = {\n 'CONFIG': config_file,\n 'HOOK_TYPE': hook_type,\n 'INSTALL_PYTHON': sys.executable,\n 'SKIP_ON_MISSING_CONFIG': skip_on_missing_conf,\n }\n\n with io.open(hook_path, 'w') as hook_file:\n contents = resource_text('hook-tmpl')\n before, rest = contents.split(TEMPLATE_START)\n to_template, after = rest.split(TEMPLATE_END)\n\n before = before.replace('#!/usr/bin/env python', shebang())\n\n hook_file.write(before + TEMPLATE_START)\n for line in to_template.splitlines():\n var = line.split()[0]\n hook_file.write('{} = {!r}\\n'.format(var, params[var]))\n hook_file.write(TEMPLATE_END + after)\n make_executable(hook_path)\n\n output.write_line('pre-commit installed at {}'.format(hook_path))\n\n # If they requested we install all of the hooks, do so.\n if hooks:\n install_hooks(config_file, store)\n\n return 0\n\n\ndef install_hooks(config_file, store):\n install_hook_envs(all_hooks(load_config(config_file), store), store)\n\n\ndef uninstall(hook_type='pre-commit'):\n \"\"\"Uninstall the pre-commit hooks.\"\"\"\n hook_path, legacy_path = _hook_paths(hook_type)\n\n # If our file doesn't exist or it isn't ours, gtfo.\n if not os.path.exists(hook_path) or not is_our_script(hook_path):\n return 0\n\n os.remove(hook_path)\n output.write_line('{} uninstalled'.format(hook_type))\n\n if os.path.exists(legacy_path):\n os.rename(legacy_path, hook_path)\n output.write_line('Restored previous hooks to {}'.format(hook_path))\n\n return 0\n", "path": "pre_commit/commands/install_uninstall.py"}]}
num_tokens_prompt: 2,200
num_tokens_diff: 134

problem_id: gh_patches_debug_42533
source: rasdani/github-patches
task_type: git_diff
in_source_id: dmlc__dgl-5914
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> On Disk Node & Edge feature & file loader </issue> <code> [start of python/dgl/graphbolt/feature_store.py] 1 """Feature store for GraphBolt.""" 2 import torch 3 4 5 class FeatureStore: 6 r"""Base class for feature store.""" 7 8 def __init__(self): 9 pass 10 11 def read(self, key: str, ids: torch.Tensor = None): 12 """Read a feature from the feature store. 13 14 Parameters 15 ---------- 16 key : str 17 The key that uniquely identifies the feature in the feature store. 18 ids : torch.Tensor, optional 19 The index of the feature. If specified, only the specified indices 20 of the feature are read. If None, the entire feature is returned. 21 22 Returns 23 ------- 24 torch.Tensor 25 The read feature. 26 """ 27 raise NotImplementedError 28 29 def update(self, key: str, value: torch.Tensor, ids: torch.Tensor = None): 30 """Update a feature in the feature store. 31 32 This function is used to update a feature in the feature store. The 33 feature is identified by a unique key, and its value is specified using 34 a tensor. 35 36 Parameters 37 ---------- 38 key : str 39 The key that uniquely identifies the feature in the feature store. 40 value : torch.Tensor 41 The updated value of the feature. 42 ids : torch.Tensor, optional 43 The indices of the feature to update. If specified, only the 44 specified indices of the feature will be updated. For the feature, 45 the `ids[i]` row is updated to `value[i]`. So the indices and value 46 must have the same length. If None, the entire feature will be 47 updated. 48 """ 49 raise NotImplementedError 50 51 52 class InMemoryFeatureStore(FeatureStore): 53 r"""In-memory key-value feature store, where the key is a string and value 54 is Pytorch tensor.""" 55 56 def __init__(self, feature_dict: dict): 57 """Initialize an in-memory feature store. 58 59 The feature store is initialized with a dictionary of tensors, where the 60 key is the name of a feature and the value is the tensor. The value can 61 be multi-dimensional, where the first dimension is the index of the 62 feature. 63 64 Parameters 65 ---------- 66 feature_dict : dict, optional 67 A dictionary of tensors. 68 69 Examples 70 -------- 71 >>> import torch 72 >>> feature_dict = { 73 ... "user": torch.arange(0, 5), 74 ... "item": torch.arange(0, 6), 75 ... "rel": torch.arange(0, 6).view(2, 3), 76 ... } 77 >>> feature_store = InMemoryFeatureStore(feature_dict) 78 >>> feature_store.read("user", torch.tensor([0, 1, 2])) 79 tensor([0, 1, 2]) 80 >>> feature_store.read("item", torch.tensor([0, 1, 2])) 81 tensor([0, 1, 2]) 82 >>> feature_store.read("rel", torch.tensor([0])) 83 tensor([[0, 1, 2]]) 84 >>> feature_store.update("user", 85 ... torch.ones(3, dtype=torch.long), torch.tensor([0, 1, 2])) 86 >>> feature_store.read("user", torch.tensor([0, 1, 2])) 87 tensor([1, 1, 1]) 88 """ 89 super(InMemoryFeatureStore, self).__init__() 90 assert isinstance(feature_dict, dict), ( 91 f"feature_dict in InMemoryFeatureStore must be dict, " 92 f"but got {type(feature_dict)}." 93 ) 94 for k, v in feature_dict.items(): 95 assert isinstance( 96 k, str 97 ), f"Key in InMemoryFeatureStore must be str, but got {k}." 98 assert isinstance(v, torch.Tensor), ( 99 f"Value in InMemoryFeatureStore must be torch.Tensor," 100 f"but got {v}." 101 ) 102 103 self._feature_dict = feature_dict 104 105 def read(self, key: str, ids: torch.Tensor = None): 106 """Read a feature from the feature store by index. 
107 108 Parameters 109 ---------- 110 key : str 111 The key of the feature. 112 ids : torch.Tensor, optional 113 The index of the feature. If specified, only the specified indices 114 of the feature are read. If None, the entire feature is returned. 115 116 Returns 117 ------- 118 torch.Tensor 119 The read feature. 120 """ 121 assert ( 122 key in self._feature_dict 123 ), f"key {key} not in {self._feature_dict.keys()}" 124 if ids is None: 125 return self._feature_dict[key] 126 return self._feature_dict[key][ids] 127 128 def update(self, key: str, value: torch.Tensor, ids: torch.Tensor = None): 129 """Update a feature in the feature store. 130 131 This function is used to update a feature in the feature store. The 132 feature is identified by a unique key, and its value is specified using 133 a tensor. 134 135 Parameters 136 ---------- 137 key : str 138 The key that uniquely identifies the feature in the feature store. 139 value : torch.Tensor 140 The updated value of the feature. 141 ids : torch.Tensor, optional 142 The indices of the feature to update. If specified, only the 143 specified indices of the feature will be updated. For the feature, 144 the `ids[i]` row is updated to `value[i]`. So the indices and value 145 must have the same length. If None, the entire feature will be 146 updated. 147 """ 148 assert ( 149 key in self._feature_dict 150 ), f"key {key} not in {self._feature_dict.keys()}" 151 if ids is None: 152 self._feature_dict[key] = value 153 else: 154 assert ids.shape[0] == value.shape[0], ( 155 f"ids and value must have the same length, " 156 f"but got {ids.shape[0]} and {value.shape[0]}." 157 ) 158 self._feature_dict[key][ids] = value 159 [end of python/dgl/graphbolt/feature_store.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/python/dgl/graphbolt/feature_store.py b/python/dgl/graphbolt/feature_store.py --- a/python/dgl/graphbolt/feature_store.py +++ b/python/dgl/graphbolt/feature_store.py @@ -49,18 +49,20 @@ raise NotImplementedError -class InMemoryFeatureStore(FeatureStore): - r"""In-memory key-value feature store, where the key is a string and value - is Pytorch tensor.""" +class TorchBasedFeatureStore(FeatureStore): + r"""Torch based key-value feature store, where the key are strings and + values are Pytorch tensors.""" def __init__(self, feature_dict: dict): - """Initialize an in-memory feature store. + """Initialize a torch based feature store. The feature store is initialized with a dictionary of tensors, where the key is the name of a feature and the value is the tensor. The value can be multi-dimensional, where the first dimension is the index of the feature. + Note that the values can be in memory or on disk. + Parameters ---------- feature_dict : dict, optional @@ -74,7 +76,7 @@ ... "item": torch.arange(0, 6), ... "rel": torch.arange(0, 6).view(2, 3), ... } - >>> feature_store = InMemoryFeatureStore(feature_dict) + >>> feature_store = TorchBasedFeatureStore(feature_dict) >>> feature_store.read("user", torch.tensor([0, 1, 2])) tensor([0, 1, 2]) >>> feature_store.read("item", torch.tensor([0, 1, 2])) @@ -85,18 +87,35 @@ ... torch.ones(3, dtype=torch.long), torch.tensor([0, 1, 2])) >>> feature_store.read("user", torch.tensor([0, 1, 2])) tensor([1, 1, 1]) + + >>> import numpy as np + >>> user = np.arange(0, 5) + >>> item = np.arange(0, 6) + >>> np.save("/tmp/user.npy", user) + >>. np.save("/tmp/item.npy", item) + >>> feature_dict = { + ... "user": torch.as_tensor(np.load("/tmp/user.npy", + ... mmap_mode="r+")), + ... "item": torch.as_tensor(np.load("/tmp/item.npy", + ... mmap_mode="r+")), + ... } + >>> feature_store = TorchBasedFeatureStore(feature_dict) + >>> feature_store.read("user", torch.tensor([0, 1, 2])) + tensor([0, 1, 2]) + >>> feature_store.read("item", torch.tensor([3, 4, 2])) + tensor([3, 4, 2]) """ - super(InMemoryFeatureStore, self).__init__() + super(TorchBasedFeatureStore, self).__init__() assert isinstance(feature_dict, dict), ( - f"feature_dict in InMemoryFeatureStore must be dict, " + f"feature_dict in TorchBasedFeatureStore must be dict, " f"but got {type(feature_dict)}." ) for k, v in feature_dict.items(): assert isinstance( k, str - ), f"Key in InMemoryFeatureStore must be str, but got {k}." + ), f"Key in TorchBasedFeatureStore must be str, but got {k}." assert isinstance(v, torch.Tensor), ( - f"Value in InMemoryFeatureStore must be torch.Tensor," + f"Value in TorchBasedFeatureStore must be torch.Tensor," f"but got {v}." ) @@ -105,6 +124,9 @@ def read(self, key: str, ids: torch.Tensor = None): """Read a feature from the feature store by index. + The returned feature is always in memory, no matter whether the feature + to read is in memory or on disk. + Parameters ---------- key : str
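The docstring added in this diff already outlines the on-disk path: save a NumPy array, re-open it with `np.load(..., mmap_mode="r+")`, wrap it in `torch.as_tensor`, and hand it to the store. A standalone sketch of that underlying mechanism (using a temporary directory rather than the `/tmp` paths from the docstring, and without importing the patched class itself) could be:

```python
import os
import tempfile

import numpy as np
import torch

# Persist a feature array, then memory-map it back; torch.as_tensor shares
# the mapped buffer instead of copying the whole array into RAM.
tmpdir = tempfile.mkdtemp()
path = os.path.join(tmpdir, 'user.npy')
np.save(path, np.arange(0, 5))

user_feat = torch.as_tensor(np.load(path, mmap_mode='r+'))

# Reading by index only materializes the selected rows.
ids = torch.tensor([0, 1, 2])
print(user_feat[ids])   # tensor([0, 1, 2])

# Writes through the tensor go to the writable memory map, which is what
# lets an on-disk TorchBasedFeatureStore support update() as well as read().
user_feat[ids] = torch.ones(3, dtype=user_feat.dtype)
print(user_feat[:5])    # tensor([1, 1, 1, 3, 4])
```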
{"golden_diff": "diff --git a/python/dgl/graphbolt/feature_store.py b/python/dgl/graphbolt/feature_store.py\n--- a/python/dgl/graphbolt/feature_store.py\n+++ b/python/dgl/graphbolt/feature_store.py\n@@ -49,18 +49,20 @@\n raise NotImplementedError\n \n \n-class InMemoryFeatureStore(FeatureStore):\n- r\"\"\"In-memory key-value feature store, where the key is a string and value\n- is Pytorch tensor.\"\"\"\n+class TorchBasedFeatureStore(FeatureStore):\n+ r\"\"\"Torch based key-value feature store, where the key are strings and\n+ values are Pytorch tensors.\"\"\"\n \n def __init__(self, feature_dict: dict):\n- \"\"\"Initialize an in-memory feature store.\n+ \"\"\"Initialize a torch based feature store.\n \n The feature store is initialized with a dictionary of tensors, where the\n key is the name of a feature and the value is the tensor. The value can\n be multi-dimensional, where the first dimension is the index of the\n feature.\n \n+ Note that the values can be in memory or on disk.\n+\n Parameters\n ----------\n feature_dict : dict, optional\n@@ -74,7 +76,7 @@\n ... \"item\": torch.arange(0, 6),\n ... \"rel\": torch.arange(0, 6).view(2, 3),\n ... }\n- >>> feature_store = InMemoryFeatureStore(feature_dict)\n+ >>> feature_store = TorchBasedFeatureStore(feature_dict)\n >>> feature_store.read(\"user\", torch.tensor([0, 1, 2]))\n tensor([0, 1, 2])\n >>> feature_store.read(\"item\", torch.tensor([0, 1, 2]))\n@@ -85,18 +87,35 @@\n ... torch.ones(3, dtype=torch.long), torch.tensor([0, 1, 2]))\n >>> feature_store.read(\"user\", torch.tensor([0, 1, 2]))\n tensor([1, 1, 1])\n+\n+ >>> import numpy as np\n+ >>> user = np.arange(0, 5)\n+ >>> item = np.arange(0, 6)\n+ >>> np.save(\"/tmp/user.npy\", user)\n+ >>. np.save(\"/tmp/item.npy\", item)\n+ >>> feature_dict = {\n+ ... \"user\": torch.as_tensor(np.load(\"/tmp/user.npy\",\n+ ... mmap_mode=\"r+\")),\n+ ... \"item\": torch.as_tensor(np.load(\"/tmp/item.npy\",\n+ ... mmap_mode=\"r+\")),\n+ ... 
}\n+ >>> feature_store = TorchBasedFeatureStore(feature_dict)\n+ >>> feature_store.read(\"user\", torch.tensor([0, 1, 2]))\n+ tensor([0, 1, 2])\n+ >>> feature_store.read(\"item\", torch.tensor([3, 4, 2]))\n+ tensor([3, 4, 2])\n \"\"\"\n- super(InMemoryFeatureStore, self).__init__()\n+ super(TorchBasedFeatureStore, self).__init__()\n assert isinstance(feature_dict, dict), (\n- f\"feature_dict in InMemoryFeatureStore must be dict, \"\n+ f\"feature_dict in TorchBasedFeatureStore must be dict, \"\n f\"but got {type(feature_dict)}.\"\n )\n for k, v in feature_dict.items():\n assert isinstance(\n k, str\n- ), f\"Key in InMemoryFeatureStore must be str, but got {k}.\"\n+ ), f\"Key in TorchBasedFeatureStore must be str, but got {k}.\"\n assert isinstance(v, torch.Tensor), (\n- f\"Value in InMemoryFeatureStore must be torch.Tensor,\"\n+ f\"Value in TorchBasedFeatureStore must be torch.Tensor,\"\n f\"but got {v}.\"\n )\n \n@@ -105,6 +124,9 @@\n def read(self, key: str, ids: torch.Tensor = None):\n \"\"\"Read a feature from the feature store by index.\n \n+ The returned feature is always in memory, no matter whether the feature\n+ to read is in memory or on disk.\n+\n Parameters\n ----------\n key : str\n", "issue": "On Disk Node & Edge feature & file loader\n\n", "before_files": [{"content": "\"\"\"Feature store for GraphBolt.\"\"\"\nimport torch\n\n\nclass FeatureStore:\n r\"\"\"Base class for feature store.\"\"\"\n\n def __init__(self):\n pass\n\n def read(self, key: str, ids: torch.Tensor = None):\n \"\"\"Read a feature from the feature store.\n\n Parameters\n ----------\n key : str\n The key that uniquely identifies the feature in the feature store.\n ids : torch.Tensor, optional\n The index of the feature. If specified, only the specified indices\n of the feature are read. If None, the entire feature is returned.\n\n Returns\n -------\n torch.Tensor\n The read feature.\n \"\"\"\n raise NotImplementedError\n\n def update(self, key: str, value: torch.Tensor, ids: torch.Tensor = None):\n \"\"\"Update a feature in the feature store.\n\n This function is used to update a feature in the feature store. The\n feature is identified by a unique key, and its value is specified using\n a tensor.\n\n Parameters\n ----------\n key : str\n The key that uniquely identifies the feature in the feature store.\n value : torch.Tensor\n The updated value of the feature.\n ids : torch.Tensor, optional\n The indices of the feature to update. If specified, only the\n specified indices of the feature will be updated. For the feature,\n the `ids[i]` row is updated to `value[i]`. So the indices and value\n must have the same length. If None, the entire feature will be\n updated.\n \"\"\"\n raise NotImplementedError\n\n\nclass InMemoryFeatureStore(FeatureStore):\n r\"\"\"In-memory key-value feature store, where the key is a string and value\n is Pytorch tensor.\"\"\"\n\n def __init__(self, feature_dict: dict):\n \"\"\"Initialize an in-memory feature store.\n\n The feature store is initialized with a dictionary of tensors, where the\n key is the name of a feature and the value is the tensor. The value can\n be multi-dimensional, where the first dimension is the index of the\n feature.\n\n Parameters\n ----------\n feature_dict : dict, optional\n A dictionary of tensors.\n\n Examples\n --------\n >>> import torch\n >>> feature_dict = {\n ... \"user\": torch.arange(0, 5),\n ... \"item\": torch.arange(0, 6),\n ... \"rel\": torch.arange(0, 6).view(2, 3),\n ... 
}\n >>> feature_store = InMemoryFeatureStore(feature_dict)\n >>> feature_store.read(\"user\", torch.tensor([0, 1, 2]))\n tensor([0, 1, 2])\n >>> feature_store.read(\"item\", torch.tensor([0, 1, 2]))\n tensor([0, 1, 2])\n >>> feature_store.read(\"rel\", torch.tensor([0]))\n tensor([[0, 1, 2]])\n >>> feature_store.update(\"user\",\n ... torch.ones(3, dtype=torch.long), torch.tensor([0, 1, 2]))\n >>> feature_store.read(\"user\", torch.tensor([0, 1, 2]))\n tensor([1, 1, 1])\n \"\"\"\n super(InMemoryFeatureStore, self).__init__()\n assert isinstance(feature_dict, dict), (\n f\"feature_dict in InMemoryFeatureStore must be dict, \"\n f\"but got {type(feature_dict)}.\"\n )\n for k, v in feature_dict.items():\n assert isinstance(\n k, str\n ), f\"Key in InMemoryFeatureStore must be str, but got {k}.\"\n assert isinstance(v, torch.Tensor), (\n f\"Value in InMemoryFeatureStore must be torch.Tensor,\"\n f\"but got {v}.\"\n )\n\n self._feature_dict = feature_dict\n\n def read(self, key: str, ids: torch.Tensor = None):\n \"\"\"Read a feature from the feature store by index.\n\n Parameters\n ----------\n key : str\n The key of the feature.\n ids : torch.Tensor, optional\n The index of the feature. If specified, only the specified indices\n of the feature are read. If None, the entire feature is returned.\n\n Returns\n -------\n torch.Tensor\n The read feature.\n \"\"\"\n assert (\n key in self._feature_dict\n ), f\"key {key} not in {self._feature_dict.keys()}\"\n if ids is None:\n return self._feature_dict[key]\n return self._feature_dict[key][ids]\n\n def update(self, key: str, value: torch.Tensor, ids: torch.Tensor = None):\n \"\"\"Update a feature in the feature store.\n\n This function is used to update a feature in the feature store. The\n feature is identified by a unique key, and its value is specified using\n a tensor.\n\n Parameters\n ----------\n key : str\n The key that uniquely identifies the feature in the feature store.\n value : torch.Tensor\n The updated value of the feature.\n ids : torch.Tensor, optional\n The indices of the feature to update. If specified, only the\n specified indices of the feature will be updated. For the feature,\n the `ids[i]` row is updated to `value[i]`. So the indices and value\n must have the same length. If None, the entire feature will be\n updated.\n \"\"\"\n assert (\n key in self._feature_dict\n ), f\"key {key} not in {self._feature_dict.keys()}\"\n if ids is None:\n self._feature_dict[key] = value\n else:\n assert ids.shape[0] == value.shape[0], (\n f\"ids and value must have the same length, \"\n f\"but got {ids.shape[0]} and {value.shape[0]}.\"\n )\n self._feature_dict[key][ids] = value\n", "path": "python/dgl/graphbolt/feature_store.py"}]}
num_tokens_prompt: 2,202
num_tokens_diff: 922

problem_id: gh_patches_debug_20957
source: rasdani/github-patches
task_type: git_diff
in_source_id: obspy__obspy-1908
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> https FDSN web service end points not working When checking the `InsecureRequestWarning` in #1779, I noticed that those FDSNWS end points we have that go to https addresses don't seem to work.. ```python In [1]: from obspy.clients.fdsn import Client In [2]: client = Client('GFZ') --------------------------------------------------------------------------- FDSNException Traceback (most recent call last) <ipython-input-2-1d7f32e7dd73> in <module>() ----> 1 client = Client('GFZ') /home/megies/git/obspy-master/obspy/clients/fdsn/client.pyc in __init__(self, base_url, major_versions, user, password, user_agent, debug, timeout, service_mappings, force_redirect) 260 print("Request Headers: %s" % str(self.request_headers)) 261 --> 262 self._discover_services() 263 264 def get_events(self, starttime=None, endtime=None, minlatitude=None, /home/megies/git/obspy-master/obspy/clients/fdsn/client.pyc in _discover_services(self) 1479 "be due to a temporary service outage or an invalid FDSN " 1480 "service address." % self.base_url) -> 1481 raise FDSNException(msg) 1482 1483 # Cache. FDSNException: No FDSN services could be discovered at 'https://geofon.gfz-potsdam.de'. This could be due to a temporary service outage or an invalid FDSN service address. In [3]: client = Client('SCEDC') --------------------------------------------------------------------------- FDSNException Traceback (most recent call last) <ipython-input-3-7c19ea139f48> in <module>() ----> 1 client = Client('SCEDC') /home/megies/git/obspy-master/obspy/clients/fdsn/client.pyc in __init__(self, base_url, major_versions, user, password, user_agent, debug, timeout, service_mappings, force_redirect) 260 print("Request Headers: %s" % str(self.request_headers)) 261 --> 262 self._discover_services() 263 264 def get_events(self, starttime=None, endtime=None, minlatitude=None, /home/megies/git/obspy-master/obspy/clients/fdsn/client.pyc in _discover_services(self) 1479 "be due to a temporary service outage or an invalid FDSN " 1480 "service address." % self.base_url) -> 1481 raise FDSNException(msg) 1482 1483 # Cache. FDSNException: No FDSN services could be discovered at 'https://service.scedc.caltech.edu'. This could be due to a temporary service outage or an invalid FDSN service address. In [4]: client = Client('USGS') --------------------------------------------------------------------------- FDSNException Traceback (most recent call last) <ipython-input-4-2d8335176e68> in <module>() ----> 1 client = Client('USGS') /home/megies/git/obspy-master/obspy/clients/fdsn/client.pyc in __init__(self, base_url, major_versions, user, password, user_agent, debug, timeout, service_mappings, force_redirect) 260 print("Request Headers: %s" % str(self.request_headers)) 261 --> 262 self._discover_services() 263 264 def get_events(self, starttime=None, endtime=None, minlatitude=None, /home/megies/git/obspy-master/obspy/clients/fdsn/client.pyc in _discover_services(self) 1479 "be due to a temporary service outage or an invalid FDSN " 1480 "service address." % self.base_url) -> 1481 raise FDSNException(msg) 1482 1483 # Cache. FDSNException: No FDSN services could be discovered at 'https://earthquake.usgs.gov'. This could be due to a temporary service outage or an invalid FDSN service address. ``` </issue> <code> [start of obspy/clients/fdsn/header.py] 1 # -*- coding: utf-8 -*- 2 """ 3 Header files for the FDSN webservice. 
4 5 :copyright: 6 The ObsPy Development Team ([email protected]) 7 :license: 8 GNU Lesser General Public License, Version 3 9 (https://www.gnu.org/copyleft/lesser.html) 10 """ 11 from __future__ import (absolute_import, division, print_function, 12 unicode_literals) 13 from future.builtins import * # NOQA 14 from future.utils import PY2 15 16 import platform 17 import sys 18 19 from obspy import UTCDateTime, __version__ 20 21 22 class FDSNException(Exception): 23 def __init__(self, value, server_info=None): 24 if server_info is not None: 25 value = "\n".join([value, "Detailed response of server:", "", 26 server_info]) 27 super(FDSNException, self).__init__(value) 28 29 30 class FDSNRedirectException(FDSNException): 31 pass 32 33 34 class FDSNNoDataException(FDSNException): 35 pass 36 37 38 # A curated list collecting some implementations: 39 # https://www.fdsn.org/webservices/datacenters/ 40 # http://www.orfeus-eu.org/eida/eida_odc.html 41 URL_MAPPINGS = { 42 "BGR": "http://eida.bgr.de", 43 "ETH": "http://eida.ethz.ch", 44 "EMSC": "http://www.seismicportal.eu", 45 "GEONET": "http://service.geonet.org.nz", 46 "GFZ": "https://geofon.gfz-potsdam.de", 47 "ICGC": "http://ws.icgc.cat", 48 "INGV": "http://webservices.rm.ingv.it", 49 "IPGP": "http://eida.ipgp.fr", 50 "IRIS": "http://service.iris.edu", 51 "ISC": "http://isc-mirror.iris.washington.edu", 52 "KOERI": "http://eida.koeri.boun.edu.tr", 53 "LMU": "http://erde.geophysik.uni-muenchen.de", 54 "NCEDC": "http://service.ncedc.org", 55 "NIEP": "http://eida-sc3.infp.ro", 56 "NOA": "http://eida.gein.noa.gr", 57 "ODC": "http://www.orfeus-eu.org", 58 "ORFEUS": "http://www.orfeus-eu.org", 59 "RESIF": "http://ws.resif.fr", 60 "SCEDC": "https://service.scedc.caltech.edu", 61 "TEXNET": "http://rtserve.beg.utexas.edu", 62 "USGS": "https://earthquake.usgs.gov", 63 "USP": "http://sismo.iag.usp.br"} 64 65 FDSNWS = ("dataselect", "event", "station") 66 67 if PY2: 68 platform_ = platform.platform().decode("ascii", "ignore") 69 else: 70 encoding = sys.getdefaultencoding() or "UTF-8" 71 platform_ = platform.platform().encode(encoding).decode("ascii", "ignore") 72 # The default User Agent that will be sent with every request. 73 DEFAULT_USER_AGENT = "ObsPy/%s (%s, Python %s)" % ( 74 __version__, platform_, platform.python_version()) 75 76 77 # The default parameters. Different services can choose to add more. It always 78 # contains the long name first and the short name second. If it has no short 79 # name, it is simply a tuple with only one entry. 
80 DEFAULT_DATASELECT_PARAMETERS = [ 81 "starttime", "endtime", "network", "station", "location", "channel"] 82 83 OPTIONAL_DATASELECT_PARAMETERS = [ 84 "quality", "minimumlength", "longestonly"] 85 86 DEFAULT_STATION_PARAMETERS = [ 87 "starttime", "endtime", "network", "station", "location", "channel", 88 "minlatitude", "maxlatitude", "minlongitude", "maxlongitude", "level"] 89 90 OPTIONAL_STATION_PARAMETERS = [ 91 "startbefore", "startafter", "endbefore", "endafter", "latitude", 92 "longitude", "minradius", "maxradius", "includerestricted", 93 "includeavailability", "updatedafter", "matchtimeseries", "format"] 94 95 DEFAULT_EVENT_PARAMETERS = [ 96 "starttime", "endtime", "minlatitude", "maxlatitude", "minlongitude", 97 "maxlongitude", "mindepth", "maxdepth", "minmagnitude", "maxmagnitude", 98 "orderby"] 99 100 OPTIONAL_EVENT_PARAMETERS = [ 101 "latitude", "longitude", "minradius", "maxradius", "magnitudetype", 102 "includeallorigins", "includeallmagnitudes", "includearrivals", "eventid", 103 "limit", "offset", "catalog", "contributor", "updatedafter"] 104 105 DEFAULT_PARAMETERS = { 106 "dataselect": DEFAULT_DATASELECT_PARAMETERS, 107 "event": DEFAULT_EVENT_PARAMETERS, 108 "station": DEFAULT_STATION_PARAMETERS} 109 110 OPTIONAL_PARAMETERS = { 111 "dataselect": OPTIONAL_DATASELECT_PARAMETERS, 112 "event": OPTIONAL_EVENT_PARAMETERS, 113 "station": OPTIONAL_STATION_PARAMETERS} 114 115 PARAMETER_ALIASES = { 116 "net": "network", 117 "sta": "station", 118 "loc": "location", 119 "cha": "channel", 120 "start": "starttime", 121 "end": "endtime", 122 "minlat": "minlatitude", 123 "maxlat": "maxlatitude", 124 "minlon": "minlongitude", 125 "maxlon": "maxlongitude", 126 "lat": "latitude", 127 "lon": "longitude", 128 "minmag": "minmagnitude", 129 "maxmag": "maxmagnitude", 130 "magtype": "magnitudetype", 131 } 132 133 134 # The default types if none are given. If the parameter can not be found in 135 # here and has no specified type, the type will be assumed to be a string. 136 DEFAULT_TYPES = { 137 "starttime": UTCDateTime, 138 "endtime": UTCDateTime, 139 "network": str, 140 "station": str, 141 "location": str, 142 "channel": str, 143 "quality": str, 144 "minimumlength": float, 145 "longestonly": bool, 146 "startbefore": UTCDateTime, 147 "startafter": UTCDateTime, 148 "endbefore": UTCDateTime, 149 "endafter": UTCDateTime, 150 "maxlongitude": float, 151 "minlongitude": float, 152 "longitude": float, 153 "maxlatitude": float, 154 "minlatitude": float, 155 "latitude": float, 156 "maxdepth": float, 157 "mindepth": float, 158 "maxmagnitude": float, 159 "minmagnitude": float, 160 "magnitudetype": str, 161 "maxradius": float, 162 "minradius": float, 163 "level": str, 164 "includerestricted": bool, 165 "includeavailability": bool, 166 "includeallorigins": bool, 167 "includeallmagnitudes": bool, 168 "includearrivals": bool, 169 "matchtimeseries": bool, 170 "eventid": str, 171 "limit": int, 172 "offset": int, 173 "orderby": str, 174 "catalog": str, 175 "contributor": str, 176 "updatedafter": UTCDateTime} 177 178 # This list collects WADL parameters that will not be parsed because they are 179 # not useful for the ObsPy client. 180 # Current the nodata parameter used by IRIS is part of that list. The ObsPy 181 # client relies on the HTTP codes. 182 WADL_PARAMETERS_NOT_TO_BE_PARSED = ["nodata"] 183 [end of obspy/clients/fdsn/header.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. 
Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/obspy/clients/fdsn/header.py b/obspy/clients/fdsn/header.py --- a/obspy/clients/fdsn/header.py +++ b/obspy/clients/fdsn/header.py @@ -43,7 +43,7 @@ "ETH": "http://eida.ethz.ch", "EMSC": "http://www.seismicportal.eu", "GEONET": "http://service.geonet.org.nz", - "GFZ": "https://geofon.gfz-potsdam.de", + "GFZ": "http://geofon.gfz-potsdam.de", "ICGC": "http://ws.icgc.cat", "INGV": "http://webservices.rm.ingv.it", "IPGP": "http://eida.ipgp.fr", @@ -57,9 +57,9 @@ "ODC": "http://www.orfeus-eu.org", "ORFEUS": "http://www.orfeus-eu.org", "RESIF": "http://ws.resif.fr", - "SCEDC": "https://service.scedc.caltech.edu", + "SCEDC": "http://service.scedc.caltech.edu", "TEXNET": "http://rtserve.beg.utexas.edu", - "USGS": "https://earthquake.usgs.gov", + "USGS": "http://earthquake.usgs.gov", "USP": "http://sismo.iag.usp.br"} FDSNWS = ("dataselect", "event", "station")
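With the three mappings reverted to plain `http://`, the constructor calls from the issue should discover services again. A quick smoke test along the lines of the issue's interactive session — it assumes a working network connection and an installed `obspy` — might be:

```python
from obspy.clients.fdsn import Client
from obspy.clients.fdsn.header import URL_MAPPINGS

# The three nodes that the patch reverts to plain http.
for key in ('GFZ', 'SCEDC', 'USGS'):
    print('{} -> {}'.format(key, URL_MAPPINGS[key]))
    client = Client(key)   # raises FDSNException if discovery still fails
    print(client)          # summarizes the services that were discovered
```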
{"golden_diff": "diff --git a/obspy/clients/fdsn/header.py b/obspy/clients/fdsn/header.py\n--- a/obspy/clients/fdsn/header.py\n+++ b/obspy/clients/fdsn/header.py\n@@ -43,7 +43,7 @@\n \"ETH\": \"http://eida.ethz.ch\",\n \"EMSC\": \"http://www.seismicportal.eu\",\n \"GEONET\": \"http://service.geonet.org.nz\",\n- \"GFZ\": \"https://geofon.gfz-potsdam.de\",\n+ \"GFZ\": \"http://geofon.gfz-potsdam.de\",\n \"ICGC\": \"http://ws.icgc.cat\",\n \"INGV\": \"http://webservices.rm.ingv.it\",\n \"IPGP\": \"http://eida.ipgp.fr\",\n@@ -57,9 +57,9 @@\n \"ODC\": \"http://www.orfeus-eu.org\",\n \"ORFEUS\": \"http://www.orfeus-eu.org\",\n \"RESIF\": \"http://ws.resif.fr\",\n- \"SCEDC\": \"https://service.scedc.caltech.edu\",\n+ \"SCEDC\": \"http://service.scedc.caltech.edu\",\n \"TEXNET\": \"http://rtserve.beg.utexas.edu\",\n- \"USGS\": \"https://earthquake.usgs.gov\",\n+ \"USGS\": \"http://earthquake.usgs.gov\",\n \"USP\": \"http://sismo.iag.usp.br\"}\n \n FDSNWS = (\"dataselect\", \"event\", \"station\")\n", "issue": "https FDSN web service end points not working\nWhen checking the `InsecureRequestWarning` in #1779, I noticed that those FDSNWS end points we have that go to https addresses don't seem to work..\r\n\r\n```python\r\nIn [1]: from obspy.clients.fdsn import Client\r\n\r\nIn [2]: client = Client('GFZ')\r\n---------------------------------------------------------------------------\r\nFDSNException Traceback (most recent call last)\r\n<ipython-input-2-1d7f32e7dd73> in <module>()\r\n----> 1 client = Client('GFZ')\r\n\r\n/home/megies/git/obspy-master/obspy/clients/fdsn/client.pyc in __init__(self, base_url, major_versions, user, password, user_agent, debug, timeout, service_mappings, force_redirect)\r\n 260 print(\"Request Headers: %s\" % str(self.request_headers))\r\n 261 \r\n--> 262 self._discover_services()\r\n 263 \r\n 264 def get_events(self, starttime=None, endtime=None, minlatitude=None,\r\n\r\n/home/megies/git/obspy-master/obspy/clients/fdsn/client.pyc in _discover_services(self)\r\n 1479 \"be due to a temporary service outage or an invalid FDSN \"\r\n 1480 \"service address.\" % self.base_url)\r\n-> 1481 raise FDSNException(msg)\r\n 1482 \r\n 1483 # Cache.\r\n\r\nFDSNException: No FDSN services could be discovered at 'https://geofon.gfz-potsdam.de'. This could be due to a temporary service outage or an invalid FDSN service address.\r\n\r\nIn [3]: client = Client('SCEDC')\r\n---------------------------------------------------------------------------\r\nFDSNException Traceback (most recent call last)\r\n<ipython-input-3-7c19ea139f48> in <module>()\r\n----> 1 client = Client('SCEDC')\r\n\r\n/home/megies/git/obspy-master/obspy/clients/fdsn/client.pyc in __init__(self, base_url, major_versions, user, password, user_agent, debug, timeout, service_mappings, force_redirect)\r\n 260 print(\"Request Headers: %s\" % str(self.request_headers))\r\n 261 \r\n--> 262 self._discover_services()\r\n 263 \r\n 264 def get_events(self, starttime=None, endtime=None, minlatitude=None,\r\n\r\n/home/megies/git/obspy-master/obspy/clients/fdsn/client.pyc in _discover_services(self)\r\n 1479 \"be due to a temporary service outage or an invalid FDSN \"\r\n 1480 \"service address.\" % self.base_url)\r\n-> 1481 raise FDSNException(msg)\r\n 1482 \r\n 1483 # Cache.\r\n\r\nFDSNException: No FDSN services could be discovered at 'https://service.scedc.caltech.edu'. 
This could be due to a temporary service outage or an invalid FDSN service address.\r\n\r\nIn [4]: client = Client('USGS')\r\n---------------------------------------------------------------------------\r\nFDSNException Traceback (most recent call last)\r\n<ipython-input-4-2d8335176e68> in <module>()\r\n----> 1 client = Client('USGS')\r\n\r\n/home/megies/git/obspy-master/obspy/clients/fdsn/client.pyc in __init__(self, base_url, major_versions, user, password, user_agent, debug, timeout, service_mappings, force_redirect)\r\n 260 print(\"Request Headers: %s\" % str(self.request_headers))\r\n 261 \r\n--> 262 self._discover_services()\r\n 263 \r\n 264 def get_events(self, starttime=None, endtime=None, minlatitude=None,\r\n\r\n/home/megies/git/obspy-master/obspy/clients/fdsn/client.pyc in _discover_services(self)\r\n 1479 \"be due to a temporary service outage or an invalid FDSN \"\r\n 1480 \"service address.\" % self.base_url)\r\n-> 1481 raise FDSNException(msg)\r\n 1482 \r\n 1483 # Cache.\r\n\r\nFDSNException: No FDSN services could be discovered at 'https://earthquake.usgs.gov'. This could be due to a temporary service outage or an invalid FDSN service address.\r\n```\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nHeader files for the FDSN webservice.\n\n:copyright:\n The ObsPy Development Team ([email protected])\n:license:\n GNU Lesser General Public License, Version 3\n (https://www.gnu.org/copyleft/lesser.html)\n\"\"\"\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\nfrom future.builtins import * # NOQA\nfrom future.utils import PY2\n\nimport platform\nimport sys\n\nfrom obspy import UTCDateTime, __version__\n\n\nclass FDSNException(Exception):\n def __init__(self, value, server_info=None):\n if server_info is not None:\n value = \"\\n\".join([value, \"Detailed response of server:\", \"\",\n server_info])\n super(FDSNException, self).__init__(value)\n\n\nclass FDSNRedirectException(FDSNException):\n pass\n\n\nclass FDSNNoDataException(FDSNException):\n pass\n\n\n# A curated list collecting some implementations:\n# https://www.fdsn.org/webservices/datacenters/\n# http://www.orfeus-eu.org/eida/eida_odc.html\nURL_MAPPINGS = {\n \"BGR\": \"http://eida.bgr.de\",\n \"ETH\": \"http://eida.ethz.ch\",\n \"EMSC\": \"http://www.seismicportal.eu\",\n \"GEONET\": \"http://service.geonet.org.nz\",\n \"GFZ\": \"https://geofon.gfz-potsdam.de\",\n \"ICGC\": \"http://ws.icgc.cat\",\n \"INGV\": \"http://webservices.rm.ingv.it\",\n \"IPGP\": \"http://eida.ipgp.fr\",\n \"IRIS\": \"http://service.iris.edu\",\n \"ISC\": \"http://isc-mirror.iris.washington.edu\",\n \"KOERI\": \"http://eida.koeri.boun.edu.tr\",\n \"LMU\": \"http://erde.geophysik.uni-muenchen.de\",\n \"NCEDC\": \"http://service.ncedc.org\",\n \"NIEP\": \"http://eida-sc3.infp.ro\",\n \"NOA\": \"http://eida.gein.noa.gr\",\n \"ODC\": \"http://www.orfeus-eu.org\",\n \"ORFEUS\": \"http://www.orfeus-eu.org\",\n \"RESIF\": \"http://ws.resif.fr\",\n \"SCEDC\": \"https://service.scedc.caltech.edu\",\n \"TEXNET\": \"http://rtserve.beg.utexas.edu\",\n \"USGS\": \"https://earthquake.usgs.gov\",\n \"USP\": \"http://sismo.iag.usp.br\"}\n\nFDSNWS = (\"dataselect\", \"event\", \"station\")\n\nif PY2:\n platform_ = platform.platform().decode(\"ascii\", \"ignore\")\nelse:\n encoding = sys.getdefaultencoding() or \"UTF-8\"\n platform_ = platform.platform().encode(encoding).decode(\"ascii\", \"ignore\")\n# The default User Agent that will be sent with every request.\nDEFAULT_USER_AGENT = \"ObsPy/%s (%s, 
Python %s)\" % (\n __version__, platform_, platform.python_version())\n\n\n# The default parameters. Different services can choose to add more. It always\n# contains the long name first and the short name second. If it has no short\n# name, it is simply a tuple with only one entry.\nDEFAULT_DATASELECT_PARAMETERS = [\n \"starttime\", \"endtime\", \"network\", \"station\", \"location\", \"channel\"]\n\nOPTIONAL_DATASELECT_PARAMETERS = [\n \"quality\", \"minimumlength\", \"longestonly\"]\n\nDEFAULT_STATION_PARAMETERS = [\n \"starttime\", \"endtime\", \"network\", \"station\", \"location\", \"channel\",\n \"minlatitude\", \"maxlatitude\", \"minlongitude\", \"maxlongitude\", \"level\"]\n\nOPTIONAL_STATION_PARAMETERS = [\n \"startbefore\", \"startafter\", \"endbefore\", \"endafter\", \"latitude\",\n \"longitude\", \"minradius\", \"maxradius\", \"includerestricted\",\n \"includeavailability\", \"updatedafter\", \"matchtimeseries\", \"format\"]\n\nDEFAULT_EVENT_PARAMETERS = [\n \"starttime\", \"endtime\", \"minlatitude\", \"maxlatitude\", \"minlongitude\",\n \"maxlongitude\", \"mindepth\", \"maxdepth\", \"minmagnitude\", \"maxmagnitude\",\n \"orderby\"]\n\nOPTIONAL_EVENT_PARAMETERS = [\n \"latitude\", \"longitude\", \"minradius\", \"maxradius\", \"magnitudetype\",\n \"includeallorigins\", \"includeallmagnitudes\", \"includearrivals\", \"eventid\",\n \"limit\", \"offset\", \"catalog\", \"contributor\", \"updatedafter\"]\n\nDEFAULT_PARAMETERS = {\n \"dataselect\": DEFAULT_DATASELECT_PARAMETERS,\n \"event\": DEFAULT_EVENT_PARAMETERS,\n \"station\": DEFAULT_STATION_PARAMETERS}\n\nOPTIONAL_PARAMETERS = {\n \"dataselect\": OPTIONAL_DATASELECT_PARAMETERS,\n \"event\": OPTIONAL_EVENT_PARAMETERS,\n \"station\": OPTIONAL_STATION_PARAMETERS}\n\nPARAMETER_ALIASES = {\n \"net\": \"network\",\n \"sta\": \"station\",\n \"loc\": \"location\",\n \"cha\": \"channel\",\n \"start\": \"starttime\",\n \"end\": \"endtime\",\n \"minlat\": \"minlatitude\",\n \"maxlat\": \"maxlatitude\",\n \"minlon\": \"minlongitude\",\n \"maxlon\": \"maxlongitude\",\n \"lat\": \"latitude\",\n \"lon\": \"longitude\",\n \"minmag\": \"minmagnitude\",\n \"maxmag\": \"maxmagnitude\",\n \"magtype\": \"magnitudetype\",\n}\n\n\n# The default types if none are given. 
If the parameter can not be found in\n# here and has no specified type, the type will be assumed to be a string.\nDEFAULT_TYPES = {\n \"starttime\": UTCDateTime,\n \"endtime\": UTCDateTime,\n \"network\": str,\n \"station\": str,\n \"location\": str,\n \"channel\": str,\n \"quality\": str,\n \"minimumlength\": float,\n \"longestonly\": bool,\n \"startbefore\": UTCDateTime,\n \"startafter\": UTCDateTime,\n \"endbefore\": UTCDateTime,\n \"endafter\": UTCDateTime,\n \"maxlongitude\": float,\n \"minlongitude\": float,\n \"longitude\": float,\n \"maxlatitude\": float,\n \"minlatitude\": float,\n \"latitude\": float,\n \"maxdepth\": float,\n \"mindepth\": float,\n \"maxmagnitude\": float,\n \"minmagnitude\": float,\n \"magnitudetype\": str,\n \"maxradius\": float,\n \"minradius\": float,\n \"level\": str,\n \"includerestricted\": bool,\n \"includeavailability\": bool,\n \"includeallorigins\": bool,\n \"includeallmagnitudes\": bool,\n \"includearrivals\": bool,\n \"matchtimeseries\": bool,\n \"eventid\": str,\n \"limit\": int,\n \"offset\": int,\n \"orderby\": str,\n \"catalog\": str,\n \"contributor\": str,\n \"updatedafter\": UTCDateTime}\n\n# This list collects WADL parameters that will not be parsed because they are\n# not useful for the ObsPy client.\n# Current the nodata parameter used by IRIS is part of that list. The ObsPy\n# client relies on the HTTP codes.\nWADL_PARAMETERS_NOT_TO_BE_PARSED = [\"nodata\"]\n", "path": "obspy/clients/fdsn/header.py"}]}
num_tokens_prompt: 3,620
num_tokens_diff: 356

problem_id: gh_patches_debug_40293
source: rasdani/github-patches
task_type: git_diff
in_source_id: beetbox__beets-1295
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> info: Select specific fields Right now beet info spews all tags, but it should have an option for pulling a single key/value, like so: > $ beet info -k acoustid_id "05 Le Freak.aiff" > 616e1881-cb7e-4d9f-9988-fa2b546c7085 This is useful if you wanted to do a multi-step operation cleanly from the command line. Here is the bash script I am using to get around this right now: > # !/usr/bin/env bash > # > > FILE=$1 > > if [ -z "$FILE" ]; then > echo "need filename" > exit 0 > fi > > beet info "$FILE" | grep acoustid_id | sed -e 's/^ *acoustid_id: //g' </issue> <code> [start of beetsplug/info.py] 1 # This file is part of beets. 2 # Copyright 2015, Adrian Sampson. 3 # 4 # Permission is hereby granted, free of charge, to any person obtaining 5 # a copy of this software and associated documentation files (the 6 # "Software"), to deal in the Software without restriction, including 7 # without limitation the rights to use, copy, modify, merge, publish, 8 # distribute, sublicense, and/or sell copies of the Software, and to 9 # permit persons to whom the Software is furnished to do so, subject to 10 # the following conditions: 11 # 12 # The above copyright notice and this permission notice shall be 13 # included in all copies or substantial portions of the Software. 14 15 """Shows file metadata. 16 """ 17 18 from __future__ import (division, absolute_import, print_function, 19 unicode_literals) 20 21 import os 22 23 from beets.plugins import BeetsPlugin 24 from beets import ui 25 from beets import mediafile 26 from beets.util import displayable_path, normpath, syspath 27 28 29 def tag_data(lib, args): 30 query = [] 31 for arg in args: 32 path = normpath(arg) 33 if os.path.isfile(syspath(path)): 34 yield tag_data_emitter(path) 35 else: 36 query.append(arg) 37 38 if query: 39 for item in lib.items(query): 40 yield tag_data_emitter(item.path) 41 42 43 def tag_data_emitter(path): 44 def emitter(): 45 fields = list(mediafile.MediaFile.readable_fields()) 46 fields.remove('images') 47 mf = mediafile.MediaFile(syspath(path)) 48 tags = {} 49 for field in fields: 50 tags[field] = getattr(mf, field) 51 tags['art'] = mf.art is not None 52 tags['path'] = displayable_path(path) 53 return tags 54 return emitter 55 56 57 def library_data(lib, args): 58 for item in lib.items(args): 59 yield library_data_emitter(item) 60 61 62 def library_data_emitter(item): 63 def emitter(): 64 data = dict(item.formatted()) 65 data['path'] = displayable_path(item.path) 66 return data 67 return emitter 68 69 70 def update_summary(summary, tags): 71 for key, value in tags.iteritems(): 72 if key not in summary: 73 summary[key] = value 74 elif summary[key] != value: 75 summary[key] = '[various]' 76 return summary 77 78 79 def print_data(data): 80 path = data.pop('path') 81 formatted = {} 82 for key, value in data.iteritems(): 83 if isinstance(value, list): 84 formatted[key] = u'; '.join(value) 85 if value is not None: 86 formatted[key] = value 87 88 maxwidth = max(len(key) for key in formatted) 89 lineformat = u'{{0:>{0}}}: {{1}}'.format(maxwidth) 90 91 if path: 92 ui.print_(displayable_path(path)) 93 94 for field in sorted(formatted): 95 value = formatted[field] 96 if isinstance(value, list): 97 value = u'; '.join(value) 98 ui.print_(lineformat.format(field, value)) 99 100 101 class InfoPlugin(BeetsPlugin): 102 103 def commands(self): 104 cmd = ui.Subcommand('info', help='show file metadata') 105 cmd.func = self.run 106 
cmd.parser.add_option('-l', '--library', action='store_true', 107 help='show library fields instead of tags') 108 cmd.parser.add_option('-s', '--summarize', action='store_true', 109 help='summarize the tags of all files') 110 return [cmd] 111 112 def run(self, lib, opts, args): 113 """Print tag info or library data for each file referenced by args. 114 115 Main entry point for the `beet info ARGS...` command. 116 117 If an argument is a path pointing to an existing file, then the tags 118 of that file are printed. All other arguments are considered 119 queries, and for each item matching all those queries the tags from 120 the file are printed. 121 122 If `opts.summarize` is true, the function merges all tags into one 123 dictionary and only prints that. If two files have different values 124 for the same tag, the value is set to '[various]' 125 """ 126 if opts.library: 127 data_collector = library_data 128 else: 129 data_collector = tag_data 130 131 first = True 132 summary = {} 133 for data_emitter in data_collector(lib, ui.decargs(args)): 134 try: 135 data = data_emitter() 136 except mediafile.UnreadableFileError as ex: 137 self._log.error(u'cannot read file: {0}', ex) 138 continue 139 140 if opts.summarize: 141 update_summary(summary, data) 142 else: 143 if not first: 144 ui.print_() 145 print_data(data) 146 first = False 147 148 if opts.summarize: 149 print_data(summary) 150 [end of beetsplug/info.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/beetsplug/info.py b/beetsplug/info.py --- a/beetsplug/info.py +++ b/beetsplug/info.py @@ -19,6 +19,7 @@ unicode_literals) import os +import re from beets.plugins import BeetsPlugin from beets import ui @@ -77,7 +78,7 @@ def print_data(data): - path = data.pop('path') + path = data.pop('path', None) formatted = {} for key, value in data.iteritems(): if isinstance(value, list): @@ -85,6 +86,9 @@ if value is not None: formatted[key] = value + if len(formatted) == 0: + return + maxwidth = max(len(key) for key in formatted) lineformat = u'{{0:>{0}}}: {{1}}'.format(maxwidth) @@ -107,6 +111,9 @@ help='show library fields instead of tags') cmd.parser.add_option('-s', '--summarize', action='store_true', help='summarize the tags of all files') + cmd.parser.add_option('-i', '--include-keys', default=[], + action='append', dest='included_keys', + help='comma separated list of keys to show') return [cmd] def run(self, lib, opts, args): @@ -128,6 +135,11 @@ else: data_collector = tag_data + included_keys = [] + for keys in opts.included_keys: + included_keys.extend(keys.split(',')) + key_filter = make_key_filter(included_keys) + first = True summary = {} for data_emitter in data_collector(lib, ui.decargs(args)): @@ -137,6 +149,9 @@ self._log.error(u'cannot read file: {0}', ex) continue + path = data.get('path') + data = key_filter(data) + data['path'] = path # always show path if opts.summarize: update_summary(summary, data) else: @@ -147,3 +162,33 @@ if opts.summarize: print_data(summary) + + +def make_key_filter(include): + """Return a function that filters a dictionary. + + The returned filter takes a dictionary and returns another + dictionary that only includes the key-value pairs where the key + glob-matches one of the keys in `include`. + """ + if not include: + return identity + + matchers = [] + for key in include: + key = re.escape(key) + key = key.replace(r'\*', '.*') + matchers.append(re.compile(key + '$')) + + def filter(data): + filtered = dict() + for key, value in data.items(): + if any(map(lambda m: m.match(key), matchers)): + filtered[key] = value + return filtered + + return filter + + +def identity(val): + return val
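The core of this patch is `make_key_filter`, which turns the comma-separated `--include-keys` values into a dictionary filter with `*` glob support. A self-contained adaptation of that helper (restated so the snippet runs without beets; the tag values are invented apart from the acoustid taken from the issue) and a small demo:

```python
import re


def make_key_filter(include):
    """Build a dict filter keeping keys that glob-match one of `include`
    (adapted from the patch; `*` expands to `.*`)."""
    if not include:
        return lambda data: data
    matchers = [
        re.compile(re.escape(key).replace(r'\*', '.*') + '$')
        for key in include
    ]

    def apply(data):
        return {k: v for k, v in data.items()
                if any(m.match(k) for m in matchers)}
    return apply


tags = {
    'acoustid_id': '616e1881-cb7e-4d9f-9988-fa2b546c7085',
    'artist': 'Chic',
    'albumartist': 'Chic',
    'title': 'Le Freak',
}

# `beet info -i acoustid_id "05 Le Freak.aiff"` boils down to this filter:
print(make_key_filter(['acoustid_id'])(tags))
# Glob patterns work too, e.g. every *artist field at once:
print(make_key_filter(['*artist'])(tags))
```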
{"golden_diff": "diff --git a/beetsplug/info.py b/beetsplug/info.py\n--- a/beetsplug/info.py\n+++ b/beetsplug/info.py\n@@ -19,6 +19,7 @@\n unicode_literals)\n \n import os\n+import re\n \n from beets.plugins import BeetsPlugin\n from beets import ui\n@@ -77,7 +78,7 @@\n \n \n def print_data(data):\n- path = data.pop('path')\n+ path = data.pop('path', None)\n formatted = {}\n for key, value in data.iteritems():\n if isinstance(value, list):\n@@ -85,6 +86,9 @@\n if value is not None:\n formatted[key] = value\n \n+ if len(formatted) == 0:\n+ return\n+\n maxwidth = max(len(key) for key in formatted)\n lineformat = u'{{0:>{0}}}: {{1}}'.format(maxwidth)\n \n@@ -107,6 +111,9 @@\n help='show library fields instead of tags')\n cmd.parser.add_option('-s', '--summarize', action='store_true',\n help='summarize the tags of all files')\n+ cmd.parser.add_option('-i', '--include-keys', default=[],\n+ action='append', dest='included_keys',\n+ help='comma separated list of keys to show')\n return [cmd]\n \n def run(self, lib, opts, args):\n@@ -128,6 +135,11 @@\n else:\n data_collector = tag_data\n \n+ included_keys = []\n+ for keys in opts.included_keys:\n+ included_keys.extend(keys.split(','))\n+ key_filter = make_key_filter(included_keys)\n+\n first = True\n summary = {}\n for data_emitter in data_collector(lib, ui.decargs(args)):\n@@ -137,6 +149,9 @@\n self._log.error(u'cannot read file: {0}', ex)\n continue\n \n+ path = data.get('path')\n+ data = key_filter(data)\n+ data['path'] = path # always show path\n if opts.summarize:\n update_summary(summary, data)\n else:\n@@ -147,3 +162,33 @@\n \n if opts.summarize:\n print_data(summary)\n+\n+\n+def make_key_filter(include):\n+ \"\"\"Return a function that filters a dictionary.\n+\n+ The returned filter takes a dictionary and returns another\n+ dictionary that only includes the key-value pairs where the key\n+ glob-matches one of the keys in `include`.\n+ \"\"\"\n+ if not include:\n+ return identity\n+\n+ matchers = []\n+ for key in include:\n+ key = re.escape(key)\n+ key = key.replace(r'\\*', '.*')\n+ matchers.append(re.compile(key + '$'))\n+\n+ def filter(data):\n+ filtered = dict()\n+ for key, value in data.items():\n+ if any(map(lambda m: m.match(key), matchers)):\n+ filtered[key] = value\n+ return filtered\n+\n+ return filter\n+\n+\n+def identity(val):\n+ return val\n", "issue": "info: Select specific fields\nRight now beet info spews all tags, but it should have an option for pulling a single key/value, like so:\n\n> $ beet info -k acoustid_id \"05 Le Freak.aiff\"\n> 616e1881-cb7e-4d9f-9988-fa2b546c7085\n\nThis is useful if you wanted to do a multi-step operation cleanly from the command line.\n\nHere is the bash script I am using to get around this right now:\n\n> # !/usr/bin/env bash\n> # \n> \n> FILE=$1\n> \n> if [ -z \"$FILE\" ]; then\n> echo \"need filename\"\n> exit 0\n> fi\n> \n> beet info \"$FILE\" | grep acoustid_id | sed -e 's/^ *acoustid_id: //g' \n\n", "before_files": [{"content": "# This file is part of beets.\n# Copyright 2015, Adrian Sampson.\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this 
permission notice shall be\n# included in all copies or substantial portions of the Software.\n\n\"\"\"Shows file metadata.\n\"\"\"\n\nfrom __future__ import (division, absolute_import, print_function,\n unicode_literals)\n\nimport os\n\nfrom beets.plugins import BeetsPlugin\nfrom beets import ui\nfrom beets import mediafile\nfrom beets.util import displayable_path, normpath, syspath\n\n\ndef tag_data(lib, args):\n query = []\n for arg in args:\n path = normpath(arg)\n if os.path.isfile(syspath(path)):\n yield tag_data_emitter(path)\n else:\n query.append(arg)\n\n if query:\n for item in lib.items(query):\n yield tag_data_emitter(item.path)\n\n\ndef tag_data_emitter(path):\n def emitter():\n fields = list(mediafile.MediaFile.readable_fields())\n fields.remove('images')\n mf = mediafile.MediaFile(syspath(path))\n tags = {}\n for field in fields:\n tags[field] = getattr(mf, field)\n tags['art'] = mf.art is not None\n tags['path'] = displayable_path(path)\n return tags\n return emitter\n\n\ndef library_data(lib, args):\n for item in lib.items(args):\n yield library_data_emitter(item)\n\n\ndef library_data_emitter(item):\n def emitter():\n data = dict(item.formatted())\n data['path'] = displayable_path(item.path)\n return data\n return emitter\n\n\ndef update_summary(summary, tags):\n for key, value in tags.iteritems():\n if key not in summary:\n summary[key] = value\n elif summary[key] != value:\n summary[key] = '[various]'\n return summary\n\n\ndef print_data(data):\n path = data.pop('path')\n formatted = {}\n for key, value in data.iteritems():\n if isinstance(value, list):\n formatted[key] = u'; '.join(value)\n if value is not None:\n formatted[key] = value\n\n maxwidth = max(len(key) for key in formatted)\n lineformat = u'{{0:>{0}}}: {{1}}'.format(maxwidth)\n\n if path:\n ui.print_(displayable_path(path))\n\n for field in sorted(formatted):\n value = formatted[field]\n if isinstance(value, list):\n value = u'; '.join(value)\n ui.print_(lineformat.format(field, value))\n\n\nclass InfoPlugin(BeetsPlugin):\n\n def commands(self):\n cmd = ui.Subcommand('info', help='show file metadata')\n cmd.func = self.run\n cmd.parser.add_option('-l', '--library', action='store_true',\n help='show library fields instead of tags')\n cmd.parser.add_option('-s', '--summarize', action='store_true',\n help='summarize the tags of all files')\n return [cmd]\n\n def run(self, lib, opts, args):\n \"\"\"Print tag info or library data for each file referenced by args.\n\n Main entry point for the `beet info ARGS...` command.\n\n If an argument is a path pointing to an existing file, then the tags\n of that file are printed. All other arguments are considered\n queries, and for each item matching all those queries the tags from\n the file are printed.\n\n If `opts.summarize` is true, the function merges all tags into one\n dictionary and only prints that. If two files have different values\n for the same tag, the value is set to '[various]'\n \"\"\"\n if opts.library:\n data_collector = library_data\n else:\n data_collector = tag_data\n\n first = True\n summary = {}\n for data_emitter in data_collector(lib, ui.decargs(args)):\n try:\n data = data_emitter()\n except mediafile.UnreadableFileError as ex:\n self._log.error(u'cannot read file: {0}', ex)\n continue\n\n if opts.summarize:\n update_summary(summary, data)\n else:\n if not first:\n ui.print_()\n print_data(data)\n first = False\n\n if opts.summarize:\n print_data(summary)\n", "path": "beetsplug/info.py"}]}
2,107
702
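The golden diff in the record above turns each `--include-keys` glob into an anchored regular expression (`re.escape` the pattern, then map `\*` to `.*`) and keeps only matching tag keys. A small standalone sketch of that idea, independent of beets — the sample tag dictionary below is invented for illustration:

```python
import re


def make_key_filter(patterns):
    """Compile glob-style patterns (e.g. 'acoustid*') into anchored regexes
    and return a function that keeps only matching dict keys."""
    if not patterns:
        return lambda data: data  # no patterns: keep everything
    matchers = [re.compile(re.escape(p).replace(r"\*", ".*") + "$") for p in patterns]

    def filter_keys(data):
        return {k: v for k, v in data.items() if any(m.match(k) for m in matchers)}

    return filter_keys


# Invented example data:
tags = {"acoustid_id": "616e1881-cb7e", "artist": "Chic", "album": "C'est Chic"}
print(make_key_filter(["acoustid*"])(tags))  # {'acoustid_id': '616e1881-cb7e'}
```

With an empty pattern list the filter passes the dictionary through unchanged, mirroring the `identity` fallback in the diff.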
gh_patches_debug_32468
rasdani/github-patches
git_diff
alltheplaces__alltheplaces-1102
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Pizza Ranch Spider #29 is not collecting all locations, it's only capturing the 1st page on each state list. </issue> <code> [start of locations/spiders/pizzaranch.py] 1 import scrapy 2 import re 3 from locations.items import GeojsonPointItem 4 import json 5 class CVSSpider(scrapy.Spider): 6 7 name = "pizzaranch" 8 allowed_domains = ["pizzaranch.com"] 9 download_delay = 0.5 10 start_urls = ( 11 'https://pizzaranch.com/locations', 12 ) 13 def parse_times(self, times): 14 if times.strip() == 'Open 24 hours': 15 return '24/7' 16 hours_to = [x.strip() for x in times.split('-')] 17 cleaned_times = [] 18 19 for hour in hours_to: 20 if re.search('PM$', hour): 21 hour = re.sub('PM', '', hour).strip() 22 hour_min = hour.split(":") 23 if int(hour_min[0]) < 12: 24 hour_min[0] = str(12 + int(hour_min[0])) 25 cleaned_times.append(":".join(hour_min)) 26 27 if re.search('AM$', hour): 28 hour = re.sub('AM', '', hour).strip() 29 hour_min = hour.split(":") 30 if len(hour_min[0]) <2: 31 hour_min[0] = hour_min[0].zfill(2) 32 else: 33 hour_min[0] = str(int(hour_min[0])) 34 35 cleaned_times.append(":".join(hour_min)) 36 return "-".join(cleaned_times) 37 38 def parse_hours(self, lis): 39 hours = [] 40 for li in lis: 41 day = li.xpath('normalize-space(.//td[@class="c-location-hours-details-row-day"]/text())').extract_first()[:2] 42 times = li.xpath('.//td[@class="c-location-hours-details-row-intervals"]/span/span/text()').extract() 43 times = "".join(x for x in times) 44 if times and day: 45 parsed_time = self.parse_times(times) 46 hours.append(day + ' ' + parsed_time) 47 48 return "; ".join(hours) 49 50 def parse_stores(self, response): 51 map_data = response.xpath('normalize-space(//script[@id="js-map-config-dir-map-nap-map"]/text())').extract_first() 52 map_json = json.loads(map_data) 53 properties = { 54 'addr_full': response.xpath('normalize-space(//span[@itemprop="streetAddress"]/span/text())').extract_first(), 55 'phone': response.xpath('normalize-space(//div[@class="c-phone-number c-phone-main-number"]/span[@class="c-phone-number-span c-phone-main-number-span"]/text())').extract_first(), 56 'city': response.xpath('normalize-space(//span[@itemprop="addressLocality"]/text())').extract_first(), 57 'state': response.xpath('normalize-space(//abbr[@itemprop="addressRegion"]/text())').extract_first(), 58 'postcode': response.xpath('normalize-space(//span[@itemprop="postalCode"]/text())').extract_first(), 59 'ref': map_json['locs'][0]['id'], 60 'website': response.url, 61 'lat': float( map_json['locs'][0]['latitude']), 62 'lon': float( map_json['locs'][0]['longitude']), 63 } 64 hours = self.parse_hours(response.xpath('//div[@class="hours-primary hidden-xs"]/div[@class="c-location-hours"]/div[@class="c-location-hours-details-wrapper js-location-hours"]/table/tbody/tr')) 65 66 if hours: 67 properties['opening_hours'] = hours 68 69 yield GeojsonPointItem(**properties) 70 71 def parse_state_stores(self, response): 72 stores = response.xpath('//h3[@class="title"]/a/@href').extract() 73 for store in stores: 74 yield scrapy.Request(response.urljoin(store), callback=self.parse_stores) 75 76 def parse(self, response): 77 urls = response.xpath('//ol[@class="state-list"]/li/a/@href').extract() 78 for path in urls: 79 yield scrapy.Request(response.urljoin(path), callback=self.parse_state_stores) 80 [end of locations/spiders/pizzaranch.py] </code> I need you to solve this issue by generating a single patch file that I 
can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/locations/spiders/pizzaranch.py b/locations/spiders/pizzaranch.py --- a/locations/spiders/pizzaranch.py +++ b/locations/spiders/pizzaranch.py @@ -51,6 +51,7 @@ map_data = response.xpath('normalize-space(//script[@id="js-map-config-dir-map-nap-map"]/text())').extract_first() map_json = json.loads(map_data) properties = { + 'name': response.xpath('//span[@class="location-name-geo"]/text()').extract_first(), 'addr_full': response.xpath('normalize-space(//span[@itemprop="streetAddress"]/span/text())').extract_first(), 'phone': response.xpath('normalize-space(//div[@class="c-phone-number c-phone-main-number"]/span[@class="c-phone-number-span c-phone-main-number-span"]/text())').extract_first(), 'city': response.xpath('normalize-space(//span[@itemprop="addressLocality"]/text())').extract_first(), @@ -68,11 +69,17 @@ yield GeojsonPointItem(**properties) + def parse_state_stores(self, response): stores = response.xpath('//h3[@class="title"]/a/@href').extract() for store in stores: yield scrapy.Request(response.urljoin(store), callback=self.parse_stores) + next_page_url = response.xpath('//div[@class="pagination"]//li[@class="next"]/a/@href').extract_first() + + if next_page_url: + yield scrapy.Request(next_page_url, callback=self.parse_state_stores) + def parse(self, response): urls = response.xpath('//ol[@class="state-list"]/li/a/@href').extract() for path in urls:
{"golden_diff": "diff --git a/locations/spiders/pizzaranch.py b/locations/spiders/pizzaranch.py\n--- a/locations/spiders/pizzaranch.py\n+++ b/locations/spiders/pizzaranch.py\n@@ -51,6 +51,7 @@\n map_data = response.xpath('normalize-space(//script[@id=\"js-map-config-dir-map-nap-map\"]/text())').extract_first()\n map_json = json.loads(map_data)\n properties = {\n+ 'name': response.xpath('//span[@class=\"location-name-geo\"]/text()').extract_first(),\n 'addr_full': response.xpath('normalize-space(//span[@itemprop=\"streetAddress\"]/span/text())').extract_first(),\n 'phone': response.xpath('normalize-space(//div[@class=\"c-phone-number c-phone-main-number\"]/span[@class=\"c-phone-number-span c-phone-main-number-span\"]/text())').extract_first(),\n 'city': response.xpath('normalize-space(//span[@itemprop=\"addressLocality\"]/text())').extract_first(),\n@@ -68,11 +69,17 @@\n \n yield GeojsonPointItem(**properties)\n \n+\n def parse_state_stores(self, response):\n stores = response.xpath('//h3[@class=\"title\"]/a/@href').extract()\n for store in stores:\n yield scrapy.Request(response.urljoin(store), callback=self.parse_stores)\n \n+ next_page_url = response.xpath('//div[@class=\"pagination\"]//li[@class=\"next\"]/a/@href').extract_first()\n+\n+ if next_page_url:\n+ yield scrapy.Request(next_page_url, callback=self.parse_state_stores)\n+\n def parse(self, response):\n urls = response.xpath('//ol[@class=\"state-list\"]/li/a/@href').extract()\n for path in urls:\n", "issue": "Pizza Ranch \nSpider #29 is not collecting all locations, it's only capturing the 1st page on each state list.\r\n\n", "before_files": [{"content": "import scrapy\nimport re\nfrom locations.items import GeojsonPointItem\nimport json\nclass CVSSpider(scrapy.Spider):\n\n name = \"pizzaranch\"\n allowed_domains = [\"pizzaranch.com\"]\n download_delay = 0.5\n start_urls = (\n 'https://pizzaranch.com/locations',\n )\n def parse_times(self, times):\n if times.strip() == 'Open 24 hours':\n return '24/7'\n hours_to = [x.strip() for x in times.split('-')]\n cleaned_times = []\n\n for hour in hours_to:\n if re.search('PM$', hour):\n hour = re.sub('PM', '', hour).strip()\n hour_min = hour.split(\":\")\n if int(hour_min[0]) < 12:\n hour_min[0] = str(12 + int(hour_min[0]))\n cleaned_times.append(\":\".join(hour_min))\n\n if re.search('AM$', hour):\n hour = re.sub('AM', '', hour).strip()\n hour_min = hour.split(\":\")\n if len(hour_min[0]) <2:\n hour_min[0] = hour_min[0].zfill(2)\n else:\n hour_min[0] = str(int(hour_min[0]))\n\n cleaned_times.append(\":\".join(hour_min))\n return \"-\".join(cleaned_times)\n\n def parse_hours(self, lis):\n hours = []\n for li in lis:\n day = li.xpath('normalize-space(.//td[@class=\"c-location-hours-details-row-day\"]/text())').extract_first()[:2]\n times = li.xpath('.//td[@class=\"c-location-hours-details-row-intervals\"]/span/span/text()').extract()\n times = \"\".join(x for x in times)\n if times and day:\n parsed_time = self.parse_times(times)\n hours.append(day + ' ' + parsed_time)\n\n return \"; \".join(hours)\n\n def parse_stores(self, response):\n map_data = response.xpath('normalize-space(//script[@id=\"js-map-config-dir-map-nap-map\"]/text())').extract_first()\n map_json = json.loads(map_data)\n properties = {\n 'addr_full': response.xpath('normalize-space(//span[@itemprop=\"streetAddress\"]/span/text())').extract_first(),\n 'phone': response.xpath('normalize-space(//div[@class=\"c-phone-number c-phone-main-number\"]/span[@class=\"c-phone-number-span 
c-phone-main-number-span\"]/text())').extract_first(),\n 'city': response.xpath('normalize-space(//span[@itemprop=\"addressLocality\"]/text())').extract_first(),\n 'state': response.xpath('normalize-space(//abbr[@itemprop=\"addressRegion\"]/text())').extract_first(),\n 'postcode': response.xpath('normalize-space(//span[@itemprop=\"postalCode\"]/text())').extract_first(),\n 'ref': map_json['locs'][0]['id'],\n 'website': response.url,\n 'lat': float( map_json['locs'][0]['latitude']),\n 'lon': float( map_json['locs'][0]['longitude']),\n }\n hours = self.parse_hours(response.xpath('//div[@class=\"hours-primary hidden-xs\"]/div[@class=\"c-location-hours\"]/div[@class=\"c-location-hours-details-wrapper js-location-hours\"]/table/tbody/tr'))\n\n if hours:\n properties['opening_hours'] = hours\n\n yield GeojsonPointItem(**properties)\n\n def parse_state_stores(self, response):\n stores = response.xpath('//h3[@class=\"title\"]/a/@href').extract()\n for store in stores:\n yield scrapy.Request(response.urljoin(store), callback=self.parse_stores)\n\n def parse(self, response):\n urls = response.xpath('//ol[@class=\"state-list\"]/li/a/@href').extract()\n for path in urls:\n yield scrapy.Request(response.urljoin(path), callback=self.parse_state_stores)\n", "path": "locations/spiders/pizzaranch.py"}]}
1,562
386
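The fix in the preceding record is the standard Scrapy pagination idiom: after yielding the per-store requests from a listing page, look for a "next" link and re-enter the same callback. A generic sketch of that idiom — the start URL and XPath selectors here are placeholders, not the real pizzaranch.com markup:

```python
import scrapy


class PaginatedListSpider(scrapy.Spider):
    """Illustrative only: follow detail links on a listing page, then recurse
    into the next page until no 'next' link is found."""
    name = "paginated_list_example"
    start_urls = ["https://example.com/locations"]  # placeholder URL

    def parse(self, response):
        # Placeholder selectors; adjust to the real page structure.
        for href in response.xpath('//h3[@class="title"]/a/@href').extract():
            yield scrapy.Request(response.urljoin(href), callback=self.parse_store)

        next_page = response.xpath('//li[@class="next"]/a/@href').extract_first()
        if next_page:
            # Same callback again: each page yields its stores, then chains on.
            yield scrapy.Request(response.urljoin(next_page), callback=self.parse)

    def parse_store(self, response):
        yield {"url": response.url}
```

The `if next_page:` guard is what ends the recursion on the last page of each state list.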
gh_patches_debug_4925
rasdani/github-patches
git_diff
scalableminds__webknossos-libs-324
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> BoundingBox and BoundingBoxNamedTuple should have bidirectional conversion A lot of voxelytics code does nothing but converting these, which could be DRYer by moving the conversion into the API </issue> <code> [start of wkcuber/api/bounding_box.py] 1 # mypy: allow-untyped-defs 2 import json 3 import re 4 from typing import ( 5 Dict, 6 Generator, 7 Iterable, 8 List, 9 Optional, 10 Tuple, 11 Union, 12 NamedTuple, 13 cast, 14 ) 15 16 import numpy as np 17 18 from wkcuber.mag import Mag 19 20 Shape3D = Union[List[int], Tuple[int, int, int], np.ndarray] 21 22 23 class BoundingBoxNamedTuple(NamedTuple): 24 topleft: Tuple[int, int, int] 25 size: Tuple[int, int, int] 26 27 28 class BoundingBox: 29 def __init__(self, topleft: Shape3D, size: Shape3D): 30 31 self.topleft = np.array(topleft, dtype=np.int) 32 self.size = np.array(size, dtype=np.int) 33 34 @property 35 def bottomright(self) -> np.ndarray: 36 37 return self.topleft + self.size 38 39 @staticmethod 40 def from_wkw(bbox: Dict) -> "BoundingBox": 41 return BoundingBox( 42 bbox["topLeft"], [bbox["width"], bbox["height"], bbox["depth"]] 43 ) 44 45 @staticmethod 46 def from_config(bbox: Dict) -> "BoundingBox": 47 return BoundingBox(bbox["topleft"], bbox["size"]) 48 49 @staticmethod 50 def from_tuple6(tuple6: Tuple[int, int, int, int, int, int]) -> "BoundingBox": 51 return BoundingBox(tuple6[0:3], tuple6[3:6]) 52 53 @staticmethod 54 def from_tuple2(tuple2: Tuple[Shape3D, Shape3D]) -> "BoundingBox": 55 return BoundingBox(tuple2[0], tuple2[1]) 56 57 @staticmethod 58 def from_points(points: Iterable[Shape3D]) -> "BoundingBox": 59 60 all_points = np.array(points) 61 topleft = all_points.min(axis=0) 62 bottomright = all_points.max(axis=0) 63 64 # bottomright is exclusive 65 bottomright += 1 66 67 return BoundingBox(topleft, bottomright - topleft) 68 69 @staticmethod 70 def from_named_tuple(bb_named_tuple: BoundingBoxNamedTuple): 71 72 return BoundingBox(bb_named_tuple.topleft, bb_named_tuple.size) 73 74 @staticmethod 75 def from_checkpoint_name(checkpoint_name: str) -> "BoundingBox": 76 """This function extracts a bounding box in the format x_y_z_sx_sy_xz which is contained in a string.""" 77 regex = r"(([0-9]+_){5}([0-9]+))" 78 match = re.search(regex, checkpoint_name) 79 assert ( 80 match is not None 81 ), f"Could not extract bounding box from {checkpoint_name}" 82 bbox_tuple = tuple(int(value) for value in match.group().split("_")) 83 topleft = cast(Tuple[int, int, int], bbox_tuple[:3]) 84 size = cast(Tuple[int, int, int], bbox_tuple[3:6]) 85 return BoundingBox.from_tuple2((topleft, size)) 86 87 @staticmethod 88 def from_csv(csv_bbox: str) -> "BoundingBox": 89 bbox_tuple = tuple(int(x) for x in csv_bbox.split(",")) 90 return BoundingBox.from_tuple6( 91 cast(Tuple[int, int, int, int, int, int], bbox_tuple) 92 ) 93 94 @staticmethod 95 def from_auto(obj) -> "BoundingBox": 96 if isinstance(obj, BoundingBox): 97 return obj 98 elif isinstance(obj, str): 99 if ":" in obj: 100 return BoundingBox.from_auto(json.loads(obj)) 101 else: 102 return BoundingBox.from_csv(obj) 103 elif isinstance(obj, dict): 104 return BoundingBox.from_wkw(obj) 105 elif isinstance(obj, BoundingBoxNamedTuple): 106 return BoundingBox.from_named_tuple(obj) 107 elif isinstance(obj, list) or isinstance(obj, tuple): 108 if len(obj) == 2: 109 return BoundingBox.from_tuple2(obj) # type: ignore 110 elif len(obj) == 6: 111 return BoundingBox.from_tuple6(obj) # type: ignore 112 113 
raise Exception("Unknown bounding box format.") 114 115 def as_wkw(self) -> dict: 116 117 width, height, depth = self.size.tolist() 118 119 return { 120 "topLeft": self.topleft.tolist(), 121 "width": width, 122 "height": height, 123 "depth": depth, 124 } 125 126 def as_config(self) -> dict: 127 128 return {"topleft": self.topleft.tolist(), "size": self.size.tolist()} 129 130 def as_checkpoint_name(self) -> str: 131 132 x, y, z = self.topleft 133 width, height, depth = self.size 134 return "{x}_{y}_{z}_{width}_{height}_{depth}".format( 135 x=x, y=y, z=z, width=width, height=height, depth=depth 136 ) 137 138 def as_tuple6(self) -> Tuple[int, int, int, int, int, int]: 139 140 return tuple(self.topleft.tolist() + self.size.tolist()) # type: ignore 141 142 def as_csv(self) -> str: 143 144 return ",".join(map(str, self.as_tuple6())) 145 146 def __repr__(self) -> str: 147 148 return "BoundingBox(topleft={}, size={})".format( 149 str(tuple(self.topleft)), str(tuple(self.size)) 150 ) 151 152 def __str__(self) -> str: 153 154 return self.__repr__() 155 156 def __eq__(self, other) -> bool: 157 158 return np.array_equal(self.topleft, other.topleft) and np.array_equal( 159 self.size, other.size 160 ) 161 162 def padded_with_margins( 163 self, margins_left: Shape3D, margins_right: Optional[Shape3D] = None 164 ) -> "BoundingBox": 165 166 if margins_right is None: 167 margins_right = margins_left 168 169 margins_left = np.array(margins_left) 170 margins_right = np.array(margins_right) 171 172 return BoundingBox( 173 topleft=self.topleft - margins_left, 174 size=self.size + (margins_left + margins_right), 175 ) 176 177 def intersected_with( 178 self, other: "BoundingBox", dont_assert=False 179 ) -> "BoundingBox": 180 """ If dont_assert is set to False, this method may return empty bounding boxes (size == (0, 0, 0)) """ 181 182 topleft = np.maximum(self.topleft, other.topleft) 183 bottomright = np.minimum(self.bottomright, other.bottomright) 184 size = np.maximum(bottomright - topleft, (0, 0, 0)) 185 186 intersection = BoundingBox(topleft, size) 187 188 if not dont_assert: 189 assert ( 190 not intersection.is_empty() 191 ), f"No intersection between bounding boxes {self} and {other}." 192 193 return intersection 194 195 def extended_by(self, other: "BoundingBox") -> "BoundingBox": 196 197 topleft = np.minimum(self.topleft, other.topleft) 198 bottomright = np.maximum(self.bottomright, other.bottomright) 199 size = bottomright - topleft 200 201 return BoundingBox(topleft, size) 202 203 def is_empty(self) -> bool: 204 205 return not all(self.size > 0) 206 207 def in_mag(self, mag: Mag) -> "BoundingBox": 208 209 np_mag = np.array(mag.to_array()) 210 211 assert ( 212 np.count_nonzero(self.topleft % np_mag) == 0 213 ), f"topleft {self.topleft} is not aligned with the mag {mag}. Use BoundingBox.align_with_mag()." 214 assert ( 215 np.count_nonzero(self.bottomright % np_mag) == 0 216 ), f"bottomright {self.bottomright} is not aligned with the mag {mag}. Use BoundingBox.align_with_mag()." 217 218 return BoundingBox( 219 topleft=(self.topleft // np_mag).astype(np.int), 220 size=(self.size // np_mag).astype(np.int), 221 ) 222 223 def align_with_mag(self, mag: Mag, ceil=False): 224 """Rounds the bounding box, so that both topleft and bottomright are divisible by mag. 225 226 :argument ceil: If true, the bounding box is enlarged when necessary. If false, it's shrinked when necessary. 
227 """ 228 229 np_mag = np.array(mag.to_array()) 230 231 align = lambda point, round_fn: round_fn(point / np_mag).astype(np.int) * np_mag 232 233 if ceil: 234 topleft = align(self.topleft, np.floor) 235 bottomright = align(self.bottomright, np.ceil) 236 else: 237 topleft = align(self.topleft, np.ceil) 238 bottomright = align(self.bottomright, np.floor) 239 return BoundingBox(topleft, bottomright - topleft) 240 241 def contains(self, coord: Shape3D) -> bool: 242 243 coord = np.array(coord) 244 245 return np.all(coord >= self.topleft) and np.all( 246 coord < self.topleft + self.size 247 ) 248 249 def contains_bbox(self, inner_bbox: "BoundingBox") -> bool: 250 return inner_bbox.intersected_with(self, dont_assert=True) == inner_bbox 251 252 def chunk( 253 self, chunk_size: Shape3D, chunk_border_alignments: Optional[List[int]] = None 254 ) -> Generator["BoundingBox", None, None]: 255 """Decompose the bounding box into smaller chunks of size `chunk_size`. 256 257 Chunks at the border of the bounding box might be smaller than chunk_size. 258 If `chunk_border_alignment` is set, all border coordinates 259 *between two chunks* will be divisible by that value. 260 """ 261 262 start = self.topleft.copy() 263 chunk_size = np.array(chunk_size) 264 265 start_adjust = np.array([0, 0, 0]) 266 if chunk_border_alignments is not None: 267 268 chunk_border_alignments = np.array(chunk_border_alignments) 269 assert np.all( 270 chunk_size % chunk_border_alignments == 0 271 ), f"{chunk_size} not divisible by {chunk_border_alignments}" 272 273 # Move the start to be aligned correctly. This doesn't actually change 274 # the start of the first chunk, because we'll intersect with `self`, 275 # but it'll lead to all chunk borders being aligned correctly. 276 start_adjust = start % chunk_border_alignments 277 278 for x in range( 279 start[0] - start_adjust[0], start[0] + self.size[0], chunk_size[0] 280 ): 281 for y in range( 282 start[1] - start_adjust[1], start[1] + self.size[1], chunk_size[1] 283 ): 284 for z in range( 285 start[2] - start_adjust[2], start[2] + self.size[2], chunk_size[2] 286 ): 287 288 yield BoundingBox([x, y, z], chunk_size).intersected_with(self) 289 290 def volume(self) -> int: 291 292 return self.size.prod() 293 294 def slice_array(self, array: np.ndarray) -> np.ndarray: 295 296 return array[ 297 self.topleft[0] : self.bottomright[0], 298 self.topleft[1] : self.bottomright[1], 299 self.topleft[2] : self.bottomright[2], 300 ] 301 302 def as_slices(self) -> Tuple[slice, slice, slice]: 303 return np.index_exp[ 304 self.topleft[0] : self.bottomright[0], 305 self.topleft[1] : self.bottomright[1], 306 self.topleft[2] : self.bottomright[2], 307 ] 308 309 def copy(self) -> "BoundingBox": 310 311 return BoundingBox(self.topleft.copy(), self.bottomright.copy()) 312 313 def offset(self, vector: Tuple[int, int, int]) -> "BoundingBox": 314 315 return BoundingBox(self.topleft + np.array(vector), self.size.copy()) 316 [end of wkcuber/api/bounding_box.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/wkcuber/api/bounding_box.py b/wkcuber/api/bounding_box.py --- a/wkcuber/api/bounding_box.py +++ b/wkcuber/api/bounding_box.py @@ -143,6 +143,12 @@ return ",".join(map(str, self.as_tuple6())) + def as_named_tuple(self) -> BoundingBoxNamedTuple: + return BoundingBoxNamedTuple( + topleft=cast(Tuple[int, int, int], tuple(self.topleft)), + size=cast(Tuple[int, int, int], tuple(self.size)), + ) + def __repr__(self) -> str: return "BoundingBox(topleft={}, size={})".format(
{"golden_diff": "diff --git a/wkcuber/api/bounding_box.py b/wkcuber/api/bounding_box.py\n--- a/wkcuber/api/bounding_box.py\n+++ b/wkcuber/api/bounding_box.py\n@@ -143,6 +143,12 @@\n \n return \",\".join(map(str, self.as_tuple6()))\n \n+ def as_named_tuple(self) -> BoundingBoxNamedTuple:\n+ return BoundingBoxNamedTuple(\n+ topleft=cast(Tuple[int, int, int], tuple(self.topleft)),\n+ size=cast(Tuple[int, int, int], tuple(self.size)),\n+ )\n+\n def __repr__(self) -> str:\n \n return \"BoundingBox(topleft={}, size={})\".format(\n", "issue": "BoundingBox and BoundingBoxNamedTuple should have bidirectional conversion\nA lot of voxelytics code does nothing but converting these, which could be DRYer by moving the conversion into the API\n", "before_files": [{"content": "# mypy: allow-untyped-defs\nimport json\nimport re\nfrom typing import (\n Dict,\n Generator,\n Iterable,\n List,\n Optional,\n Tuple,\n Union,\n NamedTuple,\n cast,\n)\n\nimport numpy as np\n\nfrom wkcuber.mag import Mag\n\nShape3D = Union[List[int], Tuple[int, int, int], np.ndarray]\n\n\nclass BoundingBoxNamedTuple(NamedTuple):\n topleft: Tuple[int, int, int]\n size: Tuple[int, int, int]\n\n\nclass BoundingBox:\n def __init__(self, topleft: Shape3D, size: Shape3D):\n\n self.topleft = np.array(topleft, dtype=np.int)\n self.size = np.array(size, dtype=np.int)\n\n @property\n def bottomright(self) -> np.ndarray:\n\n return self.topleft + self.size\n\n @staticmethod\n def from_wkw(bbox: Dict) -> \"BoundingBox\":\n return BoundingBox(\n bbox[\"topLeft\"], [bbox[\"width\"], bbox[\"height\"], bbox[\"depth\"]]\n )\n\n @staticmethod\n def from_config(bbox: Dict) -> \"BoundingBox\":\n return BoundingBox(bbox[\"topleft\"], bbox[\"size\"])\n\n @staticmethod\n def from_tuple6(tuple6: Tuple[int, int, int, int, int, int]) -> \"BoundingBox\":\n return BoundingBox(tuple6[0:3], tuple6[3:6])\n\n @staticmethod\n def from_tuple2(tuple2: Tuple[Shape3D, Shape3D]) -> \"BoundingBox\":\n return BoundingBox(tuple2[0], tuple2[1])\n\n @staticmethod\n def from_points(points: Iterable[Shape3D]) -> \"BoundingBox\":\n\n all_points = np.array(points)\n topleft = all_points.min(axis=0)\n bottomright = all_points.max(axis=0)\n\n # bottomright is exclusive\n bottomright += 1\n\n return BoundingBox(topleft, bottomright - topleft)\n\n @staticmethod\n def from_named_tuple(bb_named_tuple: BoundingBoxNamedTuple):\n\n return BoundingBox(bb_named_tuple.topleft, bb_named_tuple.size)\n\n @staticmethod\n def from_checkpoint_name(checkpoint_name: str) -> \"BoundingBox\":\n \"\"\"This function extracts a bounding box in the format x_y_z_sx_sy_xz which is contained in a string.\"\"\"\n regex = r\"(([0-9]+_){5}([0-9]+))\"\n match = re.search(regex, checkpoint_name)\n assert (\n match is not None\n ), f\"Could not extract bounding box from {checkpoint_name}\"\n bbox_tuple = tuple(int(value) for value in match.group().split(\"_\"))\n topleft = cast(Tuple[int, int, int], bbox_tuple[:3])\n size = cast(Tuple[int, int, int], bbox_tuple[3:6])\n return BoundingBox.from_tuple2((topleft, size))\n\n @staticmethod\n def from_csv(csv_bbox: str) -> \"BoundingBox\":\n bbox_tuple = tuple(int(x) for x in csv_bbox.split(\",\"))\n return BoundingBox.from_tuple6(\n cast(Tuple[int, int, int, int, int, int], bbox_tuple)\n )\n\n @staticmethod\n def from_auto(obj) -> \"BoundingBox\":\n if isinstance(obj, BoundingBox):\n return obj\n elif isinstance(obj, str):\n if \":\" in obj:\n return BoundingBox.from_auto(json.loads(obj))\n else:\n return BoundingBox.from_csv(obj)\n elif isinstance(obj, dict):\n 
return BoundingBox.from_wkw(obj)\n elif isinstance(obj, BoundingBoxNamedTuple):\n return BoundingBox.from_named_tuple(obj)\n elif isinstance(obj, list) or isinstance(obj, tuple):\n if len(obj) == 2:\n return BoundingBox.from_tuple2(obj) # type: ignore\n elif len(obj) == 6:\n return BoundingBox.from_tuple6(obj) # type: ignore\n\n raise Exception(\"Unknown bounding box format.\")\n\n def as_wkw(self) -> dict:\n\n width, height, depth = self.size.tolist()\n\n return {\n \"topLeft\": self.topleft.tolist(),\n \"width\": width,\n \"height\": height,\n \"depth\": depth,\n }\n\n def as_config(self) -> dict:\n\n return {\"topleft\": self.topleft.tolist(), \"size\": self.size.tolist()}\n\n def as_checkpoint_name(self) -> str:\n\n x, y, z = self.topleft\n width, height, depth = self.size\n return \"{x}_{y}_{z}_{width}_{height}_{depth}\".format(\n x=x, y=y, z=z, width=width, height=height, depth=depth\n )\n\n def as_tuple6(self) -> Tuple[int, int, int, int, int, int]:\n\n return tuple(self.topleft.tolist() + self.size.tolist()) # type: ignore\n\n def as_csv(self) -> str:\n\n return \",\".join(map(str, self.as_tuple6()))\n\n def __repr__(self) -> str:\n\n return \"BoundingBox(topleft={}, size={})\".format(\n str(tuple(self.topleft)), str(tuple(self.size))\n )\n\n def __str__(self) -> str:\n\n return self.__repr__()\n\n def __eq__(self, other) -> bool:\n\n return np.array_equal(self.topleft, other.topleft) and np.array_equal(\n self.size, other.size\n )\n\n def padded_with_margins(\n self, margins_left: Shape3D, margins_right: Optional[Shape3D] = None\n ) -> \"BoundingBox\":\n\n if margins_right is None:\n margins_right = margins_left\n\n margins_left = np.array(margins_left)\n margins_right = np.array(margins_right)\n\n return BoundingBox(\n topleft=self.topleft - margins_left,\n size=self.size + (margins_left + margins_right),\n )\n\n def intersected_with(\n self, other: \"BoundingBox\", dont_assert=False\n ) -> \"BoundingBox\":\n \"\"\" If dont_assert is set to False, this method may return empty bounding boxes (size == (0, 0, 0)) \"\"\"\n\n topleft = np.maximum(self.topleft, other.topleft)\n bottomright = np.minimum(self.bottomright, other.bottomright)\n size = np.maximum(bottomright - topleft, (0, 0, 0))\n\n intersection = BoundingBox(topleft, size)\n\n if not dont_assert:\n assert (\n not intersection.is_empty()\n ), f\"No intersection between bounding boxes {self} and {other}.\"\n\n return intersection\n\n def extended_by(self, other: \"BoundingBox\") -> \"BoundingBox\":\n\n topleft = np.minimum(self.topleft, other.topleft)\n bottomright = np.maximum(self.bottomright, other.bottomright)\n size = bottomright - topleft\n\n return BoundingBox(topleft, size)\n\n def is_empty(self) -> bool:\n\n return not all(self.size > 0)\n\n def in_mag(self, mag: Mag) -> \"BoundingBox\":\n\n np_mag = np.array(mag.to_array())\n\n assert (\n np.count_nonzero(self.topleft % np_mag) == 0\n ), f\"topleft {self.topleft} is not aligned with the mag {mag}. Use BoundingBox.align_with_mag().\"\n assert (\n np.count_nonzero(self.bottomright % np_mag) == 0\n ), f\"bottomright {self.bottomright} is not aligned with the mag {mag}. Use BoundingBox.align_with_mag().\"\n\n return BoundingBox(\n topleft=(self.topleft // np_mag).astype(np.int),\n size=(self.size // np_mag).astype(np.int),\n )\n\n def align_with_mag(self, mag: Mag, ceil=False):\n \"\"\"Rounds the bounding box, so that both topleft and bottomright are divisible by mag.\n\n :argument ceil: If true, the bounding box is enlarged when necessary. 
If false, it's shrinked when necessary.\n \"\"\"\n\n np_mag = np.array(mag.to_array())\n\n align = lambda point, round_fn: round_fn(point / np_mag).astype(np.int) * np_mag\n\n if ceil:\n topleft = align(self.topleft, np.floor)\n bottomright = align(self.bottomright, np.ceil)\n else:\n topleft = align(self.topleft, np.ceil)\n bottomright = align(self.bottomright, np.floor)\n return BoundingBox(topleft, bottomright - topleft)\n\n def contains(self, coord: Shape3D) -> bool:\n\n coord = np.array(coord)\n\n return np.all(coord >= self.topleft) and np.all(\n coord < self.topleft + self.size\n )\n\n def contains_bbox(self, inner_bbox: \"BoundingBox\") -> bool:\n return inner_bbox.intersected_with(self, dont_assert=True) == inner_bbox\n\n def chunk(\n self, chunk_size: Shape3D, chunk_border_alignments: Optional[List[int]] = None\n ) -> Generator[\"BoundingBox\", None, None]:\n \"\"\"Decompose the bounding box into smaller chunks of size `chunk_size`.\n\n Chunks at the border of the bounding box might be smaller than chunk_size.\n If `chunk_border_alignment` is set, all border coordinates\n *between two chunks* will be divisible by that value.\n \"\"\"\n\n start = self.topleft.copy()\n chunk_size = np.array(chunk_size)\n\n start_adjust = np.array([0, 0, 0])\n if chunk_border_alignments is not None:\n\n chunk_border_alignments = np.array(chunk_border_alignments)\n assert np.all(\n chunk_size % chunk_border_alignments == 0\n ), f\"{chunk_size} not divisible by {chunk_border_alignments}\"\n\n # Move the start to be aligned correctly. This doesn't actually change\n # the start of the first chunk, because we'll intersect with `self`,\n # but it'll lead to all chunk borders being aligned correctly.\n start_adjust = start % chunk_border_alignments\n\n for x in range(\n start[0] - start_adjust[0], start[0] + self.size[0], chunk_size[0]\n ):\n for y in range(\n start[1] - start_adjust[1], start[1] + self.size[1], chunk_size[1]\n ):\n for z in range(\n start[2] - start_adjust[2], start[2] + self.size[2], chunk_size[2]\n ):\n\n yield BoundingBox([x, y, z], chunk_size).intersected_with(self)\n\n def volume(self) -> int:\n\n return self.size.prod()\n\n def slice_array(self, array: np.ndarray) -> np.ndarray:\n\n return array[\n self.topleft[0] : self.bottomright[0],\n self.topleft[1] : self.bottomright[1],\n self.topleft[2] : self.bottomright[2],\n ]\n\n def as_slices(self) -> Tuple[slice, slice, slice]:\n return np.index_exp[\n self.topleft[0] : self.bottomright[0],\n self.topleft[1] : self.bottomright[1],\n self.topleft[2] : self.bottomright[2],\n ]\n\n def copy(self) -> \"BoundingBox\":\n\n return BoundingBox(self.topleft.copy(), self.bottomright.copy())\n\n def offset(self, vector: Tuple[int, int, int]) -> \"BoundingBox\":\n\n return BoundingBox(self.topleft + np.array(vector), self.size.copy())\n", "path": "wkcuber/api/bounding_box.py"}]}
3,963
161
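Assuming the `as_named_tuple` method from the diff above is applied, the bidirectional conversion requested in the issue reduces to a one-line round trip in each direction. A usage sketch (requires a wkcuber installation that already includes the patch):

```python
from wkcuber.api.bounding_box import BoundingBox, BoundingBoxNamedTuple

bbox = BoundingBox(topleft=(0, 0, 0), size=(128, 128, 64))

# BoundingBox -> BoundingBoxNamedTuple (direction added by the patch)
nt = bbox.as_named_tuple()
assert isinstance(nt, BoundingBoxNamedTuple)

# BoundingBoxNamedTuple -> BoundingBox (direction that already existed)
assert BoundingBox.from_named_tuple(nt) == bbox
```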
gh_patches_debug_11513
rasdani/github-patches
git_diff
microsoft__nni-5135
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Error: Dispatcher stream error, tuner may have crashed. When using PolicyBasedRL to do nas and set concurrency>1 **Hello guys, i am using PolicyBasedRL strategy to do multi trial NAS experiments, when I set concurrency=2, the tuner crased, and concurrency=1 works just fine. Here is the error message**: ``` [2022-09-08 02:09:28] ERROR: 1 Traceback (most recent call last): File "/home/liangkai/miniconda3/envs/camp/lib/python3.8/site-packages/nni/runtime/msg_dispatcher_base.py", line 97, in command_queue_worker self.process_command(command, data) File "/home/liangkai/miniconda3/envs/camp/lib/python3.8/site-packages/nni/runtime/msg_dispatcher_base.py", line 143, in process_command command_handlers[command](data) File "/home/liangkai/miniconda3/envs/camp/lib/python3.8/site-packages/nni/nas/execution/common/integration.py", line 223, in handle_report_metric_data self.invoke_callback('intermediate_metric', data['parameter_id'], self._process_value(data['value'])) File "/home/liangkai/miniconda3/envs/camp/lib/python3.8/site-packages/nni/nas/execution/common/integration.py", line 119, in invoke_callback self.process_queued_callbacks() File "/home/liangkai/miniconda3/envs/camp/lib/python3.8/site-packages/nni/nas/execution/common/integration.py", line 104, in process_queued_callbacks self.intermediate_metric_callback(*call_args) # pylint: disable=not-callable File "/home/liangkai/miniconda3/envs/camp/lib/python3.8/site-packages/nni/nas/execution/pytorch/graph.py", line 130, in _intermediate_metric_callback model = self._running_models[trial_id] KeyError: 1 [2022-09-08 02:09:29] Dispatcher exiting... [2022-09-08 02:09:30] Dispatcher terminiated Error: Dispatcher stream error, tuner may have crashed. 
at EventEmitter.<anonymous> (/home/liangkai/miniconda3/envs/camp/lib/python3.8/site-packages/nni_node/core/nnimanager.js:647:32) at EventEmitter.emit (node:events:526:28) at WebSocketChannelImpl.handleError (/home/liangkai/miniconda3/envs/camp/lib/python3.8/site-packages/nni_node/core/tuner_command_channel/websocket_channel.js:107:22) at WebSocket.<anonymous> (/home/liangkai/miniconda3/envs/camp/lib/python3.8/site-packages/nni_node/core/tuner_command_channel/websocket_channel.js:41:37) at WebSocket.emit (node:events:538:35) at WebSocket.emitClose (/home/liangkai/miniconda3/envs/camp/lib/python3.8/site-packages/nni_node/node_modules/express-ws/node_modules/ws/lib/websocket.js:246:10) at Socket.socketOnClose (/home/liangkai/miniconda3/envs/camp/lib/python3.8/site-packages/nni_node/node_modules/express-ws/node_modules/ws/lib/websocket.js:1127:15) at Socket.emit (node:events:526:28) at TCP.<anonymous> (node:net:687:12) ``` **Environment**: - NNI version:2.9/2.8 - Training service (local): - Client OS: - Server OS (for remote mode [only):](url) - Python version: 3.8 - PyTorch/TensorFlow version: pytorch 1.21.1 - Is conda/virtualenv/venv used?: conda - Is running in Docker?: no **Configuration**: - Experiment config (remember to remove secrets!): - Search space: **Log message**: - nnimanager.log: [nnimanager.log](https://github.com/microsoft/nni/files/9522943/nnimanager.log) - dispatcher.log: [dispatcher.log](https://github.com/microsoft/nni/files/9522946/dispatcher.log) - nnictl stdout and stderr: <!-- Where can you find the log files: LOG: https://github.com/microsoft/nni/blob/master/docs/en_US/Tutorial/HowToDebug.md#experiment-root-director STDOUT/STDERR: https://nni.readthedocs.io/en/stable/reference/nnictl.html#nnictl-log-stdout --> </issue> <code> [start of nni/nas/strategy/_rl_impl.py] 1 # Copyright (c) Microsoft Corporation. 2 # Licensed under the MIT license. 
3 4 # This file might cause import error for those who didn't install RL-related dependencies 5 6 import logging 7 import threading 8 from multiprocessing.pool import ThreadPool 9 from typing import Tuple 10 11 import gym 12 import numpy as np 13 import tianshou 14 import torch 15 import torch.nn as nn 16 import torch.nn.functional as F 17 18 from gym import spaces 19 from tianshou.data import to_torch 20 from tianshou.env.worker import EnvWorker 21 22 from typing_extensions import TypedDict 23 24 from nni.nas.execution import submit_models, wait_models 25 from nni.nas.execution.common import ModelStatus 26 from .utils import get_targeted_model 27 28 29 _logger = logging.getLogger(__name__) 30 _thread_lock = threading.Lock() 31 32 33 class MultiThreadEnvWorker(EnvWorker): 34 def __init__(self, env_fn): 35 self.env = env_fn() 36 self.pool = ThreadPool(processes=1) 37 super().__init__(env_fn) 38 39 def get_env_attr(self, key): 40 return getattr(self.env, key) 41 42 def set_env_attr(self, key, value): 43 return setattr(self.env, key, value) 44 45 def __getattr__(self, key): 46 if tianshou.__version__ >= '0.4.5': # not a strict check here 47 return super().__getattr__(key) # https://github.com/thu-ml/tianshou/pull/478 48 return getattr(self.env, key) 49 50 def reset(self): 51 return self.env.reset() 52 53 def send(self, action): 54 # for tianshou >= 0.4.6 55 if action is None: 56 self.result = self.pool.apply_async(self.env.reset) 57 else: 58 self.send_action(action) 59 60 def recv(self): 61 # for tianshou >= 0.4.6 62 return self.result.get() 63 64 @staticmethod 65 def wait(*args, **kwargs): 66 raise NotImplementedError('Async collect is not supported yet.') 67 68 def send_action(self, action) -> None: 69 # self.result is actually a handle 70 self.result = self.pool.apply_async(self.env.step, (action,)) 71 72 def get_result(self): 73 return self.result.get() 74 75 def seed(self, seed): 76 super().seed(seed) 77 return self.env.seed(seed) 78 79 def render(self, **kwargs): 80 return self.env.render(**kwargs) 81 82 def close_env(self) -> None: 83 self.pool.terminate() 84 return self.env.close() 85 86 class ObservationType(TypedDict): 87 action_history: np.ndarray 88 cur_step: int 89 action_dim: int 90 91 92 class ModelEvaluationEnv(gym.Env[ObservationType, int]): 93 def __init__(self, base_model, mutators, search_space): 94 self.base_model = base_model 95 self.mutators = mutators 96 self.search_space = search_space 97 self.ss_keys = list(self.search_space.keys()) 98 self.action_dim = max(map(lambda v: len(v), self.search_space.values())) 99 self.num_steps = len(self.search_space) 100 101 @property 102 def observation_space(self): 103 return spaces.Dict({ 104 'action_history': spaces.MultiDiscrete([self.action_dim] * self.num_steps), 105 'cur_step': spaces.Discrete(self.num_steps + 1), 106 'action_dim': spaces.Discrete(self.action_dim + 1) 107 }) 108 109 @property 110 def action_space(self): 111 return spaces.Discrete(self.action_dim) 112 113 def reset(self) -> ObservationType: 114 self.action_history = np.zeros(self.num_steps, dtype=np.int32) 115 self.cur_step = 0 116 self.sample = {} 117 return { 118 'action_history': self.action_history, 119 'cur_step': self.cur_step, 120 'action_dim': len(self.search_space[self.ss_keys[self.cur_step]]) 121 } 122 123 def step(self, action: int) -> Tuple[ObservationType, float, bool, dict]: 124 cur_key = self.ss_keys[self.cur_step] 125 assert action < len(self.search_space[cur_key]), \ 126 f'Current action {action} out of range {self.search_space[cur_key]}.' 
127 self.action_history[self.cur_step] = action 128 self.sample[cur_key] = self.search_space[cur_key][action] 129 self.cur_step += 1 130 obs: ObservationType = { 131 'action_history': self.action_history, 132 'cur_step': self.cur_step, 133 'action_dim': len(self.search_space[self.ss_keys[self.cur_step]]) \ 134 if self.cur_step < self.num_steps else self.action_dim 135 } 136 if self.cur_step == self.num_steps: 137 with _thread_lock: 138 model = get_targeted_model(self.base_model, self.mutators, self.sample) 139 _logger.info(f'New model created: {self.sample}') 140 submit_models(model) 141 wait_models(model) 142 if model.status == ModelStatus.Failed: 143 return self.reset(), 0., False, {} 144 rew = float(model.metric) # type: ignore 145 _logger.info(f'Model metric received as reward: {rew}') 146 return obs, rew, True, {} 147 else: 148 149 return obs, 0., False, {} 150 151 152 class Preprocessor(nn.Module): 153 def __init__(self, obs_space, hidden_dim=64, num_layers=1): 154 super().__init__() 155 self.action_dim = obs_space['action_history'].nvec[0] 156 self.hidden_dim = hidden_dim 157 # first token is [SOS] 158 self.embedding = nn.Embedding(self.action_dim + 1, hidden_dim) 159 self.rnn = nn.LSTM(hidden_dim, hidden_dim, num_layers, batch_first=True) 160 161 def forward(self, obs): 162 seq = F.pad(obs['action_history'] + 1, (1, 1)) # pad the start token and end token 163 # end token is used to avoid out-of-range of v_s_. Will not actually affect BP. 164 seq = self.embedding(seq.long()) 165 feature, _ = self.rnn(seq) 166 return feature[torch.arange(len(feature), device=feature.device), obs['cur_step'].long() + 1] 167 168 169 class Actor(nn.Module): 170 def __init__(self, action_space, preprocess): 171 super().__init__() 172 self.preprocess = preprocess 173 self.action_dim = action_space.n 174 self.linear = nn.Linear(self.preprocess.hidden_dim, self.action_dim) 175 176 def forward(self, obs, **kwargs): 177 obs = to_torch(obs, device=self.linear.weight.device) 178 out = self.linear(self.preprocess(obs)) 179 # to take care of choices with different number of options 180 mask = torch.arange(self.action_dim).expand(len(out), self.action_dim) >= obs['action_dim'].unsqueeze(1) 181 out[mask.to(out.device)] = float('-inf') 182 return F.softmax(out, dim=-1), kwargs.get('state', None) 183 184 185 class Critic(nn.Module): 186 def __init__(self, preprocess): 187 super().__init__() 188 self.preprocess = preprocess 189 self.linear = nn.Linear(self.preprocess.hidden_dim, 1) 190 191 def forward(self, obs, **kwargs): 192 obs = to_torch(obs, device=self.linear.weight.device) 193 return self.linear(self.preprocess(obs)).squeeze(-1) 194 [end of nni/nas/strategy/_rl_impl.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/nni/nas/strategy/_rl_impl.py b/nni/nas/strategy/_rl_impl.py --- a/nni/nas/strategy/_rl_impl.py +++ b/nni/nas/strategy/_rl_impl.py @@ -136,8 +136,8 @@ if self.cur_step == self.num_steps: with _thread_lock: model = get_targeted_model(self.base_model, self.mutators, self.sample) - _logger.info(f'New model created: {self.sample}') - submit_models(model) + _logger.info(f'New model created: {self.sample}') + submit_models(model) wait_models(model) if model.status == ModelStatus.Failed: return self.reset(), 0., False, {}
{"golden_diff": "diff --git a/nni/nas/strategy/_rl_impl.py b/nni/nas/strategy/_rl_impl.py\n--- a/nni/nas/strategy/_rl_impl.py\n+++ b/nni/nas/strategy/_rl_impl.py\n@@ -136,8 +136,8 @@\n if self.cur_step == self.num_steps:\n with _thread_lock:\n model = get_targeted_model(self.base_model, self.mutators, self.sample)\n- _logger.info(f'New model created: {self.sample}')\n- submit_models(model)\n+ _logger.info(f'New model created: {self.sample}')\n+ submit_models(model)\n wait_models(model)\n if model.status == ModelStatus.Failed:\n return self.reset(), 0., False, {}\n", "issue": "Error: Dispatcher stream error, tuner may have crashed. When using PolicyBasedRL to do nas and set concurrency>1\n**Hello guys, i am using PolicyBasedRL strategy to do multi trial NAS experiments, when I set concurrency=2, the tuner crased, and concurrency=1 works just fine. Here is the error message**:\r\n```\r\n[2022-09-08 02:09:28] ERROR: 1\r\nTraceback (most recent call last):\r\n File \"/home/liangkai/miniconda3/envs/camp/lib/python3.8/site-packages/nni/runtime/msg_dispatcher_base.py\", line 97, in command_queue_worker\r\n self.process_command(command, data)\r\n File \"/home/liangkai/miniconda3/envs/camp/lib/python3.8/site-packages/nni/runtime/msg_dispatcher_base.py\", line 143, in process_command\r\n command_handlers[command](data)\r\n File \"/home/liangkai/miniconda3/envs/camp/lib/python3.8/site-packages/nni/nas/execution/common/integration.py\", line 223, in handle_report_metric_data\r\n self.invoke_callback('intermediate_metric', data['parameter_id'], self._process_value(data['value']))\r\n File \"/home/liangkai/miniconda3/envs/camp/lib/python3.8/site-packages/nni/nas/execution/common/integration.py\", line 119, in invoke_callback\r\n self.process_queued_callbacks()\r\n File \"/home/liangkai/miniconda3/envs/camp/lib/python3.8/site-packages/nni/nas/execution/common/integration.py\", line 104, in process_queued_callbacks\r\n self.intermediate_metric_callback(*call_args) # pylint: disable=not-callable\r\n File \"/home/liangkai/miniconda3/envs/camp/lib/python3.8/site-packages/nni/nas/execution/pytorch/graph.py\", line 130, in _intermediate_metric_callback\r\n model = self._running_models[trial_id]\r\nKeyError: 1\r\n[2022-09-08 02:09:29] Dispatcher exiting...\r\n[2022-09-08 02:09:30] Dispatcher terminiated\r\nError: Dispatcher stream error, tuner may have crashed.\r\n at EventEmitter.<anonymous> (/home/liangkai/miniconda3/envs/camp/lib/python3.8/site-packages/nni_node/core/nnimanager.js:647:32)\r\n at EventEmitter.emit (node:events:526:28)\r\n at WebSocketChannelImpl.handleError (/home/liangkai/miniconda3/envs/camp/lib/python3.8/site-packages/nni_node/core/tuner_command_channel/websocket_channel.js:107:22)\r\n at WebSocket.<anonymous> (/home/liangkai/miniconda3/envs/camp/lib/python3.8/site-packages/nni_node/core/tuner_command_channel/websocket_channel.js:41:37)\r\n at WebSocket.emit (node:events:538:35)\r\n at WebSocket.emitClose (/home/liangkai/miniconda3/envs/camp/lib/python3.8/site-packages/nni_node/node_modules/express-ws/node_modules/ws/lib/websocket.js:246:10)\r\n at Socket.socketOnClose (/home/liangkai/miniconda3/envs/camp/lib/python3.8/site-packages/nni_node/node_modules/express-ws/node_modules/ws/lib/websocket.js:1127:15)\r\n at Socket.emit (node:events:526:28)\r\n at TCP.<anonymous> (node:net:687:12)\r\n```\r\n\r\n\r\n**Environment**:\r\n- NNI version:2.9/2.8\r\n- Training service (local):\r\n- Client OS: \r\n- Server OS (for remote mode [only):](url)\r\n- Python version: 3.8\r\n- PyTorch/TensorFlow 
version: pytorch 1.21.1\r\n- Is conda/virtualenv/venv used?: conda\r\n- Is running in Docker?: no\r\n\r\n\r\n**Configuration**:\r\n - Experiment config (remember to remove secrets!):\r\n - Search space:\r\n\r\n\r\n**Log message**:\r\n\r\n - nnimanager.log: [nnimanager.log](https://github.com/microsoft/nni/files/9522943/nnimanager.log)\r\n\r\n - dispatcher.log: [dispatcher.log](https://github.com/microsoft/nni/files/9522946/dispatcher.log)\r\n - nnictl stdout and stderr:\r\n \r\n\r\n\r\n<!--\r\nWhere can you find the log files:\r\nLOG: https://github.com/microsoft/nni/blob/master/docs/en_US/Tutorial/HowToDebug.md#experiment-root-director\r\nSTDOUT/STDERR: https://nni.readthedocs.io/en/stable/reference/nnictl.html#nnictl-log-stdout\r\n-->\r\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT license.\n\n# This file might cause import error for those who didn't install RL-related dependencies\n\nimport logging\nimport threading\nfrom multiprocessing.pool import ThreadPool\nfrom typing import Tuple\n\nimport gym\nimport numpy as np\nimport tianshou\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom gym import spaces\nfrom tianshou.data import to_torch\nfrom tianshou.env.worker import EnvWorker\n\nfrom typing_extensions import TypedDict\n\nfrom nni.nas.execution import submit_models, wait_models\nfrom nni.nas.execution.common import ModelStatus\nfrom .utils import get_targeted_model\n\n\n_logger = logging.getLogger(__name__)\n_thread_lock = threading.Lock()\n\n\nclass MultiThreadEnvWorker(EnvWorker):\n def __init__(self, env_fn):\n self.env = env_fn()\n self.pool = ThreadPool(processes=1)\n super().__init__(env_fn)\n\n def get_env_attr(self, key):\n return getattr(self.env, key)\n\n def set_env_attr(self, key, value):\n return setattr(self.env, key, value)\n\n def __getattr__(self, key):\n if tianshou.__version__ >= '0.4.5': # not a strict check here\n return super().__getattr__(key) # https://github.com/thu-ml/tianshou/pull/478\n return getattr(self.env, key)\n\n def reset(self):\n return self.env.reset()\n\n def send(self, action):\n # for tianshou >= 0.4.6\n if action is None:\n self.result = self.pool.apply_async(self.env.reset)\n else:\n self.send_action(action)\n\n def recv(self):\n # for tianshou >= 0.4.6\n return self.result.get()\n\n @staticmethod\n def wait(*args, **kwargs):\n raise NotImplementedError('Async collect is not supported yet.')\n\n def send_action(self, action) -> None:\n # self.result is actually a handle\n self.result = self.pool.apply_async(self.env.step, (action,))\n\n def get_result(self):\n return self.result.get()\n\n def seed(self, seed):\n super().seed(seed)\n return self.env.seed(seed)\n\n def render(self, **kwargs):\n return self.env.render(**kwargs)\n\n def close_env(self) -> None:\n self.pool.terminate()\n return self.env.close()\n\nclass ObservationType(TypedDict):\n action_history: np.ndarray\n cur_step: int\n action_dim: int\n\n\nclass ModelEvaluationEnv(gym.Env[ObservationType, int]):\n def __init__(self, base_model, mutators, search_space):\n self.base_model = base_model\n self.mutators = mutators\n self.search_space = search_space\n self.ss_keys = list(self.search_space.keys())\n self.action_dim = max(map(lambda v: len(v), self.search_space.values()))\n self.num_steps = len(self.search_space)\n\n @property\n def observation_space(self):\n return spaces.Dict({\n 'action_history': spaces.MultiDiscrete([self.action_dim] * self.num_steps),\n 'cur_step': 
spaces.Discrete(self.num_steps + 1),\n 'action_dim': spaces.Discrete(self.action_dim + 1)\n })\n\n @property\n def action_space(self):\n return spaces.Discrete(self.action_dim)\n\n def reset(self) -> ObservationType:\n self.action_history = np.zeros(self.num_steps, dtype=np.int32)\n self.cur_step = 0\n self.sample = {}\n return {\n 'action_history': self.action_history,\n 'cur_step': self.cur_step,\n 'action_dim': len(self.search_space[self.ss_keys[self.cur_step]])\n }\n\n def step(self, action: int) -> Tuple[ObservationType, float, bool, dict]:\n cur_key = self.ss_keys[self.cur_step]\n assert action < len(self.search_space[cur_key]), \\\n f'Current action {action} out of range {self.search_space[cur_key]}.'\n self.action_history[self.cur_step] = action\n self.sample[cur_key] = self.search_space[cur_key][action]\n self.cur_step += 1\n obs: ObservationType = {\n 'action_history': self.action_history,\n 'cur_step': self.cur_step,\n 'action_dim': len(self.search_space[self.ss_keys[self.cur_step]]) \\\n if self.cur_step < self.num_steps else self.action_dim\n }\n if self.cur_step == self.num_steps:\n with _thread_lock:\n model = get_targeted_model(self.base_model, self.mutators, self.sample)\n _logger.info(f'New model created: {self.sample}')\n submit_models(model)\n wait_models(model)\n if model.status == ModelStatus.Failed:\n return self.reset(), 0., False, {}\n rew = float(model.metric) # type: ignore\n _logger.info(f'Model metric received as reward: {rew}')\n return obs, rew, True, {}\n else:\n\n return obs, 0., False, {}\n\n\nclass Preprocessor(nn.Module):\n def __init__(self, obs_space, hidden_dim=64, num_layers=1):\n super().__init__()\n self.action_dim = obs_space['action_history'].nvec[0]\n self.hidden_dim = hidden_dim\n # first token is [SOS]\n self.embedding = nn.Embedding(self.action_dim + 1, hidden_dim)\n self.rnn = nn.LSTM(hidden_dim, hidden_dim, num_layers, batch_first=True)\n\n def forward(self, obs):\n seq = F.pad(obs['action_history'] + 1, (1, 1)) # pad the start token and end token\n # end token is used to avoid out-of-range of v_s_. Will not actually affect BP.\n seq = self.embedding(seq.long())\n feature, _ = self.rnn(seq)\n return feature[torch.arange(len(feature), device=feature.device), obs['cur_step'].long() + 1]\n\n\nclass Actor(nn.Module):\n def __init__(self, action_space, preprocess):\n super().__init__()\n self.preprocess = preprocess\n self.action_dim = action_space.n\n self.linear = nn.Linear(self.preprocess.hidden_dim, self.action_dim)\n\n def forward(self, obs, **kwargs):\n obs = to_torch(obs, device=self.linear.weight.device)\n out = self.linear(self.preprocess(obs))\n # to take care of choices with different number of options\n mask = torch.arange(self.action_dim).expand(len(out), self.action_dim) >= obs['action_dim'].unsqueeze(1)\n out[mask.to(out.device)] = float('-inf')\n return F.softmax(out, dim=-1), kwargs.get('state', None)\n\n\nclass Critic(nn.Module):\n def __init__(self, preprocess):\n super().__init__()\n self.preprocess = preprocess\n self.linear = nn.Linear(self.preprocess.hidden_dim, 1)\n\n def forward(self, obs, **kwargs):\n obs = to_torch(obs, device=self.linear.weight.device)\n return self.linear(self.preprocess(obs)).squeeze(-1)\n", "path": "nni/nas/strategy/_rl_impl.py"}]}
3,682
173
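The truncated record above ends with the source of `nni/nas/strategy/_rl_impl.py`, whose `MultiThreadEnvWorker` runs each environment step inside a single-thread `ThreadPool` so that several trial evaluations can overlap. A minimal, self-contained sketch of that send/recv pattern is shown below; `ToyEnv` is a hypothetical stand-in for a real `gym` environment and is not part of the NNI code.

```python
# Minimal sketch of the thread-pool-backed env worker pattern used in the
# record above. ToyEnv is an invented stand-in; only the async send/recv
# mechanics mirror MultiThreadEnvWorker.
from multiprocessing.pool import ThreadPool
import random


class ToyEnv:
    """Stand-in environment: step() just returns a random reward."""

    def reset(self):
        return 0.0

    def step(self, action):
        obs, reward, done, info = 0.0, random.random(), True, {}
        return obs, reward, done, info


class ThreadEnvWorker:
    """Runs env.step in a one-thread pool so callers can overlap workers."""

    def __init__(self, env):
        self.env = env
        self.pool = ThreadPool(processes=1)
        self._pending = None

    def send(self, action):
        # Queue the step; recv() collects the result later.
        self._pending = self.pool.apply_async(self.env.step, (action,))

    def recv(self):
        return self._pending.get()

    def close(self):
        self.pool.terminate()


if __name__ == "__main__":
    workers = [ThreadEnvWorker(ToyEnv()) for _ in range(4)]
    for i, w in enumerate(workers):
        w.send(action=i)          # the four steps run concurrently
    rewards = [w.recv()[1] for w in workers]
    print(rewards)
    for w in workers:
        w.close()
```

Tianshou's `EnvWorker` layers more protocol on top (reset handling, seeding, version shims), but the thread-pool hand-off above is the core of what the wrapped class does.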
gh_patches_debug_19813
rasdani/github-patches
git_diff
pyro-ppl__pyro-1855
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> LDA tutorial: incorrect tensor shape when document size is larger than the vocabulary size https://github.com/pyro-ppl/pyro/blob/56c06176763f1676bd586c89b2dc503c64315f0a/examples/lda.py#L101 The filler array of ones should have expanded into the size of the sliced data. This is problematic when the number of words per document is larger than the vocabulary size Try `counts.scatter_add_(0, data[:, ind], torch.tensor(1.).expand(data[:,ind].shape))` instead </issue> <code> [start of examples/lda.py] 1 """ 2 This example demonstrates how to marginalize out discrete assignment variables 3 in a Pyro model. 4 5 Our example model is Latent Dirichlet Allocation. While the model in this 6 example does work, it is not the recommended way of coding up LDA in Pyro. 7 Whereas the model in this example treats documents as vectors of categorical 8 variables (vectors of word ids), it is usually more efficient to treat 9 documents as bags of words (histograms of word counts). 10 """ 11 from __future__ import absolute_import, division, print_function 12 13 import argparse 14 import functools 15 import logging 16 17 import torch 18 from torch import nn 19 from torch.distributions import constraints 20 21 import pyro 22 import pyro.distributions as dist 23 from pyro.infer import SVI, JitTraceEnum_ELBO, TraceEnum_ELBO 24 from pyro.optim import Adam 25 26 logging.basicConfig(format='%(relativeCreated) 9d %(message)s', level=logging.INFO) 27 28 29 # This is a fully generative model of a batch of documents. 30 # data is a [num_words_per_doc, num_documents] shaped array of word ids 31 # (specifically it is not a histogram). We assume in this simple example 32 # that all documents have the same number of words. 33 def model(data=None, args=None, batch_size=None): 34 # Globals. 35 with pyro.plate("topics", args.num_topics): 36 topic_weights = pyro.sample("topic_weights", dist.Gamma(1. / args.num_topics, 1.)) 37 topic_words = pyro.sample("topic_words", 38 dist.Dirichlet(torch.ones(args.num_words) / args.num_words)) 39 40 # Locals. 41 with pyro.plate("documents", args.num_docs) as ind: 42 if data is not None: 43 with pyro.util.ignore_jit_warnings(): 44 assert data.shape == (args.num_words_per_doc, args.num_docs) 45 data = data[:, ind] 46 doc_topics = pyro.sample("doc_topics", dist.Dirichlet(topic_weights)) 47 with pyro.plate("words", args.num_words_per_doc): 48 # The word_topics variable is marginalized out during inference, 49 # achieved by specifying infer={"enumerate": "parallel"} and using 50 # TraceEnum_ELBO for inference. Thus we can ignore this variable in 51 # the guide. 52 word_topics = pyro.sample("word_topics", dist.Categorical(doc_topics), 53 infer={"enumerate": "parallel"}) 54 data = pyro.sample("doc_words", dist.Categorical(topic_words[word_topics]), 55 obs=data) 56 57 return topic_weights, topic_words, data 58 59 60 # We will use amortized inference of the local topic variables, achieved by a 61 # multi-layer perceptron. We'll wrap the guide in an nn.Module. 
62 def make_predictor(args): 63 layer_sizes = ([args.num_words] + 64 [int(s) for s in args.layer_sizes.split('-')] + 65 [args.num_topics]) 66 logging.info('Creating MLP with sizes {}'.format(layer_sizes)) 67 layers = [] 68 for in_size, out_size in zip(layer_sizes, layer_sizes[1:]): 69 layer = nn.Linear(in_size, out_size) 70 layer.weight.data.normal_(0, 0.001) 71 layer.bias.data.normal_(0, 0.001) 72 layers.append(layer) 73 layers.append(nn.Sigmoid()) 74 layers.append(nn.Softmax(dim=-1)) 75 return nn.Sequential(*layers) 76 77 78 def parametrized_guide(predictor, data, args, batch_size=None): 79 # Use a conjugate guide for global variables. 80 topic_weights_posterior = pyro.param( 81 "topic_weights_posterior", 82 lambda: torch.ones(args.num_topics), 83 constraint=constraints.positive) 84 topic_words_posterior = pyro.param( 85 "topic_words_posterior", 86 lambda: torch.ones(args.num_topics, args.num_words), 87 constraint=constraints.greater_than(0.5)) 88 with pyro.plate("topics", args.num_topics): 89 pyro.sample("topic_weights", dist.Gamma(topic_weights_posterior, 1.)) 90 pyro.sample("topic_words", dist.Dirichlet(topic_words_posterior)) 91 92 # Use an amortized guide for local variables. 93 pyro.module("predictor", predictor) 94 with pyro.plate("documents", args.num_docs, batch_size) as ind: 95 # The neural network will operate on histograms rather than word 96 # index vectors, so we'll convert the raw data to a histogram. 97 if torch._C._get_tracing_state(): 98 counts = torch.eye(1024)[data[:, ind]].sum(0).t() 99 else: 100 counts = torch.zeros(args.num_words, ind.size(0)) 101 counts.scatter_add_(0, data[:, ind], torch.tensor(1.).expand(counts.shape)) 102 doc_topics = predictor(counts.transpose(0, 1)) 103 pyro.sample("doc_topics", dist.Delta(doc_topics, event_dim=1)) 104 105 106 def main(args): 107 logging.info('Generating data') 108 pyro.set_rng_seed(0) 109 pyro.clear_param_store() 110 pyro.enable_validation(True) 111 112 # We can generate synthetic data directly by calling the model. 113 true_topic_weights, true_topic_words, data = model(args=args) 114 115 # We'll train using SVI. 
116 logging.info('-' * 40) 117 logging.info('Training on {} documents'.format(args.num_docs)) 118 predictor = make_predictor(args) 119 guide = functools.partial(parametrized_guide, predictor) 120 Elbo = JitTraceEnum_ELBO if args.jit else TraceEnum_ELBO 121 elbo = Elbo(max_plate_nesting=2) 122 optim = Adam({'lr': args.learning_rate}) 123 svi = SVI(model, guide, optim, elbo) 124 logging.info('Step\tLoss') 125 for step in range(args.num_steps): 126 loss = svi.step(data, args=args, batch_size=args.batch_size) 127 if step % 10 == 0: 128 logging.info('{: >5d}\t{}'.format(step, loss)) 129 loss = elbo.loss(model, guide, data, args=args) 130 logging.info('final loss = {}'.format(loss)) 131 132 133 if __name__ == '__main__': 134 assert pyro.__version__.startswith('0.3.3') 135 parser = argparse.ArgumentParser(description="Amortized Latent Dirichlet Allocation") 136 parser.add_argument("-t", "--num-topics", default=8, type=int) 137 parser.add_argument("-w", "--num-words", default=1024, type=int) 138 parser.add_argument("-d", "--num-docs", default=1000, type=int) 139 parser.add_argument("-wd", "--num-words-per-doc", default=64, type=int) 140 parser.add_argument("-n", "--num-steps", default=1000, type=int) 141 parser.add_argument("-l", "--layer-sizes", default="100-100") 142 parser.add_argument("-lr", "--learning-rate", default=0.001, type=float) 143 parser.add_argument("-b", "--batch-size", default=32, type=int) 144 parser.add_argument('--jit', action='store_true') 145 args = parser.parse_args() 146 main(args) 147 [end of examples/lda.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/examples/lda.py b/examples/lda.py --- a/examples/lda.py +++ b/examples/lda.py @@ -92,13 +92,11 @@ # Use an amortized guide for local variables. pyro.module("predictor", predictor) with pyro.plate("documents", args.num_docs, batch_size) as ind: + data = data[:, ind] # The neural network will operate on histograms rather than word # index vectors, so we'll convert the raw data to a histogram. - if torch._C._get_tracing_state(): - counts = torch.eye(1024)[data[:, ind]].sum(0).t() - else: - counts = torch.zeros(args.num_words, ind.size(0)) - counts.scatter_add_(0, data[:, ind], torch.tensor(1.).expand(counts.shape)) + counts = (torch.zeros(args.num_words, ind.size(0)) + .scatter_add(0, data, torch.ones(data.shape))) doc_topics = predictor(counts.transpose(0, 1)) pyro.sample("doc_topics", dist.Delta(doc_topics, event_dim=1))
{"golden_diff": "diff --git a/examples/lda.py b/examples/lda.py\n--- a/examples/lda.py\n+++ b/examples/lda.py\n@@ -92,13 +92,11 @@\n # Use an amortized guide for local variables.\n pyro.module(\"predictor\", predictor)\n with pyro.plate(\"documents\", args.num_docs, batch_size) as ind:\n+ data = data[:, ind]\n # The neural network will operate on histograms rather than word\n # index vectors, so we'll convert the raw data to a histogram.\n- if torch._C._get_tracing_state():\n- counts = torch.eye(1024)[data[:, ind]].sum(0).t()\n- else:\n- counts = torch.zeros(args.num_words, ind.size(0))\n- counts.scatter_add_(0, data[:, ind], torch.tensor(1.).expand(counts.shape))\n+ counts = (torch.zeros(args.num_words, ind.size(0))\n+ .scatter_add(0, data, torch.ones(data.shape)))\n doc_topics = predictor(counts.transpose(0, 1))\n pyro.sample(\"doc_topics\", dist.Delta(doc_topics, event_dim=1))\n", "issue": "LDA tutorial: incorrect tensor shape when document size is larger than the vocabulary size\nhttps://github.com/pyro-ppl/pyro/blob/56c06176763f1676bd586c89b2dc503c64315f0a/examples/lda.py#L101\r\n\r\nThe filler array of ones should have expanded into the size of the sliced data. This is problematic when the number of words per document is larger than the vocabulary size\r\nTry \r\n`counts.scatter_add_(0, data[:, ind], torch.tensor(1.).expand(data[:,ind].shape))`\r\ninstead\n", "before_files": [{"content": "\"\"\"\nThis example demonstrates how to marginalize out discrete assignment variables\nin a Pyro model.\n\nOur example model is Latent Dirichlet Allocation. While the model in this\nexample does work, it is not the recommended way of coding up LDA in Pyro.\nWhereas the model in this example treats documents as vectors of categorical\nvariables (vectors of word ids), it is usually more efficient to treat\ndocuments as bags of words (histograms of word counts).\n\"\"\"\nfrom __future__ import absolute_import, division, print_function\n\nimport argparse\nimport functools\nimport logging\n\nimport torch\nfrom torch import nn\nfrom torch.distributions import constraints\n\nimport pyro\nimport pyro.distributions as dist\nfrom pyro.infer import SVI, JitTraceEnum_ELBO, TraceEnum_ELBO\nfrom pyro.optim import Adam\n\nlogging.basicConfig(format='%(relativeCreated) 9d %(message)s', level=logging.INFO)\n\n\n# This is a fully generative model of a batch of documents.\n# data is a [num_words_per_doc, num_documents] shaped array of word ids\n# (specifically it is not a histogram). We assume in this simple example\n# that all documents have the same number of words.\ndef model(data=None, args=None, batch_size=None):\n # Globals.\n with pyro.plate(\"topics\", args.num_topics):\n topic_weights = pyro.sample(\"topic_weights\", dist.Gamma(1. / args.num_topics, 1.))\n topic_words = pyro.sample(\"topic_words\",\n dist.Dirichlet(torch.ones(args.num_words) / args.num_words))\n\n # Locals.\n with pyro.plate(\"documents\", args.num_docs) as ind:\n if data is not None:\n with pyro.util.ignore_jit_warnings():\n assert data.shape == (args.num_words_per_doc, args.num_docs)\n data = data[:, ind]\n doc_topics = pyro.sample(\"doc_topics\", dist.Dirichlet(topic_weights))\n with pyro.plate(\"words\", args.num_words_per_doc):\n # The word_topics variable is marginalized out during inference,\n # achieved by specifying infer={\"enumerate\": \"parallel\"} and using\n # TraceEnum_ELBO for inference. 
Thus we can ignore this variable in\n # the guide.\n word_topics = pyro.sample(\"word_topics\", dist.Categorical(doc_topics),\n infer={\"enumerate\": \"parallel\"})\n data = pyro.sample(\"doc_words\", dist.Categorical(topic_words[word_topics]),\n obs=data)\n\n return topic_weights, topic_words, data\n\n\n# We will use amortized inference of the local topic variables, achieved by a\n# multi-layer perceptron. We'll wrap the guide in an nn.Module.\ndef make_predictor(args):\n layer_sizes = ([args.num_words] +\n [int(s) for s in args.layer_sizes.split('-')] +\n [args.num_topics])\n logging.info('Creating MLP with sizes {}'.format(layer_sizes))\n layers = []\n for in_size, out_size in zip(layer_sizes, layer_sizes[1:]):\n layer = nn.Linear(in_size, out_size)\n layer.weight.data.normal_(0, 0.001)\n layer.bias.data.normal_(0, 0.001)\n layers.append(layer)\n layers.append(nn.Sigmoid())\n layers.append(nn.Softmax(dim=-1))\n return nn.Sequential(*layers)\n\n\ndef parametrized_guide(predictor, data, args, batch_size=None):\n # Use a conjugate guide for global variables.\n topic_weights_posterior = pyro.param(\n \"topic_weights_posterior\",\n lambda: torch.ones(args.num_topics),\n constraint=constraints.positive)\n topic_words_posterior = pyro.param(\n \"topic_words_posterior\",\n lambda: torch.ones(args.num_topics, args.num_words),\n constraint=constraints.greater_than(0.5))\n with pyro.plate(\"topics\", args.num_topics):\n pyro.sample(\"topic_weights\", dist.Gamma(topic_weights_posterior, 1.))\n pyro.sample(\"topic_words\", dist.Dirichlet(topic_words_posterior))\n\n # Use an amortized guide for local variables.\n pyro.module(\"predictor\", predictor)\n with pyro.plate(\"documents\", args.num_docs, batch_size) as ind:\n # The neural network will operate on histograms rather than word\n # index vectors, so we'll convert the raw data to a histogram.\n if torch._C._get_tracing_state():\n counts = torch.eye(1024)[data[:, ind]].sum(0).t()\n else:\n counts = torch.zeros(args.num_words, ind.size(0))\n counts.scatter_add_(0, data[:, ind], torch.tensor(1.).expand(counts.shape))\n doc_topics = predictor(counts.transpose(0, 1))\n pyro.sample(\"doc_topics\", dist.Delta(doc_topics, event_dim=1))\n\n\ndef main(args):\n logging.info('Generating data')\n pyro.set_rng_seed(0)\n pyro.clear_param_store()\n pyro.enable_validation(True)\n\n # We can generate synthetic data directly by calling the model.\n true_topic_weights, true_topic_words, data = model(args=args)\n\n # We'll train using SVI.\n logging.info('-' * 40)\n logging.info('Training on {} documents'.format(args.num_docs))\n predictor = make_predictor(args)\n guide = functools.partial(parametrized_guide, predictor)\n Elbo = JitTraceEnum_ELBO if args.jit else TraceEnum_ELBO\n elbo = Elbo(max_plate_nesting=2)\n optim = Adam({'lr': args.learning_rate})\n svi = SVI(model, guide, optim, elbo)\n logging.info('Step\\tLoss')\n for step in range(args.num_steps):\n loss = svi.step(data, args=args, batch_size=args.batch_size)\n if step % 10 == 0:\n logging.info('{: >5d}\\t{}'.format(step, loss))\n loss = elbo.loss(model, guide, data, args=args)\n logging.info('final loss = {}'.format(loss))\n\n\nif __name__ == '__main__':\n assert pyro.__version__.startswith('0.3.3')\n parser = argparse.ArgumentParser(description=\"Amortized Latent Dirichlet Allocation\")\n parser.add_argument(\"-t\", \"--num-topics\", default=8, type=int)\n parser.add_argument(\"-w\", \"--num-words\", default=1024, type=int)\n parser.add_argument(\"-d\", \"--num-docs\", default=1000, type=int)\n 
parser.add_argument(\"-wd\", \"--num-words-per-doc\", default=64, type=int)\n parser.add_argument(\"-n\", \"--num-steps\", default=1000, type=int)\n parser.add_argument(\"-l\", \"--layer-sizes\", default=\"100-100\")\n parser.add_argument(\"-lr\", \"--learning-rate\", default=0.001, type=float)\n parser.add_argument(\"-b\", \"--batch-size\", default=32, type=int)\n parser.add_argument('--jit', action='store_true')\n args = parser.parse_args()\n main(args)\n", "path": "examples/lda.py"}]}
2,554
258
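The pyro-ppl/pyro record above turns on a single shape rule: in `scatter_add`, the `src` tensor must be at least as large as the `index` tensor in every dimension, so expanding a scalar to the histogram's shape fails as soon as a document holds more words than the vocabulary. The snippet below is a self-contained illustration of the failing call and of the form used in the golden diff; the sizes are invented for the demo, and only the histogram-building lines of `examples/lda.py` are mirrored, not the full model.

```python
# Word-count histograms with scatter_add: src must match the index (word id)
# tensor, not the histogram being filled. Sizes below are made up.
import torch

num_words = 5            # vocabulary size
num_words_per_doc = 8    # larger than the vocabulary: the failing case
num_docs = 3

data = torch.randint(num_words, (num_words_per_doc, num_docs))  # word ids

# Buggy form (as in the tutorial): src expanded to the histogram's shape.
# With num_words_per_doc > num_words this raises a size-mismatch error.
try:
    counts = torch.zeros(num_words, num_docs)
    counts.scatter_add_(0, data, torch.tensor(1.).expand(counts.shape))
except RuntimeError as exc:
    print("buggy call fails:", exc)

# Fixed form (as in the golden diff): src has the same shape as data.
counts = (torch.zeros(num_words, num_docs)
          .scatter_add(0, data, torch.ones(data.shape)))
print(counts)                        # per-document word histograms
assert counts.sum() == data.numel()  # every word id counted exactly once
```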
gh_patches_debug_34766
rasdani/github-patches
git_diff
bokeh__bokeh-9461
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> bokeh.palettes refguide page header mis-renders Add lots of space, changes font size, for some reason (just this page) https://docs.bokeh.org/en/latest/docs/reference/palettes.html </issue> <code> [start of bokeh/sphinxext/bokeh_palette_group.py] 1 #----------------------------------------------------------------------------- 2 # Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors. 3 # All rights reserved. 4 # 5 # The full license is in the file LICENSE.txt, distributed with this software. 6 #----------------------------------------------------------------------------- 7 ''' Generate visual representations of palettes in Bokeh palette groups. 8 9 The ``bokeh.palettes`` modules expose attributes such as ``mpl``, ``brewer``, 10 and ``d3`` that provide groups of palettes. The ``bokeh-palette-group`` 11 directive accepts the name of one of these groups, and generates a visual 12 matrix of colors for every palette in the group. 13 14 As an example, the following usage of the the directive: 15 16 .. code-block:: rest 17 18 .. bokeh-palette-group:: mpl 19 20 Generates the output: 21 22 .. bokeh-palette-group:: mpl 23 24 ''' 25 26 #----------------------------------------------------------------------------- 27 # Boilerplate 28 #----------------------------------------------------------------------------- 29 import logging # isort:skip 30 log = logging.getLogger(__name__) 31 32 #----------------------------------------------------------------------------- 33 # Imports 34 #----------------------------------------------------------------------------- 35 36 # External imports 37 from docutils import nodes 38 from docutils.parsers.rst import Directive 39 from sphinx.errors import SphinxError 40 41 # Bokeh imports 42 from .. 
import palettes as bp 43 from .templates import PALETTE_GROUP_DETAIL 44 45 #----------------------------------------------------------------------------- 46 # Globals and constants 47 #----------------------------------------------------------------------------- 48 49 __all__ = ( 50 'bokeh_palette_group', 51 'BokehPaletteGroupDirective', 52 'html_visit_bokeh_palette_group', 53 'setup', 54 ) 55 56 #----------------------------------------------------------------------------- 57 # General API 58 #----------------------------------------------------------------------------- 59 60 #----------------------------------------------------------------------------- 61 # Dev API 62 #----------------------------------------------------------------------------- 63 64 class bokeh_palette_group(nodes.General, nodes.Element): 65 pass 66 67 68 class BokehPaletteGroupDirective(Directive): 69 70 has_content = False 71 required_arguments = 1 72 73 def run(self): 74 node = bokeh_palette_group() 75 node['group'] = self.arguments[0] 76 return [node] 77 78 def html_visit_bokeh_palette_group(self, node): 79 self.body.append(_BOOTSTRAP_CSS) 80 self.body.append('<div class="container-fluid"><div class="row">"') 81 group = getattr(bp, node['group'], None) 82 if not isinstance(group, dict): 83 raise SphinxError("invalid palette group name %r" % node['group']) 84 names = sorted(group) 85 for name in names: 86 palettes = group[name] 87 # arbitrary cuttoff here, idea is to not show large (e.g 256 length) palettes 88 numbers = [x for x in sorted(palettes) if x < 30] 89 html = PALETTE_GROUP_DETAIL.render(name=name, numbers=numbers, palettes=palettes) 90 self.body.append(html) 91 self.body.append('</div></div>') 92 self.body.append(_BOOTSTRAP_JS) 93 raise nodes.SkipNode 94 95 def setup(app): 96 ''' Required Sphinx extension setup function. ''' 97 app.add_node(bokeh_palette_group, html=(html_visit_bokeh_palette_group, None)) 98 app.add_directive('bokeh-palette-group', BokehPaletteGroupDirective) 99 100 #----------------------------------------------------------------------------- 101 # Private API 102 #----------------------------------------------------------------------------- 103 104 _BOOTSTRAP_CSS = """ 105 <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.5/css/bootstrap.min.css"> 106 """ 107 108 _BOOTSTRAP_JS = """ 109 <script src="https://ajax.googleapis.com/ajax/libs/jquery/1.11.3/jquery.min.js"></script> 110 <script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.5/js/bootstrap.min.js"></script> 111 """ 112 113 #----------------------------------------------------------------------------- 114 # Code 115 #----------------------------------------------------------------------------- 116 [end of bokeh/sphinxext/bokeh_palette_group.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/bokeh/sphinxext/bokeh_palette_group.py b/bokeh/sphinxext/bokeh_palette_group.py --- a/bokeh/sphinxext/bokeh_palette_group.py +++ b/bokeh/sphinxext/bokeh_palette_group.py @@ -21,6 +21,11 @@ .. bokeh-palette-group:: mpl +.. note:: + This extension assumes both Bootstrap and JQuery are present (which is the + case for the Bokeh documentation theme). If using this theme outside the + Bokeh documentation, be sure to include those resources by hand. + ''' #----------------------------------------------------------------------------- @@ -75,8 +80,9 @@ node['group'] = self.arguments[0] return [node] +# NOTE: This extension now *assumes* both Bootstrap and JQuery are present +# (which is now the case for the Bokeh docs theme). def html_visit_bokeh_palette_group(self, node): - self.body.append(_BOOTSTRAP_CSS) self.body.append('<div class="container-fluid"><div class="row">"') group = getattr(bp, node['group'], None) if not isinstance(group, dict): @@ -89,7 +95,6 @@ html = PALETTE_GROUP_DETAIL.render(name=name, numbers=numbers, palettes=palettes) self.body.append(html) self.body.append('</div></div>') - self.body.append(_BOOTSTRAP_JS) raise nodes.SkipNode def setup(app): @@ -101,15 +106,6 @@ # Private API #----------------------------------------------------------------------------- -_BOOTSTRAP_CSS = """ -<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.5/css/bootstrap.min.css"> -""" - -_BOOTSTRAP_JS = """ -<script src="https://ajax.googleapis.com/ajax/libs/jquery/1.11.3/jquery.min.js"></script> -<script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.5/js/bootstrap.min.js"></script> -""" - #----------------------------------------------------------------------------- # Code #-----------------------------------------------------------------------------
{"golden_diff": "diff --git a/bokeh/sphinxext/bokeh_palette_group.py b/bokeh/sphinxext/bokeh_palette_group.py\n--- a/bokeh/sphinxext/bokeh_palette_group.py\n+++ b/bokeh/sphinxext/bokeh_palette_group.py\n@@ -21,6 +21,11 @@\n \n .. bokeh-palette-group:: mpl\n \n+.. note::\n+ This extension assumes both Bootstrap and JQuery are present (which is the\n+ case for the Bokeh documentation theme). If using this theme outside the\n+ Bokeh documentation, be sure to include those resources by hand.\n+\n '''\n \n #-----------------------------------------------------------------------------\n@@ -75,8 +80,9 @@\n node['group'] = self.arguments[0]\n return [node]\n \n+# NOTE: This extension now *assumes* both Bootstrap and JQuery are present\n+# (which is now the case for the Bokeh docs theme).\n def html_visit_bokeh_palette_group(self, node):\n- self.body.append(_BOOTSTRAP_CSS)\n self.body.append('<div class=\"container-fluid\"><div class=\"row\">\"')\n group = getattr(bp, node['group'], None)\n if not isinstance(group, dict):\n@@ -89,7 +95,6 @@\n html = PALETTE_GROUP_DETAIL.render(name=name, numbers=numbers, palettes=palettes)\n self.body.append(html)\n self.body.append('</div></div>')\n- self.body.append(_BOOTSTRAP_JS)\n raise nodes.SkipNode\n \n def setup(app):\n@@ -101,15 +106,6 @@\n # Private API\n #-----------------------------------------------------------------------------\n \n-_BOOTSTRAP_CSS = \"\"\"\n-<link rel=\"stylesheet\" href=\"https://maxcdn.bootstrapcdn.com/bootstrap/3.3.5/css/bootstrap.min.css\">\n-\"\"\"\n-\n-_BOOTSTRAP_JS = \"\"\"\n-<script src=\"https://ajax.googleapis.com/ajax/libs/jquery/1.11.3/jquery.min.js\"></script>\n-<script src=\"https://maxcdn.bootstrapcdn.com/bootstrap/3.3.5/js/bootstrap.min.js\"></script>\n-\"\"\"\n-\n #-----------------------------------------------------------------------------\n # Code\n #-----------------------------------------------------------------------------\n", "issue": "bokeh.palettes refguide page header mis-renders\nAdd lots of space, changes font size, for some reason (just this page)\r\n\r\nhttps://docs.bokeh.org/en/latest/docs/reference/palettes.html\r\n\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.\n# All rights reserved.\n#\n# The full license is in the file LICENSE.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n''' Generate visual representations of palettes in Bokeh palette groups.\n\nThe ``bokeh.palettes`` modules expose attributes such as ``mpl``, ``brewer``,\nand ``d3`` that provide groups of palettes. The ``bokeh-palette-group``\ndirective accepts the name of one of these groups, and generates a visual\nmatrix of colors for every palette in the group.\n\nAs an example, the following usage of the the directive:\n\n.. code-block:: rest\n\n .. bokeh-palette-group:: mpl\n\nGenerates the output:\n\n .. 
bokeh-palette-group:: mpl\n\n'''\n\n#-----------------------------------------------------------------------------\n# Boilerplate\n#-----------------------------------------------------------------------------\nimport logging # isort:skip\nlog = logging.getLogger(__name__)\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\n\n# External imports\nfrom docutils import nodes\nfrom docutils.parsers.rst import Directive\nfrom sphinx.errors import SphinxError\n\n# Bokeh imports\nfrom .. import palettes as bp\nfrom .templates import PALETTE_GROUP_DETAIL\n\n#-----------------------------------------------------------------------------\n# Globals and constants\n#-----------------------------------------------------------------------------\n\n__all__ = (\n 'bokeh_palette_group',\n 'BokehPaletteGroupDirective',\n 'html_visit_bokeh_palette_group',\n 'setup',\n)\n\n#-----------------------------------------------------------------------------\n# General API\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Dev API\n#-----------------------------------------------------------------------------\n\nclass bokeh_palette_group(nodes.General, nodes.Element):\n pass\n\n\nclass BokehPaletteGroupDirective(Directive):\n\n has_content = False\n required_arguments = 1\n\n def run(self):\n node = bokeh_palette_group()\n node['group'] = self.arguments[0]\n return [node]\n\ndef html_visit_bokeh_palette_group(self, node):\n self.body.append(_BOOTSTRAP_CSS)\n self.body.append('<div class=\"container-fluid\"><div class=\"row\">\"')\n group = getattr(bp, node['group'], None)\n if not isinstance(group, dict):\n raise SphinxError(\"invalid palette group name %r\" % node['group'])\n names = sorted(group)\n for name in names:\n palettes = group[name]\n # arbitrary cuttoff here, idea is to not show large (e.g 256 length) palettes\n numbers = [x for x in sorted(palettes) if x < 30]\n html = PALETTE_GROUP_DETAIL.render(name=name, numbers=numbers, palettes=palettes)\n self.body.append(html)\n self.body.append('</div></div>')\n self.body.append(_BOOTSTRAP_JS)\n raise nodes.SkipNode\n\ndef setup(app):\n ''' Required Sphinx extension setup function. '''\n app.add_node(bokeh_palette_group, html=(html_visit_bokeh_palette_group, None))\n app.add_directive('bokeh-palette-group', BokehPaletteGroupDirective)\n\n#-----------------------------------------------------------------------------\n# Private API\n#-----------------------------------------------------------------------------\n\n_BOOTSTRAP_CSS = \"\"\"\n<link rel=\"stylesheet\" href=\"https://maxcdn.bootstrapcdn.com/bootstrap/3.3.5/css/bootstrap.min.css\">\n\"\"\"\n\n_BOOTSTRAP_JS = \"\"\"\n<script src=\"https://ajax.googleapis.com/ajax/libs/jquery/1.11.3/jquery.min.js\"></script>\n<script src=\"https://maxcdn.bootstrapcdn.com/bootstrap/3.3.5/js/bootstrap.min.js\"></script>\n\"\"\"\n\n#-----------------------------------------------------------------------------\n# Code\n#-----------------------------------------------------------------------------\n", "path": "bokeh/sphinxext/bokeh_palette_group.py"}]}
1,601
459
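The bokeh record above fixes the mis-rendered header by deleting the Bootstrap `<link>`/`<script>` strings the HTML visitor appended, on the assumption that the documentation theme already ships those assets. For reference, a stripped-down sketch of the same directive/node/visitor wiring is shown below, with the visitor emitting only its own markup; the `color-swatch` directive is invented for illustration and is not part of Bokeh.

```python
# Sketch of a Sphinx extension following the pattern the fix leaves in place:
# the HTML visitor writes only the directive's own markup and relies on the
# theme for any CSS framework. "color-swatch" is a made-up directive name.
from docutils import nodes
from docutils.parsers.rst import Directive


class color_swatch(nodes.General, nodes.Element):
    pass


class ColorSwatchDirective(Directive):
    required_arguments = 1  # e.g. a CSS color

    def run(self):
        node = color_swatch()
        node["color"] = self.arguments[0]
        return [node]


def html_visit_color_swatch(self, node):
    # Emit plain markup only; no <link>/<script> tags for external assets.
    self.body.append(
        '<span style="display:inline-block;width:1em;height:1em;'
        'background:%s"></span>' % node["color"]
    )
    raise nodes.SkipNode


def setup(app):
    app.add_node(color_swatch, html=(html_visit_color_swatch, None))
    app.add_directive("color-swatch", ColorSwatchDirective)
    return {"parallel_read_safe": True}
```

The `setup()` shape mirrors the module in the record: register the node with its HTML visitor, register the directive, and leave asset loading to the theme.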
gh_patches_debug_1831
rasdani/github-patches
git_diff
huggingface__accelerate-811
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Error when pickling accelerated optimizers with PyTorch 1.13 ### System Info ```Shell $ pip list Package Version ------------------------ ---------- accelerate 0.13.2 numpy 1.23.4 nvidia-cublas-cu11 11.10.3.66 nvidia-cuda-nvrtc-cu11 11.7.99 nvidia-cuda-runtime-cu11 11.7.99 nvidia-cudnn-cu11 8.5.0.96 packaging 21.3 pip 22.3 psutil 5.9.3 pyparsing 3.0.9 PyYAML 6.0 setuptools 65.5.0 torch 1.13.0 typing_extensions 4.4.0 wheel 0.37.1 ``` Same issue with torch-cpu. OS: Ubuntu 20.04 ### Information - [ ] The official example scripts - [X] My own modified scripts ### Tasks - [ ] One of the scripts in the examples/ folder of Accelerate or an officially supported `no_trainer` script in the `examples` folder of the `transformers` repo (such as `run_no_trainer_glue.py`) - [X] My own task or dataset (give details below) ### Reproduction ```python import pickle import accelerate import torch model = torch.nn.Linear(10, 10) optimizer = torch.optim.SGD(model.parameters(), 0.1) pickle.loads(pickle.dumps(optimizer)) # works accelerator = accelerate.Accelerator(mixed_precision='fp16') optimizer2 = accelerator.prepare(optimizer) pickle.loads(pickle.dumps(optimizer2)) # fails ``` ### Expected behavior I ran into a problem with accelerate and PyTorch 1.13. It appears that optimizers cannot be pickled anymore after being accelerated. When running the attached script, I get: ``` Traceback (most recent call last): File ".../skorch/foo.py", line 12, in <module> pickle.loads(pickle.dumps(optimizer2)) File ".../torch/optim/optimizer.py", line 84, in __setstate__ self.defaults.setdefault('differentiable', False) File ".../accelerate/optimizer.py", line 90, in defaults return self.optimizer.defaults AttributeError: 'AcceleratedOptimizer' object has no attribute 'optimizer' ``` The offending addition on PyTorch seems to be this line: https://github.com/pytorch/pytorch/blob/23fe6c8ca15ec2cf6ea74f93aa91cae343ea534f/torch/optim/optimizer.py#L84 which was not present in PyTorch 1.12. At object creation time, PyTorch now tries to access the `defaults` attribute, which in turn calls the `defaults` property in accelerate, which requires the `optimizer` attribute, which doesn't exist and thus errors. At first glance, it looks like `AcceleratedOptimizer` might need its own `__getstate__` and `__setstate__` to solve this but I'm not sure. </issue> <code> [start of src/accelerate/optimizer.py] 1 # Copyright 2021 The HuggingFace Team. All rights reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 
14 15 import inspect 16 import warnings 17 18 import torch 19 20 from .state import AcceleratorState, GradientState 21 from .utils import DistributedType, honor_type, is_torch_version, is_tpu_available 22 23 24 if is_tpu_available(check_device=False): 25 import torch_xla.core.xla_model as xm 26 27 28 def move_to_device(state, device): 29 if isinstance(state, (list, tuple)): 30 return honor_type(state, (move_to_device(t, device) for t in state)) 31 elif isinstance(state, dict): 32 return type(state)({k: move_to_device(v, device) for k, v in state.items()}) 33 elif isinstance(state, torch.Tensor): 34 return state.to(device) 35 return state 36 37 38 class AcceleratedOptimizer(torch.optim.Optimizer): 39 """ 40 Internal wrapper around a torch optimizer. 41 42 Conditionally will perform `step` and `zero_grad` if gradients should be synchronized when performing gradient 43 accumulation. 44 45 Args: 46 optimizer (`torch.optim.optimizer.Optimizer`): 47 The optimizer to wrap. 48 device_placement (`bool`, *optional*, defaults to `True`): 49 Whether or not the optimizer should handle device placement. If so, it will place the state dictionary of 50 `optimizer` on the right device. 51 scaler (`torch.cuda.amp.grad_scaler.GradScaler`, *optional*): 52 The scaler to use in the step function if training with mixed precision. 53 """ 54 55 def __init__(self, optimizer, device_placement=True, scaler=None): 56 self.optimizer = optimizer 57 self.scaler = scaler 58 self.accelerator_state = AcceleratorState() 59 self.gradient_state = GradientState() 60 self.device_placement = device_placement 61 self._is_overflow = False 62 63 # Handle device placement 64 if device_placement: 65 state_dict = self.optimizer.state_dict() 66 if self.accelerator_state.distributed_type == DistributedType.TPU: 67 xm.send_cpu_data_to_device(state_dict, self.accelerator_state.device) 68 else: 69 state_dict = move_to_device(state_dict, self.accelerator_state.device) 70 self.optimizer.load_state_dict(state_dict) 71 72 @property 73 def state(self): 74 return self.optimizer.state 75 76 @state.setter 77 def state(self, state): 78 self.optimizer.state = state 79 80 @property 81 def param_groups(self): 82 return self.optimizer.param_groups 83 84 @param_groups.setter 85 def param_groups(self, param_groups): 86 self.optimizer.param_groups = param_groups 87 88 @property 89 def defaults(self): 90 return self.optimizer.defaults 91 92 @defaults.setter 93 def defaults(self, defaults): 94 self.optimizer.defaults = defaults 95 96 def add_param_group(self, param_group): 97 self.optimizer.add_param_group(param_group) 98 99 def load_state_dict(self, state_dict): 100 if self.accelerator_state.distributed_type == DistributedType.TPU and self.device_placement: 101 xm.send_cpu_data_to_device(state_dict, self.accelerator_state.device) 102 self.optimizer.load_state_dict(state_dict) 103 104 def state_dict(self): 105 return self.optimizer.state_dict() 106 107 def zero_grad(self, set_to_none=None): 108 if self.gradient_state.sync_gradients: 109 if is_torch_version("<", "1.7.0"): 110 if set_to_none is not None: 111 raise ValueError( 112 "`set_to_none` for Optimizer.zero_grad` was introduced in PyTorch 1.7.0 and can't be used for " 113 f"earlier versions (found version {torch.__version__})." 
114 ) 115 self.optimizer.zero_grad() 116 else: 117 accept_arg = "set_to_none" in inspect.signature(self.optimizer.zero_grad).parameters 118 if accept_arg: 119 if set_to_none is None: 120 set_to_none = False 121 self.optimizer.zero_grad(set_to_none=set_to_none) 122 else: 123 if set_to_none is not None: 124 raise ValueError("`set_to_none` for Optimizer.zero_grad` is not supported by this optimizer.") 125 self.optimizer.zero_grad() 126 127 def step(self, closure=None): 128 if self.gradient_state.sync_gradients: 129 if self.accelerator_state.distributed_type == DistributedType.TPU: 130 optimizer_args = {"closure": closure} if closure is not None else {} 131 xm.optimizer_step(self.optimizer, optimizer_args=optimizer_args) 132 elif self.scaler is not None: 133 scale_before = self.scaler.get_scale() 134 self.scaler.step(self.optimizer, closure) 135 self.scaler.update() 136 scale_after = self.scaler.get_scale() 137 # If we reduced the loss scale, it means the optimizer step was skipped because of gradient overflow. 138 self._is_overflow = scale_after < scale_before 139 else: 140 self.optimizer.step(closure) 141 142 def _switch_parameters(self, parameters_map): 143 for param_group in self.optimizer.param_groups: 144 param_group["params"] = [parameters_map.get(p, p) for p in param_group["params"]] 145 146 @property 147 def is_overflow(self): 148 """Whether or not the optimizer step was done, or skipped because of gradient overflow.""" 149 warnings.warn( 150 "The `is_overflow` property is deprecated and will be removed in version 1.0 of Accelerate use " 151 "`optimizer.step_was_skipped` instead.", 152 FutureWarning, 153 ) 154 return self._is_overflow 155 156 @property 157 def step_was_skipped(self): 158 """Whether or not the optimizer step was skipped.""" 159 return self._is_overflow 160 [end of src/accelerate/optimizer.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/accelerate/optimizer.py b/src/accelerate/optimizer.py --- a/src/accelerate/optimizer.py +++ b/src/accelerate/optimizer.py @@ -157,3 +157,9 @@ def step_was_skipped(self): """Whether or not the optimizer step was skipped.""" return self._is_overflow + + def __getstate__(self): + return self.__dict__.copy() + + def __setstate__(self, state): + self.__dict__.update(state)
{"golden_diff": "diff --git a/src/accelerate/optimizer.py b/src/accelerate/optimizer.py\n--- a/src/accelerate/optimizer.py\n+++ b/src/accelerate/optimizer.py\n@@ -157,3 +157,9 @@\n def step_was_skipped(self):\n \"\"\"Whether or not the optimizer step was skipped.\"\"\"\n return self._is_overflow\n+\n+ def __getstate__(self):\n+ return self.__dict__.copy()\n+\n+ def __setstate__(self, state):\n+ self.__dict__.update(state)\n", "issue": "Error when pickling accelerated optimizers with PyTorch 1.13\n### System Info\r\n\r\n```Shell\r\n$ pip list\r\nPackage Version\r\n------------------------ ----------\r\naccelerate 0.13.2\r\nnumpy 1.23.4\r\nnvidia-cublas-cu11 11.10.3.66\r\nnvidia-cuda-nvrtc-cu11 11.7.99\r\nnvidia-cuda-runtime-cu11 11.7.99\r\nnvidia-cudnn-cu11 8.5.0.96\r\npackaging 21.3\r\npip 22.3\r\npsutil 5.9.3\r\npyparsing 3.0.9\r\nPyYAML 6.0\r\nsetuptools 65.5.0\r\ntorch 1.13.0\r\ntyping_extensions 4.4.0\r\nwheel 0.37.1\r\n```\r\n\r\nSame issue with torch-cpu.\r\n\r\nOS: Ubuntu 20.04\r\n\r\n### Information\r\n\r\n- [ ] The official example scripts\r\n- [X] My own modified scripts\r\n\r\n### Tasks\r\n\r\n- [ ] One of the scripts in the examples/ folder of Accelerate or an officially supported `no_trainer` script in the `examples` folder of the `transformers` repo (such as `run_no_trainer_glue.py`)\r\n- [X] My own task or dataset (give details below)\r\n\r\n### Reproduction\r\n\r\n```python\r\nimport pickle\r\nimport accelerate\r\nimport torch\r\n\r\nmodel = torch.nn.Linear(10, 10)\r\noptimizer = torch.optim.SGD(model.parameters(), 0.1)\r\npickle.loads(pickle.dumps(optimizer)) # works\r\n\r\naccelerator = accelerate.Accelerator(mixed_precision='fp16')\r\noptimizer2 = accelerator.prepare(optimizer)\r\npickle.loads(pickle.dumps(optimizer2)) # fails\r\n```\r\n\r\n### Expected behavior\r\n\r\nI ran into a problem with accelerate and PyTorch 1.13. It appears that optimizers cannot be pickled anymore after being accelerated. When running the attached script, I get:\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \".../skorch/foo.py\", line 12, in <module>\r\n pickle.loads(pickle.dumps(optimizer2))\r\n File \".../torch/optim/optimizer.py\", line 84, in __setstate__\r\n self.defaults.setdefault('differentiable', False)\r\n File \".../accelerate/optimizer.py\", line 90, in defaults\r\n return self.optimizer.defaults\r\nAttributeError: 'AcceleratedOptimizer' object has no attribute 'optimizer'\r\n```\r\n\r\nThe offending addition on PyTorch seems to be this line:\r\n\r\nhttps://github.com/pytorch/pytorch/blob/23fe6c8ca15ec2cf6ea74f93aa91cae343ea534f/torch/optim/optimizer.py#L84\r\n\r\nwhich was not present in PyTorch 1.12. At object creation time, PyTorch now tries to access the `defaults` attribute, which in turn calls the `defaults` property in accelerate, which requires the `optimizer` attribute, which doesn't exist and thus errors. At first glance, it looks like `AcceleratedOptimizer` might need its own `__getstate__` and `__setstate__` to solve this but I'm not sure.\r\n\n", "before_files": [{"content": "# Copyright 2021 The HuggingFace Team. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport inspect\nimport warnings\n\nimport torch\n\nfrom .state import AcceleratorState, GradientState\nfrom .utils import DistributedType, honor_type, is_torch_version, is_tpu_available\n\n\nif is_tpu_available(check_device=False):\n import torch_xla.core.xla_model as xm\n\n\ndef move_to_device(state, device):\n if isinstance(state, (list, tuple)):\n return honor_type(state, (move_to_device(t, device) for t in state))\n elif isinstance(state, dict):\n return type(state)({k: move_to_device(v, device) for k, v in state.items()})\n elif isinstance(state, torch.Tensor):\n return state.to(device)\n return state\n\n\nclass AcceleratedOptimizer(torch.optim.Optimizer):\n \"\"\"\n Internal wrapper around a torch optimizer.\n\n Conditionally will perform `step` and `zero_grad` if gradients should be synchronized when performing gradient\n accumulation.\n\n Args:\n optimizer (`torch.optim.optimizer.Optimizer`):\n The optimizer to wrap.\n device_placement (`bool`, *optional*, defaults to `True`):\n Whether or not the optimizer should handle device placement. If so, it will place the state dictionary of\n `optimizer` on the right device.\n scaler (`torch.cuda.amp.grad_scaler.GradScaler`, *optional*):\n The scaler to use in the step function if training with mixed precision.\n \"\"\"\n\n def __init__(self, optimizer, device_placement=True, scaler=None):\n self.optimizer = optimizer\n self.scaler = scaler\n self.accelerator_state = AcceleratorState()\n self.gradient_state = GradientState()\n self.device_placement = device_placement\n self._is_overflow = False\n\n # Handle device placement\n if device_placement:\n state_dict = self.optimizer.state_dict()\n if self.accelerator_state.distributed_type == DistributedType.TPU:\n xm.send_cpu_data_to_device(state_dict, self.accelerator_state.device)\n else:\n state_dict = move_to_device(state_dict, self.accelerator_state.device)\n self.optimizer.load_state_dict(state_dict)\n\n @property\n def state(self):\n return self.optimizer.state\n\n @state.setter\n def state(self, state):\n self.optimizer.state = state\n\n @property\n def param_groups(self):\n return self.optimizer.param_groups\n\n @param_groups.setter\n def param_groups(self, param_groups):\n self.optimizer.param_groups = param_groups\n\n @property\n def defaults(self):\n return self.optimizer.defaults\n\n @defaults.setter\n def defaults(self, defaults):\n self.optimizer.defaults = defaults\n\n def add_param_group(self, param_group):\n self.optimizer.add_param_group(param_group)\n\n def load_state_dict(self, state_dict):\n if self.accelerator_state.distributed_type == DistributedType.TPU and self.device_placement:\n xm.send_cpu_data_to_device(state_dict, self.accelerator_state.device)\n self.optimizer.load_state_dict(state_dict)\n\n def state_dict(self):\n return self.optimizer.state_dict()\n\n def zero_grad(self, set_to_none=None):\n if self.gradient_state.sync_gradients:\n if is_torch_version(\"<\", \"1.7.0\"):\n if set_to_none is 
not None:\n raise ValueError(\n \"`set_to_none` for Optimizer.zero_grad` was introduced in PyTorch 1.7.0 and can't be used for \"\n f\"earlier versions (found version {torch.__version__}).\"\n )\n self.optimizer.zero_grad()\n else:\n accept_arg = \"set_to_none\" in inspect.signature(self.optimizer.zero_grad).parameters\n if accept_arg:\n if set_to_none is None:\n set_to_none = False\n self.optimizer.zero_grad(set_to_none=set_to_none)\n else:\n if set_to_none is not None:\n raise ValueError(\"`set_to_none` for Optimizer.zero_grad` is not supported by this optimizer.\")\n self.optimizer.zero_grad()\n\n def step(self, closure=None):\n if self.gradient_state.sync_gradients:\n if self.accelerator_state.distributed_type == DistributedType.TPU:\n optimizer_args = {\"closure\": closure} if closure is not None else {}\n xm.optimizer_step(self.optimizer, optimizer_args=optimizer_args)\n elif self.scaler is not None:\n scale_before = self.scaler.get_scale()\n self.scaler.step(self.optimizer, closure)\n self.scaler.update()\n scale_after = self.scaler.get_scale()\n # If we reduced the loss scale, it means the optimizer step was skipped because of gradient overflow.\n self._is_overflow = scale_after < scale_before\n else:\n self.optimizer.step(closure)\n\n def _switch_parameters(self, parameters_map):\n for param_group in self.optimizer.param_groups:\n param_group[\"params\"] = [parameters_map.get(p, p) for p in param_group[\"params\"]]\n\n @property\n def is_overflow(self):\n \"\"\"Whether or not the optimizer step was done, or skipped because of gradient overflow.\"\"\"\n warnings.warn(\n \"The `is_overflow` property is deprecated and will be removed in version 1.0 of Accelerate use \"\n \"`optimizer.step_was_skipped` instead.\",\n FutureWarning,\n )\n return self._is_overflow\n\n @property\n def step_was_skipped(self):\n \"\"\"Whether or not the optimizer step was skipped.\"\"\"\n return self._is_overflow\n", "path": "src/accelerate/optimizer.py"}]}
2,942
120
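The accelerate record above comes down to attribute-restore order: PyTorch 1.13's `Optimizer.__setstate__` reads `self.defaults`, which on the wrapper is a property delegating to an attribute that pickle has not restored, because the inherited `__getstate__` only saves a few selected fields. The sketch below reproduces those mechanics with plain Python classes; `BaseOptimizer`, `BrokenWrapper` and `FixedWrapper` are invented stand-ins rather than the real torch/accelerate classes, and serve only to show why the two methods added in the golden diff are enough.

```python
# Pure-Python reproduction of the pickling failure and its fix. The classes
# are stand-ins: BaseOptimizer plays torch.optim.Optimizer, BrokenWrapper the
# pre-fix AcceleratedOptimizer, FixedWrapper the post-fix version.
import pickle


class BaseOptimizer:
    def __init__(self, defaults):
        self.defaults = defaults

    def __getstate__(self):
        return {"defaults": self.defaults}          # keeps selected fields only

    def __setstate__(self, state):
        self.__dict__.update(state)
        self.defaults.setdefault("differentiable", False)  # touches `defaults`


class BrokenWrapper(BaseOptimizer):
    def __init__(self, inner):
        self.inner = inner                # the real optimizer being wrapped

    @property
    def defaults(self):                   # delegation needs self.inner to exist
        return self.inner.defaults

    @defaults.setter
    def defaults(self, value):
        self.inner.defaults = value


class FixedWrapper(BrokenWrapper):        # the golden diff's two extra methods
    def __getstate__(self):
        return self.__dict__.copy()

    def __setstate__(self, state):
        self.__dict__.update(state)


inner = BaseOptimizer({"lr": 0.1})

try:
    pickle.loads(pickle.dumps(BrokenWrapper(inner)))
except AttributeError as exc:
    print("broken wrapper:", exc)         # ... has no attribute 'inner'

print("fixed wrapper:", pickle.loads(pickle.dumps(FixedWrapper(inner))).defaults)
```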
gh_patches_debug_5768
rasdani/github-patches
git_diff
LMFDB__lmfdb-5112
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Problem with zeta zeroes download script The python script at https://github.com/LMFDB/lmfdb/blob/master/lmfdb/zeros/zeta/platt_zeros.py created by David Platt to parse downloaded zeta zeros files seems to no longer work correctly, possibly because of missing a file `index.db` that should be in the download directory at https://beta.lmfdb.org/data/riemann-zeta-zeros/. One can work around this problem by applying the following changes Change #1 - line 70 in the code was " infile.seek(offset, 0)" - changed line 70 to " infile.seek(offset, 1)" Change #2 - using sqlite3, create table "zero_index" - in table "zero_index" insert columns "t, N, filename, offset, block_number" - add a row to the zero_index table for each file you want to read (e.g. "1, 1, 'zeros_14.dat', 0, 1" for the first file) We should recover or recreate the missing index file and/or modify the script platt_zeros.py as required so that it works correctly without any changes. </issue> <code> [start of lmfdb/zeros/zeta/zetazeros.py] 1 2 from mpmath import nstr, inf 3 from sage.all import floor, log 4 from lmfdb.logger import make_logger 5 from flask import render_template, request, url_for, Blueprint, Response 6 7 ZetaZeros = Blueprint("zeta zeros", __name__, template_folder="templates") 8 logger = make_logger(ZetaZeros) 9 10 from .platt_zeros import zeros_starting_at_N, zeros_starting_at_t 11 12 credit_string = "David Platt" 13 14 15 def learnmore_list(): 16 return [('Completeness of the data', url_for(".completeness")), 17 ('Source of the data', url_for(".source")), 18 ('Reliability of the data', url_for(".reliability"))] 19 20 def friends_list(): 21 return [('L-function', url_for("l_functions.l_function_riemann_page"))] 22 23 def downloads(): 24 return [('Bulk download', "https://beta.lmfdb.org/data/riemann-zeta-zeros/")] 25 26 # Return the learnmore list with the matchstring entry removed 27 def learnmore_list_remove(matchstring): 28 return [t for t in learnmore_list() if t[0].find(matchstring) < 0] 29 30 31 @ZetaZeros.route("/") 32 def zetazeros(): 33 N = request.args.get("N", None, int) 34 t = request.args.get("t", 0, float) 35 limit = request.args.get("limit", 100, int) 36 if limit > 1000: 37 return list_zeros(N=N, t=t, limit=limit) 38 else: 39 title = r"Zeros of $\zeta(s)$" 40 bread = [("L-functions", url_for("l_functions.index")), (r'Zeros of $\zeta(s)$', ' ')] 41 return render_template('zeta.html', N=N, t=t, limit=limit, title=title, bread=bread, learnmore=learnmore_list(), friends=friends_list(), downloads=downloads()) 42 43 44 @ZetaZeros.route("/Completeness") 45 def completeness(): 46 t = 'Completeness of Riemann zeta zeros data' 47 bread = [("L-functions", url_for("l_functions.index")),(r"Zeros of $\zeta(s)$", url_for(".zetazeros")),('Completeness', ' ')] 48 return render_template("single.html", kid='rcs.cande.zeros.zeta', credit=credit_string, title=t, bread=bread, learnmore=learnmore_list_remove('Completeness')) 49 50 @ZetaZeros.route("/Source") 51 def source(): 52 t = 'Source of Riemann zeta zeros data' 53 bread = [("L-functions", url_for("l_functions.index")),(r"Zeros of $\zeta(s)$", url_for(".zetazeros")),('Source', ' ')] 54 return render_template("single.html", kid='rcs.source.zeros.zeta', credit=credit_string, title=t, bread=bread, learnmore=learnmore_list_remove('Source')) 55 56 @ZetaZeros.route("/Reliability") 57 def reliability(): 58 t = 'Reliability of Riemann zeta zeros data' 59 bread = 
[("L-functions", url_for("l_functions.index")),(r"Zeros of $\zeta(s)$", url_for(".zetazeros")),('Reliability', ' ')] 60 return render_template("single.html", kid='rcs.rigor.zeros.zeta', credit=credit_string, title=t, bread=bread, learnmore=learnmore_list_remove('Reliability')) 61 62 @ZetaZeros.route("/list") 63 def list_zeros(N=None, 64 t=None, 65 limit=None, 66 fmt=None, 67 download=None): 68 if N is None: 69 N = request.args.get("N", None, int) 70 if t is None: 71 t = request.args.get("t", 0, float) 72 if limit is None: 73 limit = request.args.get("limit", 100, int) 74 if fmt is None: 75 fmt = request.args.get("format", "plain") 76 if download is None: 77 download = request.args.get("download", "no") 78 79 if limit < 0: 80 limit = 100 81 if N is not None: # None is < 0!! WHAT THE WHAT! 82 if N < 0: 83 N = 0 84 if t < 0: 85 t = 0 86 87 if limit > 100000: 88 # limit = 100000 89 # 90 bread = [("L-functions", url_for("l_functions.index")),(r"Zeros of $\zeta(s)$", url_for(".zetazeros"))] 91 return render_template('single.html', title="Too many zeros", bread=bread, kid = "dq.zeros.zeta.toomany") 92 93 if N is not None: 94 zeros = zeros_starting_at_N(N, limit) 95 else: 96 zeros = zeros_starting_at_t(t, limit) 97 98 if fmt == 'plain': 99 response = Response(("%d %s\n" % (n, nstr(z,31+floor(log(z,10))+1,strip_zeros=False,min_fixed=-inf,max_fixed=+inf)) for (n, z) in zeros)) 100 response.headers['content-type'] = 'text/plain' 101 if download == "yes": 102 response.headers['content-disposition'] = 'attachment; filename=zetazeros' 103 else: 104 response = str(list(zeros)) 105 106 return response 107 [end of lmfdb/zeros/zeta/zetazeros.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/lmfdb/zeros/zeta/zetazeros.py b/lmfdb/zeros/zeta/zetazeros.py --- a/lmfdb/zeros/zeta/zetazeros.py +++ b/lmfdb/zeros/zeta/zetazeros.py @@ -21,7 +21,7 @@ return [('L-function', url_for("l_functions.l_function_riemann_page"))] def downloads(): - return [('Bulk download', "https://beta.lmfdb.org/data/riemann-zeta-zeros/")] + return [('Bulk download', "https://beta.lmfdb.org/riemann-zeta-zeros/")] # Return the learnmore list with the matchstring entry removed def learnmore_list_remove(matchstring):
{"golden_diff": "diff --git a/lmfdb/zeros/zeta/zetazeros.py b/lmfdb/zeros/zeta/zetazeros.py\n--- a/lmfdb/zeros/zeta/zetazeros.py\n+++ b/lmfdb/zeros/zeta/zetazeros.py\n@@ -21,7 +21,7 @@\n return [('L-function', url_for(\"l_functions.l_function_riemann_page\"))]\n \n def downloads():\n- return [('Bulk download', \"https://beta.lmfdb.org/data/riemann-zeta-zeros/\")]\n+ return [('Bulk download', \"https://beta.lmfdb.org/riemann-zeta-zeros/\")]\n \n # Return the learnmore list with the matchstring entry removed\n def learnmore_list_remove(matchstring):\n", "issue": "Problem with zeta zeroes download script\nThe python script at https://github.com/LMFDB/lmfdb/blob/master/lmfdb/zeros/zeta/platt_zeros.py created by David Platt to parse downloaded zeta zeros files seems to no longer work correctly, possibly because of missing a file `index.db` that should be in the download directory at https://beta.lmfdb.org/data/riemann-zeta-zeros/.\r\n\r\nOne can work around this problem by applying the following changes\r\n\r\nChange #1\r\n- line 70 in the code was \" infile.seek(offset, 0)\"\r\n- changed line 70 to \" infile.seek(offset, 1)\"\r\n\r\nChange #2\r\n- using sqlite3, create table \"zero_index\"\r\n- in table \"zero_index\" insert columns \"t, N, filename, offset, block_number\"\r\n- add a row to the zero_index table for each file you want to read (e.g. \"1, 1, 'zeros_14.dat', 0, 1\" for the first file)\r\n\r\nWe should recover or recreate the missing index file and/or modify the script platt_zeros.py as required so that it works correctly without any changes.\r\n\n", "before_files": [{"content": "\nfrom mpmath import nstr, inf\nfrom sage.all import floor, log\nfrom lmfdb.logger import make_logger\nfrom flask import render_template, request, url_for, Blueprint, Response\n\nZetaZeros = Blueprint(\"zeta zeros\", __name__, template_folder=\"templates\")\nlogger = make_logger(ZetaZeros)\n\nfrom .platt_zeros import zeros_starting_at_N, zeros_starting_at_t\n\ncredit_string = \"David Platt\"\n\n\ndef learnmore_list():\n return [('Completeness of the data', url_for(\".completeness\")),\n ('Source of the data', url_for(\".source\")),\n ('Reliability of the data', url_for(\".reliability\"))]\n\ndef friends_list():\n return [('L-function', url_for(\"l_functions.l_function_riemann_page\"))]\n\ndef downloads():\n return [('Bulk download', \"https://beta.lmfdb.org/data/riemann-zeta-zeros/\")]\n\n# Return the learnmore list with the matchstring entry removed\ndef learnmore_list_remove(matchstring):\n return [t for t in learnmore_list() if t[0].find(matchstring) < 0]\n\n\[email protected](\"/\")\ndef zetazeros():\n N = request.args.get(\"N\", None, int)\n t = request.args.get(\"t\", 0, float)\n limit = request.args.get(\"limit\", 100, int)\n if limit > 1000:\n return list_zeros(N=N, t=t, limit=limit)\n else:\n title = r\"Zeros of $\\zeta(s)$\"\n bread = [(\"L-functions\", url_for(\"l_functions.index\")), (r'Zeros of $\\zeta(s)$', ' ')]\n return render_template('zeta.html', N=N, t=t, limit=limit, title=title, bread=bread, learnmore=learnmore_list(), friends=friends_list(), downloads=downloads())\n\n\[email protected](\"/Completeness\")\ndef completeness():\n t = 'Completeness of Riemann zeta zeros data'\n bread = [(\"L-functions\", url_for(\"l_functions.index\")),(r\"Zeros of $\\zeta(s)$\", url_for(\".zetazeros\")),('Completeness', ' ')]\n return render_template(\"single.html\", kid='rcs.cande.zeros.zeta', credit=credit_string, title=t, bread=bread, learnmore=learnmore_list_remove('Completeness'))\n\[email 
protected](\"/Source\")\ndef source():\n t = 'Source of Riemann zeta zeros data'\n bread = [(\"L-functions\", url_for(\"l_functions.index\")),(r\"Zeros of $\\zeta(s)$\", url_for(\".zetazeros\")),('Source', ' ')]\n return render_template(\"single.html\", kid='rcs.source.zeros.zeta', credit=credit_string, title=t, bread=bread, learnmore=learnmore_list_remove('Source'))\n\[email protected](\"/Reliability\")\ndef reliability():\n t = 'Reliability of Riemann zeta zeros data'\n bread = [(\"L-functions\", url_for(\"l_functions.index\")),(r\"Zeros of $\\zeta(s)$\", url_for(\".zetazeros\")),('Reliability', ' ')]\n return render_template(\"single.html\", kid='rcs.rigor.zeros.zeta', credit=credit_string, title=t, bread=bread, learnmore=learnmore_list_remove('Reliability'))\n\[email protected](\"/list\")\ndef list_zeros(N=None,\n t=None,\n limit=None,\n fmt=None,\n download=None):\n if N is None:\n N = request.args.get(\"N\", None, int)\n if t is None:\n t = request.args.get(\"t\", 0, float)\n if limit is None:\n limit = request.args.get(\"limit\", 100, int)\n if fmt is None:\n fmt = request.args.get(\"format\", \"plain\")\n if download is None:\n download = request.args.get(\"download\", \"no\")\n\n if limit < 0:\n limit = 100\n if N is not None: # None is < 0!! WHAT THE WHAT!\n if N < 0:\n N = 0\n if t < 0:\n t = 0\n\n if limit > 100000:\n # limit = 100000\n #\n bread = [(\"L-functions\", url_for(\"l_functions.index\")),(r\"Zeros of $\\zeta(s)$\", url_for(\".zetazeros\"))]\n return render_template('single.html', title=\"Too many zeros\", bread=bread, kid = \"dq.zeros.zeta.toomany\")\n\n if N is not None:\n zeros = zeros_starting_at_N(N, limit)\n else:\n zeros = zeros_starting_at_t(t, limit)\n\n if fmt == 'plain':\n response = Response((\"%d %s\\n\" % (n, nstr(z,31+floor(log(z,10))+1,strip_zeros=False,min_fixed=-inf,max_fixed=+inf)) for (n, z) in zeros))\n response.headers['content-type'] = 'text/plain'\n if download == \"yes\":\n response.headers['content-disposition'] = 'attachment; filename=zetazeros'\n else:\n response = str(list(zeros))\n\n return response\n", "path": "lmfdb/zeros/zeta/zetazeros.py"}]}
2184
166
gh_patches_debug_10670
rasdani/github-patches
git_diff
freedomofpress__securedrop-4346
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Update SQLAlchemy to 1.3.0 ## Description Multiple vulnerabilities were reported in SQLAlchemy <1.3.0: - CVE-2019-7164 (moderate) : SQLAlchemy through 1.2.17 and 1.3.x through 1.3.0b2 allows SQL Injection via the order_by parameter. - CVE-2019-7548 (moderate) : SQLAlchemy 1.2.17 has SQL Injection when the group_by parameter can be controlled. We should update this dependency ASAP for the SecureDrop application. </issue> <code> [start of install_files/ansible-base/callback_plugins/ansible_version_check.py] 1 # -*- encoding:utf-8 -*- 2 from __future__ import absolute_import, division, print_function, \ 3 unicode_literals 4 5 import sys 6 7 import ansible 8 9 try: 10 # Version 2.0+ 11 from ansible.plugins.callback import CallbackBase 12 except ImportError: 13 CallbackBase = object 14 15 16 def print_red_bold(text): 17 print('\x1b[31;1m' + text + '\x1b[0m') 18 19 20 class CallbackModule(CallbackBase): 21 def __init__(self): 22 # Can't use `on_X` because this isn't forwards compatible 23 # with Ansible 2.0+ 24 required_version = '2.6.8' # Keep synchronized with requirements files 25 if not ansible.__version__.startswith(required_version): 26 print_red_bold( 27 "SecureDrop restriction: only Ansible {version}.*" 28 "is supported." 29 .format(version=required_version) 30 ) 31 sys.exit(1) 32 [end of install_files/ansible-base/callback_plugins/ansible_version_check.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/install_files/ansible-base/callback_plugins/ansible_version_check.py b/install_files/ansible-base/callback_plugins/ansible_version_check.py --- a/install_files/ansible-base/callback_plugins/ansible_version_check.py +++ b/install_files/ansible-base/callback_plugins/ansible_version_check.py @@ -21,7 +21,7 @@ def __init__(self): # Can't use `on_X` because this isn't forwards compatible # with Ansible 2.0+ - required_version = '2.6.8' # Keep synchronized with requirements files + required_version = '2.6.14' # Keep synchronized with requirements files if not ansible.__version__.startswith(required_version): print_red_bold( "SecureDrop restriction: only Ansible {version}.*"
{"golden_diff": "diff --git a/install_files/ansible-base/callback_plugins/ansible_version_check.py b/install_files/ansible-base/callback_plugins/ansible_version_check.py\n--- a/install_files/ansible-base/callback_plugins/ansible_version_check.py\n+++ b/install_files/ansible-base/callback_plugins/ansible_version_check.py\n@@ -21,7 +21,7 @@\n def __init__(self):\n # Can't use `on_X` because this isn't forwards compatible\n # with Ansible 2.0+\n- required_version = '2.6.8' # Keep synchronized with requirements files\n+ required_version = '2.6.14' # Keep synchronized with requirements files\n if not ansible.__version__.startswith(required_version):\n print_red_bold(\n \"SecureDrop restriction: only Ansible {version}.*\"\n", "issue": "Update SQLAlchemy to 1.3.0\n## Description\r\n\r\nMultiple vulnerabilities were reported in SQLAlchemy <1.3.0:\r\n- CVE-2019-7164 (moderate) : SQLAlchemy through 1.2.17 and 1.3.x through 1.3.0b2 allows SQL Injection via the order_by parameter.\r\n- CVE-2019-7548 (moderate) : SQLAlchemy 1.2.17 has SQL Injection when the group_by parameter can be controlled.\r\n\r\nWe should update this dependency ASAP for the SecureDrop application.\r\n\n", "before_files": [{"content": "# -*- encoding:utf-8 -*-\nfrom __future__ import absolute_import, division, print_function, \\\n unicode_literals\n\nimport sys\n\nimport ansible\n\ntry:\n # Version 2.0+\n from ansible.plugins.callback import CallbackBase\nexcept ImportError:\n CallbackBase = object\n\n\ndef print_red_bold(text):\n print('\\x1b[31;1m' + text + '\\x1b[0m')\n\n\nclass CallbackModule(CallbackBase):\n def __init__(self):\n # Can't use `on_X` because this isn't forwards compatible\n # with Ansible 2.0+\n required_version = '2.6.8' # Keep synchronized with requirements files\n if not ansible.__version__.startswith(required_version):\n print_red_bold(\n \"SecureDrop restriction: only Ansible {version}.*\"\n \"is supported.\"\n .format(version=required_version)\n )\n sys.exit(1)\n", "path": "install_files/ansible-base/callback_plugins/ansible_version_check.py"}]}
937
176
gh_patches_debug_34201
rasdani/github-patches
git_diff
ytdl-org__youtube-dl-12587
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> openload.co extractor not working youtube-dl --get-url --verbose https://openload.co/embed/kUEfGclsU9o/ [debug] System config: [] [debug] User config: [] [debug] Command-line args: [u'--get-url', u'--verbose', u'https://openload.co/embed/kUEfGclsU9o/'] [debug] Encodings: locale UTF-8, fs UTF-8, out UTF-8, pref UTF-8 [debug] youtube-dl version 2016.08.22 [debug] Python version 2.6.6 - Linux-2.6.32-642.1.1.el6.x86_64-x86_64-with-centos-6.8-Final [debug] exe versions: ffmpeg 0.6.5, ffprobe 0.6.5 [debug] Proxy map: {} ERROR: Unable to extract link image; please report this issue on https://yt-dl.org/bug . Make sure you are using the latest version; type youtube-dl -U to update. Be sure to call youtube-dl with the --verbose flag and include its complete output. Traceback (most recent call last): File "/usr/local/bin/youtube-dl/youtube_dl/YoutubeDL.py", line 691, in extract_info ie_result = ie.extract(url) File "/usr/local/bin/youtube-dl/youtube_dl/extractor/common.py", line 347, in extract return self._real_extract(url) File "/usr/local/bin/youtube-dl/youtube_dl/extractor/openload.py", line 62, in _real_extract r'<img[^>]+id="linkimg"[^>]+src="([^"]+)"', webpage, 'link image') File "/usr/local/bin/youtube-dl/youtube_dl/extractor/common.py", line 650, in _search_regex raise RegexNotFoundError('Unable to extract %s' % _name) RegexNotFoundError: Unable to extract link image; please report this issue on https://yt-dl.org/bug . Make sure you are using the latest version; type youtube-dl -U to update. Be sure to call youtube-dl with the --verbose flag and include its complete output. </issue> <code> [start of youtube_dl/extractor/openload.py] 1 # coding: utf-8 2 from __future__ import unicode_literals 3 4 import re 5 6 from .common import InfoExtractor 7 from ..compat import compat_chr 8 from ..utils import ( 9 determine_ext, 10 ExtractorError, 11 ) 12 13 14 class OpenloadIE(InfoExtractor): 15 _VALID_URL = r'https?://(?:openload\.(?:co|io)|oload\.tv)/(?:f|embed)/(?P<id>[a-zA-Z0-9-_]+)' 16 17 _TESTS = [{ 18 'url': 'https://openload.co/f/kUEfGclsU9o', 19 'md5': 'bf1c059b004ebc7a256f89408e65c36e', 20 'info_dict': { 21 'id': 'kUEfGclsU9o', 22 'ext': 'mp4', 23 'title': 'skyrim_no-audio_1080.mp4', 24 'thumbnail': r're:^https?://.*\.jpg$', 25 }, 26 }, { 27 'url': 'https://openload.co/embed/rjC09fkPLYs', 28 'info_dict': { 29 'id': 'rjC09fkPLYs', 30 'ext': 'mp4', 31 'title': 'movie.mp4', 32 'thumbnail': r're:^https?://.*\.jpg$', 33 'subtitles': { 34 'en': [{ 35 'ext': 'vtt', 36 }], 37 }, 38 }, 39 'params': { 40 'skip_download': True, # test subtitles only 41 }, 42 }, { 43 'url': 'https://openload.co/embed/kUEfGclsU9o/skyrim_no-audio_1080.mp4', 44 'only_matching': True, 45 }, { 46 'url': 'https://openload.io/f/ZAn6oz-VZGE/', 47 'only_matching': True, 48 }, { 49 'url': 'https://openload.co/f/_-ztPaZtMhM/', 50 'only_matching': True, 51 }, { 52 # unavailable via https://openload.co/f/Sxz5sADo82g/, different layout 53 # for title and ext 54 'url': 'https://openload.co/embed/Sxz5sADo82g/', 55 'only_matching': True, 56 }, { 57 'url': 'https://oload.tv/embed/KnG-kKZdcfY/', 58 'only_matching': True, 59 }] 60 61 @staticmethod 62 def _extract_urls(webpage): 63 return re.findall( 64 r'<iframe[^>]+src=["\']((?:https?://)?(?:openload\.(?:co|io)|oload\.tv)/embed/[a-zA-Z0-9-_]+)', 65 webpage) 66 67 def _real_extract(self, url): 68 video_id = self._match_id(url) 69 webpage = 
self._download_webpage('https://openload.co/embed/%s/' % video_id, video_id) 70 71 if 'File not found' in webpage or 'deleted by the owner' in webpage: 72 raise ExtractorError('File not found', expected=True) 73 74 ol_id = self._search_regex( 75 '<span[^>]+id="[^"]+"[^>]*>([0-9A-Za-z]+)</span>', 76 webpage, 'openload ID') 77 78 video_url_chars = [] 79 80 first_char = ord(ol_id[0]) 81 key = first_char - 55 82 maxKey = max(2, key) 83 key = min(maxKey, len(ol_id) - 38) 84 t = ol_id[key:key + 36] 85 86 hashMap = {} 87 v = ol_id.replace(t, '') 88 h = 0 89 90 while h < len(t): 91 f = t[h:h + 3] 92 i = int(f, 8) 93 hashMap[h / 3] = i 94 h += 3 95 96 h = 0 97 H = 0 98 while h < len(v): 99 B = '' 100 C = '' 101 if len(v) >= h + 2: 102 B = v[h:h + 2] 103 if len(v) >= h + 3: 104 C = v[h:h + 3] 105 i = int(B, 16) 106 h += 2 107 if H % 3 == 0: 108 i = int(C, 8) 109 h += 1 110 elif H % 2 == 0 and H != 0 and ord(v[H - 1]) < 60: 111 i = int(C, 10) 112 h += 1 113 index = H % 7 114 115 A = hashMap[index] 116 i ^= 213 117 i ^= A 118 video_url_chars.append(compat_chr(i)) 119 H += 1 120 121 video_url = 'https://openload.co/stream/%s?mime=true' 122 video_url = video_url % (''.join(video_url_chars)) 123 124 title = self._og_search_title(webpage, default=None) or self._search_regex( 125 r'<span[^>]+class=["\']title["\'][^>]*>([^<]+)', webpage, 126 'title', default=None) or self._html_search_meta( 127 'description', webpage, 'title', fatal=True) 128 129 entries = self._parse_html5_media_entries(url, webpage, video_id) 130 subtitles = entries[0]['subtitles'] if entries else None 131 132 info_dict = { 133 'id': video_id, 134 'title': title, 135 'thumbnail': self._og_search_thumbnail(webpage, default=None), 136 'url': video_url, 137 # Seems all videos have extensions in their titles 138 'ext': determine_ext(title, 'mp4'), 139 'subtitles': subtitles, 140 } 141 return info_dict 142 [end of youtube_dl/extractor/openload.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/youtube_dl/extractor/openload.py b/youtube_dl/extractor/openload.py --- a/youtube_dl/extractor/openload.py +++ b/youtube_dl/extractor/openload.py @@ -75,51 +75,38 @@ '<span[^>]+id="[^"]+"[^>]*>([0-9A-Za-z]+)</span>', webpage, 'openload ID') - video_url_chars = [] - - first_char = ord(ol_id[0]) - key = first_char - 55 - maxKey = max(2, key) - key = min(maxKey, len(ol_id) - 38) - t = ol_id[key:key + 36] - - hashMap = {} - v = ol_id.replace(t, '') - h = 0 - - while h < len(t): - f = t[h:h + 3] - i = int(f, 8) - hashMap[h / 3] = i - h += 3 - - h = 0 - H = 0 - while h < len(v): - B = '' - C = '' - if len(v) >= h + 2: - B = v[h:h + 2] - if len(v) >= h + 3: - C = v[h:h + 3] - i = int(B, 16) - h += 2 - if H % 3 == 0: - i = int(C, 8) - h += 1 - elif H % 2 == 0 and H != 0 and ord(v[H - 1]) < 60: - i = int(C, 10) - h += 1 - index = H % 7 - - A = hashMap[index] - i ^= 213 - i ^= A - video_url_chars.append(compat_chr(i)) - H += 1 + decoded = '' + a = ol_id[0:24] + b = [] + for i in range(0, len(a), 8): + b.append(int(a[i:i + 8] or '0', 16)) + ol_id = ol_id[24:] + j = 0 + k = 0 + while j < len(ol_id): + c = 128 + d = 0 + e = 0 + f = 0 + _more = True + while _more: + if j + 1 >= len(ol_id): + c = 143 + f = int(ol_id[j:j + 2] or '0', 16) + j += 2 + d += (f & 127) << e + e += 7 + _more = f >= c + g = d ^ b[k % 3] + for i in range(4): + char_dec = (g >> 8 * i) & (c + 127) + char = compat_chr(char_dec) + if char != '#': + decoded += char + k += 1 video_url = 'https://openload.co/stream/%s?mime=true' - video_url = video_url % (''.join(video_url_chars)) + video_url = video_url % decoded title = self._og_search_title(webpage, default=None) or self._search_regex( r'<span[^>]+class=["\']title["\'][^>]*>([^<]+)', webpage,
{"golden_diff": "diff --git a/youtube_dl/extractor/openload.py b/youtube_dl/extractor/openload.py\n--- a/youtube_dl/extractor/openload.py\n+++ b/youtube_dl/extractor/openload.py\n@@ -75,51 +75,38 @@\n '<span[^>]+id=\"[^\"]+\"[^>]*>([0-9A-Za-z]+)</span>',\n webpage, 'openload ID')\n \n- video_url_chars = []\n-\n- first_char = ord(ol_id[0])\n- key = first_char - 55\n- maxKey = max(2, key)\n- key = min(maxKey, len(ol_id) - 38)\n- t = ol_id[key:key + 36]\n-\n- hashMap = {}\n- v = ol_id.replace(t, '')\n- h = 0\n-\n- while h < len(t):\n- f = t[h:h + 3]\n- i = int(f, 8)\n- hashMap[h / 3] = i\n- h += 3\n-\n- h = 0\n- H = 0\n- while h < len(v):\n- B = ''\n- C = ''\n- if len(v) >= h + 2:\n- B = v[h:h + 2]\n- if len(v) >= h + 3:\n- C = v[h:h + 3]\n- i = int(B, 16)\n- h += 2\n- if H % 3 == 0:\n- i = int(C, 8)\n- h += 1\n- elif H % 2 == 0 and H != 0 and ord(v[H - 1]) < 60:\n- i = int(C, 10)\n- h += 1\n- index = H % 7\n-\n- A = hashMap[index]\n- i ^= 213\n- i ^= A\n- video_url_chars.append(compat_chr(i))\n- H += 1\n+ decoded = ''\n+ a = ol_id[0:24]\n+ b = []\n+ for i in range(0, len(a), 8):\n+ b.append(int(a[i:i + 8] or '0', 16))\n+ ol_id = ol_id[24:]\n+ j = 0\n+ k = 0\n+ while j < len(ol_id):\n+ c = 128\n+ d = 0\n+ e = 0\n+ f = 0\n+ _more = True\n+ while _more:\n+ if j + 1 >= len(ol_id):\n+ c = 143\n+ f = int(ol_id[j:j + 2] or '0', 16)\n+ j += 2\n+ d += (f & 127) << e\n+ e += 7\n+ _more = f >= c\n+ g = d ^ b[k % 3]\n+ for i in range(4):\n+ char_dec = (g >> 8 * i) & (c + 127)\n+ char = compat_chr(char_dec)\n+ if char != '#':\n+ decoded += char\n+ k += 1\n \n video_url = 'https://openload.co/stream/%s?mime=true'\n- video_url = video_url % (''.join(video_url_chars))\n+ video_url = video_url % decoded\n \n title = self._og_search_title(webpage, default=None) or self._search_regex(\n r'<span[^>]+class=[\"\\']title[\"\\'][^>]*>([^<]+)', webpage,\n", "issue": "openload.co extractor not working\n youtube-dl --get-url --verbose https://openload.co/embed/kUEfGclsU9o/\n[debug] System config: []\n[debug] User config: []\n[debug] Command-line args: [u'--get-url', u'--verbose', u'https://openload.co/embed/kUEfGclsU9o/']\n[debug] Encodings: locale UTF-8, fs UTF-8, out UTF-8, pref UTF-8\n[debug] youtube-dl version 2016.08.22\n[debug] Python version 2.6.6 - Linux-2.6.32-642.1.1.el6.x86_64-x86_64-with-centos-6.8-Final\n[debug] exe versions: ffmpeg 0.6.5, ffprobe 0.6.5\n[debug] Proxy map: {}\nERROR: Unable to extract link image; please report this issue on https://yt-dl.org/bug . Make sure you are using the latest version; type youtube-dl -U to update. Be sure to call youtube-dl with the --verbose flag and include its complete output.\nTraceback (most recent call last):\n File \"/usr/local/bin/youtube-dl/youtube_dl/YoutubeDL.py\", line 691, in extract_info\n ie_result = ie.extract(url)\n File \"/usr/local/bin/youtube-dl/youtube_dl/extractor/common.py\", line 347, in extract\n return self._real_extract(url)\n File \"/usr/local/bin/youtube-dl/youtube_dl/extractor/openload.py\", line 62, in _real_extract\n r'<img[^>]+id=\"linkimg\"[^>]+src=\"([^\"]+)\"', webpage, 'link image')\n File \"/usr/local/bin/youtube-dl/youtube_dl/extractor/common.py\", line 650, in _search_regex\n raise RegexNotFoundError('Unable to extract %s' % _name)\nRegexNotFoundError: Unable to extract link image; please report this issue on https://yt-dl.org/bug . Make sure you are using the latest version; type youtube-dl -U to update. 
Be sure to call youtube-dl with the --verbose flag and include its complete output.\n\n", "before_files": [{"content": "# coding: utf-8\nfrom __future__ import unicode_literals\n\nimport re\n\nfrom .common import InfoExtractor\nfrom ..compat import compat_chr\nfrom ..utils import (\n determine_ext,\n ExtractorError,\n)\n\n\nclass OpenloadIE(InfoExtractor):\n _VALID_URL = r'https?://(?:openload\\.(?:co|io)|oload\\.tv)/(?:f|embed)/(?P<id>[a-zA-Z0-9-_]+)'\n\n _TESTS = [{\n 'url': 'https://openload.co/f/kUEfGclsU9o',\n 'md5': 'bf1c059b004ebc7a256f89408e65c36e',\n 'info_dict': {\n 'id': 'kUEfGclsU9o',\n 'ext': 'mp4',\n 'title': 'skyrim_no-audio_1080.mp4',\n 'thumbnail': r're:^https?://.*\\.jpg$',\n },\n }, {\n 'url': 'https://openload.co/embed/rjC09fkPLYs',\n 'info_dict': {\n 'id': 'rjC09fkPLYs',\n 'ext': 'mp4',\n 'title': 'movie.mp4',\n 'thumbnail': r're:^https?://.*\\.jpg$',\n 'subtitles': {\n 'en': [{\n 'ext': 'vtt',\n }],\n },\n },\n 'params': {\n 'skip_download': True, # test subtitles only\n },\n }, {\n 'url': 'https://openload.co/embed/kUEfGclsU9o/skyrim_no-audio_1080.mp4',\n 'only_matching': True,\n }, {\n 'url': 'https://openload.io/f/ZAn6oz-VZGE/',\n 'only_matching': True,\n }, {\n 'url': 'https://openload.co/f/_-ztPaZtMhM/',\n 'only_matching': True,\n }, {\n # unavailable via https://openload.co/f/Sxz5sADo82g/, different layout\n # for title and ext\n 'url': 'https://openload.co/embed/Sxz5sADo82g/',\n 'only_matching': True,\n }, {\n 'url': 'https://oload.tv/embed/KnG-kKZdcfY/',\n 'only_matching': True,\n }]\n\n @staticmethod\n def _extract_urls(webpage):\n return re.findall(\n r'<iframe[^>]+src=[\"\\']((?:https?://)?(?:openload\\.(?:co|io)|oload\\.tv)/embed/[a-zA-Z0-9-_]+)',\n webpage)\n\n def _real_extract(self, url):\n video_id = self._match_id(url)\n webpage = self._download_webpage('https://openload.co/embed/%s/' % video_id, video_id)\n\n if 'File not found' in webpage or 'deleted by the owner' in webpage:\n raise ExtractorError('File not found', expected=True)\n\n ol_id = self._search_regex(\n '<span[^>]+id=\"[^\"]+\"[^>]*>([0-9A-Za-z]+)</span>',\n webpage, 'openload ID')\n\n video_url_chars = []\n\n first_char = ord(ol_id[0])\n key = first_char - 55\n maxKey = max(2, key)\n key = min(maxKey, len(ol_id) - 38)\n t = ol_id[key:key + 36]\n\n hashMap = {}\n v = ol_id.replace(t, '')\n h = 0\n\n while h < len(t):\n f = t[h:h + 3]\n i = int(f, 8)\n hashMap[h / 3] = i\n h += 3\n\n h = 0\n H = 0\n while h < len(v):\n B = ''\n C = ''\n if len(v) >= h + 2:\n B = v[h:h + 2]\n if len(v) >= h + 3:\n C = v[h:h + 3]\n i = int(B, 16)\n h += 2\n if H % 3 == 0:\n i = int(C, 8)\n h += 1\n elif H % 2 == 0 and H != 0 and ord(v[H - 1]) < 60:\n i = int(C, 10)\n h += 1\n index = H % 7\n\n A = hashMap[index]\n i ^= 213\n i ^= A\n video_url_chars.append(compat_chr(i))\n H += 1\n\n video_url = 'https://openload.co/stream/%s?mime=true'\n video_url = video_url % (''.join(video_url_chars))\n\n title = self._og_search_title(webpage, default=None) or self._search_regex(\n r'<span[^>]+class=[\"\\']title[\"\\'][^>]*>([^<]+)', webpage,\n 'title', default=None) or self._html_search_meta(\n 'description', webpage, 'title', fatal=True)\n\n entries = self._parse_html5_media_entries(url, webpage, video_id)\n subtitles = entries[0]['subtitles'] if entries else None\n\n info_dict = {\n 'id': video_id,\n 'title': title,\n 'thumbnail': self._og_search_thumbnail(webpage, default=None),\n 'url': video_url,\n # Seems all videos have extensions in their titles\n 'ext': determine_ext(title, 'mp4'),\n 'subtitles': subtitles,\n 
}\n return info_dict\n", "path": "youtube_dl/extractor/openload.py"}]}
2644
838
gh_patches_debug_8450
rasdani/github-patches
git_diff
easybuilders__easybuild-easyblocks-1796
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> SuiteSparse easyblock fails when using Python 3 When running `eb` on top of Python 3.6.8: ``` $ eb SuiteSparse-5.4.0-foss-2019a-METIS-5.1.0.eb ... == configuring... ERROR: Traceback (most recent call last): File "/home/kehoste/easybuild-framework/easybuild/main.py", line 112, in build_and_install_software (ec_res['success'], app_log, err) = build_and_install_one(ec, init_env) File "/home/kehoste/easybuild-framework/easybuild/framework/easyblock.py", line 3046, in build_and_install_one result = app.run_all_steps(run_test_cases=run_test_cases) File "/home/kehoste/easybuild-framework/easybuild/framework/easyblock.py", line 2954, in run_all_steps self.run_step(step_name, step_methods) File "/home/kehoste/easybuild-framework/easybuild/framework/easyblock.py", line 2824, in run_step step_method(self)() File "/home/kehoste/easybuild-easyblocks/easybuild/easyblocks/s/suitesparse.py", line 113, in configure_step for (var, val) in cfgvars.items(): RuntimeError: dictionary changed size during iteration ``` </issue> <code> [start of easybuild/easyblocks/s/suitesparse.py] 1 ## 2 # Copyright 2009-2019 Ghent University 3 # 4 # This file is part of EasyBuild, 5 # originally created by the HPC team of Ghent University (http://ugent.be/hpc/en), 6 # with support of Ghent University (http://ugent.be/hpc), 7 # the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be), 8 # Flemish Research Foundation (FWO) (http://www.fwo.be/en) 9 # and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en). 10 # 11 # https://github.com/easybuilders/easybuild 12 # 13 # EasyBuild is free software: you can redistribute it and/or modify 14 # it under the terms of the GNU General Public License as published by 15 # the Free Software Foundation v2. 16 # 17 # EasyBuild is distributed in the hope that it will be useful, 18 # but WITHOUT ANY WARRANTY; without even the implied warranty of 19 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 20 # GNU General Public License for more details. 21 # 22 # You should have received a copy of the GNU General Public License 23 # along with EasyBuild. If not, see <http://www.gnu.org/licenses/>. 
24 ## 25 """ 26 EasyBuild support for SuiteSparse, implemented as an easyblock 27 28 @author: Stijn De Weirdt (Ghent University) 29 @author: Dries Verdegem (Ghent University) 30 @author: Kenneth Hoste (Ghent University) 31 @author: Pieter De Baets (Ghent University) 32 @author: Jens Timmerman (Ghent University) 33 """ 34 import fileinput 35 import re 36 import os 37 import shutil 38 import sys 39 import stat 40 from distutils.version import LooseVersion 41 42 from easybuild.easyblocks.generic.configuremake import ConfigureMake 43 from easybuild.tools.build_log import EasyBuildError 44 from easybuild.tools.filetools import mkdir, write_file, adjust_permissions 45 from easybuild.tools.modules import get_software_root 46 from easybuild.tools.modules import get_software_libdir 47 from easybuild.tools.systemtools import get_shared_lib_ext 48 49 50 class EB_SuiteSparse(ConfigureMake): 51 """Support for building SuiteSparse.""" 52 53 def __init__(self, *args, **kwargs): 54 """Custom constructor for SuiteSparse easyblock, initialize custom class parameters.""" 55 super(EB_SuiteSparse, self).__init__(*args, **kwargs) 56 self.config_name = 'UNKNOWN' 57 58 def configure_step(self): 59 """Configure build by patching UFconfig.mk or SuiteSparse_config.mk.""" 60 61 if LooseVersion(self.version) < LooseVersion('4.0'): 62 self.config_name = 'UFconfig' 63 else: 64 self.config_name = 'SuiteSparse_config' 65 66 cfgvars = { 67 'CC': os.getenv('MPICC'), 68 'CFLAGS': os.getenv('CFLAGS'), 69 'CXX': os.getenv('MPICXX'), 70 'F77': os.getenv('MPIF77'), 71 'F77FLAGS': os.getenv('F77FLAGS'), 72 } 73 74 # avoid that (system) Intel compilers are always considered 75 self.cfg.update('buildopts', 'AUTOCC=no') 76 77 # Set BLAS and LAPACK libraries as specified in SuiteSparse README.txt 78 self.cfg.update('buildopts', 'BLAS="%s"' % os.getenv('LIBBLAS_MT')) 79 self.cfg.update('buildopts', 'LAPACK="%s"' % os.getenv('LIBLAPACK_MT')) 80 81 # Get METIS or ParMETIS settings 82 metis = get_software_root('METIS') 83 parmetis = get_software_root('ParMETIS') 84 if parmetis: 85 metis_path = parmetis 86 metis_include = os.path.join(parmetis, 'include') 87 metis_libs = os.path.join(parmetis, get_software_libdir('ParMETIS'), 'libmetis.a') 88 89 elif metis: 90 metis_path = metis 91 metis_include = os.path.join(metis, 'include') 92 metis_libs = os.path.join(metis, get_software_libdir('METIS'), 'libmetis.a') 93 94 else: 95 raise EasyBuildError("Neither METIS or ParMETIS module loaded.") 96 97 if LooseVersion(self.version) >= LooseVersion('4.5.1'): 98 cfgvars.update({ 99 'MY_METIS_LIB': metis_libs, 100 'MY_METIS_INC': metis_include, 101 }) 102 else: 103 cfgvars.update({ 104 'METIS_PATH': metis_path, 105 'METIS': metis_libs, 106 }) 107 108 # patch file 109 fp = os.path.join(self.cfg['start_dir'], self.config_name, '%s.mk' % self.config_name) 110 111 try: 112 for line in fileinput.input(fp, inplace=1, backup='.orig'): 113 for (var, val) in cfgvars.items(): 114 orig_line = line 115 # for variables in cfgvars, substiture lines assignment 116 # in the file, whatever they are, by assignments to the 117 # values in cfgvars 118 line = re.sub(r"^\s*(%s\s*=\s*).*\n$" % var, 119 r"\1 %s # patched by EasyBuild\n" % val, 120 line) 121 if line != orig_line: 122 cfgvars.pop(var) 123 sys.stdout.write(line) 124 except IOError as err: 125 raise EasyBuildError("Failed to patch %s in: %s", fp, err) 126 127 # add remaining entries at the end 128 if cfgvars: 129 cfgtxt = '# lines below added automatically by EasyBuild\n' 130 cfgtxt += '\n'.join(["%s = %s" % (var, 
val) for (var, val) in cfgvars.items()]) 131 write_file(fp, cfgtxt, append=True) 132 133 def install_step(self): 134 """Install by copying the contents of the builddir to the installdir (preserving permissions)""" 135 for x in os.listdir(self.cfg['start_dir']): 136 src = os.path.join(self.cfg['start_dir'], x) 137 dst = os.path.join(self.installdir, x) 138 try: 139 if os.path.isdir(src): 140 shutil.copytree(src, dst) 141 # symlink 142 # - dst/Lib to dst/lib 143 # - dst/Include to dst/include 144 for c in ['Lib', 'Include']: 145 nsrc = os.path.join(dst, c) 146 ndst = os.path.join(dst, c.lower()) 147 if os.path.exists(nsrc): 148 os.symlink(nsrc, ndst) 149 # enable r-x permissions for group/others 150 perms = stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH 151 adjust_permissions(dst, perms, add=True, recursive=True, onlydirs=True) 152 else: 153 shutil.copy2(src, dst) 154 except OSError as err: 155 raise EasyBuildError("Copying src %s to dst %s failed: %s", src, dst, err) 156 157 # some extra symlinks are necessary for UMFPACK to work. 158 paths = [ 159 os.path.join('AMD', 'include', 'amd.h'), 160 os.path.join('AMD', 'include', 'amd_internal.h'), 161 os.path.join(self.config_name, '%s.h' % self.config_name), 162 os.path.join('AMD', 'lib', 'libamd.a') 163 ] 164 for path in paths: 165 src = os.path.join(self.installdir, path) 166 dn = path.split(os.path.sep)[-2] 167 fn = path.split(os.path.sep)[-1] 168 dstdir = os.path.join(self.installdir, 'UMFPACK', dn) 169 mkdir(dstdir) 170 if os.path.exists(src): 171 try: 172 os.symlink(src, os.path.join(dstdir, fn)) 173 except OSError as err: 174 raise EasyBuildError("Failed to make symbolic link from %s to %s: %s", src, dst, err) 175 176 def make_module_req_guess(self): 177 """ 178 Extra path to consider for module file: 179 * add config dir and include to $CPATH so include files are found 180 * add UMFPACK and AMD library, and lib dirs to $LD_LIBRARY_PATH 181 """ 182 183 guesses = super(EB_SuiteSparse, self).make_module_req_guess() 184 185 # Previous versions of SuiteSparse used specific directories for includes and libraries 186 if LooseVersion(self.version) < LooseVersion('4.5'): 187 include_dirs = [self.config_name] 188 ld_library_path = ['AMD/lib', 'BTF/lib', 'CAMD/lib', 'CCOLAMD/lib', 'CHOLAMD/lib', 'CHOLMOD/lib', 189 'COLAMD/lib/', 'CSparse/lib', 'CXSparse/lib', 'KLU/lib', 'LDL/lib', 'RBio/lib', 190 'UMFPACK/lib', self.config_name] 191 192 guesses['CPATH'].extend(include_dirs) 193 guesses['LD_LIBRARY_PATH'].extend(ld_library_path) 194 guesses['LIBRARY_PATH'].extend(ld_library_path) 195 196 return guesses 197 198 def sanity_check_step(self): 199 """Custom sanity check for SuiteSparse.""" 200 201 # Make sure that SuiteSparse did NOT compile its own Metis 202 if os.path.exists(os.path.join(self.installdir, 'lib', 'libmetis.%s' % get_shared_lib_ext())): 203 raise EasyBuildError("SuiteSparse has compiled its own Metis. This will conflict with the Metis build." 
204 " The SuiteSparse EasyBlock need to be updated!") 205 206 libnames = ['AMD', 'BTF', 'CAMD', 'CCOLAMD', 'CHOLMOD', 'COLAMD', 'CXSparse', 'KLU', 207 'LDL', 'RBio', 'SPQR', 'UMFPACK'] 208 libs = [os.path.join(x, 'lib', 'lib%s.a' % x.lower()) for x in libnames] 209 210 if LooseVersion(self.version) < LooseVersion('4.0'): 211 csparse_dir = 'CSparse3' 212 else: 213 csparse_dir = 'CSparse' 214 libs.append(os.path.join(csparse_dir, 'lib', 'libcsparse.a')) 215 216 # Latest version of SuiteSparse also compiles shared library and put them in 'lib' 217 shlib_ext = get_shared_lib_ext() 218 if LooseVersion(self.version) >= LooseVersion('4.5.1'): 219 libs += [os.path.join('lib', 'lib%s.%s' % (l.lower(), shlib_ext)) for l in libnames] 220 221 custom_paths = { 222 'files': libs, 223 'dirs': ['MATLAB_Tools'], 224 } 225 226 super(EB_SuiteSparse, self).sanity_check_step(custom_paths=custom_paths) 227 [end of easybuild/easyblocks/s/suitesparse.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/easybuild/easyblocks/s/suitesparse.py b/easybuild/easyblocks/s/suitesparse.py --- a/easybuild/easyblocks/s/suitesparse.py +++ b/easybuild/easyblocks/s/suitesparse.py @@ -110,7 +110,7 @@ try: for line in fileinput.input(fp, inplace=1, backup='.orig'): - for (var, val) in cfgvars.items(): + for (var, val) in list(cfgvars.items()): orig_line = line # for variables in cfgvars, substiture lines assignment # in the file, whatever they are, by assignments to the
{"golden_diff": "diff --git a/easybuild/easyblocks/s/suitesparse.py b/easybuild/easyblocks/s/suitesparse.py\n--- a/easybuild/easyblocks/s/suitesparse.py\n+++ b/easybuild/easyblocks/s/suitesparse.py\n@@ -110,7 +110,7 @@\n \n try:\n for line in fileinput.input(fp, inplace=1, backup='.orig'):\n- for (var, val) in cfgvars.items():\n+ for (var, val) in list(cfgvars.items()):\n orig_line = line\n # for variables in cfgvars, substiture lines assignment\n # in the file, whatever they are, by assignments to the\n", "issue": "SuiteSparse easyblock fails when using Python 3\nWhen running `eb` on top of Python 3.6.8:\r\n\r\n```\r\n$ eb SuiteSparse-5.4.0-foss-2019a-METIS-5.1.0.eb\r\n...\r\n== configuring...\r\nERROR: Traceback (most recent call last):\r\n File \"/home/kehoste/easybuild-framework/easybuild/main.py\", line 112, in build_and_install_software\r\n (ec_res['success'], app_log, err) = build_and_install_one(ec, init_env)\r\n File \"/home/kehoste/easybuild-framework/easybuild/framework/easyblock.py\", line 3046, in build_and_install_one\r\n result = app.run_all_steps(run_test_cases=run_test_cases)\r\n File \"/home/kehoste/easybuild-framework/easybuild/framework/easyblock.py\", line 2954, in run_all_steps\r\n self.run_step(step_name, step_methods)\r\n File \"/home/kehoste/easybuild-framework/easybuild/framework/easyblock.py\", line 2824, in run_step\r\n step_method(self)()\r\n File \"/home/kehoste/easybuild-easyblocks/easybuild/easyblocks/s/suitesparse.py\", line 113, in configure_step\r\n for (var, val) in cfgvars.items():\r\nRuntimeError: dictionary changed size during iteration\r\n```\n", "before_files": [{"content": "##\n# Copyright 2009-2019 Ghent University\n#\n# This file is part of EasyBuild,\n# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),\n# with support of Ghent University (http://ugent.be/hpc),\n# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),\n# Flemish Research Foundation (FWO) (http://www.fwo.be/en)\n# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).\n#\n# https://github.com/easybuilders/easybuild\n#\n# EasyBuild is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation v2.\n#\n# EasyBuild is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with EasyBuild. 
If not, see <http://www.gnu.org/licenses/>.\n##\n\"\"\"\nEasyBuild support for SuiteSparse, implemented as an easyblock\n\n@author: Stijn De Weirdt (Ghent University)\n@author: Dries Verdegem (Ghent University)\n@author: Kenneth Hoste (Ghent University)\n@author: Pieter De Baets (Ghent University)\n@author: Jens Timmerman (Ghent University)\n\"\"\"\nimport fileinput\nimport re\nimport os\nimport shutil\nimport sys\nimport stat\nfrom distutils.version import LooseVersion\n\nfrom easybuild.easyblocks.generic.configuremake import ConfigureMake\nfrom easybuild.tools.build_log import EasyBuildError\nfrom easybuild.tools.filetools import mkdir, write_file, adjust_permissions\nfrom easybuild.tools.modules import get_software_root\nfrom easybuild.tools.modules import get_software_libdir\nfrom easybuild.tools.systemtools import get_shared_lib_ext\n\n\nclass EB_SuiteSparse(ConfigureMake):\n \"\"\"Support for building SuiteSparse.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Custom constructor for SuiteSparse easyblock, initialize custom class parameters.\"\"\"\n super(EB_SuiteSparse, self).__init__(*args, **kwargs)\n self.config_name = 'UNKNOWN'\n\n def configure_step(self):\n \"\"\"Configure build by patching UFconfig.mk or SuiteSparse_config.mk.\"\"\"\n\n if LooseVersion(self.version) < LooseVersion('4.0'):\n self.config_name = 'UFconfig'\n else:\n self.config_name = 'SuiteSparse_config'\n\n cfgvars = {\n 'CC': os.getenv('MPICC'),\n 'CFLAGS': os.getenv('CFLAGS'),\n 'CXX': os.getenv('MPICXX'),\n 'F77': os.getenv('MPIF77'),\n 'F77FLAGS': os.getenv('F77FLAGS'),\n }\n\n # avoid that (system) Intel compilers are always considered\n self.cfg.update('buildopts', 'AUTOCC=no')\n\n # Set BLAS and LAPACK libraries as specified in SuiteSparse README.txt\n self.cfg.update('buildopts', 'BLAS=\"%s\"' % os.getenv('LIBBLAS_MT'))\n self.cfg.update('buildopts', 'LAPACK=\"%s\"' % os.getenv('LIBLAPACK_MT'))\n\n # Get METIS or ParMETIS settings\n metis = get_software_root('METIS')\n parmetis = get_software_root('ParMETIS')\n if parmetis:\n metis_path = parmetis\n metis_include = os.path.join(parmetis, 'include')\n metis_libs = os.path.join(parmetis, get_software_libdir('ParMETIS'), 'libmetis.a')\n\n elif metis:\n metis_path = metis\n metis_include = os.path.join(metis, 'include')\n metis_libs = os.path.join(metis, get_software_libdir('METIS'), 'libmetis.a')\n\n else:\n raise EasyBuildError(\"Neither METIS or ParMETIS module loaded.\")\n\n if LooseVersion(self.version) >= LooseVersion('4.5.1'):\n cfgvars.update({\n 'MY_METIS_LIB': metis_libs,\n 'MY_METIS_INC': metis_include,\n })\n else:\n cfgvars.update({\n 'METIS_PATH': metis_path,\n 'METIS': metis_libs,\n })\n\n # patch file\n fp = os.path.join(self.cfg['start_dir'], self.config_name, '%s.mk' % self.config_name)\n\n try:\n for line in fileinput.input(fp, inplace=1, backup='.orig'):\n for (var, val) in cfgvars.items():\n orig_line = line\n # for variables in cfgvars, substiture lines assignment\n # in the file, whatever they are, by assignments to the\n # values in cfgvars\n line = re.sub(r\"^\\s*(%s\\s*=\\s*).*\\n$\" % var,\n r\"\\1 %s # patched by EasyBuild\\n\" % val,\n line)\n if line != orig_line:\n cfgvars.pop(var)\n sys.stdout.write(line)\n except IOError as err:\n raise EasyBuildError(\"Failed to patch %s in: %s\", fp, err)\n\n # add remaining entries at the end\n if cfgvars:\n cfgtxt = '# lines below added automatically by EasyBuild\\n'\n cfgtxt += '\\n'.join([\"%s = %s\" % (var, val) for (var, val) in cfgvars.items()])\n write_file(fp, cfgtxt, 
append=True)\n\n def install_step(self):\n \"\"\"Install by copying the contents of the builddir to the installdir (preserving permissions)\"\"\"\n for x in os.listdir(self.cfg['start_dir']):\n src = os.path.join(self.cfg['start_dir'], x)\n dst = os.path.join(self.installdir, x)\n try:\n if os.path.isdir(src):\n shutil.copytree(src, dst)\n # symlink\n # - dst/Lib to dst/lib\n # - dst/Include to dst/include\n for c in ['Lib', 'Include']:\n nsrc = os.path.join(dst, c)\n ndst = os.path.join(dst, c.lower())\n if os.path.exists(nsrc):\n os.symlink(nsrc, ndst)\n # enable r-x permissions for group/others\n perms = stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH\n adjust_permissions(dst, perms, add=True, recursive=True, onlydirs=True)\n else:\n shutil.copy2(src, dst)\n except OSError as err:\n raise EasyBuildError(\"Copying src %s to dst %s failed: %s\", src, dst, err)\n\n # some extra symlinks are necessary for UMFPACK to work.\n paths = [\n os.path.join('AMD', 'include', 'amd.h'),\n os.path.join('AMD', 'include', 'amd_internal.h'),\n os.path.join(self.config_name, '%s.h' % self.config_name),\n os.path.join('AMD', 'lib', 'libamd.a')\n ]\n for path in paths:\n src = os.path.join(self.installdir, path)\n dn = path.split(os.path.sep)[-2]\n fn = path.split(os.path.sep)[-1]\n dstdir = os.path.join(self.installdir, 'UMFPACK', dn)\n mkdir(dstdir)\n if os.path.exists(src):\n try:\n os.symlink(src, os.path.join(dstdir, fn))\n except OSError as err:\n raise EasyBuildError(\"Failed to make symbolic link from %s to %s: %s\", src, dst, err)\n\n def make_module_req_guess(self):\n \"\"\"\n Extra path to consider for module file:\n * add config dir and include to $CPATH so include files are found\n * add UMFPACK and AMD library, and lib dirs to $LD_LIBRARY_PATH\n \"\"\"\n\n guesses = super(EB_SuiteSparse, self).make_module_req_guess()\n\n # Previous versions of SuiteSparse used specific directories for includes and libraries\n if LooseVersion(self.version) < LooseVersion('4.5'):\n include_dirs = [self.config_name]\n ld_library_path = ['AMD/lib', 'BTF/lib', 'CAMD/lib', 'CCOLAMD/lib', 'CHOLAMD/lib', 'CHOLMOD/lib',\n 'COLAMD/lib/', 'CSparse/lib', 'CXSparse/lib', 'KLU/lib', 'LDL/lib', 'RBio/lib',\n 'UMFPACK/lib', self.config_name]\n\n guesses['CPATH'].extend(include_dirs)\n guesses['LD_LIBRARY_PATH'].extend(ld_library_path)\n guesses['LIBRARY_PATH'].extend(ld_library_path)\n\n return guesses\n\n def sanity_check_step(self):\n \"\"\"Custom sanity check for SuiteSparse.\"\"\"\n\n # Make sure that SuiteSparse did NOT compile its own Metis\n if os.path.exists(os.path.join(self.installdir, 'lib', 'libmetis.%s' % get_shared_lib_ext())):\n raise EasyBuildError(\"SuiteSparse has compiled its own Metis. 
This will conflict with the Metis build.\"\n \" The SuiteSparse EasyBlock need to be updated!\")\n\n libnames = ['AMD', 'BTF', 'CAMD', 'CCOLAMD', 'CHOLMOD', 'COLAMD', 'CXSparse', 'KLU',\n 'LDL', 'RBio', 'SPQR', 'UMFPACK']\n libs = [os.path.join(x, 'lib', 'lib%s.a' % x.lower()) for x in libnames]\n\n if LooseVersion(self.version) < LooseVersion('4.0'):\n csparse_dir = 'CSparse3'\n else:\n csparse_dir = 'CSparse'\n libs.append(os.path.join(csparse_dir, 'lib', 'libcsparse.a'))\n\n # Latest version of SuiteSparse also compiles shared library and put them in 'lib'\n shlib_ext = get_shared_lib_ext()\n if LooseVersion(self.version) >= LooseVersion('4.5.1'):\n libs += [os.path.join('lib', 'lib%s.%s' % (l.lower(), shlib_ext)) for l in libnames]\n\n custom_paths = {\n 'files': libs,\n 'dirs': ['MATLAB_Tools'],\n }\n\n super(EB_SuiteSparse, self).sanity_check_step(custom_paths=custom_paths)\n", "path": "easybuild/easyblocks/s/suitesparse.py"}]}
3769
156
gh_patches_debug_29546
rasdani/github-patches
git_diff
Mailu__Mailu-1014
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Should `antispam` be hardcoded? I'm in a middle of setting up my own mailu instances, however I'm using namespaced container names (i.e. `mailu-admin`, `mailu-front` etc.). Everything works pretty well so far, however I have some problems with imap and antispam. After some investigation I noticed, that files `core/dovecot/conf/bin/ham` and `core/dovecot/conf/bin/spam` have antispam host hardcoded instead of using `HOST_ANTISPAM`. I'm happy to provide PR, but firstly want to be sure - is it in fact oversight, or is this by design? Should `antispam` be hardcoded? I'm in a middle of setting up my own mailu instances, however I'm using namespaced container names (i.e. `mailu-admin`, `mailu-front` etc.). Everything works pretty well so far, however I have some problems with imap and antispam. After some investigation I noticed, that files `core/dovecot/conf/bin/ham` and `core/dovecot/conf/bin/spam` have antispam host hardcoded instead of using `HOST_ANTISPAM`. I'm happy to provide PR, but firstly want to be sure - is it in fact oversight, or is this by design? </issue> <code> [start of core/dovecot/start.py] 1 #!/usr/bin/python3 2 3 import os 4 import glob 5 import multiprocessing 6 import logging as log 7 import sys 8 9 from podop import run_server 10 from socrate import system, conf 11 12 log.basicConfig(stream=sys.stderr, level=os.environ.get("LOG_LEVEL", "WARNING")) 13 14 def start_podop(): 15 os.setuid(8) 16 url = "http://" + os.environ["ADMIN_ADDRESS"] + "/internal/dovecot/§" 17 run_server(0, "dovecot", "/tmp/podop.socket", [ 18 ("quota", "url", url ), 19 ("auth", "url", url), 20 ("sieve", "url", url), 21 ]) 22 23 # Actual startup script 24 os.environ["FRONT_ADDRESS"] = system.resolve_address(os.environ.get("HOST_FRONT", "front")) 25 os.environ["REDIS_ADDRESS"] = system.resolve_address(os.environ.get("HOST_REDIS", "redis")) 26 os.environ["ADMIN_ADDRESS"] = system.resolve_address(os.environ.get("HOST_ADMIN", "admin")) 27 if os.environ["WEBMAIL"] != "none": 28 os.environ["WEBMAIL_ADDRESS"] = system.resolve_address(os.environ.get("HOST_WEBMAIL", "webmail")) 29 30 for dovecot_file in glob.glob("/conf/*.conf"): 31 conf.jinja(dovecot_file, os.environ, os.path.join("/etc/dovecot", os.path.basename(dovecot_file))) 32 33 # Run Podop, then postfix 34 multiprocessing.Process(target=start_podop).start() 35 os.system("chown mail:mail /mail") 36 os.system("chown -R mail:mail /var/lib/dovecot /conf") 37 os.execv("/usr/sbin/dovecot", ["dovecot", "-c", "/etc/dovecot/dovecot.conf", "-F"]) 38 [end of core/dovecot/start.py] [start of services/rspamd/start.py] 1 #!/usr/bin/python3 2 3 import os 4 import glob 5 import logging as log 6 import sys 7 from socrate import system, conf 8 9 log.basicConfig(stream=sys.stderr, level=os.environ.get("LOG_LEVEL", "WARNING")) 10 11 # Actual startup script 12 os.environ["FRONT_ADDRESS"] = system.resolve_address(os.environ.get("HOST_FRONT", "front")) 13 14 if "HOST_REDIS" not in os.environ: 15 os.environ["REDIS_ADDRESS"] = system.resolve_address(os.environ.get("HOST_REDIS", "redis")) 16 17 18 for rspamd_file in glob.glob("/conf/*"): 19 conf.jinja(rspamd_file, os.environ, os.path.join("/etc/rspamd/local.d", os.path.basename(rspamd_file))) 20 21 # Run rspamd 22 os.execv("/usr/sbin/rspamd", ["rspamd", "-i", "-f"]) 23 [end of services/rspamd/start.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository 
using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/core/dovecot/start.py b/core/dovecot/start.py --- a/core/dovecot/start.py +++ b/core/dovecot/start.py @@ -24,6 +24,7 @@ os.environ["FRONT_ADDRESS"] = system.resolve_address(os.environ.get("HOST_FRONT", "front")) os.environ["REDIS_ADDRESS"] = system.resolve_address(os.environ.get("HOST_REDIS", "redis")) os.environ["ADMIN_ADDRESS"] = system.resolve_address(os.environ.get("HOST_ADMIN", "admin")) +os.environ["ANTISPAM_ADDRESS"] = system.resolve_address(os.environ.get("HOST_ANTISPAM", "antispam:11334")) if os.environ["WEBMAIL"] != "none": os.environ["WEBMAIL_ADDRESS"] = system.resolve_address(os.environ.get("HOST_WEBMAIL", "webmail")) diff --git a/services/rspamd/start.py b/services/rspamd/start.py --- a/services/rspamd/start.py +++ b/services/rspamd/start.py @@ -9,11 +9,14 @@ log.basicConfig(stream=sys.stderr, level=os.environ.get("LOG_LEVEL", "WARNING")) # Actual startup script + os.environ["FRONT_ADDRESS"] = system.resolve_address(os.environ.get("HOST_FRONT", "front")) if "HOST_REDIS" not in os.environ: os.environ["REDIS_ADDRESS"] = system.resolve_address(os.environ.get("HOST_REDIS", "redis")) +if os.environ.get("ANTIVIRUS") == 'clamav': + os.environ["ANTIVIRUS_ADDRESS"] = system.resolve_address(os.environ.get("HOST_ANTIVIRUS", "antivirus:3310")) for rspamd_file in glob.glob("/conf/*"): conf.jinja(rspamd_file, os.environ, os.path.join("/etc/rspamd/local.d", os.path.basename(rspamd_file)))
{"golden_diff": "diff --git a/core/dovecot/start.py b/core/dovecot/start.py\n--- a/core/dovecot/start.py\n+++ b/core/dovecot/start.py\n@@ -24,6 +24,7 @@\n os.environ[\"FRONT_ADDRESS\"] = system.resolve_address(os.environ.get(\"HOST_FRONT\", \"front\"))\n os.environ[\"REDIS_ADDRESS\"] = system.resolve_address(os.environ.get(\"HOST_REDIS\", \"redis\"))\n os.environ[\"ADMIN_ADDRESS\"] = system.resolve_address(os.environ.get(\"HOST_ADMIN\", \"admin\"))\n+os.environ[\"ANTISPAM_ADDRESS\"] = system.resolve_address(os.environ.get(\"HOST_ANTISPAM\", \"antispam:11334\"))\n if os.environ[\"WEBMAIL\"] != \"none\":\n os.environ[\"WEBMAIL_ADDRESS\"] = system.resolve_address(os.environ.get(\"HOST_WEBMAIL\", \"webmail\"))\n \ndiff --git a/services/rspamd/start.py b/services/rspamd/start.py\n--- a/services/rspamd/start.py\n+++ b/services/rspamd/start.py\n@@ -9,11 +9,14 @@\n log.basicConfig(stream=sys.stderr, level=os.environ.get(\"LOG_LEVEL\", \"WARNING\"))\n \n # Actual startup script\n+\n os.environ[\"FRONT_ADDRESS\"] = system.resolve_address(os.environ.get(\"HOST_FRONT\", \"front\"))\n \n if \"HOST_REDIS\" not in os.environ:\n os.environ[\"REDIS_ADDRESS\"] = system.resolve_address(os.environ.get(\"HOST_REDIS\", \"redis\"))\n \n+if os.environ.get(\"ANTIVIRUS\") == 'clamav':\n+ os.environ[\"ANTIVIRUS_ADDRESS\"] = system.resolve_address(os.environ.get(\"HOST_ANTIVIRUS\", \"antivirus:3310\"))\n \n for rspamd_file in glob.glob(\"/conf/*\"):\n conf.jinja(rspamd_file, os.environ, os.path.join(\"/etc/rspamd/local.d\", os.path.basename(rspamd_file)))\n", "issue": "Should `antispam` be hardcoded?\nI'm in a middle of setting up my own mailu instances, however I'm using namespaced container names (i.e. `mailu-admin`, `mailu-front` etc.). Everything works pretty well so far, however I have some problems with imap and antispam. After some investigation I noticed, that files `core/dovecot/conf/bin/ham` and `core/dovecot/conf/bin/spam` have antispam host hardcoded instead of using `HOST_ANTISPAM`. \r\n\r\nI'm happy to provide PR, but firstly want to be sure - is it in fact oversight, or is this by design?\nShould `antispam` be hardcoded?\nI'm in a middle of setting up my own mailu instances, however I'm using namespaced container names (i.e. `mailu-admin`, `mailu-front` etc.). Everything works pretty well so far, however I have some problems with imap and antispam. After some investigation I noticed, that files `core/dovecot/conf/bin/ham` and `core/dovecot/conf/bin/spam` have antispam host hardcoded instead of using `HOST_ANTISPAM`. 
\r\n\r\nI'm happy to provide PR, but firstly want to be sure - is it in fact oversight, or is this by design?\n", "before_files": [{"content": "#!/usr/bin/python3\n\nimport os\nimport glob\nimport multiprocessing\nimport logging as log\nimport sys\n\nfrom podop import run_server\nfrom socrate import system, conf\n\nlog.basicConfig(stream=sys.stderr, level=os.environ.get(\"LOG_LEVEL\", \"WARNING\"))\n\ndef start_podop():\n os.setuid(8)\n url = \"http://\" + os.environ[\"ADMIN_ADDRESS\"] + \"/internal/dovecot/\u00a7\"\n run_server(0, \"dovecot\", \"/tmp/podop.socket\", [\n\t\t(\"quota\", \"url\", url ),\n\t\t(\"auth\", \"url\", url),\n\t\t(\"sieve\", \"url\", url),\n ])\n\n# Actual startup script\nos.environ[\"FRONT_ADDRESS\"] = system.resolve_address(os.environ.get(\"HOST_FRONT\", \"front\"))\nos.environ[\"REDIS_ADDRESS\"] = system.resolve_address(os.environ.get(\"HOST_REDIS\", \"redis\"))\nos.environ[\"ADMIN_ADDRESS\"] = system.resolve_address(os.environ.get(\"HOST_ADMIN\", \"admin\"))\nif os.environ[\"WEBMAIL\"] != \"none\":\n os.environ[\"WEBMAIL_ADDRESS\"] = system.resolve_address(os.environ.get(\"HOST_WEBMAIL\", \"webmail\"))\n\nfor dovecot_file in glob.glob(\"/conf/*.conf\"):\n conf.jinja(dovecot_file, os.environ, os.path.join(\"/etc/dovecot\", os.path.basename(dovecot_file)))\n\n# Run Podop, then postfix\nmultiprocessing.Process(target=start_podop).start()\nos.system(\"chown mail:mail /mail\")\nos.system(\"chown -R mail:mail /var/lib/dovecot /conf\")\nos.execv(\"/usr/sbin/dovecot\", [\"dovecot\", \"-c\", \"/etc/dovecot/dovecot.conf\", \"-F\"])\n", "path": "core/dovecot/start.py"}, {"content": "#!/usr/bin/python3\n\nimport os\nimport glob\nimport logging as log\nimport sys\nfrom socrate import system, conf\n\nlog.basicConfig(stream=sys.stderr, level=os.environ.get(\"LOG_LEVEL\", \"WARNING\"))\n\n# Actual startup script\nos.environ[\"FRONT_ADDRESS\"] = system.resolve_address(os.environ.get(\"HOST_FRONT\", \"front\"))\n\nif \"HOST_REDIS\" not in os.environ:\n os.environ[\"REDIS_ADDRESS\"] = system.resolve_address(os.environ.get(\"HOST_REDIS\", \"redis\"))\n\n\nfor rspamd_file in glob.glob(\"/conf/*\"):\n conf.jinja(rspamd_file, os.environ, os.path.join(\"/etc/rspamd/local.d\", os.path.basename(rspamd_file)))\n\n# Run rspamd\nos.execv(\"/usr/sbin/rspamd\", [\"rspamd\", \"-i\", \"-f\"])\n", "path": "services/rspamd/start.py"}]}
1487
401
gh_patches_debug_15349
rasdani/github-patches
git_diff
hpcaitech__ColossalAI-4248
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [tensor] fix some unittests [tensor] fix some unittests [tensor] fix some unittests </issue> <code> [start of applications/Chat/coati/models/base/actor.py] 1 from typing import Optional 2 3 import torch 4 import torch.nn as nn 5 6 from ..lora import LoRAModule 7 8 9 class Actor(LoRAModule): 10 """ 11 Actor model base class. 12 13 Args: 14 model (nn.Module): Actor Model. 15 lora_rank (int): LoRA rank. 16 lora_train_bias (str): LoRA bias training mode. 17 """ 18 19 def __init__(self, model: nn.Module, lora_rank: int = 0, lora_train_bias: str = 'none') -> None: 20 super().__init__(lora_rank=lora_rank, lora_train_bias=lora_train_bias) 21 self.model = model 22 self.convert_to_lora() 23 24 def forward(self, 25 input_ids: torch.LongTensor, 26 attention_mask: Optional[torch.Tensor] = None, 27 **model_kwargs, # HACK: `generate` method may pass more kwargs 28 ) -> torch.Tensor: 29 """Returns model output. 30 """ 31 output = self.model( 32 input_ids, 33 attention_mask=attention_mask, 34 **model_kwargs 35 ) 36 return output 37 [end of applications/Chat/coati/models/base/actor.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/applications/Chat/coati/models/base/actor.py b/applications/Chat/coati/models/base/actor.py --- a/applications/Chat/coati/models/base/actor.py +++ b/applications/Chat/coati/models/base/actor.py @@ -21,16 +21,13 @@ self.model = model self.convert_to_lora() - def forward(self, - input_ids: torch.LongTensor, - attention_mask: Optional[torch.Tensor] = None, - **model_kwargs, # HACK: `generate` method may pass more kwargs - ) -> torch.Tensor: + def forward( + self, + input_ids: torch.LongTensor, + attention_mask: Optional[torch.Tensor] = None, + **model_kwargs, # HACK: `generate` method may pass more kwargs + ) -> torch.Tensor: """Returns model output. """ - output = self.model( - input_ids, - attention_mask=attention_mask, - **model_kwargs - ) + output = self.model(input_ids, attention_mask=attention_mask, **model_kwargs) return output
{"golden_diff": "diff --git a/applications/Chat/coati/models/base/actor.py b/applications/Chat/coati/models/base/actor.py\n--- a/applications/Chat/coati/models/base/actor.py\n+++ b/applications/Chat/coati/models/base/actor.py\n@@ -21,16 +21,13 @@\n self.model = model\n self.convert_to_lora()\n \n- def forward(self,\n- input_ids: torch.LongTensor,\n- attention_mask: Optional[torch.Tensor] = None,\n- **model_kwargs, # HACK: `generate` method may pass more kwargs\n- ) -> torch.Tensor:\n+ def forward(\n+ self,\n+ input_ids: torch.LongTensor,\n+ attention_mask: Optional[torch.Tensor] = None,\n+ **model_kwargs, # HACK: `generate` method may pass more kwargs\n+ ) -> torch.Tensor:\n \"\"\"Returns model output.\n \"\"\"\n- output = self.model(\n- input_ids,\n- attention_mask=attention_mask,\n- **model_kwargs\n- )\n+ output = self.model(input_ids, attention_mask=attention_mask, **model_kwargs)\n return output\n", "issue": "[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n", "before_files": [{"content": "from typing import Optional\n\nimport torch\nimport torch.nn as nn\n\nfrom ..lora import LoRAModule\n\n\nclass Actor(LoRAModule):\n \"\"\"\n Actor model base class.\n\n Args:\n model (nn.Module): Actor Model.\n lora_rank (int): LoRA rank.\n lora_train_bias (str): LoRA bias training mode.\n \"\"\"\n\n def __init__(self, model: nn.Module, lora_rank: int = 0, lora_train_bias: str = 'none') -> None:\n super().__init__(lora_rank=lora_rank, lora_train_bias=lora_train_bias)\n self.model = model\n self.convert_to_lora()\n\n def forward(self,\n input_ids: torch.LongTensor,\n attention_mask: Optional[torch.Tensor] = None,\n **model_kwargs, # HACK: `generate` method may pass more kwargs\n ) -> torch.Tensor:\n \"\"\"Returns model output.\n \"\"\"\n output = self.model(\n input_ids,\n attention_mask=attention_mask,\n **model_kwargs\n )\n return output\n", "path": "applications/Chat/coati/models/base/actor.py"}]}
871
255
gh_patches_debug_31496
rasdani/github-patches
git_diff
cisagov__manage.get.gov-247
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Save a new domain application on form submission _Outcome_: On "save" press, save any form changes to the Application model. Relies on #221 </issue> <code> [start of src/registrar/forms/application_wizard.py] 1 """Forms Wizard for creating a new domain application.""" 2 3 import logging 4 5 from django import forms 6 7 from django.contrib.auth.mixins import LoginRequiredMixin 8 9 from formtools.wizard.views import NamedUrlSessionWizardView # type: ignore 10 11 12 logger = logging.getLogger(__name__) 13 14 15 class OrganizationForm(forms.Form): 16 organization_type = forms.ChoiceField( 17 required=True, 18 choices=[ 19 ("Federal", "Federal: a federal agency"), 20 ("Interstate", "Interstate: an organization of two or more states"), 21 ( 22 "State_or_Territory", 23 ( 24 "State or Territory: One of the 50 U.S. states, the District of " 25 "Columbia, American Samoa, Guam, Northern Mariana Islands, " 26 "Puerto Rico, or the U.S. Virgin Islands" 27 ), 28 ), 29 ( 30 "Tribal", 31 ( 32 "Tribal: a tribal government recognized by the federal or " 33 "state government" 34 ), 35 ), 36 ("County", "County: a county, parish, or borough"), 37 ("City", "City: a city, town, township, village, etc."), 38 ( 39 "Special_District", 40 "Special District: an independent organization within a single state", 41 ), 42 ], 43 widget=forms.RadioSelect, 44 ) 45 federal_type = forms.ChoiceField( 46 required=False, 47 choices=[ 48 ("Executive", "Executive"), 49 ("Judicial", "Judicial"), 50 ("Legislative", "Legislative"), 51 ], 52 widget=forms.RadioSelect, 53 ) 54 is_election_board = forms.ChoiceField( 55 required=False, 56 choices=[ 57 ("Yes", "Yes"), 58 ("No", "No"), 59 ], 60 widget=forms.RadioSelect, 61 ) 62 63 64 class ContactForm(forms.Form): 65 organization_name = forms.CharField(label="Organization Name") 66 street_address = forms.CharField(label="Street address") 67 68 69 # List of forms in our wizard. Each entry is a tuple of a name and a form 70 # subclass 71 FORMS = [ 72 ("organization", OrganizationForm), 73 ("contact", ContactForm), 74 ] 75 76 # Dict to match up the right template with the right step. Keys here must 77 # match the first elements of the tuples in FORMS 78 TEMPLATES = { 79 "organization": "application_organization.html", 80 "contact": "application_contact.html", 81 } 82 83 # We need to pass our page titles as context to the templates, indexed 84 # by the step names 85 TITLES = { 86 "organization": "About your organization", 87 "contact": "Your organization's contact information", 88 } 89 90 91 class ApplicationWizard(LoginRequiredMixin, NamedUrlSessionWizardView): 92 93 """Multi-page form ("wizard") for new domain applications. 94 95 This sets up a sequence of forms that gather information for new 96 domain applications. Each form in the sequence has its own URL and 97 the progress through the form is stored in the Django session (thus 98 "NamedUrlSessionWizardView"). 99 """ 100 101 form_list = FORMS 102 103 def get_template_names(self): 104 """Template for the current step. 105 106 The return is a singleton list. 
107 """ 108 return [TEMPLATES[self.steps.current]] 109 110 def get_context_data(self, form, **kwargs): 111 """Add title information to the context for all steps.""" 112 context = super().get_context_data(form=form, **kwargs) 113 context["form_titles"] = TITLES 114 return context 115 116 def done(self, form_list, **kwargs): 117 logger.info("Application form submitted.") 118 [end of src/registrar/forms/application_wizard.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/registrar/forms/application_wizard.py b/src/registrar/forms/application_wizard.py --- a/src/registrar/forms/application_wizard.py +++ b/src/registrar/forms/application_wizard.py @@ -3,11 +3,14 @@ import logging from django import forms +from django.shortcuts import redirect from django.contrib.auth.mixins import LoginRequiredMixin from formtools.wizard.views import NamedUrlSessionWizardView # type: ignore +from registrar.models import DomainApplication, Website + logger = logging.getLogger(__name__) @@ -113,5 +116,33 @@ context["form_titles"] = TITLES return context - def done(self, form_list, **kwargs): - logger.info("Application form submitted.") + def forms_to_object(self, form_dict: dict) -> DomainApplication: + """Unpack the form responses onto the model object properties.""" + application = DomainApplication.objects.create(creator=self.request.user) + + # organization information + organization_data = form_dict["organization"].cleaned_data + application.organization_type = organization_data["organization_type"] + application.federal_branch = organization_data["federal_type"] + application.is_election_office = organization_data["is_election_board"] + + # contact information + contact_data = form_dict["contact"].cleaned_data + application.organization_name = contact_data["organization_name"] + application.street_address = contact_data["street_address"] + # TODO: add the rest of these fields when they are created in the forms + + # This isn't really the requested_domain field + # but we need something in this field to make the form submittable + requested_site, _ = Website.objects.get_or_create( + website=contact_data["organization_name"] + ".gov" + ) + application.requested_domain = requested_site + return application + + def done(self, form_list, form_dict, **kwargs): + application = self.forms_to_object(form_dict) + application.submit() # change the status to submitted + application.save() + logger.debug("Application object saved:", application.id) + return redirect("home")
{"golden_diff": "diff --git a/src/registrar/forms/application_wizard.py b/src/registrar/forms/application_wizard.py\n--- a/src/registrar/forms/application_wizard.py\n+++ b/src/registrar/forms/application_wizard.py\n@@ -3,11 +3,14 @@\n import logging\n \n from django import forms\n+from django.shortcuts import redirect\n \n from django.contrib.auth.mixins import LoginRequiredMixin\n \n from formtools.wizard.views import NamedUrlSessionWizardView # type: ignore\n \n+from registrar.models import DomainApplication, Website\n+\n \n logger = logging.getLogger(__name__)\n \n@@ -113,5 +116,33 @@\n context[\"form_titles\"] = TITLES\n return context\n \n- def done(self, form_list, **kwargs):\n- logger.info(\"Application form submitted.\")\n+ def forms_to_object(self, form_dict: dict) -> DomainApplication:\n+ \"\"\"Unpack the form responses onto the model object properties.\"\"\"\n+ application = DomainApplication.objects.create(creator=self.request.user)\n+\n+ # organization information\n+ organization_data = form_dict[\"organization\"].cleaned_data\n+ application.organization_type = organization_data[\"organization_type\"]\n+ application.federal_branch = organization_data[\"federal_type\"]\n+ application.is_election_office = organization_data[\"is_election_board\"]\n+\n+ # contact information\n+ contact_data = form_dict[\"contact\"].cleaned_data\n+ application.organization_name = contact_data[\"organization_name\"]\n+ application.street_address = contact_data[\"street_address\"]\n+ # TODO: add the rest of these fields when they are created in the forms\n+\n+ # This isn't really the requested_domain field\n+ # but we need something in this field to make the form submittable\n+ requested_site, _ = Website.objects.get_or_create(\n+ website=contact_data[\"organization_name\"] + \".gov\"\n+ )\n+ application.requested_domain = requested_site\n+ return application\n+\n+ def done(self, form_list, form_dict, **kwargs):\n+ application = self.forms_to_object(form_dict)\n+ application.submit() # change the status to submitted\n+ application.save()\n+ logger.debug(\"Application object saved:\", application.id)\n+ return redirect(\"home\")\n", "issue": "Save a new domain application on form submission\n_Outcome_: On \"save\" press, save any form changes to the Application model. \n\nRelies on #221 \n", "before_files": [{"content": "\"\"\"Forms Wizard for creating a new domain application.\"\"\"\n\nimport logging\n\nfrom django import forms\n\nfrom django.contrib.auth.mixins import LoginRequiredMixin\n\nfrom formtools.wizard.views import NamedUrlSessionWizardView # type: ignore\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass OrganizationForm(forms.Form):\n organization_type = forms.ChoiceField(\n required=True,\n choices=[\n (\"Federal\", \"Federal: a federal agency\"),\n (\"Interstate\", \"Interstate: an organization of two or more states\"),\n (\n \"State_or_Territory\",\n (\n \"State or Territory: One of the 50 U.S. states, the District of \"\n \"Columbia, American Samoa, Guam, Northern Mariana Islands, \"\n \"Puerto Rico, or the U.S. 
Virgin Islands\"\n ),\n ),\n (\n \"Tribal\",\n (\n \"Tribal: a tribal government recognized by the federal or \"\n \"state government\"\n ),\n ),\n (\"County\", \"County: a county, parish, or borough\"),\n (\"City\", \"City: a city, town, township, village, etc.\"),\n (\n \"Special_District\",\n \"Special District: an independent organization within a single state\",\n ),\n ],\n widget=forms.RadioSelect,\n )\n federal_type = forms.ChoiceField(\n required=False,\n choices=[\n (\"Executive\", \"Executive\"),\n (\"Judicial\", \"Judicial\"),\n (\"Legislative\", \"Legislative\"),\n ],\n widget=forms.RadioSelect,\n )\n is_election_board = forms.ChoiceField(\n required=False,\n choices=[\n (\"Yes\", \"Yes\"),\n (\"No\", \"No\"),\n ],\n widget=forms.RadioSelect,\n )\n\n\nclass ContactForm(forms.Form):\n organization_name = forms.CharField(label=\"Organization Name\")\n street_address = forms.CharField(label=\"Street address\")\n\n\n# List of forms in our wizard. Each entry is a tuple of a name and a form\n# subclass\nFORMS = [\n (\"organization\", OrganizationForm),\n (\"contact\", ContactForm),\n]\n\n# Dict to match up the right template with the right step. Keys here must\n# match the first elements of the tuples in FORMS\nTEMPLATES = {\n \"organization\": \"application_organization.html\",\n \"contact\": \"application_contact.html\",\n}\n\n# We need to pass our page titles as context to the templates, indexed\n# by the step names\nTITLES = {\n \"organization\": \"About your organization\",\n \"contact\": \"Your organization's contact information\",\n}\n\n\nclass ApplicationWizard(LoginRequiredMixin, NamedUrlSessionWizardView):\n\n \"\"\"Multi-page form (\"wizard\") for new domain applications.\n\n This sets up a sequence of forms that gather information for new\n domain applications. Each form in the sequence has its own URL and\n the progress through the form is stored in the Django session (thus\n \"NamedUrlSessionWizardView\").\n \"\"\"\n\n form_list = FORMS\n\n def get_template_names(self):\n \"\"\"Template for the current step.\n\n The return is a singleton list.\n \"\"\"\n return [TEMPLATES[self.steps.current]]\n\n def get_context_data(self, form, **kwargs):\n \"\"\"Add title information to the context for all steps.\"\"\"\n context = super().get_context_data(form=form, **kwargs)\n context[\"form_titles\"] = TITLES\n return context\n\n def done(self, form_list, **kwargs):\n logger.info(\"Application form submitted.\")\n", "path": "src/registrar/forms/application_wizard.py"}]}
1,577
482
gh_patches_debug_38213
rasdani/github-patches
git_diff
mlflow__mlflow-7757
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [FR] Allow override of gunicorn app entrypoint for MLflow server ### Willingness to contribute Yes. I can contribute this feature independently. ### Proposal Summary The MLflow server executes gunicorn with a hard-coded value `mlflow.server:app` ([reference](https://github.com/mlflow/mlflow/blob/master/mlflow/server/__init__.py#L106)) gunicorn application cli parameter. This prevents easy customization of the MLflow server Flask app i.e. adding `bofore_request` hooks. ### Motivation > #### What is the use case for this feature? The goal is adding a `before_request` hook to perform authorization by checking JWT authenication header provided by an AWS application load balancer > #### Why is this use case valuable to support for MLflow users in general? In order to allow extra flexibility for self-hosted MLflow users to customize the mlflow Flask server with pre/post request hooks > #### Why is this use case valuable to support for your project(s) or organization? This would simplify the logic for authorization of users of the MLflow server > #### Why is it currently difficult to achieve this use case? Because MLflow server doesn't natively allow custom pre/post process request hooks for the tracking webserver component ### Details It would be desirable that the aforementioned gunicorn app entrypoint can be overridden by a custom value (either via --gunicorn-opts or an additional `mlflow server` cli option) Ideally one would be able to write a custom script as this: ```python # customapp.py from flask import request from mlflow.server import app @app.before_request def hook(): print(request.path) ``` and run: `mlflow server ... --gunicorn-app customapp:app` ### What component(s) does this bug affect? - [ ] `area/artifacts`: Artifact stores and artifact logging - [ ] `area/build`: Build and test infrastructure for MLflow - [ ] `area/docs`: MLflow documentation pages - [ ] `area/examples`: Example code - [ ] `area/model-registry`: Model Registry service, APIs, and the fluent client calls for Model Registry - [ ] `area/models`: MLmodel format, model serialization/deserialization, flavors - [ ] `area/recipes`: Recipes, Recipe APIs, Recipe configs, Recipe Templates - [ ] `area/projects`: MLproject format, project running backends - [ ] `area/scoring`: MLflow Model server, model deployment tools, Spark UDFs - [ ] `area/server-infra`: MLflow Tracking server backend - [X] `area/tracking`: Tracking Service, tracking client APIs, autologging ### What interface(s) does this bug affect? - [ ] `area/uiux`: Front-end, user experience, plotting, JavaScript, JavaScript dev server - [ ] `area/docker`: Docker use across MLflow's components, such as MLflow Projects and MLflow Models - [ ] `area/sqlalchemy`: Use of SQLAlchemy in the Tracking Service or Model Registry - [ ] `area/windows`: Windows support ### What language(s) does this bug affect? - [ ] `language/r`: R APIs and clients - [ ] `language/java`: Java APIs and clients - [ ] `language/new`: Proposals for new client languages ### What integration(s) does this bug affect? 
- [ ] `integrations/azure`: Azure and Azure ML integrations - [ ] `integrations/sagemaker`: SageMaker integrations - [ ] `integrations/databricks`: Databricks integrations </issue> <code> [start of mlflow/server/__init__.py] 1 import os 2 import shlex 3 import sys 4 import textwrap 5 6 from flask import Flask, send_from_directory, Response 7 8 from mlflow.server import handlers 9 from mlflow.server.handlers import ( 10 get_artifact_handler, 11 get_metric_history_bulk_handler, 12 STATIC_PREFIX_ENV_VAR, 13 _add_static_prefix, 14 get_model_version_artifact_handler, 15 ) 16 from mlflow.utils.process import _exec_cmd 17 from mlflow.version import VERSION 18 19 # NB: These are internal environment variables used for communication between 20 # the cli and the forked gunicorn processes. 21 BACKEND_STORE_URI_ENV_VAR = "_MLFLOW_SERVER_FILE_STORE" 22 REGISTRY_STORE_URI_ENV_VAR = "_MLFLOW_SERVER_REGISTRY_STORE" 23 ARTIFACT_ROOT_ENV_VAR = "_MLFLOW_SERVER_ARTIFACT_ROOT" 24 ARTIFACTS_DESTINATION_ENV_VAR = "_MLFLOW_SERVER_ARTIFACT_DESTINATION" 25 PROMETHEUS_EXPORTER_ENV_VAR = "prometheus_multiproc_dir" 26 SERVE_ARTIFACTS_ENV_VAR = "_MLFLOW_SERVER_SERVE_ARTIFACTS" 27 ARTIFACTS_ONLY_ENV_VAR = "_MLFLOW_SERVER_ARTIFACTS_ONLY" 28 29 REL_STATIC_DIR = "js/build" 30 31 app = Flask(__name__, static_folder=REL_STATIC_DIR) 32 STATIC_DIR = os.path.join(app.root_path, REL_STATIC_DIR) 33 34 35 for http_path, handler, methods in handlers.get_endpoints(): 36 app.add_url_rule(http_path, handler.__name__, handler, methods=methods) 37 38 if os.getenv(PROMETHEUS_EXPORTER_ENV_VAR): 39 from mlflow.server.prometheus_exporter import activate_prometheus_exporter 40 41 prometheus_metrics_path = os.getenv(PROMETHEUS_EXPORTER_ENV_VAR) 42 if not os.path.exists(prometheus_metrics_path): 43 os.makedirs(prometheus_metrics_path) 44 activate_prometheus_exporter(app) 45 46 47 # Provide a health check endpoint to ensure the application is responsive 48 @app.route("/health") 49 def health(): 50 return "OK", 200 51 52 53 # Provide an endpoint to query the version of mlflow running on the server 54 @app.route("/version") 55 def version(): 56 return VERSION, 200 57 58 59 # Serve the "get-artifact" route. 60 @app.route(_add_static_prefix("/get-artifact")) 61 def serve_artifacts(): 62 return get_artifact_handler() 63 64 65 # Serve the "model-versions/get-artifact" route. 66 @app.route(_add_static_prefix("/model-versions/get-artifact")) 67 def serve_model_version_artifact(): 68 return get_model_version_artifact_handler() 69 70 71 # Serve the "metrics/get-history-bulk" route. 72 @app.route(_add_static_prefix("/ajax-api/2.0/mlflow/metrics/get-history-bulk")) 73 def serve_get_metric_history_bulk(): 74 return get_metric_history_bulk_handler() 75 76 77 # We expect the react app to be built assuming it is hosted at /static-files, so that requests for 78 # CSS/JS resources will be made to e.g. /static-files/main.css and we can handle them here. 79 @app.route(_add_static_prefix("/static-files/<path:path>")) 80 def serve_static_file(path): 81 return send_from_directory(STATIC_DIR, path) 82 83 84 # Serve the index.html for the React App for all other routes. 85 @app.route(_add_static_prefix("/")) 86 def serve(): 87 if os.path.exists(os.path.join(STATIC_DIR, "index.html")): 88 return send_from_directory(STATIC_DIR, "index.html") 89 90 text = textwrap.dedent( 91 """ 92 Unable to display MLflow UI - landing page (index.html) not found. 93 94 You are very likely running the MLflow server using a source installation of the Python MLflow 95 package. 
96 97 If you are a developer making MLflow source code changes and intentionally running a source 98 installation of MLflow, you can view the UI by running the Javascript dev server: 99 https://github.com/mlflow/mlflow/blob/master/CONTRIBUTING.md#running-the-javascript-dev-server 100 101 Otherwise, uninstall MLflow via 'pip uninstall mlflow', reinstall an official MLflow release 102 from PyPI via 'pip install mlflow', and rerun the MLflow server. 103 """ 104 ) 105 return Response(text, mimetype="text/plain") 106 107 108 def _build_waitress_command(waitress_opts, host, port): 109 opts = shlex.split(waitress_opts) if waitress_opts else [] 110 return ( 111 ["waitress-serve"] 112 + opts 113 + ["--host=%s" % host, "--port=%s" % port, "--ident=mlflow", "mlflow.server:app"] 114 ) 115 116 117 def _build_gunicorn_command(gunicorn_opts, host, port, workers): 118 bind_address = f"{host}:{port}" 119 opts = shlex.split(gunicorn_opts) if gunicorn_opts else [] 120 return ["gunicorn"] + opts + ["-b", bind_address, "-w", "%s" % workers, "mlflow.server:app"] 121 122 123 def _run_server( 124 file_store_path, 125 registry_store_uri, 126 default_artifact_root, 127 serve_artifacts, 128 artifacts_only, 129 artifacts_destination, 130 host, 131 port, 132 static_prefix=None, 133 workers=None, 134 gunicorn_opts=None, 135 waitress_opts=None, 136 expose_prometheus=None, 137 ): 138 """ 139 Run the MLflow server, wrapping it in gunicorn or waitress on windows 140 :param static_prefix: If set, the index.html asset will be served from the path static_prefix. 141 If left None, the index.html asset will be served from the root path. 142 :return: None 143 """ 144 env_map = {} 145 if file_store_path: 146 env_map[BACKEND_STORE_URI_ENV_VAR] = file_store_path 147 if registry_store_uri: 148 env_map[REGISTRY_STORE_URI_ENV_VAR] = registry_store_uri 149 if default_artifact_root: 150 env_map[ARTIFACT_ROOT_ENV_VAR] = default_artifact_root 151 if serve_artifacts: 152 env_map[SERVE_ARTIFACTS_ENV_VAR] = "true" 153 if artifacts_only: 154 env_map[ARTIFACTS_ONLY_ENV_VAR] = "true" 155 if artifacts_destination: 156 env_map[ARTIFACTS_DESTINATION_ENV_VAR] = artifacts_destination 157 if static_prefix: 158 env_map[STATIC_PREFIX_ENV_VAR] = static_prefix 159 160 if expose_prometheus: 161 env_map[PROMETHEUS_EXPORTER_ENV_VAR] = expose_prometheus 162 163 # TODO: eventually may want waitress on non-win32 164 if sys.platform == "win32": 165 full_command = _build_waitress_command(waitress_opts, host, port) 166 else: 167 full_command = _build_gunicorn_command(gunicorn_opts, host, port, workers or 4) 168 _exec_cmd(full_command, extra_env=env_map, capture_output=False) 169 [end of mlflow/server/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mlflow/server/__init__.py b/mlflow/server/__init__.py --- a/mlflow/server/__init__.py +++ b/mlflow/server/__init__.py @@ -1,3 +1,4 @@ +import entrypoints import os import shlex import sys @@ -5,6 +6,7 @@ from flask import Flask, send_from_directory, Response +from mlflow.exceptions import MlflowException from mlflow.server import handlers from mlflow.server.handlers import ( get_artifact_handler, @@ -105,19 +107,37 @@ return Response(text, mimetype="text/plain") -def _build_waitress_command(waitress_opts, host, port): +def _get_app_name() -> str: + """Search for plugins for custom mlflow app, otherwise return default.""" + apps = list(entrypoints.get_group_all("mlflow.app")) + # Default, nothing installed + if len(apps) == 0: + return f"{__name__}:app" + # Cannot install more than one + if len(apps) > 1: + raise MlflowException( + "Multiple server plugins detected. " + "Only one server plugin may be installed. " + f"Detected plugins: {', '.join([f'{a.module_name}.{a.object_name}' for a in apps])}" + ) + # Has a plugin installed + plugin_app = apps[0] + return f"{plugin_app.module_name}:{plugin_app.object_name}" + + +def _build_waitress_command(waitress_opts, host, port, app_name): opts = shlex.split(waitress_opts) if waitress_opts else [] return ( ["waitress-serve"] + opts - + ["--host=%s" % host, "--port=%s" % port, "--ident=mlflow", "mlflow.server:app"] + + ["--host=%s" % host, "--port=%s" % port, "--ident=mlflow", app_name] ) -def _build_gunicorn_command(gunicorn_opts, host, port, workers): +def _build_gunicorn_command(gunicorn_opts, host, port, workers, app_name): bind_address = f"{host}:{port}" opts = shlex.split(gunicorn_opts) if gunicorn_opts else [] - return ["gunicorn"] + opts + ["-b", bind_address, "-w", "%s" % workers, "mlflow.server:app"] + return ["gunicorn"] + opts + ["-b", bind_address, "-w", "%s" % workers, app_name] def _run_server( @@ -160,9 +180,10 @@ if expose_prometheus: env_map[PROMETHEUS_EXPORTER_ENV_VAR] = expose_prometheus + app_name = _get_app_name() # TODO: eventually may want waitress on non-win32 if sys.platform == "win32": - full_command = _build_waitress_command(waitress_opts, host, port) + full_command = _build_waitress_command(waitress_opts, host, port, app_name) else: - full_command = _build_gunicorn_command(gunicorn_opts, host, port, workers or 4) + full_command = _build_gunicorn_command(gunicorn_opts, host, port, workers or 4, app_name) _exec_cmd(full_command, extra_env=env_map, capture_output=False)
{"golden_diff": "diff --git a/mlflow/server/__init__.py b/mlflow/server/__init__.py\n--- a/mlflow/server/__init__.py\n+++ b/mlflow/server/__init__.py\n@@ -1,3 +1,4 @@\n+import entrypoints\n import os\n import shlex\n import sys\n@@ -5,6 +6,7 @@\n \n from flask import Flask, send_from_directory, Response\n \n+from mlflow.exceptions import MlflowException\n from mlflow.server import handlers\n from mlflow.server.handlers import (\n get_artifact_handler,\n@@ -105,19 +107,37 @@\n return Response(text, mimetype=\"text/plain\")\n \n \n-def _build_waitress_command(waitress_opts, host, port):\n+def _get_app_name() -> str:\n+ \"\"\"Search for plugins for custom mlflow app, otherwise return default.\"\"\"\n+ apps = list(entrypoints.get_group_all(\"mlflow.app\"))\n+ # Default, nothing installed\n+ if len(apps) == 0:\n+ return f\"{__name__}:app\"\n+ # Cannot install more than one\n+ if len(apps) > 1:\n+ raise MlflowException(\n+ \"Multiple server plugins detected. \"\n+ \"Only one server plugin may be installed. \"\n+ f\"Detected plugins: {', '.join([f'{a.module_name}.{a.object_name}' for a in apps])}\"\n+ )\n+ # Has a plugin installed\n+ plugin_app = apps[0]\n+ return f\"{plugin_app.module_name}:{plugin_app.object_name}\"\n+\n+\n+def _build_waitress_command(waitress_opts, host, port, app_name):\n opts = shlex.split(waitress_opts) if waitress_opts else []\n return (\n [\"waitress-serve\"]\n + opts\n- + [\"--host=%s\" % host, \"--port=%s\" % port, \"--ident=mlflow\", \"mlflow.server:app\"]\n+ + [\"--host=%s\" % host, \"--port=%s\" % port, \"--ident=mlflow\", app_name]\n )\n \n \n-def _build_gunicorn_command(gunicorn_opts, host, port, workers):\n+def _build_gunicorn_command(gunicorn_opts, host, port, workers, app_name):\n bind_address = f\"{host}:{port}\"\n opts = shlex.split(gunicorn_opts) if gunicorn_opts else []\n- return [\"gunicorn\"] + opts + [\"-b\", bind_address, \"-w\", \"%s\" % workers, \"mlflow.server:app\"]\n+ return [\"gunicorn\"] + opts + [\"-b\", bind_address, \"-w\", \"%s\" % workers, app_name]\n \n \n def _run_server(\n@@ -160,9 +180,10 @@\n if expose_prometheus:\n env_map[PROMETHEUS_EXPORTER_ENV_VAR] = expose_prometheus\n \n+ app_name = _get_app_name()\n # TODO: eventually may want waitress on non-win32\n if sys.platform == \"win32\":\n- full_command = _build_waitress_command(waitress_opts, host, port)\n+ full_command = _build_waitress_command(waitress_opts, host, port, app_name)\n else:\n- full_command = _build_gunicorn_command(gunicorn_opts, host, port, workers or 4)\n+ full_command = _build_gunicorn_command(gunicorn_opts, host, port, workers or 4, app_name)\n _exec_cmd(full_command, extra_env=env_map, capture_output=False)\n", "issue": "[FR] Allow override of gunicorn app entrypoint for MLflow server\n### Willingness to contribute\r\n\r\nYes. I can contribute this feature independently.\r\n\r\n### Proposal Summary\r\n\r\nThe MLflow server executes gunicorn with a hard-coded value `mlflow.server:app` ([reference](https://github.com/mlflow/mlflow/blob/master/mlflow/server/__init__.py#L106)) gunicorn application cli parameter. This prevents easy customization of the MLflow server Flask app i.e. 
adding `bofore_request` hooks.\r\n\r\n### Motivation\r\n\r\n> #### What is the use case for this feature?\r\nThe goal is adding a `before_request` hook to perform authorization by checking JWT authenication header provided by an AWS application load balancer \r\n\r\n> #### Why is this use case valuable to support for MLflow users in general?\r\nIn order to allow extra flexibility for self-hosted MLflow users to customize the mlflow Flask server with pre/post request hooks \r\n\r\n> #### Why is this use case valuable to support for your project(s) or organization?\r\nThis would simplify the logic for authorization of users of the MLflow server\r\n\r\n> #### Why is it currently difficult to achieve this use case?\r\nBecause MLflow server doesn't natively allow custom pre/post process request hooks for the tracking webserver component\r\n\r\n\r\n### Details\r\n\r\nIt would be desirable that the aforementioned gunicorn app entrypoint can be overridden by a custom value (either via --gunicorn-opts or an additional `mlflow server` cli option)\r\n\r\nIdeally one would be able to write a custom script as this:\r\n\r\n```python\r\n# customapp.py\r\n\r\nfrom flask import request\r\nfrom mlflow.server import app\r\n\r\[email protected]_request\r\ndef hook():\r\n print(request.path)\r\n```\r\nand run:\r\n`mlflow server ... --gunicorn-app customapp:app`\r\n\r\n\r\n### What component(s) does this bug affect?\r\n\r\n- [ ] `area/artifacts`: Artifact stores and artifact logging\r\n- [ ] `area/build`: Build and test infrastructure for MLflow\r\n- [ ] `area/docs`: MLflow documentation pages\r\n- [ ] `area/examples`: Example code\r\n- [ ] `area/model-registry`: Model Registry service, APIs, and the fluent client calls for Model Registry\r\n- [ ] `area/models`: MLmodel format, model serialization/deserialization, flavors\r\n- [ ] `area/recipes`: Recipes, Recipe APIs, Recipe configs, Recipe Templates\r\n- [ ] `area/projects`: MLproject format, project running backends\r\n- [ ] `area/scoring`: MLflow Model server, model deployment tools, Spark UDFs\r\n- [ ] `area/server-infra`: MLflow Tracking server backend\r\n- [X] `area/tracking`: Tracking Service, tracking client APIs, autologging\r\n\r\n### What interface(s) does this bug affect?\r\n\r\n- [ ] `area/uiux`: Front-end, user experience, plotting, JavaScript, JavaScript dev server\r\n- [ ] `area/docker`: Docker use across MLflow's components, such as MLflow Projects and MLflow Models\r\n- [ ] `area/sqlalchemy`: Use of SQLAlchemy in the Tracking Service or Model Registry\r\n- [ ] `area/windows`: Windows support\r\n\r\n### What language(s) does this bug affect?\r\n\r\n- [ ] `language/r`: R APIs and clients\r\n- [ ] `language/java`: Java APIs and clients\r\n- [ ] `language/new`: Proposals for new client languages\r\n\r\n### What integration(s) does this bug affect?\r\n\r\n- [ ] `integrations/azure`: Azure and Azure ML integrations\r\n- [ ] `integrations/sagemaker`: SageMaker integrations\r\n- [ ] `integrations/databricks`: Databricks integrations\n", "before_files": [{"content": "import os\nimport shlex\nimport sys\nimport textwrap\n\nfrom flask import Flask, send_from_directory, Response\n\nfrom mlflow.server import handlers\nfrom mlflow.server.handlers import (\n get_artifact_handler,\n get_metric_history_bulk_handler,\n STATIC_PREFIX_ENV_VAR,\n _add_static_prefix,\n get_model_version_artifact_handler,\n)\nfrom mlflow.utils.process import _exec_cmd\nfrom mlflow.version import VERSION\n\n# NB: These are internal environment variables used for communication 
between\n# the cli and the forked gunicorn processes.\nBACKEND_STORE_URI_ENV_VAR = \"_MLFLOW_SERVER_FILE_STORE\"\nREGISTRY_STORE_URI_ENV_VAR = \"_MLFLOW_SERVER_REGISTRY_STORE\"\nARTIFACT_ROOT_ENV_VAR = \"_MLFLOW_SERVER_ARTIFACT_ROOT\"\nARTIFACTS_DESTINATION_ENV_VAR = \"_MLFLOW_SERVER_ARTIFACT_DESTINATION\"\nPROMETHEUS_EXPORTER_ENV_VAR = \"prometheus_multiproc_dir\"\nSERVE_ARTIFACTS_ENV_VAR = \"_MLFLOW_SERVER_SERVE_ARTIFACTS\"\nARTIFACTS_ONLY_ENV_VAR = \"_MLFLOW_SERVER_ARTIFACTS_ONLY\"\n\nREL_STATIC_DIR = \"js/build\"\n\napp = Flask(__name__, static_folder=REL_STATIC_DIR)\nSTATIC_DIR = os.path.join(app.root_path, REL_STATIC_DIR)\n\n\nfor http_path, handler, methods in handlers.get_endpoints():\n app.add_url_rule(http_path, handler.__name__, handler, methods=methods)\n\nif os.getenv(PROMETHEUS_EXPORTER_ENV_VAR):\n from mlflow.server.prometheus_exporter import activate_prometheus_exporter\n\n prometheus_metrics_path = os.getenv(PROMETHEUS_EXPORTER_ENV_VAR)\n if not os.path.exists(prometheus_metrics_path):\n os.makedirs(prometheus_metrics_path)\n activate_prometheus_exporter(app)\n\n\n# Provide a health check endpoint to ensure the application is responsive\[email protected](\"/health\")\ndef health():\n return \"OK\", 200\n\n\n# Provide an endpoint to query the version of mlflow running on the server\[email protected](\"/version\")\ndef version():\n return VERSION, 200\n\n\n# Serve the \"get-artifact\" route.\[email protected](_add_static_prefix(\"/get-artifact\"))\ndef serve_artifacts():\n return get_artifact_handler()\n\n\n# Serve the \"model-versions/get-artifact\" route.\[email protected](_add_static_prefix(\"/model-versions/get-artifact\"))\ndef serve_model_version_artifact():\n return get_model_version_artifact_handler()\n\n\n# Serve the \"metrics/get-history-bulk\" route.\[email protected](_add_static_prefix(\"/ajax-api/2.0/mlflow/metrics/get-history-bulk\"))\ndef serve_get_metric_history_bulk():\n return get_metric_history_bulk_handler()\n\n\n# We expect the react app to be built assuming it is hosted at /static-files, so that requests for\n# CSS/JS resources will be made to e.g. 
/static-files/main.css and we can handle them here.\[email protected](_add_static_prefix(\"/static-files/<path:path>\"))\ndef serve_static_file(path):\n return send_from_directory(STATIC_DIR, path)\n\n\n# Serve the index.html for the React App for all other routes.\[email protected](_add_static_prefix(\"/\"))\ndef serve():\n if os.path.exists(os.path.join(STATIC_DIR, \"index.html\")):\n return send_from_directory(STATIC_DIR, \"index.html\")\n\n text = textwrap.dedent(\n \"\"\"\n Unable to display MLflow UI - landing page (index.html) not found.\n\n You are very likely running the MLflow server using a source installation of the Python MLflow\n package.\n\n If you are a developer making MLflow source code changes and intentionally running a source\n installation of MLflow, you can view the UI by running the Javascript dev server:\n https://github.com/mlflow/mlflow/blob/master/CONTRIBUTING.md#running-the-javascript-dev-server\n\n Otherwise, uninstall MLflow via 'pip uninstall mlflow', reinstall an official MLflow release\n from PyPI via 'pip install mlflow', and rerun the MLflow server.\n \"\"\"\n )\n return Response(text, mimetype=\"text/plain\")\n\n\ndef _build_waitress_command(waitress_opts, host, port):\n opts = shlex.split(waitress_opts) if waitress_opts else []\n return (\n [\"waitress-serve\"]\n + opts\n + [\"--host=%s\" % host, \"--port=%s\" % port, \"--ident=mlflow\", \"mlflow.server:app\"]\n )\n\n\ndef _build_gunicorn_command(gunicorn_opts, host, port, workers):\n bind_address = f\"{host}:{port}\"\n opts = shlex.split(gunicorn_opts) if gunicorn_opts else []\n return [\"gunicorn\"] + opts + [\"-b\", bind_address, \"-w\", \"%s\" % workers, \"mlflow.server:app\"]\n\n\ndef _run_server(\n file_store_path,\n registry_store_uri,\n default_artifact_root,\n serve_artifacts,\n artifacts_only,\n artifacts_destination,\n host,\n port,\n static_prefix=None,\n workers=None,\n gunicorn_opts=None,\n waitress_opts=None,\n expose_prometheus=None,\n):\n \"\"\"\n Run the MLflow server, wrapping it in gunicorn or waitress on windows\n :param static_prefix: If set, the index.html asset will be served from the path static_prefix.\n If left None, the index.html asset will be served from the root path.\n :return: None\n \"\"\"\n env_map = {}\n if file_store_path:\n env_map[BACKEND_STORE_URI_ENV_VAR] = file_store_path\n if registry_store_uri:\n env_map[REGISTRY_STORE_URI_ENV_VAR] = registry_store_uri\n if default_artifact_root:\n env_map[ARTIFACT_ROOT_ENV_VAR] = default_artifact_root\n if serve_artifacts:\n env_map[SERVE_ARTIFACTS_ENV_VAR] = \"true\"\n if artifacts_only:\n env_map[ARTIFACTS_ONLY_ENV_VAR] = \"true\"\n if artifacts_destination:\n env_map[ARTIFACTS_DESTINATION_ENV_VAR] = artifacts_destination\n if static_prefix:\n env_map[STATIC_PREFIX_ENV_VAR] = static_prefix\n\n if expose_prometheus:\n env_map[PROMETHEUS_EXPORTER_ENV_VAR] = expose_prometheus\n\n # TODO: eventually may want waitress on non-win32\n if sys.platform == \"win32\":\n full_command = _build_waitress_command(waitress_opts, host, port)\n else:\n full_command = _build_gunicorn_command(gunicorn_opts, host, port, workers or 4)\n _exec_cmd(full_command, extra_env=env_map, capture_output=False)\n", "path": "mlflow/server/__init__.py"}]}
3,150
768
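The golden diff above answers the feature request not with a new CLI flag but by letting a separately installed package register the WSGI app under an `mlflow.app` entry-point group, which `_get_app_name()` then hands to gunicorn or waitress. Below is a minimal sketch of such a plugin; the package/module name `mlflow_jwt_app` is invented for illustration, and the hook body is a placeholder rather than real authorization logic.

```
# mlflow_jwt_app/__init__.py -- hypothetical plugin module
from flask import request

# Reuse the stock MLflow Flask app (all existing routes and handlers) and extend it.
from mlflow.server import app


@app.before_request
def _check_alb_jwt():
    # Placeholder only: a real hook would validate the ALB-injected JWT header
    # here and reject the request on failure.
    print("authorizing request for", request.path)


# The plugin's packaging metadata registers the app so _get_app_name() can find it,
# e.g. in the plugin package's setup.cfg:
#
#   [options.entry_points]
#   mlflow.app =
#       mlflow_jwt_app = mlflow_jwt_app:app
```

Because the diff raises an error when more than one `mlflow.app` entry point is installed, only a single plugin of this kind can be active in an environment at a time.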
gh_patches_debug_5444
rasdani/github-patches
git_diff
strawberry-graphql__strawberry-1726
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> `dacite` library doesn't compatible after 0.69.0 strawberry-graphql. (unhasable type 'StrawberryOptional' problem) I have a regression issue with StrawberryOptional (breaking changes in 0.69.0) When I use dacite library to convert dict to strawberry.type class instance, it works in 0.67.x version. ``` from dacite import from_dict @strawberry.type class Tag: tag_id: int = None category: str = None images: List[str] = None @strawberry.field() def image(self, info: Info) -> Optional[str]: ... from_dict(data_class=Tag, data=dict(tag_id=34, category="song", images=["a", "b"])) // same with Tag(tag_id=34, category="song", images=["a", "b"]) ``` `dacite.from_dict` method convert `dict(**attributes)` to `Tag(tag_id=~, ...)` automatically. It makes ORM model instance <-> Strawberry type instance conversion easily. After upgrade, it occur an error unhashable type: 'StrawberryOptional' because of Optional[str] type. Can we make `StrawberryOptional`, `StrawberryList` to be hashable type to use dacite.from_dict ? Or is there any alternative way to convert dict to strawberry.type instance? </issue> <code> [start of strawberry/type.py] 1 from __future__ import annotations 2 3 from abc import ABC, abstractmethod 4 from typing import TYPE_CHECKING, List, Mapping, TypeVar, Union 5 6 7 if TYPE_CHECKING: 8 from .types.types import TypeDefinition 9 10 11 class StrawberryType(ABC): 12 @property 13 def type_params(self) -> List[TypeVar]: 14 return [] 15 16 @abstractmethod 17 def copy_with( 18 self, type_var_map: Mapping[TypeVar, Union[StrawberryType, type]] 19 ) -> Union[StrawberryType, type]: 20 raise NotImplementedError() 21 22 @property 23 @abstractmethod 24 def is_generic(self) -> bool: 25 raise NotImplementedError() 26 27 def __eq__(self, other: object) -> bool: 28 from strawberry.annotation import StrawberryAnnotation 29 30 if isinstance(other, StrawberryType): 31 return self is other 32 33 elif isinstance(other, StrawberryAnnotation): 34 return self == other.resolve() 35 36 else: 37 # This could be simplified if StrawberryAnnotation.resolve() always returned 38 # a StrawberryType 39 resolved = StrawberryAnnotation(other).resolve() 40 if isinstance(resolved, StrawberryType): 41 return self == resolved 42 else: 43 return NotImplemented 44 45 def __hash__(self) -> int: 46 # TODO: Is this a bad idea? 
__eq__ objects are supposed to have the same hash 47 return id(self) 48 49 50 class StrawberryContainer(StrawberryType): 51 def __init__(self, of_type: Union[StrawberryType, type]): 52 self.of_type = of_type 53 54 def __eq__(self, other: object) -> bool: 55 if isinstance(other, StrawberryType): 56 if isinstance(other, StrawberryContainer): 57 return self.of_type == other.of_type 58 else: 59 return False 60 61 return super().__eq__(other) 62 63 @property 64 def type_params(self) -> List[TypeVar]: 65 if hasattr(self.of_type, "_type_definition"): 66 parameters = getattr(self.of_type, "__parameters__", None) 67 68 return list(parameters) if parameters else [] 69 70 elif isinstance(self.of_type, StrawberryType): 71 return self.of_type.type_params 72 73 else: 74 return [] 75 76 def copy_with( 77 self, type_var_map: Mapping[TypeVar, Union[StrawberryType, type]] 78 ) -> StrawberryType: 79 of_type_copy: Union[StrawberryType, type] 80 81 # TODO: Obsolete with StrawberryObject 82 if hasattr(self.of_type, "_type_definition"): 83 type_definition: TypeDefinition = ( 84 self.of_type._type_definition # type: ignore 85 ) 86 87 if type_definition.is_generic: 88 of_type_copy = type_definition.copy_with(type_var_map) 89 90 elif isinstance(self.of_type, StrawberryType) and self.of_type.is_generic: 91 of_type_copy = self.of_type.copy_with(type_var_map) 92 93 assert of_type_copy 94 95 return type(self)(of_type_copy) 96 97 @property 98 def is_generic(self) -> bool: 99 # TODO: Obsolete with StrawberryObject 100 type_ = self.of_type 101 if hasattr(self.of_type, "_type_definition"): 102 type_ = self.of_type._type_definition # type: ignore 103 104 if isinstance(type_, StrawberryType): 105 return type_.is_generic 106 107 return False 108 109 110 class StrawberryList(StrawberryContainer): 111 ... 112 113 114 class StrawberryOptional(StrawberryContainer): 115 ... 116 117 118 class StrawberryTypeVar(StrawberryType): 119 def __init__(self, type_var: TypeVar): 120 self.type_var = type_var 121 122 def copy_with( 123 self, type_var_map: Mapping[TypeVar, Union[StrawberryType, type]] 124 ) -> Union[StrawberryType, type]: 125 return type_var_map[self.type_var] 126 127 @property 128 def is_generic(self) -> bool: 129 return True 130 131 @property 132 def type_params(self) -> List[TypeVar]: 133 return [self.type_var] 134 135 def __eq__(self, other) -> bool: 136 if isinstance(other, StrawberryTypeVar): 137 return self.type_var == other.type_var 138 if isinstance(other, TypeVar): 139 return self.type_var == other 140 141 return super().__eq__(other) 142 [end of strawberry/type.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/strawberry/type.py b/strawberry/type.py --- a/strawberry/type.py +++ b/strawberry/type.py @@ -51,6 +51,9 @@ def __init__(self, of_type: Union[StrawberryType, type]): self.of_type = of_type + def __hash__(self) -> int: + return hash((self.__class__, self.of_type)) + def __eq__(self, other: object) -> bool: if isinstance(other, StrawberryType): if isinstance(other, StrawberryContainer):
{"golden_diff": "diff --git a/strawberry/type.py b/strawberry/type.py\n--- a/strawberry/type.py\n+++ b/strawberry/type.py\n@@ -51,6 +51,9 @@\n def __init__(self, of_type: Union[StrawberryType, type]):\n self.of_type = of_type\n \n+ def __hash__(self) -> int:\n+ return hash((self.__class__, self.of_type))\n+\n def __eq__(self, other: object) -> bool:\n if isinstance(other, StrawberryType):\n if isinstance(other, StrawberryContainer):\n", "issue": "`dacite` library doesn't compatible after 0.69.0 strawberry-graphql. (unhasable type 'StrawberryOptional' problem)\nI have a regression issue with StrawberryOptional (breaking changes in 0.69.0) \r\nWhen I use dacite library to convert dict to strawberry.type class instance, it works in 0.67.x version. \r\n\r\n```\r\nfrom dacite import from_dict\r\n\r\[email protected]\r\nclass Tag:\r\n tag_id: int = None\r\n category: str = None\r\n images: List[str] = None\r\n\r\n @strawberry.field()\r\n def image(self, info: Info) -> Optional[str]:\r\n ...\r\n\r\nfrom_dict(data_class=Tag, data=dict(tag_id=34, category=\"song\", images=[\"a\", \"b\"]))\r\n// same with Tag(tag_id=34, category=\"song\", images=[\"a\", \"b\"]) \r\n```\r\n\r\n`dacite.from_dict` method convert `dict(**attributes)` to `Tag(tag_id=~, ...)` automatically. \r\nIt makes ORM model instance <-> Strawberry type instance conversion easily. \r\n\r\nAfter upgrade, it occur an error unhashable type: 'StrawberryOptional' because of Optional[str] type. \r\n\r\nCan we make `StrawberryOptional`, `StrawberryList` to be hashable type to use dacite.from_dict ? \r\nOr is there any alternative way to convert dict to strawberry.type instance?\r\n\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom abc import ABC, abstractmethod\nfrom typing import TYPE_CHECKING, List, Mapping, TypeVar, Union\n\n\nif TYPE_CHECKING:\n from .types.types import TypeDefinition\n\n\nclass StrawberryType(ABC):\n @property\n def type_params(self) -> List[TypeVar]:\n return []\n\n @abstractmethod\n def copy_with(\n self, type_var_map: Mapping[TypeVar, Union[StrawberryType, type]]\n ) -> Union[StrawberryType, type]:\n raise NotImplementedError()\n\n @property\n @abstractmethod\n def is_generic(self) -> bool:\n raise NotImplementedError()\n\n def __eq__(self, other: object) -> bool:\n from strawberry.annotation import StrawberryAnnotation\n\n if isinstance(other, StrawberryType):\n return self is other\n\n elif isinstance(other, StrawberryAnnotation):\n return self == other.resolve()\n\n else:\n # This could be simplified if StrawberryAnnotation.resolve() always returned\n # a StrawberryType\n resolved = StrawberryAnnotation(other).resolve()\n if isinstance(resolved, StrawberryType):\n return self == resolved\n else:\n return NotImplemented\n\n def __hash__(self) -> int:\n # TODO: Is this a bad idea? 
__eq__ objects are supposed to have the same hash\n return id(self)\n\n\nclass StrawberryContainer(StrawberryType):\n def __init__(self, of_type: Union[StrawberryType, type]):\n self.of_type = of_type\n\n def __eq__(self, other: object) -> bool:\n if isinstance(other, StrawberryType):\n if isinstance(other, StrawberryContainer):\n return self.of_type == other.of_type\n else:\n return False\n\n return super().__eq__(other)\n\n @property\n def type_params(self) -> List[TypeVar]:\n if hasattr(self.of_type, \"_type_definition\"):\n parameters = getattr(self.of_type, \"__parameters__\", None)\n\n return list(parameters) if parameters else []\n\n elif isinstance(self.of_type, StrawberryType):\n return self.of_type.type_params\n\n else:\n return []\n\n def copy_with(\n self, type_var_map: Mapping[TypeVar, Union[StrawberryType, type]]\n ) -> StrawberryType:\n of_type_copy: Union[StrawberryType, type]\n\n # TODO: Obsolete with StrawberryObject\n if hasattr(self.of_type, \"_type_definition\"):\n type_definition: TypeDefinition = (\n self.of_type._type_definition # type: ignore\n )\n\n if type_definition.is_generic:\n of_type_copy = type_definition.copy_with(type_var_map)\n\n elif isinstance(self.of_type, StrawberryType) and self.of_type.is_generic:\n of_type_copy = self.of_type.copy_with(type_var_map)\n\n assert of_type_copy\n\n return type(self)(of_type_copy)\n\n @property\n def is_generic(self) -> bool:\n # TODO: Obsolete with StrawberryObject\n type_ = self.of_type\n if hasattr(self.of_type, \"_type_definition\"):\n type_ = self.of_type._type_definition # type: ignore\n\n if isinstance(type_, StrawberryType):\n return type_.is_generic\n\n return False\n\n\nclass StrawberryList(StrawberryContainer):\n ...\n\n\nclass StrawberryOptional(StrawberryContainer):\n ...\n\n\nclass StrawberryTypeVar(StrawberryType):\n def __init__(self, type_var: TypeVar):\n self.type_var = type_var\n\n def copy_with(\n self, type_var_map: Mapping[TypeVar, Union[StrawberryType, type]]\n ) -> Union[StrawberryType, type]:\n return type_var_map[self.type_var]\n\n @property\n def is_generic(self) -> bool:\n return True\n\n @property\n def type_params(self) -> List[TypeVar]:\n return [self.type_var]\n\n def __eq__(self, other) -> bool:\n if isinstance(other, StrawberryTypeVar):\n return self.type_var == other.type_var\n if isinstance(other, TypeVar):\n return self.type_var == other\n\n return super().__eq__(other)\n", "path": "strawberry/type.py"}]}
2,063
130
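The two added `__hash__` lines in the golden diff are what the `dacite` use case needs: equal container annotations such as `StrawberryOptional(str)` must hash consistently so they can be used as dict or set keys, which is presumably where the reported "unhashable type: 'StrawberryOptional'" error came from. A small sketch of the behaviour the patch restores, assuming the classes are imported from `strawberry.type` as in the record's source file:

```
from strawberry.type import StrawberryList, StrawberryOptional

# __hash__ is hash((cls, of_type)), so equal containers hash equally and
# can serve as dict/set keys again.
field_types = {
    StrawberryOptional(str): "nullable string",
    StrawberryList(int): "list of ints",
}

assert StrawberryOptional(str) in field_types  # lookup via a new, equal instance
assert hash(StrawberryList(int)) == hash(StrawberryList(int))
```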
gh_patches_debug_37020
rasdani/github-patches
git_diff
open-telemetry__opentelemetry-python-2642
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Convert Values to JSON for prometheus collector * Fixes #2502 # Convert values to native JSON Convert values to native JSON instead of string in the Prometheus Collector * Changes in the list for `_translate_to_prometheus` Fixes #2502 - [ ] Bug fix (non-breaking change which fixes an issue) - [x] New feature (non-breaking change which adds functionality) - [ ] Breaking change (fix or feature that would cause existing functionality to not work as expected) - [ ] This change requires a documentation update # Checklist: - [x] Followed the style guidelines of this project - [x] Changelogs have been updated - [ ] Unit tests have been added - [ ] Documentation has been updated </issue> <code> [start of exporter/opentelemetry-exporter-prometheus/src/opentelemetry/exporter/prometheus/__init__.py] 1 # Copyright The OpenTelemetry Authors 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 """ 16 This library allows export of metrics data to `Prometheus <https://prometheus.io/>`_. 17 18 Usage 19 ----- 20 21 The **OpenTelemetry Prometheus Exporter** allows export of `OpenTelemetry`_ 22 metrics to `Prometheus`_. 23 24 25 .. _Prometheus: https://prometheus.io/ 26 .. _OpenTelemetry: https://github.com/open-telemetry/opentelemetry-python/ 27 28 .. code:: python 29 30 from prometheus_client import start_http_server 31 32 from opentelemetry._metrics import get_meter_provider, set_meter_provider 33 from opentelemetry.exporter.prometheus import PrometheusMetricReader 34 from opentelemetry.sdk._metrics import MeterProvider 35 36 # Start Prometheus client 37 start_http_server(port=8000, addr="localhost") 38 39 # Exporter to export metrics to Prometheus 40 prefix = "MyAppPrefix" 41 reader = PrometheusMetricReader(prefix) 42 43 # Meter is responsible for creating and recording metrics 44 set_meter_provider(MeterProvider(metric_readers=[reader])) 45 meter = get_meter_provider().get_meter("myapp", "0.1.2") 46 47 counter = meter.create_counter( 48 "requests", 49 "requests", 50 "number of requests", 51 ) 52 53 # Labels are used to identify key-values that are associated with a specific 54 # metric that you want to record. 
These are useful for pre-aggregation and can 55 # be used to store custom dimensions pertaining to a metric 56 labels = {"environment": "staging"} 57 58 counter.add(25, labels) 59 input("Press any key to exit...") 60 61 API 62 --- 63 """ 64 65 import collections 66 import logging 67 import re 68 from itertools import chain 69 from typing import Iterable, Optional, Sequence, Tuple 70 71 from prometheus_client import core 72 73 from opentelemetry.sdk._metrics.export import MetricReader 74 from opentelemetry.sdk._metrics.point import Gauge, Histogram, Metric, Sum 75 76 _logger = logging.getLogger(__name__) 77 78 79 def _convert_buckets(metric: Metric) -> Sequence[Tuple[str, int]]: 80 buckets = [] 81 total_count = 0 82 for upper_bound, count in zip( 83 chain(metric.point.explicit_bounds, ["+Inf"]), 84 metric.point.bucket_counts, 85 ): 86 total_count += count 87 buckets.append((f"{upper_bound}", total_count)) 88 89 return buckets 90 91 92 class PrometheusMetricReader(MetricReader): 93 """Prometheus metric exporter for OpenTelemetry. 94 95 Args: 96 prefix: single-word application prefix relevant to the domain 97 the metric belongs to. 98 """ 99 100 def __init__(self, prefix: str = "") -> None: 101 super().__init__() 102 self._collector = _CustomCollector(prefix) 103 core.REGISTRY.register(self._collector) 104 self._collector._callback = self.collect 105 106 def _receive_metrics(self, metrics: Iterable[Metric]) -> None: 107 if metrics is None: 108 return 109 self._collector.add_metrics_data(metrics) 110 111 def shutdown(self) -> bool: 112 core.REGISTRY.unregister(self._collector) 113 return True 114 115 116 class _CustomCollector: 117 """_CustomCollector represents the Prometheus Collector object 118 119 See more: 120 https://github.com/prometheus/client_python#custom-collectors 121 """ 122 123 def __init__(self, prefix: str = ""): 124 self._prefix = prefix 125 self._callback = None 126 self._metrics_to_export = collections.deque() 127 self._non_letters_digits_underscore_re = re.compile( 128 r"[^\w]", re.UNICODE | re.IGNORECASE 129 ) 130 131 def add_metrics_data(self, export_records: Sequence[Metric]) -> None: 132 """Add metrics to Prometheus data""" 133 self._metrics_to_export.append(export_records) 134 135 def collect(self) -> None: 136 """Collect fetches the metrics from OpenTelemetry 137 and delivers them as Prometheus Metrics. 138 Collect is invoked every time a ``prometheus.Gatherer`` is run 139 for example when the HTTP endpoint is invoked by Prometheus. 
140 """ 141 if self._callback is not None: 142 self._callback() 143 144 while self._metrics_to_export: 145 for export_record in self._metrics_to_export.popleft(): 146 prometheus_metric = self._translate_to_prometheus( 147 export_record 148 ) 149 if prometheus_metric is not None: 150 yield prometheus_metric 151 152 def _translate_to_prometheus( 153 self, metric: Metric 154 ) -> Optional[core.Metric]: 155 prometheus_metric = None 156 label_values = [] 157 label_keys = [] 158 for key, value in metric.attributes.items(): 159 label_keys.append(self._sanitize(key)) 160 label_values.append(str(value)) 161 162 metric_name = "" 163 if self._prefix != "": 164 metric_name = self._prefix + "_" 165 metric_name += self._sanitize(metric.name) 166 167 description = metric.description or "" 168 if isinstance(metric.point, Sum): 169 prometheus_metric = core.CounterMetricFamily( 170 name=metric_name, 171 documentation=description, 172 labels=label_keys, 173 unit=metric.unit, 174 ) 175 prometheus_metric.add_metric( 176 labels=label_values, value=metric.point.value 177 ) 178 elif isinstance(metric.point, Gauge): 179 prometheus_metric = core.GaugeMetricFamily( 180 name=metric_name, 181 documentation=description, 182 labels=label_keys, 183 unit=metric.unit, 184 ) 185 prometheus_metric.add_metric( 186 labels=label_values, value=metric.point.value 187 ) 188 elif isinstance(metric.point, Histogram): 189 value = metric.point.sum 190 prometheus_metric = core.HistogramMetricFamily( 191 name=metric_name, 192 documentation=description, 193 labels=label_keys, 194 unit=metric.unit, 195 ) 196 buckets = _convert_buckets(metric) 197 prometheus_metric.add_metric( 198 labels=label_values, buckets=buckets, sum_value=value 199 ) 200 else: 201 _logger.warning("Unsupported metric type. %s", type(metric.point)) 202 return prometheus_metric 203 204 def _sanitize(self, key: str) -> str: 205 """sanitize the given metric name or label according to Prometheus rule. 206 Replace all characters other than [A-Za-z0-9_] with '_'. 207 """ 208 return self._non_letters_digits_underscore_re.sub("_", key) 209 [end of exporter/opentelemetry-exporter-prometheus/src/opentelemetry/exporter/prometheus/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/exporter/opentelemetry-exporter-prometheus/src/opentelemetry/exporter/prometheus/__init__.py b/exporter/opentelemetry-exporter-prometheus/src/opentelemetry/exporter/prometheus/__init__.py --- a/exporter/opentelemetry-exporter-prometheus/src/opentelemetry/exporter/prometheus/__init__.py +++ b/exporter/opentelemetry-exporter-prometheus/src/opentelemetry/exporter/prometheus/__init__.py @@ -62,18 +62,19 @@ --- """ -import collections -import logging -import re +from collections import deque from itertools import chain -from typing import Iterable, Optional, Sequence, Tuple +from json import dumps +from logging import getLogger +from re import IGNORECASE, UNICODE, compile +from typing import Iterable, Optional, Sequence, Tuple, Union from prometheus_client import core from opentelemetry.sdk._metrics.export import MetricReader from opentelemetry.sdk._metrics.point import Gauge, Histogram, Metric, Sum -_logger = logging.getLogger(__name__) +_logger = getLogger(__name__) def _convert_buckets(metric: Metric) -> Sequence[Tuple[str, int]]: @@ -123,9 +124,9 @@ def __init__(self, prefix: str = ""): self._prefix = prefix self._callback = None - self._metrics_to_export = collections.deque() - self._non_letters_digits_underscore_re = re.compile( - r"[^\w]", re.UNICODE | re.IGNORECASE + self._metrics_to_export = deque() + self._non_letters_digits_underscore_re = compile( + r"[^\w]", UNICODE | IGNORECASE ) def add_metrics_data(self, export_records: Sequence[Metric]) -> None: @@ -157,7 +158,7 @@ label_keys = [] for key, value in metric.attributes.items(): label_keys.append(self._sanitize(key)) - label_values.append(str(value)) + label_values.append(self._check_value(value)) metric_name = "" if self._prefix != "": @@ -206,3 +207,10 @@ Replace all characters other than [A-Za-z0-9_] with '_'. """ return self._non_letters_digits_underscore_re.sub("_", key) + + # pylint: disable=no-self-use + def _check_value(self, value: Union[int, float, str, Sequence]) -> str: + """Check the label value and return is appropriate representation""" + if not isinstance(value, str) and isinstance(value, Sequence): + return dumps(value, default=str) + return str(value)
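The diff above swaps `str(value)` for a JSON-encoding helper whenever an attribute value is a non-string sequence. A minimal, self-contained sketch of that idea follows; the function name and the sample values are illustrative and not taken from the library:

```python
from collections.abc import Sequence
from json import dumps


def label_value(value: object) -> str:
    """Render an attribute value as a Prometheus label string."""
    # Strings are sequences too, so rule them out before the Sequence check.
    if not isinstance(value, str) and isinstance(value, Sequence):
        # dumps produces ["frontend", "canary"] instead of the Python repr,
        # and default=str keeps non-serializable members from raising.
        return dumps(value, default=str)
    return str(value)


if __name__ == "__main__":
    print(label_value(("frontend", "canary")))  # ["frontend", "canary"]
    print(label_value(17.5))                    # 17.5
```

Encoding lists and tuples with `json.dumps` keeps label values machine-parseable, whereas `str()` would emit a Python-specific repr such as `('frontend', 'canary')`.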
{"golden_diff": "diff --git a/exporter/opentelemetry-exporter-prometheus/src/opentelemetry/exporter/prometheus/__init__.py b/exporter/opentelemetry-exporter-prometheus/src/opentelemetry/exporter/prometheus/__init__.py\n--- a/exporter/opentelemetry-exporter-prometheus/src/opentelemetry/exporter/prometheus/__init__.py\n+++ b/exporter/opentelemetry-exporter-prometheus/src/opentelemetry/exporter/prometheus/__init__.py\n@@ -62,18 +62,19 @@\n ---\n \"\"\"\n \n-import collections\n-import logging\n-import re\n+from collections import deque\n from itertools import chain\n-from typing import Iterable, Optional, Sequence, Tuple\n+from json import dumps\n+from logging import getLogger\n+from re import IGNORECASE, UNICODE, compile\n+from typing import Iterable, Optional, Sequence, Tuple, Union\n \n from prometheus_client import core\n \n from opentelemetry.sdk._metrics.export import MetricReader\n from opentelemetry.sdk._metrics.point import Gauge, Histogram, Metric, Sum\n \n-_logger = logging.getLogger(__name__)\n+_logger = getLogger(__name__)\n \n \n def _convert_buckets(metric: Metric) -> Sequence[Tuple[str, int]]:\n@@ -123,9 +124,9 @@\n def __init__(self, prefix: str = \"\"):\n self._prefix = prefix\n self._callback = None\n- self._metrics_to_export = collections.deque()\n- self._non_letters_digits_underscore_re = re.compile(\n- r\"[^\\w]\", re.UNICODE | re.IGNORECASE\n+ self._metrics_to_export = deque()\n+ self._non_letters_digits_underscore_re = compile(\n+ r\"[^\\w]\", UNICODE | IGNORECASE\n )\n \n def add_metrics_data(self, export_records: Sequence[Metric]) -> None:\n@@ -157,7 +158,7 @@\n label_keys = []\n for key, value in metric.attributes.items():\n label_keys.append(self._sanitize(key))\n- label_values.append(str(value))\n+ label_values.append(self._check_value(value))\n \n metric_name = \"\"\n if self._prefix != \"\":\n@@ -206,3 +207,10 @@\n Replace all characters other than [A-Za-z0-9_] with '_'.\n \"\"\"\n return self._non_letters_digits_underscore_re.sub(\"_\", key)\n+\n+ # pylint: disable=no-self-use\n+ def _check_value(self, value: Union[int, float, str, Sequence]) -> str:\n+ \"\"\"Check the label value and return is appropriate representation\"\"\"\n+ if not isinstance(value, str) and isinstance(value, Sequence):\n+ return dumps(value, default=str)\n+ return str(value)\n", "issue": "Convert Values to JSON for prometheus collector\n* Fixes #2502\r\n\r\n# Convert values to native JSON \r\n\r\nConvert values to native JSON instead of string in the Prometheus Collector\r\n* Changes in the list for `_translate_to_prometheus`\r\n\r\nFixes #2502\r\n\r\n- [ ] Bug fix (non-breaking change which fixes an issue)\r\n- [x] New feature (non-breaking change which adds functionality)\r\n- [ ] Breaking change (fix or feature that would cause existing functionality to not work as expected)\r\n- [ ] This change requires a documentation update\r\n\r\n# Checklist:\r\n\r\n- [x] Followed the style guidelines of this project\r\n- [x] Changelogs have been updated\r\n- [ ] Unit tests have been added\r\n- [ ] Documentation has been updated\r\n\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR 
CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThis library allows export of metrics data to `Prometheus <https://prometheus.io/>`_.\n\nUsage\n-----\n\nThe **OpenTelemetry Prometheus Exporter** allows export of `OpenTelemetry`_\nmetrics to `Prometheus`_.\n\n\n.. _Prometheus: https://prometheus.io/\n.. _OpenTelemetry: https://github.com/open-telemetry/opentelemetry-python/\n\n.. code:: python\n\n from prometheus_client import start_http_server\n\n from opentelemetry._metrics import get_meter_provider, set_meter_provider\n from opentelemetry.exporter.prometheus import PrometheusMetricReader\n from opentelemetry.sdk._metrics import MeterProvider\n\n # Start Prometheus client\n start_http_server(port=8000, addr=\"localhost\")\n\n # Exporter to export metrics to Prometheus\n prefix = \"MyAppPrefix\"\n reader = PrometheusMetricReader(prefix)\n\n # Meter is responsible for creating and recording metrics\n set_meter_provider(MeterProvider(metric_readers=[reader]))\n meter = get_meter_provider().get_meter(\"myapp\", \"0.1.2\")\n\n counter = meter.create_counter(\n \"requests\",\n \"requests\",\n \"number of requests\",\n )\n\n # Labels are used to identify key-values that are associated with a specific\n # metric that you want to record. These are useful for pre-aggregation and can\n # be used to store custom dimensions pertaining to a metric\n labels = {\"environment\": \"staging\"}\n\n counter.add(25, labels)\n input(\"Press any key to exit...\")\n\nAPI\n---\n\"\"\"\n\nimport collections\nimport logging\nimport re\nfrom itertools import chain\nfrom typing import Iterable, Optional, Sequence, Tuple\n\nfrom prometheus_client import core\n\nfrom opentelemetry.sdk._metrics.export import MetricReader\nfrom opentelemetry.sdk._metrics.point import Gauge, Histogram, Metric, Sum\n\n_logger = logging.getLogger(__name__)\n\n\ndef _convert_buckets(metric: Metric) -> Sequence[Tuple[str, int]]:\n buckets = []\n total_count = 0\n for upper_bound, count in zip(\n chain(metric.point.explicit_bounds, [\"+Inf\"]),\n metric.point.bucket_counts,\n ):\n total_count += count\n buckets.append((f\"{upper_bound}\", total_count))\n\n return buckets\n\n\nclass PrometheusMetricReader(MetricReader):\n \"\"\"Prometheus metric exporter for OpenTelemetry.\n\n Args:\n prefix: single-word application prefix relevant to the domain\n the metric belongs to.\n \"\"\"\n\n def __init__(self, prefix: str = \"\") -> None:\n super().__init__()\n self._collector = _CustomCollector(prefix)\n core.REGISTRY.register(self._collector)\n self._collector._callback = self.collect\n\n def _receive_metrics(self, metrics: Iterable[Metric]) -> None:\n if metrics is None:\n return\n self._collector.add_metrics_data(metrics)\n\n def shutdown(self) -> bool:\n core.REGISTRY.unregister(self._collector)\n return True\n\n\nclass _CustomCollector:\n \"\"\"_CustomCollector represents the Prometheus Collector object\n\n See more:\n https://github.com/prometheus/client_python#custom-collectors\n \"\"\"\n\n def __init__(self, prefix: str = \"\"):\n self._prefix = prefix\n self._callback = None\n self._metrics_to_export = collections.deque()\n self._non_letters_digits_underscore_re = re.compile(\n r\"[^\\w]\", re.UNICODE | re.IGNORECASE\n )\n\n def add_metrics_data(self, export_records: Sequence[Metric]) -> None:\n \"\"\"Add metrics to Prometheus data\"\"\"\n self._metrics_to_export.append(export_records)\n\n def collect(self) -> None:\n 
\"\"\"Collect fetches the metrics from OpenTelemetry\n and delivers them as Prometheus Metrics.\n Collect is invoked every time a ``prometheus.Gatherer`` is run\n for example when the HTTP endpoint is invoked by Prometheus.\n \"\"\"\n if self._callback is not None:\n self._callback()\n\n while self._metrics_to_export:\n for export_record in self._metrics_to_export.popleft():\n prometheus_metric = self._translate_to_prometheus(\n export_record\n )\n if prometheus_metric is not None:\n yield prometheus_metric\n\n def _translate_to_prometheus(\n self, metric: Metric\n ) -> Optional[core.Metric]:\n prometheus_metric = None\n label_values = []\n label_keys = []\n for key, value in metric.attributes.items():\n label_keys.append(self._sanitize(key))\n label_values.append(str(value))\n\n metric_name = \"\"\n if self._prefix != \"\":\n metric_name = self._prefix + \"_\"\n metric_name += self._sanitize(metric.name)\n\n description = metric.description or \"\"\n if isinstance(metric.point, Sum):\n prometheus_metric = core.CounterMetricFamily(\n name=metric_name,\n documentation=description,\n labels=label_keys,\n unit=metric.unit,\n )\n prometheus_metric.add_metric(\n labels=label_values, value=metric.point.value\n )\n elif isinstance(metric.point, Gauge):\n prometheus_metric = core.GaugeMetricFamily(\n name=metric_name,\n documentation=description,\n labels=label_keys,\n unit=metric.unit,\n )\n prometheus_metric.add_metric(\n labels=label_values, value=metric.point.value\n )\n elif isinstance(metric.point, Histogram):\n value = metric.point.sum\n prometheus_metric = core.HistogramMetricFamily(\n name=metric_name,\n documentation=description,\n labels=label_keys,\n unit=metric.unit,\n )\n buckets = _convert_buckets(metric)\n prometheus_metric.add_metric(\n labels=label_values, buckets=buckets, sum_value=value\n )\n else:\n _logger.warning(\"Unsupported metric type. %s\", type(metric.point))\n return prometheus_metric\n\n def _sanitize(self, key: str) -> str:\n \"\"\"sanitize the given metric name or label according to Prometheus rule.\n Replace all characters other than [A-Za-z0-9_] with '_'.\n \"\"\"\n return self._non_letters_digits_underscore_re.sub(\"_\", key)\n", "path": "exporter/opentelemetry-exporter-prometheus/src/opentelemetry/exporter/prometheus/__init__.py"}]}
2,700
581
gh_patches_debug_20332
rasdani/github-patches
git_diff
TheAlgorithms__Python-2467
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add Python type hints and doctests to other/two_sum.py @tonydelanuez Would you be willing to add Python type hints and doctests to [`other/two_sum.py`](../blob/master/other/two_sum.py)? The function comments say that it will return a `list[int]` but instead it either returns a `list[list[int]]` or `False`. </issue> <code> [start of other/two_sum.py] 1 """ 2 Given an array of integers, return indices of the two numbers such that they add up to 3 a specific target. 4 5 You may assume that each input would have exactly one solution, and you may not use the 6 same element twice. 7 8 Example: 9 Given nums = [2, 7, 11, 15], target = 9, 10 11 Because nums[0] + nums[1] = 2 + 7 = 9, 12 return [0, 1]. 13 """ 14 15 16 def twoSum(nums, target): 17 """ 18 :type nums: List[int] 19 :type target: int 20 :rtype: List[int] 21 """ 22 chk_map = {} 23 for index, val in enumerate(nums): 24 compl = target - val 25 if compl in chk_map: 26 indices = [chk_map[compl], index] 27 print(indices) 28 return [indices] 29 else: 30 chk_map[val] = index 31 return False 32 [end of other/two_sum.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/other/two_sum.py b/other/two_sum.py --- a/other/two_sum.py +++ b/other/two_sum.py @@ -11,21 +11,37 @@ Because nums[0] + nums[1] = 2 + 7 = 9, return [0, 1]. """ +from __future__ import annotations -def twoSum(nums, target): +def two_sum(nums: list[int], target: int) -> list[int]: """ - :type nums: List[int] - :type target: int - :rtype: List[int] + >>> two_sum([2, 7, 11, 15], 9) + [0, 1] + >>> two_sum([15, 2, 11, 7], 13) + [1, 2] + >>> two_sum([2, 7, 11, 15], 17) + [0, 3] + >>> two_sum([7, 15, 11, 2], 18) + [0, 2] + >>> two_sum([2, 7, 11, 15], 26) + [2, 3] + >>> two_sum([2, 7, 11, 15], 8) + [] + >>> two_sum([3 * i for i in range(10)], 19) + [] """ chk_map = {} for index, val in enumerate(nums): compl = target - val if compl in chk_map: - indices = [chk_map[compl], index] - print(indices) - return [indices] - else: - chk_map[val] = index - return False + return [chk_map[compl], index] + chk_map[val] = index + return [] + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + print(f"{two_sum([2, 7, 11, 15], 9) = }")
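The fix above settles on a single `list[int]` return type (an empty list instead of `False`) and adds doctests that `doctest.testmod()` can execute. A small standalone sketch of the same convention, with names chosen here only for illustration:

```python
from __future__ import annotations

import doctest


def two_sum_indices(nums: list[int], target: int) -> list[int]:
    """Return indices of two values summing to target, or [] if none exist.

    >>> two_sum_indices([2, 7, 11, 15], 9)
    [0, 1]
    >>> two_sum_indices([2, 7, 11, 15], 100)
    []
    """
    seen: dict[int, int] = {}
    for index, value in enumerate(nums):
        if target - value in seen:
            return [seen[target - value], index]
        seen[value] = index
    return []  # an empty list keeps the annotated return type honest


if __name__ == "__main__":
    doctest.testmod()
```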
{"golden_diff": "diff --git a/other/two_sum.py b/other/two_sum.py\n--- a/other/two_sum.py\n+++ b/other/two_sum.py\n@@ -11,21 +11,37 @@\n Because nums[0] + nums[1] = 2 + 7 = 9,\n return [0, 1].\n \"\"\"\n+from __future__ import annotations\n \n \n-def twoSum(nums, target):\n+def two_sum(nums: list[int], target: int) -> list[int]:\n \"\"\"\n- :type nums: List[int]\n- :type target: int\n- :rtype: List[int]\n+ >>> two_sum([2, 7, 11, 15], 9)\n+ [0, 1]\n+ >>> two_sum([15, 2, 11, 7], 13)\n+ [1, 2]\n+ >>> two_sum([2, 7, 11, 15], 17)\n+ [0, 3]\n+ >>> two_sum([7, 15, 11, 2], 18)\n+ [0, 2]\n+ >>> two_sum([2, 7, 11, 15], 26)\n+ [2, 3]\n+ >>> two_sum([2, 7, 11, 15], 8)\n+ []\n+ >>> two_sum([3 * i for i in range(10)], 19)\n+ []\n \"\"\"\n chk_map = {}\n for index, val in enumerate(nums):\n compl = target - val\n if compl in chk_map:\n- indices = [chk_map[compl], index]\n- print(indices)\n- return [indices]\n- else:\n- chk_map[val] = index\n- return False\n+ return [chk_map[compl], index]\n+ chk_map[val] = index\n+ return []\n+\n+\n+if __name__ == \"__main__\":\n+ import doctest\n+\n+ doctest.testmod()\n+ print(f\"{two_sum([2, 7, 11, 15], 9) = }\")\n", "issue": "Add Python type hints and doctests to other/two_sum.py\n@tonydelanuez Would you be willing to add Python type hints and doctests to [`other/two_sum.py`](../blob/master/other/two_sum.py)? The function comments say that it will return a `list[int]` but instead it either returns a `list[list[int]]` or `False`.\n", "before_files": [{"content": "\"\"\"\nGiven an array of integers, return indices of the two numbers such that they add up to\na specific target.\n\nYou may assume that each input would have exactly one solution, and you may not use the\nsame element twice.\n\nExample:\nGiven nums = [2, 7, 11, 15], target = 9,\n\nBecause nums[0] + nums[1] = 2 + 7 = 9,\nreturn [0, 1].\n\"\"\"\n\n\ndef twoSum(nums, target):\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype: List[int]\n \"\"\"\n chk_map = {}\n for index, val in enumerate(nums):\n compl = target - val\n if compl in chk_map:\n indices = [chk_map[compl], index]\n print(indices)\n return [indices]\n else:\n chk_map[val] = index\n return False\n", "path": "other/two_sum.py"}]}
874
490
gh_patches_debug_34623
rasdani/github-patches
git_diff
chainer__chainer-505
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add a function for summation along axes `sum` function reduces all elements, while there are many cases that I want to take a summation over some axes. This is analogous to `numpy.sum` with the `axis` argument. The implementation should contain a general function for GPU to compute a reduction over specified axes. </issue> <code> [start of chainer/functions/math/sum.py] 1 import numpy 2 3 from chainer import cuda 4 from chainer import function 5 from chainer.utils import type_check 6 7 8 class Sum(function.Function): 9 """Sum of array elements over a given axis.""" 10 11 def __init__(self, axis=None): 12 self.axis = axis 13 14 def check_type_forward(self, in_types): 15 type_check.expect( 16 in_types.size() == 1, 17 in_types[0].dtype == numpy.float32 18 ) 19 20 if self.axis is not None: 21 type_check.expect( 22 self.axis < in_types[0].ndim, 23 ) 24 25 def forward(self, x): 26 xp = cuda.get_array_module(*x) 27 return xp.asarray(x[0].sum(axis=self.axis)), 28 29 def backward(self, x, gy): 30 xp = cuda.get_array_module(*x) 31 32 gx = xp.empty_like(x[0]) 33 if self.axis is None: 34 gx[:] = gy[0] 35 else: 36 gx[:] = xp.expand_dims(gy[0], axis=self.axis) 37 38 return gx, 39 40 41 def sum(x, axis=None): 42 """Sum of array elements over a given axis. 43 44 Args: 45 x (~chainer.Variable): Elements to sum. 46 axis (None or int): Axis which a sum is performed. 47 The default (axis = None) is perform a sum over all the dimensions 48 of the input array. 49 50 Returns: 51 ~chainer.Variable: Output variable. 52 53 """ 54 return Sum(axis)(x) 55 [end of chainer/functions/math/sum.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/chainer/functions/math/sum.py b/chainer/functions/math/sum.py --- a/chainer/functions/math/sum.py +++ b/chainer/functions/math/sum.py @@ -9,7 +9,17 @@ """Sum of array elements over a given axis.""" def __init__(self, axis=None): - self.axis = axis + if axis is None: + self.axis = None + elif isinstance(axis, int): + self.axis = (axis,) + elif isinstance(axis, tuple) and all(isinstance(a, int) for a in axis): + if len(set(axis)) != len(axis): + raise ValueError('duplicate value in axis: ({})'.format( + ', '.join(map(str, axis)))) + self.axis = axis + else: + raise TypeError('None, int or tuple of int are required') def check_type_forward(self, in_types): type_check.expect( @@ -18,9 +28,15 @@ ) if self.axis is not None: - type_check.expect( - self.axis < in_types[0].ndim, - ) + for axis in self.axis: + if axis >= 0: + type_check.expect( + axis < in_types[0].ndim, + ) + else: + type_check.expect( + -axis - 1 < in_types[0].ndim, + ) def forward(self, x): xp = cuda.get_array_module(*x) @@ -33,7 +49,15 @@ if self.axis is None: gx[:] = gy[0] else: - gx[:] = xp.expand_dims(gy[0], axis=self.axis) + gy = gy[0] + actual_axis = [] + for axis in self.axis: + if axis < 0: + axis = len(gx.shape) + axis + actual_axis.append(axis) + for axis in sorted(actual_axis): + gy = xp.expand_dims(gy, axis=axis) + gx[:] = gy return gx, @@ -43,7 +67,7 @@ Args: x (~chainer.Variable): Elements to sum. - axis (None or int): Axis which a sum is performed. + axis (None, int, or tuple of int): Axis which a sum is performed. The default (axis = None) is perform a sum over all the dimensions of the input array.
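For intuition about the `backward` changes in this patch, here is a plain NumPy sketch (not Chainer code; the helper names are invented) of reducing over a tuple of axes and then re-inserting those axes with `expand_dims` so the upstream gradient broadcasts back to the input's shape:

```python
import numpy as np


def sum_forward(x: np.ndarray, axis=None) -> np.ndarray:
    """Forward pass: a plain reduction over the requested axes."""
    return x.sum(axis=axis)


def sum_backward(x_shape: tuple, gy: np.ndarray, axis=None) -> np.ndarray:
    """Backward pass: broadcast the incoming gradient back to the input shape."""
    if axis is None:
        return np.full(x_shape, gy)  # every element gets the scalar gradient
    axes = (axis,) if isinstance(axis, int) else axis
    # Normalize negative axes, then restore each one as a length-1 dimension
    # so broadcasting fills the reduced axes (this mirrors the expand_dims loop).
    actual = sorted(a if a >= 0 else len(x_shape) + a for a in axes)
    for a in actual:
        gy = np.expand_dims(gy, axis=a)
    return np.broadcast_to(gy, x_shape).copy()  # copy: broadcast_to is read-only


if __name__ == "__main__":
    x = np.arange(24, dtype=np.float32).reshape(2, 3, 4)
    y = sum_forward(x, axis=(0, 2))                  # shape (3,)
    gx = sum_backward(x.shape, np.ones_like(y), axis=(0, 2))
    assert gx.shape == x.shape and np.all(gx == 1.0)
```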
{"golden_diff": "diff --git a/chainer/functions/math/sum.py b/chainer/functions/math/sum.py\n--- a/chainer/functions/math/sum.py\n+++ b/chainer/functions/math/sum.py\n@@ -9,7 +9,17 @@\n \"\"\"Sum of array elements over a given axis.\"\"\"\n \n def __init__(self, axis=None):\n- self.axis = axis\n+ if axis is None:\n+ self.axis = None\n+ elif isinstance(axis, int):\n+ self.axis = (axis,)\n+ elif isinstance(axis, tuple) and all(isinstance(a, int) for a in axis):\n+ if len(set(axis)) != len(axis):\n+ raise ValueError('duplicate value in axis: ({})'.format(\n+ ', '.join(map(str, axis))))\n+ self.axis = axis\n+ else:\n+ raise TypeError('None, int or tuple of int are required')\n \n def check_type_forward(self, in_types):\n type_check.expect(\n@@ -18,9 +28,15 @@\n )\n \n if self.axis is not None:\n- type_check.expect(\n- self.axis < in_types[0].ndim,\n- )\n+ for axis in self.axis:\n+ if axis >= 0:\n+ type_check.expect(\n+ axis < in_types[0].ndim,\n+ )\n+ else:\n+ type_check.expect(\n+ -axis - 1 < in_types[0].ndim,\n+ )\n \n def forward(self, x):\n xp = cuda.get_array_module(*x)\n@@ -33,7 +49,15 @@\n if self.axis is None:\n gx[:] = gy[0]\n else:\n- gx[:] = xp.expand_dims(gy[0], axis=self.axis)\n+ gy = gy[0]\n+ actual_axis = []\n+ for axis in self.axis:\n+ if axis < 0:\n+ axis = len(gx.shape) + axis\n+ actual_axis.append(axis)\n+ for axis in sorted(actual_axis):\n+ gy = xp.expand_dims(gy, axis=axis)\n+ gx[:] = gy\n \n return gx,\n \n@@ -43,7 +67,7 @@\n \n Args:\n x (~chainer.Variable): Elements to sum.\n- axis (None or int): Axis which a sum is performed.\n+ axis (None, int, or tuple of int): Axis which a sum is performed.\n The default (axis = None) is perform a sum over all the dimensions\n of the input array.\n", "issue": "Add a function for summation along axes\n`sum` function reduces all elements, while there are many cases that I want to take a summation over some axes. This is analogous to `numpy.sum` with the `axis` argument.\n\nThe implementation should contain a general function for GPU to compute a reduction over specified axes.\n\n", "before_files": [{"content": "import numpy\n\nfrom chainer import cuda\nfrom chainer import function\nfrom chainer.utils import type_check\n\n\nclass Sum(function.Function):\n \"\"\"Sum of array elements over a given axis.\"\"\"\n\n def __init__(self, axis=None):\n self.axis = axis\n\n def check_type_forward(self, in_types):\n type_check.expect(\n in_types.size() == 1,\n in_types[0].dtype == numpy.float32\n )\n\n if self.axis is not None:\n type_check.expect(\n self.axis < in_types[0].ndim,\n )\n\n def forward(self, x):\n xp = cuda.get_array_module(*x)\n return xp.asarray(x[0].sum(axis=self.axis)),\n\n def backward(self, x, gy):\n xp = cuda.get_array_module(*x)\n\n gx = xp.empty_like(x[0])\n if self.axis is None:\n gx[:] = gy[0]\n else:\n gx[:] = xp.expand_dims(gy[0], axis=self.axis)\n\n return gx,\n\n\ndef sum(x, axis=None):\n \"\"\"Sum of array elements over a given axis.\n\n Args:\n x (~chainer.Variable): Elements to sum.\n axis (None or int): Axis which a sum is performed.\n The default (axis = None) is perform a sum over all the dimensions\n of the input array.\n\n Returns:\n ~chainer.Variable: Output variable.\n\n \"\"\"\n return Sum(axis)(x)\n", "path": "chainer/functions/math/sum.py"}]}
1,029
555
gh_patches_debug_37512
rasdani/github-patches
git_diff
pytorch__vision-2642
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Different types of IoU ## 🚀 Feature <!-- A clear and concise description of the feature proposal --> Implementation of 3 other types of IoU metric in torchvision.ops.boxes: 1. Generalized IoU 2. Distance IoU 3. Complete IoU ## Motivation <!-- Please outline the motivation for the proposal. Is your feature request related to a problem? e.g., I'm always frustrated when [...]. If this is related to another GitHub issue, please link here too --> Since these metrics can lead to better results in some cases (object detection, etc.) it could be useful to have them natively implemented. ## Pitch <!-- A clear and concise description of what you want to happen. --> Users who need IoU computation can simply choose from 4 different implementation using the same inputs (e.g. two lists of boxes). ## Additional context <!-- Add any other context or screenshots about the feature request here. --> Mathematical details can be found here: 1. Generalized IoU: [Generalized Intersection over Union](https://giou.stanford.edu/) 2. Distance/Complete IoU: [Distance-IoU Loss: Faster and Better Learning for Bounding Box Regression](https://arxiv.org/abs/1911.08287) </issue> <code> [start of torchvision/ops/__init__.py] 1 from .boxes import nms, batched_nms, remove_small_boxes, clip_boxes_to_image, box_area, box_iou 2 from .new_empty_tensor import _new_empty_tensor 3 from .deform_conv import deform_conv2d, DeformConv2d 4 from .roi_align import roi_align, RoIAlign 5 from .roi_pool import roi_pool, RoIPool 6 from .ps_roi_align import ps_roi_align, PSRoIAlign 7 from .ps_roi_pool import ps_roi_pool, PSRoIPool 8 from .poolers import MultiScaleRoIAlign 9 from .feature_pyramid_network import FeaturePyramidNetwork 10 11 from ._register_onnx_ops import _register_custom_op 12 13 _register_custom_op() 14 15 16 __all__ = [ 17 'deform_conv2d', 'DeformConv2d', 'nms', 'batched_nms', 'remove_small_boxes', 18 'clip_boxes_to_image', 'box_area', 'box_iou', 'roi_align', 'RoIAlign', 'roi_pool', 19 'RoIPool', '_new_empty_tensor', 'ps_roi_align', 'PSRoIAlign', 'ps_roi_pool', 20 'PSRoIPool', 'MultiScaleRoIAlign', 'FeaturePyramidNetwork' 21 ] 22 [end of torchvision/ops/__init__.py] [start of torchvision/ops/boxes.py] 1 import torch 2 from torch.jit.annotations import Tuple 3 from torch import Tensor 4 import torchvision 5 6 7 def nms(boxes: Tensor, scores: Tensor, iou_threshold: float) -> Tensor: 8 """ 9 Performs non-maximum suppression (NMS) on the boxes according 10 to their intersection-over-union (IoU). 11 12 NMS iteratively removes lower scoring boxes which have an 13 IoU greater than iou_threshold with another (higher scoring) 14 box. 15 16 If multiple boxes have the exact same score and satisfy the IoU 17 criterion with respect to a reference box, the selected box is 18 not guaranteed to be the same between CPU and GPU. This is similar 19 to the behavior of argsort in PyTorch when repeated values are present. 20 21 Parameters 22 ---------- 23 boxes : Tensor[N, 4]) 24 boxes to perform NMS on. 
They 25 are expected to be in (x1, y1, x2, y2) format 26 scores : Tensor[N] 27 scores for each one of the boxes 28 iou_threshold : float 29 discards all overlapping 30 boxes with IoU > iou_threshold 31 32 Returns 33 ------- 34 keep : Tensor 35 int64 tensor with the indices 36 of the elements that have been kept 37 by NMS, sorted in decreasing order of scores 38 """ 39 return torch.ops.torchvision.nms(boxes, scores, iou_threshold) 40 41 42 @torch.jit._script_if_tracing 43 def batched_nms( 44 boxes: Tensor, 45 scores: Tensor, 46 idxs: Tensor, 47 iou_threshold: float, 48 ) -> Tensor: 49 """ 50 Performs non-maximum suppression in a batched fashion. 51 52 Each index value correspond to a category, and NMS 53 will not be applied between elements of different categories. 54 55 Parameters 56 ---------- 57 boxes : Tensor[N, 4] 58 boxes where NMS will be performed. They 59 are expected to be in (x1, y1, x2, y2) format 60 scores : Tensor[N] 61 scores for each one of the boxes 62 idxs : Tensor[N] 63 indices of the categories for each one of the boxes. 64 iou_threshold : float 65 discards all overlapping boxes 66 with IoU > iou_threshold 67 68 Returns 69 ------- 70 keep : Tensor 71 int64 tensor with the indices of 72 the elements that have been kept by NMS, sorted 73 in decreasing order of scores 74 """ 75 if boxes.numel() == 0: 76 return torch.empty((0,), dtype=torch.int64, device=boxes.device) 77 # strategy: in order to perform NMS independently per class. 78 # we add an offset to all the boxes. The offset is dependent 79 # only on the class idx, and is large enough so that boxes 80 # from different classes do not overlap 81 else: 82 max_coordinate = boxes.max() 83 offsets = idxs.to(boxes) * (max_coordinate + torch.tensor(1).to(boxes)) 84 boxes_for_nms = boxes + offsets[:, None] 85 keep = nms(boxes_for_nms, scores, iou_threshold) 86 return keep 87 88 89 def remove_small_boxes(boxes: Tensor, min_size: float) -> Tensor: 90 """ 91 Remove boxes which contains at least one side smaller than min_size. 92 93 Arguments: 94 boxes (Tensor[N, 4]): boxes in (x1, y1, x2, y2) format 95 min_size (float): minimum size 96 97 Returns: 98 keep (Tensor[K]): indices of the boxes that have both sides 99 larger than min_size 100 """ 101 ws, hs = boxes[:, 2] - boxes[:, 0], boxes[:, 3] - boxes[:, 1] 102 keep = (ws >= min_size) & (hs >= min_size) 103 keep = torch.where(keep)[0] 104 return keep 105 106 107 def clip_boxes_to_image(boxes: Tensor, size: Tuple[int, int]) -> Tensor: 108 """ 109 Clip boxes so that they lie inside an image of size `size`. 
110 111 Arguments: 112 boxes (Tensor[N, 4]): boxes in (x1, y1, x2, y2) format 113 size (Tuple[height, width]): size of the image 114 115 Returns: 116 clipped_boxes (Tensor[N, 4]) 117 """ 118 dim = boxes.dim() 119 boxes_x = boxes[..., 0::2] 120 boxes_y = boxes[..., 1::2] 121 height, width = size 122 123 if torchvision._is_tracing(): 124 boxes_x = torch.max(boxes_x, torch.tensor(0, dtype=boxes.dtype, device=boxes.device)) 125 boxes_x = torch.min(boxes_x, torch.tensor(width, dtype=boxes.dtype, device=boxes.device)) 126 boxes_y = torch.max(boxes_y, torch.tensor(0, dtype=boxes.dtype, device=boxes.device)) 127 boxes_y = torch.min(boxes_y, torch.tensor(height, dtype=boxes.dtype, device=boxes.device)) 128 else: 129 boxes_x = boxes_x.clamp(min=0, max=width) 130 boxes_y = boxes_y.clamp(min=0, max=height) 131 132 clipped_boxes = torch.stack((boxes_x, boxes_y), dim=dim) 133 return clipped_boxes.reshape(boxes.shape) 134 135 136 def box_area(boxes: Tensor) -> Tensor: 137 """ 138 Computes the area of a set of bounding boxes, which are specified by its 139 (x1, y1, x2, y2) coordinates. 140 141 Arguments: 142 boxes (Tensor[N, 4]): boxes for which the area will be computed. They 143 are expected to be in (x1, y1, x2, y2) format 144 145 Returns: 146 area (Tensor[N]): area for each box 147 """ 148 return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) 149 150 151 # implementation from https://github.com/kuangliu/torchcv/blob/master/torchcv/utils/box.py 152 # with slight modifications 153 def box_iou(boxes1: Tensor, boxes2: Tensor) -> Tensor: 154 """ 155 Return intersection-over-union (Jaccard index) of boxes. 156 157 Both sets of boxes are expected to be in (x1, y1, x2, y2) format. 158 159 Arguments: 160 boxes1 (Tensor[N, 4]) 161 boxes2 (Tensor[M, 4]) 162 163 Returns: 164 iou (Tensor[N, M]): the NxM matrix containing the pairwise 165 IoU values for every element in boxes1 and boxes2 166 """ 167 area1 = box_area(boxes1) 168 area2 = box_area(boxes2) 169 170 lt = torch.max(boxes1[:, None, :2], boxes2[:, :2]) # [N,M,2] 171 rb = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) # [N,M,2] 172 173 wh = (rb - lt).clamp(min=0) # [N,M,2] 174 inter = wh[:, :, 0] * wh[:, :, 1] # [N,M] 175 176 iou = inter / (area1[:, None] + area2 - inter) 177 return iou 178 [end of torchvision/ops/boxes.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/torchvision/ops/__init__.py b/torchvision/ops/__init__.py --- a/torchvision/ops/__init__.py +++ b/torchvision/ops/__init__.py @@ -1,4 +1,4 @@ -from .boxes import nms, batched_nms, remove_small_boxes, clip_boxes_to_image, box_area, box_iou +from .boxes import nms, batched_nms, remove_small_boxes, clip_boxes_to_image, box_area, box_iou, generalized_box_iou from .new_empty_tensor import _new_empty_tensor from .deform_conv import deform_conv2d, DeformConv2d from .roi_align import roi_align, RoIAlign @@ -15,7 +15,7 @@ __all__ = [ 'deform_conv2d', 'DeformConv2d', 'nms', 'batched_nms', 'remove_small_boxes', - 'clip_boxes_to_image', 'box_area', 'box_iou', 'roi_align', 'RoIAlign', 'roi_pool', + 'clip_boxes_to_image', 'box_area', 'box_iou', 'generalized_box_iou', 'roi_align', 'RoIAlign', 'roi_pool', 'RoIPool', '_new_empty_tensor', 'ps_roi_align', 'PSRoIAlign', 'ps_roi_pool', 'PSRoIPool', 'MultiScaleRoIAlign', 'FeaturePyramidNetwork' ] diff --git a/torchvision/ops/boxes.py b/torchvision/ops/boxes.py --- a/torchvision/ops/boxes.py +++ b/torchvision/ops/boxes.py @@ -161,8 +161,7 @@ boxes2 (Tensor[M, 4]) Returns: - iou (Tensor[N, M]): the NxM matrix containing the pairwise - IoU values for every element in boxes1 and boxes2 + iou (Tensor[N, M]): the NxM matrix containing the pairwise IoU values for every element in boxes1 and boxes2 """ area1 = box_area(boxes1) area2 = box_area(boxes2) @@ -175,3 +174,46 @@ iou = inter / (area1[:, None] + area2 - inter) return iou + + +# Implementation adapted from https://github.com/facebookresearch/detr/blob/master/util/box_ops.py +def generalized_box_iou(boxes1: Tensor, boxes2: Tensor) -> Tensor: + """ + Return generalized intersection-over-union (Jaccard index) of boxes. + + Both sets of boxes are expected to be in (x1, y1, x2, y2) format. + + Arguments: + boxes1 (Tensor[N, 4]) + boxes2 (Tensor[M, 4]) + + Returns: + generalized_iou (Tensor[N, M]): the NxM matrix containing the pairwise generalized_IoU values + for every element in boxes1 and boxes2 + """ + + # degenerate boxes gives inf / nan results + # so do an early check + assert (boxes1[:, 2:] >= boxes1[:, :2]).all() + assert (boxes2[:, 2:] >= boxes2[:, :2]).all() + + area1 = box_area(boxes1) + area2 = box_area(boxes2) + + lt = torch.max(boxes1[:, None, :2], boxes2[:, :2]) # [N,M,2] + rb = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) # [N,M,2] + + wh = (rb - lt).clamp(min=0) # [N,M,2] + inter = wh[:, :, 0] * wh[:, :, 1] # [N,M] + + union = area1[:, None] + area2 - inter + + iou = inter / union + + lti = torch.min(boxes1[:, None, :2], boxes2[:, :2]) + rbi = torch.max(boxes1[:, None, 2:], boxes2[:, 2:]) + + whi = (rbi - lti).clamp(min=0) # [N,M,2] + areai = whi[:, :, 0] * whi[:, :, 1] + + return iou - (areai - union) / areai
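The new `generalized_box_iou` above works pairwise over two sets of boxes. As a worked example of just the formula from the paper linked in the issue, GIoU = IoU - (C - U) / C with C the area of the smallest enclosing box and U the union, the single-pair sketch below uses illustrative coordinates and is not part of torchvision:

```python
import torch
from torch import Tensor


def giou_single(box1: Tensor, box2: Tensor) -> Tensor:
    """Generalized IoU for two non-degenerate boxes in (x1, y1, x2, y2) form."""
    area1 = (box1[2] - box1[0]) * (box1[3] - box1[1])
    area2 = (box2[2] - box2[0]) * (box2[3] - box2[1])

    # Intersection rectangle (clamped so disjoint boxes give zero overlap).
    lt = torch.max(box1[:2], box2[:2])
    rb = torch.min(box1[2:], box2[2:])
    wh = (rb - lt).clamp(min=0)
    inter = wh[0] * wh[1]
    union = area1 + area2 - inter
    iou = inter / union

    # Smallest axis-aligned box enclosing both inputs.
    lt_c = torch.min(box1[:2], box2[:2])
    rb_c = torch.max(box1[2:], box2[2:])
    wh_c = rb_c - lt_c
    enclose = wh_c[0] * wh_c[1]

    # GIoU penalizes the empty space inside the enclosing box.
    return iou - (enclose - union) / enclose


if __name__ == "__main__":
    a = torch.tensor([0.0, 0.0, 2.0, 2.0])
    b = torch.tensor([1.0, 1.0, 3.0, 3.0])
    print(giou_single(a, b))  # IoU = 1/7, enclosing area = 9 -> GIoU ~ -0.079
```

Because the enclosing box contains empty space, the result dips below the plain IoU of 1/7, and for fully disjoint boxes the score becomes negative.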
{"golden_diff": "diff --git a/torchvision/ops/__init__.py b/torchvision/ops/__init__.py\n--- a/torchvision/ops/__init__.py\n+++ b/torchvision/ops/__init__.py\n@@ -1,4 +1,4 @@\n-from .boxes import nms, batched_nms, remove_small_boxes, clip_boxes_to_image, box_area, box_iou\n+from .boxes import nms, batched_nms, remove_small_boxes, clip_boxes_to_image, box_area, box_iou, generalized_box_iou\n from .new_empty_tensor import _new_empty_tensor\n from .deform_conv import deform_conv2d, DeformConv2d\n from .roi_align import roi_align, RoIAlign\n@@ -15,7 +15,7 @@\n \n __all__ = [\n 'deform_conv2d', 'DeformConv2d', 'nms', 'batched_nms', 'remove_small_boxes',\n- 'clip_boxes_to_image', 'box_area', 'box_iou', 'roi_align', 'RoIAlign', 'roi_pool',\n+ 'clip_boxes_to_image', 'box_area', 'box_iou', 'generalized_box_iou', 'roi_align', 'RoIAlign', 'roi_pool',\n 'RoIPool', '_new_empty_tensor', 'ps_roi_align', 'PSRoIAlign', 'ps_roi_pool',\n 'PSRoIPool', 'MultiScaleRoIAlign', 'FeaturePyramidNetwork'\n ]\ndiff --git a/torchvision/ops/boxes.py b/torchvision/ops/boxes.py\n--- a/torchvision/ops/boxes.py\n+++ b/torchvision/ops/boxes.py\n@@ -161,8 +161,7 @@\n boxes2 (Tensor[M, 4])\n \n Returns:\n- iou (Tensor[N, M]): the NxM matrix containing the pairwise\n- IoU values for every element in boxes1 and boxes2\n+ iou (Tensor[N, M]): the NxM matrix containing the pairwise IoU values for every element in boxes1 and boxes2\n \"\"\"\n area1 = box_area(boxes1)\n area2 = box_area(boxes2)\n@@ -175,3 +174,46 @@\n \n iou = inter / (area1[:, None] + area2 - inter)\n return iou\n+\n+\n+# Implementation adapted from https://github.com/facebookresearch/detr/blob/master/util/box_ops.py\n+def generalized_box_iou(boxes1: Tensor, boxes2: Tensor) -> Tensor:\n+ \"\"\"\n+ Return generalized intersection-over-union (Jaccard index) of boxes.\n+\n+ Both sets of boxes are expected to be in (x1, y1, x2, y2) format.\n+\n+ Arguments:\n+ boxes1 (Tensor[N, 4])\n+ boxes2 (Tensor[M, 4])\n+\n+ Returns:\n+ generalized_iou (Tensor[N, M]): the NxM matrix containing the pairwise generalized_IoU values\n+ for every element in boxes1 and boxes2\n+ \"\"\"\n+\n+ # degenerate boxes gives inf / nan results\n+ # so do an early check\n+ assert (boxes1[:, 2:] >= boxes1[:, :2]).all()\n+ assert (boxes2[:, 2:] >= boxes2[:, :2]).all()\n+\n+ area1 = box_area(boxes1)\n+ area2 = box_area(boxes2)\n+\n+ lt = torch.max(boxes1[:, None, :2], boxes2[:, :2]) # [N,M,2]\n+ rb = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) # [N,M,2]\n+\n+ wh = (rb - lt).clamp(min=0) # [N,M,2]\n+ inter = wh[:, :, 0] * wh[:, :, 1] # [N,M]\n+\n+ union = area1[:, None] + area2 - inter\n+\n+ iou = inter / union\n+\n+ lti = torch.min(boxes1[:, None, :2], boxes2[:, :2])\n+ rbi = torch.max(boxes1[:, None, 2:], boxes2[:, 2:])\n+\n+ whi = (rbi - lti).clamp(min=0) # [N,M,2]\n+ areai = whi[:, :, 0] * whi[:, :, 1]\n+\n+ return iou - (areai - union) / areai\n", "issue": "Different types of IoU\n## \ud83d\ude80 Feature\r\n<!-- A clear and concise description of the feature proposal -->\r\nImplementation of 3 other types of IoU metric in torchvision.ops.boxes:\r\n1. Generalized IoU\r\n2. Distance IoU\r\n3. Complete IoU\r\n\r\n## Motivation\r\n\r\n<!-- Please outline the motivation for the proposal. Is your feature request related to a problem? e.g., I'm always frustrated when [...]. If this is related to another GitHub issue, please link here too -->\r\nSince these metrics can lead to better results in some cases (object detection, etc.) 
it could be useful to have them natively implemented.\r\n\r\n## Pitch\r\n\r\n<!-- A clear and concise description of what you want to happen. -->\r\nUsers who need IoU computation can simply choose from 4 different implementation using the same inputs (e.g. two lists of boxes).\r\n\r\n## Additional context\r\n\r\n<!-- Add any other context or screenshots about the feature request here. -->\r\nMathematical details can be found here:\r\n1. Generalized IoU: [Generalized Intersection over Union](https://giou.stanford.edu/)\r\n2. Distance/Complete IoU: [Distance-IoU Loss: Faster and Better Learning for Bounding Box Regression](https://arxiv.org/abs/1911.08287)\n", "before_files": [{"content": "from .boxes import nms, batched_nms, remove_small_boxes, clip_boxes_to_image, box_area, box_iou\nfrom .new_empty_tensor import _new_empty_tensor\nfrom .deform_conv import deform_conv2d, DeformConv2d\nfrom .roi_align import roi_align, RoIAlign\nfrom .roi_pool import roi_pool, RoIPool\nfrom .ps_roi_align import ps_roi_align, PSRoIAlign\nfrom .ps_roi_pool import ps_roi_pool, PSRoIPool\nfrom .poolers import MultiScaleRoIAlign\nfrom .feature_pyramid_network import FeaturePyramidNetwork\n\nfrom ._register_onnx_ops import _register_custom_op\n\n_register_custom_op()\n\n\n__all__ = [\n 'deform_conv2d', 'DeformConv2d', 'nms', 'batched_nms', 'remove_small_boxes',\n 'clip_boxes_to_image', 'box_area', 'box_iou', 'roi_align', 'RoIAlign', 'roi_pool',\n 'RoIPool', '_new_empty_tensor', 'ps_roi_align', 'PSRoIAlign', 'ps_roi_pool',\n 'PSRoIPool', 'MultiScaleRoIAlign', 'FeaturePyramidNetwork'\n]\n", "path": "torchvision/ops/__init__.py"}, {"content": "import torch\nfrom torch.jit.annotations import Tuple\nfrom torch import Tensor\nimport torchvision\n\n\ndef nms(boxes: Tensor, scores: Tensor, iou_threshold: float) -> Tensor:\n \"\"\"\n Performs non-maximum suppression (NMS) on the boxes according\n to their intersection-over-union (IoU).\n\n NMS iteratively removes lower scoring boxes which have an\n IoU greater than iou_threshold with another (higher scoring)\n box.\n\n If multiple boxes have the exact same score and satisfy the IoU\n criterion with respect to a reference box, the selected box is\n not guaranteed to be the same between CPU and GPU. This is similar\n to the behavior of argsort in PyTorch when repeated values are present.\n\n Parameters\n ----------\n boxes : Tensor[N, 4])\n boxes to perform NMS on. They\n are expected to be in (x1, y1, x2, y2) format\n scores : Tensor[N]\n scores for each one of the boxes\n iou_threshold : float\n discards all overlapping\n boxes with IoU > iou_threshold\n\n Returns\n -------\n keep : Tensor\n int64 tensor with the indices\n of the elements that have been kept\n by NMS, sorted in decreasing order of scores\n \"\"\"\n return torch.ops.torchvision.nms(boxes, scores, iou_threshold)\n\n\[email protected]._script_if_tracing\ndef batched_nms(\n boxes: Tensor,\n scores: Tensor,\n idxs: Tensor,\n iou_threshold: float,\n) -> Tensor:\n \"\"\"\n Performs non-maximum suppression in a batched fashion.\n\n Each index value correspond to a category, and NMS\n will not be applied between elements of different categories.\n\n Parameters\n ----------\n boxes : Tensor[N, 4]\n boxes where NMS will be performed. 
They\n are expected to be in (x1, y1, x2, y2) format\n scores : Tensor[N]\n scores for each one of the boxes\n idxs : Tensor[N]\n indices of the categories for each one of the boxes.\n iou_threshold : float\n discards all overlapping boxes\n with IoU > iou_threshold\n\n Returns\n -------\n keep : Tensor\n int64 tensor with the indices of\n the elements that have been kept by NMS, sorted\n in decreasing order of scores\n \"\"\"\n if boxes.numel() == 0:\n return torch.empty((0,), dtype=torch.int64, device=boxes.device)\n # strategy: in order to perform NMS independently per class.\n # we add an offset to all the boxes. The offset is dependent\n # only on the class idx, and is large enough so that boxes\n # from different classes do not overlap\n else:\n max_coordinate = boxes.max()\n offsets = idxs.to(boxes) * (max_coordinate + torch.tensor(1).to(boxes))\n boxes_for_nms = boxes + offsets[:, None]\n keep = nms(boxes_for_nms, scores, iou_threshold)\n return keep\n\n\ndef remove_small_boxes(boxes: Tensor, min_size: float) -> Tensor:\n \"\"\"\n Remove boxes which contains at least one side smaller than min_size.\n\n Arguments:\n boxes (Tensor[N, 4]): boxes in (x1, y1, x2, y2) format\n min_size (float): minimum size\n\n Returns:\n keep (Tensor[K]): indices of the boxes that have both sides\n larger than min_size\n \"\"\"\n ws, hs = boxes[:, 2] - boxes[:, 0], boxes[:, 3] - boxes[:, 1]\n keep = (ws >= min_size) & (hs >= min_size)\n keep = torch.where(keep)[0]\n return keep\n\n\ndef clip_boxes_to_image(boxes: Tensor, size: Tuple[int, int]) -> Tensor:\n \"\"\"\n Clip boxes so that they lie inside an image of size `size`.\n\n Arguments:\n boxes (Tensor[N, 4]): boxes in (x1, y1, x2, y2) format\n size (Tuple[height, width]): size of the image\n\n Returns:\n clipped_boxes (Tensor[N, 4])\n \"\"\"\n dim = boxes.dim()\n boxes_x = boxes[..., 0::2]\n boxes_y = boxes[..., 1::2]\n height, width = size\n\n if torchvision._is_tracing():\n boxes_x = torch.max(boxes_x, torch.tensor(0, dtype=boxes.dtype, device=boxes.device))\n boxes_x = torch.min(boxes_x, torch.tensor(width, dtype=boxes.dtype, device=boxes.device))\n boxes_y = torch.max(boxes_y, torch.tensor(0, dtype=boxes.dtype, device=boxes.device))\n boxes_y = torch.min(boxes_y, torch.tensor(height, dtype=boxes.dtype, device=boxes.device))\n else:\n boxes_x = boxes_x.clamp(min=0, max=width)\n boxes_y = boxes_y.clamp(min=0, max=height)\n\n clipped_boxes = torch.stack((boxes_x, boxes_y), dim=dim)\n return clipped_boxes.reshape(boxes.shape)\n\n\ndef box_area(boxes: Tensor) -> Tensor:\n \"\"\"\n Computes the area of a set of bounding boxes, which are specified by its\n (x1, y1, x2, y2) coordinates.\n\n Arguments:\n boxes (Tensor[N, 4]): boxes for which the area will be computed. 
They\n are expected to be in (x1, y1, x2, y2) format\n\n Returns:\n area (Tensor[N]): area for each box\n \"\"\"\n return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])\n\n\n# implementation from https://github.com/kuangliu/torchcv/blob/master/torchcv/utils/box.py\n# with slight modifications\ndef box_iou(boxes1: Tensor, boxes2: Tensor) -> Tensor:\n \"\"\"\n Return intersection-over-union (Jaccard index) of boxes.\n\n Both sets of boxes are expected to be in (x1, y1, x2, y2) format.\n\n Arguments:\n boxes1 (Tensor[N, 4])\n boxes2 (Tensor[M, 4])\n\n Returns:\n iou (Tensor[N, M]): the NxM matrix containing the pairwise\n IoU values for every element in boxes1 and boxes2\n \"\"\"\n area1 = box_area(boxes1)\n area2 = box_area(boxes2)\n\n lt = torch.max(boxes1[:, None, :2], boxes2[:, :2]) # [N,M,2]\n rb = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) # [N,M,2]\n\n wh = (rb - lt).clamp(min=0) # [N,M,2]\n inter = wh[:, :, 0] * wh[:, :, 1] # [N,M]\n\n iou = inter / (area1[:, None] + area2 - inter)\n return iou\n", "path": "torchvision/ops/boxes.py"}]}
3,142
1,001
gh_patches_debug_43314
rasdani/github-patches
git_diff
pymedusa__Medusa-1988
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Error while searching on T411 I was searching ChäoS;Child and I saw on my debug log this error because of the ";" This should url-encoded, but it doesn't seems so.. ![img](http://i.imgur.com/m00RH06.png) Thanks a lot, </issue> <code> [start of medusa/providers/torrent/json/t411.py] 1 # coding=utf-8 2 # Author: djoole <[email protected]> 3 # 4 # This file is part of Medusa. 5 # 6 # Medusa is free software: you can redistribute it and/or modify 7 # it under the terms of the GNU General Public License as published by 8 # the Free Software Foundation, either version 3 of the License, or 9 # (at your option) any later version. 10 # 11 # Medusa is distributed in the hope that it will be useful, 12 # but WITHOUT ANY WARRANTY; without even the implied warranty of 13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 # GNU General Public License for more details. 15 # 16 # You should have received a copy of the GNU General Public License 17 # along with Medusa. If not, see <http://www.gnu.org/licenses/>. 18 """Provider code for T411.""" 19 from __future__ import unicode_literals 20 21 import time 22 import traceback 23 24 from operator import itemgetter 25 26 from requests.auth import AuthBase 27 from requests.compat import urljoin 28 29 from ..torrent_provider import TorrentProvider 30 from .... import logger, tv_cache 31 from ....common import USER_AGENT 32 from ....helper.common import convert_size, try_int 33 34 35 class T411Provider(TorrentProvider): 36 """T411 Torrent provider.""" 37 38 def __init__(self): 39 """Initialize the class.""" 40 super(self.__class__, self).__init__("T411") 41 42 # Credentials 43 self.username = None 44 self.password = None 45 self.token = None 46 self.tokenLastUpdate = None 47 48 # URLs 49 self.url = 'https://api.t411.li' 50 self.urls = { 51 'search': urljoin(self.url, 'torrents/search/%s*?cid=%s&limit=100'), 52 'rss': urljoin(self.url, 'torrents/top/today'), 53 'login_page': urljoin(self.url, 'auth'), 54 'download': urljoin(self.url, 'torrents/download/%s'), 55 } 56 57 # Proper Strings 58 59 # Miscellaneous Options 60 self.headers.update({'User-Agent': USER_AGENT}) 61 self.subcategories = [433, 637, 455, 639] 62 self.confirmed = False 63 64 # Torrent Stats 65 self.minseed = 0 66 self.minleech = 0 67 68 # Cache 69 self.cache = tv_cache.TVCache(self, min_time=10) # Only poll T411 every 10 minutes max 70 71 def search(self, search_strings, age=0, ep_obj=None): 72 """Search a provider and parse the results. 
73 74 :param search_strings: A dict with mode (key) and the search value (value) 75 :param age: Not used 76 :param ep_obj: Not used 77 :returns: A list of search results (structure) 78 """ 79 results = [] 80 if not self.login(): 81 return results 82 83 for mode in search_strings: 84 logger.log('Search mode: {0}'.format(mode), logger.DEBUG) 85 86 for search_string in search_strings[mode]: 87 if mode != 'RSS': 88 logger.log('Search string: {search}'.format 89 (search=search_string), logger.DEBUG) 90 if self.confirmed: 91 logger.log('Searching only confirmed torrents', logger.DEBUG) 92 93 search_urls = ([self.urls['search'] % (search_string, u) 94 for u in self.subcategories], [self.urls['rss']])[mode == 'RSS'] 95 for search_url in search_urls: 96 response = self.get_url(search_url, returns='response') 97 98 if not response or not response.content: 99 logger.log('No data returned from provider', logger.DEBUG) 100 continue 101 102 try: 103 jdata = response.json() 104 except ValueError: # also catches JSONDecodeError if simplejson is installed 105 logger.log('No data returned from provider', logger.DEBUG) 106 continue 107 108 results += self.parse(jdata, mode) 109 110 return results 111 112 def parse(self, data, mode): 113 """Parse search results for items. 114 115 :param data: The raw response from a search 116 :param mode: The current mode used to search, e.g. RSS 117 118 :return: A list of items found 119 """ 120 items = [] 121 122 unsorted_torrent_rows = data.get('torrents') if mode != 'RSS' else data 123 124 if not unsorted_torrent_rows or not isinstance(unsorted_torrent_rows, dict): 125 logger.log('Data returned from provider does not contain any {0}torrents'.format( 126 'confirmed ' if self.confirmed else ''), logger.DEBUG) 127 return items 128 129 torrent_rows = sorted(unsorted_torrent_rows, key=itemgetter('added'), reverse=True) 130 131 for row in torrent_rows: 132 if not isinstance(row, dict): 133 logger.log('Invalid data returned from provider', logger.WARNING) 134 continue 135 136 if mode == 'RSS' and 'category' in row and try_int(row['category'], 0) not in self.subcategories: 137 continue 138 139 try: 140 title = row['name'] 141 torrent_id = row['id'] 142 download_url = (self.urls['download'] % torrent_id) 143 if not all([title, download_url]): 144 continue 145 146 seeders = try_int(row['seeders']) 147 leechers = try_int(row['leechers']) 148 verified = bool(row['isVerified']) 149 150 # Filter unseeded torrent 151 if seeders < min(self.minseed, 1): 152 if mode != 'RSS': 153 logger.log("Discarding torrent because it doesn't meet the " 154 "minimum seeders: {0}. Seeders: {1}".format 155 (title, seeders), logger.DEBUG) 156 continue 157 158 if self.confirmed and not verified and mode != 'RSS': 159 logger.log("Found result {0} but that doesn't seem like a verified" 160 " result so I'm ignoring it".format(title), logger.DEBUG) 161 continue 162 163 torrent_size = row['size'] 164 size = convert_size(torrent_size) or -1 165 166 item = { 167 'title': title, 168 'link': download_url, 169 'size': size, 170 'seeders': seeders, 171 'leechers': leechers, 172 'pubdate': None, 173 } 174 if mode != 'RSS': 175 logger.log('Found result: {0} with {1} seeders and {2} leechers'.format 176 (title, seeders, leechers), logger.DEBUG) 177 178 items.append(item) 179 except (AttributeError, TypeError, KeyError, ValueError, IndexError): 180 logger.log('Failed parsing provider. 
Traceback: {0!r}'.format 181 (traceback.format_exc()), logger.ERROR) 182 183 return items 184 185 def login(self): 186 """Log into provider.""" 187 if self.token is not None: 188 if time.time() < (self.tokenLastUpdate + 30 * 60): 189 return True 190 191 login_params = { 192 'username': self.username, 193 'password': self.password, 194 } 195 196 response = self.get_url(self.urls['login_page'], post_data=login_params, returns='json') 197 if not response: 198 logger.log('Unable to connect to provider', logger.WARNING) 199 return False 200 201 if response and 'token' in response: 202 self.token = response['token'] 203 self.tokenLastUpdate = time.time() 204 # self.uid = response['uid'].encode('ascii', 'ignore') 205 self.session.auth = T411Auth(self.token) 206 return True 207 else: 208 logger.log('Token not found in authentication response', logger.WARNING) 209 return False 210 211 212 class T411Auth(AuthBase): 213 """Attach HTTP Authentication to the given Request object.""" 214 215 def __init__(self, token): 216 """Init object.""" 217 self.token = token 218 219 def __call__(self, r): 220 """Add token to request header.""" 221 r.headers['Authorization'] = self.token 222 return r 223 224 225 provider = T411Provider() 226 [end of medusa/providers/torrent/json/t411.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/medusa/providers/torrent/json/t411.py b/medusa/providers/torrent/json/t411.py --- a/medusa/providers/torrent/json/t411.py +++ b/medusa/providers/torrent/json/t411.py @@ -24,7 +24,7 @@ from operator import itemgetter from requests.auth import AuthBase -from requests.compat import urljoin +from requests.compat import quote, urljoin from ..torrent_provider import TorrentProvider from .... import logger, tv_cache @@ -48,10 +48,10 @@ # URLs self.url = 'https://api.t411.li' self.urls = { - 'search': urljoin(self.url, 'torrents/search/%s*?cid=%s&limit=100'), + 'search': urljoin(self.url, 'torrents/search/{search}'), 'rss': urljoin(self.url, 'torrents/top/today'), 'login_page': urljoin(self.url, 'auth'), - 'download': urljoin(self.url, 'torrents/download/%s'), + 'download': urljoin(self.url, 'torrents/download/{id}'), } # Proper Strings @@ -80,6 +80,8 @@ if not self.login(): return results + search_params = {} + for mode in search_strings: logger.log('Search mode: {0}'.format(mode), logger.DEBUG) @@ -90,10 +92,25 @@ if self.confirmed: logger.log('Searching only confirmed torrents', logger.DEBUG) - search_urls = ([self.urls['search'] % (search_string, u) - for u in self.subcategories], [self.urls['rss']])[mode == 'RSS'] - for search_url in search_urls: - response = self.get_url(search_url, returns='response') + # use string formatting to safely coerce the search term + # to unicode then utf-8 encode the unicode string + term = '{term}'.format(term=search_string).encode('utf-8') + # build the search URL + search_url = self.urls['search'].format( + search=quote(term) # URL encode the search term + ) + categories = self.subcategories + search_params.update({'limit': 100}) + else: + search_url = self.urls['rss'] + # Using None as a category removes it as a search param + categories = [None] # Must be a list for iteration + + for category in categories: + search_params.update({'cid': category}) + response = self.get_url( + search_url, params=search_params, returns='response' + ) if not response or not response.content: logger.log('No data returned from provider', logger.DEBUG) @@ -121,9 +138,12 @@ unsorted_torrent_rows = data.get('torrents') if mode != 'RSS' else data - if not unsorted_torrent_rows or not isinstance(unsorted_torrent_rows, dict): - logger.log('Data returned from provider does not contain any {0}torrents'.format( - 'confirmed ' if self.confirmed else ''), logger.DEBUG) + if not unsorted_torrent_rows: + logger.log( + 'Data returned from provider does not contain any {torrents}'.format( + torrents='confirmed torrents' if self.confirmed else 'torrents' + ), logger.DEBUG + ) return items torrent_rows = sorted(unsorted_torrent_rows, key=itemgetter('added'), reverse=True) @@ -139,7 +159,7 @@ try: title = row['name'] torrent_id = row['id'] - download_url = (self.urls['download'] % torrent_id) + download_url = self.urls['download'].format(id=torrent_id) if not all([title, download_url]): continue
{"golden_diff": "diff --git a/medusa/providers/torrent/json/t411.py b/medusa/providers/torrent/json/t411.py\n--- a/medusa/providers/torrent/json/t411.py\n+++ b/medusa/providers/torrent/json/t411.py\n@@ -24,7 +24,7 @@\n from operator import itemgetter\n \n from requests.auth import AuthBase\n-from requests.compat import urljoin\n+from requests.compat import quote, urljoin\n \n from ..torrent_provider import TorrentProvider\n from .... import logger, tv_cache\n@@ -48,10 +48,10 @@\n # URLs\n self.url = 'https://api.t411.li'\n self.urls = {\n- 'search': urljoin(self.url, 'torrents/search/%s*?cid=%s&limit=100'),\n+ 'search': urljoin(self.url, 'torrents/search/{search}'),\n 'rss': urljoin(self.url, 'torrents/top/today'),\n 'login_page': urljoin(self.url, 'auth'),\n- 'download': urljoin(self.url, 'torrents/download/%s'),\n+ 'download': urljoin(self.url, 'torrents/download/{id}'),\n }\n \n # Proper Strings\n@@ -80,6 +80,8 @@\n if not self.login():\n return results\n \n+ search_params = {}\n+\n for mode in search_strings:\n logger.log('Search mode: {0}'.format(mode), logger.DEBUG)\n \n@@ -90,10 +92,25 @@\n if self.confirmed:\n logger.log('Searching only confirmed torrents', logger.DEBUG)\n \n- search_urls = ([self.urls['search'] % (search_string, u)\n- for u in self.subcategories], [self.urls['rss']])[mode == 'RSS']\n- for search_url in search_urls:\n- response = self.get_url(search_url, returns='response')\n+ # use string formatting to safely coerce the search term\n+ # to unicode then utf-8 encode the unicode string\n+ term = '{term}'.format(term=search_string).encode('utf-8')\n+ # build the search URL\n+ search_url = self.urls['search'].format(\n+ search=quote(term) # URL encode the search term\n+ )\n+ categories = self.subcategories\n+ search_params.update({'limit': 100})\n+ else:\n+ search_url = self.urls['rss']\n+ # Using None as a category removes it as a search param\n+ categories = [None] # Must be a list for iteration\n+\n+ for category in categories:\n+ search_params.update({'cid': category})\n+ response = self.get_url(\n+ search_url, params=search_params, returns='response'\n+ )\n \n if not response or not response.content:\n logger.log('No data returned from provider', logger.DEBUG)\n@@ -121,9 +138,12 @@\n \n unsorted_torrent_rows = data.get('torrents') if mode != 'RSS' else data\n \n- if not unsorted_torrent_rows or not isinstance(unsorted_torrent_rows, dict):\n- logger.log('Data returned from provider does not contain any {0}torrents'.format(\n- 'confirmed ' if self.confirmed else ''), logger.DEBUG)\n+ if not unsorted_torrent_rows:\n+ logger.log(\n+ 'Data returned from provider does not contain any {torrents}'.format(\n+ torrents='confirmed torrents' if self.confirmed else 'torrents'\n+ ), logger.DEBUG\n+ )\n return items\n \n torrent_rows = sorted(unsorted_torrent_rows, key=itemgetter('added'), reverse=True)\n@@ -139,7 +159,7 @@\n try:\n title = row['name']\n torrent_id = row['id']\n- download_url = (self.urls['download'] % torrent_id)\n+ download_url = self.urls['download'].format(id=torrent_id)\n if not all([title, download_url]):\n continue\n", "issue": "Error while searching on T411\nI was searching Ch\u00e4oS;Child and I saw on my debug log this error because of the \";\"\r\n\r\nThis should url-encoded, but it doesn't seems so..\r\n\r\n![img](http://i.imgur.com/m00RH06.png)\r\n\r\nThanks a lot,\r\n\n", "before_files": [{"content": "# coding=utf-8\n# Author: djoole <[email protected]>\n#\n# This file is part of Medusa.\n#\n# Medusa is free software: you can redistribute it 
and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Medusa is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Medusa. If not, see <http://www.gnu.org/licenses/>.\n\"\"\"Provider code for T411.\"\"\"\nfrom __future__ import unicode_literals\n\nimport time\nimport traceback\n\nfrom operator import itemgetter\n\nfrom requests.auth import AuthBase\nfrom requests.compat import urljoin\n\nfrom ..torrent_provider import TorrentProvider\nfrom .... import logger, tv_cache\nfrom ....common import USER_AGENT\nfrom ....helper.common import convert_size, try_int\n\n\nclass T411Provider(TorrentProvider):\n \"\"\"T411 Torrent provider.\"\"\"\n\n def __init__(self):\n \"\"\"Initialize the class.\"\"\"\n super(self.__class__, self).__init__(\"T411\")\n\n # Credentials\n self.username = None\n self.password = None\n self.token = None\n self.tokenLastUpdate = None\n\n # URLs\n self.url = 'https://api.t411.li'\n self.urls = {\n 'search': urljoin(self.url, 'torrents/search/%s*?cid=%s&limit=100'),\n 'rss': urljoin(self.url, 'torrents/top/today'),\n 'login_page': urljoin(self.url, 'auth'),\n 'download': urljoin(self.url, 'torrents/download/%s'),\n }\n\n # Proper Strings\n\n # Miscellaneous Options\n self.headers.update({'User-Agent': USER_AGENT})\n self.subcategories = [433, 637, 455, 639]\n self.confirmed = False\n\n # Torrent Stats\n self.minseed = 0\n self.minleech = 0\n\n # Cache\n self.cache = tv_cache.TVCache(self, min_time=10) # Only poll T411 every 10 minutes max\n\n def search(self, search_strings, age=0, ep_obj=None):\n \"\"\"Search a provider and parse the results.\n\n :param search_strings: A dict with mode (key) and the search value (value)\n :param age: Not used\n :param ep_obj: Not used\n :returns: A list of search results (structure)\n \"\"\"\n results = []\n if not self.login():\n return results\n\n for mode in search_strings:\n logger.log('Search mode: {0}'.format(mode), logger.DEBUG)\n\n for search_string in search_strings[mode]:\n if mode != 'RSS':\n logger.log('Search string: {search}'.format\n (search=search_string), logger.DEBUG)\n if self.confirmed:\n logger.log('Searching only confirmed torrents', logger.DEBUG)\n\n search_urls = ([self.urls['search'] % (search_string, u)\n for u in self.subcategories], [self.urls['rss']])[mode == 'RSS']\n for search_url in search_urls:\n response = self.get_url(search_url, returns='response')\n\n if not response or not response.content:\n logger.log('No data returned from provider', logger.DEBUG)\n continue\n\n try:\n jdata = response.json()\n except ValueError: # also catches JSONDecodeError if simplejson is installed\n logger.log('No data returned from provider', logger.DEBUG)\n continue\n\n results += self.parse(jdata, mode)\n\n return results\n\n def parse(self, data, mode):\n \"\"\"Parse search results for items.\n\n :param data: The raw response from a search\n :param mode: The current mode used to search, e.g. 
RSS\n\n :return: A list of items found\n \"\"\"\n items = []\n\n unsorted_torrent_rows = data.get('torrents') if mode != 'RSS' else data\n\n if not unsorted_torrent_rows or not isinstance(unsorted_torrent_rows, dict):\n logger.log('Data returned from provider does not contain any {0}torrents'.format(\n 'confirmed ' if self.confirmed else ''), logger.DEBUG)\n return items\n\n torrent_rows = sorted(unsorted_torrent_rows, key=itemgetter('added'), reverse=True)\n\n for row in torrent_rows:\n if not isinstance(row, dict):\n logger.log('Invalid data returned from provider', logger.WARNING)\n continue\n\n if mode == 'RSS' and 'category' in row and try_int(row['category'], 0) not in self.subcategories:\n continue\n\n try:\n title = row['name']\n torrent_id = row['id']\n download_url = (self.urls['download'] % torrent_id)\n if not all([title, download_url]):\n continue\n\n seeders = try_int(row['seeders'])\n leechers = try_int(row['leechers'])\n verified = bool(row['isVerified'])\n\n # Filter unseeded torrent\n if seeders < min(self.minseed, 1):\n if mode != 'RSS':\n logger.log(\"Discarding torrent because it doesn't meet the \"\n \"minimum seeders: {0}. Seeders: {1}\".format\n (title, seeders), logger.DEBUG)\n continue\n\n if self.confirmed and not verified and mode != 'RSS':\n logger.log(\"Found result {0} but that doesn't seem like a verified\"\n \" result so I'm ignoring it\".format(title), logger.DEBUG)\n continue\n\n torrent_size = row['size']\n size = convert_size(torrent_size) or -1\n\n item = {\n 'title': title,\n 'link': download_url,\n 'size': size,\n 'seeders': seeders,\n 'leechers': leechers,\n 'pubdate': None,\n }\n if mode != 'RSS':\n logger.log('Found result: {0} with {1} seeders and {2} leechers'.format\n (title, seeders, leechers), logger.DEBUG)\n\n items.append(item)\n except (AttributeError, TypeError, KeyError, ValueError, IndexError):\n logger.log('Failed parsing provider. Traceback: {0!r}'.format\n (traceback.format_exc()), logger.ERROR)\n\n return items\n\n def login(self):\n \"\"\"Log into provider.\"\"\"\n if self.token is not None:\n if time.time() < (self.tokenLastUpdate + 30 * 60):\n return True\n\n login_params = {\n 'username': self.username,\n 'password': self.password,\n }\n\n response = self.get_url(self.urls['login_page'], post_data=login_params, returns='json')\n if not response:\n logger.log('Unable to connect to provider', logger.WARNING)\n return False\n\n if response and 'token' in response:\n self.token = response['token']\n self.tokenLastUpdate = time.time()\n # self.uid = response['uid'].encode('ascii', 'ignore')\n self.session.auth = T411Auth(self.token)\n return True\n else:\n logger.log('Token not found in authentication response', logger.WARNING)\n return False\n\n\nclass T411Auth(AuthBase):\n \"\"\"Attach HTTP Authentication to the given Request object.\"\"\"\n\n def __init__(self, token):\n \"\"\"Init object.\"\"\"\n self.token = token\n\n def __call__(self, r):\n \"\"\"Add token to request header.\"\"\"\n r.headers['Authorization'] = self.token\n return r\n\n\nprovider = T411Provider()\n", "path": "medusa/providers/torrent/json/t411.py"}]}
2,933
885
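Aside on the fix above: the heart of the change is to stop interpolating raw search terms into the URL and to let requests do the encoding. The sketch below is illustrative only (the host name and helper names are placeholders, not T411's real API); it shows how quote() covers the path segment while params covers the query string:

    import requests
    from requests.compat import quote

    API_BASE = 'https://api.example.invalid'  # placeholder, not the real tracker host


    def build_search_url(term):
        # requests does not encode path segments, so quote the term explicitly.
        encoded = quote('{term}'.format(term=term).encode('utf-8'))
        return '{base}/torrents/search/{search}'.format(base=API_BASE, search=encoded)


    def search(session, term, category=None, limit=100):
        # Query-string values passed via params are percent-encoded automatically,
        # and a None value (no category) is simply dropped from the request.
        return session.get(build_search_url(term), params={'cid': category, 'limit': limit})


    # e.g. search(requests.Session(), 'ChäoS;Child', category=433)

A term such as 'ChäoS;Child' then reaches the API as 'Ch%C3%A4oS%3BChild' instead of producing a malformed request.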
gh_patches_debug_41954
rasdani/github-patches
git_diff
litestar-org__litestar-2124
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> StaticFilesConfig and virtual directories I'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem. This is not generally true, especially in any kind of virtual filesystem (e.g. a zipped package). I think this condition should be relaxed to support virtual filesystems. https://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32 </issue> <code> [start of litestar/contrib/sqlalchemy/dto.py] 1 from __future__ import annotations 2 3 from dataclasses import replace 4 from functools import singledispatchmethod 5 from typing import TYPE_CHECKING, Collection, Generic, Optional, TypeVar 6 7 from sqlalchemy import Column, inspect, orm, sql 8 from sqlalchemy.ext.associationproxy import AssociationProxy, AssociationProxyExtensionType 9 from sqlalchemy.ext.hybrid import HybridExtensionType, hybrid_property 10 from sqlalchemy.orm import ( 11 ColumnProperty, 12 DeclarativeBase, 13 InspectionAttr, 14 Mapped, 15 NotExtension, 16 QueryableAttribute, 17 RelationshipDirection, 18 RelationshipProperty, 19 ) 20 21 from litestar.dto.base_dto import AbstractDTO 22 from litestar.dto.data_structures import DTOFieldDefinition 23 from litestar.dto.field import DTO_FIELD_META_KEY, DTOField, Mark 24 from litestar.exceptions import ImproperlyConfiguredException 25 from litestar.types.empty import Empty 26 from litestar.typing import FieldDefinition 27 from litestar.utils.signature import ParsedSignature 28 29 if TYPE_CHECKING: 30 from typing import Any, Generator 31 32 from typing_extensions import TypeAlias 33 34 __all__ = ("SQLAlchemyDTO",) 35 36 T = TypeVar("T", bound="DeclarativeBase | Collection[DeclarativeBase]") 37 38 ElementType: TypeAlias = "Column | RelationshipProperty" 39 SQLA_NS = {**vars(orm), **vars(sql)} 40 41 42 class SQLAlchemyDTO(AbstractDTO[T], Generic[T]): 43 """Support for domain modelling with SQLAlchemy.""" 44 45 @singledispatchmethod 46 @classmethod 47 def handle_orm_descriptor( 48 cls, 49 extension_type: NotExtension | AssociationProxyExtensionType | HybridExtensionType, 50 orm_descriptor: InspectionAttr, 51 key: str, 52 model_type_hints: dict[str, FieldDefinition], 53 model_name: str, 54 ) -> list[DTOFieldDefinition]: 55 raise NotImplementedError(f"Unsupported extension type: {extension_type}") 56 57 @handle_orm_descriptor.register(NotExtension) 58 @classmethod 59 def _( 60 cls, 61 extension_type: NotExtension, 62 key: str, 63 orm_descriptor: InspectionAttr, 64 model_type_hints: dict[str, FieldDefinition], 65 model_name: str, 66 ) -> list[DTOFieldDefinition]: 67 if not isinstance(orm_descriptor, QueryableAttribute): 68 raise NotImplementedError(f"Unexpected descriptor type for '{extension_type}': '{orm_descriptor}'") 69 70 elem: ElementType 71 if isinstance(orm_descriptor.property, ColumnProperty): 72 if not isinstance(orm_descriptor.property.expression, Column): 73 raise NotImplementedError(f"Expected 'Column', got: '{orm_descriptor.property.expression}'") 74 elem = orm_descriptor.property.expression 75 elif isinstance(orm_descriptor.property, RelationshipProperty): 76 elem = orm_descriptor.property 77 else: 78 raise NotImplementedError(f"Unhandled property type: 
'{orm_descriptor.property}'") 79 80 default, default_factory = _detect_defaults(elem) 81 82 try: 83 if (field_definition := model_type_hints[key]).origin is Mapped: 84 (field_definition,) = field_definition.inner_types 85 else: 86 raise NotImplementedError(f"Expected 'Mapped' origin, got: '{field_definition.origin}'") 87 except KeyError: 88 field_definition = parse_type_from_element(elem) 89 90 return [ 91 DTOFieldDefinition.from_field_definition( 92 field_definition=replace( 93 field_definition, 94 name=key, 95 default=default, 96 ), 97 default_factory=default_factory, 98 dto_field=elem.info.get(DTO_FIELD_META_KEY, DTOField()), 99 model_name=model_name, 100 ) 101 ] 102 103 @handle_orm_descriptor.register(AssociationProxyExtensionType) 104 @classmethod 105 def _( 106 cls, 107 extension_type: AssociationProxyExtensionType, 108 key: str, 109 orm_descriptor: InspectionAttr, 110 model_type_hints: dict[str, FieldDefinition], 111 model_name: str, 112 ) -> list[DTOFieldDefinition]: 113 if not isinstance(orm_descriptor, AssociationProxy): 114 raise NotImplementedError(f"Unexpected descriptor type '{orm_descriptor}' for '{extension_type}'") 115 116 if (field_definition := model_type_hints[key]).origin is AssociationProxy: 117 (field_definition,) = field_definition.inner_types 118 else: 119 raise NotImplementedError(f"Expected 'AssociationProxy' origin, got: '{field_definition.origin}'") 120 121 return [ 122 DTOFieldDefinition.from_field_definition( 123 field_definition=replace( 124 field_definition, 125 name=key, 126 default=Empty, 127 ), 128 default_factory=None, 129 dto_field=orm_descriptor.info.get(DTO_FIELD_META_KEY, DTOField(mark=Mark.READ_ONLY)), 130 model_name=model_name, 131 ) 132 ] 133 134 @handle_orm_descriptor.register(HybridExtensionType) 135 @classmethod 136 def _( 137 cls, 138 extension_type: HybridExtensionType, 139 key: str, 140 orm_descriptor: InspectionAttr, 141 model_type_hints: dict[str, FieldDefinition], 142 model_name: str, 143 ) -> list[DTOFieldDefinition]: 144 if not isinstance(orm_descriptor, hybrid_property): 145 raise NotImplementedError(f"Unexpected descriptor type '{orm_descriptor}' for '{extension_type}'") 146 147 getter_sig = ParsedSignature.from_fn(orm_descriptor.fget, {}) 148 149 field_defs = [ 150 DTOFieldDefinition.from_field_definition( 151 field_definition=replace( 152 getter_sig.return_type, 153 name=orm_descriptor.__name__, 154 default=Empty, 155 ), 156 default_factory=None, 157 dto_field=orm_descriptor.info.get(DTO_FIELD_META_KEY, DTOField(mark=Mark.READ_ONLY)), 158 model_name=model_name, 159 ) 160 ] 161 162 if orm_descriptor.fset is not None: 163 setter_sig = ParsedSignature.from_fn(orm_descriptor.fset, {}) 164 field_defs.append( 165 DTOFieldDefinition.from_field_definition( 166 field_definition=replace( 167 next(iter(setter_sig.parameters.values())), 168 name=orm_descriptor.__name__, 169 default=Empty, 170 ), 171 default_factory=None, 172 dto_field=orm_descriptor.info.get(DTO_FIELD_META_KEY, DTOField(mark=Mark.WRITE_ONLY)), 173 model_name=model_name, 174 ) 175 ) 176 177 return field_defs 178 179 @classmethod 180 def generate_field_definitions(cls, model_type: type[DeclarativeBase]) -> Generator[DTOFieldDefinition, None, None]: 181 if (mapper := inspect(model_type)) is None: # pragma: no cover 182 raise RuntimeError("Unexpected `None` value for mapper.") 183 184 # includes SQLAlchemy names and other mapped class names in the forward reference resolution namespace 185 namespace = {**SQLA_NS, **{m.class_.__name__: m.class_ for m in mapper.registry.mappers if m 
is not mapper}} 186 model_type_hints = cls.get_model_type_hints(model_type, namespace=namespace) 187 model_name = model_type.__name__ 188 189 # the same hybrid property descriptor can be included in `all_orm_descriptors` multiple times, once 190 # for each method name it is bound to. We only need to see it once, so track views of it here. 191 seen_hybrid_descriptors: set[hybrid_property] = set() 192 for key, orm_descriptor in mapper.all_orm_descriptors.items(): 193 if isinstance(orm_descriptor, hybrid_property): 194 if orm_descriptor in seen_hybrid_descriptors: 195 continue 196 197 seen_hybrid_descriptors.add(orm_descriptor) 198 199 yield from cls.handle_orm_descriptor( 200 orm_descriptor.extension_type, key, orm_descriptor, model_type_hints, model_name 201 ) 202 203 @classmethod 204 def detect_nested_field(cls, field_definition: FieldDefinition) -> bool: 205 return field_definition.is_subclass_of(DeclarativeBase) 206 207 208 def _detect_defaults(elem: ElementType) -> tuple[Any, Any]: 209 default: Any = Empty 210 default_factory: Any = None # pyright:ignore 211 if sqla_default := getattr(elem, "default", None): 212 if sqla_default.is_scalar: 213 default = sqla_default.arg 214 elif sqla_default.is_callable: 215 216 def default_factory(d: Any = sqla_default) -> Any: 217 return d.arg({}) 218 219 elif sqla_default.is_sequence or sqla_default.is_sentinel: 220 # SQLAlchemy sequences represent server side defaults 221 # so we cannot infer a reasonable default value for 222 # them on the client side 223 pass 224 else: 225 raise ValueError("Unexpected default type") 226 else: 227 if ( 228 isinstance(elem, RelationshipProperty) 229 and detect_nullable_relationship(elem) 230 or getattr(elem, "nullable", False) 231 ): 232 default = None 233 234 return default, default_factory 235 236 237 def parse_type_from_element(elem: ElementType) -> FieldDefinition: 238 """Parses a type from a SQLAlchemy element. 239 240 Args: 241 elem: The SQLAlchemy element to parse. 242 243 Returns: 244 FieldDefinition: The parsed type. 245 246 Raises: 247 ImproperlyConfiguredException: If the type cannot be parsed. 248 """ 249 250 if isinstance(elem, Column): 251 if elem.nullable: 252 return FieldDefinition.from_annotation(Optional[elem.type.python_type]) 253 return FieldDefinition.from_annotation(elem.type.python_type) 254 255 if isinstance(elem, RelationshipProperty): 256 if elem.direction in (RelationshipDirection.ONETOMANY, RelationshipDirection.MANYTOMANY): 257 collection_type = FieldDefinition.from_annotation(elem.collection_class or list) 258 return FieldDefinition.from_annotation(collection_type.safe_generic_origin[elem.mapper.class_]) 259 260 if detect_nullable_relationship(elem): 261 return FieldDefinition.from_annotation(Optional[elem.mapper.class_]) 262 263 return FieldDefinition.from_annotation(elem.mapper.class_) 264 265 raise ImproperlyConfiguredException( 266 f"Unable to parse type from element '{elem}'. Consider adding a type hint.", 267 ) 268 269 270 def detect_nullable_relationship(elem: RelationshipProperty) -> bool: 271 """Detects if a relationship is nullable. 272 273 This attempts to decide if we should allow a ``None`` default value for a relationship by looking at the 274 foreign key fields. If all foreign key fields are nullable, then we allow a ``None`` default value. 275 276 Args: 277 elem: The relationship to check. 278 279 Returns: 280 bool: ``True`` if the relationship is nullable, ``False`` otherwise. 
281 """ 282 return elem.direction == RelationshipDirection.MANYTOONE and all(c.nullable for c in elem.local_columns) 283 [end of litestar/contrib/sqlalchemy/dto.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/litestar/contrib/sqlalchemy/dto.py b/litestar/contrib/sqlalchemy/dto.py --- a/litestar/contrib/sqlalchemy/dto.py +++ b/litestar/contrib/sqlalchemy/dto.py @@ -9,9 +9,11 @@ from sqlalchemy.ext.hybrid import HybridExtensionType, hybrid_property from sqlalchemy.orm import ( ColumnProperty, + CompositeProperty, DeclarativeBase, InspectionAttr, Mapped, + MappedColumn, NotExtension, QueryableAttribute, RelationshipDirection, @@ -35,7 +37,7 @@ T = TypeVar("T", bound="DeclarativeBase | Collection[DeclarativeBase]") -ElementType: TypeAlias = "Column | RelationshipProperty" +ElementType: TypeAlias = "Column | RelationshipProperty | CompositeProperty" SQLA_NS = {**vars(orm), **vars(sql)} @@ -72,7 +74,7 @@ if not isinstance(orm_descriptor.property.expression, Column): raise NotImplementedError(f"Expected 'Column', got: '{orm_descriptor.property.expression}'") elem = orm_descriptor.property.expression - elif isinstance(orm_descriptor.property, RelationshipProperty): + elif isinstance(orm_descriptor.property, (RelationshipProperty, CompositeProperty)): elem = orm_descriptor.property else: raise NotImplementedError(f"Unhandled property type: '{orm_descriptor.property}'") @@ -189,6 +191,13 @@ # the same hybrid property descriptor can be included in `all_orm_descriptors` multiple times, once # for each method name it is bound to. We only need to see it once, so track views of it here. seen_hybrid_descriptors: set[hybrid_property] = set() + skipped_columns: set[str] = set() + for composite_property in mapper.composites: + for attr in composite_property.attrs: + if isinstance(attr, (MappedColumn, Column)): + skipped_columns.add(attr.name) + elif isinstance(attr, str): + skipped_columns.add(attr) for key, orm_descriptor in mapper.all_orm_descriptors.items(): if isinstance(orm_descriptor, hybrid_property): if orm_descriptor in seen_hybrid_descriptors: @@ -196,6 +205,9 @@ seen_hybrid_descriptors.add(orm_descriptor) + if key in skipped_columns: + continue + yield from cls.handle_orm_descriptor( orm_descriptor.extension_type, key, orm_descriptor, model_type_hints, model_name ) @@ -262,6 +274,9 @@ return FieldDefinition.from_annotation(elem.mapper.class_) + if isinstance(elem, CompositeProperty): + return FieldDefinition.from_annotation(elem.composite_class) + raise ImproperlyConfiguredException( f"Unable to parse type from element '{elem}'. Consider adding a type hint.", )
{"golden_diff": "diff --git a/litestar/contrib/sqlalchemy/dto.py b/litestar/contrib/sqlalchemy/dto.py\n--- a/litestar/contrib/sqlalchemy/dto.py\n+++ b/litestar/contrib/sqlalchemy/dto.py\n@@ -9,9 +9,11 @@\n from sqlalchemy.ext.hybrid import HybridExtensionType, hybrid_property\n from sqlalchemy.orm import (\n ColumnProperty,\n+ CompositeProperty,\n DeclarativeBase,\n InspectionAttr,\n Mapped,\n+ MappedColumn,\n NotExtension,\n QueryableAttribute,\n RelationshipDirection,\n@@ -35,7 +37,7 @@\n \n T = TypeVar(\"T\", bound=\"DeclarativeBase | Collection[DeclarativeBase]\")\n \n-ElementType: TypeAlias = \"Column | RelationshipProperty\"\n+ElementType: TypeAlias = \"Column | RelationshipProperty | CompositeProperty\"\n SQLA_NS = {**vars(orm), **vars(sql)}\n \n \n@@ -72,7 +74,7 @@\n if not isinstance(orm_descriptor.property.expression, Column):\n raise NotImplementedError(f\"Expected 'Column', got: '{orm_descriptor.property.expression}'\")\n elem = orm_descriptor.property.expression\n- elif isinstance(orm_descriptor.property, RelationshipProperty):\n+ elif isinstance(orm_descriptor.property, (RelationshipProperty, CompositeProperty)):\n elem = orm_descriptor.property\n else:\n raise NotImplementedError(f\"Unhandled property type: '{orm_descriptor.property}'\")\n@@ -189,6 +191,13 @@\n # the same hybrid property descriptor can be included in `all_orm_descriptors` multiple times, once\n # for each method name it is bound to. We only need to see it once, so track views of it here.\n seen_hybrid_descriptors: set[hybrid_property] = set()\n+ skipped_columns: set[str] = set()\n+ for composite_property in mapper.composites:\n+ for attr in composite_property.attrs:\n+ if isinstance(attr, (MappedColumn, Column)):\n+ skipped_columns.add(attr.name)\n+ elif isinstance(attr, str):\n+ skipped_columns.add(attr)\n for key, orm_descriptor in mapper.all_orm_descriptors.items():\n if isinstance(orm_descriptor, hybrid_property):\n if orm_descriptor in seen_hybrid_descriptors:\n@@ -196,6 +205,9 @@\n \n seen_hybrid_descriptors.add(orm_descriptor)\n \n+ if key in skipped_columns:\n+ continue\n+\n yield from cls.handle_orm_descriptor(\n orm_descriptor.extension_type, key, orm_descriptor, model_type_hints, model_name\n )\n@@ -262,6 +274,9 @@\n \n return FieldDefinition.from_annotation(elem.mapper.class_)\n \n+ if isinstance(elem, CompositeProperty):\n+ return FieldDefinition.from_annotation(elem.composite_class)\n+\n raise ImproperlyConfiguredException(\n f\"Unable to parse type from element '{elem}'. Consider adding a type hint.\",\n )\n", "issue": "StaticFilesConfig and virtual directories\nI'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem. \r\n\r\nThis is not generally true, especially in any kind of virtual filesystem (e.g. a zipped package). 
I think this condition should be relaxed to support virtual filesystems.\r\n\r\nhttps://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom dataclasses import replace\nfrom functools import singledispatchmethod\nfrom typing import TYPE_CHECKING, Collection, Generic, Optional, TypeVar\n\nfrom sqlalchemy import Column, inspect, orm, sql\nfrom sqlalchemy.ext.associationproxy import AssociationProxy, AssociationProxyExtensionType\nfrom sqlalchemy.ext.hybrid import HybridExtensionType, hybrid_property\nfrom sqlalchemy.orm import (\n ColumnProperty,\n DeclarativeBase,\n InspectionAttr,\n Mapped,\n NotExtension,\n QueryableAttribute,\n RelationshipDirection,\n RelationshipProperty,\n)\n\nfrom litestar.dto.base_dto import AbstractDTO\nfrom litestar.dto.data_structures import DTOFieldDefinition\nfrom litestar.dto.field import DTO_FIELD_META_KEY, DTOField, Mark\nfrom litestar.exceptions import ImproperlyConfiguredException\nfrom litestar.types.empty import Empty\nfrom litestar.typing import FieldDefinition\nfrom litestar.utils.signature import ParsedSignature\n\nif TYPE_CHECKING:\n from typing import Any, Generator\n\n from typing_extensions import TypeAlias\n\n__all__ = (\"SQLAlchemyDTO\",)\n\nT = TypeVar(\"T\", bound=\"DeclarativeBase | Collection[DeclarativeBase]\")\n\nElementType: TypeAlias = \"Column | RelationshipProperty\"\nSQLA_NS = {**vars(orm), **vars(sql)}\n\n\nclass SQLAlchemyDTO(AbstractDTO[T], Generic[T]):\n \"\"\"Support for domain modelling with SQLAlchemy.\"\"\"\n\n @singledispatchmethod\n @classmethod\n def handle_orm_descriptor(\n cls,\n extension_type: NotExtension | AssociationProxyExtensionType | HybridExtensionType,\n orm_descriptor: InspectionAttr,\n key: str,\n model_type_hints: dict[str, FieldDefinition],\n model_name: str,\n ) -> list[DTOFieldDefinition]:\n raise NotImplementedError(f\"Unsupported extension type: {extension_type}\")\n\n @handle_orm_descriptor.register(NotExtension)\n @classmethod\n def _(\n cls,\n extension_type: NotExtension,\n key: str,\n orm_descriptor: InspectionAttr,\n model_type_hints: dict[str, FieldDefinition],\n model_name: str,\n ) -> list[DTOFieldDefinition]:\n if not isinstance(orm_descriptor, QueryableAttribute):\n raise NotImplementedError(f\"Unexpected descriptor type for '{extension_type}': '{orm_descriptor}'\")\n\n elem: ElementType\n if isinstance(orm_descriptor.property, ColumnProperty):\n if not isinstance(orm_descriptor.property.expression, Column):\n raise NotImplementedError(f\"Expected 'Column', got: '{orm_descriptor.property.expression}'\")\n elem = orm_descriptor.property.expression\n elif isinstance(orm_descriptor.property, RelationshipProperty):\n elem = orm_descriptor.property\n else:\n raise NotImplementedError(f\"Unhandled property type: '{orm_descriptor.property}'\")\n\n default, default_factory = _detect_defaults(elem)\n\n try:\n if (field_definition := model_type_hints[key]).origin is Mapped:\n (field_definition,) = field_definition.inner_types\n else:\n raise NotImplementedError(f\"Expected 'Mapped' origin, got: '{field_definition.origin}'\")\n except KeyError:\n field_definition = parse_type_from_element(elem)\n\n return [\n DTOFieldDefinition.from_field_definition(\n field_definition=replace(\n field_definition,\n name=key,\n default=default,\n ),\n default_factory=default_factory,\n dto_field=elem.info.get(DTO_FIELD_META_KEY, DTOField()),\n model_name=model_name,\n )\n ]\n\n 
@handle_orm_descriptor.register(AssociationProxyExtensionType)\n @classmethod\n def _(\n cls,\n extension_type: AssociationProxyExtensionType,\n key: str,\n orm_descriptor: InspectionAttr,\n model_type_hints: dict[str, FieldDefinition],\n model_name: str,\n ) -> list[DTOFieldDefinition]:\n if not isinstance(orm_descriptor, AssociationProxy):\n raise NotImplementedError(f\"Unexpected descriptor type '{orm_descriptor}' for '{extension_type}'\")\n\n if (field_definition := model_type_hints[key]).origin is AssociationProxy:\n (field_definition,) = field_definition.inner_types\n else:\n raise NotImplementedError(f\"Expected 'AssociationProxy' origin, got: '{field_definition.origin}'\")\n\n return [\n DTOFieldDefinition.from_field_definition(\n field_definition=replace(\n field_definition,\n name=key,\n default=Empty,\n ),\n default_factory=None,\n dto_field=orm_descriptor.info.get(DTO_FIELD_META_KEY, DTOField(mark=Mark.READ_ONLY)),\n model_name=model_name,\n )\n ]\n\n @handle_orm_descriptor.register(HybridExtensionType)\n @classmethod\n def _(\n cls,\n extension_type: HybridExtensionType,\n key: str,\n orm_descriptor: InspectionAttr,\n model_type_hints: dict[str, FieldDefinition],\n model_name: str,\n ) -> list[DTOFieldDefinition]:\n if not isinstance(orm_descriptor, hybrid_property):\n raise NotImplementedError(f\"Unexpected descriptor type '{orm_descriptor}' for '{extension_type}'\")\n\n getter_sig = ParsedSignature.from_fn(orm_descriptor.fget, {})\n\n field_defs = [\n DTOFieldDefinition.from_field_definition(\n field_definition=replace(\n getter_sig.return_type,\n name=orm_descriptor.__name__,\n default=Empty,\n ),\n default_factory=None,\n dto_field=orm_descriptor.info.get(DTO_FIELD_META_KEY, DTOField(mark=Mark.READ_ONLY)),\n model_name=model_name,\n )\n ]\n\n if orm_descriptor.fset is not None:\n setter_sig = ParsedSignature.from_fn(orm_descriptor.fset, {})\n field_defs.append(\n DTOFieldDefinition.from_field_definition(\n field_definition=replace(\n next(iter(setter_sig.parameters.values())),\n name=orm_descriptor.__name__,\n default=Empty,\n ),\n default_factory=None,\n dto_field=orm_descriptor.info.get(DTO_FIELD_META_KEY, DTOField(mark=Mark.WRITE_ONLY)),\n model_name=model_name,\n )\n )\n\n return field_defs\n\n @classmethod\n def generate_field_definitions(cls, model_type: type[DeclarativeBase]) -> Generator[DTOFieldDefinition, None, None]:\n if (mapper := inspect(model_type)) is None: # pragma: no cover\n raise RuntimeError(\"Unexpected `None` value for mapper.\")\n\n # includes SQLAlchemy names and other mapped class names in the forward reference resolution namespace\n namespace = {**SQLA_NS, **{m.class_.__name__: m.class_ for m in mapper.registry.mappers if m is not mapper}}\n model_type_hints = cls.get_model_type_hints(model_type, namespace=namespace)\n model_name = model_type.__name__\n\n # the same hybrid property descriptor can be included in `all_orm_descriptors` multiple times, once\n # for each method name it is bound to. 
We only need to see it once, so track views of it here.\n seen_hybrid_descriptors: set[hybrid_property] = set()\n for key, orm_descriptor in mapper.all_orm_descriptors.items():\n if isinstance(orm_descriptor, hybrid_property):\n if orm_descriptor in seen_hybrid_descriptors:\n continue\n\n seen_hybrid_descriptors.add(orm_descriptor)\n\n yield from cls.handle_orm_descriptor(\n orm_descriptor.extension_type, key, orm_descriptor, model_type_hints, model_name\n )\n\n @classmethod\n def detect_nested_field(cls, field_definition: FieldDefinition) -> bool:\n return field_definition.is_subclass_of(DeclarativeBase)\n\n\ndef _detect_defaults(elem: ElementType) -> tuple[Any, Any]:\n default: Any = Empty\n default_factory: Any = None # pyright:ignore\n if sqla_default := getattr(elem, \"default\", None):\n if sqla_default.is_scalar:\n default = sqla_default.arg\n elif sqla_default.is_callable:\n\n def default_factory(d: Any = sqla_default) -> Any:\n return d.arg({})\n\n elif sqla_default.is_sequence or sqla_default.is_sentinel:\n # SQLAlchemy sequences represent server side defaults\n # so we cannot infer a reasonable default value for\n # them on the client side\n pass\n else:\n raise ValueError(\"Unexpected default type\")\n else:\n if (\n isinstance(elem, RelationshipProperty)\n and detect_nullable_relationship(elem)\n or getattr(elem, \"nullable\", False)\n ):\n default = None\n\n return default, default_factory\n\n\ndef parse_type_from_element(elem: ElementType) -> FieldDefinition:\n \"\"\"Parses a type from a SQLAlchemy element.\n\n Args:\n elem: The SQLAlchemy element to parse.\n\n Returns:\n FieldDefinition: The parsed type.\n\n Raises:\n ImproperlyConfiguredException: If the type cannot be parsed.\n \"\"\"\n\n if isinstance(elem, Column):\n if elem.nullable:\n return FieldDefinition.from_annotation(Optional[elem.type.python_type])\n return FieldDefinition.from_annotation(elem.type.python_type)\n\n if isinstance(elem, RelationshipProperty):\n if elem.direction in (RelationshipDirection.ONETOMANY, RelationshipDirection.MANYTOMANY):\n collection_type = FieldDefinition.from_annotation(elem.collection_class or list)\n return FieldDefinition.from_annotation(collection_type.safe_generic_origin[elem.mapper.class_])\n\n if detect_nullable_relationship(elem):\n return FieldDefinition.from_annotation(Optional[elem.mapper.class_])\n\n return FieldDefinition.from_annotation(elem.mapper.class_)\n\n raise ImproperlyConfiguredException(\n f\"Unable to parse type from element '{elem}'. Consider adding a type hint.\",\n )\n\n\ndef detect_nullable_relationship(elem: RelationshipProperty) -> bool:\n \"\"\"Detects if a relationship is nullable.\n\n This attempts to decide if we should allow a ``None`` default value for a relationship by looking at the\n foreign key fields. If all foreign key fields are nullable, then we allow a ``None`` default value.\n\n Args:\n elem: The relationship to check.\n\n Returns:\n bool: ``True`` if the relationship is nullable, ``False`` otherwise.\n \"\"\"\n return elem.direction == RelationshipDirection.MANYTOONE and all(c.nullable for c in elem.local_columns)\n", "path": "litestar/contrib/sqlalchemy/dto.py"}]}
3,601
631
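Aside on the patch above: the extra CompositeProperty branch exists because SQLAlchemy can map several columns onto a single value object, and a DTO then has to expose the composite attribute instead of its backing columns. Below is a minimal sketch of such a model (the Point/Vertex names follow the stock SQLAlchemy documentation example and are not taken from the record) of the kind the patched SQLAlchemyDTO is meant to handle:

    import dataclasses

    from sqlalchemy.orm import DeclarativeBase, Mapped, composite, mapped_column


    @dataclasses.dataclass
    class Point:
        x: int
        y: int


    class Base(DeclarativeBase):
        pass


    class Vertex(Base):
        __tablename__ = 'vertex'

        id: Mapped[int] = mapped_column(primary_key=True)
        # Four real columns (x1/y1, x2/y2) back two composite attributes; the DTO
        # should expose start/end as Point-typed fields and skip the raw columns.
        start: Mapped[Point] = composite(mapped_column('x1'), mapped_column('y1'))
        end: Mapped[Point] = composite(mapped_column('x2'), mapped_column('y2'))

With the change, mapper.composites supplies the start/end fields typed via composite_class, while their component columns land in skipped_columns and are left out of the generated field definitions.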
gh_patches_debug_3104
rasdani/github-patches
git_diff
certbot__certbot-2004
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> don't add includeSubDomains for Strict-Transport-Security header Hi, I think the letsencrypt client shouldn’t add `includeSubDomains` to the Strict-Transport-Security header by default. If you let the letsencrypt client modify the configuration for a webserver which hosts example.com, this would make it only possible to reach subdomain hosts (foo.example.com) with https. If you forgot only one webserver which serves a subdomain by http only, you can't reach this one any more. And as browsers actually don't have an option to clear the HSTS cache, It won't help you to remove the includeSubDomains later from the webserver config. includeSubDomains is something, that an admin should only add if he truly knows, that all subdomains are reachable by https. So I think this should never be added by an automatic tool, like the letsencrypt client. RFC 6797 section 11.1 [1] also describes some implications of includeSubDomains Chris [1] https://tools.ietf.org/html/rfc6797#section-11.4 </issue> <code> [start of letsencrypt-apache/letsencrypt_apache/constants.py] 1 """Apache plugin constants.""" 2 import pkg_resources 3 4 5 CLI_DEFAULTS = dict( 6 server_root="/etc/apache2", 7 ctl="apache2ctl", 8 enmod="a2enmod", 9 dismod="a2dismod", 10 le_vhost_ext="-le-ssl.conf", 11 ) 12 """CLI defaults.""" 13 14 MOD_SSL_CONF_DEST = "options-ssl-apache.conf" 15 """Name of the mod_ssl config file as saved in `IConfig.config_dir`.""" 16 17 MOD_SSL_CONF_SRC = pkg_resources.resource_filename( 18 "letsencrypt_apache", "options-ssl-apache.conf") 19 """Path to the Apache mod_ssl config file found in the Let's Encrypt 20 distribution.""" 21 22 AUGEAS_LENS_DIR = pkg_resources.resource_filename( 23 "letsencrypt_apache", "augeas_lens") 24 """Path to the Augeas lens directory""" 25 26 REWRITE_HTTPS_ARGS = [ 27 "^", "https://%{SERVER_NAME}%{REQUEST_URI}", "[L,QSA,R=permanent]"] 28 """Apache version<2.3.9 rewrite rule arguments used for redirections to https vhost""" 29 30 REWRITE_HTTPS_ARGS_WITH_END = [ 31 "^", "https://%{SERVER_NAME}%{REQUEST_URI}", "[END,QSA,R=permanent]"] 32 """Apache version >= 2.3.9 rewrite rule arguments used for redirections to 33 https vhost""" 34 35 HSTS_ARGS = ["always", "set", "Strict-Transport-Security", 36 "\"max-age=31536000; includeSubDomains\""] 37 """Apache header arguments for HSTS""" 38 39 UIR_ARGS = ["always", "set", "Content-Security-Policy", 40 "upgrade-insecure-requests"] 41 42 HEADER_ARGS = {"Strict-Transport-Security": HSTS_ARGS, 43 "Upgrade-Insecure-Requests": UIR_ARGS} 44 45 [end of letsencrypt-apache/letsencrypt_apache/constants.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/letsencrypt-apache/letsencrypt_apache/constants.py b/letsencrypt-apache/letsencrypt_apache/constants.py --- a/letsencrypt-apache/letsencrypt_apache/constants.py +++ b/letsencrypt-apache/letsencrypt_apache/constants.py @@ -33,7 +33,7 @@ https vhost""" HSTS_ARGS = ["always", "set", "Strict-Transport-Security", - "\"max-age=31536000; includeSubDomains\""] + "\"max-age=31536000\""] """Apache header arguments for HSTS""" UIR_ARGS = ["always", "set", "Content-Security-Policy",
{"golden_diff": "diff --git a/letsencrypt-apache/letsencrypt_apache/constants.py b/letsencrypt-apache/letsencrypt_apache/constants.py\n--- a/letsencrypt-apache/letsencrypt_apache/constants.py\n+++ b/letsencrypt-apache/letsencrypt_apache/constants.py\n@@ -33,7 +33,7 @@\n https vhost\"\"\"\n \n HSTS_ARGS = [\"always\", \"set\", \"Strict-Transport-Security\",\n- \"\\\"max-age=31536000; includeSubDomains\\\"\"]\n+ \"\\\"max-age=31536000\\\"\"]\n \"\"\"Apache header arguments for HSTS\"\"\"\n \n UIR_ARGS = [\"always\", \"set\", \"Content-Security-Policy\",\n", "issue": "don't add includeSubDomains for Strict-Transport-Security header\nHi,\n\nI think the letsencrypt client shouldn\u2019t add `includeSubDomains` to the Strict-Transport-Security header by default.\n\nIf you let the letsencrypt client modify the configuration for a webserver which hosts example.com, this would make it only possible to reach subdomain hosts (foo.example.com) with https. If you forgot only one webserver which serves a subdomain by http only, you can't reach this one any more.\nAnd as browsers actually don't have an option to clear the HSTS cache, It won't help you to remove the includeSubDomains later from the webserver config.\n\nincludeSubDomains is something, that an admin should only add if he truly knows, that all subdomains are reachable by https. So I think this should never be added by an automatic tool, like the letsencrypt client.\n\nRFC 6797 section 11.1 [1] also describes some implications of includeSubDomains\n\nChris\n\n[1] https://tools.ietf.org/html/rfc6797#section-11.4\n\n", "before_files": [{"content": "\"\"\"Apache plugin constants.\"\"\"\nimport pkg_resources\n\n\nCLI_DEFAULTS = dict(\n server_root=\"/etc/apache2\",\n ctl=\"apache2ctl\",\n enmod=\"a2enmod\",\n dismod=\"a2dismod\",\n le_vhost_ext=\"-le-ssl.conf\",\n)\n\"\"\"CLI defaults.\"\"\"\n\nMOD_SSL_CONF_DEST = \"options-ssl-apache.conf\"\n\"\"\"Name of the mod_ssl config file as saved in `IConfig.config_dir`.\"\"\"\n\nMOD_SSL_CONF_SRC = pkg_resources.resource_filename(\n \"letsencrypt_apache\", \"options-ssl-apache.conf\")\n\"\"\"Path to the Apache mod_ssl config file found in the Let's Encrypt\ndistribution.\"\"\"\n\nAUGEAS_LENS_DIR = pkg_resources.resource_filename(\n \"letsencrypt_apache\", \"augeas_lens\")\n\"\"\"Path to the Augeas lens directory\"\"\"\n\nREWRITE_HTTPS_ARGS = [\n \"^\", \"https://%{SERVER_NAME}%{REQUEST_URI}\", \"[L,QSA,R=permanent]\"]\n\"\"\"Apache version<2.3.9 rewrite rule arguments used for redirections to https vhost\"\"\"\n\nREWRITE_HTTPS_ARGS_WITH_END = [\n \"^\", \"https://%{SERVER_NAME}%{REQUEST_URI}\", \"[END,QSA,R=permanent]\"]\n\"\"\"Apache version >= 2.3.9 rewrite rule arguments used for redirections to\n https vhost\"\"\"\n\nHSTS_ARGS = [\"always\", \"set\", \"Strict-Transport-Security\",\n \"\\\"max-age=31536000; includeSubDomains\\\"\"]\n\"\"\"Apache header arguments for HSTS\"\"\"\n\nUIR_ARGS = [\"always\", \"set\", \"Content-Security-Policy\",\n \"upgrade-insecure-requests\"]\n\nHEADER_ARGS = {\"Strict-Transport-Security\": HSTS_ARGS,\n \"Upgrade-Insecure-Requests\": UIR_ARGS}\n\n", "path": "letsencrypt-apache/letsencrypt_apache/constants.py"}]}
1,246
157
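Aside on the change above: dropping includeSubDomains means the emitted header only pins the host the certificate was installed for, in line with the caution in RFC 6797 (section 11.4) cited in the issue. The helper below is purely hypothetical (it is not part of the letsencrypt-apache code) and only illustrates the opt-in shape such a default implies:

    HSTS_MAX_AGE = 31536000  # one year, the max-age the plugin already uses


    def hsts_header_args(include_subdomains=False):
        # Browsers cache the pin and offer no reliable way for users to clear it,
        # so only add includeSubDomains once every subdomain is known to serve HTTPS.
        value = 'max-age={0}'.format(HSTS_MAX_AGE)
        if include_subdomains:
            value += '; includeSubDomains'
        return ['always', 'set', 'Strict-Transport-Security', '"{0}"'.format(value)]

Called with the default, this yields exactly the argument list the patched HSTS_ARGS constant now contains; the stricter variant stays a deliberate, manual choice.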
gh_patches_debug_41488
rasdani/github-patches
git_diff
chainer__chainer-5029
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add requirements check for ideep4py Add requirements check for iDeep4py as we do in cupy. Refs #4933 </issue> <code> [start of chainer/_runtime_info.py] 1 import platform 2 import sys 3 4 import numpy 5 import six 6 7 import chainer 8 from chainer.backends import cuda 9 10 11 class _RuntimeInfo(object): 12 13 chainer_version = None 14 numpy_version = None 15 cuda_info = None 16 17 def __init__(self): 18 self.chainer_version = chainer.__version__ 19 self.numpy_version = numpy.__version__ 20 self.platform_version = platform.platform() 21 if cuda.available: 22 self.cuda_info = cuda.cupyx.get_runtime_info() 23 else: 24 self.cuda_info = None 25 26 def __str__(self): 27 s = six.StringIO() 28 s.write('''Platform: {}\n'''.format(self.platform_version)) 29 s.write('''Chainer: {}\n'''.format(self.chainer_version)) 30 s.write('''NumPy: {}\n'''.format(self.numpy_version)) 31 if self.cuda_info is None: 32 s.write('''CuPy: Not Available\n''') 33 else: 34 s.write('''CuPy:\n''') 35 for line in str(self.cuda_info).splitlines(): 36 s.write(''' {}\n'''.format(line)) 37 return s.getvalue() 38 39 40 def get_runtime_info(): 41 return _RuntimeInfo() 42 43 44 def print_runtime_info(out=None): 45 if out is None: 46 out = sys.stdout 47 out.write(str(get_runtime_info())) 48 if hasattr(out, 'flush'): 49 out.flush() 50 [end of chainer/_runtime_info.py] [start of chainer/backends/intel64.py] 1 from __future__ import absolute_import 2 3 import chainer 4 from chainer.configuration import config 5 6 7 _ideep_version = None 8 _error = None 9 10 try: 11 import ideep4py as ideep # NOQA 12 from ideep4py import mdarray # NOQA 13 _ideep_version = 0 14 except ImportError as e: 15 _error = e 16 17 class mdarray(object): 18 pass # for type testing 19 20 21 # ------------------------------------------------------------------------------ 22 # ideep configuration 23 # ------------------------------------------------------------------------------ 24 _SHOULD_USE_IDEEP = { 25 '==always': {'always': True, 'auto': False, 'never': False}, 26 '>=auto': {'always': True, 'auto': True, 'never': False}, 27 } 28 29 30 def is_ideep_available(): 31 """Returns if iDeep is available. 32 33 Returns: 34 bool: ``True`` if iDeep is installed. 35 """ 36 37 return _ideep_version is not None 38 39 40 def check_ideep_available(): 41 """Checks if iDeep is available. 42 43 When iDeep is correctly set up, nothing happens. 44 Otherwise it raises ``RuntimeError``. 45 """ 46 if _ideep_version is None: 47 # If the error is missing shared object, append a message to 48 # redirect to the ideep website. 49 msg = str(_error) 50 if 'cannot open shared object file' in msg: 51 msg += ('\n\nEnsure iDeep requirements are satisfied: ' 52 'https://github.com/intel/ideep') 53 raise RuntimeError( 54 'iDeep is not available.\n' 55 'Reason: {}: {}'.format(type(_error).__name__, msg)) 56 57 58 def should_use_ideep(level): 59 """Determines if we should use iDeep. 60 61 This function checks ``chainer.config.use_ideep`` and availability 62 of ``ideep4py`` package. 63 64 Args: 65 level (str): iDeep use level. It must be either ``'==always'`` or 66 ``'>=auto'``. ``'==always'`` indicates that the ``use_ideep`` 67 config must be ``'always'`` to use iDeep. 68 69 Returns: 70 bool: ``True`` if the caller should use iDeep. 71 72 """ 73 if _ideep_version is None: 74 return False 75 76 # TODO(niboshi): 77 # Add lowest_version argument and compare with ideep version. 
78 # Currently ideep does not provide a way to retrieve its version. 79 80 if level not in _SHOULD_USE_IDEEP: 81 raise ValueError('invalid iDeep use level: %s ' 82 '(must be either of "==always" or ">=auto")' % 83 repr(level)) 84 85 flags = _SHOULD_USE_IDEEP[level] 86 87 use_ideep = config.use_ideep 88 if use_ideep not in flags: 89 raise ValueError('invalid use_ideep configuration: %s ' 90 '(must be either of "always", "auto", or "never")' % 91 repr(use_ideep)) 92 return flags[use_ideep] 93 94 95 def inputs_all_ready(inputs, supported_ndim=(2, 4)): 96 """Checks if input arrays are supported for an iDeep primitive. 97 98 Before calling an iDeep primitive (e.g., ``ideep4py.linear.Forward``), you 99 need to make sure that all input arrays are ready for the primitive by 100 calling this function. 101 Information to be checked includes array types, dimesions and data types. 102 The function checks ``inputs`` info and ``supported_ndim``. 103 104 Inputs to be tested can be any of ``Variable``, ``numpy.ndarray`` or 105 ``ideep4py.mdarray``. However, all inputs to iDeep primitives must be 106 ``ideep4py.mdarray``. Callers of iDeep primitives are responsible of 107 converting all inputs to ``ideep4py.mdarray``. 108 109 Args: 110 inputs (sequence of arrays or variables): 111 Inputs to be checked. 112 supported_ndim (tuple of ints): 113 Supported ndim values for the iDeep primitive. 114 115 Returns: 116 bool: ``True`` if all conditions meet. 117 118 """ 119 120 def _is_supported_array_type(a): 121 return isinstance(a, ideep.mdarray) or ideep.check_type([a]) 122 123 if _ideep_version is None: 124 return False 125 126 inputs = [x.data if isinstance(x, chainer.variable.Variable) 127 else x for x in inputs] 128 129 return (ideep.check_ndim(inputs, supported_ndim) 130 and all([_is_supported_array_type(a) for a in inputs])) 131 [end of chainer/backends/intel64.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/chainer/_runtime_info.py b/chainer/_runtime_info.py --- a/chainer/_runtime_info.py +++ b/chainer/_runtime_info.py @@ -6,6 +6,7 @@ import chainer from chainer.backends import cuda +from chainer.backends import intel64 class _RuntimeInfo(object): @@ -13,6 +14,7 @@ chainer_version = None numpy_version = None cuda_info = None + ideep_version = None def __init__(self): self.chainer_version = chainer.__version__ @@ -22,6 +24,10 @@ self.cuda_info = cuda.cupyx.get_runtime_info() else: self.cuda_info = None + if intel64.is_ideep_available(): + self.ideep_version = intel64.ideep.__version__ + else: + self.ideep_version = None def __str__(self): s = six.StringIO() @@ -34,6 +40,10 @@ s.write('''CuPy:\n''') for line in str(self.cuda_info).splitlines(): s.write(''' {}\n'''.format(line)) + if self.ideep_version is None: + s.write('''iDeep: Not Available\n''') + else: + s.write('''iDeep: {}\n'''.format(self.ideep_version)) return s.getvalue() diff --git a/chainer/backends/intel64.py b/chainer/backends/intel64.py --- a/chainer/backends/intel64.py +++ b/chainer/backends/intel64.py @@ -10,9 +10,10 @@ try: import ideep4py as ideep # NOQA from ideep4py import mdarray # NOQA - _ideep_version = 0 + _ideep_version = 2 if hasattr(ideep, '__version__') else 1 except ImportError as e: _error = e + _ideep_version = None class mdarray(object): pass # for type testing @@ -31,10 +32,9 @@ """Returns if iDeep is available. Returns: - bool: ``True`` if iDeep is installed. + bool: ``True`` if the supported version of iDeep is installed. """ - - return _ideep_version is not None + return _ideep_version is not None and _ideep_version == 2 def check_ideep_available(): @@ -53,6 +53,10 @@ raise RuntimeError( 'iDeep is not available.\n' 'Reason: {}: {}'.format(type(_error).__name__, msg)) + elif _ideep_version != 2: + raise RuntimeError( + 'iDeep is not available.\n' + 'Reason: Unsupported iDeep version ({})'.format(_ideep_version)) def should_use_ideep(level): @@ -70,7 +74,7 @@ bool: ``True`` if the caller should use iDeep. """ - if _ideep_version is None: + if not is_ideep_available(): return False # TODO(niboshi): @@ -120,7 +124,7 @@ def _is_supported_array_type(a): return isinstance(a, ideep.mdarray) or ideep.check_type([a]) - if _ideep_version is None: + if not is_ideep_available(): return False inputs = [x.data if isinstance(x, chainer.variable.Variable)
{"golden_diff": "diff --git a/chainer/_runtime_info.py b/chainer/_runtime_info.py\n--- a/chainer/_runtime_info.py\n+++ b/chainer/_runtime_info.py\n@@ -6,6 +6,7 @@\n \n import chainer\n from chainer.backends import cuda\n+from chainer.backends import intel64\n \n \n class _RuntimeInfo(object):\n@@ -13,6 +14,7 @@\n chainer_version = None\n numpy_version = None\n cuda_info = None\n+ ideep_version = None\n \n def __init__(self):\n self.chainer_version = chainer.__version__\n@@ -22,6 +24,10 @@\n self.cuda_info = cuda.cupyx.get_runtime_info()\n else:\n self.cuda_info = None\n+ if intel64.is_ideep_available():\n+ self.ideep_version = intel64.ideep.__version__\n+ else:\n+ self.ideep_version = None\n \n def __str__(self):\n s = six.StringIO()\n@@ -34,6 +40,10 @@\n s.write('''CuPy:\\n''')\n for line in str(self.cuda_info).splitlines():\n s.write(''' {}\\n'''.format(line))\n+ if self.ideep_version is None:\n+ s.write('''iDeep: Not Available\\n''')\n+ else:\n+ s.write('''iDeep: {}\\n'''.format(self.ideep_version))\n return s.getvalue()\n \n \ndiff --git a/chainer/backends/intel64.py b/chainer/backends/intel64.py\n--- a/chainer/backends/intel64.py\n+++ b/chainer/backends/intel64.py\n@@ -10,9 +10,10 @@\n try:\n import ideep4py as ideep # NOQA\n from ideep4py import mdarray # NOQA\n- _ideep_version = 0\n+ _ideep_version = 2 if hasattr(ideep, '__version__') else 1\n except ImportError as e:\n _error = e\n+ _ideep_version = None\n \n class mdarray(object):\n pass # for type testing\n@@ -31,10 +32,9 @@\n \"\"\"Returns if iDeep is available.\n \n Returns:\n- bool: ``True`` if iDeep is installed.\n+ bool: ``True`` if the supported version of iDeep is installed.\n \"\"\"\n-\n- return _ideep_version is not None\n+ return _ideep_version is not None and _ideep_version == 2\n \n \n def check_ideep_available():\n@@ -53,6 +53,10 @@\n raise RuntimeError(\n 'iDeep is not available.\\n'\n 'Reason: {}: {}'.format(type(_error).__name__, msg))\n+ elif _ideep_version != 2:\n+ raise RuntimeError(\n+ 'iDeep is not available.\\n'\n+ 'Reason: Unsupported iDeep version ({})'.format(_ideep_version))\n \n \n def should_use_ideep(level):\n@@ -70,7 +74,7 @@\n bool: ``True`` if the caller should use iDeep.\n \n \"\"\"\n- if _ideep_version is None:\n+ if not is_ideep_available():\n return False\n \n # TODO(niboshi):\n@@ -120,7 +124,7 @@\n def _is_supported_array_type(a):\n return isinstance(a, ideep.mdarray) or ideep.check_type([a])\n \n- if _ideep_version is None:\n+ if not is_ideep_available():\n return False\n \n inputs = [x.data if isinstance(x, chainer.variable.Variable)\n", "issue": "Add requirements check for ideep4py\nAdd requirements check for iDeep4py as we do in cupy.\r\nRefs #4933\n", "before_files": [{"content": "import platform\nimport sys\n\nimport numpy\nimport six\n\nimport chainer\nfrom chainer.backends import cuda\n\n\nclass _RuntimeInfo(object):\n\n chainer_version = None\n numpy_version = None\n cuda_info = None\n\n def __init__(self):\n self.chainer_version = chainer.__version__\n self.numpy_version = numpy.__version__\n self.platform_version = platform.platform()\n if cuda.available:\n self.cuda_info = cuda.cupyx.get_runtime_info()\n else:\n self.cuda_info = None\n\n def __str__(self):\n s = six.StringIO()\n s.write('''Platform: {}\\n'''.format(self.platform_version))\n s.write('''Chainer: {}\\n'''.format(self.chainer_version))\n s.write('''NumPy: {}\\n'''.format(self.numpy_version))\n if self.cuda_info is None:\n s.write('''CuPy: Not Available\\n''')\n else:\n s.write('''CuPy:\\n''')\n for line in 
str(self.cuda_info).splitlines():\n s.write(''' {}\\n'''.format(line))\n return s.getvalue()\n\n\ndef get_runtime_info():\n return _RuntimeInfo()\n\n\ndef print_runtime_info(out=None):\n if out is None:\n out = sys.stdout\n out.write(str(get_runtime_info()))\n if hasattr(out, 'flush'):\n out.flush()\n", "path": "chainer/_runtime_info.py"}, {"content": "from __future__ import absolute_import\n\nimport chainer\nfrom chainer.configuration import config\n\n\n_ideep_version = None\n_error = None\n\ntry:\n import ideep4py as ideep # NOQA\n from ideep4py import mdarray # NOQA\n _ideep_version = 0\nexcept ImportError as e:\n _error = e\n\n class mdarray(object):\n pass # for type testing\n\n\n# ------------------------------------------------------------------------------\n# ideep configuration\n# ------------------------------------------------------------------------------\n_SHOULD_USE_IDEEP = {\n '==always': {'always': True, 'auto': False, 'never': False},\n '>=auto': {'always': True, 'auto': True, 'never': False},\n}\n\n\ndef is_ideep_available():\n \"\"\"Returns if iDeep is available.\n\n Returns:\n bool: ``True`` if iDeep is installed.\n \"\"\"\n\n return _ideep_version is not None\n\n\ndef check_ideep_available():\n \"\"\"Checks if iDeep is available.\n\n When iDeep is correctly set up, nothing happens.\n Otherwise it raises ``RuntimeError``.\n \"\"\"\n if _ideep_version is None:\n # If the error is missing shared object, append a message to\n # redirect to the ideep website.\n msg = str(_error)\n if 'cannot open shared object file' in msg:\n msg += ('\\n\\nEnsure iDeep requirements are satisfied: '\n 'https://github.com/intel/ideep')\n raise RuntimeError(\n 'iDeep is not available.\\n'\n 'Reason: {}: {}'.format(type(_error).__name__, msg))\n\n\ndef should_use_ideep(level):\n \"\"\"Determines if we should use iDeep.\n\n This function checks ``chainer.config.use_ideep`` and availability\n of ``ideep4py`` package.\n\n Args:\n level (str): iDeep use level. It must be either ``'==always'`` or\n ``'>=auto'``. ``'==always'`` indicates that the ``use_ideep``\n config must be ``'always'`` to use iDeep.\n\n Returns:\n bool: ``True`` if the caller should use iDeep.\n\n \"\"\"\n if _ideep_version is None:\n return False\n\n # TODO(niboshi):\n # Add lowest_version argument and compare with ideep version.\n # Currently ideep does not provide a way to retrieve its version.\n\n if level not in _SHOULD_USE_IDEEP:\n raise ValueError('invalid iDeep use level: %s '\n '(must be either of \"==always\" or \">=auto\")' %\n repr(level))\n\n flags = _SHOULD_USE_IDEEP[level]\n\n use_ideep = config.use_ideep\n if use_ideep not in flags:\n raise ValueError('invalid use_ideep configuration: %s '\n '(must be either of \"always\", \"auto\", or \"never\")' %\n repr(use_ideep))\n return flags[use_ideep]\n\n\ndef inputs_all_ready(inputs, supported_ndim=(2, 4)):\n \"\"\"Checks if input arrays are supported for an iDeep primitive.\n\n Before calling an iDeep primitive (e.g., ``ideep4py.linear.Forward``), you\n need to make sure that all input arrays are ready for the primitive by\n calling this function.\n Information to be checked includes array types, dimesions and data types.\n The function checks ``inputs`` info and ``supported_ndim``.\n\n Inputs to be tested can be any of ``Variable``, ``numpy.ndarray`` or\n ``ideep4py.mdarray``. However, all inputs to iDeep primitives must be\n ``ideep4py.mdarray``. 
Callers of iDeep primitives are responsible of\n converting all inputs to ``ideep4py.mdarray``.\n\n Args:\n inputs (sequence of arrays or variables):\n Inputs to be checked.\n supported_ndim (tuple of ints):\n Supported ndim values for the iDeep primitive.\n\n Returns:\n bool: ``True`` if all conditions meet.\n\n \"\"\"\n\n def _is_supported_array_type(a):\n return isinstance(a, ideep.mdarray) or ideep.check_type([a])\n\n if _ideep_version is None:\n return False\n\n inputs = [x.data if isinstance(x, chainer.variable.Variable)\n else x for x in inputs]\n\n return (ideep.check_ndim(inputs, supported_ndim)\n and all([_is_supported_array_type(a) for a in inputs]))\n", "path": "chainer/backends/intel64.py"}]}
2,270
812
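The chainer record above centres on gating an optional accelerator behind a module-level version flag fixed at import time. The sketch below reproduces that pattern in isolation; it is not chainer code, and `numpy` stands in for the optional `ideep4py` dependency purely so the snippet runs anywhere.

```python
import importlib

_error = None
_backend = None
_backend_version = None  # None: not importable, 1: legacy build, 2: supported build


def _probe(module_name):
    """Import the optional backend once and record which generation it is."""
    global _error, _backend, _backend_version
    try:
        _backend = importlib.import_module(module_name)
        # Mirrors the diff above: a build that exposes __version__ counts as the supported one.
        _backend_version = 2 if hasattr(_backend, "__version__") else 1
    except ImportError as exc:
        _error = exc
        _backend_version = None


def is_backend_available():
    """True only for the supported generation, not merely for any importable build."""
    return _backend_version == 2


def check_backend_available():
    """Raise with the recorded reason instead of failing later with a cryptic error."""
    if _backend_version is None:
        raise RuntimeError(f"backend is not available. Reason: {_error!r}")
    if _backend_version != 2:
        raise RuntimeError(
            f"backend is not available. Reason: unsupported version ({_backend_version})"
        )


if __name__ == "__main__":
    _probe("numpy")  # stand-in module; swap in the real optional dependency
    print("available:", is_backend_available())
```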
gh_patches_debug_21326
rasdani/github-patches
git_diff
hylang__hy-1266
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Single quote crashes hy2py ## x.hy ``` ' ``` ## `hy2py x.hy` ``` Traceback (most recent call last): File "<string>", line 1, in <module> File "hy/cmdline.py", line 406, in hy2py_main if stdin_text is None File "hy/cmdline.py", line 186, in pretty_error print(e, file=sys.stderr) File "hy/lex/exceptions.py", line 43, in __str__ source = self.source.split("\n") AttributeError: 'NoneType' object has no attribute 'split' ``` </issue> <code> [start of hy/lex/exceptions.py] 1 # Copyright (c) 2013 Nicolas Dandrimont <[email protected]> 2 # Copyright (c) 2013 Bob Tolbert <[email protected]> 3 # 4 # Permission is hereby granted, free of charge, to any person obtaining a 5 # copy of this software and associated documentation files (the "Software"), 6 # to deal in the Software without restriction, including without limitation 7 # the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 # and/or sell copies of the Software, and to permit persons to whom the 9 # Software is furnished to do so, subject to the following conditions: 10 # 11 # The above copyright notice and this permission notice shall be included in 12 # all copies or substantial portions of the Software. 13 # 14 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 18 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 19 # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 20 # DEALINGS IN THE SOFTWARE. 
21 22 from hy.errors import HyError 23 24 25 class LexException(HyError): 26 """Error during the Lexing of a Hython expression.""" 27 def __init__(self, message, lineno, colno): 28 super(LexException, self).__init__(message) 29 self.message = message 30 self.lineno = lineno 31 self.colno = colno 32 self.source = None 33 self.filename = '<stdin>' 34 35 def __str__(self): 36 from hy.errors import colored 37 38 line = self.lineno 39 start = self.colno 40 41 result = "" 42 43 source = self.source.split("\n") 44 45 if line > 0 and start > 0: 46 result += ' File "%s", line %d, column %d\n\n' % (self.filename, 47 line, 48 start) 49 50 if len(self.source) > 0: 51 source_line = source[line-1] 52 else: 53 source_line = "" 54 55 result += ' %s\n' % colored.red(source_line) 56 result += ' %s%s\n' % (' '*(start-1), colored.green('^')) 57 58 result += colored.yellow("LexException: %s\n\n" % self.message) 59 60 return result 61 62 63 class PrematureEndOfInput(LexException): 64 """We got a premature end of input""" 65 def __init__(self, message): 66 super(PrematureEndOfInput, self).__init__(message, -1, -1) 67 [end of hy/lex/exceptions.py] [start of hy/lex/__init__.py] 1 # Copyright (c) 2013 Paul Tagliamonte <[email protected]> 2 # 3 # Permission is hereby granted, free of charge, to any person obtaining a 4 # copy of this software and associated documentation files (the "Software"), 5 # to deal in the Software without restriction, including without limitation 6 # the rights to use, copy, modify, merge, publish, distribute, sublicense, 7 # and/or sell copies of the Software, and to permit persons to whom the 8 # Software is furnished to do so, subject to the following conditions: 9 # 10 # The above copyright notice and this permission notice shall be included in 11 # all copies or substantial portions of the Software. 12 # 13 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 16 # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 18 # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 19 # DEALINGS IN THE SOFTWARE. 20 21 from rply.errors import LexingError 22 23 from hy.lex.exceptions import LexException, PrematureEndOfInput # NOQA 24 from hy.lex.lexer import lexer 25 from hy.lex.parser import parser 26 27 28 def tokenize(buf): 29 """ 30 Tokenize a Lisp file or string buffer into internal Hy objects. 31 """ 32 try: 33 return parser.parse(lexer.lex(buf)) 34 except LexingError as e: 35 pos = e.getsourcepos() 36 raise LexException("Could not identify the next token.", 37 pos.lineno, pos.colno) 38 except LexException as e: 39 if e.source is None: 40 e.source = buf 41 raise 42 [end of hy/lex/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/hy/lex/__init__.py b/hy/lex/__init__.py --- a/hy/lex/__init__.py +++ b/hy/lex/__init__.py @@ -34,7 +34,7 @@ except LexingError as e: pos = e.getsourcepos() raise LexException("Could not identify the next token.", - pos.lineno, pos.colno) + pos.lineno, pos.colno, buf) except LexException as e: if e.source is None: e.source = buf diff --git a/hy/lex/exceptions.py b/hy/lex/exceptions.py --- a/hy/lex/exceptions.py +++ b/hy/lex/exceptions.py @@ -24,12 +24,12 @@ class LexException(HyError): """Error during the Lexing of a Hython expression.""" - def __init__(self, message, lineno, colno): + def __init__(self, message, lineno, colno, source=None): super(LexException, self).__init__(message) self.message = message self.lineno = lineno self.colno = colno - self.source = None + self.source = source self.filename = '<stdin>' def __str__(self):
{"golden_diff": "diff --git a/hy/lex/__init__.py b/hy/lex/__init__.py\n--- a/hy/lex/__init__.py\n+++ b/hy/lex/__init__.py\n@@ -34,7 +34,7 @@\n except LexingError as e:\n pos = e.getsourcepos()\n raise LexException(\"Could not identify the next token.\",\n- pos.lineno, pos.colno)\n+ pos.lineno, pos.colno, buf)\n except LexException as e:\n if e.source is None:\n e.source = buf\ndiff --git a/hy/lex/exceptions.py b/hy/lex/exceptions.py\n--- a/hy/lex/exceptions.py\n+++ b/hy/lex/exceptions.py\n@@ -24,12 +24,12 @@\n \n class LexException(HyError):\n \"\"\"Error during the Lexing of a Hython expression.\"\"\"\n- def __init__(self, message, lineno, colno):\n+ def __init__(self, message, lineno, colno, source=None):\n super(LexException, self).__init__(message)\n self.message = message\n self.lineno = lineno\n self.colno = colno\n- self.source = None\n+ self.source = source\n self.filename = '<stdin>'\n \n def __str__(self):\n", "issue": "Single quote crashes hy2py\n## x.hy\r\n\r\n```\r\n'\r\n```\r\n\r\n## `hy2py x.hy`\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"<string>\", line 1, in <module>\r\n File \"hy/cmdline.py\", line 406, in hy2py_main\r\n if stdin_text is None\r\n File \"hy/cmdline.py\", line 186, in pretty_error\r\n print(e, file=sys.stderr)\r\n File \"hy/lex/exceptions.py\", line 43, in __str__\r\n source = self.source.split(\"\\n\")\r\nAttributeError: 'NoneType' object has no attribute 'split'\r\n```\n", "before_files": [{"content": "# Copyright (c) 2013 Nicolas Dandrimont <[email protected]>\n# Copyright (c) 2013 Bob Tolbert <[email protected]>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n\nfrom hy.errors import HyError\n\n\nclass LexException(HyError):\n \"\"\"Error during the Lexing of a Hython expression.\"\"\"\n def __init__(self, message, lineno, colno):\n super(LexException, self).__init__(message)\n self.message = message\n self.lineno = lineno\n self.colno = colno\n self.source = None\n self.filename = '<stdin>'\n\n def __str__(self):\n from hy.errors import colored\n\n line = self.lineno\n start = self.colno\n\n result = \"\"\n\n source = self.source.split(\"\\n\")\n\n if line > 0 and start > 0:\n result += ' File \"%s\", line %d, column %d\\n\\n' % (self.filename,\n line,\n start)\n\n if len(self.source) > 0:\n source_line = source[line-1]\n else:\n source_line = \"\"\n\n result += ' %s\\n' % colored.red(source_line)\n result += ' %s%s\\n' % (' '*(start-1), colored.green('^'))\n\n result += colored.yellow(\"LexException: %s\\n\\n\" % self.message)\n\n return result\n\n\nclass PrematureEndOfInput(LexException):\n \"\"\"We got a premature end of input\"\"\"\n def __init__(self, message):\n super(PrematureEndOfInput, self).__init__(message, -1, -1)\n", "path": "hy/lex/exceptions.py"}, {"content": "# Copyright (c) 2013 Paul Tagliamonte <[email protected]>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n\nfrom rply.errors import LexingError\n\nfrom hy.lex.exceptions import LexException, PrematureEndOfInput # NOQA\nfrom hy.lex.lexer import lexer\nfrom hy.lex.parser import parser\n\n\ndef tokenize(buf):\n \"\"\"\n Tokenize a Lisp file or string buffer into internal Hy objects.\n \"\"\"\n try:\n return parser.parse(lexer.lex(buf))\n except LexingError as e:\n pos = e.getsourcepos()\n raise LexException(\"Could not identify the next token.\",\n pos.lineno, pos.colno)\n except LexException as e:\n if e.source is None:\n e.source = buf\n raise\n", "path": "hy/lex/__init__.py"}]}
1,873
296
gh_patches_debug_2702
rasdani/github-patches
git_diff
readthedocs__readthedocs.org-4811
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Delete untracked tags on fetch step Currently, if the user deletes a tag, it needs to wipe the environment for this change be reflected in their version list. There are some solutions to delete untracked tags (require more than 2 commands). But I found that the newest version of git has the `--prune-tags` option, which is used as `git fetch --prune --prune-tags` (`git >2.17`). We need to update git on the servers (we use 2.7.4) and change the fetch command. Or we can find a way to wipe the environment if we detect something like this case. Raised in https://github.com/rtfd/readthedocs.org/pull/3913#issuecomment-396673349 </issue> <code> [start of readthedocs/vcs_support/backends/git.py] 1 # -*- coding: utf-8 -*- 2 """Git-related utilities.""" 3 4 from __future__ import ( 5 absolute_import, division, print_function, unicode_literals) 6 7 import csv 8 import logging 9 import os 10 import re 11 12 import git 13 from builtins import str 14 from django.core.exceptions import ValidationError 15 from git.exc import BadName 16 from six import PY2, StringIO 17 18 from readthedocs.config import ALL 19 from readthedocs.projects.exceptions import RepositoryError 20 from readthedocs.projects.validators import validate_submodule_url 21 from readthedocs.vcs_support.base import BaseVCS, VCSVersion 22 23 log = logging.getLogger(__name__) 24 25 26 class Backend(BaseVCS): 27 28 """Git VCS backend.""" 29 30 supports_tags = True 31 supports_branches = True 32 supports_submodules = True 33 fallback_branch = 'master' # default branch 34 35 def __init__(self, *args, **kwargs): 36 super(Backend, self).__init__(*args, **kwargs) 37 self.token = kwargs.get('token', None) 38 self.repo_url = self._get_clone_url() 39 40 def _get_clone_url(self): 41 if '://' in self.repo_url: 42 hacked_url = self.repo_url.split('://')[1] 43 hacked_url = re.sub('.git$', '', hacked_url) 44 clone_url = 'https://%s' % hacked_url 45 if self.token: 46 clone_url = 'https://%s@%s' % (self.token, hacked_url) 47 return clone_url 48 # Don't edit URL because all hosts aren't the same 49 # else: 50 # clone_url = 'git://%s' % (hacked_url) 51 return self.repo_url 52 53 def set_remote_url(self, url): 54 return self.run('git', 'remote', 'set-url', 'origin', url) 55 56 def update(self): 57 # Use checkout() to update repo 58 # TODO: See where we call this 59 self.checkout() 60 61 def repo_exists(self): 62 code, _, _ = self.run('git', 'status', record=False) 63 return code == 0 64 65 def are_submodules_available(self, config): 66 """Test whether git submodule checkout step should be performed.""" 67 # TODO remove this after users migrate to a config file 68 from readthedocs.projects.models import Feature 69 submodules_in_config = ( 70 config.submodules.exclude != ALL or 71 config.submodules.include 72 ) 73 if (self.project.has_feature(Feature.SKIP_SUBMODULES) or 74 not submodules_in_config): 75 return False 76 77 # Keep compatibility with previous projects 78 code, out, _ = self.run('git', 'submodule', 'status', record=False) 79 return code == 0 and bool(out) 80 81 def validate_submodules(self, config): 82 """ 83 Returns the submodules and check that its URLs are valid. 84 85 .. note:: 86 87 Allways call after `self.are_submodules_available`. 88 89 :returns: tuple(bool, list) 90 91 Returns true if all required submodules URLs are valid. 92 Returns a list of all required submodules: 93 - Include is `ALL`, returns all submodules avaliable. 
94 - Include is a list, returns just those. 95 - Exclude is `ALL` - this should never happen. 96 - Exlude is a list, returns all avaliable submodules 97 but those from the list. 98 """ 99 repo = git.Repo(self.working_dir) 100 submodules = { 101 sub.path: sub 102 for sub in repo.submodules 103 } 104 105 for sub_path in config.submodules.exclude: 106 path = sub_path.rstrip('/') 107 if path in submodules: 108 del submodules[path] 109 110 if config.submodules.include != ALL and config.submodules.include: 111 submodules_include = {} 112 for sub_path in config.submodules.include: 113 path = sub_path.rstrip('/') 114 submodules_include[path] = submodules[path] 115 submodules = submodules_include 116 117 for path, submodule in submodules.items(): 118 try: 119 validate_submodule_url(submodule.url) 120 except ValidationError: 121 return False, [] 122 return True, submodules.keys() 123 124 def fetch(self): 125 code, _, _ = self.run('git', 'fetch', '--tags', '--prune') 126 if code != 0: 127 raise RepositoryError 128 129 def checkout_revision(self, revision=None): 130 if not revision: 131 branch = self.default_branch or self.fallback_branch 132 revision = 'origin/%s' % branch 133 134 code, out, err = self.run('git', 'checkout', '--force', revision) 135 if code != 0: 136 log.warning("Failed to checkout revision '%s': %s", revision, code) 137 return [code, out, err] 138 139 def clone(self): 140 """ 141 Clone the repository. 142 143 .. note:: 144 145 Temporarily, we support skipping submodule recursive clone via a 146 feature flag. This will eventually be configurable with our YAML 147 config. 148 """ 149 # TODO remove with https://github.com/rtfd/readthedocs-build/issues/30 150 from readthedocs.projects.models import Feature 151 cmd = ['git', 'clone'] 152 cmd.extend([self.repo_url, '.']) 153 code, _, _ = self.run(*cmd) 154 if code != 0: 155 raise RepositoryError 156 157 @property 158 def tags(self): 159 versions = [] 160 repo = git.Repo(self.working_dir) 161 for tag in repo.tags: 162 try: 163 versions.append(VCSVersion(self, str(tag.commit), str(tag))) 164 except ValueError as e: 165 # ValueError: Cannot resolve commit as tag TAGNAME points to a 166 # blob object - use the `.object` property instead to access it 167 # This is not a real tag for us, so we skip it 168 # https://github.com/rtfd/readthedocs.org/issues/4440 169 log.warning('Git tag skipped: %s', tag, exc_info=True) 170 continue 171 return versions 172 173 @property 174 def branches(self): 175 # Only show remote branches 176 retcode, stdout, _ = self.run( 177 'git', 178 'branch', 179 '-r', 180 record_as_success=True, 181 ) 182 # error (or no branches found) 183 if retcode != 0: 184 return [] 185 return self.parse_branches(stdout) 186 187 def parse_branches(self, data): 188 """ 189 Parse output of git branch -r. 190 191 e.g.: 192 193 origin/2.0.X 194 origin/HEAD -> origin/master 195 origin/develop 196 origin/master 197 origin/release/2.0.0 198 origin/release/2.1.0 199 """ 200 clean_branches = [] 201 # StringIO below is expecting Unicode data, so ensure that it gets it. 
202 if not isinstance(data, str): 203 data = str(data) 204 delimiter = str(' ').encode('utf-8') if PY2 else str(' ') 205 raw_branches = csv.reader(StringIO(data), delimiter=delimiter) 206 for branch in raw_branches: 207 branch = [f for f in branch if f not in ('', '*')] 208 # Handle empty branches 209 if branch: 210 branch = branch[0] 211 if branch.startswith('origin/'): 212 verbose_name = branch.replace('origin/', '') 213 if verbose_name in ['HEAD']: 214 continue 215 clean_branches.append( 216 VCSVersion(self, branch, verbose_name)) 217 else: 218 clean_branches.append(VCSVersion(self, branch, branch)) 219 return clean_branches 220 221 @property 222 def commit(self): 223 _, stdout, _ = self.run('git', 'rev-parse', 'HEAD') 224 return stdout.strip() 225 226 def checkout(self, identifier=None): 227 self.check_working_dir() 228 229 # Clone or update repository 230 if self.repo_exists(): 231 self.set_remote_url(self.repo_url) 232 self.fetch() 233 else: 234 self.make_clean_working_dir() 235 self.clone() 236 237 # Find proper identifier 238 if not identifier: 239 identifier = self.default_branch or self.fallback_branch 240 241 identifier = self.find_ref(identifier) 242 243 # Checkout the correct identifier for this branch. 244 code, out, err = self.checkout_revision(identifier) 245 if code != 0: 246 return code, out, err 247 248 # Clean any remains of previous checkouts 249 self.run('git', 'clean', '-d', '-f', '-f') 250 return code, out, err 251 252 def update_submodules(self, config): 253 if self.are_submodules_available(config): 254 valid, submodules = self.validate_submodules(config) 255 if valid: 256 self.checkout_submodules(submodules, config) 257 else: 258 raise RepositoryError(RepositoryError.INVALID_SUBMODULES) 259 260 def checkout_submodules(self, submodules, config): 261 """Checkout all repository submodules.""" 262 self.run('git', 'submodule', 'sync') 263 cmd = [ 264 'git', 265 'submodule', 266 'update', 267 '--init', 268 '--force', 269 ] 270 if config.submodules.recursive: 271 cmd.append('--recursive') 272 cmd += submodules 273 self.run(*cmd) 274 275 def find_ref(self, ref): 276 # Check if ref starts with 'origin/' 277 if ref.startswith('origin/'): 278 return ref 279 280 # Check if ref is a branch of the origin remote 281 if self.ref_exists('remotes/origin/' + ref): 282 return 'origin/' + ref 283 284 return ref 285 286 def ref_exists(self, ref): 287 try: 288 r = git.Repo(self.working_dir) 289 if r.commit(ref): 290 return True 291 except (BadName, ValueError): 292 return False 293 return False 294 295 @property 296 def env(self): 297 env = super(Backend, self).env 298 env['GIT_DIR'] = os.path.join(self.working_dir, '.git') 299 # Don't prompt for username, this requires Git 2.3+ 300 env['GIT_TERMINAL_PROMPT'] = '0' 301 return env 302 [end of readthedocs/vcs_support/backends/git.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/readthedocs/vcs_support/backends/git.py b/readthedocs/vcs_support/backends/git.py --- a/readthedocs/vcs_support/backends/git.py +++ b/readthedocs/vcs_support/backends/git.py @@ -122,7 +122,9 @@ return True, submodules.keys() def fetch(self): - code, _, _ = self.run('git', 'fetch', '--tags', '--prune') + code, _, _ = self.run( + 'git', 'fetch', '--tags', '--prune', '--prune-tags', + ) if code != 0: raise RepositoryError
{"golden_diff": "diff --git a/readthedocs/vcs_support/backends/git.py b/readthedocs/vcs_support/backends/git.py\n--- a/readthedocs/vcs_support/backends/git.py\n+++ b/readthedocs/vcs_support/backends/git.py\n@@ -122,7 +122,9 @@\n return True, submodules.keys()\n \n def fetch(self):\n- code, _, _ = self.run('git', 'fetch', '--tags', '--prune')\n+ code, _, _ = self.run(\n+ 'git', 'fetch', '--tags', '--prune', '--prune-tags',\n+ )\n if code != 0:\n raise RepositoryError\n", "issue": "Delete untracked tags on fetch step\nCurrently, if the user deletes a tag, it needs to wipe the environment for this change be reflected in their version list.\r\n\r\nThere are some solutions to delete untracked tags (require more than 2 commands). But I found that the newest version of git has the `--prune-tags` option, which is used as `git fetch --prune --prune-tags` (`git >2.17`). We need to update git on the servers (we use 2.7.4) and change the fetch command. Or we can find a way to wipe the environment if we detect something like this case.\r\n\r\nRaised in https://github.com/rtfd/readthedocs.org/pull/3913#issuecomment-396673349\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Git-related utilities.\"\"\"\n\nfrom __future__ import (\n absolute_import, division, print_function, unicode_literals)\n\nimport csv\nimport logging\nimport os\nimport re\n\nimport git\nfrom builtins import str\nfrom django.core.exceptions import ValidationError\nfrom git.exc import BadName\nfrom six import PY2, StringIO\n\nfrom readthedocs.config import ALL\nfrom readthedocs.projects.exceptions import RepositoryError\nfrom readthedocs.projects.validators import validate_submodule_url\nfrom readthedocs.vcs_support.base import BaseVCS, VCSVersion\n\nlog = logging.getLogger(__name__)\n\n\nclass Backend(BaseVCS):\n\n \"\"\"Git VCS backend.\"\"\"\n\n supports_tags = True\n supports_branches = True\n supports_submodules = True\n fallback_branch = 'master' # default branch\n\n def __init__(self, *args, **kwargs):\n super(Backend, self).__init__(*args, **kwargs)\n self.token = kwargs.get('token', None)\n self.repo_url = self._get_clone_url()\n\n def _get_clone_url(self):\n if '://' in self.repo_url:\n hacked_url = self.repo_url.split('://')[1]\n hacked_url = re.sub('.git$', '', hacked_url)\n clone_url = 'https://%s' % hacked_url\n if self.token:\n clone_url = 'https://%s@%s' % (self.token, hacked_url)\n return clone_url\n # Don't edit URL because all hosts aren't the same\n # else:\n # clone_url = 'git://%s' % (hacked_url)\n return self.repo_url\n\n def set_remote_url(self, url):\n return self.run('git', 'remote', 'set-url', 'origin', url)\n\n def update(self):\n # Use checkout() to update repo\n # TODO: See where we call this\n self.checkout()\n\n def repo_exists(self):\n code, _, _ = self.run('git', 'status', record=False)\n return code == 0\n\n def are_submodules_available(self, config):\n \"\"\"Test whether git submodule checkout step should be performed.\"\"\"\n # TODO remove this after users migrate to a config file\n from readthedocs.projects.models import Feature\n submodules_in_config = (\n config.submodules.exclude != ALL or\n config.submodules.include\n )\n if (self.project.has_feature(Feature.SKIP_SUBMODULES) or\n not submodules_in_config):\n return False\n\n # Keep compatibility with previous projects\n code, out, _ = self.run('git', 'submodule', 'status', record=False)\n return code == 0 and bool(out)\n\n def validate_submodules(self, config):\n \"\"\"\n Returns the submodules and check that its URLs are 
valid.\n\n .. note::\n\n Allways call after `self.are_submodules_available`.\n\n :returns: tuple(bool, list)\n\n Returns true if all required submodules URLs are valid.\n Returns a list of all required submodules:\n - Include is `ALL`, returns all submodules avaliable.\n - Include is a list, returns just those.\n - Exclude is `ALL` - this should never happen.\n - Exlude is a list, returns all avaliable submodules\n but those from the list.\n \"\"\"\n repo = git.Repo(self.working_dir)\n submodules = {\n sub.path: sub\n for sub in repo.submodules\n }\n\n for sub_path in config.submodules.exclude:\n path = sub_path.rstrip('/')\n if path in submodules:\n del submodules[path]\n\n if config.submodules.include != ALL and config.submodules.include:\n submodules_include = {}\n for sub_path in config.submodules.include:\n path = sub_path.rstrip('/')\n submodules_include[path] = submodules[path]\n submodules = submodules_include\n\n for path, submodule in submodules.items():\n try:\n validate_submodule_url(submodule.url)\n except ValidationError:\n return False, []\n return True, submodules.keys()\n\n def fetch(self):\n code, _, _ = self.run('git', 'fetch', '--tags', '--prune')\n if code != 0:\n raise RepositoryError\n\n def checkout_revision(self, revision=None):\n if not revision:\n branch = self.default_branch or self.fallback_branch\n revision = 'origin/%s' % branch\n\n code, out, err = self.run('git', 'checkout', '--force', revision)\n if code != 0:\n log.warning(\"Failed to checkout revision '%s': %s\", revision, code)\n return [code, out, err]\n\n def clone(self):\n \"\"\"\n Clone the repository.\n\n .. note::\n\n Temporarily, we support skipping submodule recursive clone via a\n feature flag. This will eventually be configurable with our YAML\n config.\n \"\"\"\n # TODO remove with https://github.com/rtfd/readthedocs-build/issues/30\n from readthedocs.projects.models import Feature\n cmd = ['git', 'clone']\n cmd.extend([self.repo_url, '.'])\n code, _, _ = self.run(*cmd)\n if code != 0:\n raise RepositoryError\n\n @property\n def tags(self):\n versions = []\n repo = git.Repo(self.working_dir)\n for tag in repo.tags:\n try:\n versions.append(VCSVersion(self, str(tag.commit), str(tag)))\n except ValueError as e:\n # ValueError: Cannot resolve commit as tag TAGNAME points to a\n # blob object - use the `.object` property instead to access it\n # This is not a real tag for us, so we skip it\n # https://github.com/rtfd/readthedocs.org/issues/4440\n log.warning('Git tag skipped: %s', tag, exc_info=True)\n continue\n return versions\n\n @property\n def branches(self):\n # Only show remote branches\n retcode, stdout, _ = self.run(\n 'git',\n 'branch',\n '-r',\n record_as_success=True,\n )\n # error (or no branches found)\n if retcode != 0:\n return []\n return self.parse_branches(stdout)\n\n def parse_branches(self, data):\n \"\"\"\n Parse output of git branch -r.\n\n e.g.:\n\n origin/2.0.X\n origin/HEAD -> origin/master\n origin/develop\n origin/master\n origin/release/2.0.0\n origin/release/2.1.0\n \"\"\"\n clean_branches = []\n # StringIO below is expecting Unicode data, so ensure that it gets it.\n if not isinstance(data, str):\n data = str(data)\n delimiter = str(' ').encode('utf-8') if PY2 else str(' ')\n raw_branches = csv.reader(StringIO(data), delimiter=delimiter)\n for branch in raw_branches:\n branch = [f for f in branch if f not in ('', '*')]\n # Handle empty branches\n if branch:\n branch = branch[0]\n if branch.startswith('origin/'):\n verbose_name = branch.replace('origin/', '')\n if 
verbose_name in ['HEAD']:\n continue\n clean_branches.append(\n VCSVersion(self, branch, verbose_name))\n else:\n clean_branches.append(VCSVersion(self, branch, branch))\n return clean_branches\n\n @property\n def commit(self):\n _, stdout, _ = self.run('git', 'rev-parse', 'HEAD')\n return stdout.strip()\n\n def checkout(self, identifier=None):\n self.check_working_dir()\n\n # Clone or update repository\n if self.repo_exists():\n self.set_remote_url(self.repo_url)\n self.fetch()\n else:\n self.make_clean_working_dir()\n self.clone()\n\n # Find proper identifier\n if not identifier:\n identifier = self.default_branch or self.fallback_branch\n\n identifier = self.find_ref(identifier)\n\n # Checkout the correct identifier for this branch.\n code, out, err = self.checkout_revision(identifier)\n if code != 0:\n return code, out, err\n\n # Clean any remains of previous checkouts\n self.run('git', 'clean', '-d', '-f', '-f')\n return code, out, err\n\n def update_submodules(self, config):\n if self.are_submodules_available(config):\n valid, submodules = self.validate_submodules(config)\n if valid:\n self.checkout_submodules(submodules, config)\n else:\n raise RepositoryError(RepositoryError.INVALID_SUBMODULES)\n\n def checkout_submodules(self, submodules, config):\n \"\"\"Checkout all repository submodules.\"\"\"\n self.run('git', 'submodule', 'sync')\n cmd = [\n 'git',\n 'submodule',\n 'update',\n '--init',\n '--force',\n ]\n if config.submodules.recursive:\n cmd.append('--recursive')\n cmd += submodules\n self.run(*cmd)\n\n def find_ref(self, ref):\n # Check if ref starts with 'origin/'\n if ref.startswith('origin/'):\n return ref\n\n # Check if ref is a branch of the origin remote\n if self.ref_exists('remotes/origin/' + ref):\n return 'origin/' + ref\n\n return ref\n\n def ref_exists(self, ref):\n try:\n r = git.Repo(self.working_dir)\n if r.commit(ref):\n return True\n except (BadName, ValueError):\n return False\n return False\n\n @property\n def env(self):\n env = super(Backend, self).env\n env['GIT_DIR'] = os.path.join(self.working_dir, '.git')\n # Don't prompt for username, this requires Git 2.3+\n env['GIT_TERMINAL_PROMPT'] = '0'\n return env\n", "path": "readthedocs/vcs_support/backends/git.py"}]}
3,671
147
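The readthedocs record above comes down to one extra flag on the fetch step. A standalone sketch of that command wrapped in Python is below; it assumes a local checkout with an `origin` remote and a git new enough (>= 2.17) to understand `--prune-tags`.

```python
import subprocess


def fetch(repo_dir="."):
    """Fetch remote refs and drop local tags and remote-tracking branches gone upstream.

    Uses the same flags as the patch above; --prune-tags requires git >= 2.17.
    """
    cmd = ["git", "fetch", "--tags", "--prune", "--prune-tags"]
    result = subprocess.run(cmd, cwd=repo_dir, capture_output=True, text=True)
    if result.returncode != 0:
        raise RuntimeError(f"git fetch failed: {result.stderr.strip()}")
    return result.stdout


if __name__ == "__main__":
    # Run from inside any git checkout; stale local tags disappear without wiping the clone.
    print(fetch("."))
```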
gh_patches_debug_25452
rasdani/github-patches
git_diff
encode__httpx-566
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> scripts as a single top-level point of project automation. Generally across the `encode` projects I've been using [GitHub's "Scripts to Rule Them All" pattern](https://github.blog/2015-06-30-scripts-to-rule-them-all/) as a consistent style for project testing, linting, publishing etc... It's benenficial from my POV, because it ends up that we can always just run `scripts/install`, or `scripts/test` from *any* project and expect that to perform the same kind of thing, even if we're using different tooling between projects. (eg. started adopting `nox` in some places, but not yet there accross the board.) I'd like to do the same with `httpx`, while still keeping the niceness of `nox` (So eg. reintroduce a `scripts/test`, `scripts/lint` and `scripts/publish`.) but figure it's worth raising this for discussion first. I'd also be quite keen on having a fast-feedback test suite being the default, with nox's run against multiple environments as an explicit case? Any general thoughts here or should I just dive in and take a crack at this? </issue> <code> [start of noxfile.py] 1 import nox 2 3 nox.options.stop_on_first_error = True 4 nox.options.reuse_existing_virtualenvs = True 5 nox.options.keywords = "not serve" 6 7 source_files = ("httpx", "tools", "tests", "setup.py", "noxfile.py") 8 9 10 @nox.session 11 def lint(session): 12 session.install( 13 "--upgrade", "autoflake", "black", "flake8", "isort", "seed-isort-config" 14 ) 15 16 session.run("autoflake", "--in-place", "--recursive", *source_files) 17 session.run("seed-isort-config", "--application-directories=httpx") 18 session.run("isort", "--project=httpx", "--recursive", "--apply", *source_files) 19 session.run("black", "--target-version=py36", *source_files) 20 21 check(session) 22 23 24 @nox.session 25 def check(session): 26 session.install( 27 "--upgrade", "black", "flake8", "flake8-bugbear", "flake8-pie", "isort", "mypy" 28 ) 29 30 session.run("black", "--check", "--diff", "--target-version=py36", *source_files) 31 session.run("flake8", *source_files) 32 session.run("mypy", "httpx") 33 session.run( 34 "isort", "--check", "--diff", "--project=httpx", "--recursive", *source_files 35 ) 36 37 38 @nox.session 39 def docs(session): 40 session.install("--upgrade", "mkdocs", "mkdocs-material", "mkautodoc>=0.1.0") 41 session.install("-e", ".") 42 session.run("mkdocs", "build") 43 44 45 @nox.session(reuse_venv=True) 46 def serve(session): 47 session.install("--upgrade", "mkdocs", "mkdocs-material") 48 49 session.run("mkdocs", "serve") 50 51 52 @nox.session(python=["3.6", "3.7", "3.8"]) 53 def test(session): 54 session.install("--upgrade", "-r", "test-requirements.txt") 55 session.run("python", "-m", "pytest", *session.posargs) 56 [end of noxfile.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/noxfile.py b/noxfile.py deleted file mode 100644 --- a/noxfile.py +++ /dev/null @@ -1,55 +0,0 @@ -import nox - -nox.options.stop_on_first_error = True -nox.options.reuse_existing_virtualenvs = True -nox.options.keywords = "not serve" - -source_files = ("httpx", "tools", "tests", "setup.py", "noxfile.py") - - [email protected] -def lint(session): - session.install( - "--upgrade", "autoflake", "black", "flake8", "isort", "seed-isort-config" - ) - - session.run("autoflake", "--in-place", "--recursive", *source_files) - session.run("seed-isort-config", "--application-directories=httpx") - session.run("isort", "--project=httpx", "--recursive", "--apply", *source_files) - session.run("black", "--target-version=py36", *source_files) - - check(session) - - [email protected] -def check(session): - session.install( - "--upgrade", "black", "flake8", "flake8-bugbear", "flake8-pie", "isort", "mypy" - ) - - session.run("black", "--check", "--diff", "--target-version=py36", *source_files) - session.run("flake8", *source_files) - session.run("mypy", "httpx") - session.run( - "isort", "--check", "--diff", "--project=httpx", "--recursive", *source_files - ) - - [email protected] -def docs(session): - session.install("--upgrade", "mkdocs", "mkdocs-material", "mkautodoc>=0.1.0") - session.install("-e", ".") - session.run("mkdocs", "build") - - [email protected](reuse_venv=True) -def serve(session): - session.install("--upgrade", "mkdocs", "mkdocs-material") - - session.run("mkdocs", "serve") - - [email protected](python=["3.6", "3.7", "3.8"]) -def test(session): - session.install("--upgrade", "-r", "test-requirements.txt") - session.run("python", "-m", "pytest", *session.posargs)
{"golden_diff": "diff --git a/noxfile.py b/noxfile.py\ndeleted file mode 100644\n--- a/noxfile.py\n+++ /dev/null\n@@ -1,55 +0,0 @@\n-import nox\n-\n-nox.options.stop_on_first_error = True\n-nox.options.reuse_existing_virtualenvs = True\n-nox.options.keywords = \"not serve\"\n-\n-source_files = (\"httpx\", \"tools\", \"tests\", \"setup.py\", \"noxfile.py\")\n-\n-\[email protected]\n-def lint(session):\n- session.install(\n- \"--upgrade\", \"autoflake\", \"black\", \"flake8\", \"isort\", \"seed-isort-config\"\n- )\n-\n- session.run(\"autoflake\", \"--in-place\", \"--recursive\", *source_files)\n- session.run(\"seed-isort-config\", \"--application-directories=httpx\")\n- session.run(\"isort\", \"--project=httpx\", \"--recursive\", \"--apply\", *source_files)\n- session.run(\"black\", \"--target-version=py36\", *source_files)\n-\n- check(session)\n-\n-\[email protected]\n-def check(session):\n- session.install(\n- \"--upgrade\", \"black\", \"flake8\", \"flake8-bugbear\", \"flake8-pie\", \"isort\", \"mypy\"\n- )\n-\n- session.run(\"black\", \"--check\", \"--diff\", \"--target-version=py36\", *source_files)\n- session.run(\"flake8\", *source_files)\n- session.run(\"mypy\", \"httpx\")\n- session.run(\n- \"isort\", \"--check\", \"--diff\", \"--project=httpx\", \"--recursive\", *source_files\n- )\n-\n-\[email protected]\n-def docs(session):\n- session.install(\"--upgrade\", \"mkdocs\", \"mkdocs-material\", \"mkautodoc>=0.1.0\")\n- session.install(\"-e\", \".\")\n- session.run(\"mkdocs\", \"build\")\n-\n-\[email protected](reuse_venv=True)\n-def serve(session):\n- session.install(\"--upgrade\", \"mkdocs\", \"mkdocs-material\")\n-\n- session.run(\"mkdocs\", \"serve\")\n-\n-\[email protected](python=[\"3.6\", \"3.7\", \"3.8\"])\n-def test(session):\n- session.install(\"--upgrade\", \"-r\", \"test-requirements.txt\")\n- session.run(\"python\", \"-m\", \"pytest\", *session.posargs)\n", "issue": "scripts as a single top-level point of project automation.\nGenerally across the `encode` projects I've been using [GitHub's \"Scripts to Rule Them All\" pattern](https://github.blog/2015-06-30-scripts-to-rule-them-all/) as a consistent style for project testing, linting, publishing etc...\r\n\r\nIt's benenficial from my POV, because it ends up that we can always just run `scripts/install`, or `scripts/test` from *any* project and expect that to perform the same kind of thing, even if we're using different tooling between projects. (eg. started adopting `nox` in some places, but not yet there accross the board.)\r\n\r\nI'd like to do the same with `httpx`, while still keeping the niceness of `nox` (So eg. reintroduce a `scripts/test`, `scripts/lint` and `scripts/publish`.) 
but figure it's worth raising this for discussion first.\r\n\r\nI'd also be quite keen on having a fast-feedback test suite being the default, with nox's run against multiple environments as an explicit case?\r\n\r\nAny general thoughts here or should I just dive in and take a crack at this?\n", "before_files": [{"content": "import nox\n\nnox.options.stop_on_first_error = True\nnox.options.reuse_existing_virtualenvs = True\nnox.options.keywords = \"not serve\"\n\nsource_files = (\"httpx\", \"tools\", \"tests\", \"setup.py\", \"noxfile.py\")\n\n\[email protected]\ndef lint(session):\n session.install(\n \"--upgrade\", \"autoflake\", \"black\", \"flake8\", \"isort\", \"seed-isort-config\"\n )\n\n session.run(\"autoflake\", \"--in-place\", \"--recursive\", *source_files)\n session.run(\"seed-isort-config\", \"--application-directories=httpx\")\n session.run(\"isort\", \"--project=httpx\", \"--recursive\", \"--apply\", *source_files)\n session.run(\"black\", \"--target-version=py36\", *source_files)\n\n check(session)\n\n\[email protected]\ndef check(session):\n session.install(\n \"--upgrade\", \"black\", \"flake8\", \"flake8-bugbear\", \"flake8-pie\", \"isort\", \"mypy\"\n )\n\n session.run(\"black\", \"--check\", \"--diff\", \"--target-version=py36\", *source_files)\n session.run(\"flake8\", *source_files)\n session.run(\"mypy\", \"httpx\")\n session.run(\n \"isort\", \"--check\", \"--diff\", \"--project=httpx\", \"--recursive\", *source_files\n )\n\n\[email protected]\ndef docs(session):\n session.install(\"--upgrade\", \"mkdocs\", \"mkdocs-material\", \"mkautodoc>=0.1.0\")\n session.install(\"-e\", \".\")\n session.run(\"mkdocs\", \"build\")\n\n\[email protected](reuse_venv=True)\ndef serve(session):\n session.install(\"--upgrade\", \"mkdocs\", \"mkdocs-material\")\n\n session.run(\"mkdocs\", \"serve\")\n\n\[email protected](python=[\"3.6\", \"3.7\", \"3.8\"])\ndef test(session):\n session.install(\"--upgrade\", \"-r\", \"test-requirements.txt\")\n session.run(\"python\", \"-m\", \"pytest\", *session.posargs)\n", "path": "noxfile.py"}]}
1,350
538
gh_patches_debug_8868
rasdani/github-patches
git_diff
tensorflow__addons-1738
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Dropping support for tensorflow < 2.2. We have three pull requests that need TF 2.2 and above: #1733 #1726 #905 They are blocked until we drop support, since Addons 0.9 was released with support for both tf 2.1 and 2.2, I don't think it's going to be a very big inconvenience for users if we drop support for 2.1 now. They have a good fallback. </issue> <code> [start of tensorflow_addons/utils/resource_loader.py] 1 # Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 # ============================================================================== 15 """Utilities similar to tf.python.platform.resource_loader.""" 16 17 from distutils.version import LooseVersion 18 import os 19 import warnings 20 21 import tensorflow as tf 22 23 MIN_TF_VERSION_FOR_ABI_COMPATIBILITY = "2.1.0" 24 MAX_TF_VERSION_FOR_ABI_COMPATIBILITY = "2.2.0" 25 abi_warning_already_raised = False 26 SKIP_CUSTOM_OPS = False 27 28 29 def get_project_root(): 30 """Returns project root folder.""" 31 return os.path.dirname(os.path.dirname(os.path.abspath(__file__))) 32 33 34 def get_path_to_datafile(path): 35 """Get the path to the specified file in the data dependencies. 36 37 The path is relative to tensorflow_addons/ 38 39 Args: 40 path: a string resource path relative to tensorflow_addons/ 41 Returns: 42 The path to the specified data file 43 """ 44 root_dir = get_project_root() 45 return os.path.join(root_dir, path.replace("/", os.sep)) 46 47 48 class LazySO: 49 def __init__(self, relative_path): 50 self.relative_path = relative_path 51 self._ops = None 52 53 @property 54 def ops(self): 55 if SKIP_CUSTOM_OPS: 56 import pytest 57 58 pytest.skip( 59 "Skipping the test because a custom ops " 60 "was being loaded while --skip-custom-ops was set." 61 ) 62 if self._ops is None: 63 self.display_warning_if_incompatible() 64 self._ops = tf.load_op_library(get_path_to_datafile(self.relative_path)) 65 return self._ops 66 67 def display_warning_if_incompatible(self): 68 global abi_warning_already_raised 69 if abi_is_compatible() or abi_warning_already_raised: 70 return 71 72 warnings.warn( 73 "You are currently using TensorFlow {} and trying to load a custom op ({})." 74 "\n" 75 "TensorFlow Addons has compiled its custom ops against TensorFlow {}, " 76 "and there are no compatibility guarantees between the two versions. " 77 "\n" 78 "This means that you might get segfaults when loading the custom op, " 79 "or other kind of low-level errors.\n If you do, do not file an issue " 80 "on Github. This is a known limitation." 81 "\n\n" 82 "It might help you to fallback to pure Python " 83 "ops with TF_ADDONS_PY_OPS . To do that, see " 84 "https://github.com/tensorflow/addons#gpucpu-custom-ops " 85 "\n\n" 86 "You can also change the TensorFlow version installed on your system. 
" 87 "You would need a TensorFlow version equal to or above {} and strictly " 88 "below {}.\n Note that nightly versions of TensorFlow, " 89 "as well as non-pip TensorFlow like `conda install tensorflow` or compiled " 90 "from source are not supported." 91 "\n\n" 92 "The last solution is to find the TensorFlow Addons version that has " 93 "custom ops compatible with the TensorFlow installed on your " 94 "system. To do that, refer to the readme: " 95 "https://github.com/tensorflow/addons" 96 "".format( 97 tf.__version__, 98 self.relative_path, 99 MIN_TF_VERSION_FOR_ABI_COMPATIBILITY, 100 MIN_TF_VERSION_FOR_ABI_COMPATIBILITY, 101 MAX_TF_VERSION_FOR_ABI_COMPATIBILITY, 102 ), 103 UserWarning, 104 ) 105 abi_warning_already_raised = True 106 107 108 def abi_is_compatible(): 109 if "dev" in tf.__version__: 110 # tf-nightly 111 return False 112 113 min_version = LooseVersion(MIN_TF_VERSION_FOR_ABI_COMPATIBILITY) 114 max_version = LooseVersion(MAX_TF_VERSION_FOR_ABI_COMPATIBILITY) 115 return min_version <= LooseVersion(tf.__version__) < max_version 116 [end of tensorflow_addons/utils/resource_loader.py] [start of tensorflow_addons/utils/ensure_tf_install.py] 1 # Copyright 2019 The TensorFlow Authors. All Rights Reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 # ============================================================================== 15 16 17 # Ensure the TensorFlow version is in the right range. This 18 # needs to happen before anything else, since the imports below will try to 19 # import TensorFlow, too. 20 21 from distutils.version import LooseVersion 22 import warnings 23 24 import tensorflow as tf 25 26 MIN_TF_VERSION = "2.1.0" 27 MAX_TF_VERSION = "2.3.0" 28 29 30 def _check_tf_version(): 31 """Warn the user if the version of TensorFlow used is not supported. 32 33 This is not a check for custom ops compatibility. This check only ensure that 34 we support this TensorFlow version if the user uses only Addons' Python code. 35 """ 36 37 if "dev" in tf.__version__: 38 warnings.warn( 39 "You are currently using a nightly version of TensorFlow ({}). \n" 40 "TensorFlow Addons offers no support for the nightly versions of " 41 "TensorFlow. Some things might work, some other might not. \n" 42 "If you encounter a bug, do not file an issue on GitHub." 43 "".format(tf.__version__), 44 UserWarning, 45 ) 46 return 47 48 min_version = LooseVersion(MIN_TF_VERSION) 49 max_version = LooseVersion(MAX_TF_VERSION) 50 51 if min_version <= LooseVersion(tf.__version__) < max_version: 52 return 53 54 warnings.warn( 55 "Tensorflow Addons supports using Python ops for all Tensorflow versions " 56 "above or equal to {} and strictly below {} (nightly versions are not " 57 "supported). \n " 58 "The versions of TensorFlow you are currently using is {} and is not " 59 "supported. 
\n" 60 "Some things might work, some things might not.\n" 61 "If you were to encounter a bug, do not file an issue.\n" 62 "If you want to make sure you're using a tested and supported configuration, " 63 "either change the TensorFlow version or the TensorFlow Addons's version. \n" 64 "You can find the compatibility matrix in TensorFlow Addon's readme:\n" 65 "https://github.com/tensorflow/addons".format( 66 MIN_TF_VERSION, MAX_TF_VERSION, tf.__version__ 67 ), 68 UserWarning, 69 ) 70 [end of tensorflow_addons/utils/ensure_tf_install.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/tensorflow_addons/utils/ensure_tf_install.py b/tensorflow_addons/utils/ensure_tf_install.py --- a/tensorflow_addons/utils/ensure_tf_install.py +++ b/tensorflow_addons/utils/ensure_tf_install.py @@ -23,7 +23,7 @@ import tensorflow as tf -MIN_TF_VERSION = "2.1.0" +MIN_TF_VERSION = "2.2.0" MAX_TF_VERSION = "2.3.0" diff --git a/tensorflow_addons/utils/resource_loader.py b/tensorflow_addons/utils/resource_loader.py --- a/tensorflow_addons/utils/resource_loader.py +++ b/tensorflow_addons/utils/resource_loader.py @@ -20,8 +20,8 @@ import tensorflow as tf -MIN_TF_VERSION_FOR_ABI_COMPATIBILITY = "2.1.0" -MAX_TF_VERSION_FOR_ABI_COMPATIBILITY = "2.2.0" +MIN_TF_VERSION_FOR_ABI_COMPATIBILITY = "2.2.0" +MAX_TF_VERSION_FOR_ABI_COMPATIBILITY = "2.3.0" abi_warning_already_raised = False SKIP_CUSTOM_OPS = False
{"golden_diff": "diff --git a/tensorflow_addons/utils/ensure_tf_install.py b/tensorflow_addons/utils/ensure_tf_install.py\n--- a/tensorflow_addons/utils/ensure_tf_install.py\n+++ b/tensorflow_addons/utils/ensure_tf_install.py\n@@ -23,7 +23,7 @@\n \n import tensorflow as tf\n \n-MIN_TF_VERSION = \"2.1.0\"\n+MIN_TF_VERSION = \"2.2.0\"\n MAX_TF_VERSION = \"2.3.0\"\n \n \ndiff --git a/tensorflow_addons/utils/resource_loader.py b/tensorflow_addons/utils/resource_loader.py\n--- a/tensorflow_addons/utils/resource_loader.py\n+++ b/tensorflow_addons/utils/resource_loader.py\n@@ -20,8 +20,8 @@\n \n import tensorflow as tf\n \n-MIN_TF_VERSION_FOR_ABI_COMPATIBILITY = \"2.1.0\"\n-MAX_TF_VERSION_FOR_ABI_COMPATIBILITY = \"2.2.0\"\n+MIN_TF_VERSION_FOR_ABI_COMPATIBILITY = \"2.2.0\"\n+MAX_TF_VERSION_FOR_ABI_COMPATIBILITY = \"2.3.0\"\n abi_warning_already_raised = False\n SKIP_CUSTOM_OPS = False\n", "issue": "Dropping support for tensorflow < 2.2.\nWe have three pull requests that need TF 2.2 and above: #1733 #1726 #905 \r\n\r\nThey are blocked until we drop support, since Addons 0.9 was released with support for both tf 2.1 and 2.2, I don't think it's going to be a very big inconvenience for users if we drop support for 2.1 now. They have a good fallback.\n", "before_files": [{"content": "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Utilities similar to tf.python.platform.resource_loader.\"\"\"\n\nfrom distutils.version import LooseVersion\nimport os\nimport warnings\n\nimport tensorflow as tf\n\nMIN_TF_VERSION_FOR_ABI_COMPATIBILITY = \"2.1.0\"\nMAX_TF_VERSION_FOR_ABI_COMPATIBILITY = \"2.2.0\"\nabi_warning_already_raised = False\nSKIP_CUSTOM_OPS = False\n\n\ndef get_project_root():\n \"\"\"Returns project root folder.\"\"\"\n return os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n\ndef get_path_to_datafile(path):\n \"\"\"Get the path to the specified file in the data dependencies.\n\n The path is relative to tensorflow_addons/\n\n Args:\n path: a string resource path relative to tensorflow_addons/\n Returns:\n The path to the specified data file\n \"\"\"\n root_dir = get_project_root()\n return os.path.join(root_dir, path.replace(\"/\", os.sep))\n\n\nclass LazySO:\n def __init__(self, relative_path):\n self.relative_path = relative_path\n self._ops = None\n\n @property\n def ops(self):\n if SKIP_CUSTOM_OPS:\n import pytest\n\n pytest.skip(\n \"Skipping the test because a custom ops \"\n \"was being loaded while --skip-custom-ops was set.\"\n )\n if self._ops is None:\n self.display_warning_if_incompatible()\n self._ops = tf.load_op_library(get_path_to_datafile(self.relative_path))\n return self._ops\n\n def display_warning_if_incompatible(self):\n global abi_warning_already_raised\n if abi_is_compatible() or abi_warning_already_raised:\n return\n\n warnings.warn(\n \"You are currently using TensorFlow {} and trying to load a custom op 
({}).\"\n \"\\n\"\n \"TensorFlow Addons has compiled its custom ops against TensorFlow {}, \"\n \"and there are no compatibility guarantees between the two versions. \"\n \"\\n\"\n \"This means that you might get segfaults when loading the custom op, \"\n \"or other kind of low-level errors.\\n If you do, do not file an issue \"\n \"on Github. This is a known limitation.\"\n \"\\n\\n\"\n \"It might help you to fallback to pure Python \"\n \"ops with TF_ADDONS_PY_OPS . To do that, see \"\n \"https://github.com/tensorflow/addons#gpucpu-custom-ops \"\n \"\\n\\n\"\n \"You can also change the TensorFlow version installed on your system. \"\n \"You would need a TensorFlow version equal to or above {} and strictly \"\n \"below {}.\\n Note that nightly versions of TensorFlow, \"\n \"as well as non-pip TensorFlow like `conda install tensorflow` or compiled \"\n \"from source are not supported.\"\n \"\\n\\n\"\n \"The last solution is to find the TensorFlow Addons version that has \"\n \"custom ops compatible with the TensorFlow installed on your \"\n \"system. To do that, refer to the readme: \"\n \"https://github.com/tensorflow/addons\"\n \"\".format(\n tf.__version__,\n self.relative_path,\n MIN_TF_VERSION_FOR_ABI_COMPATIBILITY,\n MIN_TF_VERSION_FOR_ABI_COMPATIBILITY,\n MAX_TF_VERSION_FOR_ABI_COMPATIBILITY,\n ),\n UserWarning,\n )\n abi_warning_already_raised = True\n\n\ndef abi_is_compatible():\n if \"dev\" in tf.__version__:\n # tf-nightly\n return False\n\n min_version = LooseVersion(MIN_TF_VERSION_FOR_ABI_COMPATIBILITY)\n max_version = LooseVersion(MAX_TF_VERSION_FOR_ABI_COMPATIBILITY)\n return min_version <= LooseVersion(tf.__version__) < max_version\n", "path": "tensorflow_addons/utils/resource_loader.py"}, {"content": "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\n# Ensure the TensorFlow version is in the right range. This\n# needs to happen before anything else, since the imports below will try to\n# import TensorFlow, too.\n\nfrom distutils.version import LooseVersion\nimport warnings\n\nimport tensorflow as tf\n\nMIN_TF_VERSION = \"2.1.0\"\nMAX_TF_VERSION = \"2.3.0\"\n\n\ndef _check_tf_version():\n \"\"\"Warn the user if the version of TensorFlow used is not supported.\n\n This is not a check for custom ops compatibility. This check only ensure that\n we support this TensorFlow version if the user uses only Addons' Python code.\n \"\"\"\n\n if \"dev\" in tf.__version__:\n warnings.warn(\n \"You are currently using a nightly version of TensorFlow ({}). \\n\"\n \"TensorFlow Addons offers no support for the nightly versions of \"\n \"TensorFlow. Some things might work, some other might not. 
\\n\"\n \"If you encounter a bug, do not file an issue on GitHub.\"\n \"\".format(tf.__version__),\n UserWarning,\n )\n return\n\n min_version = LooseVersion(MIN_TF_VERSION)\n max_version = LooseVersion(MAX_TF_VERSION)\n\n if min_version <= LooseVersion(tf.__version__) < max_version:\n return\n\n warnings.warn(\n \"Tensorflow Addons supports using Python ops for all Tensorflow versions \"\n \"above or equal to {} and strictly below {} (nightly versions are not \"\n \"supported). \\n \"\n \"The versions of TensorFlow you are currently using is {} and is not \"\n \"supported. \\n\"\n \"Some things might work, some things might not.\\n\"\n \"If you were to encounter a bug, do not file an issue.\\n\"\n \"If you want to make sure you're using a tested and supported configuration, \"\n \"either change the TensorFlow version or the TensorFlow Addons's version. \\n\"\n \"You can find the compatibility matrix in TensorFlow Addon's readme:\\n\"\n \"https://github.com/tensorflow/addons\".format(\n MIN_TF_VERSION, MAX_TF_VERSION, tf.__version__\n ),\n UserWarning,\n )\n", "path": "tensorflow_addons/utils/ensure_tf_install.py"}]}
2,579
262
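As a quick standalone illustration of the support window changed in the diff above, the LooseVersion comparison used by `ensure_tf_install.py` and `resource_loader.py` behaves as follows once the minimum is raised to 2.2.0. This is a sketch, not part of the record; the helper name and the demo loop are assumptions added only for illustration.

```python
# Standalone sketch, not record content: mirrors the LooseVersion-based gate in
# ensure_tf_install.py / resource_loader.py with the bumped bounds from the diff.
# Note: distutils is deprecated (removed in Python 3.12); it is kept here only
# because the original files import it.
from distutils.version import LooseVersion

MIN_TF_VERSION = "2.2.0"  # raised from 2.1.0 by the golden diff
MAX_TF_VERSION = "2.3.0"


def in_support_window(tf_version: str) -> bool:
    """True if tf_version falls inside [MIN_TF_VERSION, MAX_TF_VERSION)."""
    return (
        LooseVersion(MIN_TF_VERSION)
        <= LooseVersion(tf_version)
        < LooseVersion(MAX_TF_VERSION)
    )


if __name__ == "__main__":
    for version in ("2.1.0", "2.2.0", "2.2.2", "2.3.0"):
        print(version, in_support_window(version))
    # 2.1.0 False, 2.2.0 True, 2.2.2 True, 2.3.0 False (upper bound exclusive)
```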
gh_patches_debug_1770
rasdani/github-patches
git_diff
weecology__retriever-677
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Download only fails for `gwdd` ``` ethan@gandalf:~$ retriever download gwdd => Installing Zanne et al. Global wood density database. Creating database GWDD... Couldn't create database ('NoneType' object has no attribute 'execute'). Trying to continue anyway. 'table_name' ``` And the file doesn't appear. From `try_install_all.py`: ``` ('download', 'gwdd', KeyError('table_name',)) ``` </issue> <code> [start of scripts/gwdd.py] 1 #retriever 2 """Retriever script for Zanne et al. Global wood density database. 3 4 """ 5 from builtins import str 6 from builtins import range 7 8 import os 9 import sys 10 import xlrd 11 from retriever.lib.templates import Script 12 from retriever.lib.models import Table 13 from retriever.lib.excel import Excel 14 15 16 17 class main(Script): 18 def __init__(self, **kwargs): 19 Script.__init__(self, **kwargs) 20 self.name = "Zanne et al. Global wood density database." 21 self.shortname = "GWDD" 22 self.retriever_minimum_version = '2.0.0-dev' 23 self.script_version = '1.0' 24 self.urls = {"GWDD": "http://datadryad.org/bitstream/handle/10255/dryad.235/GlobalWoodDensityDatabase.xls?sequence=1"} 25 self.tags = ["Taxon > Plants", "Spatial Scale > Global", 26 "Data Type > Observational"] 27 self.ref = "http://datadryad.org/resource/doi:10.5061/dryad.234" 28 self.description = "A collection and collation of data on the major wood functional traits, including the largest wood density database to date (8412 taxa), mechanical strength measures and anatomical features, as well as clade-specific features such as secondary chemistry." 29 self.citation = "Chave J, Coomes DA, Jansen S, Lewis SL, Swenson NG, Zanne AE (2009) Towards a worldwide wood economics spectrum. Ecology Letters 12(4): 351-366. http://dx.doi.org/10.1111/j.1461-0248.2009.01285.x" 30 self.addendum = """ *Correspondence for updates to the database: [email protected] 31 For descriptions of the database, see Chave et al. 2009. Towards a worldwide wood economics spectrum. Ecology Letters. Identifier: http://hdl.handle.net/10255/dryad.234 32 33 Below we list the rules of use for the Global wood density database. 34 These are developed based on the rules of use for the Glopnet dataset (www.nature.com/nature/journal/v428/n6985/full/nature02403.html) and Cedar Creek LTER and Related Data (http://www.lter.umn.edu/cgi-bin/register). 35 If you would like to use the Global wood density database, we request that you: 36 1. Notify the main address of correspondence (Gaby Lopez-Gonzalo) if you plan to use the database in a publication. 37 2. Provide recognition of the efforts of this group in the assembly of the data by using the citation for the database above. 38 3. Recognize that these data were assembled by the group for various analyses and research questions. 
If any of these uses overlap with your interests, you recognize that group has precedence in addressing these questions.""" 39 40 def download(self, engine=None, debug=False): 41 Script.download(self, engine, debug) 42 43 self.engine.download_file(self.urls["GWDD"], "GlobalWoodDensityDatabase.xls") 44 filename = os.path.basename("GlobalWoodDensityDatabase.xls") 45 46 book = xlrd.open_workbook(self.engine.format_filename(filename)) 47 sh = book.sheet_by_index(1) 48 rows = sh.nrows 49 50 #Creating data table 51 lines = [] 52 for i in range(1, rows): 53 row = sh.row(i) 54 if not all(Excel.empty_cell(cell) for cell in row): 55 this_line = {} 56 def format_value(s): 57 s = Excel.cell_value(s) 58 return str(s).title().replace("\\", "/").replace('"', '') 59 for num, label in enumerate(["Number", "Family", "Binomial", "Wood_Density", 60 "Region", "Reference_Number"]): 61 this_line[label] = format_value(row[num]) 62 lines.append(this_line) 63 64 table = Table("data", delimiter="\t") 65 table.columns=[("Number" , ("pk-int",) ), 66 ("Family" , ("char",) ), 67 ("Binomial" , ("char",) ), 68 ("Wood_Density" , ("double",) ), 69 ("Region" , ("char",) ), 70 ("Reference_Number" , ("int",) )] 71 table.pk = 'Number' 72 table.contains_pk = True 73 74 gwdd = [] 75 for line in lines: 76 gwdd_data = [line["Number"], 77 line["Family"], 78 line["Binomial"], 79 line["Wood_Density"], 80 line["Region"], 81 line["Reference_Number"]] 82 gwdd.append(gwdd_data) 83 84 data = ['\t'.join(gwdd_line) for gwdd_line in gwdd] 85 self.engine.table = table 86 self.engine.create_table() 87 self.engine.add_to_table(data) 88 89 #Creating reference table 90 lines = [] 91 sh = book.sheet_by_index(2) 92 rows = sh.nrows 93 for i in range(1, rows): 94 row = sh.row(i) 95 if not all(Excel.empty_cell(cell) for cell in row): 96 this_line = {} 97 def format_value(s): 98 s = Excel.cell_value(s) 99 return str(s).title().replace("\\", "/").replace('"', '') 100 for num, label in enumerate(["Reference_Number", "Reference"]): 101 this_line[label] = format_value(row[num]) 102 lines.append(this_line) 103 104 table = Table("reference", delimiter="\t") 105 table.columns=[("Reference_Number" , ("pk-int",) ), 106 ("Reference" , ("char",) )] 107 table.pk = 'Reference_Number' 108 table.contains_pk = True 109 110 gwdd = [] 111 for line in lines: 112 gwdd_ref = [line["Reference_Number"], 113 line["Reference"]] 114 gwdd.append(gwdd_ref) 115 116 data = ['\t'.join(gwdd_line) for gwdd_line in gwdd] 117 self.engine.table = table 118 self.engine.create_table() 119 self.engine.add_to_table(data) 120 121 return self.engine 122 123 SCRIPT = main() 124 125 [end of scripts/gwdd.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/scripts/gwdd.py b/scripts/gwdd.py --- a/scripts/gwdd.py +++ b/scripts/gwdd.py @@ -117,7 +117,8 @@ self.engine.table = table self.engine.create_table() self.engine.add_to_table(data) - + self.engine.find_file("GlobalWoodDensityDatabase.xls") + return self.engine SCRIPT = main()
{"golden_diff": "diff --git a/scripts/gwdd.py b/scripts/gwdd.py\n--- a/scripts/gwdd.py\n+++ b/scripts/gwdd.py\n@@ -117,7 +117,8 @@\n self.engine.table = table\n self.engine.create_table()\n self.engine.add_to_table(data)\n- \n+ self.engine.find_file(\"GlobalWoodDensityDatabase.xls\")\n+\n return self.engine\n \n SCRIPT = main()\n", "issue": "Download only fails for `gwdd`\n```\nethan@gandalf:~$ retriever download gwdd\n=> Installing Zanne et al. Global wood density database.\nCreating database GWDD...\nCouldn't create database ('NoneType' object has no attribute 'execute'). Trying to continue anyway.\n'table_name'\n```\n\nAnd the file doesn't appear.\n\nFrom `try_install_all.py`:\n\n```\n('download', 'gwdd', KeyError('table_name',))\n```\n\n", "before_files": [{"content": "#retriever\n\"\"\"Retriever script for Zanne et al. Global wood density database.\n\n\"\"\"\nfrom builtins import str\nfrom builtins import range\n\nimport os\nimport sys\nimport xlrd\nfrom retriever.lib.templates import Script\nfrom retriever.lib.models import Table\nfrom retriever.lib.excel import Excel\n\n\n\nclass main(Script):\n def __init__(self, **kwargs):\n Script.__init__(self, **kwargs)\n self.name = \"Zanne et al. Global wood density database.\"\n self.shortname = \"GWDD\"\n self.retriever_minimum_version = '2.0.0-dev'\n self.script_version = '1.0'\n self.urls = {\"GWDD\": \"http://datadryad.org/bitstream/handle/10255/dryad.235/GlobalWoodDensityDatabase.xls?sequence=1\"}\n self.tags = [\"Taxon > Plants\", \"Spatial Scale > Global\",\n \"Data Type > Observational\"]\n self.ref = \"http://datadryad.org/resource/doi:10.5061/dryad.234\"\n self.description = \"A collection and collation of data on the major wood functional traits, including the largest wood density database to date (8412 taxa), mechanical strength measures and anatomical features, as well as clade-specific features such as secondary chemistry.\"\n self.citation = \"Chave J, Coomes DA, Jansen S, Lewis SL, Swenson NG, Zanne AE (2009) Towards a worldwide wood economics spectrum. Ecology Letters 12(4): 351-366. http://dx.doi.org/10.1111/j.1461-0248.2009.01285.x\"\n self.addendum = \"\"\" *Correspondence for updates to the database: [email protected]\n For descriptions of the database, see Chave et al. 2009. Towards a worldwide wood economics spectrum. Ecology Letters. Identifier: http://hdl.handle.net/10255/dryad.234\n\n Below we list the rules of use for the Global wood density database. \n These are developed based on the rules of use for the Glopnet dataset (www.nature.com/nature/journal/v428/n6985/full/nature02403.html) and Cedar Creek LTER and Related Data (http://www.lter.umn.edu/cgi-bin/register). \n If you would like to use the Global wood density database, we request that you:\n 1. Notify the main address of correspondence (Gaby Lopez-Gonzalo) if you plan to use the database in a publication.\n 2. Provide recognition of the efforts of this group in the assembly of the data by using the citation for the database above.\n 3. Recognize that these data were assembled by the group for various analyses and research questions. 
If any of these uses overlap with your interests, you recognize that group has precedence in addressing these questions.\"\"\"\n\n def download(self, engine=None, debug=False):\n Script.download(self, engine, debug)\n\n self.engine.download_file(self.urls[\"GWDD\"], \"GlobalWoodDensityDatabase.xls\")\n filename = os.path.basename(\"GlobalWoodDensityDatabase.xls\")\n\n book = xlrd.open_workbook(self.engine.format_filename(filename))\n sh = book.sheet_by_index(1)\n rows = sh.nrows\n\n #Creating data table\n lines = []\n for i in range(1, rows):\n row = sh.row(i)\n if not all(Excel.empty_cell(cell) for cell in row):\n this_line = {}\n def format_value(s):\n s = Excel.cell_value(s)\n return str(s).title().replace(\"\\\\\", \"/\").replace('\"', '')\n for num, label in enumerate([\"Number\", \"Family\", \"Binomial\", \"Wood_Density\",\n \"Region\", \"Reference_Number\"]):\n this_line[label] = format_value(row[num])\n lines.append(this_line)\n\n table = Table(\"data\", delimiter=\"\\t\")\n table.columns=[(\"Number\" , (\"pk-int\",) ),\n (\"Family\" , (\"char\",) ),\n (\"Binomial\" , (\"char\",) ),\n (\"Wood_Density\" , (\"double\",) ),\n (\"Region\" , (\"char\",) ),\n (\"Reference_Number\" , (\"int\",) )]\n table.pk = 'Number'\n table.contains_pk = True\n\n gwdd = []\n for line in lines:\n gwdd_data = [line[\"Number\"],\n line[\"Family\"],\n line[\"Binomial\"],\n line[\"Wood_Density\"],\n line[\"Region\"],\n line[\"Reference_Number\"]]\n gwdd.append(gwdd_data)\n\n data = ['\\t'.join(gwdd_line) for gwdd_line in gwdd]\n self.engine.table = table\n self.engine.create_table()\n self.engine.add_to_table(data)\n\n #Creating reference table\n lines = []\n sh = book.sheet_by_index(2)\n rows = sh.nrows\n for i in range(1, rows):\n row = sh.row(i)\n if not all(Excel.empty_cell(cell) for cell in row):\n this_line = {}\n def format_value(s):\n s = Excel.cell_value(s)\n return str(s).title().replace(\"\\\\\", \"/\").replace('\"', '')\n for num, label in enumerate([\"Reference_Number\", \"Reference\"]):\n this_line[label] = format_value(row[num])\n lines.append(this_line)\n\n table = Table(\"reference\", delimiter=\"\\t\")\n table.columns=[(\"Reference_Number\" , (\"pk-int\",) ),\n (\"Reference\" , (\"char\",) )]\n table.pk = 'Reference_Number'\n table.contains_pk = True\n\n gwdd = []\n for line in lines:\n gwdd_ref = [line[\"Reference_Number\"],\n line[\"Reference\"]]\n gwdd.append(gwdd_ref)\n\n data = ['\\t'.join(gwdd_line) for gwdd_line in gwdd]\n self.engine.table = table\n self.engine.create_table()\n self.engine.add_to_table(data)\n \n return self.engine\n\nSCRIPT = main()\n\n", "path": "scripts/gwdd.py"}]}
2,277
92
gh_patches_debug_19263
rasdani/github-patches
git_diff
ytdl-org__youtube-dl-12391
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [fox.com] unable to get season_number and episode_number ## Please follow the guide below - You will be asked some questions and requested to provide some information, please read them **carefully** and answer honestly - Put an `x` into all the boxes [ ] relevant to your *issue* (like that [x]) - Use *Preview* tab to see how your issue will actually look like --- ### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2017.02.21*. If it's not read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected. - [x] I've **verified** and **I assure** that I'm running youtube-dl **2017.02.21** ### Before submitting an *issue* make sure you have: - [x] At least skimmed through [README](https://github.com/rg3/youtube-dl/blob/master/README.md) and **most notably** [FAQ](https://github.com/rg3/youtube-dl#faq) and [BUGS](https://github.com/rg3/youtube-dl#bugs) sections - [x] [Searched](https://github.com/rg3/youtube-dl/search?type=Issues) the bugtracker for similar issues including closed ones ### What is the purpose of your *issue*? - [ ] Bug report (encountered problems with youtube-dl) - [ ] Site support request (request for adding support for a new site) - [x] Feature request (request for a new functionality) - [ ] Question - [ ] Other --- ### The following sections concretize particular purposed issues, you can erase any section (the contents between triple ---) not applicable to your *issue* --- ### If the purpose of this *issue* is a *bug report*, *site support request* or you are not completely sure provide the full verbose output as follows: Add `-v` flag to **your command line** you run youtube-dl with, copy the **whole** output and insert it here. It should look similar to one below (replace it with **your** log inserted between triple ```): ``` [debug] System config: [] [debug] User config: [] [debug] Custom config: [] [debug] Command-line args: ['--ap-mso', 'DTV', '--ap-username', 'PRIVATE', '--ap-password', 'PRIVATE', '-v', '--get-filename', '-o', '%(season)s s%(season_number)se%(episode_number)s - %(title)s.%(ext)s', 'http://www.fox.com/watch/796751939630/7684301568'] [debug] Encodings: locale cp1252, fs mbcs, out cp437, pref cp1252 [debug] youtube-dl version 2017.02.17 [debug] Python version 3.4.4 - Windows-10-10.0.14393 [debug] exe versions: ffmpeg N-82092-g89ec4ad, ffprobe N-82092-g89ec4ad, rtmpdump 2.4 [debug] Proxy map: {} NA sNAeNA - Chris Has Got a Date, Date, Date, Date, Date.mp4 ``` --- ### If the purpose of this *issue* is a *site support request* please provide all kinds of example URLs support for which should be included (replace following example URLs by **yours**): - Single video: http://www.fox.com/watch/868757571821/7684301568 - Single video: http://www.fox.com/watch/853172291669/7684301568 - Single video: http://www.fox.com/watch/835908163744/7684301568 - Single video: http://www.fox.com/watch/796751939630/7684301568 Note that **youtube-dl does not support sites dedicated to [copyright infringement](https://github.com/rg3/youtube-dl#can-you-add-support-for-this-anime-video-site-or-site-which-shows-current-movies-for-free)**. In order for site support request to be accepted all provided example URLs should not violate any copyrights. 
--- ### Description of your *issue*, suggested solution and other information Unable to get season, season_number, and episode_number. Output results in NA for each field. I am able to grab the title. The links I provided clearly show the season number and episode number on the web page. I think the info is contained in the following script example from the last link I provided above. ``` <script type="application/ld+json">{"@context":"http:\/\/schema.org","@id":"801550915931","@type":"TVEpisode","name":"Chris Has Got a Date, Date, Date, Date, Date","description":"Taylor Swift agrees to go to a homecoming dance with Chris after he asks her via online video.","url":"http:\/\/www.fox.com\/watch\/796751939630\/7684301568","image":"http:\/\/www.fox.com\/sites\/default\/files\/video-image-thumb\/eacx02_376_03a_0152_hires2_1280x720_801543747530.jpg","episodeNumber":"5","partOfSeason":{"@type":"TVSeason","seasonNumber":"15"},"partOfSeries":{"@type":"TVSeries","name":"Family Guy","sameAs":"http:\/\/www.imdb.com\/title\/tt0182576\/","url":"http:\/\/www.fox.com\/family-guy"},"releasedEvent":{"@type":"PublicationEvent","startDate":"2016-11-06","location":{"@type":"Country","name":"US"}},"potentialAction":[{"@type":"WatchAction","target":["http:\/\/www.fox.com\/watch\/796751939630\/7684301568?cmpid=xtra:-FOX-Goog_Watch","android-app:\/\/com.fox.now\/foxapp\/video_fullepisode\/freewheel\/796751939630?cmpid=xtra:-FOX-Goog_Watch","ios-app:\/\/571096102\/foxapp\/video_fullepisode\/freewheel\/796751939630?cmpid=xtra:-FOX-Goog_Watch"],"expectsAcceptanceOf":[{"@type":"Offer","availabilityStarts":"1969-12-31T19:00:00-05:00","availabilityEnds":"2016-11-14T03:01:00-05:00","category":"externalSubscription","eligibleRegion":[{"@type":"Country","name":"US"},{"@type":"Country","name":"AS"},{"@type":"Country","name":"GU"},{"@type":"Country","name":"PR"},{"@type":"Country","name":"VI"},{"@type":"Country","name":"UM"}]},{"@type":"Offer","availabilityStarts":"2016-11-14T03:01:00-05:00","availabilityEnds":"2017-01-09T02:59:00-05:00","category":"free","eligibleRegion":[{"@type":"Country","name":"US"},{"@type":"Country","name":"AS"},{"@type":"Country","name":"GU"},{"@type":"Country","name":"PR"},{"@type":"Country","name":"VI"},{"@type":"Country","name":"UM"}]},{"@type":"Offer","availabilityStarts":"2017-01-09T02:59:00-05:00","availabilityEnds":"2017-06-29T02:59:00-04:00","category":"externalSubscription","eligibleRegion":[{"@type":"Country","name":"US"},{"@type":"Country","name":"AS"},{"@type":"Country","name":"GU"},{"@type":"Country","name":"PR"},{"@type":"Country","name":"VI"},{"@type":"Country","name":"UM"}]}]},{"@type":"WatchAction","target":[{"@type":"EntryPoint","urlTemplate":"http:\/\/www.fox.com\/watch\/796751939630\/7684301568?cmpid=xtra:-FOX-Goog_Watch","actionPlatform":"http:\/\/schema.org\/DesktopWebPlatform","inLanguage":"en"},{"@type":"EntryPoint","urlTemplate":"android-app:\/\/com.fox.now\/foxapp\/video_fullepisode\/freewheel\/796751939630?extcmp=xtra_FOXNOW_Goog_Watch","actionPlatform":"http:\/\/schema.org\/AndroidPlatform","inLanguage":"en"},{"@type":"EntryPoint","urlTemplate":"http:\/\/www.fox.com\/watch\/796751939630\/7684301568?extcmp=xtra_FOXNOW_Goog_Watch","actionPlatform":"http:\/\/schema.org\/IOSPlatform","inLanguage":"en"}],"expectsAcceptanceOf":[{"@type":"Offer","availabilityStarts":"1969-12-31T19:00:00-05:00","availabilityEnds":"2016-11-14T03:01:00-05:00","category":"externalSubscription","eligibleRegion":[{"@type":"Country","name":"US"},{"@type":"Country","name":"AS"},{"@type":"Country","name":"
GU"},{"@type":"Country","name":"PR"},{"@type":"Country","name":"VI"},{"@type":"Country","name":"UM"}]},{"@type":"Offer","availabilityStarts":"2016-11-14T03:01:00-05:00","availabilityEnds":"2017-01-09T02:59:00-05:00","category":"free","eligibleRegion":[{"@type":"Country","name":"US"},{"@type":"Country","name":"AS"},{"@type":"Country","name":"GU"},{"@type":"Country","name":"PR"},{"@type":"Country","name":"VI"},{"@type":"Country","name":"UM"}]},{"@type":"Offer","availabilityStarts":"2017-01-09T02:59:00-05:00","availabilityEnds":"2017-06-29T02:59:00-04:00","category":"externalSubscription","eligibleRegion":[{"@type":"Country","name":"US"},{"@type":"Country","name":"AS"},{"@type":"Country","name":"GU"},{"@type":"Country","name":"PR"},{"@type":"Country","name":"VI"},{"@type":"Country","name":"UM"}]}]}],"duration":"PT21M36S"}</script><meta name="description" content="FULL EPISODE - &quot;Chris Has Got a Date, Date, Date, Date, Date&quot; - Taylor Swift agrees to go to a homecoming dance with Chris after he asks her via online video."/> ``` This issue only occurs on Fox. I do not have this issue with other major networks. </issue> <code> [start of youtube_dl/extractor/fox.py] 1 # coding: utf-8 2 from __future__ import unicode_literals 3 4 from .adobepass import AdobePassIE 5 from ..utils import ( 6 smuggle_url, 7 update_url_query, 8 ) 9 10 11 class FOXIE(AdobePassIE): 12 _VALID_URL = r'https?://(?:www\.)?fox\.com/watch/(?P<id>[0-9]+)' 13 _TEST = { 14 'url': 'http://www.fox.com/watch/255180355939/7684182528', 15 'md5': 'ebd296fcc41dd4b19f8115d8461a3165', 16 'info_dict': { 17 'id': '255180355939', 18 'ext': 'mp4', 19 'title': 'Official Trailer: Gotham', 20 'description': 'Tracing the rise of the great DC Comics Super-Villains and vigilantes, Gotham reveals an entirely new chapter that has never been told.', 21 'duration': 129, 22 'timestamp': 1400020798, 23 'upload_date': '20140513', 24 'uploader': 'NEWA-FNG-FOXCOM', 25 }, 26 'add_ie': ['ThePlatform'], 27 } 28 29 def _real_extract(self, url): 30 video_id = self._match_id(url) 31 webpage = self._download_webpage(url, video_id) 32 33 settings = self._parse_json(self._search_regex( 34 r'jQuery\.extend\(Drupal\.settings\s*,\s*({.+?})\);', 35 webpage, 'drupal settings'), video_id) 36 fox_pdk_player = settings['fox_pdk_player'] 37 release_url = fox_pdk_player['release_url'] 38 query = { 39 'mbr': 'true', 40 'switch': 'http' 41 } 42 if fox_pdk_player.get('access') == 'locked': 43 ap_p = settings['foxAdobePassProvider'] 44 rating = ap_p.get('videoRating') 45 if rating == 'n/a': 46 rating = None 47 resource = self._get_mvpd_resource('fbc-fox', None, ap_p['videoGUID'], rating) 48 query['auth'] = self._extract_mvpd_auth(url, video_id, 'fbc-fox', resource) 49 50 return { 51 '_type': 'url_transparent', 52 'ie_key': 'ThePlatform', 53 'url': smuggle_url(update_url_query(release_url, query), {'force_smil_url': True}), 54 'id': video_id, 55 } 56 [end of youtube_dl/extractor/fox.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/youtube_dl/extractor/fox.py b/youtube_dl/extractor/fox.py --- a/youtube_dl/extractor/fox.py +++ b/youtube_dl/extractor/fox.py @@ -3,6 +3,7 @@ from .adobepass import AdobePassIE from ..utils import ( + int_or_none, smuggle_url, update_url_query, ) @@ -47,9 +48,12 @@ resource = self._get_mvpd_resource('fbc-fox', None, ap_p['videoGUID'], rating) query['auth'] = self._extract_mvpd_auth(url, video_id, 'fbc-fox', resource) - return { + info = self._search_json_ld(webpage, video_id, fatal=False) + info.update({ '_type': 'url_transparent', 'ie_key': 'ThePlatform', 'url': smuggle_url(update_url_query(release_url, query), {'force_smil_url': True}), 'id': video_id, - } + }) + + return info
{"golden_diff": "diff --git a/youtube_dl/extractor/fox.py b/youtube_dl/extractor/fox.py\n--- a/youtube_dl/extractor/fox.py\n+++ b/youtube_dl/extractor/fox.py\n@@ -3,6 +3,7 @@\n \n from .adobepass import AdobePassIE\n from ..utils import (\n+ int_or_none,\n smuggle_url,\n update_url_query,\n )\n@@ -47,9 +48,12 @@\n resource = self._get_mvpd_resource('fbc-fox', None, ap_p['videoGUID'], rating)\n query['auth'] = self._extract_mvpd_auth(url, video_id, 'fbc-fox', resource)\n \n- return {\n+ info = self._search_json_ld(webpage, video_id, fatal=False)\n+ info.update({\n '_type': 'url_transparent',\n 'ie_key': 'ThePlatform',\n 'url': smuggle_url(update_url_query(release_url, query), {'force_smil_url': True}),\n 'id': video_id,\n- }\n+ })\n+\n+ return info\n", "issue": "[fox.com] unable to get season_number and episode_number\n## Please follow the guide below\r\n\r\n- You will be asked some questions and requested to provide some information, please read them **carefully** and answer honestly\r\n- Put an `x` into all the boxes [ ] relevant to your *issue* (like that [x])\r\n- Use *Preview* tab to see how your issue will actually look like\r\n\r\n---\r\n\r\n### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2017.02.21*. If it's not read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected.\r\n- [x] I've **verified** and **I assure** that I'm running youtube-dl **2017.02.21**\r\n\r\n### Before submitting an *issue* make sure you have:\r\n- [x] At least skimmed through [README](https://github.com/rg3/youtube-dl/blob/master/README.md) and **most notably** [FAQ](https://github.com/rg3/youtube-dl#faq) and [BUGS](https://github.com/rg3/youtube-dl#bugs) sections\r\n- [x] [Searched](https://github.com/rg3/youtube-dl/search?type=Issues) the bugtracker for similar issues including closed ones\r\n\r\n### What is the purpose of your *issue*?\r\n- [ ] Bug report (encountered problems with youtube-dl)\r\n- [ ] Site support request (request for adding support for a new site)\r\n- [x] Feature request (request for a new functionality)\r\n- [ ] Question\r\n- [ ] Other\r\n\r\n---\r\n\r\n### The following sections concretize particular purposed issues, you can erase any section (the contents between triple ---) not applicable to your *issue*\r\n\r\n---\r\n\r\n### If the purpose of this *issue* is a *bug report*, *site support request* or you are not completely sure provide the full verbose output as follows:\r\n\r\nAdd `-v` flag to **your command line** you run youtube-dl with, copy the **whole** output and insert it here. 
It should look similar to one below (replace it with **your** log inserted between triple ```):\r\n```\r\n[debug] System config: []\r\n[debug] User config: []\r\n[debug] Custom config: []\r\n[debug] Command-line args: ['--ap-mso', 'DTV', '--ap-username', 'PRIVATE', '--ap-password', 'PRIVATE', '-v', '--get-filename', '-o', '%(season)s s%(season_number)se%(episode_number)s - %(title)s.%(ext)s', 'http://www.fox.com/watch/796751939630/7684301568']\r\n[debug] Encodings: locale cp1252, fs mbcs, out cp437, pref cp1252\r\n[debug] youtube-dl version 2017.02.17\r\n[debug] Python version 3.4.4 - Windows-10-10.0.14393\r\n[debug] exe versions: ffmpeg N-82092-g89ec4ad, ffprobe N-82092-g89ec4ad, rtmpdump 2.4\r\n[debug] Proxy map: {}\r\nNA sNAeNA - Chris Has Got a Date, Date, Date, Date, Date.mp4\r\n```\r\n\r\n---\r\n\r\n### If the purpose of this *issue* is a *site support request* please provide all kinds of example URLs support for which should be included (replace following example URLs by **yours**):\r\n- Single video: http://www.fox.com/watch/868757571821/7684301568\r\n- Single video: http://www.fox.com/watch/853172291669/7684301568\r\n- Single video: http://www.fox.com/watch/835908163744/7684301568\r\n- Single video: http://www.fox.com/watch/796751939630/7684301568\r\n\r\nNote that **youtube-dl does not support sites dedicated to [copyright infringement](https://github.com/rg3/youtube-dl#can-you-add-support-for-this-anime-video-site-or-site-which-shows-current-movies-for-free)**. In order for site support request to be accepted all provided example URLs should not violate any copyrights.\r\n\r\n\r\n\r\n---\r\n\r\n### Description of your *issue*, suggested solution and other information\r\n\r\nUnable to get season, season_number, and episode_number. Output results in NA for each field. I am able to grab the title. The links I provided clearly show the season number and episode number on the web page. I think the info is contained in the following script example from the last link I provided above. 
\r\n\r\n```\r\n<script type=\"application/ld+json\">{\"@context\":\"http:\\/\\/schema.org\",\"@id\":\"801550915931\",\"@type\":\"TVEpisode\",\"name\":\"Chris Has Got a Date, Date, Date, Date, Date\",\"description\":\"Taylor Swift agrees to go to a homecoming dance with Chris after he asks her via online video.\",\"url\":\"http:\\/\\/www.fox.com\\/watch\\/796751939630\\/7684301568\",\"image\":\"http:\\/\\/www.fox.com\\/sites\\/default\\/files\\/video-image-thumb\\/eacx02_376_03a_0152_hires2_1280x720_801543747530.jpg\",\"episodeNumber\":\"5\",\"partOfSeason\":{\"@type\":\"TVSeason\",\"seasonNumber\":\"15\"},\"partOfSeries\":{\"@type\":\"TVSeries\",\"name\":\"Family Guy\",\"sameAs\":\"http:\\/\\/www.imdb.com\\/title\\/tt0182576\\/\",\"url\":\"http:\\/\\/www.fox.com\\/family-guy\"},\"releasedEvent\":{\"@type\":\"PublicationEvent\",\"startDate\":\"2016-11-06\",\"location\":{\"@type\":\"Country\",\"name\":\"US\"}},\"potentialAction\":[{\"@type\":\"WatchAction\",\"target\":[\"http:\\/\\/www.fox.com\\/watch\\/796751939630\\/7684301568?cmpid=xtra:-FOX-Goog_Watch\",\"android-app:\\/\\/com.fox.now\\/foxapp\\/video_fullepisode\\/freewheel\\/796751939630?cmpid=xtra:-FOX-Goog_Watch\",\"ios-app:\\/\\/571096102\\/foxapp\\/video_fullepisode\\/freewheel\\/796751939630?cmpid=xtra:-FOX-Goog_Watch\"],\"expectsAcceptanceOf\":[{\"@type\":\"Offer\",\"availabilityStarts\":\"1969-12-31T19:00:00-05:00\",\"availabilityEnds\":\"2016-11-14T03:01:00-05:00\",\"category\":\"externalSubscription\",\"eligibleRegion\":[{\"@type\":\"Country\",\"name\":\"US\"},{\"@type\":\"Country\",\"name\":\"AS\"},{\"@type\":\"Country\",\"name\":\"GU\"},{\"@type\":\"Country\",\"name\":\"PR\"},{\"@type\":\"Country\",\"name\":\"VI\"},{\"@type\":\"Country\",\"name\":\"UM\"}]},{\"@type\":\"Offer\",\"availabilityStarts\":\"2016-11-14T03:01:00-05:00\",\"availabilityEnds\":\"2017-01-09T02:59:00-05:00\",\"category\":\"free\",\"eligibleRegion\":[{\"@type\":\"Country\",\"name\":\"US\"},{\"@type\":\"Country\",\"name\":\"AS\"},{\"@type\":\"Country\",\"name\":\"GU\"},{\"@type\":\"Country\",\"name\":\"PR\"},{\"@type\":\"Country\",\"name\":\"VI\"},{\"@type\":\"Country\",\"name\":\"UM\"}]},{\"@type\":\"Offer\",\"availabilityStarts\":\"2017-01-09T02:59:00-05:00\",\"availabilityEnds\":\"2017-06-29T02:59:00-04:00\",\"category\":\"externalSubscription\",\"eligibleRegion\":[{\"@type\":\"Country\",\"name\":\"US\"},{\"@type\":\"Country\",\"name\":\"AS\"},{\"@type\":\"Country\",\"name\":\"GU\"},{\"@type\":\"Country\",\"name\":\"PR\"},{\"@type\":\"Country\",\"name\":\"VI\"},{\"@type\":\"Country\",\"name\":\"UM\"}]}]},{\"@type\":\"WatchAction\",\"target\":[{\"@type\":\"EntryPoint\",\"urlTemplate\":\"http:\\/\\/www.fox.com\\/watch\\/796751939630\\/7684301568?cmpid=xtra:-FOX-Goog_Watch\",\"actionPlatform\":\"http:\\/\\/schema.org\\/DesktopWebPlatform\",\"inLanguage\":\"en\"},{\"@type\":\"EntryPoint\",\"urlTemplate\":\"android-app:\\/\\/com.fox.now\\/foxapp\\/video_fullepisode\\/freewheel\\/796751939630?extcmp=xtra_FOXNOW_Goog_Watch\",\"actionPlatform\":\"http:\\/\\/schema.org\\/AndroidPlatform\",\"inLanguage\":\"en\"},{\"@type\":\"EntryPoint\",\"urlTemplate\":\"http:\\/\\/www.fox.com\\/watch\\/796751939630\\/7684301568?extcmp=xtra_FOXNOW_Goog_Watch\",\"actionPlatform\":\"http:\\/\\/schema.org\\/IOSPlatform\",\"inLanguage\":\"en\"}],\"expectsAcceptanceOf\":[{\"@type\":\"Offer\",\"availabilityStarts\":\"1969-12-31T19:00:00-05:00\",\"availabilityEnds\":\"2016-11-14T03:01:00-05:00\",\"category\":\"externalSubscription\",\"eligibleRegion\":[{\"@type\":\"Country\",\"name\":\"
US\"},{\"@type\":\"Country\",\"name\":\"AS\"},{\"@type\":\"Country\",\"name\":\"GU\"},{\"@type\":\"Country\",\"name\":\"PR\"},{\"@type\":\"Country\",\"name\":\"VI\"},{\"@type\":\"Country\",\"name\":\"UM\"}]},{\"@type\":\"Offer\",\"availabilityStarts\":\"2016-11-14T03:01:00-05:00\",\"availabilityEnds\":\"2017-01-09T02:59:00-05:00\",\"category\":\"free\",\"eligibleRegion\":[{\"@type\":\"Country\",\"name\":\"US\"},{\"@type\":\"Country\",\"name\":\"AS\"},{\"@type\":\"Country\",\"name\":\"GU\"},{\"@type\":\"Country\",\"name\":\"PR\"},{\"@type\":\"Country\",\"name\":\"VI\"},{\"@type\":\"Country\",\"name\":\"UM\"}]},{\"@type\":\"Offer\",\"availabilityStarts\":\"2017-01-09T02:59:00-05:00\",\"availabilityEnds\":\"2017-06-29T02:59:00-04:00\",\"category\":\"externalSubscription\",\"eligibleRegion\":[{\"@type\":\"Country\",\"name\":\"US\"},{\"@type\":\"Country\",\"name\":\"AS\"},{\"@type\":\"Country\",\"name\":\"GU\"},{\"@type\":\"Country\",\"name\":\"PR\"},{\"@type\":\"Country\",\"name\":\"VI\"},{\"@type\":\"Country\",\"name\":\"UM\"}]}]}],\"duration\":\"PT21M36S\"}</script><meta name=\"description\" content=\"FULL EPISODE - &quot;Chris Has Got a Date, Date, Date, Date, Date&quot; - Taylor Swift agrees to go to a homecoming dance with Chris after he asks her via online video.\"/>\r\n```\r\n\r\nThis issue only occurs on Fox. I do not have this issue with other major networks. \n", "before_files": [{"content": "# coding: utf-8\nfrom __future__ import unicode_literals\n\nfrom .adobepass import AdobePassIE\nfrom ..utils import (\n smuggle_url,\n update_url_query,\n)\n\n\nclass FOXIE(AdobePassIE):\n _VALID_URL = r'https?://(?:www\\.)?fox\\.com/watch/(?P<id>[0-9]+)'\n _TEST = {\n 'url': 'http://www.fox.com/watch/255180355939/7684182528',\n 'md5': 'ebd296fcc41dd4b19f8115d8461a3165',\n 'info_dict': {\n 'id': '255180355939',\n 'ext': 'mp4',\n 'title': 'Official Trailer: Gotham',\n 'description': 'Tracing the rise of the great DC Comics Super-Villains and vigilantes, Gotham reveals an entirely new chapter that has never been told.',\n 'duration': 129,\n 'timestamp': 1400020798,\n 'upload_date': '20140513',\n 'uploader': 'NEWA-FNG-FOXCOM',\n },\n 'add_ie': ['ThePlatform'],\n }\n\n def _real_extract(self, url):\n video_id = self._match_id(url)\n webpage = self._download_webpage(url, video_id)\n\n settings = self._parse_json(self._search_regex(\n r'jQuery\\.extend\\(Drupal\\.settings\\s*,\\s*({.+?})\\);',\n webpage, 'drupal settings'), video_id)\n fox_pdk_player = settings['fox_pdk_player']\n release_url = fox_pdk_player['release_url']\n query = {\n 'mbr': 'true',\n 'switch': 'http'\n }\n if fox_pdk_player.get('access') == 'locked':\n ap_p = settings['foxAdobePassProvider']\n rating = ap_p.get('videoRating')\n if rating == 'n/a':\n rating = None\n resource = self._get_mvpd_resource('fbc-fox', None, ap_p['videoGUID'], rating)\n query['auth'] = self._extract_mvpd_auth(url, video_id, 'fbc-fox', resource)\n\n return {\n '_type': 'url_transparent',\n 'ie_key': 'ThePlatform',\n 'url': smuggle_url(update_url_query(release_url, query), {'force_smil_url': True}),\n 'id': video_id,\n }\n", "path": "youtube_dl/extractor/fox.py"}]}
3,876
246
gh_patches_debug_2584
rasdani/github-patches
git_diff
kivy__python-for-android-2180
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Issues introduced by PR #2113 (SDL2) As said on Discord #dev channel yesterday, PR #2113 introduces a lot of blocking issues. These are the results of the tests done by me, @AndreMiras and @opacam : - `sdl2==2.0.10` have issues that have been solved by the SDL2 team, so it needs to be bumped to `2.0.12`. - `sdl2==2.0.12` works but create freezes during runtime. - These freezes are definitely related to the new `SDL_LockMutex` / `SDL_UnlockMutex` mechanism they added for concurrency issues. - Commenting `SDL_LockMutex` on `Touch` related events fixes the freeze issue for non-fullscreen apps. - On fullscreen apps, the patch it's also needed on `Resize, .. etc` events. I'm providing an attached patch that fixes the issues on top of `2.0.12`, btw seems not a good idea to do that, so it needs some more investigation: [disable_mutex.txt](https://github.com/kivy/python-for-android/files/4569870/disable_mutex.txt) </issue> <code> [start of pythonforandroid/recipes/sdl2/__init__.py] 1 from pythonforandroid.recipe import BootstrapNDKRecipe 2 from pythonforandroid.toolchain import current_directory, shprint 3 import sh 4 5 6 class LibSDL2Recipe(BootstrapNDKRecipe): 7 version = "2.0.10" 8 url = "https://www.libsdl.org/release/SDL2-{version}.zip" 9 md5sum = "6b2e9a4a2faba4ff277062cf669724f4" 10 11 dir_name = 'SDL' 12 13 depends = ['sdl2_image', 'sdl2_mixer', 'sdl2_ttf'] 14 15 def get_recipe_env(self, arch=None, with_flags_in_cc=True, with_python=True): 16 env = super().get_recipe_env( 17 arch=arch, with_flags_in_cc=with_flags_in_cc, with_python=with_python) 18 env['APP_ALLOW_MISSING_DEPS'] = 'true' 19 return env 20 21 def build_arch(self, arch): 22 env = self.get_recipe_env(arch) 23 24 with current_directory(self.get_jni_dir()): 25 shprint( 26 sh.ndk_build, 27 "V=1", 28 "NDK_DEBUG=" + ("1" if self.ctx.build_as_debuggable else "0"), 29 _env=env 30 ) 31 32 33 recipe = LibSDL2Recipe() 34 [end of pythonforandroid/recipes/sdl2/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pythonforandroid/recipes/sdl2/__init__.py b/pythonforandroid/recipes/sdl2/__init__.py --- a/pythonforandroid/recipes/sdl2/__init__.py +++ b/pythonforandroid/recipes/sdl2/__init__.py @@ -4,9 +4,9 @@ class LibSDL2Recipe(BootstrapNDKRecipe): - version = "2.0.10" - url = "https://www.libsdl.org/release/SDL2-{version}.zip" - md5sum = "6b2e9a4a2faba4ff277062cf669724f4" + version = "2.0.9" + url = "https://www.libsdl.org/release/SDL2-{version}.tar.gz" + md5sum = 'f2ecfba915c54f7200f504d8b48a5dfe' dir_name = 'SDL'
{"golden_diff": "diff --git a/pythonforandroid/recipes/sdl2/__init__.py b/pythonforandroid/recipes/sdl2/__init__.py\n--- a/pythonforandroid/recipes/sdl2/__init__.py\n+++ b/pythonforandroid/recipes/sdl2/__init__.py\n@@ -4,9 +4,9 @@\n \n \n class LibSDL2Recipe(BootstrapNDKRecipe):\n- version = \"2.0.10\"\n- url = \"https://www.libsdl.org/release/SDL2-{version}.zip\"\n- md5sum = \"6b2e9a4a2faba4ff277062cf669724f4\"\n+ version = \"2.0.9\"\n+ url = \"https://www.libsdl.org/release/SDL2-{version}.tar.gz\"\n+ md5sum = 'f2ecfba915c54f7200f504d8b48a5dfe'\n \n dir_name = 'SDL'\n", "issue": "Issues introduced by PR #2113 (SDL2)\nAs said on Discord #dev channel yesterday, PR #2113 introduces a lot of blocking issues.\r\n\r\nThese are the results of the tests done by me, @AndreMiras and @opacam :\r\n\r\n- `sdl2==2.0.10` have issues that have been solved by the SDL2 team, so it needs to be bumped to `2.0.12`.\r\n\r\n- `sdl2==2.0.12` works but create freezes during runtime.\r\n\r\n- These freezes are definitely related to the new `SDL_LockMutex` / `SDL_UnlockMutex` mechanism they added for concurrency issues.\r\n\r\n- Commenting `SDL_LockMutex` on `Touch` related events fixes the freeze issue for non-fullscreen apps.\r\n\r\n- On fullscreen apps, the patch it's also needed on `Resize, .. etc` events.\r\n\r\nI'm providing an attached patch that fixes the issues on top of `2.0.12`, btw seems not a good idea to do that, so it needs some more investigation:\r\n\r\n[disable_mutex.txt](https://github.com/kivy/python-for-android/files/4569870/disable_mutex.txt)\r\n\r\n\n", "before_files": [{"content": "from pythonforandroid.recipe import BootstrapNDKRecipe\nfrom pythonforandroid.toolchain import current_directory, shprint\nimport sh\n\n\nclass LibSDL2Recipe(BootstrapNDKRecipe):\n version = \"2.0.10\"\n url = \"https://www.libsdl.org/release/SDL2-{version}.zip\"\n md5sum = \"6b2e9a4a2faba4ff277062cf669724f4\"\n\n dir_name = 'SDL'\n\n depends = ['sdl2_image', 'sdl2_mixer', 'sdl2_ttf']\n\n def get_recipe_env(self, arch=None, with_flags_in_cc=True, with_python=True):\n env = super().get_recipe_env(\n arch=arch, with_flags_in_cc=with_flags_in_cc, with_python=with_python)\n env['APP_ALLOW_MISSING_DEPS'] = 'true'\n return env\n\n def build_arch(self, arch):\n env = self.get_recipe_env(arch)\n\n with current_directory(self.get_jni_dir()):\n shprint(\n sh.ndk_build,\n \"V=1\",\n \"NDK_DEBUG=\" + (\"1\" if self.ctx.build_as_debuggable else \"0\"),\n _env=env\n )\n\n\nrecipe = LibSDL2Recipe()\n", "path": "pythonforandroid/recipes/sdl2/__init__.py"}]}
1,156
227
gh_patches_debug_1952
rasdani/github-patches
git_diff
docker__docker-py-1709
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> .dockerignore does not work with patterns begin with slash docker version: ``` docker -v Docker version 17.03.1-ce, build c6d412e ``` reproduce: ``` mkdir app cd app mkdir foo touch foo/bar echo '/foo/bar' > .dockerignore printf 'FROM alpine:3.1\nWORKDIR /app\nCOPY . .\n' > Dockerfile docker build -t app . docker run --rm app find foo ``` output: ``` foo foo/bar ``` It seems the statement from [the official document](https://docs.docker.com/engine/reference/builder/#dockerignore-file) below is not correct: > For example, the patterns `/foo/bar` and `foo/bar` both exclude a file or directory named `bar` in the `foo` subdirectory of `PATH` or in the root of the git repository located at `URL`. We should either amend the document or fix the bug. </issue> <code> [start of docker/utils/build.py] 1 import os 2 3 from ..constants import IS_WINDOWS_PLATFORM 4 from .fnmatch import fnmatch 5 from .utils import create_archive 6 7 8 def tar(path, exclude=None, dockerfile=None, fileobj=None, gzip=False): 9 root = os.path.abspath(path) 10 exclude = exclude or [] 11 12 return create_archive( 13 files=sorted(exclude_paths(root, exclude, dockerfile=dockerfile)), 14 root=root, fileobj=fileobj, gzip=gzip 15 ) 16 17 18 def exclude_paths(root, patterns, dockerfile=None): 19 """ 20 Given a root directory path and a list of .dockerignore patterns, return 21 an iterator of all paths (both regular files and directories) in the root 22 directory that do *not* match any of the patterns. 23 24 All paths returned are relative to the root. 25 """ 26 if dockerfile is None: 27 dockerfile = 'Dockerfile' 28 29 exceptions = [p for p in patterns if p.startswith('!')] 30 31 include_patterns = [p[1:] for p in exceptions] 32 include_patterns += [dockerfile, '.dockerignore'] 33 34 exclude_patterns = list(set(patterns) - set(exceptions)) 35 36 paths = get_paths(root, exclude_patterns, include_patterns, 37 has_exceptions=len(exceptions) > 0) 38 39 return set(paths).union( 40 # If the Dockerfile is in a subdirectory that is excluded, get_paths 41 # will not descend into it and the file will be skipped. This ensures 42 # it doesn't happen. 43 set([dockerfile.replace('/', os.path.sep)]) 44 if os.path.exists(os.path.join(root, dockerfile)) else set() 45 ) 46 47 48 def should_include(path, exclude_patterns, include_patterns): 49 """ 50 Given a path, a list of exclude patterns, and a list of inclusion patterns: 51 52 1. Returns True if the path doesn't match any exclusion pattern 53 2. Returns False if the path matches an exclusion pattern and doesn't match 54 an inclusion pattern 55 3. Returns true if the path matches an exclusion pattern and matches an 56 inclusion pattern 57 """ 58 for pattern in exclude_patterns: 59 if match_path(path, pattern): 60 for pattern in include_patterns: 61 if match_path(path, pattern): 62 return True 63 return False 64 return True 65 66 67 def should_check_directory(directory_path, exclude_patterns, include_patterns): 68 """ 69 Given a directory path, a list of exclude patterns, and a list of inclusion 70 patterns: 71 72 1. Returns True if the directory path should be included according to 73 should_include. 74 2. Returns True if the directory path is the prefix for an inclusion 75 pattern 76 3. Returns False otherwise 77 """ 78 79 # To account for exception rules, check directories if their path is a 80 # a prefix to an inclusion pattern. 
This logic conforms with the current 81 # docker logic (2016-10-27): 82 # https://github.com/docker/docker/blob/bc52939b0455116ab8e0da67869ec81c1a1c3e2c/pkg/archive/archive.go#L640-L671 83 84 def normalize_path(path): 85 return path.replace(os.path.sep, '/') 86 87 path_with_slash = normalize_path(directory_path) + '/' 88 possible_child_patterns = [ 89 pattern for pattern in map(normalize_path, include_patterns) 90 if (pattern + '/').startswith(path_with_slash) 91 ] 92 directory_included = should_include( 93 directory_path, exclude_patterns, include_patterns 94 ) 95 return directory_included or len(possible_child_patterns) > 0 96 97 98 def get_paths(root, exclude_patterns, include_patterns, has_exceptions=False): 99 paths = [] 100 101 for parent, dirs, files in os.walk(root, topdown=True, followlinks=False): 102 parent = os.path.relpath(parent, root) 103 if parent == '.': 104 parent = '' 105 106 # Remove excluded patterns from the list of directories to traverse 107 # by mutating the dirs we're iterating over. 108 # This looks strange, but is considered the correct way to skip 109 # traversal. See https://docs.python.org/2/library/os.html#os.walk 110 dirs[:] = [ 111 d for d in dirs if should_check_directory( 112 os.path.join(parent, d), exclude_patterns, include_patterns 113 ) 114 ] 115 116 for path in dirs: 117 if should_include(os.path.join(parent, path), 118 exclude_patterns, include_patterns): 119 paths.append(os.path.join(parent, path)) 120 121 for path in files: 122 if should_include(os.path.join(parent, path), 123 exclude_patterns, include_patterns): 124 paths.append(os.path.join(parent, path)) 125 126 return paths 127 128 129 def match_path(path, pattern): 130 pattern = pattern.rstrip('/' + os.path.sep) 131 if pattern: 132 pattern = os.path.relpath(pattern) 133 134 pattern_components = pattern.split(os.path.sep) 135 if len(pattern_components) == 1 and IS_WINDOWS_PLATFORM: 136 pattern_components = pattern.split('/') 137 138 if '**' not in pattern: 139 path_components = path.split(os.path.sep)[:len(pattern_components)] 140 else: 141 path_components = path.split(os.path.sep) 142 return fnmatch('/'.join(path_components), '/'.join(pattern_components)) 143 [end of docker/utils/build.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/docker/utils/build.py b/docker/utils/build.py --- a/docker/utils/build.py +++ b/docker/utils/build.py @@ -26,6 +26,7 @@ if dockerfile is None: dockerfile = 'Dockerfile' + patterns = [p.lstrip('/') for p in patterns] exceptions = [p for p in patterns if p.startswith('!')] include_patterns = [p[1:] for p in exceptions]
{"golden_diff": "diff --git a/docker/utils/build.py b/docker/utils/build.py\n--- a/docker/utils/build.py\n+++ b/docker/utils/build.py\n@@ -26,6 +26,7 @@\n if dockerfile is None:\n dockerfile = 'Dockerfile'\n \n+ patterns = [p.lstrip('/') for p in patterns]\n exceptions = [p for p in patterns if p.startswith('!')]\n \n include_patterns = [p[1:] for p in exceptions]\n", "issue": ".dockerignore does not work with patterns begin with slash\ndocker version:\r\n\r\n```\r\ndocker -v\r\nDocker version 17.03.1-ce, build c6d412e\r\n```\r\nreproduce:\r\n\r\n```\r\nmkdir app\r\ncd app\r\nmkdir foo\r\ntouch foo/bar\r\necho '/foo/bar' > .dockerignore\r\nprintf 'FROM alpine:3.1\\nWORKDIR /app\\nCOPY . .\\n' > Dockerfile\r\ndocker build -t app .\r\ndocker run --rm app find foo\r\n```\r\n\r\noutput:\r\n\r\n```\r\nfoo\r\nfoo/bar\r\n```\r\n\r\nIt seems the statement from [the official document](https://docs.docker.com/engine/reference/builder/#dockerignore-file) below is not correct:\r\n\r\n> For example, the patterns `/foo/bar` and `foo/bar` both exclude a file or directory named `bar` in the `foo` subdirectory of `PATH` or in the root of the git repository located at `URL`.\r\n\r\nWe should either amend the document or fix the bug.\r\n\n", "before_files": [{"content": "import os\n\nfrom ..constants import IS_WINDOWS_PLATFORM\nfrom .fnmatch import fnmatch\nfrom .utils import create_archive\n\n\ndef tar(path, exclude=None, dockerfile=None, fileobj=None, gzip=False):\n root = os.path.abspath(path)\n exclude = exclude or []\n\n return create_archive(\n files=sorted(exclude_paths(root, exclude, dockerfile=dockerfile)),\n root=root, fileobj=fileobj, gzip=gzip\n )\n\n\ndef exclude_paths(root, patterns, dockerfile=None):\n \"\"\"\n Given a root directory path and a list of .dockerignore patterns, return\n an iterator of all paths (both regular files and directories) in the root\n directory that do *not* match any of the patterns.\n\n All paths returned are relative to the root.\n \"\"\"\n if dockerfile is None:\n dockerfile = 'Dockerfile'\n\n exceptions = [p for p in patterns if p.startswith('!')]\n\n include_patterns = [p[1:] for p in exceptions]\n include_patterns += [dockerfile, '.dockerignore']\n\n exclude_patterns = list(set(patterns) - set(exceptions))\n\n paths = get_paths(root, exclude_patterns, include_patterns,\n has_exceptions=len(exceptions) > 0)\n\n return set(paths).union(\n # If the Dockerfile is in a subdirectory that is excluded, get_paths\n # will not descend into it and the file will be skipped. This ensures\n # it doesn't happen.\n set([dockerfile.replace('/', os.path.sep)])\n if os.path.exists(os.path.join(root, dockerfile)) else set()\n )\n\n\ndef should_include(path, exclude_patterns, include_patterns):\n \"\"\"\n Given a path, a list of exclude patterns, and a list of inclusion patterns:\n\n 1. Returns True if the path doesn't match any exclusion pattern\n 2. Returns False if the path matches an exclusion pattern and doesn't match\n an inclusion pattern\n 3. Returns true if the path matches an exclusion pattern and matches an\n inclusion pattern\n \"\"\"\n for pattern in exclude_patterns:\n if match_path(path, pattern):\n for pattern in include_patterns:\n if match_path(path, pattern):\n return True\n return False\n return True\n\n\ndef should_check_directory(directory_path, exclude_patterns, include_patterns):\n \"\"\"\n Given a directory path, a list of exclude patterns, and a list of inclusion\n patterns:\n\n 1. 
Returns True if the directory path should be included according to\n should_include.\n 2. Returns True if the directory path is the prefix for an inclusion\n pattern\n 3. Returns False otherwise\n \"\"\"\n\n # To account for exception rules, check directories if their path is a\n # a prefix to an inclusion pattern. This logic conforms with the current\n # docker logic (2016-10-27):\n # https://github.com/docker/docker/blob/bc52939b0455116ab8e0da67869ec81c1a1c3e2c/pkg/archive/archive.go#L640-L671\n\n def normalize_path(path):\n return path.replace(os.path.sep, '/')\n\n path_with_slash = normalize_path(directory_path) + '/'\n possible_child_patterns = [\n pattern for pattern in map(normalize_path, include_patterns)\n if (pattern + '/').startswith(path_with_slash)\n ]\n directory_included = should_include(\n directory_path, exclude_patterns, include_patterns\n )\n return directory_included or len(possible_child_patterns) > 0\n\n\ndef get_paths(root, exclude_patterns, include_patterns, has_exceptions=False):\n paths = []\n\n for parent, dirs, files in os.walk(root, topdown=True, followlinks=False):\n parent = os.path.relpath(parent, root)\n if parent == '.':\n parent = ''\n\n # Remove excluded patterns from the list of directories to traverse\n # by mutating the dirs we're iterating over.\n # This looks strange, but is considered the correct way to skip\n # traversal. See https://docs.python.org/2/library/os.html#os.walk\n dirs[:] = [\n d for d in dirs if should_check_directory(\n os.path.join(parent, d), exclude_patterns, include_patterns\n )\n ]\n\n for path in dirs:\n if should_include(os.path.join(parent, path),\n exclude_patterns, include_patterns):\n paths.append(os.path.join(parent, path))\n\n for path in files:\n if should_include(os.path.join(parent, path),\n exclude_patterns, include_patterns):\n paths.append(os.path.join(parent, path))\n\n return paths\n\n\ndef match_path(path, pattern):\n pattern = pattern.rstrip('/' + os.path.sep)\n if pattern:\n pattern = os.path.relpath(pattern)\n\n pattern_components = pattern.split(os.path.sep)\n if len(pattern_components) == 1 and IS_WINDOWS_PLATFORM:\n pattern_components = pattern.split('/')\n\n if '**' not in pattern:\n path_components = path.split(os.path.sep)[:len(pattern_components)]\n else:\n path_components = path.split(os.path.sep)\n return fnmatch('/'.join(path_components), '/'.join(pattern_components))\n", "path": "docker/utils/build.py"}]}
2,208
101
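The golden diff above resolves the documented discrepancy by stripping a leading `/` from every `.dockerignore` pattern before exclusions and exceptions are computed, so `/foo/bar` and `foo/bar` exclude the same paths. A minimal, self-contained sketch of that normalization step follows; `split_ignore_patterns` and the sample patterns are illustrative, not docker-py's API.

```python
from fnmatch import fnmatch

def split_ignore_patterns(patterns):
    # Leading slashes carry no meaning in .dockerignore: "/foo/bar" and
    # "foo/bar" must exclude the same paths, so normalize them away first.
    patterns = [p.lstrip('/') for p in patterns]
    exceptions = [p for p in patterns if p.startswith('!')]
    include_patterns = [p[1:] for p in exceptions]
    exclude_patterns = list(set(patterns) - set(exceptions))
    return exclude_patterns, include_patterns

exclude, include = split_ignore_patterns(['/foo/bar', '!foo/keep.txt'])
print(exclude)                         # ['foo/bar']
print(include)                         # ['foo/keep.txt']
print(fnmatch('foo/bar', exclude[0]))  # True -> foo/bar stays out of the build context
```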
gh_patches_debug_16086
rasdani/github-patches
git_diff
nvaccess__nvda-13382
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Recent alpha versions crash Notepad++ versions 8.2.x and below <!-- Please read the text in this edit field before filling it in. Please thoroughly read NVDA's wiki article on how to fill in this template, including how to provide the required files. Issues may be closed if the required information is not present. https://github.com/nvaccess/nvda/blob/master/devDocs/githubIssueTemplateExplanationAndExamples.md Please also note that the NVDA project has a Citizen and Contributor Code of Conduct which can be found at https://github.com/nvaccess/nvda/blob/master/CODE_OF_CONDUCT.md. NV Access expects that all contributors and other community members read and abide by the rules set out in this document while participating or contributing to this project. This includes creating or commenting on issues and pull requests. Each of the questions and sections below start with multiple hash symbols (#). Place your answers and information on the blank line below each question. --> ### Steps to reproduce: 1. Use NVDA alpha version nvda_snapshot_alpha-24771,a1a6598b.exe or above. 2. Open Notepad++ v8.1.x or v8.2.x 64-bit. 3.Press up/down a few times. ### Actual behavior: Notepad++ crashes. No error message is printed in NVDA log. ### Expected behavior: Notepad++ should not crash. ### System configuration #### NVDA installed/portable/running from source: Portable alpha nvda_snapshot_alpha-24771,a1a6598b.exe #### NVDA version: nvda_snapshot_alpha-24771,a1a6598b.exe #### Windows version: Reproduced on both Windows 10 and 11 64-bit. #### Name and version of other software in use when reproducing the issue: Reproduced using either of these two versions (64-bit in both cases): - Notepad++ v8.1.9.3: https://github.com/notepad-plus-plus/notepad-plus-plus/releases/download/v8.1.9.3/npp.8.1.9.3.portable.x64.zip - Notepad++ v8.2.1: https://github.com/notepad-plus-plus/notepad-plus-plus/releases/download/v8.2.1/npp.8.2.1.portable.x64.zip #### Other information about your system: N/A ### Other questions #### Does the issue still occur after restarting your computer? Yes #### Have you tried any other versions of NVDA? If so, please report their behaviors. Identified that commit a1a6598b19f71bd108bef64373dd35c89c14cd4a is causing this, therefore cc: @lukaszgo1. #### If NVDA add-ons are disabled, is your problem still occurring? Yes #### Does the issue still occur after you run the COM Registration Fixing Tool in NVDA's tools menu? Yes </issue> <code> [start of source/appModules/notepadPlusPlus.py] 1 # A part of NonVisual Desktop Access (NVDA) 2 # Copyright (C) 2022 NV Access Limited, Łukasz Golonka 3 # This file may be used under the terms of the GNU General Public License, version 2 or later. 4 # For more details see: https://www.gnu.org/licenses/gpl-2.0.html 5 6 """AppModule for Notepad++. 7 Do not rename! The executable file for Notepad++ is named `notepad++` and `+` is not a valid character 8 in Python's import statements. 9 This module is mapped to the right binary separately 10 and the current name makes it possible to expose it from `nvdaBuiltin` for add-on developers. 11 """ 12 13 import ctypes 14 15 import appModuleHandler 16 import NVDAObjects.window.scintilla as ScintillaBase 17 18 19 class CharacterRangeStructLongLong(ctypes.Structure): 20 """By default character ranges in Scintilla are represented by longs. 
21 However long is not big enough for files over 2 GB, 22 therefore in 64-bit builds of Notepad++ 8.3 and later 23 these ranges are represented by longlong. 24 """ 25 _fields_ = [ 26 ('cpMin', ctypes.c_longlong), 27 ('cpMax', ctypes.c_longlong), 28 ] 29 30 31 class ScintillaTextInfoNpp83(ScintillaBase.ScintillaTextInfo): 32 """Text info for 64-bit builds of Notepad++ 8.3 and later. 33 """ 34 35 class TextRangeStruct(ctypes.Structure): 36 _fields_ = [ 37 ('chrg', CharacterRangeStructLongLong), 38 ('lpstrText', ctypes.c_char_p), 39 ] 40 41 42 class NppEdit(ScintillaBase.Scintilla): 43 44 name = None # The name of the editor is not useful. 45 46 def _get_TextInfo(self): 47 if self.appModule.is64BitProcess: 48 appVerMajor, appVerMinor, *__ = self.appModule.productVersion.split(".") 49 if int(appVerMajor) >= 8 and int(appVerMinor) >= 3: 50 return ScintillaTextInfoNpp83 51 return super().TextInfo 52 53 54 class AppModule(appModuleHandler.AppModule): 55 56 def chooseNVDAObjectOverlayClasses(self, obj, clsList): 57 if obj.windowClassName == "Scintilla" and obj.windowControlID == 0: 58 clsList.insert(0, NppEdit) 59 [end of source/appModules/notepadPlusPlus.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/source/appModules/notepadPlusPlus.py b/source/appModules/notepadPlusPlus.py --- a/source/appModules/notepadPlusPlus.py +++ b/source/appModules/notepadPlusPlus.py @@ -46,7 +46,12 @@ def _get_TextInfo(self): if self.appModule.is64BitProcess: appVerMajor, appVerMinor, *__ = self.appModule.productVersion.split(".") - if int(appVerMajor) >= 8 and int(appVerMinor) >= 3: + # When retrieving the version, Notepad++ concatenates + # minor, patch, build in major.minor.patch.build to the form of major.minor + # https://github.com/notepad-plus-plus/npp-usermanual/blob/master/content/docs/plugin-communication.md#nppm_getnppversion + # e.g. '8.3' for '8.3', '8.21' for '8.2.1' and '8.192' for '8.1.9.2'. + # Therefore, only use the first digit of the minor version to match against version 8.3 or later. + if int(appVerMajor) >= 8 and int(appVerMinor[0]) >= 3: return ScintillaTextInfoNpp83 return super().TextInfo
{"golden_diff": "diff --git a/source/appModules/notepadPlusPlus.py b/source/appModules/notepadPlusPlus.py\n--- a/source/appModules/notepadPlusPlus.py\n+++ b/source/appModules/notepadPlusPlus.py\n@@ -46,7 +46,12 @@\n \tdef _get_TextInfo(self):\r\n \t\tif self.appModule.is64BitProcess:\r\n \t\t\tappVerMajor, appVerMinor, *__ = self.appModule.productVersion.split(\".\")\r\n-\t\t\tif int(appVerMajor) >= 8 and int(appVerMinor) >= 3:\r\n+\t\t\t# When retrieving the version, Notepad++ concatenates\r\n+\t\t\t# minor, patch, build in major.minor.patch.build to the form of major.minor\r\n+\t\t\t# https://github.com/notepad-plus-plus/npp-usermanual/blob/master/content/docs/plugin-communication.md#nppm_getnppversion\r\n+\t\t\t# e.g. '8.3' for '8.3', '8.21' for '8.2.1' and '8.192' for '8.1.9.2'.\r\n+\t\t\t# Therefore, only use the first digit of the minor version to match against version 8.3 or later.\r\n+\t\t\tif int(appVerMajor) >= 8 and int(appVerMinor[0]) >= 3:\r\n \t\t\t\treturn ScintillaTextInfoNpp83\r\n \t\treturn super().TextInfo\n", "issue": "Recent alpha versions crash Notepad++ versions 8.2.x and below\n<!-- Please read the text in this edit field before filling it in.\r\nPlease thoroughly read NVDA's wiki article on how to fill in this template, including how to provide the required files.\r\nIssues may be closed if the required information is not present.\r\nhttps://github.com/nvaccess/nvda/blob/master/devDocs/githubIssueTemplateExplanationAndExamples.md\r\nPlease also note that the NVDA project has a Citizen and Contributor Code of Conduct which can be found at https://github.com/nvaccess/nvda/blob/master/CODE_OF_CONDUCT.md. NV Access expects that all contributors and other community members read and abide by the rules set out in this document while participating or contributing to this project. This includes creating or commenting on issues and pull requests. \r\n\r\nEach of the questions and sections below start with multiple hash symbols (#). Place your answers and information on the blank line below each question.\r\n-->\r\n\r\n### Steps to reproduce:\r\n1. Use NVDA alpha version nvda_snapshot_alpha-24771,a1a6598b.exe or above.\r\n2. Open Notepad++ v8.1.x or v8.2.x 64-bit.\r\n3.Press up/down a few times.\r\n\r\n### Actual behavior:\r\nNotepad++ crashes. No error message is printed in NVDA log.\r\n### Expected behavior:\r\nNotepad++ should not crash.\r\n### System configuration\r\n#### NVDA installed/portable/running from source:\r\nPortable alpha nvda_snapshot_alpha-24771,a1a6598b.exe\r\n#### NVDA version:\r\nnvda_snapshot_alpha-24771,a1a6598b.exe\r\n\r\n#### Windows version:\r\nReproduced on both Windows 10 and 11 64-bit.\r\n#### Name and version of other software in use when reproducing the issue:\r\nReproduced using either of these two versions (64-bit in both cases):\r\n- Notepad++ v8.1.9.3: https://github.com/notepad-plus-plus/notepad-plus-plus/releases/download/v8.1.9.3/npp.8.1.9.3.portable.x64.zip\r\n- Notepad++ v8.2.1: https://github.com/notepad-plus-plus/notepad-plus-plus/releases/download/v8.2.1/npp.8.2.1.portable.x64.zip\r\n#### Other information about your system:\r\nN/A\r\n### Other questions\r\n#### Does the issue still occur after restarting your computer?\r\nYes\r\n#### Have you tried any other versions of NVDA? 
If so, please report their behaviors.\r\nIdentified that commit a1a6598b19f71bd108bef64373dd35c89c14cd4a is causing this, therefore cc: @lukaszgo1.\r\n#### If NVDA add-ons are disabled, is your problem still occurring?\r\nYes\r\n#### Does the issue still occur after you run the COM Registration Fixing Tool in NVDA's tools menu?\r\nYes\n", "before_files": [{"content": "# A part of NonVisual Desktop Access (NVDA)\r\n# Copyright (C) 2022 NV Access Limited, \u0141ukasz Golonka\r\n# This file may be used under the terms of the GNU General Public License, version 2 or later.\r\n# For more details see: https://www.gnu.org/licenses/gpl-2.0.html\r\n\r\n\"\"\"AppModule for Notepad++.\r\nDo not rename! The executable file for Notepad++ is named `notepad++` and `+` is not a valid character\r\nin Python's import statements.\r\nThis module is mapped to the right binary separately\r\nand the current name makes it possible to expose it from `nvdaBuiltin` for add-on developers.\r\n\"\"\"\r\n\r\nimport ctypes\r\n\r\nimport appModuleHandler\r\nimport NVDAObjects.window.scintilla as ScintillaBase\r\n\r\n\r\nclass CharacterRangeStructLongLong(ctypes.Structure):\r\n\t\"\"\"By default character ranges in Scintilla are represented by longs.\r\n\tHowever long is not big enough for files over 2 GB,\r\n\ttherefore in 64-bit builds of Notepad++ 8.3 and later\r\n\tthese ranges are represented by longlong.\r\n\t\"\"\"\r\n\t_fields_ = [\r\n\t\t('cpMin', ctypes.c_longlong),\r\n\t\t('cpMax', ctypes.c_longlong),\r\n\t]\r\n\r\n\r\nclass ScintillaTextInfoNpp83(ScintillaBase.ScintillaTextInfo):\r\n\t\"\"\"Text info for 64-bit builds of Notepad++ 8.3 and later.\r\n\t\"\"\"\r\n\r\n\tclass TextRangeStruct(ctypes.Structure):\r\n\t\t_fields_ = [\r\n\t\t\t('chrg', CharacterRangeStructLongLong),\r\n\t\t\t('lpstrText', ctypes.c_char_p),\r\n\t\t]\r\n\r\n\r\nclass NppEdit(ScintillaBase.Scintilla):\r\n\r\n\tname = None # The name of the editor is not useful.\r\n\r\n\tdef _get_TextInfo(self):\r\n\t\tif self.appModule.is64BitProcess:\r\n\t\t\tappVerMajor, appVerMinor, *__ = self.appModule.productVersion.split(\".\")\r\n\t\t\tif int(appVerMajor) >= 8 and int(appVerMinor) >= 3:\r\n\t\t\t\treturn ScintillaTextInfoNpp83\r\n\t\treturn super().TextInfo\r\n\r\n\r\nclass AppModule(appModuleHandler.AppModule):\r\n\r\n\tdef chooseNVDAObjectOverlayClasses(self, obj, clsList):\r\n\t\tif obj.windowClassName == \"Scintilla\" and obj.windowControlID == 0:\r\n\t\t\tclsList.insert(0, NppEdit)\r\n", "path": "source/appModules/notepadPlusPlus.py"}]}
1,840
306
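The crash above comes down to version-string parsing: Notepad++ reports its version with minor, patch, and build concatenated into one component, so 8.2.1 arrives as "8.21", and naively comparing `int("21") >= 3` wrongly selects the 8.3 text-info class. A small standalone illustration of the corrected check, detached from NVDA's object model (`uses_longlong_ranges` is an illustrative name; only the comparison logic mirrors the merged fix):

```python
def uses_longlong_ranges(product_version: str) -> bool:
    """True for 64-bit Notepad++ 8.3+ builds, which use longlong character ranges."""
    # Notepad++ folds minor.patch.build into a single component:
    # "8.3" -> "8.3", "8.2.1" -> "8.21", "8.1.9.2" -> "8.192".
    # Only the first digit of that component is the real minor version.
    major, minor, *_ = product_version.split(".")
    return int(major) >= 8 and int(minor[0]) >= 3

for reported in ("8.3", "8.21", "8.192"):
    print(reported, uses_longlong_ranges(reported))
# 8.3   True
# 8.21  False  (this is 8.2.1, which must NOT get the 8.3 TextInfo)
# 8.192 False  (this is 8.1.9.2)
```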
gh_patches_debug_31171
rasdani/github-patches
git_diff
spack__spack-36657
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Invalid flag with apple m2 cpu and apple-clang (clang-1400.0.29.202) On a system as described in the title, the following flag in not recognized and causes compilation failures: https://github.com/spack/spack/blob/b2c806f6fcd73ae624582c84a177e465a7877cf7/lib/spack/external/archspec/json/cpu/microarchitectures.json#L2794 Replacing with `-mcpu=apple-m1` seems to fix the issue. There seems to be no flag specific to m2 cpu. </issue> <code> [start of lib/spack/external/archspec/cli.py] 1 # Copyright 2019-2020 Lawrence Livermore National Security, LLC and other 2 # Archspec Project Developers. See the top-level COPYRIGHT file for details. 3 # 4 # SPDX-License-Identifier: (Apache-2.0 OR MIT) 5 """ 6 archspec command line interface 7 """ 8 9 import click 10 11 import archspec 12 import archspec.cpu 13 14 15 @click.group(name="archspec") 16 @click.version_option(version=archspec.__version__) 17 def main(): 18 """archspec command line interface""" 19 20 21 @main.command() 22 def cpu(): 23 """archspec command line interface for CPU""" 24 click.echo(archspec.cpu.host()) 25 [end of lib/spack/external/archspec/cli.py] [start of lib/spack/external/__init__.py] 1 # Copyright 2013-2023 Lawrence Livermore National Security, LLC and other 2 # Spack Project Developers. See the top-level COPYRIGHT file for details. 3 # 4 # SPDX-License-Identifier: (Apache-2.0 OR MIT) 5 6 """This module contains the following external, potentially separately 7 licensed, packages that are included in Spack: 8 9 altgraph 10 -------- 11 12 * Homepage: https://altgraph.readthedocs.io/en/latest/index.html 13 * Usage: dependency of macholib 14 * Version: 0.17.3 15 16 archspec 17 -------- 18 19 * Homepage: https://pypi.python.org/pypi/archspec 20 * Usage: Labeling, comparison and detection of microarchitectures 21 * Version: 0.2.0 (commit e44bad9c7b6defac73696f64078b2fe634719b62) 22 23 astunparse 24 ---------------- 25 26 * Homepage: https://github.com/simonpercivall/astunparse 27 * Usage: Unparsing Python ASTs for package hashes in Spack 28 * Version: 1.6.3 (plus modifications) 29 * Note: This is in ``spack.util.unparse`` because it's very heavily 30 modified, and we want to track coverage for it. 31 Specifically, we have modified this library to generate consistent unparsed ASTs 32 regardless of the Python version. It is based on: 33 1. The original ``astunparse`` library; 34 2. Modifications for consistency; 35 3. Backports from the ``ast.unparse`` function in Python 3.9 and later 36 The unparsing is now mostly consistent with upstream ``ast.unparse``, so if 37 we ever require Python 3.9 or higher, we can drop this external package. 38 39 attrs 40 ---------------- 41 42 * Homepage: https://github.com/python-attrs/attrs 43 * Usage: Needed by jsonschema. 44 * Version: 22.1.0 45 46 ctest_log_parser 47 ---------------- 48 49 * Homepage: https://github.com/Kitware/CMake/blob/master/Source/CTest/cmCTestBuildHandler.cxx 50 * Usage: Functions to parse build logs and extract error messages. 51 * Version: Unversioned 52 * Note: This is a homemade port of Kitware's CTest build handler. 53 54 distro 55 ------ 56 57 * Homepage: https://pypi.python.org/pypi/distro 58 * Usage: Provides a more stable linux distribution detection. 59 * Version: 1.8.0 60 61 jinja2 62 ------ 63 64 * Homepage: https://pypi.python.org/pypi/Jinja2 65 * Usage: A modern and designer-friendly templating language for Python. 
66 * Version: 3.0.3 (last version supporting Python 3.6) 67 68 jsonschema 69 ---------- 70 71 * Homepage: https://pypi.python.org/pypi/jsonschema 72 * Usage: An implementation of JSON Schema for Python. 73 * Version: 3.2.0 (last version before 2.7 and 3.6 support was dropped) 74 * Note: We don't include tests or benchmarks; just what Spack needs. 75 76 macholib 77 -------- 78 79 * Homepage: https://macholib.readthedocs.io/en/latest/index.html# 80 * Usage: Manipulation of Mach-o binaries for relocating macOS buildcaches on Linux 81 * Version: 1.16.2 82 83 markupsafe 84 ---------- 85 86 * Homepage: https://pypi.python.org/pypi/MarkupSafe 87 * Usage: Implements a XML/HTML/XHTML Markup safe string for Python. 88 * Version: 2.0.1 (last version supporting Python 3.6) 89 90 pyrsistent 91 ---------- 92 93 * Homepage: http://github.com/tobgu/pyrsistent/ 94 * Usage: Needed by `jsonschema` 95 * Version: 0.18.0 96 97 ruamel.yaml 98 ------ 99 100 * Homepage: https://yaml.readthedocs.io/ 101 * Usage: Used for config files. Ruamel is based on PyYAML but is more 102 actively maintained and has more features, including round-tripping 103 comments read from config files. 104 * Version: 0.11.15 (last version supporting Python 2.6) 105 * Note: This package has been slightly modified to improve Python 2.6 106 compatibility -- some ``{}`` format strings were replaced, and the 107 import for ``OrderedDict`` was tweaked. 108 109 six 110 --- 111 112 * Homepage: https://pypi.python.org/pypi/six 113 * Usage: Python 2 and 3 compatibility utilities. 114 * Version: 1.16.0 115 116 """ 117 [end of lib/spack/external/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/lib/spack/external/__init__.py b/lib/spack/external/__init__.py --- a/lib/spack/external/__init__.py +++ b/lib/spack/external/__init__.py @@ -18,7 +18,7 @@ * Homepage: https://pypi.python.org/pypi/archspec * Usage: Labeling, comparison and detection of microarchitectures -* Version: 0.2.0 (commit e44bad9c7b6defac73696f64078b2fe634719b62) +* Version: 0.2.0-dev (commit f3667f95030c6573842fb5f6df0d647285597509) astunparse ---------------- diff --git a/lib/spack/external/archspec/cli.py b/lib/spack/external/archspec/cli.py --- a/lib/spack/external/archspec/cli.py +++ b/lib/spack/external/archspec/cli.py @@ -6,19 +6,61 @@ archspec command line interface """ -import click +import argparse +import typing import archspec import archspec.cpu [email protected](name="archspec") [email protected]_option(version=archspec.__version__) -def main(): - """archspec command line interface""" +def _make_parser() -> argparse.ArgumentParser: + parser = argparse.ArgumentParser( + "archspec", + description="archspec command line interface", + add_help=False, + ) + parser.add_argument( + "--version", + "-V", + help="Show the version and exit.", + action="version", + version=f"archspec, version {archspec.__version__}", + ) + parser.add_argument("--help", "-h", help="Show the help and exit.", action="help") + subcommands = parser.add_subparsers( + title="command", + metavar="COMMAND", + dest="command", + ) [email protected]() -def cpu(): - """archspec command line interface for CPU""" - click.echo(archspec.cpu.host()) + cpu_command = subcommands.add_parser( + "cpu", + help="archspec command line interface for CPU", + description="archspec command line interface for CPU", + ) + cpu_command.set_defaults(run=cpu) + + return parser + + +def cpu() -> int: + """Run the `archspec cpu` subcommand.""" + print(archspec.cpu.host()) + return 0 + + +def main(argv: typing.Optional[typing.List[str]] = None) -> int: + """Run the `archspec` command line interface.""" + parser = _make_parser() + + try: + args = parser.parse_args(argv) + except SystemExit as err: + return err.code + + if args.command is None: + parser.print_help() + return 0 + + return args.run()
{"golden_diff": "diff --git a/lib/spack/external/__init__.py b/lib/spack/external/__init__.py\n--- a/lib/spack/external/__init__.py\n+++ b/lib/spack/external/__init__.py\n@@ -18,7 +18,7 @@\n \n * Homepage: https://pypi.python.org/pypi/archspec\n * Usage: Labeling, comparison and detection of microarchitectures\n-* Version: 0.2.0 (commit e44bad9c7b6defac73696f64078b2fe634719b62)\n+* Version: 0.2.0-dev (commit f3667f95030c6573842fb5f6df0d647285597509)\n \n astunparse\n ----------------\ndiff --git a/lib/spack/external/archspec/cli.py b/lib/spack/external/archspec/cli.py\n--- a/lib/spack/external/archspec/cli.py\n+++ b/lib/spack/external/archspec/cli.py\n@@ -6,19 +6,61 @@\n archspec command line interface\n \"\"\"\n \n-import click\n+import argparse\n+import typing\n \n import archspec\n import archspec.cpu\n \n \[email protected](name=\"archspec\")\[email protected]_option(version=archspec.__version__)\n-def main():\n- \"\"\"archspec command line interface\"\"\"\n+def _make_parser() -> argparse.ArgumentParser:\n+ parser = argparse.ArgumentParser(\n+ \"archspec\",\n+ description=\"archspec command line interface\",\n+ add_help=False,\n+ )\n+ parser.add_argument(\n+ \"--version\",\n+ \"-V\",\n+ help=\"Show the version and exit.\",\n+ action=\"version\",\n+ version=f\"archspec, version {archspec.__version__}\",\n+ )\n+ parser.add_argument(\"--help\", \"-h\", help=\"Show the help and exit.\", action=\"help\")\n \n+ subcommands = parser.add_subparsers(\n+ title=\"command\",\n+ metavar=\"COMMAND\",\n+ dest=\"command\",\n+ )\n \[email protected]()\n-def cpu():\n- \"\"\"archspec command line interface for CPU\"\"\"\n- click.echo(archspec.cpu.host())\n+ cpu_command = subcommands.add_parser(\n+ \"cpu\",\n+ help=\"archspec command line interface for CPU\",\n+ description=\"archspec command line interface for CPU\",\n+ )\n+ cpu_command.set_defaults(run=cpu)\n+\n+ return parser\n+\n+\n+def cpu() -> int:\n+ \"\"\"Run the `archspec cpu` subcommand.\"\"\"\n+ print(archspec.cpu.host())\n+ return 0\n+\n+\n+def main(argv: typing.Optional[typing.List[str]] = None) -> int:\n+ \"\"\"Run the `archspec` command line interface.\"\"\"\n+ parser = _make_parser()\n+\n+ try:\n+ args = parser.parse_args(argv)\n+ except SystemExit as err:\n+ return err.code\n+\n+ if args.command is None:\n+ parser.print_help()\n+ return 0\n+\n+ return args.run()\n", "issue": "Invalid flag with apple m2 cpu and apple-clang (clang-1400.0.29.202)\nOn a system as described in the title, the following flag in not recognized and causes compilation failures:\r\n\r\nhttps://github.com/spack/spack/blob/b2c806f6fcd73ae624582c84a177e465a7877cf7/lib/spack/external/archspec/json/cpu/microarchitectures.json#L2794\r\n\r\nReplacing with `-mcpu=apple-m1` seems to fix the issue. There seems to be no flag specific to m2 cpu.\r\n\n", "before_files": [{"content": "# Copyright 2019-2020 Lawrence Livermore National Security, LLC and other\n# Archspec Project Developers. 
See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\"\"\"\narchspec command line interface\n\"\"\"\n\nimport click\n\nimport archspec\nimport archspec.cpu\n\n\[email protected](name=\"archspec\")\[email protected]_option(version=archspec.__version__)\ndef main():\n \"\"\"archspec command line interface\"\"\"\n\n\[email protected]()\ndef cpu():\n \"\"\"archspec command line interface for CPU\"\"\"\n click.echo(archspec.cpu.host())\n", "path": "lib/spack/external/archspec/cli.py"}, {"content": "# Copyright 2013-2023 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\n\"\"\"This module contains the following external, potentially separately\nlicensed, packages that are included in Spack:\n\naltgraph\n--------\n\n* Homepage: https://altgraph.readthedocs.io/en/latest/index.html\n* Usage: dependency of macholib\n* Version: 0.17.3\n\narchspec\n--------\n\n* Homepage: https://pypi.python.org/pypi/archspec\n* Usage: Labeling, comparison and detection of microarchitectures\n* Version: 0.2.0 (commit e44bad9c7b6defac73696f64078b2fe634719b62)\n\nastunparse\n----------------\n\n* Homepage: https://github.com/simonpercivall/astunparse\n* Usage: Unparsing Python ASTs for package hashes in Spack\n* Version: 1.6.3 (plus modifications)\n* Note: This is in ``spack.util.unparse`` because it's very heavily\n modified, and we want to track coverage for it.\n Specifically, we have modified this library to generate consistent unparsed ASTs\n regardless of the Python version. It is based on:\n 1. The original ``astunparse`` library;\n 2. Modifications for consistency;\n 3. Backports from the ``ast.unparse`` function in Python 3.9 and later\n The unparsing is now mostly consistent with upstream ``ast.unparse``, so if\n we ever require Python 3.9 or higher, we can drop this external package.\n\nattrs\n----------------\n\n* Homepage: https://github.com/python-attrs/attrs\n* Usage: Needed by jsonschema.\n* Version: 22.1.0\n\nctest_log_parser\n----------------\n\n* Homepage: https://github.com/Kitware/CMake/blob/master/Source/CTest/cmCTestBuildHandler.cxx\n* Usage: Functions to parse build logs and extract error messages.\n* Version: Unversioned\n* Note: This is a homemade port of Kitware's CTest build handler.\n\ndistro\n------\n\n* Homepage: https://pypi.python.org/pypi/distro\n* Usage: Provides a more stable linux distribution detection.\n* Version: 1.8.0\n\njinja2\n------\n\n* Homepage: https://pypi.python.org/pypi/Jinja2\n* Usage: A modern and designer-friendly templating language for Python.\n* Version: 3.0.3 (last version supporting Python 3.6)\n\njsonschema\n----------\n\n* Homepage: https://pypi.python.org/pypi/jsonschema\n* Usage: An implementation of JSON Schema for Python.\n* Version: 3.2.0 (last version before 2.7 and 3.6 support was dropped)\n* Note: We don't include tests or benchmarks; just what Spack needs.\n\nmacholib\n--------\n\n* Homepage: https://macholib.readthedocs.io/en/latest/index.html#\n* Usage: Manipulation of Mach-o binaries for relocating macOS buildcaches on Linux\n* Version: 1.16.2\n\nmarkupsafe\n----------\n\n* Homepage: https://pypi.python.org/pypi/MarkupSafe\n* Usage: Implements a XML/HTML/XHTML Markup safe string for Python.\n* Version: 2.0.1 (last version supporting Python 3.6)\n\npyrsistent\n----------\n\n* Homepage: http://github.com/tobgu/pyrsistent/\n* Usage: Needed by `jsonschema`\n* Version: 
0.18.0\n\nruamel.yaml\n------\n\n* Homepage: https://yaml.readthedocs.io/\n* Usage: Used for config files. Ruamel is based on PyYAML but is more\n actively maintained and has more features, including round-tripping\n comments read from config files.\n* Version: 0.11.15 (last version supporting Python 2.6)\n* Note: This package has been slightly modified to improve Python 2.6\n compatibility -- some ``{}`` format strings were replaced, and the\n import for ``OrderedDict`` was tweaked.\n\nsix\n---\n\n* Homepage: https://pypi.python.org/pypi/six\n* Usage: Python 2 and 3 compatibility utilities.\n* Version: 1.16.0\n\n\"\"\"\n", "path": "lib/spack/external/__init__.py"}]}
2,120
687
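Besides refreshing the vendored microarchitecture data, the golden diff above rewrites the bundled archspec CLI from click to plain argparse, presumably so the vendored copy carries no third-party dependency. A stripped-down sketch of that subcommand pattern follows; `_make_parser` and `main` only mirror the shape of the diff, and the `placeholder-host` lambda stands in for the real `archspec.cpu.host()` call.

```python
import argparse

def _make_parser() -> argparse.ArgumentParser:
    parser = argparse.ArgumentParser("archspec", description="archspec command line interface")
    subcommands = parser.add_subparsers(title="command", metavar="COMMAND", dest="command")
    cpu_command = subcommands.add_parser("cpu", help="print the host microarchitecture")
    cpu_command.set_defaults(run=lambda: print("placeholder-host"))  # archspec.cpu.host() in the real CLI
    return parser

def main(argv=None) -> int:
    parser = _make_parser()
    args = parser.parse_args(argv)
    if args.command is None:
        parser.print_help()
        return 0
    args.run()
    return 0

main(["cpu"])  # prints the placeholder host name
```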
gh_patches_debug_38047
rasdani/github-patches
git_diff
getsentry__sentry-3216
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Better avatar uploads caching Right now, we cache resizes on the server, but these responses are unable to be cached by any browser or proxy. To fix this we need to: - [ ] Send a proper `Cache-Control` header that tells the browser it's allowed to cache it. Something like `Cache-Control: max-age=315360000` - [ ] Urls that don't change, so they _can_ be cached. Right now, uploaded avatars reuse the same `UserAvatar.ident`, which means we get the same url even if we change the file. So we can't cache. - [ ] Remove `Set-Cookie` header on response (this may be tricky but @mattrobenolt can help). `Set-Cookie` header kills all caching. - [ ] Remove `Vary` header. We don't need to Vary on anything, and right now it's `Vary: Accept-Encoding, Cookie`, which basically makes caching useless. (this may be tricky as well, but @mattrobenolt can help) @macqueen </issue> <code> [start of src/sentry/middleware/locale.py] 1 """ 2 sentry.middleware.locale 3 ~~~~~~~~~~~~~~~~~~~~~~~~ 4 5 :copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details. 6 :license: BSD, see LICENSE for more details. 7 """ 8 9 from __future__ import absolute_import 10 11 import pytz 12 13 from django.middleware.locale import LocaleMiddleware 14 15 from sentry.models import UserOption 16 from sentry.utils.safe import safe_execute 17 18 19 class SentryLocaleMiddleware(LocaleMiddleware): 20 def process_request(self, request): 21 # No locale for static media 22 # This avoids touching user session, which means we avoid 23 # setting `Vary: Cookie` as a response header which will 24 # break HTTP caching entirely. 25 self.__is_static = request.path_info[:9] == '/_static/' 26 if self.__is_static: 27 return 28 29 safe_execute(self.load_user_conf, request, 30 _with_transaction=False) 31 32 super(SentryLocaleMiddleware, self).process_request(request) 33 34 def load_user_conf(self, request): 35 if not request.user.is_authenticated(): 36 return 37 38 language = UserOption.objects.get_value( 39 user=request.user, project=None, key='language', default=None) 40 if language: 41 request.session['django_language'] = language 42 43 timezone = UserOption.objects.get_value( 44 user=request.user, project=None, key='timezone', default=None) 45 if timezone: 46 request.timezone = pytz.timezone(timezone) 47 48 def process_response(self, request, response): 49 # If static bound, we don't want to run the normal process_response since this 50 # adds an extra `Vary: Accept-Language`. Static files don't need this and is 51 # less effective for caching. 52 try: 53 if self.__is_static: 54 return response 55 except AttributeError: 56 # catch ourselves in case __is_static never got set. 57 # It's possible that process_request never ran. 
58 pass 59 return super(SentryLocaleMiddleware, self).process_response(request, response) 60 [end of src/sentry/middleware/locale.py] [start of src/sentry/api/endpoints/useravatar.py] 1 from __future__ import absolute_import 2 3 from PIL import Image 4 5 from django.conf import settings 6 7 from rest_framework import status 8 from rest_framework.response import Response 9 10 from sentry.api.bases.user import UserEndpoint 11 from sentry.api.serializers import serialize 12 from sentry.models import UserAvatar, File 13 from sentry.utils.compat import StringIO 14 15 16 MIN_DIMENSION = 256 17 18 MAX_DIMENSION = 1024 19 20 21 class UserAvatarEndpoint(UserEndpoint): 22 FILE_TYPE = 'avatar.file' 23 24 def get(self, request, user): 25 return Response(serialize(user, request.user)) 26 27 def is_valid_size(self, width, height): 28 if width != height: 29 return False 30 if width < MIN_DIMENSION: 31 return False 32 if width > MAX_DIMENSION: 33 return False 34 return True 35 36 def put(self, request, user): 37 if user != request.user: 38 return Response(status=status.HTTP_403_FORBIDDEN) 39 40 photo_string = request.DATA.get('avatar_photo') 41 photo = None 42 if photo_string: 43 photo_string = photo_string.decode('base64') 44 if len(photo_string) > settings.SENTRY_MAX_AVATAR_SIZE: 45 return Response(status=status.HTTP_413_REQUEST_ENTITY_TOO_LARGE) 46 with Image.open(StringIO(photo_string)) as img: 47 width, height = img.size 48 if not self.is_valid_size(width, height): 49 return Response(status=status.HTTP_400_BAD_REQUEST) 50 file_name = '%s.png' % user.id 51 photo = File.objects.create(name=file_name, type=self.FILE_TYPE) 52 photo.putfile(StringIO(photo_string)) 53 54 avatar, _ = UserAvatar.objects.get_or_create(user=user) 55 if avatar.file and photo: 56 avatar.file.delete() 57 avatar.clear_cached_photos() 58 if photo: 59 avatar.file = photo 60 61 avatar_type = request.DATA.get('avatar_type') 62 63 if not avatar.file and avatar_type == 'upload': 64 return Response(status=status.HTTP_400_BAD_REQUEST) 65 66 if avatar_type: 67 try: 68 avatar.avatar_type = [i for i, n in UserAvatar.AVATAR_TYPES if n == avatar_type][0] 69 except IndexError: 70 return Response(status=status.HTTP_400_BAD_REQUEST) 71 72 avatar.save() 73 return Response(serialize(user, request.user)) 74 [end of src/sentry/api/endpoints/useravatar.py] [start of src/sentry/web/frontend/user_avatar.py] 1 from __future__ import absolute_import 2 3 from django.http import HttpResponse, HttpResponseBadRequest, HttpResponseNotFound 4 from django.views.generic import View 5 6 from sentry.models import UserAvatar 7 8 9 class UserAvatarPhotoView(View): 10 11 def get_file_name(self, user): 12 return '%s.png' % user.id 13 14 def get(self, request, *args, **kwargs): 15 avatar_id = kwargs['avatar_id'] 16 try: 17 avatar = UserAvatar.objects.get(ident=avatar_id) 18 except UserAvatar.DoesNotExist: 19 return HttpResponseNotFound() 20 21 photo = avatar.file 22 if not photo: 23 return HttpResponseNotFound() 24 25 size = request.GET.get('s') 26 photo_file = photo.getfile() 27 if size: 28 try: 29 size = int(size) 30 except ValueError: 31 return HttpResponseBadRequest() 32 else: 33 photo_file = avatar.get_cached_photo(size) 34 return HttpResponse(photo_file, content_type='image/png') 35 [end of src/sentry/web/frontend/user_avatar.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/sentry/api/endpoints/useravatar.py b/src/sentry/api/endpoints/useravatar.py --- a/src/sentry/api/endpoints/useravatar.py +++ b/src/sentry/api/endpoints/useravatar.py @@ -1,5 +1,7 @@ from __future__ import absolute_import +from uuid import uuid4 + from PIL import Image from django.conf import settings @@ -57,6 +59,7 @@ avatar.clear_cached_photos() if photo: avatar.file = photo + avatar.ident = uuid4().hex avatar_type = request.DATA.get('avatar_type') diff --git a/src/sentry/middleware/locale.py b/src/sentry/middleware/locale.py --- a/src/sentry/middleware/locale.py +++ b/src/sentry/middleware/locale.py @@ -22,8 +22,9 @@ # This avoids touching user session, which means we avoid # setting `Vary: Cookie` as a response header which will # break HTTP caching entirely. - self.__is_static = request.path_info[:9] == '/_static/' - if self.__is_static: + self.__skip_caching = (request.path_info[:9] == '/_static/' or + request.path_info[:8] == '/avatar/') + if self.__skip_caching: return safe_execute(self.load_user_conf, request, @@ -50,10 +51,10 @@ # adds an extra `Vary: Accept-Language`. Static files don't need this and is # less effective for caching. try: - if self.__is_static: + if self.__skip_caching: return response except AttributeError: - # catch ourselves in case __is_static never got set. + # catch ourselves in case __skip_caching never got set. # It's possible that process_request never ran. pass return super(SentryLocaleMiddleware, self).process_response(request, response) diff --git a/src/sentry/web/frontend/user_avatar.py b/src/sentry/web/frontend/user_avatar.py --- a/src/sentry/web/frontend/user_avatar.py +++ b/src/sentry/web/frontend/user_avatar.py @@ -4,6 +4,7 @@ from django.views.generic import View from sentry.models import UserAvatar +from sentry.web.frontend.generic import FOREVER_CACHE class UserAvatarPhotoView(View): @@ -31,4 +32,7 @@ return HttpResponseBadRequest() else: photo_file = avatar.get_cached_photo(size) - return HttpResponse(photo_file, content_type='image/png') + + res = HttpResponse(photo_file, content_type='image/png') + res['Cache-Control'] = FOREVER_CACHE + return res
{"golden_diff": "diff --git a/src/sentry/api/endpoints/useravatar.py b/src/sentry/api/endpoints/useravatar.py\n--- a/src/sentry/api/endpoints/useravatar.py\n+++ b/src/sentry/api/endpoints/useravatar.py\n@@ -1,5 +1,7 @@\n from __future__ import absolute_import\n \n+from uuid import uuid4\n+\n from PIL import Image\n \n from django.conf import settings\n@@ -57,6 +59,7 @@\n avatar.clear_cached_photos()\n if photo:\n avatar.file = photo\n+ avatar.ident = uuid4().hex\n \n avatar_type = request.DATA.get('avatar_type')\n \ndiff --git a/src/sentry/middleware/locale.py b/src/sentry/middleware/locale.py\n--- a/src/sentry/middleware/locale.py\n+++ b/src/sentry/middleware/locale.py\n@@ -22,8 +22,9 @@\n # This avoids touching user session, which means we avoid\n # setting `Vary: Cookie` as a response header which will\n # break HTTP caching entirely.\n- self.__is_static = request.path_info[:9] == '/_static/'\n- if self.__is_static:\n+ self.__skip_caching = (request.path_info[:9] == '/_static/' or\n+ request.path_info[:8] == '/avatar/')\n+ if self.__skip_caching:\n return\n \n safe_execute(self.load_user_conf, request,\n@@ -50,10 +51,10 @@\n # adds an extra `Vary: Accept-Language`. Static files don't need this and is\n # less effective for caching.\n try:\n- if self.__is_static:\n+ if self.__skip_caching:\n return response\n except AttributeError:\n- # catch ourselves in case __is_static never got set.\n+ # catch ourselves in case __skip_caching never got set.\n # It's possible that process_request never ran.\n pass\n return super(SentryLocaleMiddleware, self).process_response(request, response)\ndiff --git a/src/sentry/web/frontend/user_avatar.py b/src/sentry/web/frontend/user_avatar.py\n--- a/src/sentry/web/frontend/user_avatar.py\n+++ b/src/sentry/web/frontend/user_avatar.py\n@@ -4,6 +4,7 @@\n from django.views.generic import View\n \n from sentry.models import UserAvatar\n+from sentry.web.frontend.generic import FOREVER_CACHE\n \n \n class UserAvatarPhotoView(View):\n@@ -31,4 +32,7 @@\n return HttpResponseBadRequest()\n else:\n photo_file = avatar.get_cached_photo(size)\n- return HttpResponse(photo_file, content_type='image/png')\n+\n+ res = HttpResponse(photo_file, content_type='image/png')\n+ res['Cache-Control'] = FOREVER_CACHE\n+ return res\n", "issue": "Better avatar uploads caching\nRight now, we cache resizes on the server, but these responses are unable to be cached by any browser or proxy.\n\nTo fix this we need to:\n- [ ] Send a proper `Cache-Control` header that tells the browser it's allowed to cache it. Something like `Cache-Control: max-age=315360000`\n- [ ] Urls that don't change, so they _can_ be cached. Right now, uploaded avatars reuse the same `UserAvatar.ident`, which means we get the same url even if we change the file. So we can't cache.\n- [ ] Remove `Set-Cookie` header on response (this may be tricky but @mattrobenolt can help). `Set-Cookie` header kills all caching.\n- [ ] Remove `Vary` header. We don't need to Vary on anything, and right now it's `Vary: Accept-Encoding, Cookie`, which basically makes caching useless. 
(this may be tricky as well, but @mattrobenolt can help)\n\n@macqueen \n\n", "before_files": [{"content": "\"\"\"\nsentry.middleware.locale\n~~~~~~~~~~~~~~~~~~~~~~~~\n\n:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.\n:license: BSD, see LICENSE for more details.\n\"\"\"\n\nfrom __future__ import absolute_import\n\nimport pytz\n\nfrom django.middleware.locale import LocaleMiddleware\n\nfrom sentry.models import UserOption\nfrom sentry.utils.safe import safe_execute\n\n\nclass SentryLocaleMiddleware(LocaleMiddleware):\n def process_request(self, request):\n # No locale for static media\n # This avoids touching user session, which means we avoid\n # setting `Vary: Cookie` as a response header which will\n # break HTTP caching entirely.\n self.__is_static = request.path_info[:9] == '/_static/'\n if self.__is_static:\n return\n\n safe_execute(self.load_user_conf, request,\n _with_transaction=False)\n\n super(SentryLocaleMiddleware, self).process_request(request)\n\n def load_user_conf(self, request):\n if not request.user.is_authenticated():\n return\n\n language = UserOption.objects.get_value(\n user=request.user, project=None, key='language', default=None)\n if language:\n request.session['django_language'] = language\n\n timezone = UserOption.objects.get_value(\n user=request.user, project=None, key='timezone', default=None)\n if timezone:\n request.timezone = pytz.timezone(timezone)\n\n def process_response(self, request, response):\n # If static bound, we don't want to run the normal process_response since this\n # adds an extra `Vary: Accept-Language`. Static files don't need this and is\n # less effective for caching.\n try:\n if self.__is_static:\n return response\n except AttributeError:\n # catch ourselves in case __is_static never got set.\n # It's possible that process_request never ran.\n pass\n return super(SentryLocaleMiddleware, self).process_response(request, response)\n", "path": "src/sentry/middleware/locale.py"}, {"content": "from __future__ import absolute_import\n\nfrom PIL import Image\n\nfrom django.conf import settings\n\nfrom rest_framework import status\nfrom rest_framework.response import Response\n\nfrom sentry.api.bases.user import UserEndpoint\nfrom sentry.api.serializers import serialize\nfrom sentry.models import UserAvatar, File\nfrom sentry.utils.compat import StringIO\n\n\nMIN_DIMENSION = 256\n\nMAX_DIMENSION = 1024\n\n\nclass UserAvatarEndpoint(UserEndpoint):\n FILE_TYPE = 'avatar.file'\n\n def get(self, request, user):\n return Response(serialize(user, request.user))\n\n def is_valid_size(self, width, height):\n if width != height:\n return False\n if width < MIN_DIMENSION:\n return False\n if width > MAX_DIMENSION:\n return False\n return True\n\n def put(self, request, user):\n if user != request.user:\n return Response(status=status.HTTP_403_FORBIDDEN)\n\n photo_string = request.DATA.get('avatar_photo')\n photo = None\n if photo_string:\n photo_string = photo_string.decode('base64')\n if len(photo_string) > settings.SENTRY_MAX_AVATAR_SIZE:\n return Response(status=status.HTTP_413_REQUEST_ENTITY_TOO_LARGE)\n with Image.open(StringIO(photo_string)) as img:\n width, height = img.size\n if not self.is_valid_size(width, height):\n return Response(status=status.HTTP_400_BAD_REQUEST)\n file_name = '%s.png' % user.id\n photo = File.objects.create(name=file_name, type=self.FILE_TYPE)\n photo.putfile(StringIO(photo_string))\n\n avatar, _ = UserAvatar.objects.get_or_create(user=user)\n if avatar.file and photo:\n avatar.file.delete()\n 
avatar.clear_cached_photos()\n if photo:\n avatar.file = photo\n\n avatar_type = request.DATA.get('avatar_type')\n\n if not avatar.file and avatar_type == 'upload':\n return Response(status=status.HTTP_400_BAD_REQUEST)\n\n if avatar_type:\n try:\n avatar.avatar_type = [i for i, n in UserAvatar.AVATAR_TYPES if n == avatar_type][0]\n except IndexError:\n return Response(status=status.HTTP_400_BAD_REQUEST)\n\n avatar.save()\n return Response(serialize(user, request.user))\n", "path": "src/sentry/api/endpoints/useravatar.py"}, {"content": "from __future__ import absolute_import\n\nfrom django.http import HttpResponse, HttpResponseBadRequest, HttpResponseNotFound\nfrom django.views.generic import View\n\nfrom sentry.models import UserAvatar\n\n\nclass UserAvatarPhotoView(View):\n\n def get_file_name(self, user):\n return '%s.png' % user.id\n\n def get(self, request, *args, **kwargs):\n avatar_id = kwargs['avatar_id']\n try:\n avatar = UserAvatar.objects.get(ident=avatar_id)\n except UserAvatar.DoesNotExist:\n return HttpResponseNotFound()\n\n photo = avatar.file\n if not photo:\n return HttpResponseNotFound()\n\n size = request.GET.get('s')\n photo_file = photo.getfile()\n if size:\n try:\n size = int(size)\n except ValueError:\n return HttpResponseBadRequest()\n else:\n photo_file = avatar.get_cached_photo(size)\n return HttpResponse(photo_file, content_type='image/png')\n", "path": "src/sentry/web/frontend/user_avatar.py"}]}
2,257
599
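The fix above covers all four checkboxes at once: a far-future `Cache-Control` header, a fresh `ident` (and therefore a fresh URL) on every upload, and skipping the locale middleware for `/avatar/` paths so no `Set-Cookie`/`Vary` headers leak in. A minimal Django-flavored sketch of the first two pieces follows; `avatar_photo_response` and `on_avatar_upload` are illustrative names, and the `FOREVER_CACHE` value is assumed from the issue's suggestion (Sentry imports the real constant from `sentry.web.frontend.generic`).

```python
from uuid import uuid4
from django.http import HttpResponse

FOREVER_CACHE = "max-age=315360000"  # assumed value, per the issue text

def avatar_photo_response(photo_file) -> HttpResponse:
    # Content served from a unique, never-reused URL can be cached essentially forever.
    res = HttpResponse(photo_file, content_type="image/png")
    res["Cache-Control"] = FOREVER_CACHE
    return res

def on_avatar_upload(avatar, new_file) -> None:
    # Rotating the ident changes the avatar URL, so browsers and proxies
    # never serve a stale cached image after the user uploads a new picture.
    avatar.file = new_file
    avatar.ident = uuid4().hex
```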
gh_patches_debug_3407
rasdani/github-patches
git_diff
vllm-project__vllm-1855
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> benchmark_latency.py will hang when --batchsize=1 and --n=2 benchmark_latency.py will hang if setting --batchsize=1 and --n=2or4or8. should max_num_seqs been set as args.batch_size * args.n ? https://github.com/vllm-project/vllm/blob/main/benchmarks/benchmark_latency.py#L23 </issue> <code> [start of benchmarks/benchmark_latency.py] 1 """Benchmark the latency of processing a single batch of requests.""" 2 import argparse 3 import time 4 5 import numpy as np 6 import torch 7 from tqdm import tqdm 8 9 from vllm import LLM, SamplingParams 10 11 12 def main(args: argparse.Namespace): 13 print(args) 14 15 # NOTE(woosuk): If the request cannot be processed in a single batch, 16 # the engine will automatically process the request in multiple batches. 17 llm = LLM( 18 model=args.model, 19 tokenizer=args.tokenizer, 20 quantization=args.quantization, 21 tensor_parallel_size=args.tensor_parallel_size, 22 max_num_seqs=args.batch_size, 23 trust_remote_code=args.trust_remote_code, 24 dtype=args.dtype, 25 ) 26 27 sampling_params = SamplingParams( 28 n=args.n, 29 temperature=0.0 if args.use_beam_search else 1.0, 30 top_p=1.0, 31 use_beam_search=args.use_beam_search, 32 ignore_eos=True, 33 max_tokens=args.output_len, 34 ) 35 print(sampling_params) 36 dummy_prompt_token_ids = [[0] * args.input_len] * args.batch_size 37 38 def run_to_completion(profile: bool = False): 39 if profile: 40 with torch.profiler.profile(activities=[ 41 torch.profiler.ProfilerActivity.CPU, 42 torch.profiler.ProfilerActivity.CUDA, 43 ]) as p: 44 llm.generate(prompt_token_ids=dummy_prompt_token_ids, 45 sampling_params=sampling_params, 46 use_tqdm=False) 47 print(p.key_averages()) 48 else: 49 start_time = time.perf_counter() 50 llm.generate(prompt_token_ids=dummy_prompt_token_ids, 51 sampling_params=sampling_params, 52 use_tqdm=False) 53 end_time = time.perf_counter() 54 latency = end_time - start_time 55 return latency 56 57 print("Warming up...") 58 run_to_completion(profile=False) 59 60 if args.profile: 61 print("Profiling...") 62 run_to_completion(profile=True) 63 return 64 65 # Benchmark. 
66 latencies = [] 67 for _ in tqdm(range(args.num_iters), desc="Profiling iterations"): 68 latencies.append(run_to_completion(profile=False)) 69 print(f'Avg latency: {np.mean(latencies)} seconds') 70 71 72 if __name__ == '__main__': 73 parser = argparse.ArgumentParser( 74 description='Benchmark the latency of processing a single batch of ' 75 'requests till completion.') 76 parser.add_argument('--model', type=str, default='facebook/opt-125m') 77 parser.add_argument('--tokenizer', type=str, default=None) 78 parser.add_argument('--quantization', 79 '-q', 80 choices=['awq', 'squeezellm', None], 81 default=None) 82 parser.add_argument('--tensor-parallel-size', '-tp', type=int, default=1) 83 parser.add_argument('--input-len', type=int, default=32) 84 parser.add_argument('--output-len', type=int, default=128) 85 parser.add_argument('--batch-size', type=int, default=8) 86 parser.add_argument('--n', 87 type=int, 88 default=1, 89 help='Number of generated sequences per prompt.') 90 parser.add_argument('--use-beam-search', action='store_true') 91 parser.add_argument('--num-iters', 92 type=int, 93 default=3, 94 help='Number of iterations to run.') 95 parser.add_argument('--trust-remote-code', 96 action='store_true', 97 help='trust remote code from huggingface') 98 parser.add_argument( 99 '--dtype', 100 type=str, 101 default='auto', 102 choices=['auto', 'half', 'float16', 'bfloat16', 'float', 'float32'], 103 help='data type for model weights and activations. ' 104 'The "auto" option will use FP16 precision ' 105 'for FP32 and FP16 models, and BF16 precision ' 106 'for BF16 models.') 107 parser.add_argument( 108 '--profile', 109 action='store_true', 110 help='profile the generation process of a single batch') 111 args = parser.parse_args() 112 main(args) 113 [end of benchmarks/benchmark_latency.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/benchmarks/benchmark_latency.py b/benchmarks/benchmark_latency.py --- a/benchmarks/benchmark_latency.py +++ b/benchmarks/benchmark_latency.py @@ -19,7 +19,6 @@ tokenizer=args.tokenizer, quantization=args.quantization, tensor_parallel_size=args.tensor_parallel_size, - max_num_seqs=args.batch_size, trust_remote_code=args.trust_remote_code, dtype=args.dtype, )
{"golden_diff": "diff --git a/benchmarks/benchmark_latency.py b/benchmarks/benchmark_latency.py\n--- a/benchmarks/benchmark_latency.py\n+++ b/benchmarks/benchmark_latency.py\n@@ -19,7 +19,6 @@\n tokenizer=args.tokenizer,\n quantization=args.quantization,\n tensor_parallel_size=args.tensor_parallel_size,\n- max_num_seqs=args.batch_size,\n trust_remote_code=args.trust_remote_code,\n dtype=args.dtype,\n )\n", "issue": "benchmark_latency.py will hang when --batchsize=1 and --n=2\nbenchmark_latency.py will hang if setting --batchsize=1 and --n=2or4or8.\r\nshould max_num_seqs been set as args.batch_size * args.n \uff1f\r\nhttps://github.com/vllm-project/vllm/blob/main/benchmarks/benchmark_latency.py#L23\n", "before_files": [{"content": "\"\"\"Benchmark the latency of processing a single batch of requests.\"\"\"\nimport argparse\nimport time\n\nimport numpy as np\nimport torch\nfrom tqdm import tqdm\n\nfrom vllm import LLM, SamplingParams\n\n\ndef main(args: argparse.Namespace):\n print(args)\n\n # NOTE(woosuk): If the request cannot be processed in a single batch,\n # the engine will automatically process the request in multiple batches.\n llm = LLM(\n model=args.model,\n tokenizer=args.tokenizer,\n quantization=args.quantization,\n tensor_parallel_size=args.tensor_parallel_size,\n max_num_seqs=args.batch_size,\n trust_remote_code=args.trust_remote_code,\n dtype=args.dtype,\n )\n\n sampling_params = SamplingParams(\n n=args.n,\n temperature=0.0 if args.use_beam_search else 1.0,\n top_p=1.0,\n use_beam_search=args.use_beam_search,\n ignore_eos=True,\n max_tokens=args.output_len,\n )\n print(sampling_params)\n dummy_prompt_token_ids = [[0] * args.input_len] * args.batch_size\n\n def run_to_completion(profile: bool = False):\n if profile:\n with torch.profiler.profile(activities=[\n torch.profiler.ProfilerActivity.CPU,\n torch.profiler.ProfilerActivity.CUDA,\n ]) as p:\n llm.generate(prompt_token_ids=dummy_prompt_token_ids,\n sampling_params=sampling_params,\n use_tqdm=False)\n print(p.key_averages())\n else:\n start_time = time.perf_counter()\n llm.generate(prompt_token_ids=dummy_prompt_token_ids,\n sampling_params=sampling_params,\n use_tqdm=False)\n end_time = time.perf_counter()\n latency = end_time - start_time\n return latency\n\n print(\"Warming up...\")\n run_to_completion(profile=False)\n\n if args.profile:\n print(\"Profiling...\")\n run_to_completion(profile=True)\n return\n\n # Benchmark.\n latencies = []\n for _ in tqdm(range(args.num_iters), desc=\"Profiling iterations\"):\n latencies.append(run_to_completion(profile=False))\n print(f'Avg latency: {np.mean(latencies)} seconds')\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description='Benchmark the latency of processing a single batch of '\n 'requests till completion.')\n parser.add_argument('--model', type=str, default='facebook/opt-125m')\n parser.add_argument('--tokenizer', type=str, default=None)\n parser.add_argument('--quantization',\n '-q',\n choices=['awq', 'squeezellm', None],\n default=None)\n parser.add_argument('--tensor-parallel-size', '-tp', type=int, default=1)\n parser.add_argument('--input-len', type=int, default=32)\n parser.add_argument('--output-len', type=int, default=128)\n parser.add_argument('--batch-size', type=int, default=8)\n parser.add_argument('--n',\n type=int,\n default=1,\n help='Number of generated sequences per prompt.')\n parser.add_argument('--use-beam-search', action='store_true')\n parser.add_argument('--num-iters',\n type=int,\n default=3,\n help='Number of iterations to 
run.')\n parser.add_argument('--trust-remote-code',\n action='store_true',\n help='trust remote code from huggingface')\n parser.add_argument(\n '--dtype',\n type=str,\n default='auto',\n choices=['auto', 'half', 'float16', 'bfloat16', 'float', 'float32'],\n help='data type for model weights and activations. '\n 'The \"auto\" option will use FP16 precision '\n 'for FP32 and FP16 models, and BF16 precision '\n 'for BF16 models.')\n parser.add_argument(\n '--profile',\n action='store_true',\n help='profile the generation process of a single batch')\n args = parser.parse_args()\n main(args)\n", "path": "benchmarks/benchmark_latency.py"}]}
1,714
100
gh_patches_debug_2566
rasdani/github-patches
git_diff
getpelican__pelican-2632
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add Markdown as an (optional) dependency Since its inception, this project has taken the collective position that since not everyone uses Markdown, the `markdown` package should not be a dependency of the project and should instead be manually installed by users who want to use Markdown. On the other hand, the `docutils` package that parses reStructuredText markup has always been a dependency. Particularly given Markdown's widespread popularity, I cannot come up with a compelling argument to support the continuation of this discrepancy. This project is littered with issues caused by environments with missing `markdown` packages. Many folks have encountered frustration when trying to build their Markdown-content sites, only to be met with an (often inscrutable) error message. And that's just the folks who have taken the time to file issues! Imagine how many threw up their hands and just moved on to another solution… Adding Markdown as a dependency would have significant benefits, including simplifying installation instructions, reducing frustration, and minimizing support request load. So to take another approach at this topic… What are the reasons for *not* adding Markdown as a dependency, and do you think they are sufficiently important to out-weigh the concerns raised above? </issue> <code> [start of setup.py] 1 #!/usr/bin/env python 2 import sys 3 from io import open 4 from os import walk 5 from os.path import join, relpath 6 7 from setuptools import setup 8 9 10 version = "4.1.2" 11 12 requires = ['feedgenerator >= 1.9', 'jinja2 >= 2.7', 'pygments', 'docutils', 13 'pytz >= 0a', 'blinker', 'unidecode', 'six >= 1.4', 14 'python-dateutil'] 15 16 entry_points = { 17 'console_scripts': [ 18 'pelican = pelican.__main__:main', 19 'pelican-import = pelican.tools.pelican_import:main', 20 'pelican-quickstart = pelican.tools.pelican_quickstart:main', 21 'pelican-themes = pelican.tools.pelican_themes:main' 22 ] 23 } 24 25 README = open('README.rst', encoding='utf-8').read() 26 CHANGELOG = open('docs/changelog.rst', encoding='utf-8').read() 27 28 description = u'\n'.join([README, CHANGELOG]) 29 if sys.version_info.major < 3: 30 description = description.encode('utf-8') 31 32 setup( 33 name='pelican', 34 version=version, 35 url='https://getpelican.com/', 36 author='Justin Mayer', 37 author_email='[email protected]', 38 description="Static site generator supporting reStructuredText and " 39 "Markdown source content.", 40 project_urls={ 41 'Documentation': 'https://docs.getpelican.com/', 42 'Funding': 'https://donate.getpelican.com/', 43 'Source': 'https://github.com/getpelican/pelican', 44 'Tracker': 'https://github.com/getpelican/pelican/issues', 45 }, 46 keywords='static web site generator SSG reStructuredText Markdown', 47 license='AGPLv3', 48 long_description=description, 49 packages=['pelican', 'pelican.tools'], 50 package_data={ 51 # we manually collect the package data, as opposed to using, 52 # include_package_data=True because we don't want the tests to be 53 # included automatically as package data (MANIFEST.in is too greedy) 54 'pelican': [relpath(join(root, name), 'pelican') 55 for root, _, names in walk(join('pelican', 'themes')) 56 for name in names], 57 'pelican.tools': [relpath(join(root, name), join('pelican', 'tools')) 58 for root, _, names in walk(join('pelican', 59 'tools', 60 'templates')) 61 for name in names], 62 }, 63 install_requires=requires, 64 
entry_points=entry_points, 65 classifiers=[ 66 'Development Status :: 5 - Production/Stable', 67 'Environment :: Console', 68 'Framework :: Pelican', 69 'License :: OSI Approved :: GNU Affero General Public License v3', 70 'Operating System :: OS Independent', 71 'Programming Language :: Python :: 2', 72 'Programming Language :: Python :: 2.7', 73 'Programming Language :: Python :: 3', 74 'Programming Language :: Python :: 3.5', 75 'Programming Language :: Python :: 3.6', 76 'Programming Language :: Python :: 3.7', 77 'Programming Language :: Python :: Implementation :: CPython', 78 'Topic :: Internet :: WWW/HTTP', 79 'Topic :: Software Development :: Libraries :: Python Modules', 80 ], 81 test_suite='pelican.tests', 82 ) 83 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -61,6 +61,9 @@
                           for name in names],
     },
     install_requires=requires,
+    extras_require={
+        'Markdown': ['markdown~=3.1.1']
+    },
     entry_points=entry_points,
     classifiers=[
         'Development Status :: 5 - Production/Stable',
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -61,6 +61,9 @@\n for name in names],\n },\n install_requires=requires,\n+ extras_require={\n+ 'Markdown': ['markdown~=3.1.1']\n+ },\n entry_points=entry_points,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n", "issue": "Add Markdown as an (optional) dependency\nSince its inception, this project has taken the collective position that since not everyone uses Markdown, the `markdown` package should not be a dependency of the project and should instead be manually installed by users who want to use Markdown.\r\n\r\nOn the other hand, the `docutils` package that parses reStructuredText markup has always been a dependency. Particularly given Markdown's widespread popularity, I cannot come up with a compelling argument to support the continuation of this discrepancy.\r\n\r\nThis project is littered with issues caused by environments with missing `markdown` packages. Many folks have encountered frustration when trying to build their Markdown-content sites, only to be met with an (often inscrutable) error message. And that's just the folks who have taken the time to file issues! Imagine how many threw up their hands and just moved on to another solution\u2026\r\n\r\nAdding Markdown as a dependency would have significant benefits, including simplifying installation instructions, reducing frustration, and minimizing support request load. So to take another approach at this topic\u2026\u00a0What are the reasons for *not* adding Markdown as a dependency, and do you think they are sufficiently important to out-weigh the concerns raised above?\n", "before_files": [{"content": "#!/usr/bin/env python\nimport sys\nfrom io import open\nfrom os import walk\nfrom os.path import join, relpath\n\nfrom setuptools import setup\n\n\nversion = \"4.1.2\"\n\nrequires = ['feedgenerator >= 1.9', 'jinja2 >= 2.7', 'pygments', 'docutils',\n 'pytz >= 0a', 'blinker', 'unidecode', 'six >= 1.4',\n 'python-dateutil']\n\nentry_points = {\n 'console_scripts': [\n 'pelican = pelican.__main__:main',\n 'pelican-import = pelican.tools.pelican_import:main',\n 'pelican-quickstart = pelican.tools.pelican_quickstart:main',\n 'pelican-themes = pelican.tools.pelican_themes:main'\n ]\n}\n\nREADME = open('README.rst', encoding='utf-8').read()\nCHANGELOG = open('docs/changelog.rst', encoding='utf-8').read()\n\ndescription = u'\\n'.join([README, CHANGELOG])\nif sys.version_info.major < 3:\n description = description.encode('utf-8')\n\nsetup(\n name='pelican',\n version=version,\n url='https://getpelican.com/',\n author='Justin Mayer',\n author_email='[email protected]',\n description=\"Static site generator supporting reStructuredText and \"\n \"Markdown source content.\",\n project_urls={\n 'Documentation': 'https://docs.getpelican.com/',\n 'Funding': 'https://donate.getpelican.com/',\n 'Source': 'https://github.com/getpelican/pelican',\n 'Tracker': 'https://github.com/getpelican/pelican/issues',\n },\n keywords='static web site generator SSG reStructuredText Markdown',\n license='AGPLv3',\n long_description=description,\n packages=['pelican', 'pelican.tools'],\n package_data={\n # we manually collect the package data, as opposed to using,\n # include_package_data=True because we don't want the tests to be\n # included automatically as package data (MANIFEST.in is too greedy)\n 'pelican': [relpath(join(root, name), 'pelican')\n for root, _, names in walk(join('pelican', 'themes'))\n for name in names],\n 'pelican.tools': 
[relpath(join(root, name), join('pelican', 'tools'))\n for root, _, names in walk(join('pelican',\n 'tools',\n 'templates'))\n for name in names],\n },\n install_requires=requires,\n entry_points=entry_points,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Framework :: Pelican',\n 'License :: OSI Approved :: GNU Affero General Public License v3',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n test_suite='pelican.tests',\n)\n", "path": "setup.py"}]}
1,666
90
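The rows above pair each issue prompt with a reference patch (golden_diff) and machine-checkable verification_info listing the pre-patch files. The following is a rough sketch, not a confirmed recipe, of how a consumer might dry-run a record's reference patch against its "before" files. It assumes the rows come from the "rasdani/github-patches" dataset on the Hugging Face Hub, that verification_info is stored as a JSON string as it appears in the rows above, that the split is named "train", and that a git binary is on PATH.

```python
# Rough sketch only. Assumptions (not confirmed by the rows above): the dataset id
# "rasdani/github-patches" and split "train" on the Hugging Face Hub, a local `git`
# binary, and `verification_info` stored as a JSON string.
import json
import pathlib
import subprocess
import tempfile


def check_record(record: dict) -> bool:
    """Write the record's 'before' files to a temp dir and dry-run its patch."""
    info = record["verification_info"]
    if isinstance(info, str):  # appears as a JSON string in the rows above
        info = json.loads(info)
    with tempfile.TemporaryDirectory() as workdir:
        for entry in info["before_files"]:
            target = pathlib.Path(workdir, entry["path"])
            target.parent.mkdir(parents=True, exist_ok=True)
            target.write_text(entry["content"])
        # `git apply --check` reads the patch from stdin and reports whether it
        # would apply cleanly, without modifying the files on disk.
        result = subprocess.run(
            ["git", "apply", "--check"],
            cwd=workdir,
            input=record["golden_diff"],
            text=True,
            capture_output=True,
        )
    return result.returncode == 0


if __name__ == "__main__":
    from datasets import load_dataset  # assumes the `datasets` package is installed

    rows = load_dataset("rasdani/github-patches", split="train")  # split name is a guess
    print(check_record(rows[0]))
```

For a fuller verification loop, the same setup could apply the patch for real (dropping `--check`) and then run the project's own test suite, but that is beyond what the rows above specify.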