Dataset schema:

| Column | Type | Lengths / values |
| --- | --- | --- |
| problem_id | string | lengths 18–22 |
| source | string | 1 distinct value |
| task_type | string | 1 distinct value |
| in_source_id | string | lengths 13–58 |
| prompt | string | lengths 1.71k–18.9k |
| golden_diff | string | lengths 145–5.13k |
| verification_info | string | lengths 465–23.6k |
| num_tokens_prompt | int64 | 556–4.1k |
| num_tokens_diff | int64 | 47–1.02k |
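Each record pairs a GitHub issue statement and the relevant pre-patch source files (the `prompt` and `verification_info` columns) with the reference patch that resolved it (`golden_diff`), plus precomputed token counts. The snippet below is a minimal sketch of how such a dataset could be loaded and one row inspected with the Hugging Face `datasets` library; the hub ID is copied from the `source` column of the rows that follow and the `train` split name is an assumption, not something this page confirms.

```python
import json

# Minimal sketch, assuming the dataset is published under the ID seen in the
# `source` column ("rasdani/github-patches") with a "train" split; substitute
# the real hub path/split if they differ.
from datasets import load_dataset

ds = load_dataset("rasdani/github-patches", split="train")

row = ds[0]
print(row["problem_id"], row["task_type"], row["in_source_id"])

# `prompt` holds the issue statement plus the partial code base;
# `golden_diff` holds the reference patch as plain unified-diff text.
print(row["golden_diff"][:200])

# `verification_info` is a JSON string bundling the golden diff, the issue
# text, and the pre-patch file contents.
info = json.loads(row["verification_info"])
print(list(info))                       # golden_diff, issue, before_files
print(info["before_files"][0]["path"])  # e.g. "pex/util.py" in the first row
```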
problem_id: gh_patches_debug_50209
source: rasdani/github-patches
task_type: git_diff
in_source_id: pex-tool__pex-2286
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> `venv create` no longer includes `--sources-directory` contents when all files are nested It seems like there was a regression from 2.1.148 -> 2.1.149 with the behaviour of `venv create` with a `--pex-repository` that was created with `--sources-directory`: those sources aren't included in the final venv. Reproducer: ```shell cd $(mktemp -d) # create our dummy file mkdir -p source_files/foo touch source_files/foo/bar.py # NB.1 # touch source_files/qux.py # NB.2 for version in v2.1.148 v2.1.149; do curl -s -L https://github.com/pantsbuild/pex/releases/download/$version/pex > pex-$version chmod +x pex-$version # NB.3 ./pex-$version --output-file=repository-$version.pex --sources-directory=source_files # NB.4 PEX_SCRIPT=pex3 ./pex-$version venv create --dest-dir=dest-$version --pex-repository=repository-$version.pex --layout=flat # what was included? tree dest-$version done ``` Running that shows that the contents of the `dest-...` directory depends on the version, without the `bar.py` file when using v2.1.149, but should be the same: ``` dest-v2.1.148 └── foo └── bar.py 1 directory, 1 file dest-v2.1.149 0 directories, 0 files ``` Ablative studies: - uncommenting `NB.2` line (to have two files) passes βœ… (both versions have both `foo/bar.py` and `qux.py`) - _replacing_ the `NB.1` with `NB.2` (to just `qux.py` at the top level) passes βœ… - _always_ using v2.1.148 on line `NB.3` (create the pex) and v2.1.149 on line `NB.4` (create the venv) passes βœ… - v2.1.149 for `NB.3` and v2.1.148 for `NB.4` fails ❌ - I think third-party dependencies work okay, but haven't confirmed in this reduced setting - This reproduces without `--layout`, but the output is simpler with `--layout=flat` (First observed in https://github.com/pantsbuild/pants/pull/20149.) </issue> <code> [start of pex/util.py] 1 # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md). 2 # Licensed under the Apache License, Version 2.0 (see LICENSE). 3 4 from __future__ import absolute_import 5 6 import contextlib 7 import hashlib 8 import importlib 9 import os 10 import shutil 11 import tempfile 12 from hashlib import sha1 13 from site import makepath # type: ignore[attr-defined] 14 15 from pex import hashing 16 from pex.common import is_pyc_dir, is_pyc_file, safe_mkdir, safe_mkdtemp 17 from pex.compatibility import ( # type: ignore[attr-defined] # `exec_function` is defined dynamically 18 PY2, 19 exec_function, 20 ) 21 from pex.orderedset import OrderedSet 22 from pex.typing import TYPE_CHECKING 23 24 if TYPE_CHECKING: 25 from typing import IO, Any, Callable, Iterator, Optional, Text 26 27 from pex.hashing import Hasher 28 29 30 class DistributionHelper(object): 31 # TODO(#584: This appears unused, but clients might still use it. We cannot remove until we 32 # have a deprecation policy. 33 @classmethod 34 def access_zipped_assets(cls, static_module_name, static_path, dir_location=None): 35 # type: (str, str, Optional[str]) -> str 36 """Create a copy of static resource files as we can't serve them from within the pex file. 
37 38 :param static_module_name: Module name containing module to cache in a tempdir 39 :param static_path: Module name, for example 'serverset' 40 :param dir_location: create a new temporary directory inside, or None to have one created 41 :returns temp_dir: Temporary directory with the zipped assets inside 42 """ 43 if dir_location is None: 44 temp_dir = safe_mkdtemp() 45 else: 46 temp_dir = dir_location 47 48 module = importlib.import_module(static_module_name) 49 # N.B.: This handles namespace packages new and old. 50 paths = OrderedSet(os.path.realpath(d) for d in getattr(module, "__path__", [])) 51 if module.__file__: 52 # And this handles old-style __init__.py packages. 53 paths.add(os.path.realpath(module.__file__)) 54 55 safe_mkdir(temp_dir) 56 for path in paths: 57 resource_dir = os.path.realpath(os.path.join(path, static_path)) 58 if os.path.isdir(resource_dir): 59 for root, dirs, files in os.walk(resource_dir): 60 for d in dirs: 61 safe_mkdir( 62 os.path.join( 63 temp_dir, os.path.relpath(os.path.join(root, d), resource_dir) 64 ) 65 ) 66 for f in files: 67 src = os.path.join(root, f) 68 shutil.copy(src, os.path.join(temp_dir, os.path.relpath(src, resource_dir))) 69 return temp_dir 70 71 72 class CacheHelper(object): 73 @classmethod 74 def hash(cls, path, digest=None, hasher=sha1): 75 # type: (Text, Optional[Hasher], Callable[[], Hasher]) -> str 76 """Return the digest of a single file in a memory-efficient manner.""" 77 if digest is None: 78 digest = hasher() 79 hashing.file_hash(path, digest) 80 return digest.hexdigest() 81 82 @classmethod 83 def pex_code_hash(cls, directory): 84 # type: (str) -> str 85 """Return a reproducible hash of the contents of a loose PEX; excluding all `.pyc` files.""" 86 digest = hashlib.sha1() 87 hashing.dir_hash( 88 directory=directory, 89 digest=digest, 90 dir_filter=is_pyc_dir, 91 file_filter=lambda file_path: not is_pyc_file(file_path) 92 and not file_path.startswith("."), 93 ) 94 return digest.hexdigest() 95 96 @classmethod 97 def dir_hash(cls, directory, digest=None, hasher=sha1): 98 # type: (str, Optional[Hasher], Callable[[], Hasher]) -> str 99 """Return a reproducible hash of the contents of a directory; excluding all `.pyc` files.""" 100 if digest is None: 101 digest = hasher() 102 hashing.dir_hash( 103 directory=directory, 104 digest=digest, 105 dir_filter=lambda d: not is_pyc_dir(d), 106 file_filter=lambda f: not is_pyc_file(f), 107 ) 108 return digest.hexdigest() 109 110 @classmethod 111 def zip_hash( 112 cls, 113 zip_path, # type: str 114 relpath=None, # type: Optional[str] 115 ): 116 # type: (...) -> str 117 """Return a reproducible hash of the contents of a zip; excluding all `.pyc` files.""" 118 digest = hashlib.sha1() 119 hashing.zip_hash( 120 zip_path=zip_path, 121 digest=digest, 122 relpath=relpath, 123 dir_filter=lambda d: not is_pyc_dir(d), 124 file_filter=lambda f: not is_pyc_file(f), 125 ) 126 return digest.hexdigest() 127 128 129 @contextlib.contextmanager 130 def named_temporary_file(**kwargs): 131 # type: (**Any) -> Iterator[IO] 132 """Due to a bug in python (https://bugs.python.org/issue14243), we need this to be able to use 133 the temporary file without deleting it.""" 134 assert "delete" not in kwargs 135 kwargs["delete"] = False 136 fp = tempfile.NamedTemporaryFile(**kwargs) 137 try: 138 with fp: 139 yield fp 140 finally: 141 os.remove(fp.name) 142 [end of pex/util.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. 
Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pex/util.py b/pex/util.py --- a/pex/util.py +++ b/pex/util.py @@ -87,7 +87,7 @@ hashing.dir_hash( directory=directory, digest=digest, - dir_filter=is_pyc_dir, + dir_filter=lambda d: not is_pyc_dir(d), file_filter=lambda file_path: not is_pyc_file(file_path) and not file_path.startswith("."), )
{"golden_diff": "diff --git a/pex/util.py b/pex/util.py\n--- a/pex/util.py\n+++ b/pex/util.py\n@@ -87,7 +87,7 @@\n hashing.dir_hash(\n directory=directory,\n digest=digest,\n- dir_filter=is_pyc_dir,\n+ dir_filter=lambda d: not is_pyc_dir(d),\n file_filter=lambda file_path: not is_pyc_file(file_path)\n and not file_path.startswith(\".\"),\n )\n", "issue": "`venv create` no longer includes `--sources-directory` contents when all files are nested\nIt seems like there was a regression from 2.1.148 -> 2.1.149 with the behaviour of `venv create` with a `--pex-repository` that was created with `--sources-directory`: those sources aren't included in the final venv.\r\n\r\nReproducer:\r\n\r\n```shell\r\ncd $(mktemp -d)\r\n\r\n# create our dummy file\r\nmkdir -p source_files/foo\r\ntouch source_files/foo/bar.py # NB.1\r\n# touch source_files/qux.py # NB.2\r\n\r\nfor version in v2.1.148 v2.1.149; do\r\n curl -s -L https://github.com/pantsbuild/pex/releases/download/$version/pex > pex-$version\r\n chmod +x pex-$version\r\n\r\n # NB.3\r\n ./pex-$version --output-file=repository-$version.pex --sources-directory=source_files\r\n\r\n # NB.4\r\n PEX_SCRIPT=pex3 ./pex-$version venv create --dest-dir=dest-$version --pex-repository=repository-$version.pex --layout=flat\r\n\r\n # what was included?\r\n tree dest-$version\r\ndone\r\n```\r\n\r\nRunning that shows that the contents of the `dest-...` directory depends on the version, without the `bar.py` file when using v2.1.149, but should be the same:\r\n\r\n```\r\ndest-v2.1.148\r\n\u2514\u2500\u2500 foo\r\n \u2514\u2500\u2500 bar.py\r\n\r\n1 directory, 1 file\r\ndest-v2.1.149\r\n\r\n0 directories, 0 files\r\n```\r\n\r\nAblative studies:\r\n\r\n- uncommenting `NB.2` line (to have two files) passes \u2705 (both versions have both `foo/bar.py` and `qux.py`)\r\n- _replacing_ the `NB.1` with `NB.2` (to just `qux.py` at the top level) passes \u2705 \r\n- _always_ using v2.1.148 on line `NB.3` (create the pex) and v2.1.149 on line `NB.4` (create the venv) passes \u2705 \r\n- v2.1.149 for `NB.3` and v2.1.148 for `NB.4` fails \u274c \r\n- I think third-party dependencies work okay, but haven't confirmed in this reduced setting\r\n- This reproduces without `--layout`, but the output is simpler with `--layout=flat`\r\n\r\n(First observed in https://github.com/pantsbuild/pants/pull/20149.)\n", "before_files": [{"content": "# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import absolute_import\n\nimport contextlib\nimport hashlib\nimport importlib\nimport os\nimport shutil\nimport tempfile\nfrom hashlib import sha1\nfrom site import makepath # type: ignore[attr-defined]\n\nfrom pex import hashing\nfrom pex.common import is_pyc_dir, is_pyc_file, safe_mkdir, safe_mkdtemp\nfrom pex.compatibility import ( # type: ignore[attr-defined] # `exec_function` is defined dynamically\n PY2,\n exec_function,\n)\nfrom pex.orderedset import OrderedSet\nfrom pex.typing import TYPE_CHECKING\n\nif TYPE_CHECKING:\n from typing import IO, Any, Callable, Iterator, Optional, Text\n\n from pex.hashing import Hasher\n\n\nclass DistributionHelper(object):\n # TODO(#584: This appears unused, but clients might still use it. 
We cannot remove until we\n # have a deprecation policy.\n @classmethod\n def access_zipped_assets(cls, static_module_name, static_path, dir_location=None):\n # type: (str, str, Optional[str]) -> str\n \"\"\"Create a copy of static resource files as we can't serve them from within the pex file.\n\n :param static_module_name: Module name containing module to cache in a tempdir\n :param static_path: Module name, for example 'serverset'\n :param dir_location: create a new temporary directory inside, or None to have one created\n :returns temp_dir: Temporary directory with the zipped assets inside\n \"\"\"\n if dir_location is None:\n temp_dir = safe_mkdtemp()\n else:\n temp_dir = dir_location\n\n module = importlib.import_module(static_module_name)\n # N.B.: This handles namespace packages new and old.\n paths = OrderedSet(os.path.realpath(d) for d in getattr(module, \"__path__\", []))\n if module.__file__:\n # And this handles old-style __init__.py packages.\n paths.add(os.path.realpath(module.__file__))\n\n safe_mkdir(temp_dir)\n for path in paths:\n resource_dir = os.path.realpath(os.path.join(path, static_path))\n if os.path.isdir(resource_dir):\n for root, dirs, files in os.walk(resource_dir):\n for d in dirs:\n safe_mkdir(\n os.path.join(\n temp_dir, os.path.relpath(os.path.join(root, d), resource_dir)\n )\n )\n for f in files:\n src = os.path.join(root, f)\n shutil.copy(src, os.path.join(temp_dir, os.path.relpath(src, resource_dir)))\n return temp_dir\n\n\nclass CacheHelper(object):\n @classmethod\n def hash(cls, path, digest=None, hasher=sha1):\n # type: (Text, Optional[Hasher], Callable[[], Hasher]) -> str\n \"\"\"Return the digest of a single file in a memory-efficient manner.\"\"\"\n if digest is None:\n digest = hasher()\n hashing.file_hash(path, digest)\n return digest.hexdigest()\n\n @classmethod\n def pex_code_hash(cls, directory):\n # type: (str) -> str\n \"\"\"Return a reproducible hash of the contents of a loose PEX; excluding all `.pyc` files.\"\"\"\n digest = hashlib.sha1()\n hashing.dir_hash(\n directory=directory,\n digest=digest,\n dir_filter=is_pyc_dir,\n file_filter=lambda file_path: not is_pyc_file(file_path)\n and not file_path.startswith(\".\"),\n )\n return digest.hexdigest()\n\n @classmethod\n def dir_hash(cls, directory, digest=None, hasher=sha1):\n # type: (str, Optional[Hasher], Callable[[], Hasher]) -> str\n \"\"\"Return a reproducible hash of the contents of a directory; excluding all `.pyc` files.\"\"\"\n if digest is None:\n digest = hasher()\n hashing.dir_hash(\n directory=directory,\n digest=digest,\n dir_filter=lambda d: not is_pyc_dir(d),\n file_filter=lambda f: not is_pyc_file(f),\n )\n return digest.hexdigest()\n\n @classmethod\n def zip_hash(\n cls,\n zip_path, # type: str\n relpath=None, # type: Optional[str]\n ):\n # type: (...) 
-> str\n \"\"\"Return a reproducible hash of the contents of a zip; excluding all `.pyc` files.\"\"\"\n digest = hashlib.sha1()\n hashing.zip_hash(\n zip_path=zip_path,\n digest=digest,\n relpath=relpath,\n dir_filter=lambda d: not is_pyc_dir(d),\n file_filter=lambda f: not is_pyc_file(f),\n )\n return digest.hexdigest()\n\n\[email protected]\ndef named_temporary_file(**kwargs):\n # type: (**Any) -> Iterator[IO]\n \"\"\"Due to a bug in python (https://bugs.python.org/issue14243), we need this to be able to use\n the temporary file without deleting it.\"\"\"\n assert \"delete\" not in kwargs\n kwargs[\"delete\"] = False\n fp = tempfile.NamedTemporaryFile(**kwargs)\n try:\n with fp:\n yield fp\n finally:\n os.remove(fp.name)\n", "path": "pex/util.py"}]}
num_tokens_prompt: 2,612
num_tokens_diff: 103
problem_id: gh_patches_debug_28189
source: rasdani/github-patches
task_type: git_diff
in_source_id: netbox-community__netbox-4718
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> API Definitions for available-ips and available-prefixes are incorrect <!-- NOTE: IF YOUR ISSUE DOES NOT FOLLOW THIS TEMPLATE, IT WILL BE CLOSED. Please indicate the nature of the change by placing an X in one of the boxes below. --> ### Change Type [ ] Addition [x] Correction [ ] Deprecation [ ] Cleanup (formatting, typos, etc.) ### Area [ ] Installation instructions [ ] Configuration parameters [ ] Functionality/features [x] REST API [ ] Administration/development [ ] Other <!-- Describe the proposed change(s). --> ### Proposed Changes I'm writing a ipam project with go-netbox(https://github.com/kobayashi/go-netbox for v2.8), which generate the client from swagger. And I'm using the prefix/available-ips api to create unused ip address. I notice that the api documents in http://netbox/api/docs/ has difference with actually code: * The request model should be IPAddress rather than WritablePrefix _ref: https://github.com/netbox-community/netbox/blob/v2.8.3/netbox/ipam/api/views.py#L211_ * The return code should be 201 rather than 200 _ref: https://github.com/netbox-community/netbox/blob/v2.8.3/netbox/ipam/api/views.py#L218_ These mismatches make go-netbox client unable to work correctly. I would appreciate if this can be fixed. </issue> <code> [start of netbox/ipam/api/views.py] 1 from django.conf import settings 2 from django.db.models import Count 3 from django.shortcuts import get_object_or_404 4 from django_pglocks import advisory_lock 5 from drf_yasg.utils import swagger_auto_schema 6 from rest_framework import status 7 from rest_framework.decorators import action 8 from rest_framework.exceptions import PermissionDenied 9 from rest_framework.response import Response 10 11 from extras.api.views import CustomFieldModelViewSet 12 from ipam import filters 13 from ipam.models import Aggregate, IPAddress, Prefix, RIR, Role, Service, VLAN, VLANGroup, VRF 14 from utilities.api import ModelViewSet 15 from utilities.constants import ADVISORY_LOCK_KEYS 16 from utilities.utils import get_subquery 17 from . 
import serializers 18 19 20 # 21 # VRFs 22 # 23 24 class VRFViewSet(CustomFieldModelViewSet): 25 queryset = VRF.objects.prefetch_related('tenant').prefetch_related('tags').annotate( 26 ipaddress_count=get_subquery(IPAddress, 'vrf'), 27 prefix_count=get_subquery(Prefix, 'vrf') 28 ) 29 serializer_class = serializers.VRFSerializer 30 filterset_class = filters.VRFFilterSet 31 32 33 # 34 # RIRs 35 # 36 37 class RIRViewSet(ModelViewSet): 38 queryset = RIR.objects.annotate( 39 aggregate_count=Count('aggregates') 40 ) 41 serializer_class = serializers.RIRSerializer 42 filterset_class = filters.RIRFilterSet 43 44 45 # 46 # Aggregates 47 # 48 49 class AggregateViewSet(CustomFieldModelViewSet): 50 queryset = Aggregate.objects.prefetch_related('rir').prefetch_related('tags') 51 serializer_class = serializers.AggregateSerializer 52 filterset_class = filters.AggregateFilterSet 53 54 55 # 56 # Roles 57 # 58 59 class RoleViewSet(ModelViewSet): 60 queryset = Role.objects.annotate( 61 prefix_count=get_subquery(Prefix, 'role'), 62 vlan_count=get_subquery(VLAN, 'role') 63 ) 64 serializer_class = serializers.RoleSerializer 65 filterset_class = filters.RoleFilterSet 66 67 68 # 69 # Prefixes 70 # 71 72 class PrefixViewSet(CustomFieldModelViewSet): 73 queryset = Prefix.objects.prefetch_related('site', 'vrf__tenant', 'tenant', 'vlan', 'role', 'tags') 74 serializer_class = serializers.PrefixSerializer 75 filterset_class = filters.PrefixFilterSet 76 77 @swagger_auto_schema( 78 methods=['get', 'post'], 79 responses={ 80 200: serializers.AvailablePrefixSerializer(many=True), 81 } 82 ) 83 @action(detail=True, url_path='available-prefixes', methods=['get', 'post']) 84 @advisory_lock(ADVISORY_LOCK_KEYS['available-prefixes']) 85 def available_prefixes(self, request, pk=None): 86 """ 87 A convenience method for returning available child prefixes within a parent. 88 89 The advisory lock decorator uses a PostgreSQL advisory lock to prevent this API from being 90 invoked in parallel, which results in a race condition where multiple insertions can occur. 
91 """ 92 prefix = get_object_or_404(Prefix, pk=pk) 93 available_prefixes = prefix.get_available_prefixes() 94 95 if request.method == 'POST': 96 97 # Permissions check 98 if not request.user.has_perm('ipam.add_prefix'): 99 raise PermissionDenied() 100 101 # Validate Requested Prefixes' length 102 serializer = serializers.PrefixLengthSerializer( 103 data=request.data if isinstance(request.data, list) else [request.data], 104 many=True, 105 context={ 106 'request': request, 107 'prefix': prefix, 108 } 109 ) 110 if not serializer.is_valid(): 111 return Response( 112 serializer.errors, 113 status=status.HTTP_400_BAD_REQUEST 114 ) 115 116 requested_prefixes = serializer.validated_data 117 # Allocate prefixes to the requested objects based on availability within the parent 118 for i, requested_prefix in enumerate(requested_prefixes): 119 120 # Find the first available prefix equal to or larger than the requested size 121 for available_prefix in available_prefixes.iter_cidrs(): 122 if requested_prefix['prefix_length'] >= available_prefix.prefixlen: 123 allocated_prefix = '{}/{}'.format(available_prefix.network, requested_prefix['prefix_length']) 124 requested_prefix['prefix'] = allocated_prefix 125 requested_prefix['vrf'] = prefix.vrf.pk if prefix.vrf else None 126 break 127 else: 128 return Response( 129 { 130 "detail": "Insufficient space is available to accommodate the requested prefix size(s)" 131 }, 132 status=status.HTTP_204_NO_CONTENT 133 ) 134 135 # Remove the allocated prefix from the list of available prefixes 136 available_prefixes.remove(allocated_prefix) 137 138 # Initialize the serializer with a list or a single object depending on what was requested 139 context = {'request': request} 140 if isinstance(request.data, list): 141 serializer = serializers.PrefixSerializer(data=requested_prefixes, many=True, context=context) 142 else: 143 serializer = serializers.PrefixSerializer(data=requested_prefixes[0], context=context) 144 145 # Create the new Prefix(es) 146 if serializer.is_valid(): 147 serializer.save() 148 return Response(serializer.data, status=status.HTTP_201_CREATED) 149 150 return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) 151 152 else: 153 154 serializer = serializers.AvailablePrefixSerializer(available_prefixes.iter_cidrs(), many=True, context={ 155 'request': request, 156 'vrf': prefix.vrf, 157 }) 158 159 return Response(serializer.data) 160 161 @swagger_auto_schema( 162 methods=['get', 'post'], 163 responses={ 164 200: serializers.AvailableIPSerializer(many=True), 165 } 166 ) 167 @action(detail=True, url_path='available-ips', methods=['get', 'post']) 168 @advisory_lock(ADVISORY_LOCK_KEYS['available-ips']) 169 def available_ips(self, request, pk=None): 170 """ 171 A convenience method for returning available IP addresses within a prefix. By default, the number of IPs 172 returned will be equivalent to PAGINATE_COUNT. An arbitrary limit (up to MAX_PAGE_SIZE, if set) may be passed, 173 however results will not be paginated. 174 175 The advisory lock decorator uses a PostgreSQL advisory lock to prevent this API from being 176 invoked in parallel, which results in a race condition where multiple insertions can occur. 
177 """ 178 prefix = get_object_or_404(Prefix, pk=pk) 179 180 # Create the next available IP within the prefix 181 if request.method == 'POST': 182 183 # Permissions check 184 if not request.user.has_perm('ipam.add_ipaddress'): 185 raise PermissionDenied() 186 187 # Normalize to a list of objects 188 requested_ips = request.data if isinstance(request.data, list) else [request.data] 189 190 # Determine if the requested number of IPs is available 191 available_ips = prefix.get_available_ips() 192 if available_ips.size < len(requested_ips): 193 return Response( 194 { 195 "detail": "An insufficient number of IP addresses are available within the prefix {} ({} " 196 "requested, {} available)".format(prefix, len(requested_ips), len(available_ips)) 197 }, 198 status=status.HTTP_204_NO_CONTENT 199 ) 200 201 # Assign addresses from the list of available IPs and copy VRF assignment from the parent prefix 202 available_ips = iter(available_ips) 203 prefix_length = prefix.prefix.prefixlen 204 for requested_ip in requested_ips: 205 requested_ip['address'] = '{}/{}'.format(next(available_ips), prefix_length) 206 requested_ip['vrf'] = prefix.vrf.pk if prefix.vrf else None 207 208 # Initialize the serializer with a list or a single object depending on what was requested 209 context = {'request': request} 210 if isinstance(request.data, list): 211 serializer = serializers.IPAddressSerializer(data=requested_ips, many=True, context=context) 212 else: 213 serializer = serializers.IPAddressSerializer(data=requested_ips[0], context=context) 214 215 # Create the new IP address(es) 216 if serializer.is_valid(): 217 serializer.save() 218 return Response(serializer.data, status=status.HTTP_201_CREATED) 219 220 return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) 221 222 # Determine the maximum number of IPs to return 223 else: 224 try: 225 limit = int(request.query_params.get('limit', settings.PAGINATE_COUNT)) 226 except ValueError: 227 limit = settings.PAGINATE_COUNT 228 if settings.MAX_PAGE_SIZE: 229 limit = min(limit, settings.MAX_PAGE_SIZE) 230 231 # Calculate available IPs within the prefix 232 ip_list = [] 233 for index, ip in enumerate(prefix.get_available_ips(), start=1): 234 ip_list.append(ip) 235 if index == limit: 236 break 237 serializer = serializers.AvailableIPSerializer(ip_list, many=True, context={ 238 'request': request, 239 'prefix': prefix.prefix, 240 'vrf': prefix.vrf, 241 }) 242 243 return Response(serializer.data) 244 245 246 # 247 # IP addresses 248 # 249 250 class IPAddressViewSet(CustomFieldModelViewSet): 251 queryset = IPAddress.objects.prefetch_related( 252 'vrf__tenant', 'tenant', 'nat_inside', 'interface__device__device_type', 'interface__virtual_machine', 253 'nat_outside', 'tags', 254 ) 255 serializer_class = serializers.IPAddressSerializer 256 filterset_class = filters.IPAddressFilterSet 257 258 259 # 260 # VLAN groups 261 # 262 263 class VLANGroupViewSet(ModelViewSet): 264 queryset = VLANGroup.objects.prefetch_related('site').annotate( 265 vlan_count=Count('vlans') 266 ) 267 serializer_class = serializers.VLANGroupSerializer 268 filterset_class = filters.VLANGroupFilterSet 269 270 271 # 272 # VLANs 273 # 274 275 class VLANViewSet(CustomFieldModelViewSet): 276 queryset = VLAN.objects.prefetch_related( 277 'site', 'group', 'tenant', 'role', 'tags' 278 ).annotate( 279 prefix_count=get_subquery(Prefix, 'vlan') 280 ) 281 serializer_class = serializers.VLANSerializer 282 filterset_class = filters.VLANFilterSet 283 284 285 # 286 # Services 287 # 288 289 class 
ServiceViewSet(ModelViewSet): 290 queryset = Service.objects.prefetch_related('device').prefetch_related('tags') 291 serializer_class = serializers.ServiceSerializer 292 filterset_class = filters.ServiceFilterSet 293 [end of netbox/ipam/api/views.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/netbox/ipam/api/views.py b/netbox/ipam/api/views.py --- a/netbox/ipam/api/views.py +++ b/netbox/ipam/api/views.py @@ -74,12 +74,8 @@ serializer_class = serializers.PrefixSerializer filterset_class = filters.PrefixFilterSet - @swagger_auto_schema( - methods=['get', 'post'], - responses={ - 200: serializers.AvailablePrefixSerializer(many=True), - } - ) + @swagger_auto_schema(method='get', responses={200: serializers.AvailablePrefixSerializer(many=True)}) + @swagger_auto_schema(method='post', responses={201: serializers.AvailablePrefixSerializer(many=True)}) @action(detail=True, url_path='available-prefixes', methods=['get', 'post']) @advisory_lock(ADVISORY_LOCK_KEYS['available-prefixes']) def available_prefixes(self, request, pk=None): @@ -158,12 +154,9 @@ return Response(serializer.data) - @swagger_auto_schema( - methods=['get', 'post'], - responses={ - 200: serializers.AvailableIPSerializer(many=True), - } - ) + @swagger_auto_schema(method='get', responses={200: serializers.AvailableIPSerializer(many=True)}) + @swagger_auto_schema(method='post', responses={201: serializers.AvailableIPSerializer(many=True)}, + request_body=serializers.AvailableIPSerializer(many=False)) @action(detail=True, url_path='available-ips', methods=['get', 'post']) @advisory_lock(ADVISORY_LOCK_KEYS['available-ips']) def available_ips(self, request, pk=None):
{"golden_diff": "diff --git a/netbox/ipam/api/views.py b/netbox/ipam/api/views.py\n--- a/netbox/ipam/api/views.py\n+++ b/netbox/ipam/api/views.py\n@@ -74,12 +74,8 @@\n serializer_class = serializers.PrefixSerializer\n filterset_class = filters.PrefixFilterSet\n \n- @swagger_auto_schema(\n- methods=['get', 'post'],\n- responses={\n- 200: serializers.AvailablePrefixSerializer(many=True),\n- }\n- )\n+ @swagger_auto_schema(method='get', responses={200: serializers.AvailablePrefixSerializer(many=True)})\n+ @swagger_auto_schema(method='post', responses={201: serializers.AvailablePrefixSerializer(many=True)})\n @action(detail=True, url_path='available-prefixes', methods=['get', 'post'])\n @advisory_lock(ADVISORY_LOCK_KEYS['available-prefixes'])\n def available_prefixes(self, request, pk=None):\n@@ -158,12 +154,9 @@\n \n return Response(serializer.data)\n \n- @swagger_auto_schema(\n- methods=['get', 'post'],\n- responses={\n- 200: serializers.AvailableIPSerializer(many=True),\n- }\n- )\n+ @swagger_auto_schema(method='get', responses={200: serializers.AvailableIPSerializer(many=True)})\n+ @swagger_auto_schema(method='post', responses={201: serializers.AvailableIPSerializer(many=True)},\n+ request_body=serializers.AvailableIPSerializer(many=False))\n @action(detail=True, url_path='available-ips', methods=['get', 'post'])\n @advisory_lock(ADVISORY_LOCK_KEYS['available-ips'])\n def available_ips(self, request, pk=None):\n", "issue": "API Definitions for available-ips and available-prefixes are incorrect\n<!--\r\n NOTE: IF YOUR ISSUE DOES NOT FOLLOW THIS TEMPLATE, IT WILL BE CLOSED.\r\n\r\n Please indicate the nature of the change by placing an X in one of the\r\n boxes below.\r\n-->\r\n### Change Type\r\n[ ] Addition\r\n[x] Correction\r\n[ ] Deprecation\r\n[ ] Cleanup (formatting, typos, etc.)\r\n\r\n### Area\r\n[ ] Installation instructions\r\n[ ] Configuration parameters\r\n[ ] Functionality/features\r\n[x] REST API\r\n[ ] Administration/development\r\n[ ] Other\r\n\r\n<!-- Describe the proposed change(s). -->\r\n### Proposed Changes\r\nI'm writing a ipam project with go-netbox(https://github.com/kobayashi/go-netbox for v2.8), which generate the client from swagger. And I'm using the prefix/available-ips api to create unused ip address. I notice that the api documents in http://netbox/api/docs/ has difference with actually code:\r\n* The request model should be IPAddress rather than WritablePrefix\r\n _ref: https://github.com/netbox-community/netbox/blob/v2.8.3/netbox/ipam/api/views.py#L211_\r\n* The return code should be 201 rather than 200\r\n _ref: https://github.com/netbox-community/netbox/blob/v2.8.3/netbox/ipam/api/views.py#L218_\r\n\r\nThese mismatches make go-netbox client unable to work correctly. I would appreciate if this can be fixed.\n", "before_files": [{"content": "from django.conf import settings\nfrom django.db.models import Count\nfrom django.shortcuts import get_object_or_404\nfrom django_pglocks import advisory_lock\nfrom drf_yasg.utils import swagger_auto_schema\nfrom rest_framework import status\nfrom rest_framework.decorators import action\nfrom rest_framework.exceptions import PermissionDenied\nfrom rest_framework.response import Response\n\nfrom extras.api.views import CustomFieldModelViewSet\nfrom ipam import filters\nfrom ipam.models import Aggregate, IPAddress, Prefix, RIR, Role, Service, VLAN, VLANGroup, VRF\nfrom utilities.api import ModelViewSet\nfrom utilities.constants import ADVISORY_LOCK_KEYS\nfrom utilities.utils import get_subquery\nfrom . 
import serializers\n\n\n#\n# VRFs\n#\n\nclass VRFViewSet(CustomFieldModelViewSet):\n queryset = VRF.objects.prefetch_related('tenant').prefetch_related('tags').annotate(\n ipaddress_count=get_subquery(IPAddress, 'vrf'),\n prefix_count=get_subquery(Prefix, 'vrf')\n )\n serializer_class = serializers.VRFSerializer\n filterset_class = filters.VRFFilterSet\n\n\n#\n# RIRs\n#\n\nclass RIRViewSet(ModelViewSet):\n queryset = RIR.objects.annotate(\n aggregate_count=Count('aggregates')\n )\n serializer_class = serializers.RIRSerializer\n filterset_class = filters.RIRFilterSet\n\n\n#\n# Aggregates\n#\n\nclass AggregateViewSet(CustomFieldModelViewSet):\n queryset = Aggregate.objects.prefetch_related('rir').prefetch_related('tags')\n serializer_class = serializers.AggregateSerializer\n filterset_class = filters.AggregateFilterSet\n\n\n#\n# Roles\n#\n\nclass RoleViewSet(ModelViewSet):\n queryset = Role.objects.annotate(\n prefix_count=get_subquery(Prefix, 'role'),\n vlan_count=get_subquery(VLAN, 'role')\n )\n serializer_class = serializers.RoleSerializer\n filterset_class = filters.RoleFilterSet\n\n\n#\n# Prefixes\n#\n\nclass PrefixViewSet(CustomFieldModelViewSet):\n queryset = Prefix.objects.prefetch_related('site', 'vrf__tenant', 'tenant', 'vlan', 'role', 'tags')\n serializer_class = serializers.PrefixSerializer\n filterset_class = filters.PrefixFilterSet\n\n @swagger_auto_schema(\n methods=['get', 'post'],\n responses={\n 200: serializers.AvailablePrefixSerializer(many=True),\n }\n )\n @action(detail=True, url_path='available-prefixes', methods=['get', 'post'])\n @advisory_lock(ADVISORY_LOCK_KEYS['available-prefixes'])\n def available_prefixes(self, request, pk=None):\n \"\"\"\n A convenience method for returning available child prefixes within a parent.\n\n The advisory lock decorator uses a PostgreSQL advisory lock to prevent this API from being\n invoked in parallel, which results in a race condition where multiple insertions can occur.\n \"\"\"\n prefix = get_object_or_404(Prefix, pk=pk)\n available_prefixes = prefix.get_available_prefixes()\n\n if request.method == 'POST':\n\n # Permissions check\n if not request.user.has_perm('ipam.add_prefix'):\n raise PermissionDenied()\n\n # Validate Requested Prefixes' length\n serializer = serializers.PrefixLengthSerializer(\n data=request.data if isinstance(request.data, list) else [request.data],\n many=True,\n context={\n 'request': request,\n 'prefix': prefix,\n }\n )\n if not serializer.is_valid():\n return Response(\n serializer.errors,\n status=status.HTTP_400_BAD_REQUEST\n )\n\n requested_prefixes = serializer.validated_data\n # Allocate prefixes to the requested objects based on availability within the parent\n for i, requested_prefix in enumerate(requested_prefixes):\n\n # Find the first available prefix equal to or larger than the requested size\n for available_prefix in available_prefixes.iter_cidrs():\n if requested_prefix['prefix_length'] >= available_prefix.prefixlen:\n allocated_prefix = '{}/{}'.format(available_prefix.network, requested_prefix['prefix_length'])\n requested_prefix['prefix'] = allocated_prefix\n requested_prefix['vrf'] = prefix.vrf.pk if prefix.vrf else None\n break\n else:\n return Response(\n {\n \"detail\": \"Insufficient space is available to accommodate the requested prefix size(s)\"\n },\n status=status.HTTP_204_NO_CONTENT\n )\n\n # Remove the allocated prefix from the list of available prefixes\n available_prefixes.remove(allocated_prefix)\n\n # Initialize the serializer with a list or a single object depending on 
what was requested\n context = {'request': request}\n if isinstance(request.data, list):\n serializer = serializers.PrefixSerializer(data=requested_prefixes, many=True, context=context)\n else:\n serializer = serializers.PrefixSerializer(data=requested_prefixes[0], context=context)\n\n # Create the new Prefix(es)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n else:\n\n serializer = serializers.AvailablePrefixSerializer(available_prefixes.iter_cidrs(), many=True, context={\n 'request': request,\n 'vrf': prefix.vrf,\n })\n\n return Response(serializer.data)\n\n @swagger_auto_schema(\n methods=['get', 'post'],\n responses={\n 200: serializers.AvailableIPSerializer(many=True),\n }\n )\n @action(detail=True, url_path='available-ips', methods=['get', 'post'])\n @advisory_lock(ADVISORY_LOCK_KEYS['available-ips'])\n def available_ips(self, request, pk=None):\n \"\"\"\n A convenience method for returning available IP addresses within a prefix. By default, the number of IPs\n returned will be equivalent to PAGINATE_COUNT. An arbitrary limit (up to MAX_PAGE_SIZE, if set) may be passed,\n however results will not be paginated.\n\n The advisory lock decorator uses a PostgreSQL advisory lock to prevent this API from being\n invoked in parallel, which results in a race condition where multiple insertions can occur.\n \"\"\"\n prefix = get_object_or_404(Prefix, pk=pk)\n\n # Create the next available IP within the prefix\n if request.method == 'POST':\n\n # Permissions check\n if not request.user.has_perm('ipam.add_ipaddress'):\n raise PermissionDenied()\n\n # Normalize to a list of objects\n requested_ips = request.data if isinstance(request.data, list) else [request.data]\n\n # Determine if the requested number of IPs is available\n available_ips = prefix.get_available_ips()\n if available_ips.size < len(requested_ips):\n return Response(\n {\n \"detail\": \"An insufficient number of IP addresses are available within the prefix {} ({} \"\n \"requested, {} available)\".format(prefix, len(requested_ips), len(available_ips))\n },\n status=status.HTTP_204_NO_CONTENT\n )\n\n # Assign addresses from the list of available IPs and copy VRF assignment from the parent prefix\n available_ips = iter(available_ips)\n prefix_length = prefix.prefix.prefixlen\n for requested_ip in requested_ips:\n requested_ip['address'] = '{}/{}'.format(next(available_ips), prefix_length)\n requested_ip['vrf'] = prefix.vrf.pk if prefix.vrf else None\n\n # Initialize the serializer with a list or a single object depending on what was requested\n context = {'request': request}\n if isinstance(request.data, list):\n serializer = serializers.IPAddressSerializer(data=requested_ips, many=True, context=context)\n else:\n serializer = serializers.IPAddressSerializer(data=requested_ips[0], context=context)\n\n # Create the new IP address(es)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n # Determine the maximum number of IPs to return\n else:\n try:\n limit = int(request.query_params.get('limit', settings.PAGINATE_COUNT))\n except ValueError:\n limit = settings.PAGINATE_COUNT\n if settings.MAX_PAGE_SIZE:\n limit = min(limit, settings.MAX_PAGE_SIZE)\n\n # Calculate available IPs within the prefix\n ip_list = []\n for index, ip in 
enumerate(prefix.get_available_ips(), start=1):\n ip_list.append(ip)\n if index == limit:\n break\n serializer = serializers.AvailableIPSerializer(ip_list, many=True, context={\n 'request': request,\n 'prefix': prefix.prefix,\n 'vrf': prefix.vrf,\n })\n\n return Response(serializer.data)\n\n\n#\n# IP addresses\n#\n\nclass IPAddressViewSet(CustomFieldModelViewSet):\n queryset = IPAddress.objects.prefetch_related(\n 'vrf__tenant', 'tenant', 'nat_inside', 'interface__device__device_type', 'interface__virtual_machine',\n 'nat_outside', 'tags',\n )\n serializer_class = serializers.IPAddressSerializer\n filterset_class = filters.IPAddressFilterSet\n\n\n#\n# VLAN groups\n#\n\nclass VLANGroupViewSet(ModelViewSet):\n queryset = VLANGroup.objects.prefetch_related('site').annotate(\n vlan_count=Count('vlans')\n )\n serializer_class = serializers.VLANGroupSerializer\n filterset_class = filters.VLANGroupFilterSet\n\n\n#\n# VLANs\n#\n\nclass VLANViewSet(CustomFieldModelViewSet):\n queryset = VLAN.objects.prefetch_related(\n 'site', 'group', 'tenant', 'role', 'tags'\n ).annotate(\n prefix_count=get_subquery(Prefix, 'vlan')\n )\n serializer_class = serializers.VLANSerializer\n filterset_class = filters.VLANFilterSet\n\n\n#\n# Services\n#\n\nclass ServiceViewSet(ModelViewSet):\n queryset = Service.objects.prefetch_related('device').prefetch_related('tags')\n serializer_class = serializers.ServiceSerializer\n filterset_class = filters.ServiceFilterSet\n", "path": "netbox/ipam/api/views.py"}]}
num_tokens_prompt: 3,790
num_tokens_diff: 392
problem_id: gh_patches_debug_40882
source: rasdani/github-patches
task_type: git_diff
in_source_id: pytorch__vision-3193
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Clean up and Document the io.image enhancements PR #3024 introduced a few enhancements on the `io.image` methods used for reading JPEG and PNG images. The code changes were merged but there are a few nice-to-have improvements missing. More specifically: - [x] Provide better error messages for [JPEG](https://github.com/pytorch/vision/pull/3024#discussion_r527066925) and [PNG](https://github.com/pytorch/vision/pull/3024#discussion_r527093866) when a specific `ImageReadMode` value is not supported. - [x] Ensure that `image_read_mode.h` has a new line at the [end of the file](https://github.com/pytorch/vision/pull/3024#discussion_r526710809). - [x] [Replace](https://github.com/pytorch/vision/pull/3024#discussion_r527065253) the old `#define` with the newer `constexpr` expression at `image_read_mode.h`. - [x] [Move the documentation](https://github.com/pytorch/vision/pull/3024#discussion_r527095264) of `ImageReadMode` values from the methods to the enum. **Note:** `constexpr` might cause problems on Windows. If that's the case, try using const instead. </issue> <code> [start of torchvision/io/image.py] 1 import torch 2 3 import os 4 import os.path as osp 5 import importlib.machinery 6 7 from enum import Enum 8 9 _HAS_IMAGE_OPT = False 10 11 try: 12 lib_dir = osp.abspath(osp.join(osp.dirname(__file__), "..")) 13 14 loader_details = ( 15 importlib.machinery.ExtensionFileLoader, 16 importlib.machinery.EXTENSION_SUFFIXES 17 ) 18 19 extfinder = importlib.machinery.FileFinder(lib_dir, loader_details) # type: ignore[arg-type] 20 ext_specs = extfinder.find_spec("image") 21 22 if os.name == 'nt': 23 # Load the image extension using LoadLibraryExW 24 import ctypes 25 import sys 26 27 kernel32 = ctypes.WinDLL('kernel32.dll', use_last_error=True) 28 with_load_library_flags = hasattr(kernel32, 'AddDllDirectory') 29 prev_error_mode = kernel32.SetErrorMode(0x0001) 30 31 kernel32.LoadLibraryW.restype = ctypes.c_void_p 32 if with_load_library_flags: 33 kernel32.LoadLibraryExW.restype = ctypes.c_void_p 34 35 if ext_specs is not None: 36 res = kernel32.LoadLibraryExW(ext_specs.origin, None, 0x00001100) 37 if res is None: 38 err = ctypes.WinError(ctypes.get_last_error()) 39 err.strerror += (f' Error loading "{ext_specs.origin}" or any or ' 40 'its dependencies.') 41 raise err 42 43 kernel32.SetErrorMode(prev_error_mode) 44 45 if ext_specs is not None: 46 torch.ops.load_library(ext_specs.origin) 47 _HAS_IMAGE_OPT = True 48 except (ImportError, OSError): 49 pass 50 51 52 class ImageReadMode(Enum): 53 UNCHANGED = 0 54 GRAY = 1 55 GRAY_ALPHA = 2 56 RGB = 3 57 RGB_ALPHA = 4 58 59 60 def read_file(path: str) -> torch.Tensor: 61 """ 62 Reads and outputs the bytes contents of a file as a uint8 Tensor 63 with one dimension. 64 65 Arguments: 66 path (str): the path to the file to be read 67 68 Returns: 69 data (Tensor) 70 """ 71 data = torch.ops.image.read_file(path) 72 return data 73 74 75 def write_file(filename: str, data: torch.Tensor) -> None: 76 """ 77 Writes the contents of a uint8 tensor with one dimension to a 78 file. 79 80 Arguments: 81 filename (str): the path to the file to be written 82 data (Tensor): the contents to be written to the output file 83 """ 84 torch.ops.image.write_file(filename, data) 85 86 87 def decode_png(input: torch.Tensor, mode: ImageReadMode = ImageReadMode.UNCHANGED) -> torch.Tensor: 88 """ 89 Decodes a PNG image into a 3 dimensional RGB Tensor. 
90 Optionally converts the image to the desired format. 91 The values of the output tensor are uint8 between 0 and 255. 92 93 Arguments: 94 input (Tensor[1]): a one dimensional uint8 tensor containing 95 the raw bytes of the PNG image. 96 mode (ImageReadMode): the read mode used for optionally 97 converting the image. Use `ImageReadMode.UNCHANGED` for loading 98 the image as-is, `ImageReadMode.GRAY` for converting to grayscale, 99 `ImageReadMode.GRAY_ALPHA` for grayscale with transparency, 100 `ImageReadMode.RGB` for RGB and `ImageReadMode.RGB_ALPHA` for 101 RGB with transparency. Default: `ImageReadMode.UNCHANGED` 102 103 Returns: 104 output (Tensor[image_channels, image_height, image_width]) 105 """ 106 output = torch.ops.image.decode_png(input, mode.value) 107 return output 108 109 110 def encode_png(input: torch.Tensor, compression_level: int = 6) -> torch.Tensor: 111 """ 112 Takes an input tensor in CHW layout and returns a buffer with the contents 113 of its corresponding PNG file. 114 115 Parameters 116 ---------- 117 input: Tensor[channels, image_height, image_width] 118 int8 image tensor of `c` channels, where `c` must 3 or 1. 119 compression_level: int 120 Compression factor for the resulting file, it must be a number 121 between 0 and 9. Default: 6 122 123 Returns 124 ------- 125 output: Tensor[1] 126 A one dimensional int8 tensor that contains the raw bytes of the 127 PNG file. 128 """ 129 output = torch.ops.image.encode_png(input, compression_level) 130 return output 131 132 133 def write_png(input: torch.Tensor, filename: str, compression_level: int = 6): 134 """ 135 Takes an input tensor in CHW layout (or HW in the case of grayscale images) 136 and saves it in a PNG file. 137 138 Parameters 139 ---------- 140 input: Tensor[channels, image_height, image_width] 141 int8 image tensor of `c` channels, where `c` must be 1 or 3. 142 filename: str 143 Path to save the image. 144 compression_level: int 145 Compression factor for the resulting file, it must be a number 146 between 0 and 9. Default: 6 147 """ 148 output = encode_png(input, compression_level) 149 write_file(filename, output) 150 151 152 def decode_jpeg(input: torch.Tensor, mode: ImageReadMode = ImageReadMode.UNCHANGED) -> torch.Tensor: 153 """ 154 Decodes a JPEG image into a 3 dimensional RGB Tensor. 155 Optionally converts the image to the desired format. 156 The values of the output tensor are uint8 between 0 and 255. 157 158 Arguments: 159 input (Tensor[1]): a one dimensional uint8 tensor containing 160 the raw bytes of the JPEG image. 161 mode (ImageReadMode): the read mode used for optionally 162 converting the image. Use `ImageReadMode.UNCHANGED` for loading 163 the image as-is, `ImageReadMode.GRAY` for converting to grayscale 164 and `ImageReadMode.RGB` for RGB. Default: `ImageReadMode.UNCHANGED` 165 166 Returns: 167 output (Tensor[image_channels, image_height, image_width]) 168 """ 169 output = torch.ops.image.decode_jpeg(input, mode.value) 170 return output 171 172 173 def encode_jpeg(input: torch.Tensor, quality: int = 75) -> torch.Tensor: 174 """ 175 Takes an input tensor in CHW layout and returns a buffer with the contents 176 of its corresponding JPEG file. 177 178 Parameters 179 ---------- 180 input: Tensor[channels, image_height, image_width]) 181 int8 image tensor of `c` channels, where `c` must be 1 or 3. 182 quality: int 183 Quality of the resulting JPEG file, it must be a number between 184 1 and 100. 
Default: 75 185 186 Returns 187 ------- 188 output: Tensor[1] 189 A one dimensional int8 tensor that contains the raw bytes of the 190 JPEG file. 191 """ 192 if quality < 1 or quality > 100: 193 raise ValueError('Image quality should be a positive number ' 194 'between 1 and 100') 195 196 output = torch.ops.image.encode_jpeg(input, quality) 197 return output 198 199 200 def write_jpeg(input: torch.Tensor, filename: str, quality: int = 75): 201 """ 202 Takes an input tensor in CHW layout and saves it in a JPEG file. 203 204 Parameters 205 ---------- 206 input: Tensor[channels, image_height, image_width] 207 int8 image tensor of `c` channels, where `c` must be 1 or 3. 208 filename: str 209 Path to save the image. 210 quality: int 211 Quality of the resulting JPEG file, it must be a number 212 between 1 and 100. Default: 75 213 """ 214 output = encode_jpeg(input, quality) 215 write_file(filename, output) 216 217 218 def decode_image(input: torch.Tensor, mode: ImageReadMode = ImageReadMode.UNCHANGED) -> torch.Tensor: 219 """ 220 Detects whether an image is a JPEG or PNG and performs the appropriate 221 operation to decode the image into a 3 dimensional RGB Tensor. 222 223 Optionally converts the image to the desired format. 224 The values of the output tensor are uint8 between 0 and 255. 225 226 Parameters 227 ---------- 228 input: Tensor 229 a one dimensional uint8 tensor containing the raw bytes of the 230 PNG or JPEG image. 231 mode: ImageReadMode 232 the read mode used for optionally converting the image. JPEG 233 and PNG images have different permitted values. The default 234 value is `ImageReadMode.UNCHANGED` and it keeps the image as-is. 235 See `decode_jpeg()` and `decode_png()` for more information. 236 Default: `ImageReadMode.UNCHANGED` 237 238 Returns 239 ------- 240 output: Tensor[image_channels, image_height, image_width] 241 """ 242 output = torch.ops.image.decode_image(input, mode.value) 243 return output 244 245 246 def read_image(path: str, mode: ImageReadMode = ImageReadMode.UNCHANGED) -> torch.Tensor: 247 """ 248 Reads a JPEG or PNG image into a 3 dimensional RGB Tensor. 249 Optionally converts the image to the desired format. 250 The values of the output tensor are uint8 between 0 and 255. 251 252 Parameters 253 ---------- 254 path: str 255 path of the JPEG or PNG image. 256 mode: ImageReadMode 257 the read mode used for optionally converting the image. JPEG 258 and PNG images have different permitted values. The default 259 value is `ImageReadMode.UNCHANGED` and it keeps the image as-is. 260 See `decode_jpeg()` and `decode_png()` for more information. 261 Default: `ImageReadMode.UNCHANGED` 262 263 Returns 264 ------- 265 output: Tensor[image_channels, image_height, image_width] 266 """ 267 data = read_file(path) 268 return decode_image(data, mode) 269 [end of torchvision/io/image.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/torchvision/io/image.py b/torchvision/io/image.py --- a/torchvision/io/image.py +++ b/torchvision/io/image.py @@ -50,6 +50,15 @@ class ImageReadMode(Enum): + """ + Support for various modes while reading images. + + Use `ImageReadMode.UNCHANGED` for loading the image as-is, + `ImageReadMode.GRAY` for converting to grayscale, + `ImageReadMode.GRAY_ALPHA` for grayscale with transparency, + `ImageReadMode.RGB` for RGB and `ImageReadMode.RGB_ALPHA` for + RGB with transparency. + """ UNCHANGED = 0 GRAY = 1 GRAY_ALPHA = 2 @@ -94,11 +103,9 @@ input (Tensor[1]): a one dimensional uint8 tensor containing the raw bytes of the PNG image. mode (ImageReadMode): the read mode used for optionally - converting the image. Use `ImageReadMode.UNCHANGED` for loading - the image as-is, `ImageReadMode.GRAY` for converting to grayscale, - `ImageReadMode.GRAY_ALPHA` for grayscale with transparency, - `ImageReadMode.RGB` for RGB and `ImageReadMode.RGB_ALPHA` for - RGB with transparency. Default: `ImageReadMode.UNCHANGED` + converting the image. Default: `ImageReadMode.UNCHANGED`. + See `ImageReadMode` class for more information on various + available modes. Returns: output (Tensor[image_channels, image_height, image_width]) @@ -159,9 +166,9 @@ input (Tensor[1]): a one dimensional uint8 tensor containing the raw bytes of the JPEG image. mode (ImageReadMode): the read mode used for optionally - converting the image. Use `ImageReadMode.UNCHANGED` for loading - the image as-is, `ImageReadMode.GRAY` for converting to grayscale - and `ImageReadMode.RGB` for RGB. Default: `ImageReadMode.UNCHANGED` + converting the image. Default: `ImageReadMode.UNCHANGED`. + See `ImageReadMode` class for more information on various + available modes. Returns: output (Tensor[image_channels, image_height, image_width]) @@ -229,11 +236,10 @@ a one dimensional uint8 tensor containing the raw bytes of the PNG or JPEG image. mode: ImageReadMode - the read mode used for optionally converting the image. JPEG - and PNG images have different permitted values. The default - value is `ImageReadMode.UNCHANGED` and it keeps the image as-is. - See `decode_jpeg()` and `decode_png()` for more information. - Default: `ImageReadMode.UNCHANGED` + the read mode used for optionally converting the image. + Default: `ImageReadMode.UNCHANGED`. + See `ImageReadMode` class for more information on various + available modes. Returns ------- @@ -254,11 +260,10 @@ path: str path of the JPEG or PNG image. mode: ImageReadMode - the read mode used for optionally converting the image. JPEG - and PNG images have different permitted values. The default - value is `ImageReadMode.UNCHANGED` and it keeps the image as-is. - See `decode_jpeg()` and `decode_png()` for more information. - Default: `ImageReadMode.UNCHANGED` + the read mode used for optionally converting the image. + Default: `ImageReadMode.UNCHANGED`. + See `ImageReadMode` class for more information on various + available modes. Returns -------
{"golden_diff": "diff --git a/torchvision/io/image.py b/torchvision/io/image.py\n--- a/torchvision/io/image.py\n+++ b/torchvision/io/image.py\n@@ -50,6 +50,15 @@\n \n \n class ImageReadMode(Enum):\n+ \"\"\"\n+ Support for various modes while reading images.\n+\n+ Use `ImageReadMode.UNCHANGED` for loading the image as-is,\n+ `ImageReadMode.GRAY` for converting to grayscale,\n+ `ImageReadMode.GRAY_ALPHA` for grayscale with transparency,\n+ `ImageReadMode.RGB` for RGB and `ImageReadMode.RGB_ALPHA` for\n+ RGB with transparency.\n+ \"\"\"\n UNCHANGED = 0\n GRAY = 1\n GRAY_ALPHA = 2\n@@ -94,11 +103,9 @@\n input (Tensor[1]): a one dimensional uint8 tensor containing\n the raw bytes of the PNG image.\n mode (ImageReadMode): the read mode used for optionally\n- converting the image. Use `ImageReadMode.UNCHANGED` for loading\n- the image as-is, `ImageReadMode.GRAY` for converting to grayscale,\n- `ImageReadMode.GRAY_ALPHA` for grayscale with transparency,\n- `ImageReadMode.RGB` for RGB and `ImageReadMode.RGB_ALPHA` for\n- RGB with transparency. Default: `ImageReadMode.UNCHANGED`\n+ converting the image. Default: `ImageReadMode.UNCHANGED`.\n+ See `ImageReadMode` class for more information on various\n+ available modes.\n \n Returns:\n output (Tensor[image_channels, image_height, image_width])\n@@ -159,9 +166,9 @@\n input (Tensor[1]): a one dimensional uint8 tensor containing\n the raw bytes of the JPEG image.\n mode (ImageReadMode): the read mode used for optionally\n- converting the image. Use `ImageReadMode.UNCHANGED` for loading\n- the image as-is, `ImageReadMode.GRAY` for converting to grayscale\n- and `ImageReadMode.RGB` for RGB. Default: `ImageReadMode.UNCHANGED`\n+ converting the image. Default: `ImageReadMode.UNCHANGED`.\n+ See `ImageReadMode` class for more information on various\n+ available modes.\n \n Returns:\n output (Tensor[image_channels, image_height, image_width])\n@@ -229,11 +236,10 @@\n a one dimensional uint8 tensor containing the raw bytes of the\n PNG or JPEG image.\n mode: ImageReadMode\n- the read mode used for optionally converting the image. JPEG\n- and PNG images have different permitted values. The default\n- value is `ImageReadMode.UNCHANGED` and it keeps the image as-is.\n- See `decode_jpeg()` and `decode_png()` for more information.\n- Default: `ImageReadMode.UNCHANGED`\n+ the read mode used for optionally converting the image.\n+ Default: `ImageReadMode.UNCHANGED`.\n+ See `ImageReadMode` class for more information on various\n+ available modes.\n \n Returns\n -------\n@@ -254,11 +260,10 @@\n path: str\n path of the JPEG or PNG image.\n mode: ImageReadMode\n- the read mode used for optionally converting the image. JPEG\n- and PNG images have different permitted values. The default\n- value is `ImageReadMode.UNCHANGED` and it keeps the image as-is.\n- See `decode_jpeg()` and `decode_png()` for more information.\n- Default: `ImageReadMode.UNCHANGED`\n+ the read mode used for optionally converting the image.\n+ Default: `ImageReadMode.UNCHANGED`.\n+ See `ImageReadMode` class for more information on various\n+ available modes.\n \n Returns\n -------\n", "issue": "Clean up and Document the io.image enhancements\nPR #3024 introduced a few enhancements on the `io.image` methods used for reading JPEG and PNG images. 
The code changes were merged but there are a few nice-to-have improvements missing.\r\n\r\nMore specifically:\r\n- [x] Provide better error messages for [JPEG](https://github.com/pytorch/vision/pull/3024#discussion_r527066925) and [PNG](https://github.com/pytorch/vision/pull/3024#discussion_r527093866) when a specific `ImageReadMode` value is not supported.\r\n- [x] Ensure that `image_read_mode.h` has a new line at the [end of the file](https://github.com/pytorch/vision/pull/3024#discussion_r526710809).\r\n- [x] [Replace](https://github.com/pytorch/vision/pull/3024#discussion_r527065253) the old `#define` with the newer `constexpr` expression at `image_read_mode.h`.\r\n- [x] [Move the documentation](https://github.com/pytorch/vision/pull/3024#discussion_r527095264) of `ImageReadMode` values from the methods to the enum.\r\n\r\n**Note:** `constexpr` might cause problems on Windows. If that's the case, try using const instead.\n", "before_files": [{"content": "import torch\n\nimport os\nimport os.path as osp\nimport importlib.machinery\n\nfrom enum import Enum\n\n_HAS_IMAGE_OPT = False\n\ntry:\n lib_dir = osp.abspath(osp.join(osp.dirname(__file__), \"..\"))\n\n loader_details = (\n importlib.machinery.ExtensionFileLoader,\n importlib.machinery.EXTENSION_SUFFIXES\n )\n\n extfinder = importlib.machinery.FileFinder(lib_dir, loader_details) # type: ignore[arg-type]\n ext_specs = extfinder.find_spec(\"image\")\n\n if os.name == 'nt':\n # Load the image extension using LoadLibraryExW\n import ctypes\n import sys\n\n kernel32 = ctypes.WinDLL('kernel32.dll', use_last_error=True)\n with_load_library_flags = hasattr(kernel32, 'AddDllDirectory')\n prev_error_mode = kernel32.SetErrorMode(0x0001)\n\n kernel32.LoadLibraryW.restype = ctypes.c_void_p\n if with_load_library_flags:\n kernel32.LoadLibraryExW.restype = ctypes.c_void_p\n\n if ext_specs is not None:\n res = kernel32.LoadLibraryExW(ext_specs.origin, None, 0x00001100)\n if res is None:\n err = ctypes.WinError(ctypes.get_last_error())\n err.strerror += (f' Error loading \"{ext_specs.origin}\" or any or '\n 'its dependencies.')\n raise err\n\n kernel32.SetErrorMode(prev_error_mode)\n\n if ext_specs is not None:\n torch.ops.load_library(ext_specs.origin)\n _HAS_IMAGE_OPT = True\nexcept (ImportError, OSError):\n pass\n\n\nclass ImageReadMode(Enum):\n UNCHANGED = 0\n GRAY = 1\n GRAY_ALPHA = 2\n RGB = 3\n RGB_ALPHA = 4\n\n\ndef read_file(path: str) -> torch.Tensor:\n \"\"\"\n Reads and outputs the bytes contents of a file as a uint8 Tensor\n with one dimension.\n\n Arguments:\n path (str): the path to the file to be read\n\n Returns:\n data (Tensor)\n \"\"\"\n data = torch.ops.image.read_file(path)\n return data\n\n\ndef write_file(filename: str, data: torch.Tensor) -> None:\n \"\"\"\n Writes the contents of a uint8 tensor with one dimension to a\n file.\n\n Arguments:\n filename (str): the path to the file to be written\n data (Tensor): the contents to be written to the output file\n \"\"\"\n torch.ops.image.write_file(filename, data)\n\n\ndef decode_png(input: torch.Tensor, mode: ImageReadMode = ImageReadMode.UNCHANGED) -> torch.Tensor:\n \"\"\"\n Decodes a PNG image into a 3 dimensional RGB Tensor.\n Optionally converts the image to the desired format.\n The values of the output tensor are uint8 between 0 and 255.\n\n Arguments:\n input (Tensor[1]): a one dimensional uint8 tensor containing\n the raw bytes of the PNG image.\n mode (ImageReadMode): the read mode used for optionally\n converting the image. 
Use `ImageReadMode.UNCHANGED` for loading\n the image as-is, `ImageReadMode.GRAY` for converting to grayscale,\n `ImageReadMode.GRAY_ALPHA` for grayscale with transparency,\n `ImageReadMode.RGB` for RGB and `ImageReadMode.RGB_ALPHA` for\n RGB with transparency. Default: `ImageReadMode.UNCHANGED`\n\n Returns:\n output (Tensor[image_channels, image_height, image_width])\n \"\"\"\n output = torch.ops.image.decode_png(input, mode.value)\n return output\n\n\ndef encode_png(input: torch.Tensor, compression_level: int = 6) -> torch.Tensor:\n \"\"\"\n Takes an input tensor in CHW layout and returns a buffer with the contents\n of its corresponding PNG file.\n\n Parameters\n ----------\n input: Tensor[channels, image_height, image_width]\n int8 image tensor of `c` channels, where `c` must 3 or 1.\n compression_level: int\n Compression factor for the resulting file, it must be a number\n between 0 and 9. Default: 6\n\n Returns\n -------\n output: Tensor[1]\n A one dimensional int8 tensor that contains the raw bytes of the\n PNG file.\n \"\"\"\n output = torch.ops.image.encode_png(input, compression_level)\n return output\n\n\ndef write_png(input: torch.Tensor, filename: str, compression_level: int = 6):\n \"\"\"\n Takes an input tensor in CHW layout (or HW in the case of grayscale images)\n and saves it in a PNG file.\n\n Parameters\n ----------\n input: Tensor[channels, image_height, image_width]\n int8 image tensor of `c` channels, where `c` must be 1 or 3.\n filename: str\n Path to save the image.\n compression_level: int\n Compression factor for the resulting file, it must be a number\n between 0 and 9. Default: 6\n \"\"\"\n output = encode_png(input, compression_level)\n write_file(filename, output)\n\n\ndef decode_jpeg(input: torch.Tensor, mode: ImageReadMode = ImageReadMode.UNCHANGED) -> torch.Tensor:\n \"\"\"\n Decodes a JPEG image into a 3 dimensional RGB Tensor.\n Optionally converts the image to the desired format.\n The values of the output tensor are uint8 between 0 and 255.\n\n Arguments:\n input (Tensor[1]): a one dimensional uint8 tensor containing\n the raw bytes of the JPEG image.\n mode (ImageReadMode): the read mode used for optionally\n converting the image. Use `ImageReadMode.UNCHANGED` for loading\n the image as-is, `ImageReadMode.GRAY` for converting to grayscale\n and `ImageReadMode.RGB` for RGB. Default: `ImageReadMode.UNCHANGED`\n\n Returns:\n output (Tensor[image_channels, image_height, image_width])\n \"\"\"\n output = torch.ops.image.decode_jpeg(input, mode.value)\n return output\n\n\ndef encode_jpeg(input: torch.Tensor, quality: int = 75) -> torch.Tensor:\n \"\"\"\n Takes an input tensor in CHW layout and returns a buffer with the contents\n of its corresponding JPEG file.\n\n Parameters\n ----------\n input: Tensor[channels, image_height, image_width])\n int8 image tensor of `c` channels, where `c` must be 1 or 3.\n quality: int\n Quality of the resulting JPEG file, it must be a number between\n 1 and 100. 
Default: 75\n\n Returns\n -------\n output: Tensor[1]\n A one dimensional int8 tensor that contains the raw bytes of the\n JPEG file.\n \"\"\"\n if quality < 1 or quality > 100:\n raise ValueError('Image quality should be a positive number '\n 'between 1 and 100')\n\n output = torch.ops.image.encode_jpeg(input, quality)\n return output\n\n\ndef write_jpeg(input: torch.Tensor, filename: str, quality: int = 75):\n \"\"\"\n Takes an input tensor in CHW layout and saves it in a JPEG file.\n\n Parameters\n ----------\n input: Tensor[channels, image_height, image_width]\n int8 image tensor of `c` channels, where `c` must be 1 or 3.\n filename: str\n Path to save the image.\n quality: int\n Quality of the resulting JPEG file, it must be a number\n between 1 and 100. Default: 75\n \"\"\"\n output = encode_jpeg(input, quality)\n write_file(filename, output)\n\n\ndef decode_image(input: torch.Tensor, mode: ImageReadMode = ImageReadMode.UNCHANGED) -> torch.Tensor:\n \"\"\"\n Detects whether an image is a JPEG or PNG and performs the appropriate\n operation to decode the image into a 3 dimensional RGB Tensor.\n\n Optionally converts the image to the desired format.\n The values of the output tensor are uint8 between 0 and 255.\n\n Parameters\n ----------\n input: Tensor\n a one dimensional uint8 tensor containing the raw bytes of the\n PNG or JPEG image.\n mode: ImageReadMode\n the read mode used for optionally converting the image. JPEG\n and PNG images have different permitted values. The default\n value is `ImageReadMode.UNCHANGED` and it keeps the image as-is.\n See `decode_jpeg()` and `decode_png()` for more information.\n Default: `ImageReadMode.UNCHANGED`\n\n Returns\n -------\n output: Tensor[image_channels, image_height, image_width]\n \"\"\"\n output = torch.ops.image.decode_image(input, mode.value)\n return output\n\n\ndef read_image(path: str, mode: ImageReadMode = ImageReadMode.UNCHANGED) -> torch.Tensor:\n \"\"\"\n Reads a JPEG or PNG image into a 3 dimensional RGB Tensor.\n Optionally converts the image to the desired format.\n The values of the output tensor are uint8 between 0 and 255.\n\n Parameters\n ----------\n path: str\n path of the JPEG or PNG image.\n mode: ImageReadMode\n the read mode used for optionally converting the image. JPEG\n and PNG images have different permitted values. The default\n value is `ImageReadMode.UNCHANGED` and it keeps the image as-is.\n See `decode_jpeg()` and `decode_png()` for more information.\n Default: `ImageReadMode.UNCHANGED`\n\n Returns\n -------\n output: Tensor[image_channels, image_height, image_width]\n \"\"\"\n data = read_file(path)\n return decode_image(data, mode)\n", "path": "torchvision/io/image.py"}]}
3,707
858
gh_patches_debug_6597
rasdani/github-patches
git_diff
mlflow__mlflow-2196
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Cannot preview artifacts in UI when uploaded to remote machine via FTP ### System information - **Have I written custom code (as opposed to using a stock example script provided in MLflow)**: No - **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: Linux Ubuntu 18.04 - **MLflow installed from (source or binary)**: installed via from pypi via pip - **MLflow version (run ``mlflow --version``)**: 0.9.1 - **Python version**: 3.6.7 - **npm version (if running the dev UI): - **Exact command to reproduce**: Server runs with: ``` mlflow server --backend-store-uri ./mlruns --default-artifact-root ./artifacts -h 0.0.0.0 -p 5020 ``` within a folder holding a `mlruns` and a `artifacts` folder ### Describe the problem I am running the server on a remote machine. On the local machine which uploads via the tracking API the tracking uri is set to: `http://path.to.server:port` and artifact uri to: `ftp://anonymous:@path.to.server/artifacts` uploading the tracking information and artifacts work fine and everything shows in the ui: ![image](https://user-images.githubusercontent.com/12450485/58400502-7d0e9e00-805b-11e9-8d4f-b89a1bab5cb1.png) However when I try to click one of the artifacts I always get the message that something went wrong: ![image](https://user-images.githubusercontent.com/12450485/58400530-99aad600-805b-11e9-8887-4f2640765ba4.png) Previewing the artifacts work when I store them locally on my machine but as soon as I transfer them via FTP and then try to preview them from the remote machine I get the error shown above. ### Source code / logs - </issue> <code> [start of mlflow/store/artifact/ftp_artifact_repo.py] 1 import os 2 import ftplib 3 from ftplib import FTP 4 from contextlib import contextmanager 5 6 import posixpath 7 from six.moves import urllib 8 9 from mlflow.entities.file_info import FileInfo 10 from mlflow.store.artifact.artifact_repo import ArtifactRepository 11 from mlflow.utils.file_utils import relative_path_to_artifact_path 12 from mlflow.exceptions import MlflowException 13 14 15 class FTPArtifactRepository(ArtifactRepository): 16 """Stores artifacts as files in a remote directory, via ftp.""" 17 18 def __init__(self, artifact_uri): 19 self.uri = artifact_uri 20 parsed = urllib.parse.urlparse(artifact_uri) 21 self.config = { 22 'host': parsed.hostname, 23 'port': 21 if parsed.port is None else parsed.port, 24 'username': parsed.username, 25 'password': parsed.password 26 } 27 self.path = parsed.path 28 29 if self.config['host'] is None: 30 self.config['host'] = 'localhost' 31 32 super(FTPArtifactRepository, self).__init__(artifact_uri) 33 34 @contextmanager 35 def get_ftp_client(self): 36 ftp = FTP() 37 ftp.connect(self.config['host'], self.config['port']) 38 ftp.login(self.config['username'], self.config['password']) 39 yield ftp 40 ftp.close() 41 42 @staticmethod 43 def _is_dir(ftp, full_file_path): 44 try: 45 ftp.cwd(full_file_path) 46 return True 47 except ftplib.error_perm: 48 return False 49 50 @staticmethod 51 def _mkdir(ftp, artifact_dir): 52 try: 53 if not FTPArtifactRepository._is_dir(ftp, artifact_dir): 54 ftp.mkd(artifact_dir) 55 except ftplib.error_perm: 56 head, _ = posixpath.split(artifact_dir) 57 FTPArtifactRepository._mkdir(ftp, head) 58 FTPArtifactRepository._mkdir(ftp, artifact_dir) 59 60 @staticmethod 61 def _size(ftp, full_file_path): 62 ftp.voidcmd('TYPE I') 63 size = ftp.size(full_file_path) 64 ftp.voidcmd('TYPE A') 65 return size 66 
67 def log_artifact(self, local_file, artifact_path=None): 68 with self.get_ftp_client() as ftp: 69 artifact_dir = posixpath.join(self.path, artifact_path) \ 70 if artifact_path else self.path 71 self._mkdir(ftp, artifact_dir) 72 with open(local_file, 'rb') as f: 73 ftp.cwd(artifact_dir) 74 ftp.storbinary('STOR ' + os.path.basename(local_file), f) 75 76 def log_artifacts(self, local_dir, artifact_path=None): 77 dest_path = posixpath.join(self.path, artifact_path) \ 78 if artifact_path else self.path 79 80 dest_path = posixpath.join( 81 dest_path, os.path.split(local_dir)[1]) 82 dest_path_re = os.path.split(local_dir)[1] 83 if artifact_path: 84 dest_path_re = posixpath.join( 85 artifact_path, os.path.split(local_dir)[1]) 86 87 local_dir = os.path.abspath(local_dir) 88 for (root, _, filenames) in os.walk(local_dir): 89 upload_path = dest_path 90 if root != local_dir: 91 rel_path = os.path.relpath(root, local_dir) 92 rel_path = relative_path_to_artifact_path(rel_path) 93 upload_path = posixpath.join(dest_path_re, rel_path) 94 if not filenames: 95 with self.get_ftp_client() as ftp: 96 self._mkdir(ftp, posixpath.join(self.path, upload_path)) 97 for f in filenames: 98 if os.path.isfile(os.path.join(root, f)): 99 self.log_artifact(os.path.join(root, f), upload_path) 100 101 def _is_directory(self, artifact_path): 102 artifact_dir = self.path 103 list_dir = posixpath.join(artifact_dir, artifact_path) if artifact_path else artifact_dir 104 with self.get_ftp_client() as ftp: 105 return self._is_dir(ftp, list_dir) 106 107 def list_artifacts(self, path=None): 108 with self.get_ftp_client() as ftp: 109 artifact_dir = self.path 110 list_dir = posixpath.join(artifact_dir, path) if path else artifact_dir 111 if not self._is_dir(ftp, list_dir): 112 return [] 113 artifact_files = ftp.nlst(list_dir) 114 infos = [] 115 for file_name in artifact_files: 116 file_path = (file_name if path is None 117 else posixpath.join(path, file_name)) 118 full_file_path = posixpath.join(list_dir, file_name) 119 if self._is_dir(ftp, full_file_path): 120 infos.append(FileInfo(file_path, True, None)) 121 else: 122 size = self._size(ftp, full_file_path) 123 infos.append(FileInfo(file_path, False, size)) 124 return infos 125 126 def _download_file(self, remote_file_path, local_path): 127 remote_full_path = posixpath.join(self.path, remote_file_path) \ 128 if remote_file_path else self.path 129 with self.get_ftp_client() as ftp: 130 with open(local_path, 'wb') as f: 131 ftp.retrbinary('RETR ' + remote_full_path, f.write) 132 133 def delete_artifacts(self, artifact_path=None): 134 raise MlflowException('Not implemented yet') 135 [end of mlflow/store/artifact/ftp_artifact_repo.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mlflow/store/artifact/ftp_artifact_repo.py b/mlflow/store/artifact/ftp_artifact_repo.py --- a/mlflow/store/artifact/ftp_artifact_repo.py +++ b/mlflow/store/artifact/ftp_artifact_repo.py @@ -111,6 +111,7 @@ if not self._is_dir(ftp, list_dir): return [] artifact_files = ftp.nlst(list_dir) + artifact_files = list(filter(lambda x: x != "." and x != "..", artifact_files)) infos = [] for file_name in artifact_files: file_path = (file_name if path is None
{"golden_diff": "diff --git a/mlflow/store/artifact/ftp_artifact_repo.py b/mlflow/store/artifact/ftp_artifact_repo.py\n--- a/mlflow/store/artifact/ftp_artifact_repo.py\n+++ b/mlflow/store/artifact/ftp_artifact_repo.py\n@@ -111,6 +111,7 @@\n if not self._is_dir(ftp, list_dir):\n return []\n artifact_files = ftp.nlst(list_dir)\n+ artifact_files = list(filter(lambda x: x != \".\" and x != \"..\", artifact_files))\n infos = []\n for file_name in artifact_files:\n file_path = (file_name if path is None\n", "issue": "Cannot preview artifacts in UI when uploaded to remote machine via FTP\n### System information\r\n- **Have I written custom code (as opposed to using a stock example script provided in MLflow)**:\r\nNo\r\n- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**:\r\nLinux Ubuntu 18.04\r\n- **MLflow installed from (source or binary)**: \r\ninstalled via from pypi via pip\r\n- **MLflow version (run ``mlflow --version``)**:\r\n0.9.1\r\n- **Python version**: \r\n3.6.7\r\n- **npm version (if running the dev UI):\r\n- **Exact command to reproduce**:\r\nServer runs with:\r\n```\r\nmlflow server --backend-store-uri ./mlruns --default-artifact-root ./artifacts -h 0.0.0.0 -p 5020\r\n```\r\nwithin a folder holding a `mlruns` and a `artifacts` folder\r\n\r\n### Describe the problem\r\nI am running the server on a remote machine.\r\nOn the local machine which uploads via the tracking API the\r\ntracking uri is set to: `http://path.to.server:port`\r\nand artifact uri to: `ftp://anonymous:@path.to.server/artifacts`\r\n\r\nuploading the tracking information and artifacts work fine and everything shows in the ui:\r\n![image](https://user-images.githubusercontent.com/12450485/58400502-7d0e9e00-805b-11e9-8d4f-b89a1bab5cb1.png)\r\n\r\nHowever when I try to click one of the artifacts I always get the message that something went wrong:\r\n![image](https://user-images.githubusercontent.com/12450485/58400530-99aad600-805b-11e9-8887-4f2640765ba4.png)\r\n\r\nPreviewing the artifacts work when I store them locally on my machine but as soon as I transfer them via FTP and then try to preview them from the remote machine I get the error shown above.\r\n\r\n\r\n### Source code / logs\r\n-\r\n\n", "before_files": [{"content": "import os\nimport ftplib\nfrom ftplib import FTP\nfrom contextlib import contextmanager\n\nimport posixpath\nfrom six.moves import urllib\n\nfrom mlflow.entities.file_info import FileInfo\nfrom mlflow.store.artifact.artifact_repo import ArtifactRepository\nfrom mlflow.utils.file_utils import relative_path_to_artifact_path\nfrom mlflow.exceptions import MlflowException\n\n\nclass FTPArtifactRepository(ArtifactRepository):\n \"\"\"Stores artifacts as files in a remote directory, via ftp.\"\"\"\n\n def __init__(self, artifact_uri):\n self.uri = artifact_uri\n parsed = urllib.parse.urlparse(artifact_uri)\n self.config = {\n 'host': parsed.hostname,\n 'port': 21 if parsed.port is None else parsed.port,\n 'username': parsed.username,\n 'password': parsed.password\n }\n self.path = parsed.path\n\n if self.config['host'] is None:\n self.config['host'] = 'localhost'\n\n super(FTPArtifactRepository, self).__init__(artifact_uri)\n\n @contextmanager\n def get_ftp_client(self):\n ftp = FTP()\n ftp.connect(self.config['host'], self.config['port'])\n ftp.login(self.config['username'], self.config['password'])\n yield ftp\n ftp.close()\n\n @staticmethod\n def _is_dir(ftp, full_file_path):\n try:\n ftp.cwd(full_file_path)\n return True\n except ftplib.error_perm:\n return False\n\n @staticmethod\n def 
_mkdir(ftp, artifact_dir):\n try:\n if not FTPArtifactRepository._is_dir(ftp, artifact_dir):\n ftp.mkd(artifact_dir)\n except ftplib.error_perm:\n head, _ = posixpath.split(artifact_dir)\n FTPArtifactRepository._mkdir(ftp, head)\n FTPArtifactRepository._mkdir(ftp, artifact_dir)\n\n @staticmethod\n def _size(ftp, full_file_path):\n ftp.voidcmd('TYPE I')\n size = ftp.size(full_file_path)\n ftp.voidcmd('TYPE A')\n return size\n\n def log_artifact(self, local_file, artifact_path=None):\n with self.get_ftp_client() as ftp:\n artifact_dir = posixpath.join(self.path, artifact_path) \\\n if artifact_path else self.path\n self._mkdir(ftp, artifact_dir)\n with open(local_file, 'rb') as f:\n ftp.cwd(artifact_dir)\n ftp.storbinary('STOR ' + os.path.basename(local_file), f)\n\n def log_artifacts(self, local_dir, artifact_path=None):\n dest_path = posixpath.join(self.path, artifact_path) \\\n if artifact_path else self.path\n\n dest_path = posixpath.join(\n dest_path, os.path.split(local_dir)[1])\n dest_path_re = os.path.split(local_dir)[1]\n if artifact_path:\n dest_path_re = posixpath.join(\n artifact_path, os.path.split(local_dir)[1])\n\n local_dir = os.path.abspath(local_dir)\n for (root, _, filenames) in os.walk(local_dir):\n upload_path = dest_path\n if root != local_dir:\n rel_path = os.path.relpath(root, local_dir)\n rel_path = relative_path_to_artifact_path(rel_path)\n upload_path = posixpath.join(dest_path_re, rel_path)\n if not filenames:\n with self.get_ftp_client() as ftp:\n self._mkdir(ftp, posixpath.join(self.path, upload_path))\n for f in filenames:\n if os.path.isfile(os.path.join(root, f)):\n self.log_artifact(os.path.join(root, f), upload_path)\n\n def _is_directory(self, artifact_path):\n artifact_dir = self.path\n list_dir = posixpath.join(artifact_dir, artifact_path) if artifact_path else artifact_dir\n with self.get_ftp_client() as ftp:\n return self._is_dir(ftp, list_dir)\n\n def list_artifacts(self, path=None):\n with self.get_ftp_client() as ftp:\n artifact_dir = self.path\n list_dir = posixpath.join(artifact_dir, path) if path else artifact_dir\n if not self._is_dir(ftp, list_dir):\n return []\n artifact_files = ftp.nlst(list_dir)\n infos = []\n for file_name in artifact_files:\n file_path = (file_name if path is None\n else posixpath.join(path, file_name))\n full_file_path = posixpath.join(list_dir, file_name)\n if self._is_dir(ftp, full_file_path):\n infos.append(FileInfo(file_path, True, None))\n else:\n size = self._size(ftp, full_file_path)\n infos.append(FileInfo(file_path, False, size))\n return infos\n\n def _download_file(self, remote_file_path, local_path):\n remote_full_path = posixpath.join(self.path, remote_file_path) \\\n if remote_file_path else self.path\n with self.get_ftp_client() as ftp:\n with open(local_path, 'wb') as f:\n ftp.retrbinary('RETR ' + remote_full_path, f.write)\n\n def delete_artifacts(self, artifact_path=None):\n raise MlflowException('Not implemented yet')\n", "path": "mlflow/store/artifact/ftp_artifact_repo.py"}]}
2,442
141
gh_patches_debug_6388
rasdani/github-patches
git_diff
ephios-dev__ephios-259
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Translations for v0.2.0 </issue> <code> [start of ephios/user_management/consequences.py] 1 import functools 2 import operator 3 from datetime import datetime 4 5 import django.dispatch 6 from django.contrib.auth import get_user_model 7 from django.contrib.auth.models import Group 8 from django.db.models import OuterRef, Q, Subquery 9 from django.db.models.fields.json import KeyTransform 10 from django.utils.formats import date_format 11 from django.utils.translation import gettext_lazy as _ 12 from guardian.shortcuts import get_objects_for_user 13 14 from ephios.event_management.models import Event, Shift 15 from ephios.user_management.models import ( 16 Consequence, 17 Qualification, 18 QualificationGrant, 19 UserProfile, 20 WorkingHours, 21 ) 22 23 register_consequence_handlers = django.dispatch.Signal() 24 25 26 def all_consequence_handlers(): 27 for _, handlers in register_consequence_handlers.send(None): 28 yield from (h() for h in handlers) 29 30 31 def consequence_handler_from_slug(slug): 32 for handler in all_consequence_handlers(): 33 if handler.slug == slug: 34 return handler 35 raise ValueError(_("Consequence Handler '{slug}' was not found.").format(slug=slug)) 36 37 38 def editable_consequences(user): 39 handlers = list(all_consequence_handlers()) 40 qs = Consequence.objects.filter( 41 functools.reduce( 42 operator.or_, 43 (handler.editable_by_filter(user) for handler in handlers), 44 Q(), 45 ) 46 ).distinct() 47 for handler in handlers: 48 qs = handler.annotate_queryset(qs) 49 return qs 50 51 52 class ConsequenceError(Exception): 53 pass 54 55 56 class BaseConsequenceHandler: 57 @property 58 def slug(self): 59 raise NotImplementedError 60 61 @classmethod 62 def execute(cls, consequence): 63 """ 64 Gets a consequence and tries to execute whatever it is the consequence wants to happen. 65 """ 66 raise NotImplementedError 67 68 @classmethod 69 def render(cls, consequence): 70 """ 71 Return html describing the action to be done as a consequence of what. 72 Return None if you cannot handle this consequence. 73 """ 74 raise NotImplementedError 75 76 @classmethod 77 def editable_by_filter(cls, user: UserProfile): 78 """ 79 Return a Q object to filter consequence objects of this type that can be confirmed by the given user. 80 """ 81 raise NotImplementedError 82 83 @classmethod 84 def annotate_queryset(cls, qs): 85 """ 86 Annotate a queryset of heterogeneous consequences to avoid needing additional queries for rendering a consequence. 87 Does no annotations by default. 
88 """ 89 return qs 90 91 92 class WorkingHoursConsequenceHandler(BaseConsequenceHandler): 93 slug = "ephios.grant_working_hours" 94 95 @classmethod 96 def create( 97 cls, 98 user: UserProfile, 99 when: datetime, 100 hours: float, 101 reason: str, 102 ): 103 return Consequence.objects.create( 104 slug=cls.slug, 105 user=user, 106 data=dict(hours=hours, date=when, reason=reason), 107 ) 108 109 @classmethod 110 def execute(cls, consequence): 111 WorkingHours.objects.create( 112 user=consequence.user, 113 date=consequence.data["date"], 114 hours=consequence.data["hours"], 115 reason=consequence.data.get("reason"), 116 ) 117 118 @classmethod 119 def render(cls, consequence): 120 return _("{user} logs {hours:.1f} hours on {date} for {reason}").format( 121 user=consequence.user.get_full_name(), 122 hours=consequence.data.get("hours"), 123 reason=consequence.data.get("reason"), 124 date=date_format(consequence.data.get("date")), 125 ) 126 127 @classmethod 128 def editable_by_filter(cls, user): 129 return Q( 130 slug=cls.slug, 131 user__groups__in=get_objects_for_user( 132 user, "decide_workinghours_for_group", klass=Group 133 ), 134 ) 135 136 137 class QualificationConsequenceHandler(BaseConsequenceHandler): 138 slug = "ephios.grant_qualification" 139 140 @classmethod 141 def create( 142 cls, 143 user: UserProfile, 144 qualification: Qualification, 145 expires: datetime = None, 146 shift: Shift = None, 147 ): 148 return Consequence.objects.create( 149 slug=cls.slug, 150 user=user, 151 data=dict( 152 qualification_id=qualification.id, 153 event_id=None if shift is None else shift.event_id, 154 expires=expires, 155 ), 156 ) 157 158 @classmethod 159 def execute(cls, consequence): 160 qg, created = QualificationGrant.objects.get_or_create( 161 defaults=dict( 162 expires=consequence.data["expires"], 163 ), 164 user=consequence.user, 165 qualification_id=consequence.data["qualification_id"], 166 ) 167 if not created: 168 qg.expires = max( 169 qg.expires, consequence.data["expires"], key=lambda dt: dt or datetime.max 170 ) 171 qg.save() 172 173 @classmethod 174 def render(cls, consequence): 175 # Get all the strings we need from the annotations, or fetch them from DB as backup 176 try: # try the annotation 177 event_title = consequence.event_title 178 except AttributeError: 179 if event_id := consequence.data["event_id"]: # fetch from DB as backup 180 event_title = Event.objects.get(id=event_id).title 181 else: # no event has been associated 182 event_title = None 183 184 try: 185 qualification_title = consequence.qualification_title 186 except AttributeError: 187 qualification_title = Qualification.objects.get( 188 id=consequence.data["qualification_id"] 189 ).title 190 191 if expires := consequence.data.get("expires"): 192 expires = date_format(expires) 193 194 user = consequence.user.get_full_name() 195 196 # build string based on available data 197 198 if event_title: 199 s = _("{user} acquires '{qualification}' after participating in {event}.").format( 200 user=user, qualification=qualification_title, event=event_title 201 ) 202 else: 203 s = _("{user} acquires '{qualification}'.").format( 204 user=user, 205 qualification=qualification_title, 206 ) 207 208 if expires: 209 s += " " + _("(valid until {expires_str})").format(expires_str=expires) 210 return s 211 212 @classmethod 213 def editable_by_filter(cls, user: UserProfile): 214 # Qualifications can be granted by people who... 
215 return Q(slug=cls.slug,) & ( 216 Q( # are responsible for the event the consequence originated from, if applicable 217 data__event_id__isnull=False, 218 data__event_id__in=get_objects_for_user(user, perms="change_event", klass=Event), 219 ) 220 | Q( # can edit the affected user anyway 221 user__in=get_objects_for_user( 222 user, perms="user_management.change_userprofile", klass=get_user_model() 223 ) 224 ) 225 ) 226 227 @classmethod 228 def annotate_queryset(cls, qs): 229 return qs.annotate( 230 qualification_id=KeyTransform("qualification_id", "data"), 231 event_id=KeyTransform("event_id", "data"), 232 ).annotate( 233 qualification_title=Subquery( 234 Qualification.objects.filter(id=OuterRef("qualification_id")).values("title")[:1] 235 ), 236 event_title=Subquery(Event.objects.filter(id=OuterRef("event_id")).values("title")[:1]), 237 ) 238 [end of ephios/user_management/consequences.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ephios/user_management/consequences.py b/ephios/user_management/consequences.py --- a/ephios/user_management/consequences.py +++ b/ephios/user_management/consequences.py @@ -117,7 +117,7 @@ @classmethod def render(cls, consequence): - return _("{user} logs {hours:.1f} hours on {date} for {reason}").format( + return _("{user} logs {hours:.1f} hours on {date}. Reason: {reason}").format( user=consequence.user.get_full_name(), hours=consequence.data.get("hours"), reason=consequence.data.get("reason"),
{"golden_diff": "diff --git a/ephios/user_management/consequences.py b/ephios/user_management/consequences.py\n--- a/ephios/user_management/consequences.py\n+++ b/ephios/user_management/consequences.py\n@@ -117,7 +117,7 @@\n \n @classmethod\n def render(cls, consequence):\n- return _(\"{user} logs {hours:.1f} hours on {date} for {reason}\").format(\n+ return _(\"{user} logs {hours:.1f} hours on {date}. Reason: {reason}\").format(\n user=consequence.user.get_full_name(),\n hours=consequence.data.get(\"hours\"),\n reason=consequence.data.get(\"reason\"),\n", "issue": "Translations for v0.2.0\n\n", "before_files": [{"content": "import functools\nimport operator\nfrom datetime import datetime\n\nimport django.dispatch\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.models import Group\nfrom django.db.models import OuterRef, Q, Subquery\nfrom django.db.models.fields.json import KeyTransform\nfrom django.utils.formats import date_format\nfrom django.utils.translation import gettext_lazy as _\nfrom guardian.shortcuts import get_objects_for_user\n\nfrom ephios.event_management.models import Event, Shift\nfrom ephios.user_management.models import (\n Consequence,\n Qualification,\n QualificationGrant,\n UserProfile,\n WorkingHours,\n)\n\nregister_consequence_handlers = django.dispatch.Signal()\n\n\ndef all_consequence_handlers():\n for _, handlers in register_consequence_handlers.send(None):\n yield from (h() for h in handlers)\n\n\ndef consequence_handler_from_slug(slug):\n for handler in all_consequence_handlers():\n if handler.slug == slug:\n return handler\n raise ValueError(_(\"Consequence Handler '{slug}' was not found.\").format(slug=slug))\n\n\ndef editable_consequences(user):\n handlers = list(all_consequence_handlers())\n qs = Consequence.objects.filter(\n functools.reduce(\n operator.or_,\n (handler.editable_by_filter(user) for handler in handlers),\n Q(),\n )\n ).distinct()\n for handler in handlers:\n qs = handler.annotate_queryset(qs)\n return qs\n\n\nclass ConsequenceError(Exception):\n pass\n\n\nclass BaseConsequenceHandler:\n @property\n def slug(self):\n raise NotImplementedError\n\n @classmethod\n def execute(cls, consequence):\n \"\"\"\n Gets a consequence and tries to execute whatever it is the consequence wants to happen.\n \"\"\"\n raise NotImplementedError\n\n @classmethod\n def render(cls, consequence):\n \"\"\"\n Return html describing the action to be done as a consequence of what.\n Return None if you cannot handle this consequence.\n \"\"\"\n raise NotImplementedError\n\n @classmethod\n def editable_by_filter(cls, user: UserProfile):\n \"\"\"\n Return a Q object to filter consequence objects of this type that can be confirmed by the given user.\n \"\"\"\n raise NotImplementedError\n\n @classmethod\n def annotate_queryset(cls, qs):\n \"\"\"\n Annotate a queryset of heterogeneous consequences to avoid needing additional queries for rendering a consequence.\n Does no annotations by default.\n \"\"\"\n return qs\n\n\nclass WorkingHoursConsequenceHandler(BaseConsequenceHandler):\n slug = \"ephios.grant_working_hours\"\n\n @classmethod\n def create(\n cls,\n user: UserProfile,\n when: datetime,\n hours: float,\n reason: str,\n ):\n return Consequence.objects.create(\n slug=cls.slug,\n user=user,\n data=dict(hours=hours, date=when, reason=reason),\n )\n\n @classmethod\n def execute(cls, consequence):\n WorkingHours.objects.create(\n user=consequence.user,\n date=consequence.data[\"date\"],\n hours=consequence.data[\"hours\"],\n 
reason=consequence.data.get(\"reason\"),\n )\n\n @classmethod\n def render(cls, consequence):\n return _(\"{user} logs {hours:.1f} hours on {date} for {reason}\").format(\n user=consequence.user.get_full_name(),\n hours=consequence.data.get(\"hours\"),\n reason=consequence.data.get(\"reason\"),\n date=date_format(consequence.data.get(\"date\")),\n )\n\n @classmethod\n def editable_by_filter(cls, user):\n return Q(\n slug=cls.slug,\n user__groups__in=get_objects_for_user(\n user, \"decide_workinghours_for_group\", klass=Group\n ),\n )\n\n\nclass QualificationConsequenceHandler(BaseConsequenceHandler):\n slug = \"ephios.grant_qualification\"\n\n @classmethod\n def create(\n cls,\n user: UserProfile,\n qualification: Qualification,\n expires: datetime = None,\n shift: Shift = None,\n ):\n return Consequence.objects.create(\n slug=cls.slug,\n user=user,\n data=dict(\n qualification_id=qualification.id,\n event_id=None if shift is None else shift.event_id,\n expires=expires,\n ),\n )\n\n @classmethod\n def execute(cls, consequence):\n qg, created = QualificationGrant.objects.get_or_create(\n defaults=dict(\n expires=consequence.data[\"expires\"],\n ),\n user=consequence.user,\n qualification_id=consequence.data[\"qualification_id\"],\n )\n if not created:\n qg.expires = max(\n qg.expires, consequence.data[\"expires\"], key=lambda dt: dt or datetime.max\n )\n qg.save()\n\n @classmethod\n def render(cls, consequence):\n # Get all the strings we need from the annotations, or fetch them from DB as backup\n try: # try the annotation\n event_title = consequence.event_title\n except AttributeError:\n if event_id := consequence.data[\"event_id\"]: # fetch from DB as backup\n event_title = Event.objects.get(id=event_id).title\n else: # no event has been associated\n event_title = None\n\n try:\n qualification_title = consequence.qualification_title\n except AttributeError:\n qualification_title = Qualification.objects.get(\n id=consequence.data[\"qualification_id\"]\n ).title\n\n if expires := consequence.data.get(\"expires\"):\n expires = date_format(expires)\n\n user = consequence.user.get_full_name()\n\n # build string based on available data\n\n if event_title:\n s = _(\"{user} acquires '{qualification}' after participating in {event}.\").format(\n user=user, qualification=qualification_title, event=event_title\n )\n else:\n s = _(\"{user} acquires '{qualification}'.\").format(\n user=user,\n qualification=qualification_title,\n )\n\n if expires:\n s += \" \" + _(\"(valid until {expires_str})\").format(expires_str=expires)\n return s\n\n @classmethod\n def editable_by_filter(cls, user: UserProfile):\n # Qualifications can be granted by people who...\n return Q(slug=cls.slug,) & (\n Q( # are responsible for the event the consequence originated from, if applicable\n data__event_id__isnull=False,\n data__event_id__in=get_objects_for_user(user, perms=\"change_event\", klass=Event),\n )\n | Q( # can edit the affected user anyway\n user__in=get_objects_for_user(\n user, perms=\"user_management.change_userprofile\", klass=get_user_model()\n )\n )\n )\n\n @classmethod\n def annotate_queryset(cls, qs):\n return qs.annotate(\n qualification_id=KeyTransform(\"qualification_id\", \"data\"),\n event_id=KeyTransform(\"event_id\", \"data\"),\n ).annotate(\n qualification_title=Subquery(\n Qualification.objects.filter(id=OuterRef(\"qualification_id\")).values(\"title\")[:1]\n ),\n event_title=Subquery(Event.objects.filter(id=OuterRef(\"event_id\")).values(\"title\")[:1]),\n )\n", "path": 
"ephios/user_management/consequences.py"}]}
2,665
153
gh_patches_debug_58539
rasdani/github-patches
git_diff
ansible__molecule-4038
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> created: true is ignored in state.yml # Issue Type - Bug report # Molecule and Ansible details ``` ansible --version && molecule --version ansible [core 2.15.3] config file = None configured module search path = ['/home/manu/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /home/manu/.local/share/virtualenvs/molecule-test-IsY3eZIi/lib/python3.11/site-packages/ansible ansible collection location = /home/manu/.ansible/collections:/usr/share/ansible/collections executable location = /home/manu/.local/share/virtualenvs/molecule-test-IsY3eZIi/bin/ansible python version = 3.11.5 (main, Aug 28 2023, 20:02:58) [GCC 13.2.1 20230801] (/home/manu/.local/share/virtualenvs/molecule-test-IsY3eZIi/bin/python) jinja version = 3.1.2 libyaml = True molecule 6.0.2 using python 3.11 ansible:2.15.3 default:6.0.2 from molecule molecule-qemu:0.5.3 from molecule_qemu ``` Molecule installation method: source Ansible installation method: pip # Desired Behavior In molecule v5, the `create` step was automatically skipped when `created: true` in `<XDG_CACHE_HOME>/molecule//<scenario>/state.yml` with the message `WARNING Skipping, instances already created.`. This is the desired behavior. Here an example with a simple hello_world role after a `molecule create` execution. The molecule-qemu plugin is used here. ``` ❯ molecule converge INFO default scenario test matrix: dependency, create, prepare, converge INFO Performing prerun with role_name_check=0... INFO Set ANSIBLE_LIBRARY=/home/manu/.cache/ansible-compat/35072c/modules:/home/manu/.ansible/plugins/modules:/usr/share/ansible/plugins/modules INFO Set ANSIBLE_COLLECTIONS_PATH=/home/manu/.cache/ansible-compat/35072c/collections:/home/manu/.ansible/collections:/usr/share/ansible/collections INFO Set ANSIBLE_ROLES_PATH=/home/manu/.cache/ansible-compat/35072c/roles:/home/manu/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles INFO Running default > dependency WARNING Skipping, missing the requirements file. WARNING Skipping, missing the requirements file. INFO Running default > create WARNING Skipping, instances already created. INFO Running default > prepare WARNING Skipping, prepare playbook not configured. INFO Running default > converge PLAY [all] ********************************************************************* TASK [hello_world : Hello world] *********************************************** ok: [test-hello-world] => { "msg": "Hello, world!" } PLAY RECAP ********************************************************************* test-hello-world : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 ``` # Actual Behaviour Since molecule v6, the `WARNING Skipping, instances already created.` message is no longer displayed and the create step is executed each time the `molecule converge` is called. Here an example with the same role, with the same conditions except that molecule version is 6.0.2. ``` INFO default scenario test matrix: dependency, create, prepare, converge INFO Performing prerun with role_name_check=0... INFO Running default > dependency WARNING Skipping, missing the requirements file. WARNING Skipping, missing the requirements file. 
INFO Running default > create PLAY [Create] ****************************************************************** TASK [Gather only necessary facts] ********************************************* ok: [localhost] TASK [Register VMs data] ******************************************************* ok: [localhost] => (item=test-hello-world) TASK [Prepare VMs data] ******************************************************** ok: [localhost] ... TASK [Dump VMs config] ********************************************************* ok: [localhost] PLAY RECAP ********************************************************************* localhost : ok=31 changed=0 unreachable=0 failed=0 skipped=5 rescued=0 ignored=0 INFO Running default > prepare WARNING Skipping, prepare playbook not configured. INFO Running default > converge PLAY [all] ********************************************************************* TASK [hello_world : Hello world] *********************************************** ok: [test-hello-world] => { "msg": "Hello, world!" } PLAY RECAP ********************************************************************* test-hello-world : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 ``` </issue> <code> [start of src/molecule/command/create.py] 1 # Copyright (c) 2015-2018 Cisco Systems, Inc. 2 # 3 # Permission is hereby granted, free of charge, to any person obtaining a copy 4 # of this software and associated documentation files (the "Software"), to 5 # deal in the Software without restriction, including without limitation the 6 # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or 7 # sell copies of the Software, and to permit persons to whom the Software is 8 # furnished to do so, subject to the following conditions: 9 # 10 # The above copyright notice and this permission notice shall be included in 11 # all copies or substantial portions of the Software. 12 # 13 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 18 # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 19 # DEALINGS IN THE SOFTWARE. 20 """Create Command Module.""" 21 22 import logging 23 24 import click 25 26 from molecule.api import drivers 27 from molecule.command import base 28 from molecule.config import DEFAULT_DRIVER 29 30 LOG = logging.getLogger(__name__) 31 32 33 class Create(base.Base): 34 """Create Command Class.""" 35 36 def execute(self, action_args=None): 37 """Execute the actions necessary to perform a `molecule create` and \ 38 returns None. 39 40 :return: None 41 """ 42 self._config.state.change_state("driver", self._config.driver.name) 43 44 self._config.provisioner.create() 45 46 self._config.state.change_state("created", True) 47 48 49 @base.click_command_ex() 50 @click.pass_context 51 @click.option( 52 "--scenario-name", 53 "-s", 54 default=base.MOLECULE_DEFAULT_SCENARIO_NAME, 55 help=f"Name of the scenario to target. ({base.MOLECULE_DEFAULT_SCENARIO_NAME})", 56 ) 57 @click.option( 58 "--driver-name", 59 "-d", 60 type=click.Choice([str(s) for s in drivers()]), 61 help=f"Name of driver to use. 
({DEFAULT_DRIVER})", 62 ) 63 def create(ctx, scenario_name, driver_name): # pragma: no cover 64 """Use the provisioner to start the instances.""" 65 args = ctx.obj.get("args") 66 subcommand = base._get_subcommand(__name__) 67 command_args = {"subcommand": subcommand, "driver_name": driver_name} 68 69 base.execute_cmdline_scenarios(scenario_name, args, command_args) 70 [end of src/molecule/command/create.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/molecule/command/create.py b/src/molecule/command/create.py --- a/src/molecule/command/create.py +++ b/src/molecule/command/create.py @@ -41,6 +41,11 @@ """ self._config.state.change_state("driver", self._config.driver.name) + if self._config.state.created: + msg = "Skipping, instances already created." + LOG.warning(msg) + return + self._config.provisioner.create() self._config.state.change_state("created", True)
{"golden_diff": "diff --git a/src/molecule/command/create.py b/src/molecule/command/create.py\n--- a/src/molecule/command/create.py\n+++ b/src/molecule/command/create.py\n@@ -41,6 +41,11 @@\n \"\"\"\n self._config.state.change_state(\"driver\", self._config.driver.name)\n \n+ if self._config.state.created:\n+ msg = \"Skipping, instances already created.\"\n+ LOG.warning(msg)\n+ return\n+\n self._config.provisioner.create()\n \n self._config.state.change_state(\"created\", True)\n", "issue": "created: true is ignored in state.yml\n# Issue Type\r\n\r\n- Bug report\r\n\r\n# Molecule and Ansible details\r\n\r\n```\r\nansible --version && molecule --version\r\nansible [core 2.15.3]\r\n config file = None\r\n configured module search path = ['/home/manu/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']\r\n ansible python module location = /home/manu/.local/share/virtualenvs/molecule-test-IsY3eZIi/lib/python3.11/site-packages/ansible\r\n ansible collection location = /home/manu/.ansible/collections:/usr/share/ansible/collections\r\n executable location = /home/manu/.local/share/virtualenvs/molecule-test-IsY3eZIi/bin/ansible\r\n python version = 3.11.5 (main, Aug 28 2023, 20:02:58) [GCC 13.2.1 20230801] (/home/manu/.local/share/virtualenvs/molecule-test-IsY3eZIi/bin/python)\r\n jinja version = 3.1.2\r\n libyaml = True\r\nmolecule 6.0.2 using python 3.11\r\n ansible:2.15.3\r\n default:6.0.2 from molecule\r\n molecule-qemu:0.5.3 from molecule_qemu\r\n```\r\n\r\nMolecule installation method: source\r\nAnsible installation method: pip\r\n\r\n# Desired Behavior\r\n\r\nIn molecule v5, the `create` step was automatically skipped when `created: true` in `<XDG_CACHE_HOME>/molecule//<scenario>/state.yml` with the message `WARNING Skipping, instances already created.`. This is the desired behavior.\r\n\r\nHere an example with a simple hello_world role after a `molecule create` execution. 
The molecule-qemu plugin is used here.\r\n```\r\n\u276f molecule converge\r\nINFO default scenario test matrix: dependency, create, prepare, converge\r\nINFO Performing prerun with role_name_check=0...\r\nINFO Set ANSIBLE_LIBRARY=/home/manu/.cache/ansible-compat/35072c/modules:/home/manu/.ansible/plugins/modules:/usr/share/ansible/plugins/modules\r\nINFO Set ANSIBLE_COLLECTIONS_PATH=/home/manu/.cache/ansible-compat/35072c/collections:/home/manu/.ansible/collections:/usr/share/ansible/collections\r\nINFO Set ANSIBLE_ROLES_PATH=/home/manu/.cache/ansible-compat/35072c/roles:/home/manu/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles\r\nINFO Running default > dependency\r\nWARNING Skipping, missing the requirements file.\r\nWARNING Skipping, missing the requirements file.\r\nINFO Running default > create\r\nWARNING Skipping, instances already created.\r\nINFO Running default > prepare\r\nWARNING Skipping, prepare playbook not configured.\r\nINFO Running default > converge\r\n\r\nPLAY [all] *********************************************************************\r\n\r\nTASK [hello_world : Hello world] ***********************************************\r\nok: [test-hello-world] => {\r\n \"msg\": \"Hello, world!\"\r\n}\r\n\r\nPLAY RECAP *********************************************************************\r\ntest-hello-world : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0\r\n```\r\n\r\n# Actual Behaviour\r\n\r\nSince molecule v6, the `WARNING Skipping, instances already created.` message is no longer displayed and the create step is executed each time the `molecule converge` is called.\r\n\r\nHere an example with the same role, with the same conditions except that molecule version is 6.0.2.\r\n```\r\nINFO default scenario test matrix: dependency, create, prepare, converge\r\nINFO Performing prerun with role_name_check=0...\r\nINFO Running default > dependency\r\nWARNING Skipping, missing the requirements file.\r\nWARNING Skipping, missing the requirements file.\r\nINFO Running default > create\r\n\r\nPLAY [Create] ******************************************************************\r\n\r\nTASK [Gather only necessary facts] *********************************************\r\nok: [localhost]\r\n\r\nTASK [Register VMs data] *******************************************************\r\nok: [localhost] => (item=test-hello-world)\r\n\r\nTASK [Prepare VMs data] ********************************************************\r\nok: [localhost]\r\n...\r\nTASK [Dump VMs config] *********************************************************\r\nok: [localhost]\r\n\r\nPLAY RECAP *********************************************************************\r\nlocalhost : ok=31 changed=0 unreachable=0 failed=0 skipped=5 rescued=0 ignored=0\r\n\r\nINFO Running default > prepare\r\nWARNING Skipping, prepare playbook not configured.\r\nINFO Running default > converge\r\n\r\nPLAY [all] *********************************************************************\r\n\r\nTASK [hello_world : Hello world] ***********************************************\r\nok: [test-hello-world] => {\r\n \"msg\": \"Hello, world!\"\r\n}\r\n\r\nPLAY RECAP *********************************************************************\r\ntest-hello-world : ok=1 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0\r\n```\n", "before_files": [{"content": "# Copyright (c) 2015-2018 Cisco Systems, Inc.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the 
\"Software\"), to\n# deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n# sell copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n\"\"\"Create Command Module.\"\"\"\n\nimport logging\n\nimport click\n\nfrom molecule.api import drivers\nfrom molecule.command import base\nfrom molecule.config import DEFAULT_DRIVER\n\nLOG = logging.getLogger(__name__)\n\n\nclass Create(base.Base):\n \"\"\"Create Command Class.\"\"\"\n\n def execute(self, action_args=None):\n \"\"\"Execute the actions necessary to perform a `molecule create` and \\\n returns None.\n\n :return: None\n \"\"\"\n self._config.state.change_state(\"driver\", self._config.driver.name)\n\n self._config.provisioner.create()\n\n self._config.state.change_state(\"created\", True)\n\n\[email protected]_command_ex()\[email protected]_context\[email protected](\n \"--scenario-name\",\n \"-s\",\n default=base.MOLECULE_DEFAULT_SCENARIO_NAME,\n help=f\"Name of the scenario to target. ({base.MOLECULE_DEFAULT_SCENARIO_NAME})\",\n)\[email protected](\n \"--driver-name\",\n \"-d\",\n type=click.Choice([str(s) for s in drivers()]),\n help=f\"Name of driver to use. ({DEFAULT_DRIVER})\",\n)\ndef create(ctx, scenario_name, driver_name): # pragma: no cover\n \"\"\"Use the provisioner to start the instances.\"\"\"\n args = ctx.obj.get(\"args\")\n subcommand = base._get_subcommand(__name__)\n command_args = {\"subcommand\": subcommand, \"driver_name\": driver_name}\n\n base.execute_cmdline_scenarios(scenario_name, args, command_args)\n", "path": "src/molecule/command/create.py"}]}
2,311
121
gh_patches_debug_3440
rasdani/github-patches
git_diff
spack__spack-21932
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> shell test kcov suggestion does not work ### Steps to reproduce the issue ```console $ ./share/spack/qa/run-shell-tests ERROR: kcov is required to run this script. To install with Spack, run: $ spack install kcov Then add the bin directory to your PATH. $ spack load kcov $ ./share/spack/qa/run-shell-tests ERROR: kcov is required to run this script. To install with Spack, run: $ spack install kcov Then add the bin directory to your PATH. ``` Seems like there is no kcov binary: ``` $ kcov Command 'kcov' not found, but can be installed with: sudo apt install kcov ``` * **Spack:** 0.16.0-1501-e8eb76c198 * **Python:** 3.8.7 * **Platform:** linux-ubuntu20.04-zen2 * **Concretizer:** clingo ### Additional information - [x] I have run `spack debug report` and reported the version of Spack/Python/Platform - [x] I have searched the issues of this repo and believe this is not a duplicate - [x] I have run the failing commands in debug mode and reported the output </issue> <code> [start of var/spack/repos/builtin/packages/kcov/package.py] 1 # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other 2 # Spack Project Developers. See the top-level COPYRIGHT file for details. 3 # 4 # SPDX-License-Identifier: (Apache-2.0 OR MIT) 5 6 from spack import * 7 8 9 class Kcov(CMakePackage): 10 """Code coverage tool for compiled programs, Python and Bash which uses 11 debugging information to collect and report data without special 12 compilation options""" 13 14 homepage = "http://simonkagstrom.github.io/kcov/index.html" 15 url = "https://github.com/SimonKagstrom/kcov/archive/38.tar.gz" 16 17 version('38', sha256='b37af60d81a9b1e3b140f9473bdcb7975af12040feb24cc666f9bb2bb0be68b4') 18 19 depends_on('[email protected]:', type='build') 20 depends_on('zlib') 21 depends_on('curl') 22 23 def cmake_args(self): 24 # Necessary at least on macOS, fixes linking error to LLDB 25 # https://github.com/Homebrew/homebrew-core/blob/master/Formula/kcov.rb 26 return ['-DSPECIFY_RPATH=ON'] 27 28 @run_after('install') 29 @on_package_attributes(run_tests=True) 30 def test_install(self): 31 # The help message exits with an exit code of 1 32 kcov = Executable(self.prefix.bin.kcov) 33 kcov('-h', ignore_errors=1) 34 [end of var/spack/repos/builtin/packages/kcov/package.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/var/spack/repos/builtin/packages/kcov/package.py b/var/spack/repos/builtin/packages/kcov/package.py --- a/var/spack/repos/builtin/packages/kcov/package.py +++ b/var/spack/repos/builtin/packages/kcov/package.py @@ -19,6 +19,8 @@ depends_on('[email protected]:', type='build') depends_on('zlib') depends_on('curl') + depends_on('elfutils') + depends_on('binutils +libiberty', type='link') def cmake_args(self): # Necessary at least on macOS, fixes linking error to LLDB
{"golden_diff": "diff --git a/var/spack/repos/builtin/packages/kcov/package.py b/var/spack/repos/builtin/packages/kcov/package.py\n--- a/var/spack/repos/builtin/packages/kcov/package.py\n+++ b/var/spack/repos/builtin/packages/kcov/package.py\n@@ -19,6 +19,8 @@\n depends_on('[email protected]:', type='build')\n depends_on('zlib')\n depends_on('curl')\n+ depends_on('elfutils')\n+ depends_on('binutils +libiberty', type='link')\n \n def cmake_args(self):\n # Necessary at least on macOS, fixes linking error to LLDB\n", "issue": "shell test kcov suggestion does not work\n### Steps to reproduce the issue\r\n\r\n```console\r\n$ ./share/spack/qa/run-shell-tests \r\nERROR: kcov is required to run this script.\r\n\r\nTo install with Spack, run:\r\n $ spack install kcov\r\nThen add the bin directory to your PATH.\r\n$ spack load kcov\r\n$ ./share/spack/qa/run-shell-tests \r\nERROR: kcov is required to run this script.\r\n\r\nTo install with Spack, run:\r\n $ spack install kcov\r\nThen add the bin directory to your PATH.\r\n```\r\n\r\nSeems like there is no kcov binary:\r\n\r\n```\r\n$ kcov\r\n\r\nCommand 'kcov' not found, but can be installed with:\r\n\r\nsudo apt install kcov\r\n```\r\n\r\n* **Spack:** 0.16.0-1501-e8eb76c198\r\n* **Python:** 3.8.7\r\n* **Platform:** linux-ubuntu20.04-zen2\r\n* **Concretizer:** clingo\r\n\r\n### Additional information\r\n\r\n- [x] I have run `spack debug report` and reported the version of Spack/Python/Platform\r\n- [x] I have searched the issues of this repo and believe this is not a duplicate\r\n- [x] I have run the failing commands in debug mode and reported the output\r\n\r\n\r\n\n", "before_files": [{"content": "# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\nfrom spack import *\n\n\nclass Kcov(CMakePackage):\n \"\"\"Code coverage tool for compiled programs, Python and Bash which uses\n debugging information to collect and report data without special\n compilation options\"\"\"\n\n homepage = \"http://simonkagstrom.github.io/kcov/index.html\"\n url = \"https://github.com/SimonKagstrom/kcov/archive/38.tar.gz\"\n\n version('38', sha256='b37af60d81a9b1e3b140f9473bdcb7975af12040feb24cc666f9bb2bb0be68b4')\n\n depends_on('[email protected]:', type='build')\n depends_on('zlib')\n depends_on('curl')\n\n def cmake_args(self):\n # Necessary at least on macOS, fixes linking error to LLDB\n # https://github.com/Homebrew/homebrew-core/blob/master/Formula/kcov.rb\n return ['-DSPECIFY_RPATH=ON']\n\n @run_after('install')\n @on_package_attributes(run_tests=True)\n def test_install(self):\n # The help message exits with an exit code of 1\n kcov = Executable(self.prefix.bin.kcov)\n kcov('-h', ignore_errors=1)\n", "path": "var/spack/repos/builtin/packages/kcov/package.py"}]}
1,247
144
gh_patches_debug_23351
rasdani/github-patches
git_diff
pwndbg__pwndbg-671
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> subprocess.CalledProcessError: Command '['/usr/local/bin/checksec', '--file', '/tmp/xxx']' returned non-zero exit status 2. ``` pwndbg> checksec Traceback (most recent call last): File "/root/pwndbg/pwndbg/commands/__init__.py", line 109, in __call__ return self.function(*args, **kwargs) File "/root/pwndbg/pwndbg/commands/__init__.py", line 189, in _OnlyWithFile return function(*a, **kw) File "/root/pwndbg/pwndbg/commands/checksec.py", line 16, in checksec print(pwndbg.wrappers.checksec.get_raw_out()) File "/root/pwndbg/pwndbg/commands/__init__.py", line 189, in _OnlyWithFile return function(*a, **kw) File "/root/pwndbg/pwndbg/wrappers/__init__.py", line 28, in _OnlyWithCommand return function(*a, **kw) File "/root/pwndbg/pwndbg/memoize.py", line 48, in __call__ value = self.func(*args, **kwargs) File "/root/pwndbg/pwndbg/wrappers/checksec.py", line 20, in get_raw_out return pwndbg.wrappers.call_cmd(cmd) File "/root/pwndbg/pwndbg/wrappers/__init__.py", line 35, in call_cmd return subprocess.check_output(cmd, stderr=STDOUT).decode('utf-8') File "/usr/lib/python3.7/subprocess.py", line 395, in check_output **kwargs).stdout File "/usr/lib/python3.7/subprocess.py", line 487, in run output=stdout, stderr=stderr) subprocess.CalledProcessError: Command '['/usr/local/bin/checksec', '--file', '/tmp/forgot']' returned non-zero exit status 2. ``` when checksec running in python3 pwntools environment, raise a failure. Here's the reason: ``` root@kali:/tmp# checksec --file forgot usage: checksec [-h] elf [elf ...] checksec: error: unrecognized arguments: --file ``` So modify pwndbg/pwndbg/wrappers/checksec.py: 19 with: `cmd = [get_raw_out.cmd_path, local_path]` Then it will be solved </issue> <code> [start of pwndbg/wrappers/checksec.py] 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 from __future__ import absolute_import 4 from __future__ import division 5 from __future__ import print_function 6 from __future__ import unicode_literals 7 8 import pwndbg.commands 9 import pwndbg.memoize 10 import pwndbg.wrappers 11 12 cmd_name = "checksec" 13 14 @pwndbg.wrappers.OnlyWithCommand(cmd_name) 15 @pwndbg.memoize.reset_on_objfile 16 def get_raw_out(): 17 18 local_path = pwndbg.file.get_file(pwndbg.proc.exe) 19 cmd = [get_raw_out.cmd_path, "--file", local_path] 20 return pwndbg.wrappers.call_cmd(cmd) 21 22 @pwndbg.wrappers.OnlyWithCommand(cmd_name) 23 def relro_status(): 24 relro = "No RELRO" 25 out = get_raw_out() 26 27 if "Full RELRO" in out: 28 relro = "Full RELRO" 29 elif "Partial RELRO" in out: 30 relro = "Partial RELRO" 31 32 return relro 33 34 @pwndbg.wrappers.OnlyWithCommand(cmd_name) 35 def pie_status(): 36 pie = "No PIE" 37 out = get_raw_out() 38 39 if "PIE enabled" in out: 40 pie = "PIE enabled" 41 42 return pie 43 [end of pwndbg/wrappers/checksec.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pwndbg/wrappers/checksec.py b/pwndbg/wrappers/checksec.py --- a/pwndbg/wrappers/checksec.py +++ b/pwndbg/wrappers/checksec.py @@ -5,6 +5,10 @@ from __future__ import print_function from __future__ import unicode_literals +import subprocess +from re import search +from subprocess import STDOUT + import pwndbg.commands import pwndbg.memoize import pwndbg.wrappers @@ -14,10 +18,17 @@ @pwndbg.wrappers.OnlyWithCommand(cmd_name) @pwndbg.memoize.reset_on_objfile def get_raw_out(): - local_path = pwndbg.file.get_file(pwndbg.proc.exe) - cmd = [get_raw_out.cmd_path, "--file", local_path] - return pwndbg.wrappers.call_cmd(cmd) + try: + version_output = subprocess.check_output([get_raw_out.cmd_path, "--version"], stderr=STDOUT).decode('utf-8') + match = search('checksec v([\\w.]+),', version_output) + if match: + version = tuple(map(int, (match.group(1).split(".")))) + if version >= (2, 0): + return pwndbg.wrappers.call_cmd([get_raw_out.cmd_path, "--file=" + local_path]) + except Exception: + pass + return pwndbg.wrappers.call_cmd([get_raw_out.cmd_path, "--file", local_path]) @pwndbg.wrappers.OnlyWithCommand(cmd_name) def relro_status():
{"golden_diff": "diff --git a/pwndbg/wrappers/checksec.py b/pwndbg/wrappers/checksec.py\n--- a/pwndbg/wrappers/checksec.py\n+++ b/pwndbg/wrappers/checksec.py\n@@ -5,6 +5,10 @@\n from __future__ import print_function\n from __future__ import unicode_literals\n \n+import subprocess\n+from re import search\n+from subprocess import STDOUT\n+\n import pwndbg.commands\n import pwndbg.memoize\n import pwndbg.wrappers\n@@ -14,10 +18,17 @@\n @pwndbg.wrappers.OnlyWithCommand(cmd_name)\n @pwndbg.memoize.reset_on_objfile\n def get_raw_out():\n-\n local_path = pwndbg.file.get_file(pwndbg.proc.exe)\n- cmd = [get_raw_out.cmd_path, \"--file\", local_path]\n- return pwndbg.wrappers.call_cmd(cmd)\n+ try:\n+ version_output = subprocess.check_output([get_raw_out.cmd_path, \"--version\"], stderr=STDOUT).decode('utf-8')\n+ match = search('checksec v([\\\\w.]+),', version_output)\n+ if match:\n+ version = tuple(map(int, (match.group(1).split(\".\"))))\n+ if version >= (2, 0):\n+ return pwndbg.wrappers.call_cmd([get_raw_out.cmd_path, \"--file=\" + local_path])\n+ except Exception:\n+ pass\n+ return pwndbg.wrappers.call_cmd([get_raw_out.cmd_path, \"--file\", local_path])\n \n @pwndbg.wrappers.OnlyWithCommand(cmd_name)\n def relro_status():\n", "issue": "subprocess.CalledProcessError: Command '['/usr/local/bin/checksec', '--file', '/tmp/xxx']' returned non-zero exit status 2.\n```\r\npwndbg> checksec \r\nTraceback (most recent call last):\r\n File \"/root/pwndbg/pwndbg/commands/__init__.py\", line 109, in __call__\r\n return self.function(*args, **kwargs)\r\n File \"/root/pwndbg/pwndbg/commands/__init__.py\", line 189, in _OnlyWithFile\r\n return function(*a, **kw)\r\n File \"/root/pwndbg/pwndbg/commands/checksec.py\", line 16, in checksec\r\n print(pwndbg.wrappers.checksec.get_raw_out())\r\n File \"/root/pwndbg/pwndbg/commands/__init__.py\", line 189, in _OnlyWithFile\r\n return function(*a, **kw)\r\n File \"/root/pwndbg/pwndbg/wrappers/__init__.py\", line 28, in _OnlyWithCommand\r\n return function(*a, **kw)\r\n File \"/root/pwndbg/pwndbg/memoize.py\", line 48, in __call__\r\n value = self.func(*args, **kwargs)\r\n File \"/root/pwndbg/pwndbg/wrappers/checksec.py\", line 20, in get_raw_out\r\n return pwndbg.wrappers.call_cmd(cmd)\r\n File \"/root/pwndbg/pwndbg/wrappers/__init__.py\", line 35, in call_cmd\r\n return subprocess.check_output(cmd, stderr=STDOUT).decode('utf-8')\r\n File \"/usr/lib/python3.7/subprocess.py\", line 395, in check_output\r\n **kwargs).stdout\r\n File \"/usr/lib/python3.7/subprocess.py\", line 487, in run\r\n output=stdout, stderr=stderr)\r\nsubprocess.CalledProcessError: Command '['/usr/local/bin/checksec', '--file', '/tmp/forgot']' returned non-zero exit status 2.\r\n```\r\n\r\nwhen checksec running in python3 pwntools environment, raise a failure. 
Here's the reason:\r\n```\r\nroot@kali:/tmp# checksec --file forgot\r\nusage: checksec [-h] elf [elf ...]\r\nchecksec: error: unrecognized arguments: --file\r\n```\r\n\r\n\r\nSo modify pwndbg/pwndbg/wrappers/checksec.py: 19 with:\r\n`cmd = [get_raw_out.cmd_path, local_path]`\r\n\r\nThen it will be solved\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport pwndbg.commands\nimport pwndbg.memoize\nimport pwndbg.wrappers\n\ncmd_name = \"checksec\"\n\[email protected](cmd_name)\[email protected]_on_objfile\ndef get_raw_out():\n\n local_path = pwndbg.file.get_file(pwndbg.proc.exe)\n cmd = [get_raw_out.cmd_path, \"--file\", local_path]\n return pwndbg.wrappers.call_cmd(cmd)\n\[email protected](cmd_name)\ndef relro_status():\n relro = \"No RELRO\"\n out = get_raw_out()\n\n if \"Full RELRO\" in out:\n relro = \"Full RELRO\"\n elif \"Partial RELRO\" in out:\n relro = \"Partial RELRO\"\n\n return relro\n\[email protected](cmd_name)\ndef pie_status():\n pie = \"No PIE\"\n out = get_raw_out()\n\n if \"PIE enabled\" in out:\n pie = \"PIE enabled\"\n\n return pie\n", "path": "pwndbg/wrappers/checksec.py"}]}
1,458
360
gh_patches_debug_43335
rasdani/github-patches
git_diff
google__mobly-437
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Propagate stderr from adb commands The current mobly adb proxy does not propagate stderr if ret code is zero. We thought this was ok since Android has fixed return code issues in M. But turns out many China manufacturers did not fix this in China devices. In order to better support China devices and potentially other devices of the same ret code problem, we need to surface stderr. </issue> <code> [start of mobly/controllers/android_device_lib/adb.py] 1 # Copyright 2016 Google Inc. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 from builtins import str 16 from past.builtins import basestring 17 18 import logging 19 import pipes 20 import psutil 21 import subprocess 22 import threading 23 24 # Command to use for running ADB commands. 25 ADB = 'adb' 26 27 # adb gets confused if we try to manage bound ports in parallel, so anything to 28 # do with port forwarding must happen under this lock. 29 ADB_PORT_LOCK = threading.Lock() 30 31 # Qualified class name of the default instrumentation test runner. 32 DEFAULT_INSTRUMENTATION_RUNNER = 'com.android.common.support.test.runner.AndroidJUnitRunner' 33 34 35 class Error(Exception): 36 """Base error type for adb proxy module.""" 37 38 39 class AdbError(Error): 40 """Raised when an adb command encounters an error. 41 42 Args: 43 cmd: list of strings, the adb command executed. 44 stdout: byte string, the raw stdout of the command. 45 stderr: byte string, the raw stderr of the command. 46 ret_code: int, the return code of the command. 47 """ 48 49 def __init__(self, cmd, stdout, stderr, ret_code): 50 self.cmd = cmd 51 self.stdout = stdout 52 self.stderr = stderr 53 self.ret_code = ret_code 54 55 def __str__(self): 56 return ('Error executing adb cmd "%s". ret: %d, stdout: %s, stderr: %s' 57 ) % (cli_cmd_to_string(self.cmd), self.ret_code, self.stdout, 58 self.stderr) 59 60 61 class AdbTimeoutError(Error): 62 """Raised when an command did not complete within expected time. 63 64 Args: 65 cmd: list of strings, the adb command that timed out 66 timeout: float, the number of seconds passed before timing out. 67 """ 68 69 def __init__(self, cmd, timeout): 70 self.cmd = cmd 71 self.timeout = timeout 72 73 def __str__(self): 74 return 'Timed out executing command "%s" after %ss.' % ( 75 cli_cmd_to_string(self.cmd), self.timeout) 76 77 78 def list_occupied_adb_ports(): 79 """Lists all the host ports occupied by adb forward. 80 81 This is useful because adb will silently override the binding if an attempt 82 to bind to a port already used by adb was made, instead of throwing binding 83 error. So one should always check what ports adb is using before trying to 84 bind to a port with adb. 85 86 Returns: 87 A list of integers representing occupied host ports. 
88 """ 89 out = AdbProxy().forward('--list') 90 clean_lines = str(out, 'utf-8').strip().split('\n') 91 used_ports = [] 92 for line in clean_lines: 93 tokens = line.split(' tcp:') 94 if len(tokens) != 3: 95 continue 96 used_ports.append(int(tokens[1])) 97 return used_ports 98 99 100 def cli_cmd_to_string(args): 101 """Converts a cmd arg list to string. 102 103 Args: 104 args: list of strings, the arguments of a command. 105 106 Returns: 107 String representation of the command. 108 """ 109 if isinstance(args, basestring): 110 # Return directly if it's already a string. 111 return args 112 return ' '.join([pipes.quote(arg) for arg in args]) 113 114 115 class AdbProxy(object): 116 """Proxy class for ADB. 117 118 For syntactic reasons, the '-' in adb commands need to be replaced with 119 '_'. Can directly execute adb commands on an object: 120 >> adb = AdbProxy(<serial>) 121 >> adb.start_server() 122 >> adb.devices() # will return the console output of "adb devices". 123 124 By default, command args are expected to be an iterable which is passed 125 directly to subprocess.Popen(): 126 >> adb.shell(['echo', 'a', 'b']) 127 128 This way of launching commands is recommended by the subprocess 129 documentation to avoid shell injection vulnerabilities and avoid having to 130 deal with multiple layers of shell quoting and different shell environments 131 between different OSes. 132 133 If you really want to run the command through the system shell, this is 134 possible by supplying shell=True, but try to avoid this if possible: 135 >> adb.shell('cat /foo > /tmp/file', shell=True) 136 """ 137 138 def __init__(self, serial=''): 139 self.serial = serial 140 141 def _exec_cmd(self, args, shell, timeout): 142 """Executes adb commands. 143 144 Args: 145 args: string or list of strings, program arguments. 146 See subprocess.Popen() documentation. 147 shell: bool, True to run this command through the system shell, 148 False to invoke it directly. See subprocess.Popen() docs. 149 timeout: float, the number of seconds to wait before timing out. 150 If not specified, no timeout takes effect. 151 152 Returns: 153 The output of the adb command run if exit code is 0. 154 155 Raises: 156 AdbError: The adb command exit code is not 0. 157 AdbTimeoutError: The adb command timed out. 158 """ 159 proc = subprocess.Popen( 160 args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=shell) 161 process = psutil.Process(proc.pid) 162 if timeout and timeout <= 0: 163 raise Error('Timeout is not a positive value: %s' % timeout) 164 if timeout and timeout > 0: 165 try: 166 process.wait(timeout=timeout) 167 except psutil.TimeoutExpired: 168 process.terminate() 169 raise AdbTimeoutError(cmd=args, timeout=timeout) 170 171 (out, err) = proc.communicate() 172 ret = proc.returncode 173 logging.debug('cmd: %s, stdout: %s, stderr: %s, ret: %s', 174 cli_cmd_to_string(args), out, err, ret) 175 if ret == 0: 176 return out 177 else: 178 raise AdbError(cmd=args, stdout=out, stderr=err, ret_code=ret) 179 180 def _exec_adb_cmd(self, name, args, shell, timeout): 181 if shell: 182 # Add quotes around "adb" in case the ADB path contains spaces. This 183 # is pretty common on Windows (e.g. Program Files). 
184 if self.serial: 185 adb_cmd = '"%s" -s "%s" %s %s' % (ADB, self.serial, name, args) 186 else: 187 adb_cmd = '"%s" %s %s' % (ADB, name, args) 188 else: 189 adb_cmd = [ADB] 190 if self.serial: 191 adb_cmd.extend(['-s', self.serial]) 192 adb_cmd.append(name) 193 if args: 194 if isinstance(args, basestring): 195 adb_cmd.append(args) 196 else: 197 adb_cmd.extend(args) 198 return self._exec_cmd(adb_cmd, shell=shell, timeout=timeout) 199 200 def getprop(self, prop_name): 201 """Get a property of the device. 202 203 This is a convenience wrapper for "adb shell getprop xxx". 204 205 Args: 206 prop_name: A string that is the name of the property to get. 207 208 Returns: 209 A string that is the value of the property, or None if the property 210 doesn't exist. 211 """ 212 return self.shell('getprop %s' % prop_name).decode('utf-8').strip() 213 214 def has_shell_command(self, command): 215 """Checks to see if a given check command exists on the device. 216 217 Args: 218 command: A string that is the name of the command to check. 219 220 Returns: 221 A boolean that is True if the command exists and False otherwise. 222 """ 223 try: 224 output = self.shell(['command', '-v', 225 command]).decode('utf-8').strip() 226 return command in output 227 except AdbError: 228 # If the command doesn't exist, then 'command -v' can return 229 # an exit code > 1. 230 return False 231 232 def forward(self, args=None, shell=False): 233 with ADB_PORT_LOCK: 234 return self._exec_adb_cmd('forward', args, shell, timeout=None) 235 236 def instrument(self, package, options=None, runner=None): 237 """Runs an instrumentation command on the device. 238 239 This is a convenience wrapper to avoid parameter formatting. 240 241 Example: 242 device.instrument( 243 'com.my.package.test', 244 options = { 245 'class': 'com.my.package.test.TestSuite', 246 }, 247 ) 248 249 Args: 250 package: string, the package of the instrumentation tests. 251 options: dict, the instrumentation options including the test 252 class. 253 runner: string, the test runner name, which defaults to 254 DEFAULT_INSTRUMENTATION_RUNNER. 255 256 Returns: 257 The output of instrumentation command. 258 """ 259 if runner is None: 260 runner = DEFAULT_INSTRUMENTATION_RUNNER 261 if options is None: 262 options = {} 263 264 options_list = [] 265 for option_key, option_value in options.items(): 266 options_list.append('-e %s %s' % (option_key, option_value)) 267 options_string = ' '.join(options_list) 268 269 instrumentation_command = 'am instrument -r -w %s %s/%s' % ( 270 options_string, package, runner) 271 logging.info('AndroidDevice|%s: Executing adb shell %s', self.serial, 272 instrumentation_command) 273 return self.shell(instrumentation_command) 274 275 def __getattr__(self, name): 276 def adb_call(args=None, shell=False, timeout=None): 277 """Wrapper for an ADB command. 278 279 Args: 280 args: string or list of strings, arguments to the adb command. 281 See subprocess.Proc() documentation. 282 shell: bool, True to run this command through the system shell, 283 False to invoke it directly. See subprocess.Proc() docs. 284 timeout: float, the number of seconds to wait before timing out. 285 If not specified, no timeout takes effect. 286 287 Returns: 288 The output of the adb command run if exit code is 0. 
289 """ 290 args = args or '' 291 clean_name = name.replace('_', '-') 292 return self._exec_adb_cmd( 293 clean_name, args, shell=shell, timeout=timeout) 294 295 return adb_call 296 [end of mobly/controllers/android_device_lib/adb.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mobly/controllers/android_device_lib/adb.py b/mobly/controllers/android_device_lib/adb.py --- a/mobly/controllers/android_device_lib/adb.py +++ b/mobly/controllers/android_device_lib/adb.py @@ -138,7 +138,7 @@ def __init__(self, serial=''): self.serial = serial - def _exec_cmd(self, args, shell, timeout): + def _exec_cmd(self, args, shell, timeout, stderr): """Executes adb commands. Args: @@ -148,6 +148,8 @@ False to invoke it directly. See subprocess.Popen() docs. timeout: float, the number of seconds to wait before timing out. If not specified, no timeout takes effect. + stderr: a Byte stream, like io.BytesIO, stderr of the command will + be written to this object if provided. Returns: The output of the adb command run if exit code is 0. @@ -169,6 +171,8 @@ raise AdbTimeoutError(cmd=args, timeout=timeout) (out, err) = proc.communicate() + if stderr: + stderr.write(err) ret = proc.returncode logging.debug('cmd: %s, stdout: %s, stderr: %s, ret: %s', cli_cmd_to_string(args), out, err, ret) @@ -177,7 +181,7 @@ else: raise AdbError(cmd=args, stdout=out, stderr=err, ret_code=ret) - def _exec_adb_cmd(self, name, args, shell, timeout): + def _exec_adb_cmd(self, name, args, shell, timeout, stderr): if shell: # Add quotes around "adb" in case the ADB path contains spaces. This # is pretty common on Windows (e.g. Program Files). @@ -195,7 +199,9 @@ adb_cmd.append(args) else: adb_cmd.extend(args) - return self._exec_cmd(adb_cmd, shell=shell, timeout=timeout) + out = self._exec_cmd( + adb_cmd, shell=shell, timeout=timeout, stderr=stderr) + return out def getprop(self, prop_name): """Get a property of the device. @@ -273,7 +279,7 @@ return self.shell(instrumentation_command) def __getattr__(self, name): - def adb_call(args=None, shell=False, timeout=None): + def adb_call(args=None, shell=False, timeout=None, stderr=None): """Wrapper for an ADB command. Args: @@ -283,6 +289,8 @@ False to invoke it directly. See subprocess.Proc() docs. timeout: float, the number of seconds to wait before timing out. If not specified, no timeout takes effect. + stderr: a Byte stream, like io.BytesIO, stderr of the command + will be written to this object if provided. Returns: The output of the adb command run if exit code is 0. @@ -290,6 +298,6 @@ args = args or '' clean_name = name.replace('_', '-') return self._exec_adb_cmd( - clean_name, args, shell=shell, timeout=timeout) + clean_name, args, shell=shell, timeout=timeout, stderr=stderr) return adb_call
{"golden_diff": "diff --git a/mobly/controllers/android_device_lib/adb.py b/mobly/controllers/android_device_lib/adb.py\n--- a/mobly/controllers/android_device_lib/adb.py\n+++ b/mobly/controllers/android_device_lib/adb.py\n@@ -138,7 +138,7 @@\n def __init__(self, serial=''):\n self.serial = serial\n \n- def _exec_cmd(self, args, shell, timeout):\n+ def _exec_cmd(self, args, shell, timeout, stderr):\n \"\"\"Executes adb commands.\n \n Args:\n@@ -148,6 +148,8 @@\n False to invoke it directly. See subprocess.Popen() docs.\n timeout: float, the number of seconds to wait before timing out.\n If not specified, no timeout takes effect.\n+ stderr: a Byte stream, like io.BytesIO, stderr of the command will\n+ be written to this object if provided.\n \n Returns:\n The output of the adb command run if exit code is 0.\n@@ -169,6 +171,8 @@\n raise AdbTimeoutError(cmd=args, timeout=timeout)\n \n (out, err) = proc.communicate()\n+ if stderr:\n+ stderr.write(err)\n ret = proc.returncode\n logging.debug('cmd: %s, stdout: %s, stderr: %s, ret: %s',\n cli_cmd_to_string(args), out, err, ret)\n@@ -177,7 +181,7 @@\n else:\n raise AdbError(cmd=args, stdout=out, stderr=err, ret_code=ret)\n \n- def _exec_adb_cmd(self, name, args, shell, timeout):\n+ def _exec_adb_cmd(self, name, args, shell, timeout, stderr):\n if shell:\n # Add quotes around \"adb\" in case the ADB path contains spaces. This\n # is pretty common on Windows (e.g. Program Files).\n@@ -195,7 +199,9 @@\n adb_cmd.append(args)\n else:\n adb_cmd.extend(args)\n- return self._exec_cmd(adb_cmd, shell=shell, timeout=timeout)\n+ out = self._exec_cmd(\n+ adb_cmd, shell=shell, timeout=timeout, stderr=stderr)\n+ return out\n \n def getprop(self, prop_name):\n \"\"\"Get a property of the device.\n@@ -273,7 +279,7 @@\n return self.shell(instrumentation_command)\n \n def __getattr__(self, name):\n- def adb_call(args=None, shell=False, timeout=None):\n+ def adb_call(args=None, shell=False, timeout=None, stderr=None):\n \"\"\"Wrapper for an ADB command.\n \n Args:\n@@ -283,6 +289,8 @@\n False to invoke it directly. 
See subprocess.Proc() docs.\n timeout: float, the number of seconds to wait before timing out.\n If not specified, no timeout takes effect.\n+ stderr: a Byte stream, like io.BytesIO, stderr of the command\n+ will be written to this object if provided.\n \n Returns:\n The output of the adb command run if exit code is 0.\n@@ -290,6 +298,6 @@\n args = args or ''\n clean_name = name.replace('_', '-')\n return self._exec_adb_cmd(\n- clean_name, args, shell=shell, timeout=timeout)\n+ clean_name, args, shell=shell, timeout=timeout, stderr=stderr)\n \n return adb_call\n", "issue": "Propagate stderr from adb commands\nThe current mobly adb proxy does not propagate stderr if ret code is zero.\r\nWe thought this was ok since Android has fixed return code issues in M.\r\n\r\nBut turns out many China manufacturers did not fix this in China devices.\r\nIn order to better support China devices and potentially other devices of the same ret code problem, we need to surface stderr.\n", "before_files": [{"content": "# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom builtins import str\nfrom past.builtins import basestring\n\nimport logging\nimport pipes\nimport psutil\nimport subprocess\nimport threading\n\n# Command to use for running ADB commands.\nADB = 'adb'\n\n# adb gets confused if we try to manage bound ports in parallel, so anything to\n# do with port forwarding must happen under this lock.\nADB_PORT_LOCK = threading.Lock()\n\n# Qualified class name of the default instrumentation test runner.\nDEFAULT_INSTRUMENTATION_RUNNER = 'com.android.common.support.test.runner.AndroidJUnitRunner'\n\n\nclass Error(Exception):\n \"\"\"Base error type for adb proxy module.\"\"\"\n\n\nclass AdbError(Error):\n \"\"\"Raised when an adb command encounters an error.\n\n Args:\n cmd: list of strings, the adb command executed.\n stdout: byte string, the raw stdout of the command.\n stderr: byte string, the raw stderr of the command.\n ret_code: int, the return code of the command.\n \"\"\"\n\n def __init__(self, cmd, stdout, stderr, ret_code):\n self.cmd = cmd\n self.stdout = stdout\n self.stderr = stderr\n self.ret_code = ret_code\n\n def __str__(self):\n return ('Error executing adb cmd \"%s\". ret: %d, stdout: %s, stderr: %s'\n ) % (cli_cmd_to_string(self.cmd), self.ret_code, self.stdout,\n self.stderr)\n\n\nclass AdbTimeoutError(Error):\n \"\"\"Raised when an command did not complete within expected time.\n\n Args:\n cmd: list of strings, the adb command that timed out\n timeout: float, the number of seconds passed before timing out.\n \"\"\"\n\n def __init__(self, cmd, timeout):\n self.cmd = cmd\n self.timeout = timeout\n\n def __str__(self):\n return 'Timed out executing command \"%s\" after %ss.' 
% (\n cli_cmd_to_string(self.cmd), self.timeout)\n\n\ndef list_occupied_adb_ports():\n \"\"\"Lists all the host ports occupied by adb forward.\n\n This is useful because adb will silently override the binding if an attempt\n to bind to a port already used by adb was made, instead of throwing binding\n error. So one should always check what ports adb is using before trying to\n bind to a port with adb.\n\n Returns:\n A list of integers representing occupied host ports.\n \"\"\"\n out = AdbProxy().forward('--list')\n clean_lines = str(out, 'utf-8').strip().split('\\n')\n used_ports = []\n for line in clean_lines:\n tokens = line.split(' tcp:')\n if len(tokens) != 3:\n continue\n used_ports.append(int(tokens[1]))\n return used_ports\n\n\ndef cli_cmd_to_string(args):\n \"\"\"Converts a cmd arg list to string.\n\n Args:\n args: list of strings, the arguments of a command.\n\n Returns:\n String representation of the command.\n \"\"\"\n if isinstance(args, basestring):\n # Return directly if it's already a string.\n return args\n return ' '.join([pipes.quote(arg) for arg in args])\n\n\nclass AdbProxy(object):\n \"\"\"Proxy class for ADB.\n\n For syntactic reasons, the '-' in adb commands need to be replaced with\n '_'. Can directly execute adb commands on an object:\n >> adb = AdbProxy(<serial>)\n >> adb.start_server()\n >> adb.devices() # will return the console output of \"adb devices\".\n\n By default, command args are expected to be an iterable which is passed\n directly to subprocess.Popen():\n >> adb.shell(['echo', 'a', 'b'])\n\n This way of launching commands is recommended by the subprocess\n documentation to avoid shell injection vulnerabilities and avoid having to\n deal with multiple layers of shell quoting and different shell environments\n between different OSes.\n\n If you really want to run the command through the system shell, this is\n possible by supplying shell=True, but try to avoid this if possible:\n >> adb.shell('cat /foo > /tmp/file', shell=True)\n \"\"\"\n\n def __init__(self, serial=''):\n self.serial = serial\n\n def _exec_cmd(self, args, shell, timeout):\n \"\"\"Executes adb commands.\n\n Args:\n args: string or list of strings, program arguments.\n See subprocess.Popen() documentation.\n shell: bool, True to run this command through the system shell,\n False to invoke it directly. See subprocess.Popen() docs.\n timeout: float, the number of seconds to wait before timing out.\n If not specified, no timeout takes effect.\n\n Returns:\n The output of the adb command run if exit code is 0.\n\n Raises:\n AdbError: The adb command exit code is not 0.\n AdbTimeoutError: The adb command timed out.\n \"\"\"\n proc = subprocess.Popen(\n args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=shell)\n process = psutil.Process(proc.pid)\n if timeout and timeout <= 0:\n raise Error('Timeout is not a positive value: %s' % timeout)\n if timeout and timeout > 0:\n try:\n process.wait(timeout=timeout)\n except psutil.TimeoutExpired:\n process.terminate()\n raise AdbTimeoutError(cmd=args, timeout=timeout)\n\n (out, err) = proc.communicate()\n ret = proc.returncode\n logging.debug('cmd: %s, stdout: %s, stderr: %s, ret: %s',\n cli_cmd_to_string(args), out, err, ret)\n if ret == 0:\n return out\n else:\n raise AdbError(cmd=args, stdout=out, stderr=err, ret_code=ret)\n\n def _exec_adb_cmd(self, name, args, shell, timeout):\n if shell:\n # Add quotes around \"adb\" in case the ADB path contains spaces. This\n # is pretty common on Windows (e.g. 
Program Files).\n if self.serial:\n adb_cmd = '\"%s\" -s \"%s\" %s %s' % (ADB, self.serial, name, args)\n else:\n adb_cmd = '\"%s\" %s %s' % (ADB, name, args)\n else:\n adb_cmd = [ADB]\n if self.serial:\n adb_cmd.extend(['-s', self.serial])\n adb_cmd.append(name)\n if args:\n if isinstance(args, basestring):\n adb_cmd.append(args)\n else:\n adb_cmd.extend(args)\n return self._exec_cmd(adb_cmd, shell=shell, timeout=timeout)\n\n def getprop(self, prop_name):\n \"\"\"Get a property of the device.\n\n This is a convenience wrapper for \"adb shell getprop xxx\".\n\n Args:\n prop_name: A string that is the name of the property to get.\n\n Returns:\n A string that is the value of the property, or None if the property\n doesn't exist.\n \"\"\"\n return self.shell('getprop %s' % prop_name).decode('utf-8').strip()\n\n def has_shell_command(self, command):\n \"\"\"Checks to see if a given check command exists on the device.\n\n Args:\n command: A string that is the name of the command to check.\n\n Returns:\n A boolean that is True if the command exists and False otherwise.\n \"\"\"\n try:\n output = self.shell(['command', '-v',\n command]).decode('utf-8').strip()\n return command in output\n except AdbError:\n # If the command doesn't exist, then 'command -v' can return\n # an exit code > 1.\n return False\n\n def forward(self, args=None, shell=False):\n with ADB_PORT_LOCK:\n return self._exec_adb_cmd('forward', args, shell, timeout=None)\n\n def instrument(self, package, options=None, runner=None):\n \"\"\"Runs an instrumentation command on the device.\n\n This is a convenience wrapper to avoid parameter formatting.\n\n Example:\n device.instrument(\n 'com.my.package.test',\n options = {\n 'class': 'com.my.package.test.TestSuite',\n },\n )\n\n Args:\n package: string, the package of the instrumentation tests.\n options: dict, the instrumentation options including the test\n class.\n runner: string, the test runner name, which defaults to\n DEFAULT_INSTRUMENTATION_RUNNER.\n\n Returns:\n The output of instrumentation command.\n \"\"\"\n if runner is None:\n runner = DEFAULT_INSTRUMENTATION_RUNNER\n if options is None:\n options = {}\n\n options_list = []\n for option_key, option_value in options.items():\n options_list.append('-e %s %s' % (option_key, option_value))\n options_string = ' '.join(options_list)\n\n instrumentation_command = 'am instrument -r -w %s %s/%s' % (\n options_string, package, runner)\n logging.info('AndroidDevice|%s: Executing adb shell %s', self.serial,\n instrumentation_command)\n return self.shell(instrumentation_command)\n\n def __getattr__(self, name):\n def adb_call(args=None, shell=False, timeout=None):\n \"\"\"Wrapper for an ADB command.\n\n Args:\n args: string or list of strings, arguments to the adb command.\n See subprocess.Proc() documentation.\n shell: bool, True to run this command through the system shell,\n False to invoke it directly. See subprocess.Proc() docs.\n timeout: float, the number of seconds to wait before timing out.\n If not specified, no timeout takes effect.\n\n Returns:\n The output of the adb command run if exit code is 0.\n \"\"\"\n args = args or ''\n clean_name = name.replace('_', '-')\n return self._exec_adb_cmd(\n clean_name, args, shell=shell, timeout=timeout)\n\n return adb_call\n", "path": "mobly/controllers/android_device_lib/adb.py"}]}
3,689
781
gh_patches_debug_30288
rasdani/github-patches
git_diff
OCA__social-687
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [14.0] mail_debrand: TypeError: cannot use a string pattern on a bytes-like object This error occurs in the app email marketing(mass_mailing) by clicking on "Test" in the Mailings tab File "/opt/odoo/odoo/addons/base/models/ir_http.py", line 237, in _dispatch result = request.dispatch() File "/opt/odoo/odoo/http.py", line 683, in dispatch result = self._call_function(**self.params) File "/opt/odoo/odoo/http.py", line 359, in _call_function return checked_call(self.db, *args, **kwargs) File "/opt/odoo/odoo/service/model.py", line 94, in wrapper return f(dbname, *args, **kwargs) File "/opt/odoo/odoo/http.py", line 347, in checked_call result = self.endpoint(*a, **kw) File "/opt/odoo/odoo/http.py", line 912, in __call__ return self.method(*args, **kw) File "/opt/odoo/odoo/http.py", line 531, in response_wrap response = f(*args, **kw) File "/opt/odoo/odoo/addons/web/controllers/main.py", line 1368, in call_button action = self._call_kw(model, method, args, kwargs) File "/opt/odoo/odoo/addons/web/controllers/main.py", line 1356, in _call_kw return call_kw(request.env[model], method, args, kwargs) File "/opt/odoo/odoo/api.py", line 396, in call_kw result = _call_kw_multi(method, model, args, kwargs) File "/opt/odoo/odoo/api.py", line 383, in _call_kw_multi result = method(recs, *args, **kwargs) File "/opt/odoo/odoo/addons/mass_mailing/wizard/mailing_mailing_test.py", line 54, in send_mail_test mail = self.env['mail.mail'].sudo().create(mail_values) File "<decorator-gen-210>", line 2, in create File "/opt/odoo/odoo/api.py", line 344, in _model_create_multi return create(self, [arg]) File "/opt/odoo/odoo/addons/mass_mailing/models/mail_mail.py", line 21, in create mails = super(MailMail, self).create(values_list) File "<decorator-gen-155>", line 2, in create File "/opt/odoo/odoo/api.py", line 345, in _model_create_multi return create(self, arg) File "/opt/odoo/custom-addons/mail_debrand/models/mail_render_mixinANDmail_mail.py", line 98, in create values_list[index]["body_html"] = self.env[ File "/opt/odoo/custom-addons/mail_debrand/models/mail_render_mixinANDmail_mail.py", line 18, in remove_href_odoo has_odoo_link = re.search(r"<a\s(.*)odoo\.com", value, flags=re.IGNORECASE) File "/usr/lib/python3.8/re.py", line 201, in search return _compile(pattern, flags).search(string) Exception The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/opt/odoo/odoo/http.py", line 639, in _handle_exception return super(JsonRequest, self)._handle_exception(exception) File "/opt/odoo/odoo/http.py", line 315, in _handle_exception raise exception.with_traceback(None) from new_cause TypeError: cannot use a string pattern on a bytes-like object </issue> <code> [start of mail_debrand/models/mail_render_mixinANDmail_mail.py] 1 # Copyright 2019 O4SB - Graeme Gellatly 2 # Copyright 2019 Tecnativa - Ernesto Tejeda 3 # Copyright 2020 Onestein - Andrea Stirpe 4 # License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html). 
5 import re 6 7 from lxml import etree, html 8 9 from odoo import api, models 10 11 12 class MailRenderMixin(models.AbstractModel): 13 _inherit = "mail.render.mixin" 14 15 def remove_href_odoo(self, value, remove_parent=True, remove_before=False): 16 if len(value) < 20: 17 return value 18 has_odoo_link = re.search(r"<a\s(.*)odoo\.com", value, flags=re.IGNORECASE) 19 if has_odoo_link: 20 tree = etree.HTML( 21 value 22 ) # html with brlken links tree = etree.fromstring(value) just xml 23 odoo_achors = tree.xpath('//a[contains(@href,"odoo.com")]') 24 for elem in odoo_achors: 25 parent = elem.getparent() 26 previous = elem.getprevious() 27 28 if remove_before and not remove_parent and previous: 29 # remove 'using' that is before <a and after </span> 30 bytes_text = etree.tostring( 31 previous, pretty_print=True, method="html" 32 ) 33 only_what_is_in_tags = bytes_text[: bytes_text.rfind(b">") + 1] 34 data_formatted = html.fromstring(only_what_is_in_tags) 35 parent.replace(previous, data_formatted) 36 if len(parent.getparent()) and remove_parent: 37 # anchor <a href odoo has a parent powered by that must be removed 38 parent.getparent().remove(parent) 39 else: 40 if parent.tag == "td": # also here can be powerd by 41 parent.getparent().remove(parent) 42 else: 43 parent.remove(elem) 44 value = etree.tostring(tree, pretty_print=True, method="html") 45 # etree can return bytes; ensure we get a proper string 46 if type(value) is bytes: 47 value = value.decode() 48 return re.sub("[^(<)(</)]odoo", "", value, flags=re.IGNORECASE) 49 50 @api.model 51 def _render_template( 52 self, 53 template_src, 54 model, 55 res_ids, 56 engine="jinja", 57 add_context=None, 58 post_process=False, 59 ): 60 """replace anything that is with odoo in templates 61 if is a <a that contains odoo will delete it completly 62 original: 63 Render the given string on records designed by model / res_ids using 64 the given rendering engine. Currently only jinja is supported. 65 66 :param str template_src: template text to render (jinja) or (qweb) 67 this could be cleaned but hey, we are in a rush 68 :param str model: model name of records on which we want to perform rendering 69 :param list res_ids: list of ids of records (all belonging to same model) 70 :param string engine: jinja 71 :param post_process: perform rendered str / html post processing (see 72 ``_render_template_postprocess``) 73 74 :return dict: {res_id: string of rendered template based on record}""" 75 orginal_rendered = super()._render_template( 76 template_src, 77 model, 78 res_ids, 79 engine="jinja", 80 add_context=None, 81 post_process=False, 82 ) 83 84 for key in res_ids: 85 orginal_rendered[key] = self.remove_href_odoo(orginal_rendered[key]) 86 87 return orginal_rendered 88 89 90 class MailMail(models.AbstractModel): 91 _inherit = "mail.mail" 92 93 # in messages from objects is adding using Odoo that we are going to remove 94 95 @api.model_create_multi 96 def create(self, values_list): 97 for index, _value in enumerate(values_list): 98 values_list[index]["body_html"] = self.env[ 99 "mail.render.mixin" 100 ].remove_href_odoo( 101 values_list[index]["body_html"], remove_parent=0, remove_before=1 102 ) 103 104 return super().create(values_list) 105 [end of mail_debrand/models/mail_render_mixinANDmail_mail.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mail_debrand/models/mail_render_mixinANDmail_mail.py b/mail_debrand/models/mail_render_mixinANDmail_mail.py --- a/mail_debrand/models/mail_render_mixinANDmail_mail.py +++ b/mail_debrand/models/mail_render_mixinANDmail_mail.py @@ -15,11 +15,14 @@ def remove_href_odoo(self, value, remove_parent=True, remove_before=False): if len(value) < 20: return value + # value can be bytes type; ensure we get a proper string + if type(value) is bytes: + value = value.decode() has_odoo_link = re.search(r"<a\s(.*)odoo\.com", value, flags=re.IGNORECASE) if has_odoo_link: tree = etree.HTML( value - ) # html with brlken links tree = etree.fromstring(value) just xml + ) # html with broken links tree = etree.fromstring(value) just xml odoo_achors = tree.xpath('//a[contains(@href,"odoo.com")]') for elem in odoo_achors: parent = elem.getparent() @@ -33,7 +36,7 @@ only_what_is_in_tags = bytes_text[: bytes_text.rfind(b">") + 1] data_formatted = html.fromstring(only_what_is_in_tags) parent.replace(previous, data_formatted) - if len(parent.getparent()) and remove_parent: + if remove_parent and len(parent.getparent()): # anchor <a href odoo has a parent powered by that must be removed parent.getparent().remove(parent) else:
{"golden_diff": "diff --git a/mail_debrand/models/mail_render_mixinANDmail_mail.py b/mail_debrand/models/mail_render_mixinANDmail_mail.py\n--- a/mail_debrand/models/mail_render_mixinANDmail_mail.py\n+++ b/mail_debrand/models/mail_render_mixinANDmail_mail.py\n@@ -15,11 +15,14 @@\n def remove_href_odoo(self, value, remove_parent=True, remove_before=False):\n if len(value) < 20:\n return value\n+ # value can be bytes type; ensure we get a proper string\n+ if type(value) is bytes:\n+ value = value.decode()\n has_odoo_link = re.search(r\"<a\\s(.*)odoo\\.com\", value, flags=re.IGNORECASE)\n if has_odoo_link:\n tree = etree.HTML(\n value\n- ) # html with brlken links tree = etree.fromstring(value) just xml\n+ ) # html with broken links tree = etree.fromstring(value) just xml\n odoo_achors = tree.xpath('//a[contains(@href,\"odoo.com\")]')\n for elem in odoo_achors:\n parent = elem.getparent()\n@@ -33,7 +36,7 @@\n only_what_is_in_tags = bytes_text[: bytes_text.rfind(b\">\") + 1]\n data_formatted = html.fromstring(only_what_is_in_tags)\n parent.replace(previous, data_formatted)\n- if len(parent.getparent()) and remove_parent:\n+ if remove_parent and len(parent.getparent()):\n # anchor <a href odoo has a parent powered by that must be removed\n parent.getparent().remove(parent)\n else:\n", "issue": "[14.0] mail_debrand: TypeError: cannot use a string pattern on a bytes-like object\nThis error occurs in the app email marketing(mass_mailing) by clicking on \"Test\" in the Mailings tab\r\n\r\n File \"/opt/odoo/odoo/addons/base/models/ir_http.py\", line 237, in _dispatch\r\n result = request.dispatch()\r\n File \"/opt/odoo/odoo/http.py\", line 683, in dispatch\r\n result = self._call_function(**self.params)\r\n File \"/opt/odoo/odoo/http.py\", line 359, in _call_function\r\n return checked_call(self.db, *args, **kwargs)\r\n File \"/opt/odoo/odoo/service/model.py\", line 94, in wrapper\r\n return f(dbname, *args, **kwargs)\r\n File \"/opt/odoo/odoo/http.py\", line 347, in checked_call\r\n result = self.endpoint(*a, **kw)\r\n File \"/opt/odoo/odoo/http.py\", line 912, in __call__\r\n return self.method(*args, **kw)\r\n File \"/opt/odoo/odoo/http.py\", line 531, in response_wrap\r\n response = f(*args, **kw)\r\n File \"/opt/odoo/odoo/addons/web/controllers/main.py\", line 1368, in call_button\r\n action = self._call_kw(model, method, args, kwargs)\r\n File \"/opt/odoo/odoo/addons/web/controllers/main.py\", line 1356, in _call_kw\r\n return call_kw(request.env[model], method, args, kwargs)\r\n File \"/opt/odoo/odoo/api.py\", line 396, in call_kw\r\n result = _call_kw_multi(method, model, args, kwargs)\r\n File \"/opt/odoo/odoo/api.py\", line 383, in _call_kw_multi\r\n result = method(recs, *args, **kwargs)\r\n File \"/opt/odoo/odoo/addons/mass_mailing/wizard/mailing_mailing_test.py\", line 54, in send_mail_test\r\n mail = self.env['mail.mail'].sudo().create(mail_values)\r\n File \"<decorator-gen-210>\", line 2, in create\r\n File \"/opt/odoo/odoo/api.py\", line 344, in _model_create_multi\r\n return create(self, [arg])\r\n File \"/opt/odoo/odoo/addons/mass_mailing/models/mail_mail.py\", line 21, in create\r\n mails = super(MailMail, self).create(values_list)\r\n File \"<decorator-gen-155>\", line 2, in create\r\n File \"/opt/odoo/odoo/api.py\", line 345, in _model_create_multi\r\n return create(self, arg)\r\n File \"/opt/odoo/custom-addons/mail_debrand/models/mail_render_mixinANDmail_mail.py\", line 98, in create\r\n values_list[index][\"body_html\"] = self.env[\r\n File 
\"/opt/odoo/custom-addons/mail_debrand/models/mail_render_mixinANDmail_mail.py\", line 18, in remove_href_odoo\r\n has_odoo_link = re.search(r\"<a\\s(.*)odoo\\.com\", value, flags=re.IGNORECASE)\r\n File \"/usr/lib/python3.8/re.py\", line 201, in search\r\n return _compile(pattern, flags).search(string)\r\nException\r\n \r\nThe above exception was the direct cause of the following exception:\r\n \r\nTraceback (most recent call last):\r\n File \"/opt/odoo/odoo/http.py\", line 639, in _handle_exception\r\n return super(JsonRequest, self)._handle_exception(exception)\r\n File \"/opt/odoo/odoo/http.py\", line 315, in _handle_exception\r\n raise exception.with_traceback(None) from new_cause\r\nTypeError: cannot use a string pattern on a bytes-like object\n", "before_files": [{"content": "# Copyright 2019 O4SB - Graeme Gellatly\n# Copyright 2019 Tecnativa - Ernesto Tejeda\n# Copyright 2020 Onestein - Andrea Stirpe\n# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).\nimport re\n\nfrom lxml import etree, html\n\nfrom odoo import api, models\n\n\nclass MailRenderMixin(models.AbstractModel):\n _inherit = \"mail.render.mixin\"\n\n def remove_href_odoo(self, value, remove_parent=True, remove_before=False):\n if len(value) < 20:\n return value\n has_odoo_link = re.search(r\"<a\\s(.*)odoo\\.com\", value, flags=re.IGNORECASE)\n if has_odoo_link:\n tree = etree.HTML(\n value\n ) # html with brlken links tree = etree.fromstring(value) just xml\n odoo_achors = tree.xpath('//a[contains(@href,\"odoo.com\")]')\n for elem in odoo_achors:\n parent = elem.getparent()\n previous = elem.getprevious()\n\n if remove_before and not remove_parent and previous:\n # remove 'using' that is before <a and after </span>\n bytes_text = etree.tostring(\n previous, pretty_print=True, method=\"html\"\n )\n only_what_is_in_tags = bytes_text[: bytes_text.rfind(b\">\") + 1]\n data_formatted = html.fromstring(only_what_is_in_tags)\n parent.replace(previous, data_formatted)\n if len(parent.getparent()) and remove_parent:\n # anchor <a href odoo has a parent powered by that must be removed\n parent.getparent().remove(parent)\n else:\n if parent.tag == \"td\": # also here can be powerd by\n parent.getparent().remove(parent)\n else:\n parent.remove(elem)\n value = etree.tostring(tree, pretty_print=True, method=\"html\")\n # etree can return bytes; ensure we get a proper string\n if type(value) is bytes:\n value = value.decode()\n return re.sub(\"[^(<)(</)]odoo\", \"\", value, flags=re.IGNORECASE)\n\n @api.model\n def _render_template(\n self,\n template_src,\n model,\n res_ids,\n engine=\"jinja\",\n add_context=None,\n post_process=False,\n ):\n \"\"\"replace anything that is with odoo in templates\n if is a <a that contains odoo will delete it completly\n original:\n Render the given string on records designed by model / res_ids using\n the given rendering engine. 
Currently only jinja is supported.\n\n :param str template_src: template text to render (jinja) or (qweb)\n this could be cleaned but hey, we are in a rush\n :param str model: model name of records on which we want to perform rendering\n :param list res_ids: list of ids of records (all belonging to same model)\n :param string engine: jinja\n :param post_process: perform rendered str / html post processing (see\n ``_render_template_postprocess``)\n\n :return dict: {res_id: string of rendered template based on record}\"\"\"\n orginal_rendered = super()._render_template(\n template_src,\n model,\n res_ids,\n engine=\"jinja\",\n add_context=None,\n post_process=False,\n )\n\n for key in res_ids:\n orginal_rendered[key] = self.remove_href_odoo(orginal_rendered[key])\n\n return orginal_rendered\n\n\nclass MailMail(models.AbstractModel):\n _inherit = \"mail.mail\"\n\n # in messages from objects is adding using Odoo that we are going to remove\n\n @api.model_create_multi\n def create(self, values_list):\n for index, _value in enumerate(values_list):\n values_list[index][\"body_html\"] = self.env[\n \"mail.render.mixin\"\n ].remove_href_odoo(\n values_list[index][\"body_html\"], remove_parent=0, remove_before=1\n )\n\n return super().create(values_list)\n", "path": "mail_debrand/models/mail_render_mixinANDmail_mail.py"}]}
2,545
368
gh_patches_debug_24957
rasdani/github-patches
git_diff
conan-io__conan-center-index-10297
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [package] dbus/1.12.20: cannot connect to system bus on linux # dbus/1.12.20 cannot connect to the system bus on linux The system bus socket in improperly configured (see log) when dbus/1.12.20 is built. This makes any connection attempt to the system bus impossible. ### Package and Environment Details * Package Name/Version: **dbus/1.12.20** * Operating System+version: **Linux Ubuntu 20.04** * Compiler+version: **GCC 9** * Docker image: **n/a** * Conan version: **conan 1.46.2** * Python version: **Python 3.8.10** ### Conan profile ``` Configuration for profile default: [settings] os=Linux os_build=Linux arch=x86_64 arch_build=x86_64 compiler=gcc compiler.version=9 compiler.libcxx=libstdc++11 build_type=Release [options] [conf] [build_requires] [env] ``` ### Steps to reproduce Minimal Example: conanfile.txt: ``` [requires] dbus/1.12.20 qt/5.15.2 [options] qt:shared=True qt:with_dbus=True [generators] CMakeDeps CMakeToolchain VirtualRunEnv ``` CMakeLists.txt: ```cmake cmake_minimum_required(VERSION 3.15) project(dbus-test) find_package(Qt5 REQUIRED COMPONENTS DBus) add_executable(dbus-test main.cpp) target_link_libraries(dbus-test Qt5::DBus) ``` main.cpp: ```cpp #include <iostream> #include <QCoreApplication> #include <QDBusConnection> #include <QDBusError> int main(int argc, char**argv) { QDBusConnection connection = QDBusConnection::systemBus(); std::cout << "Is connected? " << connection.isConnected() << "\nError: \"" << connection.lastError().message().toStdString() << "\"\n"; return 0; } ``` 1. run `mkdir build && cd build && conan install .. --build=missing` 2. run `cmake -DCMAKE_TOOLCHAIN_FILE=conan_toolchain.cmake ..` 3. run `make` 4. run `source conanrun.sh` 5. run `./dbus-test` Expected output: ``` Is connected? 1 Error: "" ``` Actual output: ``` Is connected? 0 Error: "Socket name too long " ``` **Note** that running `dbus-test` outside of the conan runenv will produce the expected behavior, as the system dbus library is used (as seen by `ldd dbus-test`). ### Logs (Include/Attach if Applicable) [log.txt](https://github.com/conan-io/conan-center-index/files/8470511/log.txt) </issue> <code> [start of recipes/dbus/1.x.x/conanfile.py] 1 from conans import AutoToolsBuildEnvironment, ConanFile, tools, CMake 2 import os 3 import shutil 4 import textwrap 5 6 required_conan_version = ">=1.43.0" 7 8 9 class DbusConan(ConanFile): 10 name = "dbus" 11 license = ("AFL-2.1", "GPL-2.0-or-later") 12 url = "https://github.com/conan-io/conan-center-index" 13 homepage = "https://www.freedesktop.org/wiki/Software/dbus" 14 description = "D-Bus is a simple system for interprocess communication and coordination." 
15 topics = ("dbus") 16 17 settings = "os", "arch", "compiler", "build_type" 18 options = { 19 "with_x11": [True, False], 20 "with_glib": [True, False], 21 } 22 default_options = { 23 "with_x11": False, 24 "with_glib": False, 25 } 26 27 generators = "pkg_config", "cmake", "cmake_find_package" 28 _autotools = None 29 _cmake = None 30 31 @property 32 def _source_subfolder(self): 33 return "source_subfolder" 34 35 @property 36 def _build_subfolder(self): 37 return "build_subfolder" 38 39 def config_options(self): 40 if self.settings.os not in ("Linux", "FreeBSD"): 41 del self.options.with_x11 42 43 def configure(self): 44 del self.settings.compiler.libcxx 45 del self.settings.compiler.cppstd 46 47 def requirements(self): 48 self.requires("expat/2.4.6") 49 if self.options.with_glib: 50 self.requires("glib/2.70.1") 51 if self.options.get_safe("with_x11"): 52 self.requires("xorg/system") 53 54 def source(self): 55 tools.get(**self.conan_data["sources"][self.version], strip_root=True, destination=self._source_subfolder) 56 57 def _configure_autotools(self): 58 if not self._autotools: 59 self._autotools = AutoToolsBuildEnvironment(self) 60 61 args = [] 62 args.append("--disable-tests") 63 args.append("--disable-doxygen-docs") 64 args.append("--disable-xml-docs") 65 66 args.append("--with-x=%s" % ("yes" if self.options.get_safe("with_x11", False) else "no")) 67 args.append("--%s-x11-autolaunch" % ("enable" if self.options.get_safe("with_x11", False) else "disable")) 68 args.append("--disable-asserts") 69 args.append("--disable-checks") 70 71 args.append("--with-systemdsystemunitdir=%s" % os.path.join(self.package_folder, "lib", "systemd", "system")) 72 args.append("--with-systemduserunitdir=%s" % os.path.join(self.package_folder, "lib", "systemd", "user")) 73 74 args.append("--disable-launchd") 75 args.append("--disable-systemd") 76 77 self._autotools.configure(args=args, configure_dir=self._source_subfolder) 78 return self._autotools 79 80 def _configure_cmake(self): 81 if not self._cmake: 82 self._cmake = CMake(self) 83 84 self._cmake.definitions["DBUS_BUILD_TESTS"] = False 85 self._cmake.definitions["DBUS_ENABLE_DOXYGEN_DOCS"] = False 86 self._cmake.definitions["DBUS_ENABLE_XML_DOCS"] = False 87 88 self._cmake.definitions["DBUS_BUILD_X11"] = self.options.get_safe("with_x11", False) 89 self._cmake.definitions["DBUS_WITH_GLIB"] = self.options.with_glib 90 self._cmake.definitions["DBUS_DISABLE_ASSERT"] = False 91 self._cmake.definitions["DBUS_DISABLE_CHECKS"] = False 92 93 path_to_cmake_lists = os.path.join(self._source_subfolder, "cmake") 94 95 self._cmake.configure(source_folder=path_to_cmake_lists, 96 build_folder=self._build_subfolder) 97 return self._cmake 98 99 def build(self): 100 tools.replace_in_file(os.path.join(self._source_subfolder, "cmake", "CMakeLists.txt"), 101 "project(dbus)", 102 "project(dbus)\ninclude(../../conanbuildinfo.cmake)\nconan_basic_setup()") 103 if self.settings.os == "Windows": 104 cmake = self._configure_cmake() 105 cmake.build() 106 else: 107 autotools = self._configure_autotools() 108 autotools.make() 109 110 def package(self): 111 self.copy(pattern="COPYING", dst="licenses", 112 src=self._source_subfolder) 113 if self.settings.os == "Windows": 114 cmake = self._configure_cmake() 115 cmake.install() 116 else: 117 autotools = self._configure_autotools() 118 autotools.install() 119 120 tools.rmdir(os.path.join(self.package_folder, "share", "doc")) 121 for i in ["var", "share", "etc"]: 122 shutil.move(os.path.join(self.package_folder, i), 
os.path.join(self.package_folder, "res", i)) 123 124 tools.rmdir(os.path.join(self.package_folder, "lib", "cmake")) 125 tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig")) 126 tools.rmdir(os.path.join(self.package_folder, "lib", "systemd")) 127 tools.remove_files_by_mask(self.package_folder, "*.la") 128 129 # TODO: to remove in conan v2 once cmake_find_package_* generators removed 130 self._create_cmake_module_alias_targets( 131 os.path.join(self.package_folder, self._module_file_rel_path), 132 {"dbus-1": "dbus-1::dbus-1"} 133 ) 134 135 @staticmethod 136 def _create_cmake_module_alias_targets(module_file, targets): 137 content = "" 138 for alias, aliased in targets.items(): 139 content += textwrap.dedent("""\ 140 if(TARGET {aliased} AND NOT TARGET {alias}) 141 add_library({alias} INTERFACE IMPORTED) 142 set_property(TARGET {alias} PROPERTY INTERFACE_LINK_LIBRARIES {aliased}) 143 endif() 144 """.format(alias=alias, aliased=aliased)) 145 tools.save(module_file, content) 146 147 @property 148 def _module_file_rel_path(self): 149 return os.path.join("lib", "cmake", "conan-official-{}-targets.cmake".format(self.name)) 150 151 def package_info(self): 152 self.cpp_info.set_property("cmake_file_name", "DBus1") 153 self.cpp_info.set_property("cmake_target_name", "dbus-1") 154 self.cpp_info.set_property("pkg_config_name", "dbus-1") 155 self.cpp_info.includedirs.extend([ 156 os.path.join("include", "dbus-1.0"), 157 os.path.join("lib", "dbus-1.0", "include"), 158 ]) 159 self.cpp_info.libs = ["dbus-1"] 160 161 # TODO: to remove in conan v2 once cmake_find_package_* & pkg_config generators removed 162 self.cpp_info.filenames["cmake_find_package"] = "DBus1" 163 self.cpp_info.filenames["cmake_find_package_multi"] = "DBus1" 164 self.cpp_info.names["cmake_find_package"] = "dbus-1" 165 self.cpp_info.names["cmake_find_package_multi"] = "dbus-1" 166 self.cpp_info.build_modules["cmake_find_package"] = [self._module_file_rel_path] 167 self.cpp_info.build_modules["cmake_find_package_multi"] = [self._module_file_rel_path] 168 self.cpp_info.names["pkg_config"] = "dbus-1" 169 [end of recipes/dbus/1.x.x/conanfile.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/recipes/dbus/1.x.x/conanfile.py b/recipes/dbus/1.x.x/conanfile.py --- a/recipes/dbus/1.x.x/conanfile.py +++ b/recipes/dbus/1.x.x/conanfile.py @@ -16,10 +16,14 @@ settings = "os", "arch", "compiler", "build_type" options = { + "system_socket": "ANY", + "system_pid_file": "ANY", "with_x11": [True, False], "with_glib": [True, False], } default_options = { + "system_socket": "", + "system_pid_file": "", "with_x11": False, "with_glib": False, } @@ -71,6 +75,11 @@ args.append("--with-systemdsystemunitdir=%s" % os.path.join(self.package_folder, "lib", "systemd", "system")) args.append("--with-systemduserunitdir=%s" % os.path.join(self.package_folder, "lib", "systemd", "user")) + if str(self.options.system_socket) is not "": + args.append("--with-system-socket=%s" % self.options.system_socket) + if str(self.options.system_pid_file) is not "": + args.append("--with-system-pid-file=%s" % self.options.system_pid_file) + args.append("--disable-launchd") args.append("--disable-systemd")
{"golden_diff": "diff --git a/recipes/dbus/1.x.x/conanfile.py b/recipes/dbus/1.x.x/conanfile.py\n--- a/recipes/dbus/1.x.x/conanfile.py\n+++ b/recipes/dbus/1.x.x/conanfile.py\n@@ -16,10 +16,14 @@\n \n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n+ \"system_socket\": \"ANY\",\n+ \"system_pid_file\": \"ANY\",\n \"with_x11\": [True, False],\n \"with_glib\": [True, False],\n }\n default_options = {\n+ \"system_socket\": \"\",\n+ \"system_pid_file\": \"\",\n \"with_x11\": False,\n \"with_glib\": False,\n }\n@@ -71,6 +75,11 @@\n args.append(\"--with-systemdsystemunitdir=%s\" % os.path.join(self.package_folder, \"lib\", \"systemd\", \"system\"))\n args.append(\"--with-systemduserunitdir=%s\" % os.path.join(self.package_folder, \"lib\", \"systemd\", \"user\"))\n \n+ if str(self.options.system_socket) is not \"\":\n+ args.append(\"--with-system-socket=%s\" % self.options.system_socket)\n+ if str(self.options.system_pid_file) is not \"\":\n+ args.append(\"--with-system-pid-file=%s\" % self.options.system_pid_file)\n+\n args.append(\"--disable-launchd\")\n args.append(\"--disable-systemd\")\n", "issue": "[package] dbus/1.12.20: cannot connect to system bus on linux\n# dbus/1.12.20 cannot connect to the system bus on linux\r\nThe system bus socket in improperly configured (see log) when dbus/1.12.20 is built. This makes any connection attempt to the system bus impossible.\r\n\r\n### Package and Environment Details\r\n * Package Name/Version: **dbus/1.12.20**\r\n * Operating System+version: **Linux Ubuntu 20.04**\r\n * Compiler+version: **GCC 9**\r\n * Docker image: **n/a**\r\n * Conan version: **conan 1.46.2**\r\n * Python version: **Python 3.8.10**\r\n\r\n\r\n### Conan profile\r\n```\r\nConfiguration for profile default:\r\n\r\n[settings]\r\nos=Linux\r\nos_build=Linux\r\narch=x86_64\r\narch_build=x86_64\r\ncompiler=gcc\r\ncompiler.version=9\r\ncompiler.libcxx=libstdc++11\r\nbuild_type=Release\r\n[options]\r\n[conf]\r\n[build_requires]\r\n[env]\r\n```\r\n\r\n\r\n### Steps to reproduce\r\nMinimal Example:\r\n\r\nconanfile.txt:\r\n```\r\n[requires]\r\ndbus/1.12.20\r\nqt/5.15.2\r\n\r\n[options]\r\nqt:shared=True\r\nqt:with_dbus=True\r\n\r\n[generators]\r\nCMakeDeps\r\nCMakeToolchain\r\nVirtualRunEnv\r\n```\r\n\r\nCMakeLists.txt:\r\n```cmake\r\ncmake_minimum_required(VERSION 3.15)\r\nproject(dbus-test)\r\n\r\nfind_package(Qt5 REQUIRED COMPONENTS DBus)\r\nadd_executable(dbus-test main.cpp)\r\ntarget_link_libraries(dbus-test Qt5::DBus)\r\n```\r\n\r\nmain.cpp:\r\n```cpp\r\n#include <iostream>\r\n\r\n#include <QCoreApplication>\r\n#include <QDBusConnection>\r\n#include <QDBusError>\r\n\r\nint main(int argc, char**argv) {\r\n QDBusConnection connection = QDBusConnection::systemBus();\r\n\r\n std::cout << \"Is connected? \" << connection.isConnected()\r\n << \"\\nError: \\\"\" << connection.lastError().message().toStdString() << \"\\\"\\n\";\r\n\r\n return 0;\r\n}\r\n```\r\n\r\n1. run `mkdir build && cd build && conan install .. --build=missing`\r\n2. run `cmake -DCMAKE_TOOLCHAIN_FILE=conan_toolchain.cmake ..`\r\n3. run `make`\r\n4. run `source conanrun.sh`\r\n5. run `./dbus-test`\r\n\r\nExpected output:\r\n```\r\nIs connected? 1\r\nError: \"\"\r\n```\r\nActual output:\r\n```\r\nIs connected? 
0\r\nError: \"Socket name too long\r\n\"\r\n```\r\n\r\n**Note** that running `dbus-test` outside of the conan runenv will produce the expected behavior, as the system dbus library is used (as seen by `ldd dbus-test`).\r\n\r\n### Logs (Include/Attach if Applicable)\r\n[log.txt](https://github.com/conan-io/conan-center-index/files/8470511/log.txt)\r\n\r\n\r\n\n", "before_files": [{"content": "from conans import AutoToolsBuildEnvironment, ConanFile, tools, CMake\nimport os\nimport shutil\nimport textwrap\n\nrequired_conan_version = \">=1.43.0\"\n\n\nclass DbusConan(ConanFile):\n name = \"dbus\"\n license = (\"AFL-2.1\", \"GPL-2.0-or-later\")\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://www.freedesktop.org/wiki/Software/dbus\"\n description = \"D-Bus is a simple system for interprocess communication and coordination.\"\n topics = (\"dbus\")\n\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\n \"with_x11\": [True, False],\n \"with_glib\": [True, False],\n }\n default_options = {\n \"with_x11\": False,\n \"with_glib\": False,\n }\n\n generators = \"pkg_config\", \"cmake\", \"cmake_find_package\"\n _autotools = None\n _cmake = None\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n @property\n def _build_subfolder(self):\n return \"build_subfolder\"\n\n def config_options(self):\n if self.settings.os not in (\"Linux\", \"FreeBSD\"):\n del self.options.with_x11\n\n def configure(self):\n del self.settings.compiler.libcxx\n del self.settings.compiler.cppstd\n\n def requirements(self):\n self.requires(\"expat/2.4.6\")\n if self.options.with_glib:\n self.requires(\"glib/2.70.1\")\n if self.options.get_safe(\"with_x11\"):\n self.requires(\"xorg/system\")\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version], strip_root=True, destination=self._source_subfolder)\n\n def _configure_autotools(self):\n if not self._autotools:\n self._autotools = AutoToolsBuildEnvironment(self)\n\n args = []\n args.append(\"--disable-tests\")\n args.append(\"--disable-doxygen-docs\")\n args.append(\"--disable-xml-docs\")\n\n args.append(\"--with-x=%s\" % (\"yes\" if self.options.get_safe(\"with_x11\", False) else \"no\"))\n args.append(\"--%s-x11-autolaunch\" % (\"enable\" if self.options.get_safe(\"with_x11\", False) else \"disable\"))\n args.append(\"--disable-asserts\")\n args.append(\"--disable-checks\")\n\n args.append(\"--with-systemdsystemunitdir=%s\" % os.path.join(self.package_folder, \"lib\", \"systemd\", \"system\"))\n args.append(\"--with-systemduserunitdir=%s\" % os.path.join(self.package_folder, \"lib\", \"systemd\", \"user\"))\n\n args.append(\"--disable-launchd\")\n args.append(\"--disable-systemd\")\n\n self._autotools.configure(args=args, configure_dir=self._source_subfolder)\n return self._autotools\n\n def _configure_cmake(self):\n if not self._cmake:\n self._cmake = CMake(self)\n\n self._cmake.definitions[\"DBUS_BUILD_TESTS\"] = False\n self._cmake.definitions[\"DBUS_ENABLE_DOXYGEN_DOCS\"] = False\n self._cmake.definitions[\"DBUS_ENABLE_XML_DOCS\"] = False\n\n self._cmake.definitions[\"DBUS_BUILD_X11\"] = self.options.get_safe(\"with_x11\", False)\n self._cmake.definitions[\"DBUS_WITH_GLIB\"] = self.options.with_glib\n self._cmake.definitions[\"DBUS_DISABLE_ASSERT\"] = False\n self._cmake.definitions[\"DBUS_DISABLE_CHECKS\"] = False\n\n path_to_cmake_lists = os.path.join(self._source_subfolder, \"cmake\")\n\n self._cmake.configure(source_folder=path_to_cmake_lists,\n 
build_folder=self._build_subfolder)\n return self._cmake\n\n def build(self):\n tools.replace_in_file(os.path.join(self._source_subfolder, \"cmake\", \"CMakeLists.txt\"),\n \"project(dbus)\",\n \"project(dbus)\\ninclude(../../conanbuildinfo.cmake)\\nconan_basic_setup()\")\n if self.settings.os == \"Windows\":\n cmake = self._configure_cmake()\n cmake.build()\n else:\n autotools = self._configure_autotools()\n autotools.make()\n\n def package(self):\n self.copy(pattern=\"COPYING\", dst=\"licenses\",\n src=self._source_subfolder)\n if self.settings.os == \"Windows\":\n cmake = self._configure_cmake()\n cmake.install()\n else:\n autotools = self._configure_autotools()\n autotools.install()\n\n tools.rmdir(os.path.join(self.package_folder, \"share\", \"doc\"))\n for i in [\"var\", \"share\", \"etc\"]:\n shutil.move(os.path.join(self.package_folder, i), os.path.join(self.package_folder, \"res\", i))\n\n tools.rmdir(os.path.join(self.package_folder, \"lib\", \"cmake\"))\n tools.rmdir(os.path.join(self.package_folder, \"lib\", \"pkgconfig\"))\n tools.rmdir(os.path.join(self.package_folder, \"lib\", \"systemd\"))\n tools.remove_files_by_mask(self.package_folder, \"*.la\")\n\n # TODO: to remove in conan v2 once cmake_find_package_* generators removed\n self._create_cmake_module_alias_targets(\n os.path.join(self.package_folder, self._module_file_rel_path),\n {\"dbus-1\": \"dbus-1::dbus-1\"}\n )\n\n @staticmethod\n def _create_cmake_module_alias_targets(module_file, targets):\n content = \"\"\n for alias, aliased in targets.items():\n content += textwrap.dedent(\"\"\"\\\n if(TARGET {aliased} AND NOT TARGET {alias})\n add_library({alias} INTERFACE IMPORTED)\n set_property(TARGET {alias} PROPERTY INTERFACE_LINK_LIBRARIES {aliased})\n endif()\n \"\"\".format(alias=alias, aliased=aliased))\n tools.save(module_file, content)\n\n @property\n def _module_file_rel_path(self):\n return os.path.join(\"lib\", \"cmake\", \"conan-official-{}-targets.cmake\".format(self.name))\n\n def package_info(self):\n self.cpp_info.set_property(\"cmake_file_name\", \"DBus1\")\n self.cpp_info.set_property(\"cmake_target_name\", \"dbus-1\")\n self.cpp_info.set_property(\"pkg_config_name\", \"dbus-1\")\n self.cpp_info.includedirs.extend([\n os.path.join(\"include\", \"dbus-1.0\"),\n os.path.join(\"lib\", \"dbus-1.0\", \"include\"),\n ])\n self.cpp_info.libs = [\"dbus-1\"]\n\n # TODO: to remove in conan v2 once cmake_find_package_* & pkg_config generators removed\n self.cpp_info.filenames[\"cmake_find_package\"] = \"DBus1\"\n self.cpp_info.filenames[\"cmake_find_package_multi\"] = \"DBus1\"\n self.cpp_info.names[\"cmake_find_package\"] = \"dbus-1\"\n self.cpp_info.names[\"cmake_find_package_multi\"] = \"dbus-1\"\n self.cpp_info.build_modules[\"cmake_find_package\"] = [self._module_file_rel_path]\n self.cpp_info.build_modules[\"cmake_find_package_multi\"] = [self._module_file_rel_path]\n self.cpp_info.names[\"pkg_config\"] = \"dbus-1\"\n", "path": "recipes/dbus/1.x.x/conanfile.py"}]}
prompt tokens: 3,263 | diff tokens: 334
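Aside on the dbus/Conan record above: the accepted patch adds two free-form recipe options, `system_socket` and `system_pid_file`, and forwards them to `./configure` only when they are non-empty, which lets a consumer point the Conan-built client library at the host's real system bus instead of whatever default gets compiled in at build time (in the reported failure, a path too long to be a valid socket name). Below is a minimal sketch of that conditional forwarding as a plain function rather than the recipe method; the truthiness check is a deliberate simplification of the patch's `is not ""` comparison, and `/var/run/dbus/system_bus_socket` is only the conventional location on most Linux distributions, not a guarantee.

```python
# Sketch only: forward the two bus paths to ./configure when the user set them.
def system_bus_args(system_socket="", system_pid_file=""):
    args = []
    if str(system_socket):
        args.append("--with-system-socket=%s" % system_socket)
    if str(system_pid_file):
        args.append("--with-system-pid-file=%s" % system_pid_file)
    return args

print(system_bus_args(system_socket="/var/run/dbus/system_bus_socket"))
# ['--with-system-socket=/var/run/dbus/system_bus_socket']
```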
record: gh_patches_debug_3919 | source: rasdani/github-patches | task: git_diff | upstream issue: gammapy__gammapy-3381
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> FoVBackgroundMaker parameter error not accessible After the running the `FoVBackgroundMaker`, the error on the fitted parameters is not accessible. **To reproduce** Try `print(dataset.background_model.spectral_model.norm.error)` inside the `Data Reduction Loop` in `analysis_2` notebook. It is always `0` **Reason** It seems that the error is available at this stage, https://github.com/gammapy/gammapy/blob/df3db33e34233369512d57cca5341a7b645802d6/gammapy/makers/background/fov.py#L180 but the info is lost outside the `with models.restore_status` block, eg: in line 187. I did not understand why, though. </issue> <code> [start of gammapy/makers/background/fov.py] 1 # Licensed under a 3-clause BSD style license - see LICENSE.rst 2 """FoV background estimation.""" 3 import logging 4 import numpy as np 5 from gammapy.maps import Map 6 from gammapy.modeling import Fit 7 from gammapy.modeling.models import FoVBackgroundModel, Model 8 from ..core import Maker 9 10 11 __all__ = ["FoVBackgroundMaker"] 12 13 log = logging.getLogger(__name__) 14 15 16 class FoVBackgroundMaker(Maker): 17 """Normalize template background on the whole field-of-view. 18 19 The dataset background model can be simply scaled (method="scale") or fitted (method="fit") 20 on the dataset counts. 21 22 The normalization is performed outside the exclusion mask that is passed on init. 23 24 If a SkyModel is set on the input dataset and method is 'fit', it' parameters 25 are frozen during the fov normalization fit. 26 27 If the requirement (greater than) of either min_counts or min_npred_background is not satisfied, 28 the background will not be normalised 29 30 Parameters 31 ---------- 32 method : str in ['fit', 'scale'] 33 the normalization method to be applied. Default 'scale'. 34 exclusion_mask : `~gammapy.maps.WcsNDMap` 35 Exclusion mask 36 spectral_model : SpectralModel or str 37 Reference norm spectral model to use for the `FoVBackgroundModel`, if none is defined 38 on the dataset. By default, use pl-norm. 39 min_counts : int 40 Minimum number of counts required outside the exclusion region 41 min_npred_background : float 42 Minimum number of predicted background counts required outside the exclusion region 43 """ 44 45 tag = "FoVBackgroundMaker" 46 available_methods = ["fit", "scale"] 47 48 def __init__( 49 self, 50 method="scale", 51 exclusion_mask=None, 52 spectral_model="pl-norm", 53 min_counts=0, 54 min_npred_background=0, 55 ): 56 self.method = method 57 self.exclusion_mask = exclusion_mask 58 self.min_counts = min_counts 59 self.min_npred_background = min_npred_background 60 61 if isinstance(spectral_model, str): 62 spectral_model = Model.create(tag=spectral_model, model_type="spectral") 63 64 if not spectral_model.is_norm_spectral_model: 65 raise ValueError("Spectral model must be a norm spectral model") 66 67 self.default_spectral_model = spectral_model 68 69 @property 70 def method(self): 71 """Method""" 72 return self._method 73 74 @method.setter 75 def method(self, value): 76 """Method setter""" 77 if value not in self.available_methods: 78 raise ValueError( 79 f"Not a valid method for FoVBackgroundMaker: {value}." 80 f" Choose from {self.available_methods}" 81 ) 82 83 self._method = value 84 85 def make_default_fov_background_model(self, dataset): 86 """Add fov background model to the model definition 87 88 Parameters 89 ---------- 90 dataset : `~gammapy.datasets.MapDataset` 91 Input map dataset. 
92 93 Returns 94 ------- 95 dataset : `~gammapy.datasets.MapDataset` 96 Map dataset including background model 97 98 """ 99 bkg_model = FoVBackgroundModel( 100 dataset_name=dataset.name, spectral_model=self.default_spectral_model.copy() 101 ) 102 103 if dataset.models is None: 104 dataset.models = bkg_model 105 else: 106 dataset.models = dataset.models + bkg_model 107 108 return dataset 109 110 def make_exclusion_mask(self, dataset): 111 """Project input exclusion mask to dataset geom 112 113 Parameters 114 ---------- 115 dataset : `~gammapy.datasets.MapDataset` 116 Input map dataset. 117 118 Returns 119 ------- 120 mask : `~gammapy.maps.WcsNDMap` 121 Projected exclusion mask 122 """ 123 geom = dataset._geom 124 if self.exclusion_mask: 125 mask = self.exclusion_mask.interp_to_geom(geom=geom) 126 else: 127 mask = Map.from_geom(geom=geom, data=1, dtype=bool) 128 return mask 129 130 def run(self, dataset, observation=None): 131 """Run FoV background maker. 132 133 Fit the background model norm 134 135 Parameters 136 ---------- 137 dataset : `~gammapy.datasets.MapDataset` 138 Input map dataset. 139 140 """ 141 mask_fit = dataset.mask_fit 142 143 dataset.mask_fit = self.make_exclusion_mask(dataset) 144 145 if dataset.background_model is None: 146 dataset = self.make_default_fov_background_model(dataset) 147 148 if self.method == "fit": 149 dataset = self.make_background_fit(dataset) 150 else: 151 # always scale the background first 152 dataset = self.make_background_scale(dataset) 153 154 dataset.mask_fit = mask_fit 155 return dataset 156 157 @staticmethod 158 def make_background_fit(dataset): 159 """Fit the FoV background model on the dataset counts data 160 161 Parameters 162 ---------- 163 dataset : `~gammapy.datasets.MapDataset` 164 Input dataset. 165 166 Returns 167 ------- 168 dataset : `~gammapy.datasets.MapDataset` 169 Map dataset with fitted background model 170 """ 171 # freeze all model components not related to background model 172 173 models = dataset.models 174 175 with models.restore_status(restore_values=False): 176 models.select(tag="sky-model").freeze() 177 178 fit = Fit([dataset]) 179 fit_result = fit.run() 180 if not fit_result.success: 181 log.warning( 182 f"FoVBackgroundMaker failed. Fit did not converge for {dataset.name}. " 183 f"Setting mask to False." 184 ) 185 dataset.mask_safe.data[...] = False 186 187 return dataset 188 189 def make_background_scale(self, dataset): 190 """Fit the FoV background model on the dataset counts data 191 192 Parameters 193 ---------- 194 dataset : `~gammapy.datasets.MapDataset` 195 Input dataset. 196 197 Returns 198 ------- 199 dataset : `~gammapy.datasets.MapDataset` 200 Map dataset with scaled background model 201 202 """ 203 mask = dataset.mask 204 count_tot = dataset.counts.data[mask].sum() 205 bkg_tot = dataset.npred_background().data[mask].sum() 206 207 if count_tot <= self.min_counts: 208 log.warning( 209 f"FoVBackgroundMaker failed. Only {int(count_tot)} counts outside exclusion mask for {dataset.name}. " 210 f"Setting mask to False." 211 ) 212 dataset.mask_safe.data[...] = False 213 elif bkg_tot <= self.min_npred_background: 214 log.warning( 215 f"FoVBackgroundMaker failed. Only {int(bkg_tot)} background counts outside exclusion mask for {dataset.name}. " 216 f"Setting mask to False." 217 ) 218 dataset.mask_safe.data[...] 
= False 219 else: 220 value = count_tot / bkg_tot 221 err = np.sqrt(count_tot) / bkg_tot 222 dataset.models[f"{dataset.name}-bkg"].spectral_model.norm.value = value 223 dataset.models[f"{dataset.name}-bkg"].spectral_model.norm.error = err 224 225 return dataset 226 [end of gammapy/makers/background/fov.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/gammapy/makers/background/fov.py b/gammapy/makers/background/fov.py --- a/gammapy/makers/background/fov.py +++ b/gammapy/makers/background/fov.py @@ -170,7 +170,7 @@ """ # freeze all model components not related to background model - models = dataset.models + models = dataset.models.select(tag="sky-model") with models.restore_status(restore_values=False): models.select(tag="sky-model").freeze()
{"golden_diff": "diff --git a/gammapy/makers/background/fov.py b/gammapy/makers/background/fov.py\n--- a/gammapy/makers/background/fov.py\n+++ b/gammapy/makers/background/fov.py\n@@ -170,7 +170,7 @@\n \"\"\"\n # freeze all model components not related to background model\n \n- models = dataset.models\n+ models = dataset.models.select(tag=\"sky-model\")\n \n with models.restore_status(restore_values=False):\n models.select(tag=\"sky-model\").freeze()\n", "issue": "FoVBackgroundMaker parameter error not accessible\nAfter the running the `FoVBackgroundMaker`, the error on the fitted parameters is not accessible.\r\n\r\n**To reproduce**\r\nTry `print(dataset.background_model.spectral_model.norm.error)` inside the `Data Reduction Loop` in `analysis_2` notebook. It is always `0`\r\n\r\n**Reason**\r\nIt seems that the error is available at this stage, \r\nhttps://github.com/gammapy/gammapy/blob/df3db33e34233369512d57cca5341a7b645802d6/gammapy/makers/background/fov.py#L180\r\n\r\nbut the info is lost outside the `with models.restore_status` block, eg: in line 187.\r\nI did not understand why, though.\n", "before_files": [{"content": "# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\"\"\"FoV background estimation.\"\"\"\nimport logging\nimport numpy as np\nfrom gammapy.maps import Map\nfrom gammapy.modeling import Fit\nfrom gammapy.modeling.models import FoVBackgroundModel, Model\nfrom ..core import Maker\n\n\n__all__ = [\"FoVBackgroundMaker\"]\n\nlog = logging.getLogger(__name__)\n\n\nclass FoVBackgroundMaker(Maker):\n \"\"\"Normalize template background on the whole field-of-view.\n\n The dataset background model can be simply scaled (method=\"scale\") or fitted (method=\"fit\")\n on the dataset counts.\n\n The normalization is performed outside the exclusion mask that is passed on init.\n\n If a SkyModel is set on the input dataset and method is 'fit', it' parameters\n are frozen during the fov normalization fit.\n\n If the requirement (greater than) of either min_counts or min_npred_background is not satisfied,\n the background will not be normalised\n\n Parameters\n ----------\n method : str in ['fit', 'scale']\n the normalization method to be applied. Default 'scale'.\n exclusion_mask : `~gammapy.maps.WcsNDMap`\n Exclusion mask\n spectral_model : SpectralModel or str\n Reference norm spectral model to use for the `FoVBackgroundModel`, if none is defined\n on the dataset. 
By default, use pl-norm.\n min_counts : int\n Minimum number of counts required outside the exclusion region\n min_npred_background : float\n Minimum number of predicted background counts required outside the exclusion region\n \"\"\"\n\n tag = \"FoVBackgroundMaker\"\n available_methods = [\"fit\", \"scale\"]\n\n def __init__(\n self,\n method=\"scale\",\n exclusion_mask=None,\n spectral_model=\"pl-norm\",\n min_counts=0,\n min_npred_background=0,\n ):\n self.method = method\n self.exclusion_mask = exclusion_mask\n self.min_counts = min_counts\n self.min_npred_background = min_npred_background\n\n if isinstance(spectral_model, str):\n spectral_model = Model.create(tag=spectral_model, model_type=\"spectral\")\n\n if not spectral_model.is_norm_spectral_model:\n raise ValueError(\"Spectral model must be a norm spectral model\")\n\n self.default_spectral_model = spectral_model\n\n @property\n def method(self):\n \"\"\"Method\"\"\"\n return self._method\n\n @method.setter\n def method(self, value):\n \"\"\"Method setter\"\"\"\n if value not in self.available_methods:\n raise ValueError(\n f\"Not a valid method for FoVBackgroundMaker: {value}.\"\n f\" Choose from {self.available_methods}\"\n )\n\n self._method = value\n\n def make_default_fov_background_model(self, dataset):\n \"\"\"Add fov background model to the model definition\n\n Parameters\n ----------\n dataset : `~gammapy.datasets.MapDataset`\n Input map dataset.\n\n Returns\n -------\n dataset : `~gammapy.datasets.MapDataset`\n Map dataset including background model\n\n \"\"\"\n bkg_model = FoVBackgroundModel(\n dataset_name=dataset.name, spectral_model=self.default_spectral_model.copy()\n )\n\n if dataset.models is None:\n dataset.models = bkg_model\n else:\n dataset.models = dataset.models + bkg_model\n\n return dataset\n\n def make_exclusion_mask(self, dataset):\n \"\"\"Project input exclusion mask to dataset geom\n\n Parameters\n ----------\n dataset : `~gammapy.datasets.MapDataset`\n Input map dataset.\n\n Returns\n -------\n mask : `~gammapy.maps.WcsNDMap`\n Projected exclusion mask\n \"\"\"\n geom = dataset._geom\n if self.exclusion_mask:\n mask = self.exclusion_mask.interp_to_geom(geom=geom)\n else:\n mask = Map.from_geom(geom=geom, data=1, dtype=bool)\n return mask\n\n def run(self, dataset, observation=None):\n \"\"\"Run FoV background maker.\n\n Fit the background model norm\n\n Parameters\n ----------\n dataset : `~gammapy.datasets.MapDataset`\n Input map dataset.\n\n \"\"\"\n mask_fit = dataset.mask_fit\n\n dataset.mask_fit = self.make_exclusion_mask(dataset)\n\n if dataset.background_model is None:\n dataset = self.make_default_fov_background_model(dataset)\n\n if self.method == \"fit\":\n dataset = self.make_background_fit(dataset)\n else:\n # always scale the background first\n dataset = self.make_background_scale(dataset)\n\n dataset.mask_fit = mask_fit\n return dataset\n\n @staticmethod\n def make_background_fit(dataset):\n \"\"\"Fit the FoV background model on the dataset counts data\n\n Parameters\n ----------\n dataset : `~gammapy.datasets.MapDataset`\n Input dataset.\n\n Returns\n -------\n dataset : `~gammapy.datasets.MapDataset`\n Map dataset with fitted background model\n \"\"\"\n # freeze all model components not related to background model\n\n models = dataset.models\n\n with models.restore_status(restore_values=False):\n models.select(tag=\"sky-model\").freeze()\n\n fit = Fit([dataset])\n fit_result = fit.run()\n if not fit_result.success:\n log.warning(\n f\"FoVBackgroundMaker failed. 
Fit did not converge for {dataset.name}. \"\n f\"Setting mask to False.\"\n )\n dataset.mask_safe.data[...] = False\n\n return dataset\n\n def make_background_scale(self, dataset):\n \"\"\"Fit the FoV background model on the dataset counts data\n\n Parameters\n ----------\n dataset : `~gammapy.datasets.MapDataset`\n Input dataset.\n\n Returns\n -------\n dataset : `~gammapy.datasets.MapDataset`\n Map dataset with scaled background model\n\n \"\"\"\n mask = dataset.mask\n count_tot = dataset.counts.data[mask].sum()\n bkg_tot = dataset.npred_background().data[mask].sum()\n\n if count_tot <= self.min_counts:\n log.warning(\n f\"FoVBackgroundMaker failed. Only {int(count_tot)} counts outside exclusion mask for {dataset.name}. \"\n f\"Setting mask to False.\"\n )\n dataset.mask_safe.data[...] = False\n elif bkg_tot <= self.min_npred_background:\n log.warning(\n f\"FoVBackgroundMaker failed. Only {int(bkg_tot)} background counts outside exclusion mask for {dataset.name}. \"\n f\"Setting mask to False.\"\n )\n dataset.mask_safe.data[...] = False\n else:\n value = count_tot / bkg_tot\n err = np.sqrt(count_tot) / bkg_tot\n dataset.models[f\"{dataset.name}-bkg\"].spectral_model.norm.value = value\n dataset.models[f\"{dataset.name}-bkg\"].spectral_model.norm.error = err\n\n return dataset\n", "path": "gammapy/makers/background/fov.py"}]}
prompt tokens: 2,787 | diff tokens: 120
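Aside on the gammapy record above: the likely mechanism, and the one the accepted fix targets, is that a snapshot-and-restore context manager rolls back state for every object it was handed, so calling `restore_status` on the full model collection also wipes the freshly fitted error on the background norm when the `with` block exits. The fix narrows the collection to the sky models, which are the only ones being temporarily frozen. The toy below is deliberately simplified (a made-up `Param` class and context manager, not gammapy's actual `Models`/`Parameter` API), but it is runnable and shows why scoping the restore matters.

```python
import contextlib

class Param:
    def __init__(self):
        self.error = 0.0  # stands in for a fitted parameter error

@contextlib.contextmanager
def restore_status(params):
    saved = [(p, p.error) for p in params]   # snapshot on entry
    try:
        yield
    finally:
        for p, err in saved:                 # roll back on exit
            p.error = err

background, source = Param(), Param()

with restore_status([background, source]):   # too broad: background included
    background.error = 0.3                   # "result" of the fit
print(background.error)                      # 0.0: rolled back, like the reported bug

background = Param()
with restore_status([source]):               # scoped like the accepted fix
    background.error = 0.3
print(background.error)                      # 0.3: the fitted error survives
```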
record: gh_patches_debug_60955 | source: rasdani/github-patches | task: git_diff | upstream issue: voicepaw__so-vits-svc-fork-1155
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Ensure proper sorting by name for output files ### Is your feature request related to a problem? Please describe. To enhance readability and ensure proper sorting by name, the numeric part of output file names should have a fixed width. This can be achieved by adding leading zeros to the numeric part, with four digits likely being sufficient. ### Describe alternatives you've considered I don't have any. ### Additional context _No response_ ### Code of Conduct - [X] I agree to follow this project's Code of Conduct ### Are you willing to resolve this issue by submitting a Pull Request? - [X] Yes, I have the time, and I know how to start. - [ ] Yes, I have the time, but I don't know how to start. I would need guidance. - [ ] No, I don't have the time, although I believe I could do it if I had the time... - [ ] No, I don't have the time and I wouldn't even know how to start. </issue> <code> [start of src/so_vits_svc_fork/preprocessing/preprocess_speaker_diarization.py] 1 from __future__ import annotations 2 3 from collections import defaultdict 4 from logging import getLogger 5 from pathlib import Path 6 7 import librosa 8 import soundfile as sf 9 import torch 10 from joblib import Parallel, delayed 11 from pyannote.audio import Pipeline 12 from tqdm import tqdm 13 from tqdm_joblib import tqdm_joblib 14 15 LOG = getLogger(__name__) 16 17 18 def _process_one( 19 input_path: Path, 20 output_dir: Path, 21 sr: int, 22 *, 23 min_speakers: int = 1, 24 max_speakers: int = 1, 25 huggingface_token: str | None = None, 26 ) -> None: 27 try: 28 audio, sr = librosa.load(input_path, sr=sr, mono=True) 29 except Exception as e: 30 LOG.warning(f"Failed to read {input_path}: {e}") 31 return 32 pipeline = Pipeline.from_pretrained( 33 "pyannote/speaker-diarization", use_auth_token=huggingface_token 34 ) 35 if pipeline is None: 36 raise ValueError("Failed to load pipeline") 37 pipeline = pipeline.to(torch.device("cuda")) 38 LOG.info(f"Processing {input_path}. This may take a while...") 39 diarization = pipeline( 40 input_path, min_speakers=min_speakers, max_speakers=max_speakers 41 ) 42 43 LOG.info(f"Found {len(diarization)} tracks, writing to {output_dir}") 44 speaker_count = defaultdict(int) 45 46 output_dir.mkdir(parents=True, exist_ok=True) 47 for segment, track, speaker in tqdm( 48 list(diarization.itertracks(yield_label=True)), desc=f"Writing {input_path}" 49 ): 50 if segment.end - segment.start < 1: 51 continue 52 speaker_count[speaker] += 1 53 audio_cut = audio[int(segment.start * sr) : int(segment.end * sr)] 54 sf.write( 55 (output_dir / f"{speaker}_{speaker_count[speaker]}.wav"), 56 audio_cut, 57 sr, 58 ) 59 60 LOG.info(f"Speaker count: {speaker_count}") 61 62 63 def preprocess_speaker_diarization( 64 input_dir: Path | str, 65 output_dir: Path | str, 66 sr: int, 67 *, 68 min_speakers: int = 1, 69 max_speakers: int = 1, 70 huggingface_token: str | None = None, 71 n_jobs: int = -1, 72 ) -> None: 73 if huggingface_token is not None and not huggingface_token.startswith("hf_"): 74 LOG.warning("Huggingface token probably should start with hf_") 75 if not torch.cuda.is_available(): 76 LOG.warning("CUDA is not available. 
This will be extremely slow.") 77 input_dir = Path(input_dir) 78 output_dir = Path(output_dir) 79 input_dir.mkdir(parents=True, exist_ok=True) 80 output_dir.mkdir(parents=True, exist_ok=True) 81 input_paths = list(input_dir.rglob("*.*")) 82 with tqdm_joblib(desc="Preprocessing speaker diarization", total=len(input_paths)): 83 Parallel(n_jobs=n_jobs)( 84 delayed(_process_one)( 85 input_path, 86 output_dir / input_path.relative_to(input_dir).parent / input_path.stem, 87 sr, 88 max_speakers=max_speakers, 89 min_speakers=min_speakers, 90 huggingface_token=huggingface_token, 91 ) 92 for input_path in input_paths 93 ) 94 [end of src/so_vits_svc_fork/preprocessing/preprocess_speaker_diarization.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/so_vits_svc_fork/preprocessing/preprocess_speaker_diarization.py b/src/so_vits_svc_fork/preprocessing/preprocess_speaker_diarization.py --- a/src/so_vits_svc_fork/preprocessing/preprocess_speaker_diarization.py +++ b/src/so_vits_svc_fork/preprocessing/preprocess_speaker_diarization.py @@ -52,7 +52,7 @@ speaker_count[speaker] += 1 audio_cut = audio[int(segment.start * sr) : int(segment.end * sr)] sf.write( - (output_dir / f"{speaker}_{speaker_count[speaker]}.wav"), + (output_dir / f"{speaker}_{speaker_count[speaker]:04d}.wav"), audio_cut, sr, )
{"golden_diff": "diff --git a/src/so_vits_svc_fork/preprocessing/preprocess_speaker_diarization.py b/src/so_vits_svc_fork/preprocessing/preprocess_speaker_diarization.py\n--- a/src/so_vits_svc_fork/preprocessing/preprocess_speaker_diarization.py\n+++ b/src/so_vits_svc_fork/preprocessing/preprocess_speaker_diarization.py\n@@ -52,7 +52,7 @@\n speaker_count[speaker] += 1\n audio_cut = audio[int(segment.start * sr) : int(segment.end * sr)]\n sf.write(\n- (output_dir / f\"{speaker}_{speaker_count[speaker]}.wav\"),\n+ (output_dir / f\"{speaker}_{speaker_count[speaker]:04d}.wav\"),\n audio_cut,\n sr,\n )\n", "issue": "Ensure proper sorting by name for output files\n### Is your feature request related to a problem? Please describe.\n\nTo enhance readability and ensure proper sorting by name, the numeric part of output file names should have a fixed width. This can be achieved by adding leading zeros to the numeric part, with four digits likely being sufficient.\n\n### Describe alternatives you've considered\n\nI don't have any.\n\n### Additional context\n\n_No response_\n\n### Code of Conduct\n\n- [X] I agree to follow this project's Code of Conduct\n\n### Are you willing to resolve this issue by submitting a Pull Request?\n\n- [X] Yes, I have the time, and I know how to start.\n- [ ] Yes, I have the time, but I don't know how to start. I would need guidance.\n- [ ] No, I don't have the time, although I believe I could do it if I had the time...\n- [ ] No, I don't have the time and I wouldn't even know how to start.\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom collections import defaultdict\nfrom logging import getLogger\nfrom pathlib import Path\n\nimport librosa\nimport soundfile as sf\nimport torch\nfrom joblib import Parallel, delayed\nfrom pyannote.audio import Pipeline\nfrom tqdm import tqdm\nfrom tqdm_joblib import tqdm_joblib\n\nLOG = getLogger(__name__)\n\n\ndef _process_one(\n input_path: Path,\n output_dir: Path,\n sr: int,\n *,\n min_speakers: int = 1,\n max_speakers: int = 1,\n huggingface_token: str | None = None,\n) -> None:\n try:\n audio, sr = librosa.load(input_path, sr=sr, mono=True)\n except Exception as e:\n LOG.warning(f\"Failed to read {input_path}: {e}\")\n return\n pipeline = Pipeline.from_pretrained(\n \"pyannote/speaker-diarization\", use_auth_token=huggingface_token\n )\n if pipeline is None:\n raise ValueError(\"Failed to load pipeline\")\n pipeline = pipeline.to(torch.device(\"cuda\"))\n LOG.info(f\"Processing {input_path}. 
This may take a while...\")\n diarization = pipeline(\n input_path, min_speakers=min_speakers, max_speakers=max_speakers\n )\n\n LOG.info(f\"Found {len(diarization)} tracks, writing to {output_dir}\")\n speaker_count = defaultdict(int)\n\n output_dir.mkdir(parents=True, exist_ok=True)\n for segment, track, speaker in tqdm(\n list(diarization.itertracks(yield_label=True)), desc=f\"Writing {input_path}\"\n ):\n if segment.end - segment.start < 1:\n continue\n speaker_count[speaker] += 1\n audio_cut = audio[int(segment.start * sr) : int(segment.end * sr)]\n sf.write(\n (output_dir / f\"{speaker}_{speaker_count[speaker]}.wav\"),\n audio_cut,\n sr,\n )\n\n LOG.info(f\"Speaker count: {speaker_count}\")\n\n\ndef preprocess_speaker_diarization(\n input_dir: Path | str,\n output_dir: Path | str,\n sr: int,\n *,\n min_speakers: int = 1,\n max_speakers: int = 1,\n huggingface_token: str | None = None,\n n_jobs: int = -1,\n) -> None:\n if huggingface_token is not None and not huggingface_token.startswith(\"hf_\"):\n LOG.warning(\"Huggingface token probably should start with hf_\")\n if not torch.cuda.is_available():\n LOG.warning(\"CUDA is not available. This will be extremely slow.\")\n input_dir = Path(input_dir)\n output_dir = Path(output_dir)\n input_dir.mkdir(parents=True, exist_ok=True)\n output_dir.mkdir(parents=True, exist_ok=True)\n input_paths = list(input_dir.rglob(\"*.*\"))\n with tqdm_joblib(desc=\"Preprocessing speaker diarization\", total=len(input_paths)):\n Parallel(n_jobs=n_jobs)(\n delayed(_process_one)(\n input_path,\n output_dir / input_path.relative_to(input_dir).parent / input_path.stem,\n sr,\n max_speakers=max_speakers,\n min_speakers=min_speakers,\n huggingface_token=huggingface_token,\n )\n for input_path in input_paths\n )\n", "path": "src/so_vits_svc_fork/preprocessing/preprocess_speaker_diarization.py"}]}
prompt tokens: 1,677 | diff tokens: 176
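Aside on the so-vits-svc-fork record above: the entire accepted change is the format spec `:04d`, which exists so that diarization chunks sort correctly by file name. The self-contained snippet below illustrates the problem and the fix; `SPEAKER_00` is just a pyannote-style example label, not output from a real run.

```python
# Unpadded counters sort lexicographically: 1, 10, 11, 2, ...
unpadded = sorted(f"SPEAKER_00_{i}.wav" for i in (1, 2, 10, 11))
print(unpadded)
# ['SPEAKER_00_1.wav', 'SPEAKER_00_10.wav', 'SPEAKER_00_11.wav', 'SPEAKER_00_2.wav']

# Zero-padding to a fixed width (four digits, as in the patch) restores the
# expected order for anything up to 9999 segments per speaker.
padded = sorted(f"SPEAKER_00_{i:04d}.wav" for i in (1, 2, 10, 11))
print(padded)
# ['SPEAKER_00_0001.wav', 'SPEAKER_00_0002.wav', 'SPEAKER_00_0010.wav', 'SPEAKER_00_0011.wav']
```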
record: gh_patches_debug_30960 | source: rasdani/github-patches | task: git_diff | upstream issue: spack__spack-7852
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> PMIx build fails on aarch64 PMIx up through version 2.1.0 has a bug in its test code that causes problems on strict alignment architectures like aarch64. (The issue is explained in this commit: pmix/pmix@3a8bc4503a04158e3be072c92eb0253c6c77e534) The fix is included in the 2.1.1 release. The issue is exacerbated by building with optimizations turned on (which Spack does by default); it doesn't seem to happen when compiling with ```-O0```. Since the issue is in test code, the work-around is to just not build that code. ### Expected Result The install should work. ### Actual Result The install fails attempting to link pmix_test. ### Steps to reproduce the issue On an aarch64 machine: ```console $ spack install -j 56 pmix %[email protected] ``` ### Information on your system 1. HPE (pre-production) Apache servers (w/ Cavium ThunderX2 CPU's) 1. RHEL 7.4 1. GCC 7.3.0 (built by Spack) and GCC 4.8.5 (RHEL 7 default version) </issue> <code> [start of var/spack/repos/builtin/packages/pmix/package.py] 1 ############################################################################## 2 # Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC. 3 # Produced at the Lawrence Livermore National Laboratory. 4 # 5 # This file is part of Spack. 6 # Created by Todd Gamblin, [email protected], All rights reserved. 7 # LLNL-CODE-647188 8 # 9 # For details, see https://github.com/spack/spack 10 # Please also see the NOTICE and LICENSE files for our notice and the LGPL. 11 # 12 # This program is free software; you can redistribute it and/or modify 13 # it under the terms of the GNU Lesser General Public License (as 14 # published by the Free Software Foundation) version 2.1, February 1999. 15 # 16 # This program is distributed in the hope that it will be useful, but 17 # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF 18 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and 19 # conditions of the GNU Lesser General Public License for more details. 20 # 21 # You should have received a copy of the GNU Lesser General Public 22 # License along with this program; if not, write to the Free Software 23 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 24 ############################################################################## 25 26 from spack import * 27 28 29 class Pmix(AutotoolsPackage): 30 """The Process Management Interface (PMI) has been used for quite some time 31 as a means of exchanging wireup information needed for interprocess 32 communication. Two versions (PMI-1 and PMI-2) have been released as part 33 of the MPICH effort. While PMI-2 demonstrates better scaling properties 34 than its PMI-1 predecessor, attaining rapid launch and wireup of the 35 roughly 1M processes executing across 100k nodes expected for exascale 36 operations remains challenging. PMI Exascale (PMIx) represents an 37 attempt to resolve these questions by providing an extended version 38 of the PMI definitions specifically designed to support clusters up 39 to and including exascale sizes. 
The overall objective of the project 40 is not to branch the existing definitions - in fact, PMIx fully 41 supports both of the existing PMI-1 and PMI-2 APIs - but rather to 42 (a) augment and extend those APIs to eliminate some current restrictions 43 that impact scalability, (b) establish a standards-like body for 44 maintaining the definitions, and (c) provide a reference implementation 45 of the PMIx standard that demonstrates the desired level of 46 scalability.""" 47 48 homepage = "https://pmix.github.io/pmix" 49 url = "https://github.com/pmix/pmix/releases/download/v2.0.1/pmix-2.0.1.tar.bz2" 50 51 version('2.1.0', 'fc97513b601d78fe7c6bb20c6a21df3c') 52 version('2.0.2', 'e3ed1deed87c84f9b43da2621c6ad689') 53 version('2.0.1', 'ba3193b485843516e6b4e8641e443b1e') 54 version('2.0.0', '3e047c2ea0ba8ee9925ed92b205fd92e') 55 version('1.2.4', '242a812e206e7c5948f1f5c9688eb2a7') 56 version('1.2.3', '102b1cc650018b62348b45d572b158e9') 57 version('1.2.2', 'd85c8fd437bd88f984549425ad369e9f') 58 version('1.2.1', 'f090f524681c52001ea2db3b0285596f') 59 version('1.2.0', '6a42472d5a32e1c31ce5da19d50fc21a') 60 61 depends_on('libevent') 62 63 def configure_args(self): 64 spec = self.spec 65 config_args = [ 66 '--enable-shared', 67 '--enable-static' 68 ] 69 70 # external libevent support (needed to keep Open MPI happy) 71 config_args.append( 72 '--with-libevent={0}'.format(spec['libevent'].prefix)) 73 74 return config_args 75 [end of var/spack/repos/builtin/packages/pmix/package.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/var/spack/repos/builtin/packages/pmix/package.py b/var/spack/repos/builtin/packages/pmix/package.py --- a/var/spack/repos/builtin/packages/pmix/package.py +++ b/var/spack/repos/builtin/packages/pmix/package.py @@ -24,6 +24,7 @@ ############################################################################## from spack import * +import spack.architecture class Pmix(AutotoolsPackage): @@ -48,6 +49,7 @@ homepage = "https://pmix.github.io/pmix" url = "https://github.com/pmix/pmix/releases/download/v2.0.1/pmix-2.0.1.tar.bz2" + version('2.1.1', 'f9f109421661b757245d5e0bd44a38b3') version('2.1.0', 'fc97513b601d78fe7c6bb20c6a21df3c') version('2.0.2', 'e3ed1deed87c84f9b43da2621c6ad689') version('2.0.1', 'ba3193b485843516e6b4e8641e443b1e') @@ -61,6 +63,7 @@ depends_on('libevent') def configure_args(self): + spec = self.spec config_args = [ '--enable-shared', @@ -71,4 +74,11 @@ config_args.append( '--with-libevent={0}'.format(spec['libevent'].prefix)) + # Versions < 2.1.1 have a bug in the test code that *sometimes* + # causes problems on strict alignment architectures such as + # aarch64. Work-around is to just not build the test code. + if 'aarch64' in spack.architecture.sys_type() and \ + self.spec.version < Version('2.1.1'): + config_args.append('--without-tests-examples') + return config_args
{"golden_diff": "diff --git a/var/spack/repos/builtin/packages/pmix/package.py b/var/spack/repos/builtin/packages/pmix/package.py\n--- a/var/spack/repos/builtin/packages/pmix/package.py\n+++ b/var/spack/repos/builtin/packages/pmix/package.py\n@@ -24,6 +24,7 @@\n ##############################################################################\n \n from spack import *\n+import spack.architecture\n \n \n class Pmix(AutotoolsPackage):\n@@ -48,6 +49,7 @@\n homepage = \"https://pmix.github.io/pmix\"\n url = \"https://github.com/pmix/pmix/releases/download/v2.0.1/pmix-2.0.1.tar.bz2\"\n \n+ version('2.1.1', 'f9f109421661b757245d5e0bd44a38b3')\n version('2.1.0', 'fc97513b601d78fe7c6bb20c6a21df3c')\n version('2.0.2', 'e3ed1deed87c84f9b43da2621c6ad689')\n version('2.0.1', 'ba3193b485843516e6b4e8641e443b1e')\n@@ -61,6 +63,7 @@\n depends_on('libevent')\n \n def configure_args(self):\n+\n spec = self.spec\n config_args = [\n '--enable-shared',\n@@ -71,4 +74,11 @@\n config_args.append(\n '--with-libevent={0}'.format(spec['libevent'].prefix))\n \n+ # Versions < 2.1.1 have a bug in the test code that *sometimes*\n+ # causes problems on strict alignment architectures such as\n+ # aarch64. Work-around is to just not build the test code.\n+ if 'aarch64' in spack.architecture.sys_type() and \\\n+ self.spec.version < Version('2.1.1'):\n+ config_args.append('--without-tests-examples')\n+\n return config_args\n", "issue": "PMIx build fails on aarch64\n\r\nPMIx up through version 2.1.0 has a bug in its test code that causes problems on strict alignment architectures like aarch64. (The issue is explained in this commit: pmix/pmix@3a8bc4503a04158e3be072c92eb0253c6c77e534) The fix is included in the 2.1.1 release. The issue is exacerbated by building with optimizations turned on (which Spack does by default); it doesn't seem to happen when compiling with ```-O0```.\r\n\r\nSince the issue is in test code, the work-around is to just not build that code.\r\n### Expected Result\r\n\r\nThe install should work.\r\n\r\n### Actual Result\r\n\r\nThe install fails attempting to link pmix_test.\r\n\r\n### Steps to reproduce the issue\r\n\r\nOn an aarch64 machine:\r\n```console\r\n$ spack install -j 56 pmix %[email protected]\r\n```\r\n\r\n### Information on your system\r\n\r\n1. HPE (pre-production) Apache servers (w/ Cavium ThunderX2 CPU's)\r\n1. RHEL 7.4\r\n1. GCC 7.3.0 (built by Spack) and GCC 4.8.5 (RHEL 7 default version)\r\n\n", "before_files": [{"content": "##############################################################################\n# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.\n# Produced at the Lawrence Livermore National Laboratory.\n#\n# This file is part of Spack.\n# Created by Todd Gamblin, [email protected], All rights reserved.\n# LLNL-CODE-647188\n#\n# For details, see https://github.com/spack/spack\n# Please also see the NOTICE and LICENSE files for our notice and the LGPL.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License (as\n# published by the Free Software Foundation) version 2.1, February 1999.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the terms and\n# conditions of the GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n##############################################################################\n\nfrom spack import *\n\n\nclass Pmix(AutotoolsPackage):\n \"\"\"The Process Management Interface (PMI) has been used for quite some time\n as a means of exchanging wireup information needed for interprocess\n communication. Two versions (PMI-1 and PMI-2) have been released as part\n of the MPICH effort. While PMI-2 demonstrates better scaling properties\n than its PMI-1 predecessor, attaining rapid launch and wireup of the\n roughly 1M processes executing across 100k nodes expected for exascale\n operations remains challenging. PMI Exascale (PMIx) represents an\n attempt to resolve these questions by providing an extended version\n of the PMI definitions specifically designed to support clusters up\n to and including exascale sizes. The overall objective of the project\n is not to branch the existing definitions - in fact, PMIx fully\n supports both of the existing PMI-1 and PMI-2 APIs - but rather to\n (a) augment and extend those APIs to eliminate some current restrictions\n that impact scalability, (b) establish a standards-like body for\n maintaining the definitions, and (c) provide a reference implementation\n of the PMIx standard that demonstrates the desired level of\n scalability.\"\"\"\n\n homepage = \"https://pmix.github.io/pmix\"\n url = \"https://github.com/pmix/pmix/releases/download/v2.0.1/pmix-2.0.1.tar.bz2\"\n\n version('2.1.0', 'fc97513b601d78fe7c6bb20c6a21df3c')\n version('2.0.2', 'e3ed1deed87c84f9b43da2621c6ad689')\n version('2.0.1', 'ba3193b485843516e6b4e8641e443b1e')\n version('2.0.0', '3e047c2ea0ba8ee9925ed92b205fd92e')\n version('1.2.4', '242a812e206e7c5948f1f5c9688eb2a7')\n version('1.2.3', '102b1cc650018b62348b45d572b158e9')\n version('1.2.2', 'd85c8fd437bd88f984549425ad369e9f')\n version('1.2.1', 'f090f524681c52001ea2db3b0285596f')\n version('1.2.0', '6a42472d5a32e1c31ce5da19d50fc21a')\n\n depends_on('libevent')\n\n def configure_args(self):\n spec = self.spec\n config_args = [\n '--enable-shared',\n '--enable-static'\n ]\n\n # external libevent support (needed to keep Open MPI happy)\n config_args.append(\n '--with-libevent={0}'.format(spec['libevent'].prefix))\n\n return config_args\n", "path": "var/spack/repos/builtin/packages/pmix/package.py"}]}
prompt tokens: 2,051 | diff tokens: 500
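Aside on the Spack/pmix record above: because the misaligned access lives only in PMIx's test programs, the accepted patch simply skips building them (`--without-tests-examples`) when the target is aarch64 and the release predates the upstream 2.1.1 fix. The sketch below restates that guard in plain Python; tuples stand in for Spack's `Version` objects and the sys-type strings are invented examples, so treat it as an illustration rather than package code.

```python
# Sketch only: reproduce the patch's aarch64/version guard outside Spack.
def pmix_extra_args(sys_type, version):
    args = []
    if "aarch64" in sys_type and version < (2, 1, 1):
        args.append("--without-tests-examples")  # skip the buggy test code
    return args

print(pmix_extra_args("linux-rhel7-aarch64", (2, 1, 0)))  # ['--without-tests-examples']
print(pmix_extra_args("linux-rhel7-aarch64", (2, 1, 1)))  # []
```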
record: gh_patches_debug_2385 | source: rasdani/github-patches | task: git_diff | upstream issue: ansible__awx-13645
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Websocket not working at non-root path ### Please confirm the following - [X] I agree to follow this project's [code of conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html). - [X] I have checked the [current issues](https://github.com/ansible/awx/issues) for duplicates. - [X] I understand that AWX is open source software provided for free and that I might not receive a timely response. ### Summary Changes from #11342 and #652 are not full ### AWX version 21.0.0 ### Select the relevant components - [X] UI - [ ] API - [ ] Docs ### Installation method kubernetes ### Modifications no ### Ansible version _No response_ ### Operating system _No response_ ### Web browser _No response_ ### Steps to reproduce Deploy AWX with custom `ingress_path: /awx` ### Expected results websocket should work ### Actual results `2022-05-17 08:46:41,031 ERROR [-] daphne.ws_protocol [Failure instance: Traceback: <class 'ValueError'>: No route found for path 'awx/websocket/'. /var/lib/awx/venv/awx/lib64/python3.9/site-packages/autobahn/websocket/protocol.py:2841:processHandshake /var/lib/awx/venv/awx/lib64/python3.9/site-packages/txaio/tx.py:366:as_future /var/lib/awx/venv/awx/lib64/python3.9/site-packages/twisted/internet/defer.py:151:maybeDeferred /var/lib/awx/venv/awx/lib64/python3.9/site-packages/daphne/ws_protocol.py:72:onConnect --- <exception caught here> --- /var/lib/awx/venv/awx/lib64/python3.9/site-packages/twisted/internet/defer.py:151:maybeDeferred /var/lib/awx/venv/awx/lib64/python3.9/site-packages/daphne/server.py:201:create_application /var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/routing.py:54:__call__ /var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/sessions.py:47:__call__ /var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/sessions.py:145:__call__ /var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/sessions.py:169:__init__ /var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/middleware.py:31:__call__ /var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/routing.py:150:__call__ ] 2022-05-17 08:46:41,031 ERROR [Failure instance: Traceback: <class 'ValueError'>: No route found for path 'awx/websocket/'. 
/var/lib/awx/venv/awx/lib64/python3.9/site-packages/autobahn/websocket/protocol.py:2841:processHandshake /var/lib/awx/venv/awx/lib64/python3.9/site-packages/txaio/tx.py:366:as_future /var/lib/awx/venv/awx/lib64/python3.9/site-packages/twisted/internet/defer.py:151:maybeDeferred /var/lib/awx/venv/awx/lib64/python3.9/site-packages/daphne/ws_protocol.py:72:onConnect --- <exception caught here> --- /var/lib/awx/venv/awx/lib64/python3.9/site-packages/twisted/internet/defer.py:151:maybeDeferred /var/lib/awx/venv/awx/lib64/python3.9/site-packages/daphne/server.py:201:create_application /var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/routing.py:54:__call__ /var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/sessions.py:47:__call__ /var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/sessions.py:145:__call__ /var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/sessions.py:169:__init__ /var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/middleware.py:31:__call__ /var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/routing.py:150:__call__` ### Additional information It seems that issue is in https://github.com/ansible/awx/blob/48b016802c517ff04d1cff4c43e64f17bb77a7a8/awx/main/routing.py ``` websocket_urlpatterns = [ re_path(r'websocket/$', consumers.EventConsumer), re_path(r'websocket/broadcast/$', consumers.BroadcastConsumer), ] ``` From https://docs.djangoproject.com/en/4.0/ref/urls/: When a route ends with $ the whole requested URL, matching against path_info, must match the regular expression pattern (re.fullmatch() is used). Replacing with ``` websocket_urlpatterns = [ re_path(r'websocket/', consumers.EventConsumer), re_path(r'websocket/broadcast/', consumers.BroadcastConsumer), ] ``` solves the issue </issue> <code> [start of awx/main/routing.py] 1 import redis 2 import logging 3 4 from django.conf import settings 5 from django.urls import re_path 6 7 from channels.auth import AuthMiddlewareStack 8 from channels.routing import ProtocolTypeRouter, URLRouter 9 10 from . import consumers 11 12 13 logger = logging.getLogger('awx.main.routing') 14 15 16 class AWXProtocolTypeRouter(ProtocolTypeRouter): 17 def __init__(self, *args, **kwargs): 18 try: 19 r = redis.Redis.from_url(settings.BROKER_URL) 20 for k in r.scan_iter('asgi:*', 500): 21 logger.debug(f"cleaning up Redis key {k}") 22 r.delete(k) 23 except redis.exceptions.RedisError as e: 24 logger.warning("encountered an error communicating with redis.") 25 raise e 26 super().__init__(*args, **kwargs) 27 28 29 websocket_urlpatterns = [ 30 re_path(r'websocket/', consumers.EventConsumer.as_asgi()), 31 re_path(r'websocket/broadcast/', consumers.BroadcastConsumer.as_asgi()), 32 ] 33 34 application = AWXProtocolTypeRouter( 35 { 36 'websocket': AuthMiddlewareStack(URLRouter(websocket_urlpatterns)), 37 } 38 ) 39 [end of awx/main/routing.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/awx/main/routing.py b/awx/main/routing.py --- a/awx/main/routing.py +++ b/awx/main/routing.py @@ -27,8 +27,8 @@ websocket_urlpatterns = [ - re_path(r'websocket/', consumers.EventConsumer.as_asgi()), - re_path(r'websocket/broadcast/', consumers.BroadcastConsumer.as_asgi()), + re_path(r'websocket/$', consumers.EventConsumer.as_asgi()), + re_path(r'websocket/broadcast/$', consumers.BroadcastConsumer.as_asgi()), ] application = AWXProtocolTypeRouter(
{"golden_diff": "diff --git a/awx/main/routing.py b/awx/main/routing.py\n--- a/awx/main/routing.py\n+++ b/awx/main/routing.py\n@@ -27,8 +27,8 @@\n \n \n websocket_urlpatterns = [\n- re_path(r'websocket/', consumers.EventConsumer.as_asgi()),\n- re_path(r'websocket/broadcast/', consumers.BroadcastConsumer.as_asgi()),\n+ re_path(r'websocket/$', consumers.EventConsumer.as_asgi()),\n+ re_path(r'websocket/broadcast/$', consumers.BroadcastConsumer.as_asgi()),\n ]\n \n application = AWXProtocolTypeRouter(\n", "issue": "Websocket not working at non-root path\n### Please confirm the following\n\n- [X] I agree to follow this project's [code of conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html).\n- [X] I have checked the [current issues](https://github.com/ansible/awx/issues) for duplicates.\n- [X] I understand that AWX is open source software provided for free and that I might not receive a timely response.\n\n### Summary\n\nChanges from #11342 and #652 are not full\n\n### AWX version\n\n21.0.0\n\n### Select the relevant components\n\n- [X] UI\n- [ ] API\n- [ ] Docs\n\n### Installation method\n\nkubernetes\n\n### Modifications\n\nno\n\n### Ansible version\n\n_No response_\n\n### Operating system\n\n_No response_\n\n### Web browser\n\n_No response_\n\n### Steps to reproduce\n\nDeploy AWX with custom `ingress_path: /awx`\n\n### Expected results\n\nwebsocket should work\n\n### Actual results\n\n`2022-05-17 08:46:41,031 ERROR [-] daphne.ws_protocol [Failure instance: Traceback: <class 'ValueError'>: No route found for path 'awx/websocket/'.\r\n/var/lib/awx/venv/awx/lib64/python3.9/site-packages/autobahn/websocket/protocol.py:2841:processHandshake\r\n/var/lib/awx/venv/awx/lib64/python3.9/site-packages/txaio/tx.py:366:as_future\r\n/var/lib/awx/venv/awx/lib64/python3.9/site-packages/twisted/internet/defer.py:151:maybeDeferred\r\n/var/lib/awx/venv/awx/lib64/python3.9/site-packages/daphne/ws_protocol.py:72:onConnect\r\n--- <exception caught here> ---\r\n/var/lib/awx/venv/awx/lib64/python3.9/site-packages/twisted/internet/defer.py:151:maybeDeferred\r\n/var/lib/awx/venv/awx/lib64/python3.9/site-packages/daphne/server.py:201:create_application\r\n/var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/routing.py:54:__call__\r\n/var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/sessions.py:47:__call__\r\n/var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/sessions.py:145:__call__\r\n/var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/sessions.py:169:__init__\r\n/var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/middleware.py:31:__call__\r\n/var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/routing.py:150:__call__\r\n]\r\n2022-05-17 08:46:41,031 ERROR [Failure instance: Traceback: <class 'ValueError'>: No route found for path 'awx/websocket/'.\r\n/var/lib/awx/venv/awx/lib64/python3.9/site-packages/autobahn/websocket/protocol.py:2841:processHandshake\r\n/var/lib/awx/venv/awx/lib64/python3.9/site-packages/txaio/tx.py:366:as_future\r\n/var/lib/awx/venv/awx/lib64/python3.9/site-packages/twisted/internet/defer.py:151:maybeDeferred\r\n/var/lib/awx/venv/awx/lib64/python3.9/site-packages/daphne/ws_protocol.py:72:onConnect\r\n--- <exception caught here> 
---\r\n/var/lib/awx/venv/awx/lib64/python3.9/site-packages/twisted/internet/defer.py:151:maybeDeferred\r\n/var/lib/awx/venv/awx/lib64/python3.9/site-packages/daphne/server.py:201:create_application\r\n/var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/routing.py:54:__call__\r\n/var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/sessions.py:47:__call__\r\n/var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/sessions.py:145:__call__\r\n/var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/sessions.py:169:__init__\r\n/var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/middleware.py:31:__call__\r\n/var/lib/awx/venv/awx/lib64/python3.9/site-packages/channels/routing.py:150:__call__`\n\n### Additional information\n\nIt seems that issue is in https://github.com/ansible/awx/blob/48b016802c517ff04d1cff4c43e64f17bb77a7a8/awx/main/routing.py\r\n\r\n```\r\nwebsocket_urlpatterns = [\r\n re_path(r'websocket/$', consumers.EventConsumer),\r\n re_path(r'websocket/broadcast/$', consumers.BroadcastConsumer),\r\n]\r\n```\r\nFrom https://docs.djangoproject.com/en/4.0/ref/urls/:\r\n\r\nWhen a route ends with $ the whole requested URL, matching against path_info, must match the regular expression pattern (re.fullmatch() is used).\r\n\r\nReplacing with\r\n\r\n```\r\nwebsocket_urlpatterns = [\r\n re_path(r'websocket/', consumers.EventConsumer),\r\n re_path(r'websocket/broadcast/', consumers.BroadcastConsumer),\r\n]\r\n```\r\n\r\nsolves the issue\n", "before_files": [{"content": "import redis\nimport logging\n\nfrom django.conf import settings\nfrom django.urls import re_path\n\nfrom channels.auth import AuthMiddlewareStack\nfrom channels.routing import ProtocolTypeRouter, URLRouter\n\nfrom . import consumers\n\n\nlogger = logging.getLogger('awx.main.routing')\n\n\nclass AWXProtocolTypeRouter(ProtocolTypeRouter):\n def __init__(self, *args, **kwargs):\n try:\n r = redis.Redis.from_url(settings.BROKER_URL)\n for k in r.scan_iter('asgi:*', 500):\n logger.debug(f\"cleaning up Redis key {k}\")\n r.delete(k)\n except redis.exceptions.RedisError as e:\n logger.warning(\"encountered an error communicating with redis.\")\n raise e\n super().__init__(*args, **kwargs)\n\n\nwebsocket_urlpatterns = [\n re_path(r'websocket/', consumers.EventConsumer.as_asgi()),\n re_path(r'websocket/broadcast/', consumers.BroadcastConsumer.as_asgi()),\n]\n\napplication = AWXProtocolTypeRouter(\n {\n 'websocket': AuthMiddlewareStack(URLRouter(websocket_urlpatterns)),\n }\n)\n", "path": "awx/main/routing.py"}]}
2188
132
gh_patches_debug_38974
rasdani/github-patches
git_diff
frappe__frappe-18020
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> DocType Layout Fields table does not populate ## Description of the issue When selecting a Document Type in a DocType Layout document the Fields table does not populate. The document must be saved twice in order to populate the fields, because the first save resulting in an error message. Obviously this would lead people (like myself) to believe the DocType Layout feature is non-functional. Here is an image demonstrating the problems: ![image](https://user-images.githubusercontent.com/16672299/159814715-b8642de3-97c7-4fee-b151-2ee18fef3ecd.png) ## Context information (for bug reports) **Output of `bench version`** ``` ERPNext: v13.22.0 (version-13) Frappe Framework: v13.22.0 (version-13) ``` ## Steps to reproduce the issue 1. Add a new DocType Layout document 2. Enter a value in Document Type ### Observed result - Fields table does not populate. ### Expected result Something more like this ![image](https://user-images.githubusercontent.com/16672299/159814647-dccba2a6-61be-4796-8431-325c18e24abb.png) ## Additional information From what I can tell, there are two issues at play: 1. The document_type field expects no rows to exist (but a blank row is auto-generated) 2. The method neglects to refresh the Fields table </issue> <code> [start of frappe/custom/doctype/doctype_layout/doctype_layout.py] 1 # Copyright (c) 2020, Frappe Technologies and contributors 2 # License: MIT. See LICENSE 3 4 from frappe.desk.utils import slug 5 from frappe.model.document import Document 6 7 8 class DocTypeLayout(Document): 9 def validate(self): 10 if not self.route: 11 self.route = slug(self.name) 12 [end of frappe/custom/doctype/doctype_layout/doctype_layout.py] [start of frappe/custom/doctype/custom_field/custom_field.py] 1 # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors 2 # License: MIT. 
See LICENSE 3 4 import json 5 6 import frappe 7 from frappe import _ 8 from frappe.model import core_doctypes_list 9 from frappe.model.docfield import supports_translation 10 from frappe.model.document import Document 11 from frappe.query_builder.functions import IfNull 12 from frappe.utils import cstr 13 14 15 class CustomField(Document): 16 def autoname(self): 17 self.set_fieldname() 18 self.name = self.dt + "-" + self.fieldname 19 20 def set_fieldname(self): 21 if not self.fieldname: 22 label = self.label 23 if not label: 24 if self.fieldtype in ["Section Break", "Column Break", "Tab Break"]: 25 label = self.fieldtype + "_" + str(self.idx) 26 else: 27 frappe.throw(_("Label is mandatory")) 28 29 # remove special characters from fieldname 30 self.fieldname = "".join( 31 filter(lambda x: x.isdigit() or x.isalpha() or "_", cstr(label).replace(" ", "_")) 32 ) 33 34 # fieldnames should be lowercase 35 self.fieldname = self.fieldname.lower() 36 37 def before_insert(self): 38 self.set_fieldname() 39 40 def validate(self): 41 # these imports have been added to avoid cyclical import, should fix in future 42 from frappe.core.doctype.doctype.doctype import check_fieldname_conflicts 43 from frappe.custom.doctype.customize_form.customize_form import CustomizeForm 44 45 # don't always get meta to improve performance 46 # setting idx is just an improvement, not a requirement 47 if self.is_new() or self.insert_after == "append": 48 meta = frappe.get_meta(self.dt, cached=False) 49 fieldnames = [df.fieldname for df in meta.get("fields")] 50 51 if self.is_new() and self.fieldname in fieldnames: 52 frappe.throw( 53 _("A field with the name {0} already exists in {1}").format( 54 frappe.bold(self.fieldname), self.dt 55 ) 56 ) 57 58 if self.insert_after == "append": 59 self.insert_after = fieldnames[-1] 60 61 if self.insert_after and self.insert_after in fieldnames: 62 self.idx = fieldnames.index(self.insert_after) + 1 63 64 if ( 65 not self.is_virtual 66 and (doc_before_save := self.get_doc_before_save()) 67 and (old_fieldtype := doc_before_save.fieldtype) != self.fieldtype 68 and not CustomizeForm.allow_fieldtype_change(old_fieldtype, self.fieldtype) 69 ): 70 frappe.throw( 71 _("Fieldtype cannot be changed from {0} to {1}").format(old_fieldtype, self.fieldtype) 72 ) 73 74 if not self.fieldname: 75 frappe.throw(_("Fieldname not set for Custom Field")) 76 77 if self.get("translatable", 0) and not supports_translation(self.fieldtype): 78 self.translatable = 0 79 80 check_fieldname_conflicts(self) 81 82 def on_update(self): 83 # validate field 84 if not self.flags.ignore_validate: 85 from frappe.core.doctype.doctype.doctype import validate_fields_for_doctype 86 87 validate_fields_for_doctype(self.dt) 88 89 # clear cache and update the schema 90 if not frappe.flags.in_create_custom_fields: 91 frappe.clear_cache(doctype=self.dt) 92 frappe.db.updatedb(self.dt) 93 94 def on_trash(self): 95 # check if Admin owned field 96 if self.owner == "Administrator" and frappe.session.user != "Administrator": 97 frappe.throw( 98 _( 99 "Custom Field {0} is created by the Administrator and can only be deleted through the Administrator account." 
100 ).format(frappe.bold(self.label)) 101 ) 102 103 # delete property setter entries 104 frappe.db.delete("Property Setter", {"doc_type": self.dt, "field_name": self.fieldname}) 105 frappe.clear_cache(doctype=self.dt) 106 107 def validate_insert_after(self, meta): 108 if not meta.get_field(self.insert_after): 109 frappe.throw( 110 _( 111 "Insert After field '{0}' mentioned in Custom Field '{1}', with label '{2}', does not exist" 112 ).format(self.insert_after, self.name, self.label), 113 frappe.DoesNotExistError, 114 ) 115 116 if self.fieldname == self.insert_after: 117 frappe.throw(_("Insert After cannot be set as {0}").format(meta.get_label(self.insert_after))) 118 119 120 @frappe.whitelist() 121 def get_fields_label(doctype=None): 122 meta = frappe.get_meta(doctype) 123 124 if doctype in core_doctypes_list: 125 return frappe.msgprint(_("Custom Fields cannot be added to core DocTypes.")) 126 127 if meta.custom: 128 return frappe.msgprint(_("Custom Fields can only be added to a standard DocType.")) 129 130 return [ 131 {"value": df.fieldname or "", "label": _(df.label or "")} 132 for df in frappe.get_meta(doctype).get("fields") 133 ] 134 135 136 def create_custom_field_if_values_exist(doctype, df): 137 df = frappe._dict(df) 138 if df.fieldname in frappe.db.get_table_columns(doctype) and frappe.db.count( 139 dt=doctype, filters=IfNull(df.fieldname, "") != "" 140 ): 141 create_custom_field(doctype, df) 142 143 144 def create_custom_field(doctype, df, ignore_validate=False, is_system_generated=True): 145 df = frappe._dict(df) 146 if not df.fieldname and df.label: 147 df.fieldname = frappe.scrub(df.label) 148 if not frappe.db.get_value("Custom Field", {"dt": doctype, "fieldname": df.fieldname}): 149 custom_field = frappe.get_doc( 150 { 151 "doctype": "Custom Field", 152 "dt": doctype, 153 "permlevel": 0, 154 "fieldtype": "Data", 155 "hidden": 0, 156 "is_system_generated": is_system_generated, 157 } 158 ) 159 custom_field.update(df) 160 custom_field.flags.ignore_validate = ignore_validate 161 custom_field.insert() 162 return custom_field 163 164 165 def create_custom_fields(custom_fields, ignore_validate=False, update=True): 166 """Add / update multiple custom fields 167 168 :param custom_fields: example `{'Sales Invoice': [dict(fieldname='test')]}`""" 169 170 try: 171 frappe.flags.in_create_custom_fields = True 172 doctypes_to_update = set() 173 174 if frappe.flags.in_setup_wizard: 175 ignore_validate = True 176 177 for doctypes, fields in custom_fields.items(): 178 if isinstance(fields, dict): 179 # only one field 180 fields = [fields] 181 182 if isinstance(doctypes, str): 183 # only one doctype 184 doctypes = (doctypes,) 185 186 for doctype in doctypes: 187 doctypes_to_update.add(doctype) 188 189 for df in fields: 190 field = frappe.db.get_value("Custom Field", {"dt": doctype, "fieldname": df["fieldname"]}) 191 if not field: 192 try: 193 df = df.copy() 194 df["owner"] = "Administrator" 195 create_custom_field(doctype, df, ignore_validate=ignore_validate) 196 197 except frappe.exceptions.DuplicateEntryError: 198 pass 199 200 elif update: 201 custom_field = frappe.get_doc("Custom Field", field) 202 custom_field.flags.ignore_validate = ignore_validate 203 custom_field.update(df) 204 custom_field.save() 205 206 for doctype in doctypes_to_update: 207 frappe.clear_cache(doctype=doctype) 208 frappe.db.updatedb(doctype) 209 210 finally: 211 frappe.flags.in_create_custom_fields = False 212 [end of frappe/custom/doctype/custom_field/custom_field.py] </code> I need you to solve this issue by 
generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/frappe/custom/doctype/custom_field/custom_field.py b/frappe/custom/doctype/custom_field/custom_field.py --- a/frappe/custom/doctype/custom_field/custom_field.py +++ b/frappe/custom/doctype/custom_field/custom_field.py @@ -102,6 +102,20 @@ # delete property setter entries frappe.db.delete("Property Setter", {"doc_type": self.dt, "field_name": self.fieldname}) + + # update doctype layouts + doctype_layouts = frappe.get_all( + "DocType Layout", filters={"document_type": self.dt}, pluck="name" + ) + + for layout in doctype_layouts: + layout_doc = frappe.get_doc("DocType Layout", layout) + for field in layout_doc.fields: + if field.fieldname == self.fieldname: + layout_doc.remove(field) + layout_doc.save() + break + frappe.clear_cache(doctype=self.dt) def validate_insert_after(self, meta): diff --git a/frappe/custom/doctype/doctype_layout/doctype_layout.py b/frappe/custom/doctype/doctype_layout/doctype_layout.py --- a/frappe/custom/doctype/doctype_layout/doctype_layout.py +++ b/frappe/custom/doctype/doctype_layout/doctype_layout.py @@ -1,11 +1,77 @@ # Copyright (c) 2020, Frappe Technologies and contributors # License: MIT. See LICENSE +from typing import TYPE_CHECKING + +import frappe from frappe.desk.utils import slug from frappe.model.document import Document +if TYPE_CHECKING: + from frappe.core.doctype.docfield.docfield import DocField + class DocTypeLayout(Document): def validate(self): if not self.route: self.route = slug(self.name) + + @frappe.whitelist() + def sync_fields(self): + doctype_fields = frappe.get_meta(self.document_type).fields + + if self.is_new(): + added_fields = [field.fieldname for field in doctype_fields] + removed_fields = [] + else: + doctype_fieldnames = {field.fieldname for field in doctype_fields} + layout_fieldnames = {field.fieldname for field in self.fields} + added_fields = list(doctype_fieldnames - layout_fieldnames) + removed_fields = list(layout_fieldnames - doctype_fieldnames) + + if not (added_fields or removed_fields): + return + + added = self.add_fields(added_fields, doctype_fields) + removed = self.remove_fields(removed_fields) + + for index, field in enumerate(self.fields): + field.idx = index + 1 + + return {"added": added, "removed": removed} + + def add_fields(self, added_fields: list[str], doctype_fields: list["DocField"]) -> list[dict]: + added = [] + for field in added_fields: + field_details = next((f for f in doctype_fields if f.fieldname == field), None) + if not field_details: + continue + + # remove 'doctype' data from the DocField to allow adding it to the layout + row = self.append("fields", field_details.as_dict(no_default_fields=True)) + row_data = row.as_dict() + + if field_details.get("insert_after"): + insert_after = next( + (f for f in self.fields if f.fieldname == field_details.insert_after), + None, + ) + + # initialize new row to just after the insert_after field + if insert_after: + self.fields.insert(insert_after.idx, row) + self.fields.pop() + + row_data = {"idx": insert_after.idx + 1, "fieldname": row.fieldname, "label": row.label} + + added.append(row_data) + return added + + def remove_fields(self, removed_fields: list[str]) -> list[dict]: + removed = [] + for field in removed_fields: + field_details = next((f for f in self.fields if f.fieldname == field), None) + if field_details: + self.remove(field_details) + removed.append(field_details.as_dict()) + return removed
{"golden_diff": "diff --git a/frappe/custom/doctype/custom_field/custom_field.py b/frappe/custom/doctype/custom_field/custom_field.py\n--- a/frappe/custom/doctype/custom_field/custom_field.py\n+++ b/frappe/custom/doctype/custom_field/custom_field.py\n@@ -102,6 +102,20 @@\n \n \t\t# delete property setter entries\n \t\tfrappe.db.delete(\"Property Setter\", {\"doc_type\": self.dt, \"field_name\": self.fieldname})\n+\n+\t\t# update doctype layouts\n+\t\tdoctype_layouts = frappe.get_all(\n+\t\t\t\"DocType Layout\", filters={\"document_type\": self.dt}, pluck=\"name\"\n+\t\t)\n+\n+\t\tfor layout in doctype_layouts:\n+\t\t\tlayout_doc = frappe.get_doc(\"DocType Layout\", layout)\n+\t\t\tfor field in layout_doc.fields:\n+\t\t\t\tif field.fieldname == self.fieldname:\n+\t\t\t\t\tlayout_doc.remove(field)\n+\t\t\t\t\tlayout_doc.save()\n+\t\t\t\t\tbreak\n+\n \t\tfrappe.clear_cache(doctype=self.dt)\n \n \tdef validate_insert_after(self, meta):\ndiff --git a/frappe/custom/doctype/doctype_layout/doctype_layout.py b/frappe/custom/doctype/doctype_layout/doctype_layout.py\n--- a/frappe/custom/doctype/doctype_layout/doctype_layout.py\n+++ b/frappe/custom/doctype/doctype_layout/doctype_layout.py\n@@ -1,11 +1,77 @@\n # Copyright (c) 2020, Frappe Technologies and contributors\n # License: MIT. See LICENSE\n \n+from typing import TYPE_CHECKING\n+\n+import frappe\n from frappe.desk.utils import slug\n from frappe.model.document import Document\n \n+if TYPE_CHECKING:\n+\tfrom frappe.core.doctype.docfield.docfield import DocField\n+\n \n class DocTypeLayout(Document):\n \tdef validate(self):\n \t\tif not self.route:\n \t\t\tself.route = slug(self.name)\n+\n+\[email protected]()\n+\tdef sync_fields(self):\n+\t\tdoctype_fields = frappe.get_meta(self.document_type).fields\n+\n+\t\tif self.is_new():\n+\t\t\tadded_fields = [field.fieldname for field in doctype_fields]\n+\t\t\tremoved_fields = []\n+\t\telse:\n+\t\t\tdoctype_fieldnames = {field.fieldname for field in doctype_fields}\n+\t\t\tlayout_fieldnames = {field.fieldname for field in self.fields}\n+\t\t\tadded_fields = list(doctype_fieldnames - layout_fieldnames)\n+\t\t\tremoved_fields = list(layout_fieldnames - doctype_fieldnames)\n+\n+\t\tif not (added_fields or removed_fields):\n+\t\t\treturn\n+\n+\t\tadded = self.add_fields(added_fields, doctype_fields)\n+\t\tremoved = self.remove_fields(removed_fields)\n+\n+\t\tfor index, field in enumerate(self.fields):\n+\t\t\tfield.idx = index + 1\n+\n+\t\treturn {\"added\": added, \"removed\": removed}\n+\n+\tdef add_fields(self, added_fields: list[str], doctype_fields: list[\"DocField\"]) -> list[dict]:\n+\t\tadded = []\n+\t\tfor field in added_fields:\n+\t\t\tfield_details = next((f for f in doctype_fields if f.fieldname == field), None)\n+\t\t\tif not field_details:\n+\t\t\t\tcontinue\n+\n+\t\t\t# remove 'doctype' data from the DocField to allow adding it to the layout\n+\t\t\trow = self.append(\"fields\", field_details.as_dict(no_default_fields=True))\n+\t\t\trow_data = row.as_dict()\n+\n+\t\t\tif field_details.get(\"insert_after\"):\n+\t\t\t\tinsert_after = next(\n+\t\t\t\t\t(f for f in self.fields if f.fieldname == field_details.insert_after),\n+\t\t\t\t\tNone,\n+\t\t\t\t)\n+\n+\t\t\t\t# initialize new row to just after the insert_after field\n+\t\t\t\tif insert_after:\n+\t\t\t\t\tself.fields.insert(insert_after.idx, row)\n+\t\t\t\t\tself.fields.pop()\n+\n+\t\t\t\t\trow_data = {\"idx\": insert_after.idx + 1, \"fieldname\": row.fieldname, \"label\": row.label}\n+\n+\t\t\tadded.append(row_data)\n+\t\treturn 
added\n+\n+\tdef remove_fields(self, removed_fields: list[str]) -> list[dict]:\n+\t\tremoved = []\n+\t\tfor field in removed_fields:\n+\t\t\tfield_details = next((f for f in self.fields if f.fieldname == field), None)\n+\t\t\tif field_details:\n+\t\t\t\tself.remove(field_details)\n+\t\t\t\tremoved.append(field_details.as_dict())\n+\t\treturn removed\n", "issue": "DocType Layout Fields table does not populate\n## Description of the issue\r\nWhen selecting a Document Type in a DocType Layout document the Fields table does not populate. The document must be saved twice in order to populate the fields, because the first save resulting in an error message. Obviously this would lead people (like myself) to believe the DocType Layout feature is non-functional.\r\n\r\nHere is an image demonstrating the problems:\r\n![image](https://user-images.githubusercontent.com/16672299/159814715-b8642de3-97c7-4fee-b151-2ee18fef3ecd.png)\r\n\r\n## Context information (for bug reports)\r\n\r\n**Output of `bench version`**\r\n```\r\nERPNext: v13.22.0 (version-13)\r\nFrappe Framework: v13.22.0 (version-13)\r\n```\r\n\r\n## Steps to reproduce the issue\r\n\r\n1. Add a new DocType Layout document\r\n2. Enter a value in Document Type\r\n\r\n### Observed result\r\n\r\n- Fields table does not populate.\r\n\r\n### Expected result\r\n\r\nSomething more like this\r\n![image](https://user-images.githubusercontent.com/16672299/159814647-dccba2a6-61be-4796-8431-325c18e24abb.png)\r\n\r\n## Additional information\r\n\r\nFrom what I can tell, there are two issues at play:\r\n\r\n1. The document_type field expects no rows to exist (but a blank row is auto-generated)\r\n2. The method neglects to refresh the Fields table\n", "before_files": [{"content": "# Copyright (c) 2020, Frappe Technologies and contributors\n# License: MIT. See LICENSE\n\nfrom frappe.desk.utils import slug\nfrom frappe.model.document import Document\n\n\nclass DocTypeLayout(Document):\n\tdef validate(self):\n\t\tif not self.route:\n\t\t\tself.route = slug(self.name)\n", "path": "frappe/custom/doctype/doctype_layout/doctype_layout.py"}, {"content": "# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors\n# License: MIT. 
See LICENSE\n\nimport json\n\nimport frappe\nfrom frappe import _\nfrom frappe.model import core_doctypes_list\nfrom frappe.model.docfield import supports_translation\nfrom frappe.model.document import Document\nfrom frappe.query_builder.functions import IfNull\nfrom frappe.utils import cstr\n\n\nclass CustomField(Document):\n\tdef autoname(self):\n\t\tself.set_fieldname()\n\t\tself.name = self.dt + \"-\" + self.fieldname\n\n\tdef set_fieldname(self):\n\t\tif not self.fieldname:\n\t\t\tlabel = self.label\n\t\t\tif not label:\n\t\t\t\tif self.fieldtype in [\"Section Break\", \"Column Break\", \"Tab Break\"]:\n\t\t\t\t\tlabel = self.fieldtype + \"_\" + str(self.idx)\n\t\t\t\telse:\n\t\t\t\t\tfrappe.throw(_(\"Label is mandatory\"))\n\n\t\t\t# remove special characters from fieldname\n\t\t\tself.fieldname = \"\".join(\n\t\t\t\tfilter(lambda x: x.isdigit() or x.isalpha() or \"_\", cstr(label).replace(\" \", \"_\"))\n\t\t\t)\n\n\t\t# fieldnames should be lowercase\n\t\tself.fieldname = self.fieldname.lower()\n\n\tdef before_insert(self):\n\t\tself.set_fieldname()\n\n\tdef validate(self):\n\t\t# these imports have been added to avoid cyclical import, should fix in future\n\t\tfrom frappe.core.doctype.doctype.doctype import check_fieldname_conflicts\n\t\tfrom frappe.custom.doctype.customize_form.customize_form import CustomizeForm\n\n\t\t# don't always get meta to improve performance\n\t\t# setting idx is just an improvement, not a requirement\n\t\tif self.is_new() or self.insert_after == \"append\":\n\t\t\tmeta = frappe.get_meta(self.dt, cached=False)\n\t\t\tfieldnames = [df.fieldname for df in meta.get(\"fields\")]\n\n\t\t\tif self.is_new() and self.fieldname in fieldnames:\n\t\t\t\tfrappe.throw(\n\t\t\t\t\t_(\"A field with the name {0} already exists in {1}\").format(\n\t\t\t\t\t\tfrappe.bold(self.fieldname), self.dt\n\t\t\t\t\t)\n\t\t\t\t)\n\n\t\t\tif self.insert_after == \"append\":\n\t\t\t\tself.insert_after = fieldnames[-1]\n\n\t\t\tif self.insert_after and self.insert_after in fieldnames:\n\t\t\t\tself.idx = fieldnames.index(self.insert_after) + 1\n\n\t\tif (\n\t\t\tnot self.is_virtual\n\t\t\tand (doc_before_save := self.get_doc_before_save())\n\t\t\tand (old_fieldtype := doc_before_save.fieldtype) != self.fieldtype\n\t\t\tand not CustomizeForm.allow_fieldtype_change(old_fieldtype, self.fieldtype)\n\t\t):\n\t\t\tfrappe.throw(\n\t\t\t\t_(\"Fieldtype cannot be changed from {0} to {1}\").format(old_fieldtype, self.fieldtype)\n\t\t\t)\n\n\t\tif not self.fieldname:\n\t\t\tfrappe.throw(_(\"Fieldname not set for Custom Field\"))\n\n\t\tif self.get(\"translatable\", 0) and not supports_translation(self.fieldtype):\n\t\t\tself.translatable = 0\n\n\t\tcheck_fieldname_conflicts(self)\n\n\tdef on_update(self):\n\t\t# validate field\n\t\tif not self.flags.ignore_validate:\n\t\t\tfrom frappe.core.doctype.doctype.doctype import validate_fields_for_doctype\n\n\t\t\tvalidate_fields_for_doctype(self.dt)\n\n\t\t# clear cache and update the schema\n\t\tif not frappe.flags.in_create_custom_fields:\n\t\t\tfrappe.clear_cache(doctype=self.dt)\n\t\t\tfrappe.db.updatedb(self.dt)\n\n\tdef on_trash(self):\n\t\t# check if Admin owned field\n\t\tif self.owner == \"Administrator\" and frappe.session.user != \"Administrator\":\n\t\t\tfrappe.throw(\n\t\t\t\t_(\n\t\t\t\t\t\"Custom Field {0} is created by the Administrator and can only be deleted through the Administrator account.\"\n\t\t\t\t).format(frappe.bold(self.label))\n\t\t\t)\n\n\t\t# delete property setter entries\n\t\tfrappe.db.delete(\"Property Setter\", 
{\"doc_type\": self.dt, \"field_name\": self.fieldname})\n\t\tfrappe.clear_cache(doctype=self.dt)\n\n\tdef validate_insert_after(self, meta):\n\t\tif not meta.get_field(self.insert_after):\n\t\t\tfrappe.throw(\n\t\t\t\t_(\n\t\t\t\t\t\"Insert After field '{0}' mentioned in Custom Field '{1}', with label '{2}', does not exist\"\n\t\t\t\t).format(self.insert_after, self.name, self.label),\n\t\t\t\tfrappe.DoesNotExistError,\n\t\t\t)\n\n\t\tif self.fieldname == self.insert_after:\n\t\t\tfrappe.throw(_(\"Insert After cannot be set as {0}\").format(meta.get_label(self.insert_after)))\n\n\[email protected]()\ndef get_fields_label(doctype=None):\n\tmeta = frappe.get_meta(doctype)\n\n\tif doctype in core_doctypes_list:\n\t\treturn frappe.msgprint(_(\"Custom Fields cannot be added to core DocTypes.\"))\n\n\tif meta.custom:\n\t\treturn frappe.msgprint(_(\"Custom Fields can only be added to a standard DocType.\"))\n\n\treturn [\n\t\t{\"value\": df.fieldname or \"\", \"label\": _(df.label or \"\")}\n\t\tfor df in frappe.get_meta(doctype).get(\"fields\")\n\t]\n\n\ndef create_custom_field_if_values_exist(doctype, df):\n\tdf = frappe._dict(df)\n\tif df.fieldname in frappe.db.get_table_columns(doctype) and frappe.db.count(\n\t\tdt=doctype, filters=IfNull(df.fieldname, \"\") != \"\"\n\t):\n\t\tcreate_custom_field(doctype, df)\n\n\ndef create_custom_field(doctype, df, ignore_validate=False, is_system_generated=True):\n\tdf = frappe._dict(df)\n\tif not df.fieldname and df.label:\n\t\tdf.fieldname = frappe.scrub(df.label)\n\tif not frappe.db.get_value(\"Custom Field\", {\"dt\": doctype, \"fieldname\": df.fieldname}):\n\t\tcustom_field = frappe.get_doc(\n\t\t\t{\n\t\t\t\t\"doctype\": \"Custom Field\",\n\t\t\t\t\"dt\": doctype,\n\t\t\t\t\"permlevel\": 0,\n\t\t\t\t\"fieldtype\": \"Data\",\n\t\t\t\t\"hidden\": 0,\n\t\t\t\t\"is_system_generated\": is_system_generated,\n\t\t\t}\n\t\t)\n\t\tcustom_field.update(df)\n\t\tcustom_field.flags.ignore_validate = ignore_validate\n\t\tcustom_field.insert()\n\t\treturn custom_field\n\n\ndef create_custom_fields(custom_fields, ignore_validate=False, update=True):\n\t\"\"\"Add / update multiple custom fields\n\n\t:param custom_fields: example `{'Sales Invoice': [dict(fieldname='test')]}`\"\"\"\n\n\ttry:\n\t\tfrappe.flags.in_create_custom_fields = True\n\t\tdoctypes_to_update = set()\n\n\t\tif frappe.flags.in_setup_wizard:\n\t\t\tignore_validate = True\n\n\t\tfor doctypes, fields in custom_fields.items():\n\t\t\tif isinstance(fields, dict):\n\t\t\t\t# only one field\n\t\t\t\tfields = [fields]\n\n\t\t\tif isinstance(doctypes, str):\n\t\t\t\t# only one doctype\n\t\t\t\tdoctypes = (doctypes,)\n\n\t\t\tfor doctype in doctypes:\n\t\t\t\tdoctypes_to_update.add(doctype)\n\n\t\t\t\tfor df in fields:\n\t\t\t\t\tfield = frappe.db.get_value(\"Custom Field\", {\"dt\": doctype, \"fieldname\": df[\"fieldname\"]})\n\t\t\t\t\tif not field:\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tdf = df.copy()\n\t\t\t\t\t\t\tdf[\"owner\"] = \"Administrator\"\n\t\t\t\t\t\t\tcreate_custom_field(doctype, df, ignore_validate=ignore_validate)\n\n\t\t\t\t\t\texcept frappe.exceptions.DuplicateEntryError:\n\t\t\t\t\t\t\tpass\n\n\t\t\t\t\telif update:\n\t\t\t\t\t\tcustom_field = frappe.get_doc(\"Custom Field\", field)\n\t\t\t\t\t\tcustom_field.flags.ignore_validate = ignore_validate\n\t\t\t\t\t\tcustom_field.update(df)\n\t\t\t\t\t\tcustom_field.save()\n\n\t\tfor doctype in doctypes_to_update:\n\t\t\tfrappe.clear_cache(doctype=doctype)\n\t\t\tfrappe.db.updatedb(doctype)\n\n\tfinally:\n\t\tfrappe.flags.in_create_custom_fields = 
False\n", "path": "frappe/custom/doctype/custom_field/custom_field.py"}]}
3266
957
gh_patches_debug_40221
rasdani/github-patches
git_diff
ibis-project__ibis-797
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Using http for webhdfs not possible when kerberos is enabled The hdfs_connect function has no option to choose http or https protocol when connecting to webhdfs. We are running CDH 5.5 with kerberos but without ssl enabled for webhdfs. Could you maybe add an option to signal the use of http when using kerberos? code snippet. the hdfs_connect function code in ibis/ibis/**init**.py always uses https when auth_mechanism in ['GSSAPI', 'LDAP'] example: def hdfs_connect(host='localhost', port=50070, protocol='webhdfs', auth_mechanism='NOSASL', verify=True, use_https=True, **kwds): if auth_mechanism in ['GSSAPI', 'LDAP']: try: import requests_kerberos except ImportError: raise IbisError( "Unable to import requests-kerberos, which is required for " "Kerberos HDFS support. Install it by executing `pip install " "requests-kerberos` or `pip install hdfs[kerberos]`.") from hdfs.ext.kerberos import KerberosClient if use_https: url = 'https://{0}:{1}'.format(host, port) # use SSL else: url = 'http://{0}:{1}'.format(host, port) # no SSL kwds.setdefault('mutual_auth', 'OPTIONAL') hdfs_client = KerberosClient(url, *_kwds) else: from hdfs.client import InsecureClient url = 'http://{0}:{1}'.format(host, port) hdfs_client = InsecureClient(url, *_kwds) </issue> <code> [start of ibis/impala/api.py] 1 # Copyright 2015 Cloudera Inc 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # http://www.apache.org/licenses/LICENSE-2.0 7 # 8 # Unless required by applicable law or agreed to in writing, software 9 # distributed under the License is distributed on an "AS IS" BASIS, 10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 # See the License for the specific language governing permissions and 12 # limitations under the License. 13 14 from ibis.impala.client import (ImpalaConnection, # noqa 15 ImpalaClient, 16 ImpalaDatabase, 17 ImpalaTable) 18 from ibis.impala.udf import * # noqa 19 from ibis.impala.madlib import MADLibAPI # noqa 20 from ibis.config import options 21 import ibis.common as com 22 23 24 def compile(expr): 25 """ 26 Force compilation of expression as though it were an expression depending 27 on Impala. Note you can also call expr.compile() 28 29 Returns 30 ------- 31 compiled : string 32 """ 33 from .compiler import to_sql 34 return to_sql(expr) 35 36 37 def verify(expr): 38 """ 39 Determine if expression can be successfully translated to execute on Impala 40 """ 41 try: 42 compile(expr) 43 return True 44 except com.TranslationError: 45 return False 46 47 48 def connect(host='localhost', port=21050, database='default', timeout=45, 49 use_ssl=False, ca_cert=None, user=None, password=None, 50 auth_mechanism='NOSASL', kerberos_service_name='impala', 51 pool_size=8, hdfs_client=None): 52 """ 53 Create an ImpalaClient for use with Ibis. 54 55 Parameters 56 ---------- 57 host : string, Host name of the impalad or HiveServer2 in Hive 58 port : int, Defaults to 21050 (Impala's HiveServer2) 59 database : string, Default database when obtaining new cursors 60 timeout : int, Connection timeout (seconds) when communicating with 61 HiveServer2 62 use_ssl : boolean, Use SSL when connecting to HiveServer2 63 ca_cert : string, Local path to 3rd party CA certificate or copy of server 64 certificate for self-signed certificates. 
If SSL is enabled, but this 65 argument is None, then certificate validation is skipped. 66 user : string, LDAP user to authenticate 67 password : string, LDAP password to authenticate 68 auth_mechanism : string, {'NOSASL' <- default, 'PLAIN', 'GSSAPI', 'LDAP'}. 69 Use NOSASL for non-secured Impala connections. Use PLAIN for 70 non-secured Hive clusters. Use LDAP for LDAP authenticated 71 connections. Use GSSAPI for Kerberos-secured clusters. 72 kerberos_service_name : string, Specify particular impalad service 73 principal. 74 75 Examples 76 -------- 77 >>> hdfs = ibis.hdfs_connect(**hdfs_params) 78 >>> client = ibis.impala.connect(hdfs_client=hdfs, **impala_params) 79 80 Returns 81 ------- 82 con : ImpalaClient 83 """ 84 params = { 85 'host': host, 86 'port': port, 87 'database': database, 88 'timeout': timeout, 89 'use_ssl': use_ssl, 90 'ca_cert': ca_cert, 91 'user': user, 92 'password': password, 93 'auth_mechanism': auth_mechanism, 94 'kerberos_service_name': kerberos_service_name 95 } 96 97 con = ImpalaConnection(pool_size=pool_size, **params) 98 client = ImpalaClient(con, hdfs_client=hdfs_client) 99 100 if options.default_backend is None: 101 options.default_backend = client 102 103 return client 104 [end of ibis/impala/api.py] [start of ibis/__init__.py] 1 # Copyright 2014 Cloudera Inc. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 16 # flake8: noqa 17 18 __version__ = '0.5.0' 19 20 from ibis.filesystems import HDFS, WebHDFS 21 from ibis.common import IbisError 22 23 import ibis.expr.api as api 24 import ibis.expr.types as ir 25 26 # __all__ is defined 27 from ibis.expr.api import * 28 29 import ibis.impala.api as impala 30 import ibis.sql.sqlite.api as sqlite 31 32 import ibis.config_init 33 from ibis.config import options 34 import ibis.util as util 35 36 37 # Deprecated 38 impala_connect = util.deprecate(impala.connect, 39 'impala_connect is deprecated, use' 40 ' ibis.impala.connect instead') 41 42 43 def make_client(db, hdfs_client=None): 44 """ 45 Create an Ibis client from a database connection and optional additional 46 connections (like HDFS) 47 48 Parameters 49 ---------- 50 db : Connection 51 e.g. produced by ibis.impala.connect 52 hdfs_client : ibis HDFS client 53 54 Examples 55 -------- 56 >>> con = ibis.impala.connect(**impala_params) 57 >>> hdfs = ibis.hdfs_connect(**hdfs_params) 58 >>> client = ibis.make_client(con, hdfs_client=hdfs) 59 60 Returns 61 ------- 62 client : IbisClient 63 """ 64 db._hdfs = hdfs_client 65 return db 66 67 make_client = util.deprecate( 68 make_client, ('make_client is deprecated. 
' 69 'Use ibis.impala.connect ' 70 ' with hdfs_client=hdfs_client')) 71 72 73 def hdfs_connect(host='localhost', port=50070, protocol='webhdfs', 74 auth_mechanism='NOSASL', verify=True, **kwds): 75 """ 76 Connect to HDFS 77 78 Parameters 79 ---------- 80 host : string, Host name of the HDFS NameNode 81 port : int, NameNode's WebHDFS port (default 50070) 82 protocol : {'webhdfs'} 83 auth_mechanism : string, Set to NOSASL or PLAIN for non-secure clusters. 84 Set to GSSAPI or LDAP for Kerberos-secured clusters. 85 verify : boolean, Set to False to turn off verifying SSL certificates. 86 (default True) 87 88 Other keywords are forwarded to hdfs library classes 89 90 Returns 91 ------- 92 client : WebHDFS 93 """ 94 import requests 95 session = kwds.setdefault('session', requests.Session()) 96 session.verify = verify 97 if auth_mechanism in ['GSSAPI', 'LDAP']: 98 try: 99 import requests_kerberos 100 except ImportError: 101 raise IbisError( 102 "Unable to import requests-kerberos, which is required for " 103 "Kerberos HDFS support. Install it by executing `pip install " 104 "requests-kerberos` or `pip install hdfs[kerberos]`.") 105 from hdfs.ext.kerberos import KerberosClient 106 url = 'https://{0}:{1}'.format(host, port) # note SSL 107 kwds.setdefault('mutual_auth', 'OPTIONAL') 108 hdfs_client = KerberosClient(url, **kwds) 109 else: 110 from hdfs.client import InsecureClient 111 url = 'http://{0}:{1}'.format(host, port) 112 hdfs_client = InsecureClient(url, **kwds) 113 return WebHDFS(hdfs_client) 114 115 def test(impala=False): 116 import pytest 117 import ibis 118 import os 119 120 ibis_dir, _ = os.path.split(ibis.__file__) 121 122 args = ['--pyargs', ibis_dir] 123 if impala: 124 args.append('--impala') 125 pytest.main(args) 126 127 from ._version import get_versions 128 __version__ = get_versions()['version'] 129 del get_versions 130 [end of ibis/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ibis/__init__.py b/ibis/__init__.py --- a/ibis/__init__.py +++ b/ibis/__init__.py @@ -71,7 +71,8 @@ def hdfs_connect(host='localhost', port=50070, protocol='webhdfs', - auth_mechanism='NOSASL', verify=True, **kwds): + use_https='default', auth_mechanism='NOSASL', + verify=True, **kwds): """ Connect to HDFS @@ -80,6 +81,9 @@ host : string, Host name of the HDFS NameNode port : int, NameNode's WebHDFS port (default 50070) protocol : {'webhdfs'} + use_https : boolean, default 'default' + Connect to WebHDFS with HTTPS, otherwise plain HTTP. For secure + authentication, the default for this is True, otherwise False auth_mechanism : string, Set to NOSASL or PLAIN for non-secure clusters. Set to GSSAPI or LDAP for Kerberos-secured clusters. verify : boolean, Set to False to turn off verifying SSL certificates. @@ -95,6 +99,10 @@ session = kwds.setdefault('session', requests.Session()) session.verify = verify if auth_mechanism in ['GSSAPI', 'LDAP']: + if use_https == 'default': + prefix = 'https' + else: + prefix = 'https' if use_https else 'http' try: import requests_kerberos except ImportError: @@ -103,12 +111,17 @@ "Kerberos HDFS support. Install it by executing `pip install " "requests-kerberos` or `pip install hdfs[kerberos]`.") from hdfs.ext.kerberos import KerberosClient - url = 'https://{0}:{1}'.format(host, port) # note SSL + # note SSL + url = '{0}://{1}:{2}'.format(prefix, host, port) kwds.setdefault('mutual_auth', 'OPTIONAL') hdfs_client = KerberosClient(url, **kwds) else: + if use_https == 'default': + prefix = 'http' + else: + prefix = 'https' if use_https else 'http' from hdfs.client import InsecureClient - url = 'http://{0}:{1}'.format(host, port) + url = '{0}://{1}:{2}'.format(prefix, host, port) hdfs_client = InsecureClient(url, **kwds) return WebHDFS(hdfs_client) diff --git a/ibis/impala/api.py b/ibis/impala/api.py --- a/ibis/impala/api.py +++ b/ibis/impala/api.py @@ -46,9 +46,9 @@ def connect(host='localhost', port=21050, database='default', timeout=45, - use_ssl=False, ca_cert=None, user=None, password=None, - auth_mechanism='NOSASL', kerberos_service_name='impala', - pool_size=8, hdfs_client=None): + use_ssl=False, ca_cert=None, user=None, + password=None, auth_mechanism='NOSASL', + kerberos_service_name='impala', pool_size=8, hdfs_client=None): """ Create an ImpalaClient for use with Ibis.
{"golden_diff": "diff --git a/ibis/__init__.py b/ibis/__init__.py\n--- a/ibis/__init__.py\n+++ b/ibis/__init__.py\n@@ -71,7 +71,8 @@\n \n \n def hdfs_connect(host='localhost', port=50070, protocol='webhdfs',\n- auth_mechanism='NOSASL', verify=True, **kwds):\n+ use_https='default', auth_mechanism='NOSASL',\n+ verify=True, **kwds):\n \"\"\"\n Connect to HDFS\n \n@@ -80,6 +81,9 @@\n host : string, Host name of the HDFS NameNode\n port : int, NameNode's WebHDFS port (default 50070)\n protocol : {'webhdfs'}\n+ use_https : boolean, default 'default'\n+ Connect to WebHDFS with HTTPS, otherwise plain HTTP. For secure\n+ authentication, the default for this is True, otherwise False\n auth_mechanism : string, Set to NOSASL or PLAIN for non-secure clusters.\n Set to GSSAPI or LDAP for Kerberos-secured clusters.\n verify : boolean, Set to False to turn off verifying SSL certificates.\n@@ -95,6 +99,10 @@\n session = kwds.setdefault('session', requests.Session())\n session.verify = verify\n if auth_mechanism in ['GSSAPI', 'LDAP']:\n+ if use_https == 'default':\n+ prefix = 'https'\n+ else:\n+ prefix = 'https' if use_https else 'http'\n try:\n import requests_kerberos\n except ImportError:\n@@ -103,12 +111,17 @@\n \"Kerberos HDFS support. Install it by executing `pip install \"\n \"requests-kerberos` or `pip install hdfs[kerberos]`.\")\n from hdfs.ext.kerberos import KerberosClient\n- url = 'https://{0}:{1}'.format(host, port) # note SSL\n+ # note SSL\n+ url = '{0}://{1}:{2}'.format(prefix, host, port)\n kwds.setdefault('mutual_auth', 'OPTIONAL')\n hdfs_client = KerberosClient(url, **kwds)\n else:\n+ if use_https == 'default':\n+ prefix = 'http'\n+ else:\n+ prefix = 'https' if use_https else 'http'\n from hdfs.client import InsecureClient\n- url = 'http://{0}:{1}'.format(host, port)\n+ url = '{0}://{1}:{2}'.format(prefix, host, port)\n hdfs_client = InsecureClient(url, **kwds)\n return WebHDFS(hdfs_client)\n \ndiff --git a/ibis/impala/api.py b/ibis/impala/api.py\n--- a/ibis/impala/api.py\n+++ b/ibis/impala/api.py\n@@ -46,9 +46,9 @@\n \n \n def connect(host='localhost', port=21050, database='default', timeout=45,\n- use_ssl=False, ca_cert=None, user=None, password=None,\n- auth_mechanism='NOSASL', kerberos_service_name='impala',\n- pool_size=8, hdfs_client=None):\n+ use_ssl=False, ca_cert=None, user=None,\n+ password=None, auth_mechanism='NOSASL',\n+ kerberos_service_name='impala', pool_size=8, hdfs_client=None):\n \"\"\"\n Create an ImpalaClient for use with Ibis.\n", "issue": "Using http for webhdfs not possible when kerberos is enabled \nThe hdfs_connect function has no option to choose http or https protocol when connecting to webhdfs.\nWe are running CDH 5.5 with kerberos but without ssl enabled for webhdfs.\n\nCould you maybe add an option to signal the use of http when using kerberos?\n\ncode snippet.\n\nthe hdfs_connect function code in ibis/ibis/**init**.py always uses https when auth_mechanism in ['GSSAPI', 'LDAP']\n\nexample:\n\ndef hdfs_connect(host='localhost', port=50070, protocol='webhdfs',\n auth_mechanism='NOSASL', verify=True, use_https=True, **kwds):\n\nif auth_mechanism in ['GSSAPI', 'LDAP']:\n try:\n import requests_kerberos\n except ImportError:\n raise IbisError(\n \"Unable to import requests-kerberos, which is required for \"\n \"Kerberos HDFS support. 
Install it by executing `pip install \"\n \"requests-kerberos` or `pip install hdfs[kerberos]`.\")\n from hdfs.ext.kerberos import KerberosClient\n if use_https:\n url = 'https://{0}:{1}'.format(host, port) # use SSL\n else:\n url = 'http://{0}:{1}'.format(host, port) # no SSL\n kwds.setdefault('mutual_auth', 'OPTIONAL')\n hdfs_client = KerberosClient(url, *_kwds)\n else:\n from hdfs.client import InsecureClient\n url = 'http://{0}:{1}'.format(host, port)\n hdfs_client = InsecureClient(url, *_kwds)\n\n", "before_files": [{"content": "# Copyright 2015 Cloudera Inc\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom ibis.impala.client import (ImpalaConnection, # noqa\n ImpalaClient,\n ImpalaDatabase,\n ImpalaTable)\nfrom ibis.impala.udf import * # noqa\nfrom ibis.impala.madlib import MADLibAPI # noqa\nfrom ibis.config import options\nimport ibis.common as com\n\n\ndef compile(expr):\n \"\"\"\n Force compilation of expression as though it were an expression depending\n on Impala. Note you can also call expr.compile()\n\n Returns\n -------\n compiled : string\n \"\"\"\n from .compiler import to_sql\n return to_sql(expr)\n\n\ndef verify(expr):\n \"\"\"\n Determine if expression can be successfully translated to execute on Impala\n \"\"\"\n try:\n compile(expr)\n return True\n except com.TranslationError:\n return False\n\n\ndef connect(host='localhost', port=21050, database='default', timeout=45,\n use_ssl=False, ca_cert=None, user=None, password=None,\n auth_mechanism='NOSASL', kerberos_service_name='impala',\n pool_size=8, hdfs_client=None):\n \"\"\"\n Create an ImpalaClient for use with Ibis.\n\n Parameters\n ----------\n host : string, Host name of the impalad or HiveServer2 in Hive\n port : int, Defaults to 21050 (Impala's HiveServer2)\n database : string, Default database when obtaining new cursors\n timeout : int, Connection timeout (seconds) when communicating with\n HiveServer2\n use_ssl : boolean, Use SSL when connecting to HiveServer2\n ca_cert : string, Local path to 3rd party CA certificate or copy of server\n certificate for self-signed certificates. If SSL is enabled, but this\n argument is None, then certificate validation is skipped.\n user : string, LDAP user to authenticate\n password : string, LDAP password to authenticate\n auth_mechanism : string, {'NOSASL' <- default, 'PLAIN', 'GSSAPI', 'LDAP'}.\n Use NOSASL for non-secured Impala connections. Use PLAIN for\n non-secured Hive clusters. Use LDAP for LDAP authenticated\n connections. 
Use GSSAPI for Kerberos-secured clusters.\n kerberos_service_name : string, Specify particular impalad service\n principal.\n\n Examples\n --------\n >>> hdfs = ibis.hdfs_connect(**hdfs_params)\n >>> client = ibis.impala.connect(hdfs_client=hdfs, **impala_params)\n\n Returns\n -------\n con : ImpalaClient\n \"\"\"\n params = {\n 'host': host,\n 'port': port,\n 'database': database,\n 'timeout': timeout,\n 'use_ssl': use_ssl,\n 'ca_cert': ca_cert,\n 'user': user,\n 'password': password,\n 'auth_mechanism': auth_mechanism,\n 'kerberos_service_name': kerberos_service_name\n }\n\n con = ImpalaConnection(pool_size=pool_size, **params)\n client = ImpalaClient(con, hdfs_client=hdfs_client)\n\n if options.default_backend is None:\n options.default_backend = client\n\n return client\n", "path": "ibis/impala/api.py"}, {"content": "# Copyright 2014 Cloudera Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n# flake8: noqa\n\n__version__ = '0.5.0'\n\nfrom ibis.filesystems import HDFS, WebHDFS\nfrom ibis.common import IbisError\n\nimport ibis.expr.api as api\nimport ibis.expr.types as ir\n\n# __all__ is defined\nfrom ibis.expr.api import *\n\nimport ibis.impala.api as impala\nimport ibis.sql.sqlite.api as sqlite\n\nimport ibis.config_init\nfrom ibis.config import options\nimport ibis.util as util\n\n\n# Deprecated\nimpala_connect = util.deprecate(impala.connect,\n 'impala_connect is deprecated, use'\n ' ibis.impala.connect instead')\n\n\ndef make_client(db, hdfs_client=None):\n \"\"\"\n Create an Ibis client from a database connection and optional additional\n connections (like HDFS)\n\n Parameters\n ----------\n db : Connection\n e.g. produced by ibis.impala.connect\n hdfs_client : ibis HDFS client\n\n Examples\n --------\n >>> con = ibis.impala.connect(**impala_params)\n >>> hdfs = ibis.hdfs_connect(**hdfs_params)\n >>> client = ibis.make_client(con, hdfs_client=hdfs)\n\n Returns\n -------\n client : IbisClient\n \"\"\"\n db._hdfs = hdfs_client\n return db\n\nmake_client = util.deprecate(\n make_client, ('make_client is deprecated. 
'\n 'Use ibis.impala.connect '\n ' with hdfs_client=hdfs_client'))\n\n\ndef hdfs_connect(host='localhost', port=50070, protocol='webhdfs',\n auth_mechanism='NOSASL', verify=True, **kwds):\n \"\"\"\n Connect to HDFS\n\n Parameters\n ----------\n host : string, Host name of the HDFS NameNode\n port : int, NameNode's WebHDFS port (default 50070)\n protocol : {'webhdfs'}\n auth_mechanism : string, Set to NOSASL or PLAIN for non-secure clusters.\n Set to GSSAPI or LDAP for Kerberos-secured clusters.\n verify : boolean, Set to False to turn off verifying SSL certificates.\n (default True)\n\n Other keywords are forwarded to hdfs library classes\n\n Returns\n -------\n client : WebHDFS\n \"\"\"\n import requests\n session = kwds.setdefault('session', requests.Session())\n session.verify = verify\n if auth_mechanism in ['GSSAPI', 'LDAP']:\n try:\n import requests_kerberos\n except ImportError:\n raise IbisError(\n \"Unable to import requests-kerberos, which is required for \"\n \"Kerberos HDFS support. Install it by executing `pip install \"\n \"requests-kerberos` or `pip install hdfs[kerberos]`.\")\n from hdfs.ext.kerberos import KerberosClient\n url = 'https://{0}:{1}'.format(host, port) # note SSL\n kwds.setdefault('mutual_auth', 'OPTIONAL')\n hdfs_client = KerberosClient(url, **kwds)\n else:\n from hdfs.client import InsecureClient\n url = 'http://{0}:{1}'.format(host, port)\n hdfs_client = InsecureClient(url, **kwds)\n return WebHDFS(hdfs_client)\n\ndef test(impala=False):\n import pytest\n import ibis\n import os\n\n ibis_dir, _ = os.path.split(ibis.__file__)\n\n args = ['--pyargs', ibis_dir]\n if impala:\n args.append('--impala')\n pytest.main(args)\n\nfrom ._version import get_versions\n__version__ = get_versions()['version']\ndel get_versions\n", "path": "ibis/__init__.py"}]}
num_tokens_prompt: 3,217
num_tokens_diff: 805
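The ibis record above adds an explicit `use_https` switch for WebHDFS. A minimal sketch of just the scheme-selection logic, with the standalone helper name assumed for illustration (the real patch inlines this inside `hdfs_connect`):

```python
def _webhdfs_url(host, port, use_https, secure):
    # 'default' keeps the old behaviour: HTTPS for Kerberos/LDAP clusters,
    # plain HTTP otherwise; an explicit boolean overrides it either way.
    if use_https == 'default':
        prefix = 'https' if secure else 'http'
    else:
        prefix = 'https' if use_https else 'http'
    return '{0}://{1}:{2}'.format(prefix, host, port)

# A Kerberos cluster whose WebHDFS endpoint is plain HTTP (the reporter's case):
# _webhdfs_url('namenode', 50070, use_https=False, secure=True) -> 'http://namenode:50070'
```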
problem_id: gh_patches_debug_32245
source: rasdani/github-patches
task_type: git_diff
in_source_id: AlexsLemonade__refinebio-2216
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Engagement bot summary doesn't seem to be working ### Context https://github.com/AlexsLemonade/refinebio/issues/2140 ### Problem or idea Tested today and reported no downloads in the last 7 days. > **EngagementBot** > There were no downloads in the last 7 days. However there were a few that should have been counted: ``` data_refinery=> select id, created_at from data_refinery_common_dataset where is_processed='t' and email_address is not NULL order by created_at desc limit 10; id | created_at --------------------------------------+------------------------------- 381e86b9-aea2-4d9a-8a61-338b111fe54d | 2020-03-29 13:06:28.954778+00 eb726da7-8006-46c4-95ba-8a3bcecbd6f1 | 2020-03-26 02:26:35.196333+00 099e7b58-dce6-4d5c-a4b6-f4d049dad866 | 2020-03-25 16:19:14.656207+00 c4c086ef-6963-4fd8-b4e2-a7cf09c9eb68 | 2020-03-25 16:18:03.158765+00 0716857b-211f-484c-9e2b-3e447dd5cdaf | 2020-03-25 15:34:20.155269+00 2e7c1e1c-138b-43b1-9463-0113c64e7edb | 2020-03-25 15:19:23.577922+00 4a2ac1ef-4e9e-4945-bdf8-00d595f99c4c | 2020-03-25 15:18:21.49652+00 bbb30f4e-7b5f-4595-a47d-25f8d72e4a7c | 2020-03-25 15:03:55.391917+00 eff9cbe4-c4f5-43a6-b004-6cd36f10ea15 | 2020-03-24 22:41:51.185922+00 ``` ### Solution or next step Investigate why these downloads are not being reported. </issue> <code> [start of api/data_refinery_api/management/commands/post_downloads_summary.py] 1 import datetime 2 3 from django.conf import settings 4 from django.core.management.base import BaseCommand 5 from django.utils import timezone 6 7 import requests 8 9 from data_refinery_common.models import DatasetAnnotation 10 11 12 class Command(BaseCommand): 13 help = "Post downloads summary to slack" 14 15 def add_arguments(self, parser): 16 parser.add_argument( 17 "--days", 18 type=int, 19 default=7, # default to a week 20 help=("Number of days in the past for which to build the stats"), 21 ) 22 parser.add_argument( 23 "--channel", 24 type=str, 25 default="ccdl-general", 26 help=("Optional parameter to choose the channel where the message will be posted."), 27 ) 28 29 def handle(self, *args, **options): 30 days = options["days"] 31 start_time = timezone.now() - datetime.timedelta(days=-days) 32 33 annotation_queryset = DatasetAnnotation.objects.filter( 34 created_at__gt=start_time 35 ).prefetch_related("dataset") 36 annotations = [ 37 annotation 38 for annotation in annotation_queryset 39 if annotation.data["start"] and should_display_email(annotation.dataset.email_address) 40 ] 41 42 unique_users = list(set(annotation.dataset.email_address for annotation in annotations)) 43 unique_ips = list(set(annotation.data["ip"] for annotation in annotations)) 44 45 if unique_users: 46 fallback_text = "In the last {0} days, {1} users downloaded datasets from {2} locations.".format( 47 days, len(unique_users), len(unique_ips) 48 ) 49 else: 50 fallback_text = "There were no downloads in the last {0} days.".format(days) 51 52 new_users = "" 53 returning_users = "" 54 for email in unique_users: 55 user_annotations = annotation_queryset.filter(dataset__email_address=email) 56 total_downloads = user_annotations.count() 57 unique_locations = list(set(annotation.data["ip"] for annotation in user_annotations)) 58 locations = ", ".join(get_ip_location(ip) for ip in unique_locations) 59 is_new_user = DatasetAnnotation.objects.filter( 60 created_at__lt=start_time, dataset__email_address=email 61 ) 62 text = "{0} | {1} downloads from {2}\n".format(email, 
total_downloads, locations) 63 if is_new_user: 64 new_users += text 65 else: 66 returning_users += text 67 68 blocks = [ 69 { 70 "type": "section", 71 "text": {"type": "plain_text", "emoji": True, "text": fallback_text}, 72 } 73 ] 74 if new_users: 75 blocks.append( 76 { 77 "type": "section", 78 "text": {"type": "mrkdwn", "text": "*New users* \n" + new_users,}, 79 } 80 ) 81 if returning_users: 82 blocks.append( 83 { 84 "type": "section", 85 "text": {"type": "mrkdwn", "text": "*Returning users* \n" + returning_users,}, 86 } 87 ) 88 89 # Post to slack 90 requests.post( 91 settings.ENGAGEMENTBOT_WEBHOOK, 92 json={ 93 "username": "EngagementBot", 94 "icon_emoji": ":halal:", 95 "channel": "#" + options["channel"], 96 "text": fallback_text, 97 "blocks": blocks, 98 }, 99 headers={"Content-Type": "application/json"}, 100 timeout=10, 101 ) 102 103 104 def should_display_email(email: str) -> bool: 105 """ Returns true if the given email is not associated with the CCDL suers """ 106 if not email: 107 return False 108 109 return not ( 110 email.startswith("cansav09") 111 or email.startswith("arielsvn") 112 or email.startswith("jaclyn.n.taroni") 113 or email.startswith("kurt.wheeler") 114 or email.startswith("greenescientist") 115 or "@alexslemonade.org" not in email 116 or email.startswith("miserlou") 117 or email.startswith("d.prasad") 118 or email is ("[email protected]") 119 or email is ("[email protected]") 120 ) 121 122 123 def get_ip_location(remote_ip): 124 try: 125 city = requests.get("https://ipapi.co/" + remote_ip + "/json/", timeout=10).json()["city"] 126 except Exception: 127 city = remote_ip 128 return city 129 [end of api/data_refinery_api/management/commands/post_downloads_summary.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/api/data_refinery_api/management/commands/post_downloads_summary.py b/api/data_refinery_api/management/commands/post_downloads_summary.py --- a/api/data_refinery_api/management/commands/post_downloads_summary.py +++ b/api/data_refinery_api/management/commands/post_downloads_summary.py @@ -28,7 +28,7 @@ def handle(self, *args, **options): days = options["days"] - start_time = timezone.now() - datetime.timedelta(days=-days) + start_time = timezone.now() - datetime.timedelta(days=days) annotation_queryset = DatasetAnnotation.objects.filter( created_at__gt=start_time @@ -105,16 +105,15 @@ """ Returns true if the given email is not associated with the CCDL suers """ if not email: return False - return not ( email.startswith("cansav09") or email.startswith("arielsvn") or email.startswith("jaclyn.n.taroni") or email.startswith("kurt.wheeler") or email.startswith("greenescientist") - or "@alexslemonade.org" not in email or email.startswith("miserlou") or email.startswith("d.prasad") + or email.endswith("@alexslemonade.org") or email is ("[email protected]") or email is ("[email protected]") ) @@ -122,7 +121,7 @@ def get_ip_location(remote_ip): try: - city = requests.get("https://ipapi.co/" + remote_ip + "/json/", timeout=10).json()["city"] + data = requests.get("https://ipapi.co/" + remote_ip + "/json/", timeout=10).json() + return "{0}, {1}".format(data["city"], data["country_name"]) except Exception: - city = remote_ip - return city + return remote_ip
{"golden_diff": "diff --git a/api/data_refinery_api/management/commands/post_downloads_summary.py b/api/data_refinery_api/management/commands/post_downloads_summary.py\n--- a/api/data_refinery_api/management/commands/post_downloads_summary.py\n+++ b/api/data_refinery_api/management/commands/post_downloads_summary.py\n@@ -28,7 +28,7 @@\n \n def handle(self, *args, **options):\n days = options[\"days\"]\n- start_time = timezone.now() - datetime.timedelta(days=-days)\n+ start_time = timezone.now() - datetime.timedelta(days=days)\n \n annotation_queryset = DatasetAnnotation.objects.filter(\n created_at__gt=start_time\n@@ -105,16 +105,15 @@\n \"\"\" Returns true if the given email is not associated with the CCDL suers \"\"\"\n if not email:\n return False\n-\n return not (\n email.startswith(\"cansav09\")\n or email.startswith(\"arielsvn\")\n or email.startswith(\"jaclyn.n.taroni\")\n or email.startswith(\"kurt.wheeler\")\n or email.startswith(\"greenescientist\")\n- or \"@alexslemonade.org\" not in email\n or email.startswith(\"miserlou\")\n or email.startswith(\"d.prasad\")\n+ or email.endswith(\"@alexslemonade.org\")\n or email is (\"[email protected]\")\n or email is (\"[email protected]\")\n )\n@@ -122,7 +121,7 @@\n \n def get_ip_location(remote_ip):\n try:\n- city = requests.get(\"https://ipapi.co/\" + remote_ip + \"/json/\", timeout=10).json()[\"city\"]\n+ data = requests.get(\"https://ipapi.co/\" + remote_ip + \"/json/\", timeout=10).json()\n+ return \"{0}, {1}\".format(data[\"city\"], data[\"country_name\"])\n except Exception:\n- city = remote_ip\n- return city\n+ return remote_ip\n", "issue": "Engagement bot summary doesn't seem to be working\n### Context\r\n\r\nhttps://github.com/AlexsLemonade/refinebio/issues/2140\r\n\r\n### Problem or idea\r\n\r\nTested today and reported no downloads in the last 7 days.\r\n\r\n> **EngagementBot**\r\n> There were no downloads in the last 7 days.\r\n\r\nHowever there were a few that should have been counted:\r\n\r\n```\r\ndata_refinery=> select id, created_at from data_refinery_common_dataset where is_processed='t' and email_address is not NULL order by created_at desc limit 10;\r\n id | created_at \r\n--------------------------------------+-------------------------------\r\n 381e86b9-aea2-4d9a-8a61-338b111fe54d | 2020-03-29 13:06:28.954778+00\r\n eb726da7-8006-46c4-95ba-8a3bcecbd6f1 | 2020-03-26 02:26:35.196333+00\r\n 099e7b58-dce6-4d5c-a4b6-f4d049dad866 | 2020-03-25 16:19:14.656207+00\r\n c4c086ef-6963-4fd8-b4e2-a7cf09c9eb68 | 2020-03-25 16:18:03.158765+00\r\n 0716857b-211f-484c-9e2b-3e447dd5cdaf | 2020-03-25 15:34:20.155269+00\r\n 2e7c1e1c-138b-43b1-9463-0113c64e7edb | 2020-03-25 15:19:23.577922+00\r\n 4a2ac1ef-4e9e-4945-bdf8-00d595f99c4c | 2020-03-25 15:18:21.49652+00\r\n bbb30f4e-7b5f-4595-a47d-25f8d72e4a7c | 2020-03-25 15:03:55.391917+00\r\n eff9cbe4-c4f5-43a6-b004-6cd36f10ea15 | 2020-03-24 22:41:51.185922+00\r\n```\r\n\r\n### Solution or next step\r\n\r\nInvestigate why these downloads are not being reported.\r\n\n", "before_files": [{"content": "import datetime\n\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand\nfrom django.utils import timezone\n\nimport requests\n\nfrom data_refinery_common.models import DatasetAnnotation\n\n\nclass Command(BaseCommand):\n help = \"Post downloads summary to slack\"\n\n def add_arguments(self, parser):\n parser.add_argument(\n \"--days\",\n type=int,\n default=7, # default to a week\n help=(\"Number of days in the past for which to build the stats\"),\n )\n 
parser.add_argument(\n \"--channel\",\n type=str,\n default=\"ccdl-general\",\n help=(\"Optional parameter to choose the channel where the message will be posted.\"),\n )\n\n def handle(self, *args, **options):\n days = options[\"days\"]\n start_time = timezone.now() - datetime.timedelta(days=-days)\n\n annotation_queryset = DatasetAnnotation.objects.filter(\n created_at__gt=start_time\n ).prefetch_related(\"dataset\")\n annotations = [\n annotation\n for annotation in annotation_queryset\n if annotation.data[\"start\"] and should_display_email(annotation.dataset.email_address)\n ]\n\n unique_users = list(set(annotation.dataset.email_address for annotation in annotations))\n unique_ips = list(set(annotation.data[\"ip\"] for annotation in annotations))\n\n if unique_users:\n fallback_text = \"In the last {0} days, {1} users downloaded datasets from {2} locations.\".format(\n days, len(unique_users), len(unique_ips)\n )\n else:\n fallback_text = \"There were no downloads in the last {0} days.\".format(days)\n\n new_users = \"\"\n returning_users = \"\"\n for email in unique_users:\n user_annotations = annotation_queryset.filter(dataset__email_address=email)\n total_downloads = user_annotations.count()\n unique_locations = list(set(annotation.data[\"ip\"] for annotation in user_annotations))\n locations = \", \".join(get_ip_location(ip) for ip in unique_locations)\n is_new_user = DatasetAnnotation.objects.filter(\n created_at__lt=start_time, dataset__email_address=email\n )\n text = \"{0} | {1} downloads from {2}\\n\".format(email, total_downloads, locations)\n if is_new_user:\n new_users += text\n else:\n returning_users += text\n\n blocks = [\n {\n \"type\": \"section\",\n \"text\": {\"type\": \"plain_text\", \"emoji\": True, \"text\": fallback_text},\n }\n ]\n if new_users:\n blocks.append(\n {\n \"type\": \"section\",\n \"text\": {\"type\": \"mrkdwn\", \"text\": \"*New users* \\n\" + new_users,},\n }\n )\n if returning_users:\n blocks.append(\n {\n \"type\": \"section\",\n \"text\": {\"type\": \"mrkdwn\", \"text\": \"*Returning users* \\n\" + returning_users,},\n }\n )\n\n # Post to slack\n requests.post(\n settings.ENGAGEMENTBOT_WEBHOOK,\n json={\n \"username\": \"EngagementBot\",\n \"icon_emoji\": \":halal:\",\n \"channel\": \"#\" + options[\"channel\"],\n \"text\": fallback_text,\n \"blocks\": blocks,\n },\n headers={\"Content-Type\": \"application/json\"},\n timeout=10,\n )\n\n\ndef should_display_email(email: str) -> bool:\n \"\"\" Returns true if the given email is not associated with the CCDL suers \"\"\"\n if not email:\n return False\n\n return not (\n email.startswith(\"cansav09\")\n or email.startswith(\"arielsvn\")\n or email.startswith(\"jaclyn.n.taroni\")\n or email.startswith(\"kurt.wheeler\")\n or email.startswith(\"greenescientist\")\n or \"@alexslemonade.org\" not in email\n or email.startswith(\"miserlou\")\n or email.startswith(\"d.prasad\")\n or email is (\"[email protected]\")\n or email is (\"[email protected]\")\n )\n\n\ndef get_ip_location(remote_ip):\n try:\n city = requests.get(\"https://ipapi.co/\" + remote_ip + \"/json/\", timeout=10).json()[\"city\"]\n except Exception:\n city = remote_ip\n return city\n", "path": "api/data_refinery_api/management/commands/post_downloads_summary.py"}]}
num_tokens_prompt: 2,482
num_tokens_diff: 445
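The root cause in the refinebio record above is the sign of the timedelta: subtracting `timedelta(days=-days)` moves the cutoff seven days into the future, so `created_at__gt=start_time` can never match a real download. A standalone illustration (plain `datetime` is used here so the snippet runs outside a configured Django project):

```python
from datetime import datetime, timedelta, timezone

days = 7
now = datetime.now(timezone.utc)

buggy_start = now - timedelta(days=-days)   # == now + 7 days: excludes every real download
start_time = now - timedelta(days=days)     # == now - 7 days: the intended window
assert buggy_start > now > start_time
```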
problem_id: gh_patches_debug_29866
source: rasdani/github-patches
task_type: git_diff
in_source_id: vllm-project__vllm-829
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Issue Building vllm Wheel: Unsupported GPU Architecture 'compute_89' I'm trying to build the vllm package from source (pip install vllm), but I'm encountering an error related to CUDA and GPU architecture. I am using RTX 4090. Here are the crucial parts of the log: ``` Building wheels for collected packages: vllm Building wheel for vllm (pyproject.toml) ... error error: subprocess-exited-with-error ... building 'vllm.cache_ops' extension Emitting ninja build file ... ... /usr/local/cuda/bin/nvcc -I/tmp/pip-build-env-... (other flags and paths) nvcc fatal : Unsupported gpu architecture 'compute_89' ``` Here is my compute cluster: --- 6790722 Age:15 min.[Remaining](https://vast.ai/faq#Instances):1d 19h71.69.80.64:44536-44750North Carolina, US1x RTX 409081.8 TFLOPS m:9663host:39244verified0.3/24.6 GB 3506.9 GB/s ROME2D16 PCIE 4.0, 8x12.5 GB/s AMD EPYC 7662 64-Core Processor 25.6/256 cpu 4/52 GB nvme 4972 MB/s6.9/21.0 GB 516.4 Mbps514.0 MbpsMax CUDA: 12.2 GPU: 0% 35C , CPU: 4% Status: success, running pytorch/pytorch_2.0.1-cuda11.7-cudnn8-devel/jupyter $0.263/hr Current State : running Next State : running Docker Image: pytorch/pytorch:2.0.1-cuda11.7-cudnn8-devel Image Runtype : jupyter_direc ssh_direc ssh_proxy Environment : { "JUPYTER_DIR": "/" } Docker Args: Onstart: env | grep _ >> /etc/environment; echo 'starting up' --- It seems that the nvcc compiler doesn't support the 'compute_89' GPU architecture. I'm using CUDA version 11.7 on a RTX 4090. Has anyone encountered this issue before? Any suggestions on how to resolve it? </issue> <code> [start of setup.py] 1 import io 2 import os 3 import re 4 import subprocess 5 from typing import List, Set 6 7 from packaging.version import parse, Version 8 import setuptools 9 import torch 10 from torch.utils.cpp_extension import BuildExtension, CUDAExtension, CUDA_HOME 11 12 ROOT_DIR = os.path.dirname(__file__) 13 14 # Compiler flags. 15 CXX_FLAGS = ["-g", "-O2", "-std=c++17"] 16 # TODO(woosuk): Should we use -O3? 17 NVCC_FLAGS = ["-O2", "-std=c++17"] 18 19 ABI = 1 if torch._C._GLIBCXX_USE_CXX11_ABI else 0 20 CXX_FLAGS += [f"-D_GLIBCXX_USE_CXX11_ABI={ABI}"] 21 NVCC_FLAGS += [f"-D_GLIBCXX_USE_CXX11_ABI={ABI}"] 22 23 if CUDA_HOME is None: 24 raise RuntimeError( 25 f"Cannot find CUDA_HOME. CUDA must be available in order to build the package.") 26 27 28 def get_nvcc_cuda_version(cuda_dir: str) -> Version: 29 """Get the CUDA version from nvcc. 30 31 Adapted from https://github.com/NVIDIA/apex/blob/8b7a1ff183741dd8f9b87e7bafd04cfde99cea28/setup.py 32 """ 33 nvcc_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"], 34 universal_newlines=True) 35 output = nvcc_output.split() 36 release_idx = output.index("release") + 1 37 nvcc_cuda_version = parse(output[release_idx].split(",")[0]) 38 return nvcc_cuda_version 39 40 41 # Collect the compute capabilities of all available GPUs. 42 device_count = torch.cuda.device_count() 43 compute_capabilities: Set[int] = set() 44 for i in range(device_count): 45 major, minor = torch.cuda.get_device_capability(i) 46 if major < 7: 47 raise RuntimeError( 48 "GPUs with compute capability less than 7.0 are not supported.") 49 compute_capabilities.add(major * 10 + minor) 50 51 # Validate the NVCC CUDA version. 
52 nvcc_cuda_version = get_nvcc_cuda_version(CUDA_HOME) 53 if nvcc_cuda_version < Version("11.0"): 54 raise RuntimeError("CUDA 11.0 or higher is required to build the package.") 55 if 86 in compute_capabilities and nvcc_cuda_version < Version("11.1"): 56 raise RuntimeError( 57 "CUDA 11.1 or higher is required for GPUs with compute capability 8.6.") 58 if 90 in compute_capabilities and nvcc_cuda_version < Version("11.8"): 59 raise RuntimeError( 60 "CUDA 11.8 or higher is required for GPUs with compute capability 9.0.") 61 62 # If no GPU is available, add all supported compute capabilities. 63 if not compute_capabilities: 64 compute_capabilities = {70, 75, 80} 65 if nvcc_cuda_version >= Version("11.1"): 66 compute_capabilities.add(86) 67 if nvcc_cuda_version >= Version("11.8"): 68 compute_capabilities.add(90) 69 70 # Add target compute capabilities to NVCC flags. 71 for capability in compute_capabilities: 72 NVCC_FLAGS += ["-gencode", f"arch=compute_{capability},code=sm_{capability}"] 73 74 # Use NVCC threads to parallelize the build. 75 if nvcc_cuda_version >= Version("11.2"): 76 num_threads = min(os.cpu_count(), 8) 77 NVCC_FLAGS += ["--threads", str(num_threads)] 78 79 ext_modules = [] 80 81 # Cache operations. 82 cache_extension = CUDAExtension( 83 name="vllm.cache_ops", 84 sources=["csrc/cache.cpp", "csrc/cache_kernels.cu"], 85 extra_compile_args={"cxx": CXX_FLAGS, "nvcc": NVCC_FLAGS}, 86 ) 87 ext_modules.append(cache_extension) 88 89 # Attention kernels. 90 attention_extension = CUDAExtension( 91 name="vllm.attention_ops", 92 sources=["csrc/attention.cpp", "csrc/attention/attention_kernels.cu"], 93 extra_compile_args={"cxx": CXX_FLAGS, "nvcc": NVCC_FLAGS}, 94 ) 95 ext_modules.append(attention_extension) 96 97 # Positional encoding kernels. 98 positional_encoding_extension = CUDAExtension( 99 name="vllm.pos_encoding_ops", 100 sources=["csrc/pos_encoding.cpp", "csrc/pos_encoding_kernels.cu"], 101 extra_compile_args={"cxx": CXX_FLAGS, "nvcc": NVCC_FLAGS}, 102 ) 103 ext_modules.append(positional_encoding_extension) 104 105 # Layer normalization kernels. 106 layernorm_extension = CUDAExtension( 107 name="vllm.layernorm_ops", 108 sources=["csrc/layernorm.cpp", "csrc/layernorm_kernels.cu"], 109 extra_compile_args={"cxx": CXX_FLAGS, "nvcc": NVCC_FLAGS}, 110 ) 111 ext_modules.append(layernorm_extension) 112 113 # Activation kernels. 114 activation_extension = CUDAExtension( 115 name="vllm.activation_ops", 116 sources=["csrc/activation.cpp", "csrc/activation_kernels.cu"], 117 extra_compile_args={"cxx": CXX_FLAGS, "nvcc": NVCC_FLAGS}, 118 ) 119 ext_modules.append(activation_extension) 120 121 122 def get_path(*filepath) -> str: 123 return os.path.join(ROOT_DIR, *filepath) 124 125 126 def find_version(filepath: str): 127 """Extract version information from the given filepath. 
128 129 Adapted from https://github.com/ray-project/ray/blob/0b190ee1160eeca9796bc091e07eaebf4c85b511/python/setup.py 130 """ 131 with open(filepath) as fp: 132 version_match = re.search( 133 r"^__version__ = ['\"]([^'\"]*)['\"]", fp.read(), re.M) 134 if version_match: 135 return version_match.group(1) 136 raise RuntimeError("Unable to find version string.") 137 138 139 def read_readme() -> str: 140 """Read the README file.""" 141 return io.open(get_path("README.md"), "r", encoding="utf-8").read() 142 143 144 def get_requirements() -> List[str]: 145 """Get Python package dependencies from requirements.txt.""" 146 with open(get_path("requirements.txt")) as f: 147 requirements = f.read().strip().split("\n") 148 return requirements 149 150 151 setuptools.setup( 152 name="vllm", 153 version=find_version(get_path("vllm", "__init__.py")), 154 author="vLLM Team", 155 license="Apache 2.0", 156 description="A high-throughput and memory-efficient inference and serving engine for LLMs", 157 long_description=read_readme(), 158 long_description_content_type="text/markdown", 159 url="https://github.com/vllm-project/vllm", 160 project_urls={ 161 "Homepage": "https://github.com/vllm-project/vllm", 162 "Documentation": "https://vllm.readthedocs.io/en/latest/", 163 }, 164 classifiers=[ 165 "Programming Language :: Python :: 3.8", 166 "Programming Language :: Python :: 3.9", 167 "Programming Language :: Python :: 3.10", 168 "License :: OSI Approved :: Apache Software License", 169 "Topic :: Scientific/Engineering :: Artificial Intelligence", 170 ], 171 packages=setuptools.find_packages( 172 exclude=("assets", "benchmarks", "csrc", "docs", "examples", "tests")), 173 python_requires=">=3.8", 174 install_requires=get_requirements(), 175 ext_modules=ext_modules, 176 cmdclass={"build_ext": BuildExtension}, 177 ) 178 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -22,7 +22,7 @@ if CUDA_HOME is None: raise RuntimeError( - f"Cannot find CUDA_HOME. CUDA must be available in order to build the package.") + f"Cannot find CUDA_HOME. CUDA must be available to build the package.") def get_nvcc_cuda_version(cuda_dir: str) -> Version: @@ -55,6 +55,14 @@ if 86 in compute_capabilities and nvcc_cuda_version < Version("11.1"): raise RuntimeError( "CUDA 11.1 or higher is required for GPUs with compute capability 8.6.") +if 89 in compute_capabilities and nvcc_cuda_version < Version("11.8"): + # CUDA 11.8 is required to generate the code targeting compute capability 8.9. + # However, GPUs with compute capability 8.9 can also run the code generated by + # the previous versions of CUDA 11 and targeting compute capability 8.0. + # Therefore, if CUDA 11.8 is not available, we target compute capability 8.0 + # instead of 8.9. + compute_capabilities.remove(89) + compute_capabilities.add(80) if 90 in compute_capabilities and nvcc_cuda_version < Version("11.8"): raise RuntimeError( "CUDA 11.8 or higher is required for GPUs with compute capability 9.0.") @@ -65,6 +73,7 @@ if nvcc_cuda_version >= Version("11.1"): compute_capabilities.add(86) if nvcc_cuda_version >= Version("11.8"): + compute_capabilities.add(89) compute_capabilities.add(90) # Add target compute capabilities to NVCC flags.
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -22,7 +22,7 @@\n \n if CUDA_HOME is None:\n raise RuntimeError(\n- f\"Cannot find CUDA_HOME. CUDA must be available in order to build the package.\")\n+ f\"Cannot find CUDA_HOME. CUDA must be available to build the package.\")\n \n \n def get_nvcc_cuda_version(cuda_dir: str) -> Version:\n@@ -55,6 +55,14 @@\n if 86 in compute_capabilities and nvcc_cuda_version < Version(\"11.1\"):\n raise RuntimeError(\n \"CUDA 11.1 or higher is required for GPUs with compute capability 8.6.\")\n+if 89 in compute_capabilities and nvcc_cuda_version < Version(\"11.8\"):\n+ # CUDA 11.8 is required to generate the code targeting compute capability 8.9.\n+ # However, GPUs with compute capability 8.9 can also run the code generated by\n+ # the previous versions of CUDA 11 and targeting compute capability 8.0.\n+ # Therefore, if CUDA 11.8 is not available, we target compute capability 8.0\n+ # instead of 8.9.\n+ compute_capabilities.remove(89)\n+ compute_capabilities.add(80)\n if 90 in compute_capabilities and nvcc_cuda_version < Version(\"11.8\"):\n raise RuntimeError(\n \"CUDA 11.8 or higher is required for GPUs with compute capability 9.0.\")\n@@ -65,6 +73,7 @@\n if nvcc_cuda_version >= Version(\"11.1\"):\n compute_capabilities.add(86)\n if nvcc_cuda_version >= Version(\"11.8\"):\n+ compute_capabilities.add(89)\n compute_capabilities.add(90)\n \n # Add target compute capabilities to NVCC flags.\n", "issue": " Issue Building vllm Wheel: Unsupported GPU Architecture 'compute_89'\nI'm trying to build the vllm package from source (pip install vllm), but I'm encountering an error related to CUDA and GPU architecture. I am using RTX 4090. Here are the crucial parts of the log:\r\n\r\n\r\n```\r\nBuilding wheels for collected packages: vllm\r\nBuilding wheel for vllm (pyproject.toml) ... error\r\nerror: subprocess-exited-with-error\r\n\r\n...\r\n\r\nbuilding 'vllm.cache_ops' extension\r\nEmitting ninja build file ...\r\n\r\n...\r\n\r\n/usr/local/cuda/bin/nvcc -I/tmp/pip-build-env-... (other flags and paths)\r\nnvcc fatal : Unsupported gpu architecture 'compute_89'\r\n```\r\n\r\nHere is my compute cluster:\r\n---\r\n6790722\r\nAge:15 min.[Remaining](https://vast.ai/faq#Instances):1d 19h71.69.80.64:44536-44750North Carolina, US1x RTX 409081.8 TFLOPS\r\nm:9663host:39244verified0.3/24.6 GB\r\n3506.9 GB/s\r\nROME2D16\r\nPCIE 4.0, 8x12.5 GB/s\r\nAMD EPYC 7662 64-Core Processor\r\n25.6/256 cpu\r\n4/52 GB\r\nnvme\r\n4972 MB/s6.9/21.0 GB\r\n516.4 Mbps514.0 MbpsMax CUDA: 12.2\r\n \r\nGPU: 0% 35C , CPU: 4% Status: success, running pytorch/pytorch_2.0.1-cuda11.7-cudnn8-devel/jupyter\r\n$0.263/hr \r\nCurrent State : running Next State : running\r\nDocker Image: pytorch/pytorch:2.0.1-cuda11.7-cudnn8-devel\r\n\r\nImage Runtype : jupyter_direc ssh_direc ssh_proxy\r\n\r\nEnvironment :\r\n{\r\n \"JUPYTER_DIR\": \"/\"\r\n}\r\n\r\nDocker Args:\r\n\r\nOnstart: env | grep _ >> /etc/environment; echo 'starting up'\r\n\r\n\r\n---\r\n\r\nIt seems that the nvcc compiler doesn't support the 'compute_89' GPU architecture. I'm using CUDA version 11.7 on a RTX 4090.\r\n\r\nHas anyone encountered this issue before? 
Any suggestions on how to resolve it?\n", "before_files": [{"content": "import io\nimport os\nimport re\nimport subprocess\nfrom typing import List, Set\n\nfrom packaging.version import parse, Version\nimport setuptools\nimport torch\nfrom torch.utils.cpp_extension import BuildExtension, CUDAExtension, CUDA_HOME\n\nROOT_DIR = os.path.dirname(__file__)\n\n# Compiler flags.\nCXX_FLAGS = [\"-g\", \"-O2\", \"-std=c++17\"]\n# TODO(woosuk): Should we use -O3?\nNVCC_FLAGS = [\"-O2\", \"-std=c++17\"]\n\nABI = 1 if torch._C._GLIBCXX_USE_CXX11_ABI else 0\nCXX_FLAGS += [f\"-D_GLIBCXX_USE_CXX11_ABI={ABI}\"]\nNVCC_FLAGS += [f\"-D_GLIBCXX_USE_CXX11_ABI={ABI}\"]\n\nif CUDA_HOME is None:\n raise RuntimeError(\n f\"Cannot find CUDA_HOME. CUDA must be available in order to build the package.\")\n\n\ndef get_nvcc_cuda_version(cuda_dir: str) -> Version:\n \"\"\"Get the CUDA version from nvcc.\n\n Adapted from https://github.com/NVIDIA/apex/blob/8b7a1ff183741dd8f9b87e7bafd04cfde99cea28/setup.py\n \"\"\"\n nvcc_output = subprocess.check_output([cuda_dir + \"/bin/nvcc\", \"-V\"],\n universal_newlines=True)\n output = nvcc_output.split()\n release_idx = output.index(\"release\") + 1\n nvcc_cuda_version = parse(output[release_idx].split(\",\")[0])\n return nvcc_cuda_version\n\n\n# Collect the compute capabilities of all available GPUs.\ndevice_count = torch.cuda.device_count()\ncompute_capabilities: Set[int] = set()\nfor i in range(device_count):\n major, minor = torch.cuda.get_device_capability(i)\n if major < 7:\n raise RuntimeError(\n \"GPUs with compute capability less than 7.0 are not supported.\")\n compute_capabilities.add(major * 10 + minor)\n\n# Validate the NVCC CUDA version.\nnvcc_cuda_version = get_nvcc_cuda_version(CUDA_HOME)\nif nvcc_cuda_version < Version(\"11.0\"):\n raise RuntimeError(\"CUDA 11.0 or higher is required to build the package.\")\nif 86 in compute_capabilities and nvcc_cuda_version < Version(\"11.1\"):\n raise RuntimeError(\n \"CUDA 11.1 or higher is required for GPUs with compute capability 8.6.\")\nif 90 in compute_capabilities and nvcc_cuda_version < Version(\"11.8\"):\n raise RuntimeError(\n \"CUDA 11.8 or higher is required for GPUs with compute capability 9.0.\")\n\n# If no GPU is available, add all supported compute capabilities.\nif not compute_capabilities:\n compute_capabilities = {70, 75, 80}\n if nvcc_cuda_version >= Version(\"11.1\"):\n compute_capabilities.add(86)\n if nvcc_cuda_version >= Version(\"11.8\"):\n compute_capabilities.add(90)\n\n# Add target compute capabilities to NVCC flags.\nfor capability in compute_capabilities:\n NVCC_FLAGS += [\"-gencode\", f\"arch=compute_{capability},code=sm_{capability}\"]\n\n# Use NVCC threads to parallelize the build.\nif nvcc_cuda_version >= Version(\"11.2\"):\n num_threads = min(os.cpu_count(), 8)\n NVCC_FLAGS += [\"--threads\", str(num_threads)]\n\next_modules = []\n\n# Cache operations.\ncache_extension = CUDAExtension(\n name=\"vllm.cache_ops\",\n sources=[\"csrc/cache.cpp\", \"csrc/cache_kernels.cu\"],\n extra_compile_args={\"cxx\": CXX_FLAGS, \"nvcc\": NVCC_FLAGS},\n)\next_modules.append(cache_extension)\n\n# Attention kernels.\nattention_extension = CUDAExtension(\n name=\"vllm.attention_ops\",\n sources=[\"csrc/attention.cpp\", \"csrc/attention/attention_kernels.cu\"],\n extra_compile_args={\"cxx\": CXX_FLAGS, \"nvcc\": NVCC_FLAGS},\n)\next_modules.append(attention_extension)\n\n# Positional encoding kernels.\npositional_encoding_extension = CUDAExtension(\n name=\"vllm.pos_encoding_ops\",\n 
sources=[\"csrc/pos_encoding.cpp\", \"csrc/pos_encoding_kernels.cu\"],\n extra_compile_args={\"cxx\": CXX_FLAGS, \"nvcc\": NVCC_FLAGS},\n)\next_modules.append(positional_encoding_extension)\n\n# Layer normalization kernels.\nlayernorm_extension = CUDAExtension(\n name=\"vllm.layernorm_ops\",\n sources=[\"csrc/layernorm.cpp\", \"csrc/layernorm_kernels.cu\"],\n extra_compile_args={\"cxx\": CXX_FLAGS, \"nvcc\": NVCC_FLAGS},\n)\next_modules.append(layernorm_extension)\n\n# Activation kernels.\nactivation_extension = CUDAExtension(\n name=\"vllm.activation_ops\",\n sources=[\"csrc/activation.cpp\", \"csrc/activation_kernels.cu\"],\n extra_compile_args={\"cxx\": CXX_FLAGS, \"nvcc\": NVCC_FLAGS},\n)\next_modules.append(activation_extension)\n\n\ndef get_path(*filepath) -> str:\n return os.path.join(ROOT_DIR, *filepath)\n\n\ndef find_version(filepath: str):\n \"\"\"Extract version information from the given filepath.\n\n Adapted from https://github.com/ray-project/ray/blob/0b190ee1160eeca9796bc091e07eaebf4c85b511/python/setup.py\n \"\"\"\n with open(filepath) as fp:\n version_match = re.search(\n r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\", fp.read(), re.M)\n if version_match:\n return version_match.group(1)\n raise RuntimeError(\"Unable to find version string.\")\n\n\ndef read_readme() -> str:\n \"\"\"Read the README file.\"\"\"\n return io.open(get_path(\"README.md\"), \"r\", encoding=\"utf-8\").read()\n\n\ndef get_requirements() -> List[str]:\n \"\"\"Get Python package dependencies from requirements.txt.\"\"\"\n with open(get_path(\"requirements.txt\")) as f:\n requirements = f.read().strip().split(\"\\n\")\n return requirements\n\n\nsetuptools.setup(\n name=\"vllm\",\n version=find_version(get_path(\"vllm\", \"__init__.py\")),\n author=\"vLLM Team\",\n license=\"Apache 2.0\",\n description=\"A high-throughput and memory-efficient inference and serving engine for LLMs\",\n long_description=read_readme(),\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/vllm-project/vllm\",\n project_urls={\n \"Homepage\": \"https://github.com/vllm-project/vllm\",\n \"Documentation\": \"https://vllm.readthedocs.io/en/latest/\",\n },\n classifiers=[\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n packages=setuptools.find_packages(\n exclude=(\"assets\", \"benchmarks\", \"csrc\", \"docs\", \"examples\", \"tests\")),\n python_requires=\">=3.8\",\n install_requires=get_requirements(),\n ext_modules=ext_modules,\n cmdclass={\"build_ext\": BuildExtension},\n)\n", "path": "setup.py"}]}
num_tokens_prompt: 3,154
num_tokens_diff: 415
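The vllm patch above works around `nvcc` toolkits older than CUDA 11.8 by retargeting compute capability 8.9 to 8.0, which Ada GPUs can still execute. The same fallback restated as a standalone helper (the function name is assumed for illustration):

```python
from packaging.version import Version

def adjust_compute_capabilities(capabilities, nvcc_cuda_version):
    capabilities = set(capabilities)
    if 89 in capabilities and nvcc_cuda_version < Version("11.8"):
        # sm_89 code generation needs CUDA 11.8+; older toolkits can still
        # emit sm_80 binaries, which RTX 40xx GPUs run fine.
        capabilities.discard(89)
        capabilities.add(80)
    return capabilities

# adjust_compute_capabilities({89}, Version("11.7")) == {80}
# adjust_compute_capabilities({89}, Version("12.1")) == {89}
```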
problem_id: gh_patches_debug_6308
source: rasdani/github-patches
task_type: git_diff
in_source_id: DataBiosphere__toil-2583
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Env value of TOIL_DOCKER_REGISTRY is not honored in the docker image When a docker image is build with a custom `TOIL_DOCKER_REGISTRY`, the built docker image still looks at `quay.io/ucsc_cgl` for pulling related images (e.g. `mtail`, `grafana`, etc). The fix is to put the actual registry in `version_template.py`. </issue> <code> [start of version_template.py] 1 # Copyright (C) 2015-2018 Regents of the University of California 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 """This script is a template for src/toil/version.py. Running it without arguments echoes all 16 globals, i.e. module attributes. Constant assignments will be echoed verbatim while callables 17 will be invoked and their result echoed as an assignment using the function name as the left-hand 18 side and the return value of the function as right-hand side. To prevent a module attribute from 19 being echoed, start or end the attribute name with an underscore. To print the value of a single 20 symbol, pass the name of that attribute to the script as a command line argument. You can also 21 import the expand_ function and invoke it directly with either no or exactly one argument.""" 22 23 # Note to maintainers: 24 # 25 # - don't import at module level unless you want the imported value to be included in the output 26 # - only import from the Python standard run-time library (you can't have any dependencies) 27 28 29 baseVersion = '3.20.0a1' 30 cgcloudVersion = '1.6.0a1.dev393' 31 dockerRegistry = 'quay.io/ucsc_cgl' 32 dockerName = 'toil' 33 34 35 def version(): 36 """ 37 A version identifier that includes the full-legth commit SHA1 and an optional suffix to 38 indicate that the working copy is dirty. 39 """ 40 return _version() 41 42 43 def shortVersion(): 44 """ 45 A version identifier that includes the abbreviated commit SHA1 and an optional suffix to 46 indicate that the working copy is dirty. 47 """ 48 return _version(shorten=True) 49 50 51 def _version(shorten=False): 52 return '-'.join(filter(None, [distVersion(), 53 currentCommit()[:7 if shorten else None], 54 ('dirty' if dirty() else None)])) 55 56 57 def distVersion(): 58 """ 59 The distribution version identifying a published release on PyPI. 60 """ 61 from pkg_resources import parse_version 62 build_number = buildNumber() 63 parsedBaseVersion = parse_version(baseVersion) 64 if isinstance(parsedBaseVersion, tuple): 65 raise RuntimeError("Setuptools version 8.0 or newer required. Update by running " 66 "'pip install setuptools --upgrade'") 67 68 if build_number is not None and parsedBaseVersion.is_prerelease: 69 return baseVersion + '.dev' + build_number 70 else: 71 return baseVersion 72 73 74 def dockerTag(): 75 """ 76 The primary tag of the Docker image for the appliance. This uniquely identifies the appliance 77 image. 
78 """ 79 return version() 80 81 82 def dockerShortTag(): 83 """ 84 A secondary, shortened form of :func:`dockerTag` with which to tag the appliance image for 85 convenience. 86 """ 87 return shortVersion() 88 89 90 def dockerMinimalTag(): 91 """ 92 A minimal tag with which to tag the appliance image for convenience. Does not include 93 information about the git commit or working copy dirtyness. 94 """ 95 return distVersion() 96 97 98 def buildNumber(): 99 """ 100 The Jenkins build number, if defined, else None. 101 """ 102 import os 103 return os.getenv('BUILD_NUMBER') 104 105 106 def currentCommit(): 107 from subprocess import check_output 108 try: 109 output = check_output('git log --pretty=oneline -n 1 -- $(pwd)', shell=True).decode('utf-8').split()[0] 110 except: 111 # Return this we are not in a git environment. 112 return '000' 113 if isinstance(output, bytes): 114 return output.decode('utf-8') 115 return str(output) 116 117 118 def dirty(): 119 from subprocess import call 120 try: 121 return 0 != call('(git diff --exit-code ' 122 '&& git diff --cached --exit-code) > /dev/null', shell=True) 123 except: 124 return False # In case the git call fails. 125 126 127 def expand_(name=None): 128 variables = {k: v for k, v in globals().items() 129 if not k.startswith('_') and not k.endswith('_')} 130 131 def resolve(k): 132 v = variables[k] 133 if callable(v): 134 v = v() 135 return v 136 137 if name is None: 138 return ''.join("%s = %s\n" % (k, repr(resolve(k))) for k, v in variables.items()) 139 else: 140 return resolve(name) 141 142 143 def _main(): 144 import sys 145 sys.stdout.write(expand_(*sys.argv[1:])) 146 147 148 if __name__ == '__main__': 149 _main() 150 [end of version_template.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/version_template.py b/version_template.py --- a/version_template.py +++ b/version_template.py @@ -28,7 +28,6 @@ baseVersion = '3.20.0a1' cgcloudVersion = '1.6.0a1.dev393' -dockerRegistry = 'quay.io/ucsc_cgl' dockerName = 'toil' @@ -115,6 +114,11 @@ return str(output) +def dockerRegistry(): + import os + return os.getenv('TOIL_DOCKER_REGISTRY', 'quay.io/ucsc_cgl') + + def dirty(): from subprocess import call try:
{"golden_diff": "diff --git a/version_template.py b/version_template.py\n--- a/version_template.py\n+++ b/version_template.py\n@@ -28,7 +28,6 @@\n \n baseVersion = '3.20.0a1'\n cgcloudVersion = '1.6.0a1.dev393'\n-dockerRegistry = 'quay.io/ucsc_cgl'\n dockerName = 'toil'\n \n \n@@ -115,6 +114,11 @@\n return str(output)\n \n \n+def dockerRegistry():\n+ import os\n+ return os.getenv('TOIL_DOCKER_REGISTRY', 'quay.io/ucsc_cgl')\n+\n+\n def dirty():\n from subprocess import call\n try:\n", "issue": "Env value of TOIL_DOCKER_REGISTRY is not honored in the docker image\nWhen a docker image is build with a custom `TOIL_DOCKER_REGISTRY`, the built docker image still looks at `quay.io/ucsc_cgl` for pulling related images (e.g. `mtail`, `grafana`, etc). The fix is to put the actual registry in `version_template.py`.\n", "before_files": [{"content": "# Copyright (C) 2015-2018 Regents of the University of California\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"This script is a template for src/toil/version.py. Running it without arguments echoes all\nglobals, i.e. module attributes. Constant assignments will be echoed verbatim while callables\nwill be invoked and their result echoed as an assignment using the function name as the left-hand\nside and the return value of the function as right-hand side. To prevent a module attribute from\nbeing echoed, start or end the attribute name with an underscore. To print the value of a single\nsymbol, pass the name of that attribute to the script as a command line argument. You can also\nimport the expand_ function and invoke it directly with either no or exactly one argument.\"\"\"\n\n# Note to maintainers:\n#\n# - don't import at module level unless you want the imported value to be included in the output\n# - only import from the Python standard run-time library (you can't have any dependencies)\n\n\nbaseVersion = '3.20.0a1'\ncgcloudVersion = '1.6.0a1.dev393'\ndockerRegistry = 'quay.io/ucsc_cgl'\ndockerName = 'toil'\n\n\ndef version():\n \"\"\"\n A version identifier that includes the full-legth commit SHA1 and an optional suffix to\n indicate that the working copy is dirty.\n \"\"\"\n return _version()\n\n\ndef shortVersion():\n \"\"\"\n A version identifier that includes the abbreviated commit SHA1 and an optional suffix to\n indicate that the working copy is dirty.\n \"\"\"\n return _version(shorten=True)\n\n\ndef _version(shorten=False):\n return '-'.join(filter(None, [distVersion(),\n currentCommit()[:7 if shorten else None],\n ('dirty' if dirty() else None)]))\n\n\ndef distVersion():\n \"\"\"\n The distribution version identifying a published release on PyPI.\n \"\"\"\n from pkg_resources import parse_version\n build_number = buildNumber()\n parsedBaseVersion = parse_version(baseVersion)\n if isinstance(parsedBaseVersion, tuple):\n raise RuntimeError(\"Setuptools version 8.0 or newer required. 
Update by running \"\n \"'pip install setuptools --upgrade'\")\n\n if build_number is not None and parsedBaseVersion.is_prerelease:\n return baseVersion + '.dev' + build_number\n else:\n return baseVersion\n\n\ndef dockerTag():\n \"\"\"\n The primary tag of the Docker image for the appliance. This uniquely identifies the appliance\n image.\n \"\"\"\n return version()\n\n\ndef dockerShortTag():\n \"\"\"\n A secondary, shortened form of :func:`dockerTag` with which to tag the appliance image for\n convenience.\n \"\"\"\n return shortVersion()\n\n\ndef dockerMinimalTag():\n \"\"\"\n A minimal tag with which to tag the appliance image for convenience. Does not include\n information about the git commit or working copy dirtyness.\n \"\"\"\n return distVersion()\n\n\ndef buildNumber():\n \"\"\"\n The Jenkins build number, if defined, else None.\n \"\"\"\n import os\n return os.getenv('BUILD_NUMBER')\n\n\ndef currentCommit():\n from subprocess import check_output\n try:\n output = check_output('git log --pretty=oneline -n 1 -- $(pwd)', shell=True).decode('utf-8').split()[0]\n except:\n # Return this we are not in a git environment.\n return '000'\n if isinstance(output, bytes):\n return output.decode('utf-8')\n return str(output)\n\n\ndef dirty():\n from subprocess import call\n try:\n return 0 != call('(git diff --exit-code '\n '&& git diff --cached --exit-code) > /dev/null', shell=True)\n except:\n return False # In case the git call fails.\n\n\ndef expand_(name=None):\n variables = {k: v for k, v in globals().items()\n if not k.startswith('_') and not k.endswith('_')}\n\n def resolve(k):\n v = variables[k]\n if callable(v):\n v = v()\n return v\n\n if name is None:\n return ''.join(\"%s = %s\\n\" % (k, repr(resolve(k))) for k, v in variables.items())\n else:\n return resolve(name)\n\n\ndef _main():\n import sys\n sys.stdout.write(expand_(*sys.argv[1:]))\n\n\nif __name__ == '__main__':\n _main()\n", "path": "version_template.py"}]}
num_tokens_prompt: 2,040
num_tokens_diff: 157
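The toil patch above is small, but the key point is easy to miss: the registry moves from a module-level constant (evaluated once, when the version template is expanded) to a function, so the environment variable is read at call time. The new function as it appears in the golden diff:

```python
import os

def dockerRegistry():
    # Resolved when called, so a TOIL_DOCKER_REGISTRY set in the running
    # environment (e.g. inside the built image) takes effect.
    return os.getenv('TOIL_DOCKER_REGISTRY', 'quay.io/ucsc_cgl')
```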
problem_id: gh_patches_debug_21900
source: rasdani/github-patches
task_type: git_diff
in_source_id: fidals__shopelectro-928
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Get rid of pack prices multiplication PO said that 1C stock now is giving already multiplied prices </issue> <code> [start of shopelectro/management/commands/_update_catalog/update_pack.py] 1 """ 2 Update Product.in_pack and prices. 3 4 The update_catalog command always resets product prices to per unit format, so: 5 1. Parse in pack quantity from Tag.name and save it to Product.in_pack 6 2. Multiply product prices by in_pack value and save. 7 """ 8 import logging 9 10 from django.conf import settings 11 from django.db import models, transaction 12 13 from catalog.models_expressions import Substring 14 15 from shopelectro.exception import UpdateCatalogException 16 from shopelectro.models import TagQuerySet, TagGroup 17 18 logger = logging.getLogger(__name__) 19 PRICES = ['price', 'purchase_price', 'wholesale_small', 'wholesale_medium', 'wholesale_large'] 20 21 22 def find_pack_group() -> TagGroup: 23 try: 24 pack_group = TagGroup.objects.get_pack() 25 except TagGroup.DoesNotExist as error: 26 raise UpdateCatalogException( 27 'Update the PACK_GROUP_UUID django settings variable to set the new relevant UUID. ' 28 + str(error) 29 ) 30 if settings.PACK_GROUP_NAME.lower() not in pack_group.name.lower(): 31 raise UpdateCatalogException( 32 'The pack group name isn\'t matched with the set name:' 33 f' Pack group name: {pack_group.name}\n' 34 f' Set name: {settings.PACK_GROUP_NAME}\n' 35 'Update the PACK_GROUP_NAME django settings variable to set the new relevant name.' 36 ) 37 38 return pack_group 39 40 41 def update_in_packs(packs: TagQuerySet): 42 """Parse and save in pack quantity values.""" 43 packs = ( 44 packs 45 .annotate( 46 in_pack_str=Substring( 47 models.F('name'), 48 models.Value('[0-9]+\+?[0-9]*'))) 49 .exclude(in_pack_str__exact='') 50 ) 51 52 for pack in packs: 53 in_pack = sum(map(int, pack.in_pack_str.split('+'))) 54 pack.products.all().update(in_pack=max(in_pack, 1)) 55 56 57 def update_prices(packs: TagQuerySet): 58 """Multiply product prices on in pack quantity.""" 59 fields_to_update = {} 60 for price in PRICES: 61 fields_to_update[price] = models.F(price) * models.F('in_pack') 62 63 with transaction.atomic(): 64 packs.products().update(**fields_to_update) 65 66 67 def main(*args, **kwargs): 68 packs = find_pack_group().tags.all().prefetch_related('products') 69 update_in_packs(packs) 70 update_prices(packs) 71 [end of shopelectro/management/commands/_update_catalog/update_pack.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/shopelectro/management/commands/_update_catalog/update_pack.py b/shopelectro/management/commands/_update_catalog/update_pack.py --- a/shopelectro/management/commands/_update_catalog/update_pack.py +++ b/shopelectro/management/commands/_update_catalog/update_pack.py @@ -8,10 +8,9 @@ import logging from django.conf import settings -from django.db import models, transaction +from django.db import models from catalog.models_expressions import Substring - from shopelectro.exception import UpdateCatalogException from shopelectro.models import TagQuerySet, TagGroup @@ -54,17 +53,6 @@ pack.products.all().update(in_pack=max(in_pack, 1)) -def update_prices(packs: TagQuerySet): - """Multiply product prices on in pack quantity.""" - fields_to_update = {} - for price in PRICES: - fields_to_update[price] = models.F(price) * models.F('in_pack') - - with transaction.atomic(): - packs.products().update(**fields_to_update) - - def main(*args, **kwargs): packs = find_pack_group().tags.all().prefetch_related('products') update_in_packs(packs) - update_prices(packs)
{"golden_diff": "diff --git a/shopelectro/management/commands/_update_catalog/update_pack.py b/shopelectro/management/commands/_update_catalog/update_pack.py\n--- a/shopelectro/management/commands/_update_catalog/update_pack.py\n+++ b/shopelectro/management/commands/_update_catalog/update_pack.py\n@@ -8,10 +8,9 @@\n import logging\n \n from django.conf import settings\n-from django.db import models, transaction\n+from django.db import models\n \n from catalog.models_expressions import Substring\n-\n from shopelectro.exception import UpdateCatalogException\n from shopelectro.models import TagQuerySet, TagGroup\n \n@@ -54,17 +53,6 @@\n pack.products.all().update(in_pack=max(in_pack, 1))\n \n \n-def update_prices(packs: TagQuerySet):\n- \"\"\"Multiply product prices on in pack quantity.\"\"\"\n- fields_to_update = {}\n- for price in PRICES:\n- fields_to_update[price] = models.F(price) * models.F('in_pack')\n-\n- with transaction.atomic():\n- packs.products().update(**fields_to_update)\n-\n-\n def main(*args, **kwargs):\n packs = find_pack_group().tags.all().prefetch_related('products')\n update_in_packs(packs)\n- update_prices(packs)\n", "issue": "Get rid of pack prices multiplication\nPO said that 1C stock now is giving already multiplied prices\n", "before_files": [{"content": "\"\"\"\nUpdate Product.in_pack and prices.\n\nThe update_catalog command always resets product prices to per unit format, so:\n1. Parse in pack quantity from Tag.name and save it to Product.in_pack\n2. Multiply product prices by in_pack value and save.\n\"\"\"\nimport logging\n\nfrom django.conf import settings\nfrom django.db import models, transaction\n\nfrom catalog.models_expressions import Substring\n\nfrom shopelectro.exception import UpdateCatalogException\nfrom shopelectro.models import TagQuerySet, TagGroup\n\nlogger = logging.getLogger(__name__)\nPRICES = ['price', 'purchase_price', 'wholesale_small', 'wholesale_medium', 'wholesale_large']\n\n\ndef find_pack_group() -> TagGroup:\n try:\n pack_group = TagGroup.objects.get_pack()\n except TagGroup.DoesNotExist as error:\n raise UpdateCatalogException(\n 'Update the PACK_GROUP_UUID django settings variable to set the new relevant UUID. '\n + str(error)\n )\n if settings.PACK_GROUP_NAME.lower() not in pack_group.name.lower():\n raise UpdateCatalogException(\n 'The pack group name isn\\'t matched with the set name:'\n f' Pack group name: {pack_group.name}\\n'\n f' Set name: {settings.PACK_GROUP_NAME}\\n'\n 'Update the PACK_GROUP_NAME django settings variable to set the new relevant name.'\n )\n\n return pack_group\n\n\ndef update_in_packs(packs: TagQuerySet):\n \"\"\"Parse and save in pack quantity values.\"\"\"\n packs = (\n packs\n .annotate(\n in_pack_str=Substring(\n models.F('name'),\n models.Value('[0-9]+\\+?[0-9]*')))\n .exclude(in_pack_str__exact='')\n )\n\n for pack in packs:\n in_pack = sum(map(int, pack.in_pack_str.split('+')))\n pack.products.all().update(in_pack=max(in_pack, 1))\n\n\ndef update_prices(packs: TagQuerySet):\n \"\"\"Multiply product prices on in pack quantity.\"\"\"\n fields_to_update = {}\n for price in PRICES:\n fields_to_update[price] = models.F(price) * models.F('in_pack')\n\n with transaction.atomic():\n packs.products().update(**fields_to_update)\n\n\ndef main(*args, **kwargs):\n packs = find_pack_group().tags.all().prefetch_related('products')\n update_in_packs(packs)\n update_prices(packs)\n", "path": "shopelectro/management/commands/_update_catalog/update_pack.py"}]}
num_tokens_prompt: 1,225
num_tokens_diff: 282
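For the shopelectro row above, the golden diff drops the price multiplication step and leaves only the in-pack quantity parsing. A framework-free sketch of that parsing, kept separate from Django; the function name and the example tag names are illustrative, while the regex and the summing rule follow the model code quoted in `before_files`:

```python
import re

# Same pattern as the Substring expression in update_in_packs: digits,
# an optional '+', then more digits (e.g. '10' or '2+2').
PACK_PATTERN = re.compile(r"[0-9]+\+?[0-9]*")


def parse_in_pack(tag_name: str) -> int:
    """Extract the in-pack quantity from a tag name, defaulting to a single unit."""
    match = PACK_PATTERN.search(tag_name)
    if match is None:
        return 1
    # '2+2' style quantities are summed, as in sum(map(int, in_pack_str.split('+')))
    quantity = sum(int(part) for part in match.group().split("+") if part)
    return max(quantity, 1)


assert parse_in_pack("Speaker set 2+2") == 4
assert parse_in_pack("No quantity here") == 1
```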
problem_id: gh_patches_debug_17573
source: rasdani/github-patches
task_type: git_diff
in_source_id: huggingface__accelerate-2388
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Python 3.12 distutils deprecation error ### System Info ```Shell accelerate 0.26.1, Windows 11, Python 3.12.1 ``` ### Information - [ ] The official example scripts - [x] My own modified scripts ### Tasks - [ ] One of the scripts in the examples/ folder of Accelerate or an officially supported `no_trainer` script in the `examples` folder of the `transformers` repo (such as `run_no_trainer_glue.py`) - [X] My own task or dataset (give details below) ### Reproduction To reproduce behaviour: 1. In Python 3.12 install latest accelerate with ` pip3 install accelerate --upgrade ` 2. Try to initialise accelerate with: ```python from accelerate import Accelerator accelerator = Accelerator() ``` This results in the error caused by https://github.com/huggingface/accelerate/blob/f88661b5d9f4977b5a1555a92e64ef3cd359cbc9/src/accelerate/utils/environment.py#L19 ![image](https://github.com/huggingface/accelerate/assets/7803161/8b88d142-cf67-43b5-9b02-3f9346baa02a) The fundamental issue is deprecation of distutils as part of https://peps.python.org/pep-0632/ ### Expected behavior Accelerate should not depend on deprecated package and be compatible with Python 3.12+ </issue> <code> [start of src/accelerate/utils/environment.py] 1 # Copyright 2022 The HuggingFace Team. All rights reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 import os 16 import platform 17 import subprocess 18 import sys 19 from distutils import spawn 20 from typing import Dict 21 22 import torch 23 24 25 def str_to_bool(value) -> int: 26 """ 27 Converts a string representation of truth to `True` (1) or `False` (0). 28 29 True values are `y`, `yes`, `t`, `true`, `on`, and `1`; False value are `n`, `no`, `f`, `false`, `off`, and `0`; 30 """ 31 value = value.lower() 32 if value in ("y", "yes", "t", "true", "on", "1"): 33 return 1 34 elif value in ("n", "no", "f", "false", "off", "0"): 35 return 0 36 else: 37 raise ValueError(f"invalid truth value {value}") 38 39 40 def get_int_from_env(env_keys, default): 41 """Returns the first positive env value found in the `env_keys` list or the default.""" 42 for e in env_keys: 43 val = int(os.environ.get(e, -1)) 44 if val >= 0: 45 return val 46 return default 47 48 49 def parse_flag_from_env(key, default=False): 50 """Returns truthy value for `key` from the env if available else the default.""" 51 value = os.environ.get(key, str(default)) 52 return str_to_bool(value) == 1 # As its name indicates `str_to_bool` actually returns an int... 53 54 55 def parse_choice_from_env(key, default="no"): 56 value = os.environ.get(key, str(default)) 57 return value 58 59 60 def are_libraries_initialized(*library_names: str) -> Dict[str, bool]: 61 """ 62 Checks if any of `library_names` are imported in the environment. Will return results as a `key:bool` pair. 
63 """ 64 return [lib_name for lib_name in library_names if lib_name in sys.modules.keys()] 65 66 67 def get_gpu_info(): 68 """ 69 Gets GPU count and names using `nvidia-smi` instead of torch to not initialize CUDA. 70 71 Largely based on the `gputil` library. 72 """ 73 if platform.system() == "Windows": 74 # If platform is Windows and nvidia-smi can't be found in path 75 # try from systemd rive with default installation path 76 command = spawn.find_executable("nvidia-smi") 77 if command is None: 78 command = "%s\\Program Files\\NVIDIA Corporation\\NVSMI\\nvidia-smi.exe" % os.environ["systemdrive"] 79 else: 80 command = "nvidia-smi" 81 # Returns as list of `n` GPUs and their names 82 output = subprocess.check_output( 83 [command, "--query-gpu=count,name", "--format=csv,noheader"], universal_newlines=True 84 ) 85 output = output.strip() 86 gpus = output.split(os.linesep) 87 # Get names from output 88 gpu_count = len(gpus) 89 gpu_names = [gpu.split(",")[1].strip() for gpu in gpus] 90 return gpu_names, gpu_count 91 92 93 def check_cuda_p2p_ib_support(): 94 """ 95 Checks if the devices being used have issues with P2P and IB communications, namely any consumer GPU hardware after 96 the 3090. 97 98 Noteably uses `nvidia-smi` instead of torch to not initialize CUDA. 99 """ 100 try: 101 device_names, device_count = get_gpu_info() 102 # As new consumer GPUs get released, add them to `unsupported_devices`` 103 unsupported_devices = {"RTX 40"} 104 if device_count > 1: 105 if any( 106 unsupported_device in device_name 107 for device_name in device_names 108 for unsupported_device in unsupported_devices 109 ): 110 return False 111 except Exception: 112 pass 113 return True 114 115 116 def check_fp8_capability(): 117 """ 118 Checks if all the current GPUs available support FP8. 119 120 Notably must initialize `torch.cuda` to check. 121 """ 122 cuda_device_capacity = torch.cuda.get_device_capability() 123 return cuda_device_capacity >= (8, 9) 124 [end of src/accelerate/utils/environment.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/accelerate/utils/environment.py b/src/accelerate/utils/environment.py --- a/src/accelerate/utils/environment.py +++ b/src/accelerate/utils/environment.py @@ -16,7 +16,7 @@ import platform import subprocess import sys -from distutils import spawn +from shutil import which from typing import Dict import torch @@ -72,8 +72,8 @@ """ if platform.system() == "Windows": # If platform is Windows and nvidia-smi can't be found in path - # try from systemd rive with default installation path - command = spawn.find_executable("nvidia-smi") + # try from systemd drive with default installation path + command = which("nvidia-smi") if command is None: command = "%s\\Program Files\\NVIDIA Corporation\\NVSMI\\nvidia-smi.exe" % os.environ["systemdrive"] else:
{"golden_diff": "diff --git a/src/accelerate/utils/environment.py b/src/accelerate/utils/environment.py\n--- a/src/accelerate/utils/environment.py\n+++ b/src/accelerate/utils/environment.py\n@@ -16,7 +16,7 @@\n import platform\n import subprocess\n import sys\n-from distutils import spawn\n+from shutil import which\n from typing import Dict\n \n import torch\n@@ -72,8 +72,8 @@\n \"\"\"\n if platform.system() == \"Windows\":\n # If platform is Windows and nvidia-smi can't be found in path\n- # try from systemd rive with default installation path\n- command = spawn.find_executable(\"nvidia-smi\")\n+ # try from systemd drive with default installation path\n+ command = which(\"nvidia-smi\")\n if command is None:\n command = \"%s\\\\Program Files\\\\NVIDIA Corporation\\\\NVSMI\\\\nvidia-smi.exe\" % os.environ[\"systemdrive\"]\n else:\n", "issue": "Python 3.12 distutils deprecation error\n### System Info\n\n```Shell\naccelerate 0.26.1, Windows 11, Python 3.12.1\n```\n\n\n### Information\n\n- [ ] The official example scripts\n- [x] My own modified scripts\n\n### Tasks\n\n- [ ] One of the scripts in the examples/ folder of Accelerate or an officially supported `no_trainer` script in the `examples` folder of the `transformers` repo (such as `run_no_trainer_glue.py`)\n- [X] My own task or dataset (give details below)\n\n### Reproduction\n\nTo reproduce behaviour:\r\n1. In Python 3.12 install latest accelerate with\r\n` pip3 install accelerate --upgrade `\r\n2. Try to initialise accelerate with:\r\n```python\r\nfrom accelerate import Accelerator\r\naccelerator = Accelerator()\r\n```\r\n\r\nThis results in the error caused by https://github.com/huggingface/accelerate/blob/f88661b5d9f4977b5a1555a92e64ef3cd359cbc9/src/accelerate/utils/environment.py#L19\r\n\r\n![image](https://github.com/huggingface/accelerate/assets/7803161/8b88d142-cf67-43b5-9b02-3f9346baa02a)\r\nThe fundamental issue is deprecation of distutils as part of https://peps.python.org/pep-0632/\n\n### Expected behavior\n\nAccelerate should not depend on deprecated package and be compatible with Python 3.12+\n", "before_files": [{"content": "# Copyright 2022 The HuggingFace Team. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport platform\nimport subprocess\nimport sys\nfrom distutils import spawn\nfrom typing import Dict\n\nimport torch\n\n\ndef str_to_bool(value) -> int:\n \"\"\"\n Converts a string representation of truth to `True` (1) or `False` (0).\n\n True values are `y`, `yes`, `t`, `true`, `on`, and `1`; False value are `n`, `no`, `f`, `false`, `off`, and `0`;\n \"\"\"\n value = value.lower()\n if value in (\"y\", \"yes\", \"t\", \"true\", \"on\", \"1\"):\n return 1\n elif value in (\"n\", \"no\", \"f\", \"false\", \"off\", \"0\"):\n return 0\n else:\n raise ValueError(f\"invalid truth value {value}\")\n\n\ndef get_int_from_env(env_keys, default):\n \"\"\"Returns the first positive env value found in the `env_keys` list or the default.\"\"\"\n for e in env_keys:\n val = int(os.environ.get(e, -1))\n if val >= 0:\n return val\n return default\n\n\ndef parse_flag_from_env(key, default=False):\n \"\"\"Returns truthy value for `key` from the env if available else the default.\"\"\"\n value = os.environ.get(key, str(default))\n return str_to_bool(value) == 1 # As its name indicates `str_to_bool` actually returns an int...\n\n\ndef parse_choice_from_env(key, default=\"no\"):\n value = os.environ.get(key, str(default))\n return value\n\n\ndef are_libraries_initialized(*library_names: str) -> Dict[str, bool]:\n \"\"\"\n Checks if any of `library_names` are imported in the environment. 
Will return results as a `key:bool` pair.\n \"\"\"\n return [lib_name for lib_name in library_names if lib_name in sys.modules.keys()]\n\n\ndef get_gpu_info():\n \"\"\"\n Gets GPU count and names using `nvidia-smi` instead of torch to not initialize CUDA.\n\n Largely based on the `gputil` library.\n \"\"\"\n if platform.system() == \"Windows\":\n # If platform is Windows and nvidia-smi can't be found in path\n # try from systemd rive with default installation path\n command = spawn.find_executable(\"nvidia-smi\")\n if command is None:\n command = \"%s\\\\Program Files\\\\NVIDIA Corporation\\\\NVSMI\\\\nvidia-smi.exe\" % os.environ[\"systemdrive\"]\n else:\n command = \"nvidia-smi\"\n # Returns as list of `n` GPUs and their names\n output = subprocess.check_output(\n [command, \"--query-gpu=count,name\", \"--format=csv,noheader\"], universal_newlines=True\n )\n output = output.strip()\n gpus = output.split(os.linesep)\n # Get names from output\n gpu_count = len(gpus)\n gpu_names = [gpu.split(\",\")[1].strip() for gpu in gpus]\n return gpu_names, gpu_count\n\n\ndef check_cuda_p2p_ib_support():\n \"\"\"\n Checks if the devices being used have issues with P2P and IB communications, namely any consumer GPU hardware after\n the 3090.\n\n Noteably uses `nvidia-smi` instead of torch to not initialize CUDA.\n \"\"\"\n try:\n device_names, device_count = get_gpu_info()\n # As new consumer GPUs get released, add them to `unsupported_devices``\n unsupported_devices = {\"RTX 40\"}\n if device_count > 1:\n if any(\n unsupported_device in device_name\n for device_name in device_names\n for unsupported_device in unsupported_devices\n ):\n return False\n except Exception:\n pass\n return True\n\n\ndef check_fp8_capability():\n \"\"\"\n Checks if all the current GPUs available support FP8.\n\n Notably must initialize `torch.cuda` to check.\n \"\"\"\n cuda_device_capacity = torch.cuda.get_device_capability()\n return cuda_device_capacity >= (8, 9)\n", "path": "src/accelerate/utils/environment.py"}]}
num_tokens_prompt: 2,187
num_tokens_diff: 211
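The accelerate patch above swaps the removed `distutils.spawn.find_executable` for `shutil.which`, which has been in the standard library since Python 3.3. A standalone rendering of the patched `nvidia-smi` lookup; the function name and the closing `print` call are illustrative additions:

```python
import os
import platform
from shutil import which


def locate_nvidia_smi() -> str:
    """Find nvidia-smi without importing the distutils module removed in Python 3.12."""
    if platform.system() == "Windows":
        # shutil.which replaces distutils.spawn.find_executable here
        command = which("nvidia-smi")
        if command is None:
            # Fall back to the default driver install path, as the patch does
            command = "%s\\Program Files\\NVIDIA Corporation\\NVSMI\\nvidia-smi.exe" % os.environ["systemdrive"]
    else:
        command = "nvidia-smi"
    return command


print(locate_nvidia_smi())
```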
problem_id: gh_patches_debug_40301
source: rasdani/github-patches
task_type: git_diff
in_source_id: hpcaitech__ColossalAI-5543
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [tensor] fix some unittests [tensor] fix some unittests [tensor] fix some unittests </issue> <code> [start of colossalai/shardformer/shard/shard_config.py] 1 from dataclasses import dataclass, field 2 from typing import Any, Dict, Optional 3 4 import torch.distributed as dist 5 from torch.distributed import ProcessGroup 6 7 from colossalai.pipeline.stage_manager import PipelineStageManager 8 9 __all__ = ["ShardConfig"] 10 11 12 @dataclass 13 class ShardConfig: 14 r""" 15 The config for sharding the huggingface model 16 17 Args: 18 tensor_parallel_process_group (Optional[ProcessGroup]): The process group of tensor parallelism, it's necessary when using tensor parallel. Defaults to None, which is the global process group. 19 pipeline_stage_manager (Optional[PipelineStageManager]): If using pipeline parallelism, it's necessary to specify a pipeline stage manager for inter-process communication in pipeline parallelism. Defaults to None, which means not using pipeline parallelism. 20 enable_tensor_parallelism (bool): Whether to use tensor parallelism. Defaults to True. 21 enable_fused_normalization (bool): Whether to use fused layernorm. Defaults to False. 22 enable_flash_attention (bool, optional): Whether to switch on flash attention. Defaults to False. 23 enable_jit_fused (bool, optional): Whether to switch on JIT fused operators. Defaults to False. 24 enable_sequence_parallelism (bool): Whether to turn on sequence parallelism, which partitions non-tensor-parallel regions along the sequence dimension. Defaults to False. 25 enable_sequence_overlap (bool): Whether to turn on sequence overlap, which overlap the computation and communication in sequence parallelism. It can only be used when enable_sequence_parallelism is True. Defaults to False. 26 enable_all_optimization (bool): Whether to turn on all optimization tools including 'fused normalization', 'flash attention', 'JIT fused operators', 'sequence parallelism' and 'sequence overlap'. Defaults to False. 
27 """ 28 tensor_parallel_process_group: Optional[ProcessGroup] = None 29 pipeline_stage_manager: Optional[PipelineStageManager] = None 30 enable_tensor_parallelism: bool = True 31 enable_fused_normalization: bool = False 32 enable_flash_attention: bool = False 33 enable_jit_fused: bool = False 34 enable_all_optimization: bool = False 35 enable_sequence_parallelism: bool = False 36 enable_sequence_overlap: bool = False 37 parallel_output = True 38 extra_kwargs: Dict[str, Any] = field(default_factory=dict) 39 # pipeline_parallel_size: int 40 # data_parallel_size: int 41 # tensor_parallel_mode: Literal['1d', '2d', '2.5d', '3d'] 42 43 @property 44 def tensor_parallel_size(self): 45 return self._tensor_parallel_size 46 47 def __post_init__(self): 48 if not self.enable_tensor_parallelism and self.enable_sequence_parallelism: 49 raise ValueError( 50 "enable_sequence_parallelism can only be set to True when enable_tensor_parallelism is True" 51 ) 52 if not self.enable_sequence_parallelism and self.enable_sequence_overlap: 53 raise ValueError("enable_sequence_overlap can only be set to True when enable_sequence_parallelism is True") 54 if not self.enable_tensor_parallelism: 55 self._tensor_parallel_size = 1 56 else: 57 # get the parallel size 58 self._tensor_parallel_size = dist.get_world_size(self.tensor_parallel_process_group) 59 # turn on all optimization if all_optimization is set to True 60 if self.enable_all_optimization: 61 self._turn_on_all_optimization() 62 63 def _turn_on_all_optimization(self): 64 """ 65 Turn on all optimization. 66 """ 67 # you can add all the optimization flag here 68 self.enable_fused_normalization = True 69 self.enable_flash_attention = True 70 self.enable_jit_fused = True 71 self.enable_sequence_parallelism = True 72 self.enable_sequence_overlap = True 73 74 def _infer(self): 75 """ 76 Set default params for inference. 
77 """ 78 # assert self.pipeline_stage_manager is None, "pipeline parallelism is not supported in inference for now" 79 [end of colossalai/shardformer/shard/shard_config.py] [start of colossalai/inference/modeling/policy/nopadding_llama.py] 1 from functools import partial 2 3 from torch.nn import Parameter 4 from transformers.models.llama.modeling_llama import LlamaDecoderLayer, LlamaForCausalLM, LlamaModel, LlamaRMSNorm 5 6 from colossalai.inference.modeling.models.nopadding_llama import ( 7 NopadLlamaAttention, 8 NopadLlamaMLP, 9 llama_causal_lm_forward, 10 llama_decoder_layer_forward, 11 llama_model_forward, 12 llama_rmsnorm_forward, 13 ) 14 from colossalai.inference.utils import init_to_get_rotary 15 from colossalai.shardformer.policies.base_policy import ModulePolicyDescription, SubModuleReplacementDescription 16 17 # import colossalai 18 from colossalai.shardformer.policies.llama import LlamaForCausalLMPolicy 19 20 21 class NoPaddingLlamaModelInferPolicy(LlamaForCausalLMPolicy): 22 def __init__(self) -> None: 23 super().__init__() 24 25 def module_policy(self): 26 policy = super().module_policy() 27 28 decoder_attribute_replacement = { 29 "lm_head.weight": Parameter(self.model.lm_head.weight.transpose(0, 1), requires_grad=False), 30 } 31 policy[LlamaForCausalLM] = ModulePolicyDescription( 32 attribute_replacement=decoder_attribute_replacement, 33 ) 34 35 policy[LlamaDecoderLayer] = ModulePolicyDescription( 36 sub_module_replacement=[ 37 SubModuleReplacementDescription( 38 suffix="mlp", 39 target_module=NopadLlamaMLP, 40 ), 41 SubModuleReplacementDescription( 42 suffix="self_attn", 43 target_module=NopadLlamaAttention, 44 ), 45 ] 46 ) 47 48 self.shard_config._infer() 49 50 infer_forward = llama_causal_lm_forward 51 method_replacement = {"forward": partial(infer_forward)} 52 self.append_or_create_method_replacement( 53 description=method_replacement, policy=policy, target_key=LlamaForCausalLM 54 ) 55 56 infer_forward = llama_model_forward 57 method_replacement = {"forward": partial(infer_forward)} 58 self.append_or_create_method_replacement(description=method_replacement, policy=policy, target_key=LlamaModel) 59 60 infer_forward = llama_decoder_layer_forward 61 method_replacement = {"forward": partial(infer_forward)} 62 self.append_or_create_method_replacement( 63 description=method_replacement, policy=policy, target_key=LlamaDecoderLayer 64 ) 65 66 infer_forward = llama_rmsnorm_forward 67 method_replacement = {"forward": partial(infer_forward)} 68 self.append_or_create_method_replacement(description=method_replacement, policy=policy, target_key=LlamaRMSNorm) 69 70 return policy 71 72 def postprocess(self): 73 init_to_get_rotary(self.model.model) 74 return self.model 75 [end of colossalai/inference/modeling/policy/nopadding_llama.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/colossalai/inference/modeling/policy/nopadding_llama.py b/colossalai/inference/modeling/policy/nopadding_llama.py --- a/colossalai/inference/modeling/policy/nopadding_llama.py +++ b/colossalai/inference/modeling/policy/nopadding_llama.py @@ -1,5 +1,3 @@ -from functools import partial - from torch.nn import Parameter from transformers.models.llama.modeling_llama import LlamaDecoderLayer, LlamaForCausalLM, LlamaModel, LlamaRMSNorm @@ -13,8 +11,6 @@ ) from colossalai.inference.utils import init_to_get_rotary from colossalai.shardformer.policies.base_policy import ModulePolicyDescription, SubModuleReplacementDescription - -# import colossalai from colossalai.shardformer.policies.llama import LlamaForCausalLMPolicy @@ -45,27 +41,18 @@ ] ) - self.shard_config._infer() - - infer_forward = llama_causal_lm_forward - method_replacement = {"forward": partial(infer_forward)} self.append_or_create_method_replacement( - description=method_replacement, policy=policy, target_key=LlamaForCausalLM + description={"forward": llama_causal_lm_forward}, policy=policy, target_key=LlamaForCausalLM ) - - infer_forward = llama_model_forward - method_replacement = {"forward": partial(infer_forward)} - self.append_or_create_method_replacement(description=method_replacement, policy=policy, target_key=LlamaModel) - - infer_forward = llama_decoder_layer_forward - method_replacement = {"forward": partial(infer_forward)} self.append_or_create_method_replacement( - description=method_replacement, policy=policy, target_key=LlamaDecoderLayer + description={"forward": llama_model_forward}, policy=policy, target_key=LlamaModel + ) + self.append_or_create_method_replacement( + description={"forward": llama_decoder_layer_forward}, policy=policy, target_key=LlamaDecoderLayer + ) + self.append_or_create_method_replacement( + description={"forward": llama_rmsnorm_forward}, policy=policy, target_key=LlamaRMSNorm ) - - infer_forward = llama_rmsnorm_forward - method_replacement = {"forward": partial(infer_forward)} - self.append_or_create_method_replacement(description=method_replacement, policy=policy, target_key=LlamaRMSNorm) return policy diff --git a/colossalai/shardformer/shard/shard_config.py b/colossalai/shardformer/shard/shard_config.py --- a/colossalai/shardformer/shard/shard_config.py +++ b/colossalai/shardformer/shard/shard_config.py @@ -36,8 +36,6 @@ enable_sequence_overlap: bool = False parallel_output = True extra_kwargs: Dict[str, Any] = field(default_factory=dict) - # pipeline_parallel_size: int - # data_parallel_size: int # tensor_parallel_mode: Literal['1d', '2d', '2.5d', '3d'] @property @@ -70,9 +68,3 @@ self.enable_jit_fused = True self.enable_sequence_parallelism = True self.enable_sequence_overlap = True - - def _infer(self): - """ - Set default params for inference. - """ - # assert self.pipeline_stage_manager is None, "pipeline parallelism is not supported in inference for now"
{"golden_diff": "diff --git a/colossalai/inference/modeling/policy/nopadding_llama.py b/colossalai/inference/modeling/policy/nopadding_llama.py\n--- a/colossalai/inference/modeling/policy/nopadding_llama.py\n+++ b/colossalai/inference/modeling/policy/nopadding_llama.py\n@@ -1,5 +1,3 @@\n-from functools import partial\n-\n from torch.nn import Parameter\n from transformers.models.llama.modeling_llama import LlamaDecoderLayer, LlamaForCausalLM, LlamaModel, LlamaRMSNorm\n \n@@ -13,8 +11,6 @@\n )\n from colossalai.inference.utils import init_to_get_rotary\n from colossalai.shardformer.policies.base_policy import ModulePolicyDescription, SubModuleReplacementDescription\n-\n-# import colossalai\n from colossalai.shardformer.policies.llama import LlamaForCausalLMPolicy\n \n \n@@ -45,27 +41,18 @@\n ]\n )\n \n- self.shard_config._infer()\n-\n- infer_forward = llama_causal_lm_forward\n- method_replacement = {\"forward\": partial(infer_forward)}\n self.append_or_create_method_replacement(\n- description=method_replacement, policy=policy, target_key=LlamaForCausalLM\n+ description={\"forward\": llama_causal_lm_forward}, policy=policy, target_key=LlamaForCausalLM\n )\n-\n- infer_forward = llama_model_forward\n- method_replacement = {\"forward\": partial(infer_forward)}\n- self.append_or_create_method_replacement(description=method_replacement, policy=policy, target_key=LlamaModel)\n-\n- infer_forward = llama_decoder_layer_forward\n- method_replacement = {\"forward\": partial(infer_forward)}\n self.append_or_create_method_replacement(\n- description=method_replacement, policy=policy, target_key=LlamaDecoderLayer\n+ description={\"forward\": llama_model_forward}, policy=policy, target_key=LlamaModel\n+ )\n+ self.append_or_create_method_replacement(\n+ description={\"forward\": llama_decoder_layer_forward}, policy=policy, target_key=LlamaDecoderLayer\n+ )\n+ self.append_or_create_method_replacement(\n+ description={\"forward\": llama_rmsnorm_forward}, policy=policy, target_key=LlamaRMSNorm\n )\n-\n- infer_forward = llama_rmsnorm_forward\n- method_replacement = {\"forward\": partial(infer_forward)}\n- self.append_or_create_method_replacement(description=method_replacement, policy=policy, target_key=LlamaRMSNorm)\n \n return policy\n \ndiff --git a/colossalai/shardformer/shard/shard_config.py b/colossalai/shardformer/shard/shard_config.py\n--- a/colossalai/shardformer/shard/shard_config.py\n+++ b/colossalai/shardformer/shard/shard_config.py\n@@ -36,8 +36,6 @@\n enable_sequence_overlap: bool = False\n parallel_output = True\n extra_kwargs: Dict[str, Any] = field(default_factory=dict)\n- # pipeline_parallel_size: int\n- # data_parallel_size: int\n # tensor_parallel_mode: Literal['1d', '2d', '2.5d', '3d']\n \n @property\n@@ -70,9 +68,3 @@\n self.enable_jit_fused = True\n self.enable_sequence_parallelism = True\n self.enable_sequence_overlap = True\n-\n- def _infer(self):\n- \"\"\"\n- Set default params for inference.\n- \"\"\"\n- # assert self.pipeline_stage_manager is None, \"pipeline parallelism is not supported in inference for now\"\n", "issue": "[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n", "before_files": [{"content": "from dataclasses import dataclass, field\nfrom typing import Any, Dict, Optional\n\nimport torch.distributed as dist\nfrom torch.distributed import ProcessGroup\n\nfrom colossalai.pipeline.stage_manager import PipelineStageManager\n\n__all__ = [\"ShardConfig\"]\n\n\n@dataclass\nclass ShardConfig:\n r\"\"\"\n The config for sharding the 
huggingface model\n\n Args:\n tensor_parallel_process_group (Optional[ProcessGroup]): The process group of tensor parallelism, it's necessary when using tensor parallel. Defaults to None, which is the global process group.\n pipeline_stage_manager (Optional[PipelineStageManager]): If using pipeline parallelism, it's necessary to specify a pipeline stage manager for inter-process communication in pipeline parallelism. Defaults to None, which means not using pipeline parallelism.\n enable_tensor_parallelism (bool): Whether to use tensor parallelism. Defaults to True.\n enable_fused_normalization (bool): Whether to use fused layernorm. Defaults to False.\n enable_flash_attention (bool, optional): Whether to switch on flash attention. Defaults to False.\n enable_jit_fused (bool, optional): Whether to switch on JIT fused operators. Defaults to False.\n enable_sequence_parallelism (bool): Whether to turn on sequence parallelism, which partitions non-tensor-parallel regions along the sequence dimension. Defaults to False.\n enable_sequence_overlap (bool): Whether to turn on sequence overlap, which overlap the computation and communication in sequence parallelism. It can only be used when enable_sequence_parallelism is True. Defaults to False.\n enable_all_optimization (bool): Whether to turn on all optimization tools including 'fused normalization', 'flash attention', 'JIT fused operators', 'sequence parallelism' and 'sequence overlap'. Defaults to False.\n \"\"\"\n tensor_parallel_process_group: Optional[ProcessGroup] = None\n pipeline_stage_manager: Optional[PipelineStageManager] = None\n enable_tensor_parallelism: bool = True\n enable_fused_normalization: bool = False\n enable_flash_attention: bool = False\n enable_jit_fused: bool = False\n enable_all_optimization: bool = False\n enable_sequence_parallelism: bool = False\n enable_sequence_overlap: bool = False\n parallel_output = True\n extra_kwargs: Dict[str, Any] = field(default_factory=dict)\n # pipeline_parallel_size: int\n # data_parallel_size: int\n # tensor_parallel_mode: Literal['1d', '2d', '2.5d', '3d']\n\n @property\n def tensor_parallel_size(self):\n return self._tensor_parallel_size\n\n def __post_init__(self):\n if not self.enable_tensor_parallelism and self.enable_sequence_parallelism:\n raise ValueError(\n \"enable_sequence_parallelism can only be set to True when enable_tensor_parallelism is True\"\n )\n if not self.enable_sequence_parallelism and self.enable_sequence_overlap:\n raise ValueError(\"enable_sequence_overlap can only be set to True when enable_sequence_parallelism is True\")\n if not self.enable_tensor_parallelism:\n self._tensor_parallel_size = 1\n else:\n # get the parallel size\n self._tensor_parallel_size = dist.get_world_size(self.tensor_parallel_process_group)\n # turn on all optimization if all_optimization is set to True\n if self.enable_all_optimization:\n self._turn_on_all_optimization()\n\n def _turn_on_all_optimization(self):\n \"\"\"\n Turn on all optimization.\n \"\"\"\n # you can add all the optimization flag here\n self.enable_fused_normalization = True\n self.enable_flash_attention = True\n self.enable_jit_fused = True\n self.enable_sequence_parallelism = True\n self.enable_sequence_overlap = True\n\n def _infer(self):\n \"\"\"\n Set default params for inference.\n \"\"\"\n # assert self.pipeline_stage_manager is None, \"pipeline parallelism is not supported in inference for now\"\n", "path": "colossalai/shardformer/shard/shard_config.py"}, {"content": "from functools import partial\n\nfrom 
torch.nn import Parameter\nfrom transformers.models.llama.modeling_llama import LlamaDecoderLayer, LlamaForCausalLM, LlamaModel, LlamaRMSNorm\n\nfrom colossalai.inference.modeling.models.nopadding_llama import (\n NopadLlamaAttention,\n NopadLlamaMLP,\n llama_causal_lm_forward,\n llama_decoder_layer_forward,\n llama_model_forward,\n llama_rmsnorm_forward,\n)\nfrom colossalai.inference.utils import init_to_get_rotary\nfrom colossalai.shardformer.policies.base_policy import ModulePolicyDescription, SubModuleReplacementDescription\n\n# import colossalai\nfrom colossalai.shardformer.policies.llama import LlamaForCausalLMPolicy\n\n\nclass NoPaddingLlamaModelInferPolicy(LlamaForCausalLMPolicy):\n def __init__(self) -> None:\n super().__init__()\n\n def module_policy(self):\n policy = super().module_policy()\n\n decoder_attribute_replacement = {\n \"lm_head.weight\": Parameter(self.model.lm_head.weight.transpose(0, 1), requires_grad=False),\n }\n policy[LlamaForCausalLM] = ModulePolicyDescription(\n attribute_replacement=decoder_attribute_replacement,\n )\n\n policy[LlamaDecoderLayer] = ModulePolicyDescription(\n sub_module_replacement=[\n SubModuleReplacementDescription(\n suffix=\"mlp\",\n target_module=NopadLlamaMLP,\n ),\n SubModuleReplacementDescription(\n suffix=\"self_attn\",\n target_module=NopadLlamaAttention,\n ),\n ]\n )\n\n self.shard_config._infer()\n\n infer_forward = llama_causal_lm_forward\n method_replacement = {\"forward\": partial(infer_forward)}\n self.append_or_create_method_replacement(\n description=method_replacement, policy=policy, target_key=LlamaForCausalLM\n )\n\n infer_forward = llama_model_forward\n method_replacement = {\"forward\": partial(infer_forward)}\n self.append_or_create_method_replacement(description=method_replacement, policy=policy, target_key=LlamaModel)\n\n infer_forward = llama_decoder_layer_forward\n method_replacement = {\"forward\": partial(infer_forward)}\n self.append_or_create_method_replacement(\n description=method_replacement, policy=policy, target_key=LlamaDecoderLayer\n )\n\n infer_forward = llama_rmsnorm_forward\n method_replacement = {\"forward\": partial(infer_forward)}\n self.append_or_create_method_replacement(description=method_replacement, policy=policy, target_key=LlamaRMSNorm)\n\n return policy\n\n def postprocess(self):\n init_to_get_rotary(self.model.model)\n return self.model\n", "path": "colossalai/inference/modeling/policy/nopadding_llama.py"}]}
num_tokens_prompt: 2,315
num_tokens_diff: 807
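Most of the ColossalAI diff above removes `functools.partial(fn)` wrappers that bind no arguments and registers the forward functions directly. A tiny standalone illustration of why the two registrations are interchangeable:

```python
from functools import partial


def forward(x):
    return x * 2


# A partial that binds no arguments is just an extra layer of indirection, so
# {"forward": partial(forward)} and {"forward": forward} register the same behaviour.
wrapped = partial(forward)
assert wrapped(3) == forward(3) == 6
```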
problem_id: gh_patches_debug_13885
source: rasdani/github-patches
task_type: git_diff
in_source_id: pulp__pulpcore-3039
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Remove orphans: Repository matching query does not exist. **Version** ``` $ rpm -qa | grep tfm-rubygem-pulp tfm-rubygem-pulp_certguard_client-1.5.0-1.el7.noarch tfm-rubygem-pulp_ansible_client-0.13.1-1.el7.noarch tfm-rubygem-pulp_deb_client-2.18.0-1.el7.noarch tfm-rubygem-pulp_rpm_client-3.17.4-1.el7.noarch tfm-rubygem-pulp_python_client-3.6.0-1.el7.noarch tfm-rubygem-pulp_ostree_client-2.0.0-0.1.a1.el7.noarch tfm-rubygem-pulpcore_client-3.18.5-1.el7.noarch tfm-rubygem-pulp_container_client-2.10.3-1.el7.noarch tfm-rubygem-pulp_file_client-1.10.0-1.el7.noarch $ rpm -qa | grep katello-4 tfm-rubygem-katello-4.5.0-1.el7.noarch katello-4.5.0-1.el7.noarch ``` **Describe the bug** Running `foreman-rake katello:delete_orphaned_content RAILS_ENV=production` creates two tasks. One of the tasks gets stuck in _paused_ with the following error: `Repository matching query does not exist.` **To Reproduce** - Run `foreman-rake katello:delete_orphaned_content RAILS_ENV=production`. - After a few minutes run `hammer --no-headers --output json task list --order 'started_at DESC' --search 'state = "paused"'` **Expected behavior** We expected both tasks to run successfully. **Additional context** We had to manually apply this patch: https://github.com/pulp/pulp_rpm/pull/2619 - before that, we had his same issue: https://github.com/pulp/pulp_rpm/issues/2459 . ``` $ hammer --no-headers --output json task list --order 'started_at DESC' --search 'state = "paused" ' [ { "ID": "a8aae19c-7c62-4a60-ab12-935687e3723e", "Action": "Remove orphans ", "State": "paused", "Result": "error", "Started at": "2022-07-21 15:29:02 UTC", "Ended at": null, "Duration": "00:02:29.56437", "Owner": "foreman_admin", "Task errors": [ "Repository matching query does not exist." 
] } ] ``` ``` $ http_pulp /pulp/api/v3/tasks/e7df82c7-cb38-4fac-a28a-edd8842b3b69/ HTTP/1.1 200 OK Access-Control-Expose-Headers: Correlation-ID Allow: GET, PATCH, DELETE, HEAD, OPTIONS Connection: Keep-Alive Content-Length: 3335 Content-Type: application/json Correlation-ID: e1f2e082da6e401196007424799a05b5 Date: Thu, 21 Jul 2022 17:10:58 GMT Keep-Alive: timeout=15, max=100 Referrer-Policy: same-origin Server: gunicorn Vary: Accept,Cookie Via: 1.1 updates.eurotux.com X-Content-Type-Options: nosniff X-Frame-Options: DENY { "child_tasks": [], "created_resources": [], "error": { "description": "Repository matching query does not exist.", "traceback": " File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/pulpcore/tasking/pulpcore_worker.py\", line 410, in _perform_task\n result = func(*args, **kwargs)\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/pulpcore/app/tasks/orphan.py\", line 66, in orphan_cleanup\n c.delete()\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/query.py\", line 746, in delete\n deleted, _rows_count = collector.delete()\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/deletion.py\", line 435, in delete\n signals.post_delete.send(\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/dispatch/dispatcher.py\", line 180, in send\n return [\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/dispatch/dispatcher.py\", line 181, in <listcomp>\n (receiver, receiver(signal=self, sender=sender, **named))\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/pulp_rpm/app/models/distribution.py\", line 340, in cleanup_subrepos\n Variant.objects.filter(repository=subrepo).delete()\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/query.py\", line 746, in delete\n deleted, _rows_count = collector.delete()\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/deletion.py\", line 435, in delete\n signals.post_delete.send(\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/dispatch/dispatcher.py\", line 180, in send\n return [\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/dispatch/dispatcher.py\", line 181, in <listcomp>\n (receiver, receiver(signal=self, sender=sender, **named))\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/pulp_rpm/app/models/distribution.py\", line 338, in cleanup_subrepos\n subrepo = instance.repository\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/fields/related_descriptors.py\", line 187, in __get__\n rel_obj = self.get_object(instance)\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/fields/related_descriptors.py\", line 154, in get_object\n return qs.get(self.field.get_reverse_related_filter(instance))\n File \"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/query.py\", line 435, in get\n raise self.model.DoesNotExist(\n" }, "finished_at": "2022-07-21T15:30:11.075104Z", "logging_cid": "ac4070d63c764578bb854f97873ba88a", "name": "pulpcore.app.tasks.orphan.orphan_cleanup", "parent_task": null, "progress_reports": [ { "code": "clean-up.content", "done": 2000, "message": "Clean up orphan Content", "state": "running", "suffix": null, "total": 17098 } ], 
"pulp_created": "2022-07-21T15:29:58.868921Z", "pulp_href": "/pulp/api/v3/tasks/e7df82c7-cb38-4fac-a28a-edd8842b3b69/", "reserved_resources_record": [], "started_at": "2022-07-21T15:29:59.335110Z", "state": "failed", "task_group": null, "worker": "/pulp/api/v3/workers/6ff07070-3e14-49b3-b508-a0c300683f28/" } ``` edit: related to https://community.theforeman.org/t/delete-orphaned-content-fails/29494/6 https://bugzilla.redhat.com/show_bug.cgi?id=2115881 </issue> <code> [start of pulpcore/app/views/orphans.py] 1 from drf_spectacular.utils import extend_schema 2 from rest_framework.views import APIView 3 4 from pulpcore.app.loggers import deprecation_logger 5 from pulpcore.app.response import OperationPostponedResponse 6 from pulpcore.app.serializers import AsyncOperationResponseSerializer 7 from pulpcore.app.tasks import orphan_cleanup 8 from pulpcore.tasking.tasks import dispatch 9 10 11 class OrphansView(APIView): 12 @extend_schema( 13 description="DEPRECATED! Trigger an asynchronous task that deletes all " 14 "orphaned content and artifacts. Use the `POST /pulp/api/v3/orphans/cleanup/` call " 15 "instead.", 16 summary="Delete orphans", 17 responses={202: AsyncOperationResponseSerializer}, 18 ) 19 def delete(self, request, format=None): 20 """ 21 Cleans up all the Content and Artifact orphans in the system 22 """ 23 deprecation_logger.warning( 24 "The `DELETE /pulp/api/v3/orphans/` call is deprecated. Use" 25 "`POST /pulp/api/v3/orphans/cleanup/` instead." 26 ) 27 28 task = dispatch(orphan_cleanup) 29 30 return OperationPostponedResponse(task, request) 31 [end of pulpcore/app/views/orphans.py] [start of pulpcore/app/viewsets/orphans.py] 1 from drf_spectacular.utils import extend_schema 2 from rest_framework.viewsets import ViewSet 3 4 from pulpcore.app.response import OperationPostponedResponse 5 from pulpcore.app.serializers import AsyncOperationResponseSerializer, OrphansCleanupSerializer 6 from pulpcore.app.tasks import orphan_cleanup 7 from pulpcore.tasking.tasks import dispatch 8 9 10 class OrphansCleanupViewset(ViewSet): 11 serializer_class = OrphansCleanupSerializer 12 13 @extend_schema( 14 description="Trigger an asynchronous orphan cleanup operation.", 15 responses={202: AsyncOperationResponseSerializer}, 16 ) 17 def cleanup(self, request): 18 """ 19 Triggers an asynchronous orphan cleanup operation. 20 """ 21 serializer = OrphansCleanupSerializer(data=request.data) 22 serializer.is_valid(raise_exception=True) 23 24 content_pks = serializer.validated_data.get("content_hrefs", None) 25 orphan_protection_time = serializer.validated_data.get("orphan_protection_time") 26 27 task = dispatch( 28 orphan_cleanup, 29 kwargs={"content_pks": content_pks, "orphan_protection_time": orphan_protection_time}, 30 ) 31 32 return OperationPostponedResponse(task, request) 33 [end of pulpcore/app/viewsets/orphans.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pulpcore/app/views/orphans.py b/pulpcore/app/views/orphans.py --- a/pulpcore/app/views/orphans.py +++ b/pulpcore/app/views/orphans.py @@ -25,6 +25,6 @@ "`POST /pulp/api/v3/orphans/cleanup/` instead." ) - task = dispatch(orphan_cleanup) + task = dispatch(orphan_cleanup, exclusive_resources=["/pulp/api/v3/orphans/cleanup/"]) return OperationPostponedResponse(task, request) diff --git a/pulpcore/app/viewsets/orphans.py b/pulpcore/app/viewsets/orphans.py --- a/pulpcore/app/viewsets/orphans.py +++ b/pulpcore/app/viewsets/orphans.py @@ -26,6 +26,7 @@ task = dispatch( orphan_cleanup, + exclusive_resources=["/pulp/api/v3/orphans/cleanup/"], kwargs={"content_pks": content_pks, "orphan_protection_time": orphan_protection_time}, )
{"golden_diff": "diff --git a/pulpcore/app/views/orphans.py b/pulpcore/app/views/orphans.py\n--- a/pulpcore/app/views/orphans.py\n+++ b/pulpcore/app/views/orphans.py\n@@ -25,6 +25,6 @@\n \"`POST /pulp/api/v3/orphans/cleanup/` instead.\"\n )\n \n- task = dispatch(orphan_cleanup)\n+ task = dispatch(orphan_cleanup, exclusive_resources=[\"/pulp/api/v3/orphans/cleanup/\"])\n \n return OperationPostponedResponse(task, request)\ndiff --git a/pulpcore/app/viewsets/orphans.py b/pulpcore/app/viewsets/orphans.py\n--- a/pulpcore/app/viewsets/orphans.py\n+++ b/pulpcore/app/viewsets/orphans.py\n@@ -26,6 +26,7 @@\n \n task = dispatch(\n orphan_cleanup,\n+ exclusive_resources=[\"/pulp/api/v3/orphans/cleanup/\"],\n kwargs={\"content_pks\": content_pks, \"orphan_protection_time\": orphan_protection_time},\n )\n", "issue": "Remove orphans: Repository matching query does not exist.\n**Version**\r\n```\r\n$ rpm -qa | grep tfm-rubygem-pulp\r\ntfm-rubygem-pulp_certguard_client-1.5.0-1.el7.noarch\r\ntfm-rubygem-pulp_ansible_client-0.13.1-1.el7.noarch\r\ntfm-rubygem-pulp_deb_client-2.18.0-1.el7.noarch\r\ntfm-rubygem-pulp_rpm_client-3.17.4-1.el7.noarch\r\ntfm-rubygem-pulp_python_client-3.6.0-1.el7.noarch\r\ntfm-rubygem-pulp_ostree_client-2.0.0-0.1.a1.el7.noarch\r\ntfm-rubygem-pulpcore_client-3.18.5-1.el7.noarch\r\ntfm-rubygem-pulp_container_client-2.10.3-1.el7.noarch\r\ntfm-rubygem-pulp_file_client-1.10.0-1.el7.noarch\r\n\r\n$ rpm -qa | grep katello-4\r\ntfm-rubygem-katello-4.5.0-1.el7.noarch\r\nkatello-4.5.0-1.el7.noarch\r\n```\r\n\r\n**Describe the bug**\r\nRunning `foreman-rake katello:delete_orphaned_content RAILS_ENV=production` creates two tasks.\r\nOne of the tasks gets stuck in _paused_ with the following error: `Repository matching query does not exist.`\r\n\r\n\r\n**To Reproduce**\r\n\r\n- Run \r\n `foreman-rake katello:delete_orphaned_content RAILS_ENV=production`.\r\n\r\n- After a few minutes run \r\n `hammer --no-headers --output json task list --order 'started_at DESC' --search 'state = \"paused\"'`\r\n\r\n**Expected behavior**\r\nWe expected both tasks to run successfully.\r\n\r\n**Additional context**\r\nWe had to manually apply this patch: https://github.com/pulp/pulp_rpm/pull/2619 - before that, we had his same issue: https://github.com/pulp/pulp_rpm/issues/2459 .\r\n\r\n```\r\n$ hammer --no-headers --output json task list --order 'started_at DESC' --search 'state = \"paused\" '\r\n[\r\n {\r\n \"ID\": \"a8aae19c-7c62-4a60-ab12-935687e3723e\",\r\n \"Action\": \"Remove orphans \",\r\n \"State\": \"paused\",\r\n \"Result\": \"error\",\r\n \"Started at\": \"2022-07-21 15:29:02 UTC\",\r\n \"Ended at\": null,\r\n \"Duration\": \"00:02:29.56437\",\r\n \"Owner\": \"foreman_admin\",\r\n \"Task errors\": [\r\n \"Repository matching query does not exist.\"\r\n ]\r\n }\r\n]\r\n```\r\n```\r\n$ http_pulp /pulp/api/v3/tasks/e7df82c7-cb38-4fac-a28a-edd8842b3b69/\r\nHTTP/1.1 200 OK\r\nAccess-Control-Expose-Headers: Correlation-ID\r\nAllow: GET, PATCH, DELETE, HEAD, OPTIONS\r\nConnection: Keep-Alive\r\nContent-Length: 3335\r\nContent-Type: application/json\r\nCorrelation-ID: e1f2e082da6e401196007424799a05b5\r\nDate: Thu, 21 Jul 2022 17:10:58 GMT\r\nKeep-Alive: timeout=15, max=100\r\nReferrer-Policy: same-origin\r\nServer: gunicorn\r\nVary: Accept,Cookie\r\nVia: 1.1 updates.eurotux.com\r\nX-Content-Type-Options: nosniff\r\nX-Frame-Options: DENY\r\n\r\n{\r\n \"child_tasks\": [],\r\n \"created_resources\": [],\r\n \"error\": {\r\n \"description\": \"Repository matching query does not exist.\",\r\n \"traceback\": \" 
File \\\"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/pulpcore/tasking/pulpcore_worker.py\\\", line 410, in _perform_task\\n result = func(*args, **kwargs)\\n File \\\"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/pulpcore/app/tasks/orphan.py\\\", line 66, in orphan_cleanup\\n c.delete()\\n File \\\"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/query.py\\\", line 746, in delete\\n deleted, _rows_count = collector.delete()\\n File \\\"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/deletion.py\\\", line 435, in delete\\n signals.post_delete.send(\\n File \\\"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/dispatch/dispatcher.py\\\", line 180, in send\\n return [\\n File \\\"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/dispatch/dispatcher.py\\\", line 181, in <listcomp>\\n (receiver, receiver(signal=self, sender=sender, **named))\\n File \\\"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/pulp_rpm/app/models/distribution.py\\\", line 340, in cleanup_subrepos\\n Variant.objects.filter(repository=subrepo).delete()\\n File \\\"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/query.py\\\", line 746, in delete\\n deleted, _rows_count = collector.delete()\\n File \\\"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/deletion.py\\\", line 435, in delete\\n signals.post_delete.send(\\n File \\\"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/dispatch/dispatcher.py\\\", line 180, in send\\n return [\\n File \\\"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/dispatch/dispatcher.py\\\", line 181, in <listcomp>\\n (receiver, receiver(signal=self, sender=sender, **named))\\n File \\\"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/pulp_rpm/app/models/distribution.py\\\", line 338, in cleanup_subrepos\\n subrepo = instance.repository\\n File \\\"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/fields/related_descriptors.py\\\", line 187, in __get__\\n rel_obj = self.get_object(instance)\\n File \\\"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/fields/related_descriptors.py\\\", line 154, in get_object\\n return qs.get(self.field.get_reverse_related_filter(instance))\\n File \\\"/opt/theforeman/tfm-pulpcore/root/usr/lib/python3.8/site-packages/django/db/models/query.py\\\", line 435, in get\\n raise self.model.DoesNotExist(\\n\"\r\n },\r\n \"finished_at\": \"2022-07-21T15:30:11.075104Z\",\r\n \"logging_cid\": \"ac4070d63c764578bb854f97873ba88a\",\r\n \"name\": \"pulpcore.app.tasks.orphan.orphan_cleanup\",\r\n \"parent_task\": null,\r\n \"progress_reports\": [\r\n {\r\n \"code\": \"clean-up.content\",\r\n \"done\": 2000,\r\n \"message\": \"Clean up orphan Content\",\r\n \"state\": \"running\",\r\n \"suffix\": null,\r\n \"total\": 17098\r\n }\r\n ],\r\n \"pulp_created\": \"2022-07-21T15:29:58.868921Z\",\r\n \"pulp_href\": \"/pulp/api/v3/tasks/e7df82c7-cb38-4fac-a28a-edd8842b3b69/\",\r\n \"reserved_resources_record\": [],\r\n \"started_at\": \"2022-07-21T15:29:59.335110Z\",\r\n \"state\": \"failed\",\r\n \"task_group\": null,\r\n \"worker\": \"/pulp/api/v3/workers/6ff07070-3e14-49b3-b508-a0c300683f28/\"\r\n}\r\n```\r\n\r\nedit: related to 
https://community.theforeman.org/t/delete-orphaned-content-fails/29494/6\r\n\r\nhttps://bugzilla.redhat.com/show_bug.cgi?id=2115881\r\n\r\n\n", "before_files": [{"content": "from drf_spectacular.utils import extend_schema\nfrom rest_framework.views import APIView\n\nfrom pulpcore.app.loggers import deprecation_logger\nfrom pulpcore.app.response import OperationPostponedResponse\nfrom pulpcore.app.serializers import AsyncOperationResponseSerializer\nfrom pulpcore.app.tasks import orphan_cleanup\nfrom pulpcore.tasking.tasks import dispatch\n\n\nclass OrphansView(APIView):\n @extend_schema(\n description=\"DEPRECATED! Trigger an asynchronous task that deletes all \"\n \"orphaned content and artifacts. Use the `POST /pulp/api/v3/orphans/cleanup/` call \"\n \"instead.\",\n summary=\"Delete orphans\",\n responses={202: AsyncOperationResponseSerializer},\n )\n def delete(self, request, format=None):\n \"\"\"\n Cleans up all the Content and Artifact orphans in the system\n \"\"\"\n deprecation_logger.warning(\n \"The `DELETE /pulp/api/v3/orphans/` call is deprecated. Use\"\n \"`POST /pulp/api/v3/orphans/cleanup/` instead.\"\n )\n\n task = dispatch(orphan_cleanup)\n\n return OperationPostponedResponse(task, request)\n", "path": "pulpcore/app/views/orphans.py"}, {"content": "from drf_spectacular.utils import extend_schema\nfrom rest_framework.viewsets import ViewSet\n\nfrom pulpcore.app.response import OperationPostponedResponse\nfrom pulpcore.app.serializers import AsyncOperationResponseSerializer, OrphansCleanupSerializer\nfrom pulpcore.app.tasks import orphan_cleanup\nfrom pulpcore.tasking.tasks import dispatch\n\n\nclass OrphansCleanupViewset(ViewSet):\n serializer_class = OrphansCleanupSerializer\n\n @extend_schema(\n description=\"Trigger an asynchronous orphan cleanup operation.\",\n responses={202: AsyncOperationResponseSerializer},\n )\n def cleanup(self, request):\n \"\"\"\n Triggers an asynchronous orphan cleanup operation.\n \"\"\"\n serializer = OrphansCleanupSerializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n\n content_pks = serializer.validated_data.get(\"content_hrefs\", None)\n orphan_protection_time = serializer.validated_data.get(\"orphan_protection_time\")\n\n task = dispatch(\n orphan_cleanup,\n kwargs={\"content_pks\": content_pks, \"orphan_protection_time\": orphan_protection_time},\n )\n\n return OperationPostponedResponse(task, request)\n", "path": "pulpcore/app/viewsets/orphans.py"}]}
3,242
233
gh_patches_debug_2586
rasdani/github-patches
git_diff
pyro-ppl__numpyro-737
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Possible error in the validation of a Categorical distribution I am getting an error when I try to run the following code. The code just sample from a categorical distribution using the defined probabilities. ```python import numpyro import numpyro.distributions as dist import jax.numpy as jnp numpyro.enable_validation(True) def model(): probs = jnp.array([0.5, 0.5, 0.]) c = numpyro.sample('c', dist.Categorical(probs=probs)) return c with numpyro.handlers.seed(rng_seed=54): print(model()) ``` ``` ValueError Traceback (most recent call last) <ipython-input-1-fc7fe60e083b> in <module> 10 11 with numpyro.handlers.seed(rng_seed=54): ---> 12 print(model()) <ipython-input-1-fc7fe60e083b> in model() 6 def model(): 7 probs = jnp.array([0.5, 0.5, 0.]) ----> 8 c = numpyro.sample('c', dist.Categorical(probs=probs)) 9 return c 10 ~/miniconda3/envs/numpyro_test/lib/python3.8/site-packages/numpyro/distributions/discrete.py in Categorical(probs, logits, validate_args) 348 def Categorical(probs=None, logits=None, validate_args=None): 349 if probs is not None: --> 350 return CategoricalProbs(probs, validate_args=validate_args) 351 elif logits is not None: 352 return CategoricalLogits(logits, validate_args=validate_args) ~/miniconda3/envs/numpyro_test/lib/python3.8/site-packages/numpyro/distributions/discrete.py in __init__(self, probs, validate_args) 265 raise ValueError("`probs` parameter must be at least one-dimensional.") 266 self.probs = probs --> 267 super(CategoricalProbs, self).__init__(batch_shape=jnp.shape(self.probs)[:-1], 268 validate_args=validate_args) 269 ~/miniconda3/envs/numpyro_test/lib/python3.8/site-packages/numpyro/distributions/distribution.py in __init__(self, batch_shape, event_shape, validate_args) 142 if not_jax_tracer(is_valid): 143 if not is_valid: --> 144 raise ValueError("The parameter {} has invalid values".format(param)) 145 super(Distribution, self).__init__() 146 ValueError: The parameter probs has invalid values ``` I think the problem is caused by the validation because If I restart my kernel and comment the line ```numpyro.enable_validation(True)``` the code will run without problem. It will print 0 in my case. If I write a similar code in Pyro with the validation enabled, I do not get an error. ```python import torch import pyro import pyro.distributions as dist pyro.enable_validation(True) pyro.set_rng_seed(54) def model(): probs = torch.tensor([0.5, 0.5, 0.]) c = pyro.sample('c', dist.Categorical(probs=probs)) return c print(model()) ``` I am using Python 3.8.5, Pyro 1.4.0 and NumPyro 0.3.0 with Ubuntu. Happy to help with what I can. </issue> <code> [start of numpyro/distributions/constraints.py] 1 # Copyright Contributors to the Pyro project. 
2 # SPDX-License-Identifier: Apache-2.0 3 4 # The implementation follows the design in PyTorch: torch.distributions.constraints.py 5 # 6 # Copyright (c) 2016- Facebook, Inc (Adam Paszke) 7 # Copyright (c) 2014- Facebook, Inc (Soumith Chintala) 8 # Copyright (c) 2011-2014 Idiap Research Institute (Ronan Collobert) 9 # Copyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu) 10 # Copyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu) 11 # Copyright (c) 2011-2013 NYU (Clement Farabet) 12 # Copyright (c) 2006-2010 NEC Laboratories America (Ronan Collobert, Leon Bottou, Iain Melvin, Jason Weston) 13 # Copyright (c) 2006 Idiap Research Institute (Samy Bengio) 14 # Copyright (c) 2001-2004 Idiap Research Institute (Ronan Collobert, Samy Bengio, Johnny Mariethoz) 15 # 16 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 17 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 20 # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 21 # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 22 # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 23 # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 24 # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 25 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 26 # POSSIBILITY OF SUCH DAMAGE. 27 28 29 __all__ = [ 30 'boolean', 31 'corr_cholesky', 32 'corr_matrix', 33 'dependent', 34 'greater_than', 35 'integer_interval', 36 'integer_greater_than', 37 'interval', 38 'is_dependent', 39 'less_than', 40 'lower_cholesky', 41 'multinomial', 42 'nonnegative_integer', 43 'positive', 44 'positive_definite', 45 'positive_integer', 46 'real', 47 'real_vector', 48 'simplex', 49 'unit_interval', 50 'Constraint', 51 ] 52 53 import jax.numpy as jnp 54 55 56 class Constraint(object): 57 """ 58 Abstract base class for constraints. 59 60 A constraint object represents a region over which a variable is valid, 61 e.g. within which a variable can be optimized. 62 """ 63 64 def __call__(self, x): 65 raise NotImplementedError 66 67 def check(self, value): 68 """ 69 Returns a byte tensor of `sample_shape + batch_shape` indicating 70 whether each event in value satisfies this constraint. 
71 """ 72 return self(value) 73 74 75 class _Boolean(Constraint): 76 def __call__(self, x): 77 return (x == 0) | (x == 1) 78 79 80 class _CorrCholesky(Constraint): 81 def __call__(self, x): 82 tril = jnp.tril(x) 83 lower_triangular = jnp.all(jnp.reshape(tril == x, x.shape[:-2] + (-1,)), axis=-1) 84 positive_diagonal = jnp.all(jnp.diagonal(x, axis1=-2, axis2=-1) > 0, axis=-1) 85 x_norm = jnp.linalg.norm(x, axis=-1) 86 unit_norm_row = jnp.all((x_norm <= 1) & (x_norm > 1 - 1e-6), axis=-1) 87 return lower_triangular & positive_diagonal & unit_norm_row 88 89 90 class _CorrMatrix(Constraint): 91 def __call__(self, x): 92 # check for symmetric 93 symmetric = jnp.all(jnp.all(x == jnp.swapaxes(x, -2, -1), axis=-1), axis=-1) 94 # check for the smallest eigenvalue is positive 95 positive = jnp.linalg.eigh(x)[0][..., 0] > 0 96 # check for diagonal equal to 1 97 unit_variance = jnp.all(jnp.abs(jnp.diagonal(x, axis1=-2, axis2=-1) - 1) < 1e-6, axis=-1) 98 return symmetric & positive & unit_variance 99 100 101 class _Dependent(Constraint): 102 def __call__(self, x): 103 raise ValueError('Cannot determine validity of dependent constraint') 104 105 106 def is_dependent(constraint): 107 return isinstance(constraint, _Dependent) 108 109 110 class _GreaterThan(Constraint): 111 def __init__(self, lower_bound): 112 self.lower_bound = lower_bound 113 114 def __call__(self, x): 115 return x > self.lower_bound 116 117 118 class _LessThan(Constraint): 119 def __init__(self, upper_bound): 120 self.upper_bound = upper_bound 121 122 def __call__(self, x): 123 return x < self.upper_bound 124 125 126 class _IntegerInterval(Constraint): 127 def __init__(self, lower_bound, upper_bound): 128 self.lower_bound = lower_bound 129 self.upper_bound = upper_bound 130 131 def __call__(self, x): 132 return (x >= self.lower_bound) & (x <= self.upper_bound) & (x == jnp.floor(x)) 133 134 135 class _IntegerGreaterThan(Constraint): 136 def __init__(self, lower_bound): 137 self.lower_bound = lower_bound 138 139 def __call__(self, x): 140 return (x % 1 == 0) & (x >= self.lower_bound) 141 142 143 class _Interval(Constraint): 144 def __init__(self, lower_bound, upper_bound): 145 self.lower_bound = lower_bound 146 self.upper_bound = upper_bound 147 148 def __call__(self, x): 149 return (x > self.lower_bound) & (x < self.upper_bound) 150 151 152 class _LowerCholesky(Constraint): 153 def __call__(self, x): 154 tril = jnp.tril(x) 155 lower_triangular = jnp.all(jnp.reshape(tril == x, x.shape[:-2] + (-1,)), axis=-1) 156 positive_diagonal = jnp.all(jnp.diagonal(x, axis1=-2, axis2=-1) > 0, axis=-1) 157 return lower_triangular & positive_diagonal 158 159 160 class _Multinomial(Constraint): 161 def __init__(self, upper_bound): 162 self.upper_bound = upper_bound 163 164 def __call__(self, x): 165 return jnp.all(x >= 0, axis=-1) & (jnp.sum(x, -1) == self.upper_bound) 166 167 168 class _OrderedVector(Constraint): 169 def __call__(self, x): 170 return jnp.all(x[..., 1:] > x[..., :-1], axis=-1) 171 172 173 class _PositiveDefinite(Constraint): 174 def __call__(self, x): 175 # check for symmetric 176 symmetric = jnp.all(jnp.all(x == jnp.swapaxes(x, -2, -1), axis=-1), axis=-1) 177 # check for the smallest eigenvalue is positive 178 positive = jnp.linalg.eigh(x)[0][..., 0] > 0 179 return symmetric & positive 180 181 182 class _Real(Constraint): 183 def __call__(self, x): 184 return jnp.isfinite(x) 185 186 187 class _RealVector(Constraint): 188 def __call__(self, x): 189 return jnp.all(jnp.isfinite(x), axis=-1) 190 191 192 class _Simplex(Constraint): 193 def 
__call__(self, x): 194 x_sum = jnp.sum(x, axis=-1) 195 return jnp.all(x > 0, axis=-1) & (x_sum < 1 + 1e-6) & (x_sum > 1 - 1e-6) 196 197 198 # TODO: Make types consistent 199 200 boolean = _Boolean() 201 corr_cholesky = _CorrCholesky() 202 corr_matrix = _CorrMatrix() 203 dependent = _Dependent() 204 greater_than = _GreaterThan 205 less_than = _LessThan 206 integer_interval = _IntegerInterval 207 integer_greater_than = _IntegerGreaterThan 208 interval = _Interval 209 lower_cholesky = _LowerCholesky() 210 multinomial = _Multinomial 211 nonnegative_integer = _IntegerGreaterThan(0) 212 ordered_vector = _OrderedVector() 213 positive = _GreaterThan(0.) 214 positive_definite = _PositiveDefinite() 215 positive_integer = _IntegerGreaterThan(1) 216 real = _Real() 217 real_vector = _RealVector() 218 simplex = _Simplex() 219 unit_interval = _Interval(0., 1.) 220 [end of numpyro/distributions/constraints.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/numpyro/distributions/constraints.py b/numpyro/distributions/constraints.py --- a/numpyro/distributions/constraints.py +++ b/numpyro/distributions/constraints.py @@ -192,7 +192,7 @@ class _Simplex(Constraint): def __call__(self, x): x_sum = jnp.sum(x, axis=-1) - return jnp.all(x > 0, axis=-1) & (x_sum < 1 + 1e-6) & (x_sum > 1 - 1e-6) + return jnp.all(x >= 0, axis=-1) & (x_sum < 1 + 1e-6) & (x_sum > 1 - 1e-6) # TODO: Make types consistent
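The one-character relaxation in the diff above (`>` becomes `>=` in `_Simplex`) is easiest to see by evaluating the constraint expression directly on the probabilities from the issue. The snippet below is a minimal sketch that re-implements the check inline instead of importing numpyro, and it assumes only that `jax` is installed:

```python
import jax.numpy as jnp

probs = jnp.array([0.5, 0.5, 0.0])
x_sum = jnp.sum(probs, axis=-1)
unit_sum = (x_sum < 1 + 1e-6) & (x_sum > 1 - 1e-6)

strict = jnp.all(probs > 0, axis=-1) & unit_sum    # check before the patch
relaxed = jnp.all(probs >= 0, axis=-1) & unit_sum  # check after the patch

# The zero-probability category fails the strict check but passes the relaxed one,
# which is why enable_validation(True) rejected this Categorical before the fix.
print(bool(strict), bool(relaxed))  # False True
```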
{"golden_diff": "diff --git a/numpyro/distributions/constraints.py b/numpyro/distributions/constraints.py\n--- a/numpyro/distributions/constraints.py\n+++ b/numpyro/distributions/constraints.py\n@@ -192,7 +192,7 @@\n class _Simplex(Constraint):\n def __call__(self, x):\n x_sum = jnp.sum(x, axis=-1)\n- return jnp.all(x > 0, axis=-1) & (x_sum < 1 + 1e-6) & (x_sum > 1 - 1e-6)\n+ return jnp.all(x >= 0, axis=-1) & (x_sum < 1 + 1e-6) & (x_sum > 1 - 1e-6)\n \n \n # TODO: Make types consistent\n", "issue": "Possible error in the validation of a Categorical distribution\nI am getting an error when I try to run the following code. The code just sample from a categorical distribution using the defined probabilities. \r\n\r\n```python\r\nimport numpyro\r\nimport numpyro.distributions as dist\r\nimport jax.numpy as jnp\r\nnumpyro.enable_validation(True)\r\n\r\n\r\ndef model():\r\n probs = jnp.array([0.5, 0.5, 0.])\r\n c = numpyro.sample('c', dist.Categorical(probs=probs))\r\n return c\r\n\r\nwith numpyro.handlers.seed(rng_seed=54):\r\n print(model())\r\n```\r\n\r\n```\r\nValueError Traceback (most recent call last)\r\n<ipython-input-1-fc7fe60e083b> in <module>\r\n 10 \r\n 11 with numpyro.handlers.seed(rng_seed=54):\r\n---> 12 print(model())\r\n\r\n<ipython-input-1-fc7fe60e083b> in model()\r\n 6 def model():\r\n 7 probs = jnp.array([0.5, 0.5, 0.])\r\n----> 8 c = numpyro.sample('c', dist.Categorical(probs=probs))\r\n 9 return c\r\n 10 \r\n\r\n~/miniconda3/envs/numpyro_test/lib/python3.8/site-packages/numpyro/distributions/discrete.py in Categorical(probs, logits, validate_args)\r\n 348 def Categorical(probs=None, logits=None, validate_args=None):\r\n 349 if probs is not None:\r\n--> 350 return CategoricalProbs(probs, validate_args=validate_args)\r\n 351 elif logits is not None:\r\n 352 return CategoricalLogits(logits, validate_args=validate_args)\r\n\r\n~/miniconda3/envs/numpyro_test/lib/python3.8/site-packages/numpyro/distributions/discrete.py in __init__(self, probs, validate_args)\r\n 265 raise ValueError(\"`probs` parameter must be at least one-dimensional.\")\r\n 266 self.probs = probs\r\n--> 267 super(CategoricalProbs, self).__init__(batch_shape=jnp.shape(self.probs)[:-1],\r\n 268 validate_args=validate_args)\r\n 269 \r\n\r\n~/miniconda3/envs/numpyro_test/lib/python3.8/site-packages/numpyro/distributions/distribution.py in __init__(self, batch_shape, event_shape, validate_args)\r\n 142 if not_jax_tracer(is_valid):\r\n 143 if not is_valid:\r\n--> 144 raise ValueError(\"The parameter {} has invalid values\".format(param))\r\n 145 super(Distribution, self).__init__()\r\n 146 \r\n\r\nValueError: The parameter probs has invalid values\r\n```\r\n\r\nI think the problem is caused by the validation because If I restart my kernel and comment the line ```numpyro.enable_validation(True)``` the code will run without problem. It will print 0 in my case.\r\n\r\n\r\nIf I write a similar code in Pyro with the validation enabled, I do not get an error.\r\n\r\n```python\r\nimport torch\r\nimport pyro\r\nimport pyro.distributions as dist\r\npyro.enable_validation(True)\r\npyro.set_rng_seed(54)\r\n\r\ndef model():\r\n probs = torch.tensor([0.5, 0.5, 0.])\r\n c = pyro.sample('c', dist.Categorical(probs=probs))\r\n return c\r\n\r\nprint(model())\r\n```\r\n\r\nI am using Python 3.8.5, Pyro 1.4.0 and NumPyro 0.3.0 with Ubuntu. 
Happy to help with what I can.\n", "before_files": [{"content": "# Copyright Contributors to the Pyro project.\n# SPDX-License-Identifier: Apache-2.0\n\n# The implementation follows the design in PyTorch: torch.distributions.constraints.py\n#\n# Copyright (c) 2016- Facebook, Inc (Adam Paszke)\n# Copyright (c) 2014- Facebook, Inc (Soumith Chintala)\n# Copyright (c) 2011-2014 Idiap Research Institute (Ronan Collobert)\n# Copyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu)\n# Copyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu)\n# Copyright (c) 2011-2013 NYU (Clement Farabet)\n# Copyright (c) 2006-2010 NEC Laboratories America (Ronan Collobert, Leon Bottou, Iain Melvin, Jason Weston)\n# Copyright (c) 2006 Idiap Research Institute (Samy Bengio)\n# Copyright (c) 2001-2004 Idiap Research Institute (Ronan Collobert, Samy Bengio, Johnny Mariethoz)\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\n\n__all__ = [\n 'boolean',\n 'corr_cholesky',\n 'corr_matrix',\n 'dependent',\n 'greater_than',\n 'integer_interval',\n 'integer_greater_than',\n 'interval',\n 'is_dependent',\n 'less_than',\n 'lower_cholesky',\n 'multinomial',\n 'nonnegative_integer',\n 'positive',\n 'positive_definite',\n 'positive_integer',\n 'real',\n 'real_vector',\n 'simplex',\n 'unit_interval',\n 'Constraint',\n]\n\nimport jax.numpy as jnp\n\n\nclass Constraint(object):\n \"\"\"\n Abstract base class for constraints.\n\n A constraint object represents a region over which a variable is valid,\n e.g. 
within which a variable can be optimized.\n \"\"\"\n\n def __call__(self, x):\n raise NotImplementedError\n\n def check(self, value):\n \"\"\"\n Returns a byte tensor of `sample_shape + batch_shape` indicating\n whether each event in value satisfies this constraint.\n \"\"\"\n return self(value)\n\n\nclass _Boolean(Constraint):\n def __call__(self, x):\n return (x == 0) | (x == 1)\n\n\nclass _CorrCholesky(Constraint):\n def __call__(self, x):\n tril = jnp.tril(x)\n lower_triangular = jnp.all(jnp.reshape(tril == x, x.shape[:-2] + (-1,)), axis=-1)\n positive_diagonal = jnp.all(jnp.diagonal(x, axis1=-2, axis2=-1) > 0, axis=-1)\n x_norm = jnp.linalg.norm(x, axis=-1)\n unit_norm_row = jnp.all((x_norm <= 1) & (x_norm > 1 - 1e-6), axis=-1)\n return lower_triangular & positive_diagonal & unit_norm_row\n\n\nclass _CorrMatrix(Constraint):\n def __call__(self, x):\n # check for symmetric\n symmetric = jnp.all(jnp.all(x == jnp.swapaxes(x, -2, -1), axis=-1), axis=-1)\n # check for the smallest eigenvalue is positive\n positive = jnp.linalg.eigh(x)[0][..., 0] > 0\n # check for diagonal equal to 1\n unit_variance = jnp.all(jnp.abs(jnp.diagonal(x, axis1=-2, axis2=-1) - 1) < 1e-6, axis=-1)\n return symmetric & positive & unit_variance\n\n\nclass _Dependent(Constraint):\n def __call__(self, x):\n raise ValueError('Cannot determine validity of dependent constraint')\n\n\ndef is_dependent(constraint):\n return isinstance(constraint, _Dependent)\n\n\nclass _GreaterThan(Constraint):\n def __init__(self, lower_bound):\n self.lower_bound = lower_bound\n\n def __call__(self, x):\n return x > self.lower_bound\n\n\nclass _LessThan(Constraint):\n def __init__(self, upper_bound):\n self.upper_bound = upper_bound\n\n def __call__(self, x):\n return x < self.upper_bound\n\n\nclass _IntegerInterval(Constraint):\n def __init__(self, lower_bound, upper_bound):\n self.lower_bound = lower_bound\n self.upper_bound = upper_bound\n\n def __call__(self, x):\n return (x >= self.lower_bound) & (x <= self.upper_bound) & (x == jnp.floor(x))\n\n\nclass _IntegerGreaterThan(Constraint):\n def __init__(self, lower_bound):\n self.lower_bound = lower_bound\n\n def __call__(self, x):\n return (x % 1 == 0) & (x >= self.lower_bound)\n\n\nclass _Interval(Constraint):\n def __init__(self, lower_bound, upper_bound):\n self.lower_bound = lower_bound\n self.upper_bound = upper_bound\n\n def __call__(self, x):\n return (x > self.lower_bound) & (x < self.upper_bound)\n\n\nclass _LowerCholesky(Constraint):\n def __call__(self, x):\n tril = jnp.tril(x)\n lower_triangular = jnp.all(jnp.reshape(tril == x, x.shape[:-2] + (-1,)), axis=-1)\n positive_diagonal = jnp.all(jnp.diagonal(x, axis1=-2, axis2=-1) > 0, axis=-1)\n return lower_triangular & positive_diagonal\n\n\nclass _Multinomial(Constraint):\n def __init__(self, upper_bound):\n self.upper_bound = upper_bound\n\n def __call__(self, x):\n return jnp.all(x >= 0, axis=-1) & (jnp.sum(x, -1) == self.upper_bound)\n\n\nclass _OrderedVector(Constraint):\n def __call__(self, x):\n return jnp.all(x[..., 1:] > x[..., :-1], axis=-1)\n\n\nclass _PositiveDefinite(Constraint):\n def __call__(self, x):\n # check for symmetric\n symmetric = jnp.all(jnp.all(x == jnp.swapaxes(x, -2, -1), axis=-1), axis=-1)\n # check for the smallest eigenvalue is positive\n positive = jnp.linalg.eigh(x)[0][..., 0] > 0\n return symmetric & positive\n\n\nclass _Real(Constraint):\n def __call__(self, x):\n return jnp.isfinite(x)\n\n\nclass _RealVector(Constraint):\n def __call__(self, x):\n return jnp.all(jnp.isfinite(x), 
axis=-1)\n\n\nclass _Simplex(Constraint):\n def __call__(self, x):\n x_sum = jnp.sum(x, axis=-1)\n return jnp.all(x > 0, axis=-1) & (x_sum < 1 + 1e-6) & (x_sum > 1 - 1e-6)\n\n\n# TODO: Make types consistent\n\nboolean = _Boolean()\ncorr_cholesky = _CorrCholesky()\ncorr_matrix = _CorrMatrix()\ndependent = _Dependent()\ngreater_than = _GreaterThan\nless_than = _LessThan\ninteger_interval = _IntegerInterval\ninteger_greater_than = _IntegerGreaterThan\ninterval = _Interval\nlower_cholesky = _LowerCholesky()\nmultinomial = _Multinomial\nnonnegative_integer = _IntegerGreaterThan(0)\nordered_vector = _OrderedVector()\npositive = _GreaterThan(0.)\npositive_definite = _PositiveDefinite()\npositive_integer = _IntegerGreaterThan(1)\nreal = _Real()\nreal_vector = _RealVector()\nsimplex = _Simplex()\nunit_interval = _Interval(0., 1.)\n", "path": "numpyro/distributions/constraints.py"}]}
num_tokens_prompt: 3,847
num_tokens_diff: 181
problem_id: gh_patches_debug_6604
source: rasdani/github-patches
task_type: git_diff
in_source_id: OpenNMT__OpenNMT-tf-222
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> assert_state_is_compatible() cannot detect dimension difference between encoder_state and decoder_zero_state when encoder and decoder dimensions are not the same in NMTSmall model I just followed the instructions on the page [http://opennmt.net/OpenNMT-tf/quickstart.html](http://opennmt.net/OpenNMT-tf/quickstart.html) and played around a little bit with the NMTSmall model by setting a different `num_units` value to the `UnidirectionalRNNEncoder`, say `256`, which is different from the `512` for the `AttentionalRNNDecoder`. This line https://github.com/OpenNMT/OpenNMT-tf/blob/ae0ada93a652c02793de3b481e20315716ea4f23/opennmt/layers/bridge.py#L56 in the `CopyBridge` did not throw any error, even though the `encoder_state` and `decoder_zero_state` do not have the same dimensions, `256` vs `512`. It probably natual for someone to think of using the `DenseBridge` when dimensions are set differently. However, the `CopyBridge` should be throwing some errors in such misusage cases here, instead of letting one to figure out that with some error message as follows `ValueError: Dimensions must be equal, but are 1280 and 1536 for 'seq2seq/parallel_0/seq2seq/decoder_1/decoder/while/BasicDecoderStep/decoder/attention_wrapper/attention_wrapper/multi_rnn_cell/cell_0/lstm_cell/MatMul' (op: 'MatMul') with input shapes: [?,1280], [1536,2048].` Can anyone please explain why the https://github.com/OpenNMT/OpenNMT-tf/blob/ae0ada93a652c02793de3b481e20315716ea4f23/opennmt/layers/bridge.py#L28 passed without an issue? Thanks! </issue> <code> [start of opennmt/layers/bridge.py] 1 """Define bridges: logic of passing the encoder state to the decoder.""" 2 3 import abc 4 import six 5 6 import tensorflow as tf 7 8 9 def assert_state_is_compatible(expected_state, state): 10 """Asserts that states are compatible. 11 12 Args: 13 expected_state: The reference state. 14 state: The state that must be compatible with :obj:`expected_state`. 15 16 Raises: 17 ValueError: if the states are incompatible. 18 """ 19 # Check structure compatibility. 20 tf.contrib.framework.nest.assert_same_structure(expected_state, state) 21 22 # Check shape compatibility. 23 expected_state_flat = tf.contrib.framework.nest.flatten(expected_state) 24 state_flat = tf.contrib.framework.nest.flatten(state) 25 26 for x, y in zip(expected_state_flat, state_flat): 27 if tf.contrib.framework.is_tensor(x): 28 tf.contrib.framework.with_same_shape(x, y) 29 30 31 @six.add_metaclass(abc.ABCMeta) 32 class Bridge(object): 33 """Base class for bridges.""" 34 35 def __call__(self, encoder_state, decoder_zero_state): 36 """Returns the initial decoder state. 37 38 Args: 39 encoder_state: The encoder state. 40 decoder_zero_state: The default decoder state. 41 42 Returns: 43 The decoder initial state. 44 """ 45 return self._build(encoder_state, decoder_zero_state) 46 47 @abc.abstractmethod 48 def _build(self, encoder_state, decoder_zero_state): 49 raise NotImplementedError() 50 51 52 class CopyBridge(Bridge): 53 """A bridge that passes the encoder state as is.""" 54 55 def _build(self, encoder_state, decoder_zero_state): 56 assert_state_is_compatible(decoder_zero_state, encoder_state) 57 return encoder_state 58 59 60 class ZeroBridge(Bridge): 61 """A bridge that does not pass information from the encoder.""" 62 63 def _build(self, encoder_state, decoder_zero_state): 64 # Simply return the default decoder state. 
65 return decoder_zero_state 66 67 68 class DenseBridge(Bridge): 69 """A bridge that applies a parameterized linear transformation from the 70 encoder state to the decoder state size. 71 """ 72 73 def __init__(self, activation=None): 74 """Initializes the bridge. 75 76 Args: 77 activation: Activation function (a callable). 78 Set it to ``None`` to maintain a linear activation. 79 """ 80 self.activation = activation 81 82 def _build(self, encoder_state, decoder_zero_state): 83 # Flattened states. 84 encoder_state_flat = tf.contrib.framework.nest.flatten(encoder_state) 85 decoder_state_flat = tf.contrib.framework.nest.flatten(decoder_zero_state) 86 87 # View encoder state as a single tensor. 88 encoder_state_concat = tf.concat(encoder_state_flat, 1) 89 90 # Extract decoder state sizes. 91 decoder_state_size = [] 92 for tensor in decoder_state_flat: 93 decoder_state_size.append(tensor.get_shape().as_list()[-1]) 94 95 decoder_total_size = sum(decoder_state_size) 96 97 # Apply linear transformation. 98 transformed = tf.layers.dense( 99 encoder_state_concat, 100 decoder_total_size, 101 activation=self.activation) 102 103 # Split resulting tensor to match the decoder state size. 104 splitted = tf.split(transformed, decoder_state_size, axis=1) 105 106 # Pack as the origial decoder state. 107 return tf.contrib.framework.nest.pack_sequence_as(decoder_zero_state, splitted) 108 [end of opennmt/layers/bridge.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/opennmt/layers/bridge.py b/opennmt/layers/bridge.py --- a/opennmt/layers/bridge.py +++ b/opennmt/layers/bridge.py @@ -25,7 +25,11 @@ for x, y in zip(expected_state_flat, state_flat): if tf.contrib.framework.is_tensor(x): - tf.contrib.framework.with_same_shape(x, y) + expected_depth = x.get_shape().as_list()[-1] + depth = y.get_shape().as_list()[-1] + if depth != expected_depth: + raise ValueError("Tensor %s in state has shape %s which is incompatible " + "with the target shape %s" % (y.name, y.shape, x.shape)) @six.add_metaclass(abc.ABCMeta)
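The patch above works purely on static shape metadata: it flattens both states, reads the trailing dimension of each component with `get_shape().as_list()[-1]`, and raises as soon as the depths differ, instead of calling `tf.contrib.framework.with_same_shape` and discarding its result, which is what let the 256-vs-512 mismatch slip through. The same "compare trailing depths and fail loudly" idea can be sketched without TensorFlow; the function name and shapes below are hypothetical stand-ins for the flattened encoder/decoder LSTM states from the issue:

```python
def assert_depths_compatible(expected_shapes, shapes):
    """Raise if any state component has a different trailing (depth) dimension."""
    for expected, actual in zip(expected_shapes, shapes):
        if expected[-1] != actual[-1]:
            raise ValueError(
                "state depth %d is incompatible with the expected depth %d"
                % (actual[-1], expected[-1]))

decoder_zero_state = [(None, 512), (None, 512)]  # hypothetical c/h shapes of the decoder cell
encoder_state = [(None, 256), (None, 256)]       # hypothetical c/h shapes of the 256-unit encoder

assert_depths_compatible(decoder_zero_state, encoder_state)  # fails fast with a clear message
```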
{"golden_diff": "diff --git a/opennmt/layers/bridge.py b/opennmt/layers/bridge.py\n--- a/opennmt/layers/bridge.py\n+++ b/opennmt/layers/bridge.py\n@@ -25,7 +25,11 @@\n \n for x, y in zip(expected_state_flat, state_flat):\n if tf.contrib.framework.is_tensor(x):\n- tf.contrib.framework.with_same_shape(x, y)\n+ expected_depth = x.get_shape().as_list()[-1]\n+ depth = y.get_shape().as_list()[-1]\n+ if depth != expected_depth:\n+ raise ValueError(\"Tensor %s in state has shape %s which is incompatible \"\n+ \"with the target shape %s\" % (y.name, y.shape, x.shape))\n \n \n @six.add_metaclass(abc.ABCMeta)\n", "issue": "assert_state_is_compatible() cannot detect dimension difference between encoder_state and decoder_zero_state when encoder and decoder dimensions are not the same in NMTSmall model\nI just followed the instructions on the page [http://opennmt.net/OpenNMT-tf/quickstart.html](http://opennmt.net/OpenNMT-tf/quickstart.html) and played around a little bit with the NMTSmall model by setting a different `num_units` value to the `UnidirectionalRNNEncoder`, say `256`, which is different from the `512` for the `AttentionalRNNDecoder`. \r\n\r\nThis line\r\n\r\nhttps://github.com/OpenNMT/OpenNMT-tf/blob/ae0ada93a652c02793de3b481e20315716ea4f23/opennmt/layers/bridge.py#L56\r\n\r\nin the `CopyBridge` did not throw any error, even though the `encoder_state` and `decoder_zero_state` do not have the same dimensions, `256` vs `512`.\r\n\r\nIt probably natual for someone to think of using the `DenseBridge` when dimensions are set differently. However, the `CopyBridge` should be throwing some errors in such misusage cases here, instead of letting one to figure out that with some error message as follows\r\n\r\n`ValueError: Dimensions must be equal, but are 1280 and 1536 for 'seq2seq/parallel_0/seq2seq/decoder_1/decoder/while/BasicDecoderStep/decoder/attention_wrapper/attention_wrapper/multi_rnn_cell/cell_0/lstm_cell/MatMul' (op: 'MatMul') with input shapes: [?,1280], [1536,2048].`\r\n\r\nCan anyone please explain why the \r\nhttps://github.com/OpenNMT/OpenNMT-tf/blob/ae0ada93a652c02793de3b481e20315716ea4f23/opennmt/layers/bridge.py#L28\r\npassed without an issue?\r\n\r\nThanks!\n", "before_files": [{"content": "\"\"\"Define bridges: logic of passing the encoder state to the decoder.\"\"\"\n\nimport abc\nimport six\n\nimport tensorflow as tf\n\n\ndef assert_state_is_compatible(expected_state, state):\n \"\"\"Asserts that states are compatible.\n\n Args:\n expected_state: The reference state.\n state: The state that must be compatible with :obj:`expected_state`.\n\n Raises:\n ValueError: if the states are incompatible.\n \"\"\"\n # Check structure compatibility.\n tf.contrib.framework.nest.assert_same_structure(expected_state, state)\n\n # Check shape compatibility.\n expected_state_flat = tf.contrib.framework.nest.flatten(expected_state)\n state_flat = tf.contrib.framework.nest.flatten(state)\n\n for x, y in zip(expected_state_flat, state_flat):\n if tf.contrib.framework.is_tensor(x):\n tf.contrib.framework.with_same_shape(x, y)\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass Bridge(object):\n \"\"\"Base class for bridges.\"\"\"\n\n def __call__(self, encoder_state, decoder_zero_state):\n \"\"\"Returns the initial decoder state.\n\n Args:\n encoder_state: The encoder state.\n decoder_zero_state: The default decoder state.\n\n Returns:\n The decoder initial state.\n \"\"\"\n return self._build(encoder_state, decoder_zero_state)\n\n @abc.abstractmethod\n def _build(self, encoder_state, 
decoder_zero_state):\n raise NotImplementedError()\n\n\nclass CopyBridge(Bridge):\n \"\"\"A bridge that passes the encoder state as is.\"\"\"\n\n def _build(self, encoder_state, decoder_zero_state):\n assert_state_is_compatible(decoder_zero_state, encoder_state)\n return encoder_state\n\n\nclass ZeroBridge(Bridge):\n \"\"\"A bridge that does not pass information from the encoder.\"\"\"\n\n def _build(self, encoder_state, decoder_zero_state):\n # Simply return the default decoder state.\n return decoder_zero_state\n\n\nclass DenseBridge(Bridge):\n \"\"\"A bridge that applies a parameterized linear transformation from the\n encoder state to the decoder state size.\n \"\"\"\n\n def __init__(self, activation=None):\n \"\"\"Initializes the bridge.\n\n Args:\n activation: Activation function (a callable).\n Set it to ``None`` to maintain a linear activation.\n \"\"\"\n self.activation = activation\n\n def _build(self, encoder_state, decoder_zero_state):\n # Flattened states.\n encoder_state_flat = tf.contrib.framework.nest.flatten(encoder_state)\n decoder_state_flat = tf.contrib.framework.nest.flatten(decoder_zero_state)\n\n # View encoder state as a single tensor.\n encoder_state_concat = tf.concat(encoder_state_flat, 1)\n\n # Extract decoder state sizes.\n decoder_state_size = []\n for tensor in decoder_state_flat:\n decoder_state_size.append(tensor.get_shape().as_list()[-1])\n\n decoder_total_size = sum(decoder_state_size)\n\n # Apply linear transformation.\n transformed = tf.layers.dense(\n encoder_state_concat,\n decoder_total_size,\n activation=self.activation)\n\n # Split resulting tensor to match the decoder state size.\n splitted = tf.split(transformed, decoder_state_size, axis=1)\n\n # Pack as the origial decoder state.\n return tf.contrib.framework.nest.pack_sequence_as(decoder_zero_state, splitted)\n", "path": "opennmt/layers/bridge.py"}]}
num_tokens_prompt: 1,926
num_tokens_diff: 181
problem_id: gh_patches_debug_17593
source: rasdani/github-patches
task_type: git_diff
in_source_id: OpenMined__PySyft-5122
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add DCGAN example Duet Notebooks ## Description Add two notebook's which reflect the DCGAN example split into DO (Data Owner) and DS (Data Scientist): https://github.com/pytorch/examples/blob/master/dcgan/ ## Definition of Done The partially runnable DCGAN example notebooks should be in the examples/duet/dcgan folder and a README.md should be added in the parent examples/duet directory with a link to the original example and our notebook. Add Super Resolution Example Duet Notebooks ## Description Add two notebook's which reflect the Super Resolution example split into DO (Data Owner) and DS (Data Scientist): https://github.com/pytorch/examples/blob/master/super_resolution/ ## Definition of Done The partially runnable Super Resolution example notebooks should be in the examples/duet/super_resolution folder and a README.md should be added in the parent examples/duet directory with a link to the original example and our notebook. Add DCGAN example Duet Notebooks ## Description Add two notebook's which reflect the DCGAN example split into DO (Data Owner) and DS (Data Scientist): https://github.com/pytorch/examples/blob/master/dcgan/ ## Definition of Done The partially runnable DCGAN example notebooks should be in the examples/duet/dcgan folder and a README.md should be added in the parent examples/duet directory with a link to the original example and our notebook. </issue> <code> [start of src/syft/lib/torch/__init__.py] 1 # stdlib 2 from typing import Any 3 from typing import Dict 4 from typing import Union 5 6 # third party 7 from packaging import version 8 import torch 9 10 # syft relative 11 from . import device # noqa: 401 12 from . import parameter # noqa: 401 13 from . 
import uppercase_tensor # noqa: 401 14 from ...ast.globals import Globals 15 from ...logger import critical 16 from .allowlist import allowlist 17 18 TORCH_VERSION = version.parse(torch.__version__.split("+")[0]) 19 20 21 def get_return_type(support_dict: Union[str, Dict[str, str]]) -> str: 22 if isinstance(support_dict, str): 23 return support_dict 24 else: 25 return support_dict["return_type"] 26 27 28 def version_supported(support_dict: Union[str, Dict[str, str]]) -> bool: 29 if isinstance(support_dict, str): 30 return True 31 else: 32 # if we are on either side of the min or max versions we don't support this op 33 if "min_version" in support_dict and TORCH_VERSION < version.parse( 34 support_dict["min_version"] 35 ): 36 return False 37 if "max_version" in support_dict and TORCH_VERSION > version.parse( 38 support_dict["max_version"] 39 ): 40 return False 41 return True 42 43 44 def create_torch_ast(client: Any = None) -> Globals: 45 ast = Globals(client) 46 47 # most methods work in all versions and have a single return type 48 # for the more complicated ones we pass a dict with keys like return_type and 49 # min_version 50 for method, return_type_name_or_dict in allowlist.items(): 51 if version_supported(support_dict=return_type_name_or_dict): 52 return_type = get_return_type(support_dict=return_type_name_or_dict) 53 if return_type == "unknown": 54 # this allows us to import them for testing 55 continue 56 ast.add_path( 57 path=method, framework_reference=torch, return_type_name=return_type 58 ) 59 # add all the torch.nn.Parameter hooks 60 if method.startswith("torch.Tensor."): 61 method = method.replace("torch.Tensor.", "torch.nn.Parameter.") 62 return_type = return_type.replace("torch.Tensor", "torch.nn.Parameter") 63 ast.add_path( 64 path=method, framework_reference=torch, return_type_name=return_type 65 ) 66 else: 67 critical(f"Skipping {method} not supported in {TORCH_VERSION}") 68 69 for klass in ast.classes: 70 klass.create_pointer_class() 71 klass.create_send_method() 72 klass.create_storable_object_attr_convenience_methods() 73 return ast 74 [end of src/syft/lib/torch/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/syft/lib/torch/__init__.py b/src/syft/lib/torch/__init__.py --- a/src/syft/lib/torch/__init__.py +++ b/src/syft/lib/torch/__init__.py @@ -12,7 +12,7 @@ from . import parameter # noqa: 401 from . import uppercase_tensor # noqa: 401 from ...ast.globals import Globals -from ...logger import critical +from ...logger import info from .allowlist import allowlist TORCH_VERSION = version.parse(torch.__version__.split("+")[0]) @@ -64,7 +64,7 @@ path=method, framework_reference=torch, return_type_name=return_type ) else: - critical(f"Skipping {method} not supported in {TORCH_VERSION}") + info(f"Skipping {method} not supported in {TORCH_VERSION}") for klass in ast.classes: klass.create_pointer_class()
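The change above only swaps the log level used when an allowlisted torch method is skipped, but it is easier to follow with the version gating from the prompt in mind: each allowlist entry is either a bare return-type string or a dict that may carry `min_version`/`max_version` bounds. The sketch below replays that gating with the `packaging` library; the entry and the pinned torch version are invented for illustration and are not taken from the real allowlist:

```python
from packaging import version

TORCH_VERSION = version.parse("1.5.0")  # stand-in for torch.__version__ on this machine

support = {"return_type": "torch.Tensor", "min_version": "1.6.0"}  # hypothetical gated entry
supported = not (
    "min_version" in support
    and TORCH_VERSION < version.parse(support["min_version"])
)

# With the patch, skipping an entry like this is reported via info() rather than
# critical(), since it is expected behaviour on older torch versions.
print(supported)  # False
```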
{"golden_diff": "diff --git a/src/syft/lib/torch/__init__.py b/src/syft/lib/torch/__init__.py\n--- a/src/syft/lib/torch/__init__.py\n+++ b/src/syft/lib/torch/__init__.py\n@@ -12,7 +12,7 @@\n from . import parameter # noqa: 401\n from . import uppercase_tensor # noqa: 401\n from ...ast.globals import Globals\n-from ...logger import critical\n+from ...logger import info\n from .allowlist import allowlist\n \n TORCH_VERSION = version.parse(torch.__version__.split(\"+\")[0])\n@@ -64,7 +64,7 @@\n path=method, framework_reference=torch, return_type_name=return_type\n )\n else:\n- critical(f\"Skipping {method} not supported in {TORCH_VERSION}\")\n+ info(f\"Skipping {method} not supported in {TORCH_VERSION}\")\n \n for klass in ast.classes:\n klass.create_pointer_class()\n", "issue": "Add DCGAN example Duet Notebooks\n## Description\r\nAdd two notebook's which reflect the DCGAN example split into DO (Data Owner) and DS (Data Scientist):\r\nhttps://github.com/pytorch/examples/blob/master/dcgan/\r\n\r\n## Definition of Done\r\nThe partially runnable DCGAN example notebooks should be in the examples/duet/dcgan folder and a README.md should be added in the parent examples/duet directory with a link to the original example and our notebook.\nAdd Super Resolution Example Duet Notebooks\n## Description\r\nAdd two notebook's which reflect the Super Resolution example split into DO (Data Owner) and DS (Data Scientist):\r\nhttps://github.com/pytorch/examples/blob/master/super_resolution/\r\n\r\n## Definition of Done\r\nThe partially runnable Super Resolution example notebooks should be in the examples/duet/super_resolution folder and a README.md should be added in the parent examples/duet directory with a link to the original example and our notebook.\nAdd DCGAN example Duet Notebooks\n## Description\r\nAdd two notebook's which reflect the DCGAN example split into DO (Data Owner) and DS (Data Scientist):\r\nhttps://github.com/pytorch/examples/blob/master/dcgan/\r\n\r\n## Definition of Done\r\nThe partially runnable DCGAN example notebooks should be in the examples/duet/dcgan folder and a README.md should be added in the parent examples/duet directory with a link to the original example and our notebook.\n", "before_files": [{"content": "# stdlib\nfrom typing import Any\nfrom typing import Dict\nfrom typing import Union\n\n# third party\nfrom packaging import version\nimport torch\n\n# syft relative\nfrom . import device # noqa: 401\nfrom . import parameter # noqa: 401\nfrom . 
import uppercase_tensor # noqa: 401\nfrom ...ast.globals import Globals\nfrom ...logger import critical\nfrom .allowlist import allowlist\n\nTORCH_VERSION = version.parse(torch.__version__.split(\"+\")[0])\n\n\ndef get_return_type(support_dict: Union[str, Dict[str, str]]) -> str:\n if isinstance(support_dict, str):\n return support_dict\n else:\n return support_dict[\"return_type\"]\n\n\ndef version_supported(support_dict: Union[str, Dict[str, str]]) -> bool:\n if isinstance(support_dict, str):\n return True\n else:\n # if we are on either side of the min or max versions we don't support this op\n if \"min_version\" in support_dict and TORCH_VERSION < version.parse(\n support_dict[\"min_version\"]\n ):\n return False\n if \"max_version\" in support_dict and TORCH_VERSION > version.parse(\n support_dict[\"max_version\"]\n ):\n return False\n return True\n\n\ndef create_torch_ast(client: Any = None) -> Globals:\n ast = Globals(client)\n\n # most methods work in all versions and have a single return type\n # for the more complicated ones we pass a dict with keys like return_type and\n # min_version\n for method, return_type_name_or_dict in allowlist.items():\n if version_supported(support_dict=return_type_name_or_dict):\n return_type = get_return_type(support_dict=return_type_name_or_dict)\n if return_type == \"unknown\":\n # this allows us to import them for testing\n continue\n ast.add_path(\n path=method, framework_reference=torch, return_type_name=return_type\n )\n # add all the torch.nn.Parameter hooks\n if method.startswith(\"torch.Tensor.\"):\n method = method.replace(\"torch.Tensor.\", \"torch.nn.Parameter.\")\n return_type = return_type.replace(\"torch.Tensor\", \"torch.nn.Parameter\")\n ast.add_path(\n path=method, framework_reference=torch, return_type_name=return_type\n )\n else:\n critical(f\"Skipping {method} not supported in {TORCH_VERSION}\")\n\n for klass in ast.classes:\n klass.create_pointer_class()\n klass.create_send_method()\n klass.create_storable_object_attr_convenience_methods()\n return ast\n", "path": "src/syft/lib/torch/__init__.py"}]}
num_tokens_prompt: 1,546
num_tokens_diff: 220
problem_id: gh_patches_debug_26137
source: rasdani/github-patches
task_type: git_diff
in_source_id: liberapay__liberapay.com-327
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Tweaking the take throttling algorithm We're trying to use liberapay for our catalyst community in Lille, that's why we got 100€ (https://liberapay.com/collectifcatalyst/) by week for our local community. We need people to take this money faster than starting with 1€ (and 2€ / 4 / 8 next week) as people are giving a lot of time now. Is that possible ? Could we get multiple way to distribute money in a team depending on how the project go (for example for a faster project, starting with 1€ is difficult). </issue> <code> [start of liberapay/models/_mixin_team.py] 1 """Teams are groups of participants. 2 """ 3 from __future__ import division, print_function, unicode_literals 4 5 from collections import OrderedDict 6 from decimal import Decimal, ROUND_UP 7 from statistics import median 8 9 from liberapay.constants import D_CENT, D_UNIT, D_ZERO 10 11 12 class MemberLimitReached(Exception): pass 13 14 15 class InactiveParticipantAdded(Exception): pass 16 17 18 class MixinTeam(object): 19 20 def invite(self, invitee, inviter): 21 assert self.kind == 'group' 22 with self.db.get_cursor() as c: 23 n_id = invitee.notify( 24 'team_invite', 25 team=self.username, 26 team_url=self.url(), 27 inviter=inviter.username, 28 ) 29 payload = dict(invitee=invitee.id, notification_id=n_id) 30 self.add_event(c, 'invite', payload, inviter.id) 31 32 def add_member(self, member, cursor=None): 33 """Add a member to this team. 34 """ 35 if len(self.get_current_takes()) == 149: 36 raise MemberLimitReached 37 if member.status != 'active': 38 raise InactiveParticipantAdded 39 self.set_take_for(member, D_ZERO, self, cursor=cursor) 40 41 def remove_all_members(self, cursor=None): 42 (cursor or self.db).run(""" 43 INSERT INTO takes (ctime, member, team, amount, recorder) ( 44 SELECT ctime, member, %(id)s, NULL, %(id)s 45 FROM current_takes 46 WHERE team=%(id)s 47 ); 48 """, dict(id=self.id)) 49 50 def member_of(self, team): 51 """Given a Participant object, return a boolean. 52 """ 53 assert team.kind == 'group' 54 return self.db.one(""" 55 SELECT true 56 FROM current_takes 57 WHERE team=%s AND member=%s 58 """, (team.id, self.id), default=False) 59 60 def get_takes_last_week(self): 61 """Get the users' nominal takes last week. Used in throttling. 62 """ 63 assert self.kind == 'group' 64 takes = {t.member: t.amount for t in self.db.all(""" 65 66 SELECT DISTINCT ON (member) member, amount, mtime 67 FROM takes 68 WHERE team=%s 69 AND mtime < ( 70 SELECT ts_start 71 FROM paydays 72 WHERE ts_end > ts_start 73 ORDER BY ts_start DESC LIMIT 1 74 ) 75 ORDER BY member, mtime DESC 76 77 """, (self.id,)) if t.amount} 78 takes['_relative_min'] = median(takes.values() or (0,)) ** Decimal('0.7') 79 return takes 80 81 def get_take_for(self, member): 82 """Return the nominal take for this member, or None. 83 """ 84 return self.db.one( 85 "SELECT amount FROM current_takes WHERE member = %s AND team = %s", 86 (member.id, self.id) 87 ) 88 89 def compute_max_this_week(self, member_id, last_week): 90 """2x the member's take last week, or a minimum based on last week's 91 median take, or current income divided by the number of members if takes 92 were zero last week, or 1. 93 """ 94 return max( 95 last_week.get(member_id, 0) * 2, 96 last_week['_relative_min'] or self.receiving / self.nmembers, 97 D_UNIT 98 ) 99 100 def set_take_for(self, member, take, recorder, check_max=True, cursor=None): 101 """Sets member's take from the team pool. 
102 """ 103 assert self.kind == 'group' 104 105 if recorder.id != self.id: 106 cur_take = self.get_take_for(member) 107 if cur_take is None: 108 return None 109 110 if not isinstance(take, (None.__class__, Decimal)): 111 take = Decimal(take) 112 113 if take and check_max and take > 1: 114 last_week = self.get_takes_last_week() 115 max_this_week = self.compute_max_this_week(member.id, last_week) 116 if take > max_this_week: 117 take = max_this_week 118 119 with self.db.get_cursor(cursor) as cursor: 120 # Lock to avoid race conditions 121 cursor.run("LOCK TABLE takes IN EXCLUSIVE MODE") 122 # Compute the current takes 123 old_takes = self.compute_actual_takes(cursor) 124 # Insert the new take 125 cursor.run(""" 126 127 INSERT INTO takes (ctime, member, team, amount, recorder) 128 VALUES ( COALESCE (( SELECT ctime 129 FROM takes 130 WHERE member=%(member)s 131 AND team=%(team)s 132 LIMIT 1 133 ), CURRENT_TIMESTAMP) 134 , %(member)s 135 , %(team)s 136 , %(amount)s 137 , %(recorder)s 138 ) 139 140 """, dict(member=member.id, team=self.id, amount=take, 141 recorder=recorder.id)) 142 # Compute the new takes 143 new_takes = self.compute_actual_takes(cursor) 144 # Update receiving amounts in the participants table 145 self.update_taking(old_takes, new_takes, cursor, member) 146 # Update is_funded on member's tips 147 member.update_giving(cursor) 148 149 return take 150 151 def update_taking(self, old_takes, new_takes, cursor=None, member=None): 152 """Update `taking` amounts based on the difference between `old_takes` 153 and `new_takes`. 154 """ 155 for p_id in set(old_takes.keys()).union(new_takes.keys()): 156 old = old_takes.get(p_id, {}).get('actual_amount', D_ZERO) 157 new = new_takes.get(p_id, {}).get('actual_amount', D_ZERO) 158 diff = new - old 159 if diff != 0: 160 (cursor or self.db).run(""" 161 UPDATE participants 162 SET taking = (taking + %(diff)s) 163 , receiving = (receiving + %(diff)s) 164 WHERE id=%(p_id)s 165 """, dict(p_id=p_id, diff=diff)) 166 if member and p_id == member.id: 167 r = (cursor or self.db).one( 168 "SELECT taking, receiving FROM participants WHERE id = %s", 169 (p_id,) 170 ) 171 member.set_attributes(**r._asdict()) 172 173 def get_current_takes(self, cursor=None): 174 """Return a list of member takes for a team. 175 """ 176 assert self.kind == 'group' 177 TAKES = """ 178 SELECT p.id AS member_id, p.username AS member_name, p.avatar_url 179 , (p.mangopay_user_id IS NOT NULL) AS is_identified 180 , t.amount, t.ctime, t.mtime 181 FROM current_takes t 182 JOIN participants p ON p.id = member 183 WHERE t.team=%(team)s 184 ORDER BY p.username 185 """ 186 records = (cursor or self.db).all(TAKES, dict(team=self.id)) 187 return [r._asdict() for r in records] 188 189 def compute_actual_takes(self, cursor=None): 190 """Get the takes, compute the actual amounts, and return an OrderedDict. 
191 """ 192 actual_takes = OrderedDict() 193 nominal_takes = self.get_current_takes(cursor=cursor) 194 balance = self.receiving 195 total_takes = sum(t['amount'] for t in nominal_takes if t['is_identified']) 196 ratio = min(balance / total_takes, 1) if total_takes else 0 197 for take in nominal_takes: 198 nominal = take['nominal_take'] = take.pop('amount') 199 actual = take['actual_amount'] = min( 200 (nominal * ratio).quantize(D_CENT, rounding=ROUND_UP), 201 balance 202 ) if take['is_identified'] else D_ZERO 203 balance -= actual 204 actual_takes[take['member_id']] = take 205 actual_takes.leftover = balance 206 return actual_takes 207 208 @property 209 def nmembers(self): 210 assert self.kind == 'group' 211 return self.db.one(""" 212 SELECT COUNT(*) 213 FROM current_takes 214 WHERE team=%s 215 """, (self.id,)) 216 217 def get_members(self): 218 """Return an OrderedDict of member dicts. 219 """ 220 takes = self.compute_actual_takes() 221 last_week = self.get_takes_last_week() 222 members = OrderedDict() 223 members.leftover = takes.leftover 224 for take in takes.values(): 225 member = {} 226 m_id = member['id'] = take['member_id'] 227 member['username'] = take['member_name'] 228 member['nominal_take'] = take['nominal_take'] 229 member['actual_amount'] = take['actual_amount'] 230 member['last_week'] = last_week.get(m_id, D_ZERO) 231 member['max_this_week'] = self.compute_max_this_week(m_id, last_week) 232 members[member['id']] = member 233 return members 234 235 @property 236 def closed_by(self): 237 assert self.status == 'closed' 238 return self.db.one(""" 239 SELECT member 240 FROM takes 241 WHERE team = %s 242 ORDER BY mtime DESC 243 LIMIT 1 244 """, (self.id,)) 245 [end of liberapay/models/_mixin_team.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
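Before looking at the fix below, a quick worked illustration of the complaint in the issue: with takes capped at double the previous week, a member who starts at the 1.00 minimum needs several paydays before a meaningful share of a 100-per-week income opens up. The target figure is hypothetical and only mirrors the 1 / 2 / 4 / 8 progression the reporter describes:

```python
take, weeks = 1.0, 0
while take < 25.0:  # e.g. aiming for a quarter of the team's 100/week income
    take *= 2
    weeks += 1
print(weeks)  # 5 doublings before the cap stops being the limiting factor
```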
diff --git a/liberapay/models/_mixin_team.py b/liberapay/models/_mixin_team.py --- a/liberapay/models/_mixin_team.py +++ b/liberapay/models/_mixin_team.py @@ -75,7 +75,6 @@ ORDER BY member, mtime DESC """, (self.id,)) if t.amount} - takes['_relative_min'] = median(takes.values() or (0,)) ** Decimal('0.7') return takes def get_take_for(self, member): @@ -87,13 +86,20 @@ ) def compute_max_this_week(self, member_id, last_week): - """2x the member's take last week, or a minimum based on last week's - median take, or current income divided by the number of members if takes - were zero last week, or 1. + """2x the member's take last week, or the member's take last week + a + proportional share of the leftover, or a minimum based on last week's + median take, or 1. """ + sum_last_week = sum(last_week.values()) + initial_leftover = self.receiving - sum_last_week + nonzero_last_week = [a for a in last_week.values() if a] + member_last_week = last_week.get(member_id, 0) + leftover_share = member_last_week / (sum_last_week or Decimal('inf')) + leftover_share = max(leftover_share, D_UNIT / self.nmembers) return max( - last_week.get(member_id, 0) * 2, - last_week['_relative_min'] or self.receiving / self.nmembers, + member_last_week * 2, + member_last_week + initial_leftover * leftover_share, + median(nonzero_last_week or (0,)), D_UNIT )
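To see what the reworked throttle in the diff above does in that scenario, the sketch below replays the patched `compute_max_this_week` arithmetic with plain `Decimal` values outside the model layer. The team income, member name and member count are invented for illustration; the point is that when nobody took anything last week, the leftover-share term already allows an immediate 1/nmembers cut instead of forcing everyone to ramp up from 1:

```python
from decimal import Decimal
from statistics import median

receiving = Decimal("100")   # hypothetical weekly team income
nmembers = 4
last_week = {}               # nobody has recorded a take yet
member_last_week = last_week.get("alice", Decimal(0))

sum_last_week = sum(last_week.values(), Decimal(0))
initial_leftover = receiving - sum_last_week
leftover_share = member_last_week / (sum_last_week or Decimal("inf"))
leftover_share = max(leftover_share, Decimal(1) / nmembers)
nonzero_last_week = [a for a in last_week.values() if a]

max_this_week = max(
    member_last_week * 2,
    member_last_week + initial_leftover * leftover_share,
    median(nonzero_last_week or (Decimal(0),)),
    Decimal(1),  # D_UNIT
)
print(max_this_week)  # 25.00 -> a quarter of the income is available right away
```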
{"golden_diff": "diff --git a/liberapay/models/_mixin_team.py b/liberapay/models/_mixin_team.py\n--- a/liberapay/models/_mixin_team.py\n+++ b/liberapay/models/_mixin_team.py\n@@ -75,7 +75,6 @@\n ORDER BY member, mtime DESC\n \n \"\"\", (self.id,)) if t.amount}\n- takes['_relative_min'] = median(takes.values() or (0,)) ** Decimal('0.7')\n return takes\n \n def get_take_for(self, member):\n@@ -87,13 +86,20 @@\n )\n \n def compute_max_this_week(self, member_id, last_week):\n- \"\"\"2x the member's take last week, or a minimum based on last week's\n- median take, or current income divided by the number of members if takes\n- were zero last week, or 1.\n+ \"\"\"2x the member's take last week, or the member's take last week + a\n+ proportional share of the leftover, or a minimum based on last week's\n+ median take, or 1.\n \"\"\"\n+ sum_last_week = sum(last_week.values())\n+ initial_leftover = self.receiving - sum_last_week\n+ nonzero_last_week = [a for a in last_week.values() if a]\n+ member_last_week = last_week.get(member_id, 0)\n+ leftover_share = member_last_week / (sum_last_week or Decimal('inf'))\n+ leftover_share = max(leftover_share, D_UNIT / self.nmembers)\n return max(\n- last_week.get(member_id, 0) * 2,\n- last_week['_relative_min'] or self.receiving / self.nmembers,\n+ member_last_week * 2,\n+ member_last_week + initial_leftover * leftover_share,\n+ median(nonzero_last_week or (0,)),\n D_UNIT\n )\n", "issue": "Tweaking the take throttling algorithm\n We're trying to use liberapay for our catalyst community in Lille, that's why we got 100\u20ac (https://liberapay.com/collectifcatalyst/) by week for our local community. We need people to take this money faster than starting with 1\u20ac (and 2\u20ac / 4 / 8 next week) as people are giving a lot of time now. Is that possible ? 
Could we get multiple way to distribute money in a team depending on how the project go (for example for a faster project, starting with 1\u20ac is difficult).\n\n", "before_files": [{"content": "\"\"\"Teams are groups of participants.\n\"\"\"\nfrom __future__ import division, print_function, unicode_literals\n\nfrom collections import OrderedDict\nfrom decimal import Decimal, ROUND_UP\nfrom statistics import median\n\nfrom liberapay.constants import D_CENT, D_UNIT, D_ZERO\n\n\nclass MemberLimitReached(Exception): pass\n\n\nclass InactiveParticipantAdded(Exception): pass\n\n\nclass MixinTeam(object):\n\n def invite(self, invitee, inviter):\n assert self.kind == 'group'\n with self.db.get_cursor() as c:\n n_id = invitee.notify(\n 'team_invite',\n team=self.username,\n team_url=self.url(),\n inviter=inviter.username,\n )\n payload = dict(invitee=invitee.id, notification_id=n_id)\n self.add_event(c, 'invite', payload, inviter.id)\n\n def add_member(self, member, cursor=None):\n \"\"\"Add a member to this team.\n \"\"\"\n if len(self.get_current_takes()) == 149:\n raise MemberLimitReached\n if member.status != 'active':\n raise InactiveParticipantAdded\n self.set_take_for(member, D_ZERO, self, cursor=cursor)\n\n def remove_all_members(self, cursor=None):\n (cursor or self.db).run(\"\"\"\n INSERT INTO takes (ctime, member, team, amount, recorder) (\n SELECT ctime, member, %(id)s, NULL, %(id)s\n FROM current_takes\n WHERE team=%(id)s\n );\n \"\"\", dict(id=self.id))\n\n def member_of(self, team):\n \"\"\"Given a Participant object, return a boolean.\n \"\"\"\n assert team.kind == 'group'\n return self.db.one(\"\"\"\n SELECT true\n FROM current_takes\n WHERE team=%s AND member=%s\n \"\"\", (team.id, self.id), default=False)\n\n def get_takes_last_week(self):\n \"\"\"Get the users' nominal takes last week. 
Used in throttling.\n \"\"\"\n assert self.kind == 'group'\n takes = {t.member: t.amount for t in self.db.all(\"\"\"\n\n SELECT DISTINCT ON (member) member, amount, mtime\n FROM takes\n WHERE team=%s\n AND mtime < (\n SELECT ts_start\n FROM paydays\n WHERE ts_end > ts_start\n ORDER BY ts_start DESC LIMIT 1\n )\n ORDER BY member, mtime DESC\n\n \"\"\", (self.id,)) if t.amount}\n takes['_relative_min'] = median(takes.values() or (0,)) ** Decimal('0.7')\n return takes\n\n def get_take_for(self, member):\n \"\"\"Return the nominal take for this member, or None.\n \"\"\"\n return self.db.one(\n \"SELECT amount FROM current_takes WHERE member = %s AND team = %s\",\n (member.id, self.id)\n )\n\n def compute_max_this_week(self, member_id, last_week):\n \"\"\"2x the member's take last week, or a minimum based on last week's\n median take, or current income divided by the number of members if takes\n were zero last week, or 1.\n \"\"\"\n return max(\n last_week.get(member_id, 0) * 2,\n last_week['_relative_min'] or self.receiving / self.nmembers,\n D_UNIT\n )\n\n def set_take_for(self, member, take, recorder, check_max=True, cursor=None):\n \"\"\"Sets member's take from the team pool.\n \"\"\"\n assert self.kind == 'group'\n\n if recorder.id != self.id:\n cur_take = self.get_take_for(member)\n if cur_take is None:\n return None\n\n if not isinstance(take, (None.__class__, Decimal)):\n take = Decimal(take)\n\n if take and check_max and take > 1:\n last_week = self.get_takes_last_week()\n max_this_week = self.compute_max_this_week(member.id, last_week)\n if take > max_this_week:\n take = max_this_week\n\n with self.db.get_cursor(cursor) as cursor:\n # Lock to avoid race conditions\n cursor.run(\"LOCK TABLE takes IN EXCLUSIVE MODE\")\n # Compute the current takes\n old_takes = self.compute_actual_takes(cursor)\n # Insert the new take\n cursor.run(\"\"\"\n\n INSERT INTO takes (ctime, member, team, amount, recorder)\n VALUES ( COALESCE (( SELECT ctime\n FROM takes\n WHERE member=%(member)s\n AND team=%(team)s\n LIMIT 1\n ), CURRENT_TIMESTAMP)\n , %(member)s\n , %(team)s\n , %(amount)s\n , %(recorder)s\n )\n\n \"\"\", dict(member=member.id, team=self.id, amount=take,\n recorder=recorder.id))\n # Compute the new takes\n new_takes = self.compute_actual_takes(cursor)\n # Update receiving amounts in the participants table\n self.update_taking(old_takes, new_takes, cursor, member)\n # Update is_funded on member's tips\n member.update_giving(cursor)\n\n return take\n\n def update_taking(self, old_takes, new_takes, cursor=None, member=None):\n \"\"\"Update `taking` amounts based on the difference between `old_takes`\n and `new_takes`.\n \"\"\"\n for p_id in set(old_takes.keys()).union(new_takes.keys()):\n old = old_takes.get(p_id, {}).get('actual_amount', D_ZERO)\n new = new_takes.get(p_id, {}).get('actual_amount', D_ZERO)\n diff = new - old\n if diff != 0:\n (cursor or self.db).run(\"\"\"\n UPDATE participants\n SET taking = (taking + %(diff)s)\n , receiving = (receiving + %(diff)s)\n WHERE id=%(p_id)s\n \"\"\", dict(p_id=p_id, diff=diff))\n if member and p_id == member.id:\n r = (cursor or self.db).one(\n \"SELECT taking, receiving FROM participants WHERE id = %s\",\n (p_id,)\n )\n member.set_attributes(**r._asdict())\n\n def get_current_takes(self, cursor=None):\n \"\"\"Return a list of member takes for a team.\n \"\"\"\n assert self.kind == 'group'\n TAKES = \"\"\"\n SELECT p.id AS member_id, p.username AS member_name, p.avatar_url\n , (p.mangopay_user_id IS NOT NULL) AS is_identified\n , t.amount, t.ctime, 
t.mtime\n FROM current_takes t\n JOIN participants p ON p.id = member\n WHERE t.team=%(team)s\n ORDER BY p.username\n \"\"\"\n records = (cursor or self.db).all(TAKES, dict(team=self.id))\n return [r._asdict() for r in records]\n\n def compute_actual_takes(self, cursor=None):\n \"\"\"Get the takes, compute the actual amounts, and return an OrderedDict.\n \"\"\"\n actual_takes = OrderedDict()\n nominal_takes = self.get_current_takes(cursor=cursor)\n balance = self.receiving\n total_takes = sum(t['amount'] for t in nominal_takes if t['is_identified'])\n ratio = min(balance / total_takes, 1) if total_takes else 0\n for take in nominal_takes:\n nominal = take['nominal_take'] = take.pop('amount')\n actual = take['actual_amount'] = min(\n (nominal * ratio).quantize(D_CENT, rounding=ROUND_UP),\n balance\n ) if take['is_identified'] else D_ZERO\n balance -= actual\n actual_takes[take['member_id']] = take\n actual_takes.leftover = balance\n return actual_takes\n\n @property\n def nmembers(self):\n assert self.kind == 'group'\n return self.db.one(\"\"\"\n SELECT COUNT(*)\n FROM current_takes\n WHERE team=%s\n \"\"\", (self.id,))\n\n def get_members(self):\n \"\"\"Return an OrderedDict of member dicts.\n \"\"\"\n takes = self.compute_actual_takes()\n last_week = self.get_takes_last_week()\n members = OrderedDict()\n members.leftover = takes.leftover\n for take in takes.values():\n member = {}\n m_id = member['id'] = take['member_id']\n member['username'] = take['member_name']\n member['nominal_take'] = take['nominal_take']\n member['actual_amount'] = take['actual_amount']\n member['last_week'] = last_week.get(m_id, D_ZERO)\n member['max_this_week'] = self.compute_max_this_week(m_id, last_week)\n members[member['id']] = member\n return members\n\n @property\n def closed_by(self):\n assert self.status == 'closed'\n return self.db.one(\"\"\"\n SELECT member\n FROM takes\n WHERE team = %s\n ORDER BY mtime DESC\n LIMIT 1\n \"\"\", (self.id,))\n", "path": "liberapay/models/_mixin_team.py"}]}
3,277
422
gh_patches_debug_57997
rasdani/github-patches
git_diff
zigpy__zha-device-handlers-569
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [Device Support Request] CentraLite Contact Sensor-A **TL;DR**: The battery entity for the `CentraLite Contact Sensor-A` always reports `Unavailable`. It's very similar in signature to the CentraLite 3320-L sensor, so I'm hoping it just needs a device-specific quirk. **Is your feature request related to a problem? Please describe.** I have some Sylvania SMART+ Door/Window sensors that are detected as `CentraLite Contact Sensor-A`. They do not have a device-specific quirk; the ZHA Network Card reports them using zha.device.Device. The open/close and temp entities currently work fine in HA, but the battery level entity always shows `Unavailable`. There is battery information available in the UI via the entity: battery size is reported as `Other`, battery quantity is reported as `1` and battery voltage is reported as a float like `2.7` or `2.6`. I also have some other sensors that are very similar both in terms of device signature and physical attributes. They are Lowes Iris Window/Door sensors and are detected as `Centralite 3320-L`. These use the quirk zhaquirks.centralite.ias.CentraLiteIASSensor. The only differences in the device signatures for these two devices (other than the detected model, of course) are: - The manufacturer_code entry in the node descriptor is different - I’m assuming this is how the different device model is detected. - On the Contact Sensor-A Endpoint 2 has an extra in cluster with id 0x0001 - The Contact Sensor-A uses the default Zigbee device quirk, but the 3320-L uses a CentraLite-specific one. **Describe the solution you'd like** I'd like for the battery entity in HA to show the remaining percentage, similar to the entities for all my other sensor batteries. **Device signature - this can be acquired by removing the device from ZHA and pairing it again from the add devices screen. Be sure to add the entire content of the log panel after pairing the device to a code block below this line.** ``` { "node_descriptor": "NodeDescriptor(byte1=2, byte2=64, mac_capability_flags=128, manufacturer_code=4174, maximum_buffer_size=82, maximum_incoming_transfer_size=82, server_mask=0, maximum_outgoing_transfer_size=82, descriptor_capability_field=0)", "endpoints": { "1": { "profile_id": 260, "device_type": "0x0402", "in_clusters": [ "0x0000", "0x0001", "0x0003", "0x0020", "0x0402", "0x0500", "0x0b05" ], "out_clusters": [ "0x0019" ] }, "2": { "profile_id": 49887, "device_type": "0x000c", "in_clusters": [ "0x0000", "0x0001", "0x0003", "0x0b05", "0xfc0f" ], "out_clusters": [ "0x0003" ] } }, "manufacturer": "CentraLite", "model": "Contact Sensor-A", "class": "zigpy.device.Device" } ``` </issue> <code> [start of zhaquirks/centralite/ias.py] 1 """Device handler for centralite ias sensors.""" 2 from zigpy.profiles import zha 3 from zigpy.quirks import CustomDevice 4 from zigpy.zcl.clusters.general import Basic, BinaryInput, Identify, Ota, PollControl 5 from zigpy.zcl.clusters.measurement import TemperatureMeasurement 6 from zigpy.zcl.clusters.security import IasZone 7 8 from zhaquirks import PowerConfigurationCluster 9 10 from . 
import CENTRALITE 11 from ..const import ( 12 DEVICE_TYPE, 13 ENDPOINTS, 14 INPUT_CLUSTERS, 15 MODELS_INFO, 16 OUTPUT_CLUSTERS, 17 PROFILE_ID, 18 ) 19 20 DIAGNOSTICS_CLUSTER_ID = 0x0B05 # decimal = 2821 21 MANUFACTURER_SPECIFIC_CLUSTER_ID = 0xFC0F # decimal = 64527 22 MANUFACTURER_SPECIFIC_PROFILE_ID = 0xC2DF # decimal = 49887 23 24 25 class CentraLiteIASSensor(CustomDevice): 26 """Custom device representing centralite ias sensors.""" 27 28 signature = { 29 # <SimpleDescriptor endpoint=1 profile=260 device_type=1026 30 # device_version=0 31 # input_clusters=[0, 1, 3, 32, 1026, 1280, 2821] 32 # output_clusters=[25]> 33 MODELS_INFO: [ 34 (CENTRALITE, "3300-S"), 35 (CENTRALITE, "3315-G"), 36 (CENTRALITE, "3315-L"), 37 (CENTRALITE, "3315-S"), 38 (CENTRALITE, "3315-Seu"), 39 (CENTRALITE, "3315"), 40 (CENTRALITE, "3320-L"), 41 ], 42 ENDPOINTS: { 43 1: { 44 PROFILE_ID: zha.PROFILE_ID, 45 DEVICE_TYPE: zha.DeviceType.IAS_ZONE, 46 INPUT_CLUSTERS: [ 47 Basic.cluster_id, 48 PowerConfigurationCluster.cluster_id, 49 Identify.cluster_id, 50 PollControl.cluster_id, 51 TemperatureMeasurement.cluster_id, 52 IasZone.cluster_id, 53 DIAGNOSTICS_CLUSTER_ID, 54 ], 55 OUTPUT_CLUSTERS: [Ota.cluster_id], 56 }, 57 # <SimpleDescriptor endpoint=2 profile=49887 device_type=12 58 # device_version=0 59 # input_clusters=[0, 1, 3, 2821, 64527] 60 # output_clusters=[3]> 61 2: { 62 PROFILE_ID: MANUFACTURER_SPECIFIC_PROFILE_ID, 63 DEVICE_TYPE: zha.DeviceType.SIMPLE_SENSOR, 64 INPUT_CLUSTERS: [ 65 Basic.cluster_id, 66 PowerConfigurationCluster.cluster_id, 67 Identify.cluster_id, 68 DIAGNOSTICS_CLUSTER_ID, 69 MANUFACTURER_SPECIFIC_CLUSTER_ID, 70 ], 71 OUTPUT_CLUSTERS: [Identify.cluster_id], 72 }, 73 }, 74 } 75 76 replacement = { 77 ENDPOINTS: { 78 1: { 79 INPUT_CLUSTERS: [ 80 Basic.cluster_id, 81 PowerConfigurationCluster, 82 Identify.cluster_id, 83 PollControl.cluster_id, 84 TemperatureMeasurement.cluster_id, 85 IasZone.cluster_id, 86 DIAGNOSTICS_CLUSTER_ID, 87 ], 88 OUTPUT_CLUSTERS: [Ota.cluster_id], 89 }, 90 2: { 91 INPUT_CLUSTERS: [ 92 Basic.cluster_id, 93 Identify.cluster_id, 94 DIAGNOSTICS_CLUSTER_ID, 95 MANUFACTURER_SPECIFIC_CLUSTER_ID, 96 ], 97 OUTPUT_CLUSTERS: [Identify.cluster_id], 98 }, 99 } 100 } 101 102 103 class CentraLiteIASSensorV2(CustomDevice): 104 """Custom device representing centralite ias sensors.""" 105 106 signature = { 107 # <SimpleDescriptor endpoint=1 profile=260 device_type=1026 108 # device_version=0 109 # input_clusters=[0, 1, 3, 32, 1026, 1280, 2821] 110 # output_clusters=[25]> 111 MODELS_INFO: CentraLiteIASSensor.signature[MODELS_INFO], 112 ENDPOINTS: { 113 1: CentraLiteIASSensor.signature[ENDPOINTS][1], 114 # <SimpleDescriptor endpoint=2 profile=49887 device_type=12 115 # device_version=0 116 # input_clusters=[0, 1, 3, 15, 2821, 64527] 117 # output_clusters=[3]> 118 2: { 119 PROFILE_ID: MANUFACTURER_SPECIFIC_PROFILE_ID, 120 DEVICE_TYPE: zha.DeviceType.SIMPLE_SENSOR, 121 INPUT_CLUSTERS: [ 122 Basic.cluster_id, 123 BinaryInput.cluster_id, 124 PowerConfigurationCluster.cluster_id, 125 Identify.cluster_id, 126 DIAGNOSTICS_CLUSTER_ID, 127 MANUFACTURER_SPECIFIC_CLUSTER_ID, 128 ], 129 OUTPUT_CLUSTERS: [Identify.cluster_id], 130 }, 131 }, 132 } 133 134 replacement = CentraLiteIASSensor.replacement 135 136 137 class CentraLiteIASSensorV3(CustomDevice): 138 """Custom device representing centralite ias sensors.""" 139 140 signature = { 141 # <SimpleDescriptor endpoint=1 profile=260 device_type=1026 142 # device_version=0 143 # input_clusters=[0, 1, 3, 32, 1026, 1280, 2821] 144 # output_clusters=[25]> 145 
MODELS_INFO: CentraLiteIASSensor.signature[MODELS_INFO], 146 ENDPOINTS: { 147 1: CentraLiteIASSensor.signature[ENDPOINTS][1], 148 # <SimpleDescriptor endpoint=2 profile=49887 device_type=12 149 # device_version=0 150 # input_clusters=[0, 1, 3, 15, 2821] 151 # output_clusters=[3]> 152 2: { 153 PROFILE_ID: MANUFACTURER_SPECIFIC_PROFILE_ID, 154 DEVICE_TYPE: zha.DeviceType.SIMPLE_SENSOR, 155 INPUT_CLUSTERS: [ 156 Basic.cluster_id, 157 BinaryInput.cluster_id, 158 PowerConfigurationCluster.cluster_id, 159 Identify.cluster_id, 160 DIAGNOSTICS_CLUSTER_ID, 161 ], 162 OUTPUT_CLUSTERS: [Identify.cluster_id], 163 }, 164 }, 165 } 166 167 replacement = CentraLiteIASSensor.replacement 168 [end of zhaquirks/centralite/ias.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/zhaquirks/centralite/ias.py b/zhaquirks/centralite/ias.py --- a/zhaquirks/centralite/ias.py +++ b/zhaquirks/centralite/ias.py @@ -38,6 +38,7 @@ (CENTRALITE, "3315-Seu"), (CENTRALITE, "3315"), (CENTRALITE, "3320-L"), + (CENTRALITE, "Contact Sensor-A"), ], ENDPOINTS: { 1: {
{"golden_diff": "diff --git a/zhaquirks/centralite/ias.py b/zhaquirks/centralite/ias.py\n--- a/zhaquirks/centralite/ias.py\n+++ b/zhaquirks/centralite/ias.py\n@@ -38,6 +38,7 @@\n (CENTRALITE, \"3315-Seu\"),\n (CENTRALITE, \"3315\"),\n (CENTRALITE, \"3320-L\"),\n+ (CENTRALITE, \"Contact Sensor-A\"),\n ],\n ENDPOINTS: {\n 1: {\n", "issue": "[Device Support Request] CentraLite Contact Sensor-A\n**TL;DR**: The battery entity for the `CentraLite Contact Sensor-A` always reports `Unavailable`. It's very similar in signature to the CentraLite 3320-L sensor, so I'm hoping it just needs a device-specific quirk.\r\n\r\n**Is your feature request related to a problem? Please describe.**\r\nI have some Sylvania SMART+ Door/Window sensors that are detected as `CentraLite Contact Sensor-A`. They do not have a device-specific quirk; the ZHA Network Card reports them using zha.device.Device.\r\n\r\nThe open/close and temp entities currently work fine in HA, but the battery level entity always shows `Unavailable`. There is battery information available in the UI via the entity: battery size is reported as `Other`, battery quantity is reported as `1` and battery voltage is reported as a float like `2.7` or `2.6`.\r\n\r\nI also have some other sensors that are very similar both in terms of device signature and physical attributes. They are Lowes Iris Window/Door sensors and are detected as `Centralite 3320-L`. These use the quirk zhaquirks.centralite.ias.CentraLiteIASSensor.\r\n\r\nThe only differences in the device signatures for these two devices (other than the detected model, of course) are: \r\n\r\n- The manufacturer_code entry in the node descriptor is different - I\u2019m assuming this is how the different device model is detected.\r\n- On the Contact Sensor-A Endpoint 2 has an extra in cluster with id 0x0001\r\n- The Contact Sensor-A uses the default Zigbee device quirk, but the 3320-L uses a CentraLite-specific one.\r\n\r\n\r\n\r\n**Describe the solution you'd like**\r\nI'd like for the battery entity in HA to show the remaining percentage, similar to the entities for all my other sensor batteries.\r\n\r\n**Device signature - this can be acquired by removing the device from ZHA and pairing it again from the add devices screen. 
Be sure to add the entire content of the log panel after pairing the device to a code block below this line.**\r\n\r\n```\r\n{\r\n \"node_descriptor\": \"NodeDescriptor(byte1=2, byte2=64, mac_capability_flags=128, manufacturer_code=4174, maximum_buffer_size=82, maximum_incoming_transfer_size=82, server_mask=0, maximum_outgoing_transfer_size=82, descriptor_capability_field=0)\",\r\n \"endpoints\": {\r\n \"1\": {\r\n \"profile_id\": 260,\r\n \"device_type\": \"0x0402\",\r\n \"in_clusters\": [\r\n \"0x0000\",\r\n \"0x0001\",\r\n \"0x0003\",\r\n \"0x0020\",\r\n \"0x0402\",\r\n \"0x0500\",\r\n \"0x0b05\"\r\n ],\r\n \"out_clusters\": [\r\n \"0x0019\"\r\n ]\r\n },\r\n \"2\": {\r\n \"profile_id\": 49887,\r\n \"device_type\": \"0x000c\",\r\n \"in_clusters\": [\r\n \"0x0000\",\r\n \"0x0001\",\r\n \"0x0003\",\r\n \"0x0b05\",\r\n \"0xfc0f\"\r\n ],\r\n \"out_clusters\": [\r\n \"0x0003\"\r\n ]\r\n }\r\n },\r\n \"manufacturer\": \"CentraLite\",\r\n \"model\": \"Contact Sensor-A\",\r\n \"class\": \"zigpy.device.Device\"\r\n}\r\n```\r\n\n", "before_files": [{"content": "\"\"\"Device handler for centralite ias sensors.\"\"\"\nfrom zigpy.profiles import zha\nfrom zigpy.quirks import CustomDevice\nfrom zigpy.zcl.clusters.general import Basic, BinaryInput, Identify, Ota, PollControl\nfrom zigpy.zcl.clusters.measurement import TemperatureMeasurement\nfrom zigpy.zcl.clusters.security import IasZone\n\nfrom zhaquirks import PowerConfigurationCluster\n\nfrom . import CENTRALITE\nfrom ..const import (\n DEVICE_TYPE,\n ENDPOINTS,\n INPUT_CLUSTERS,\n MODELS_INFO,\n OUTPUT_CLUSTERS,\n PROFILE_ID,\n)\n\nDIAGNOSTICS_CLUSTER_ID = 0x0B05 # decimal = 2821\nMANUFACTURER_SPECIFIC_CLUSTER_ID = 0xFC0F # decimal = 64527\nMANUFACTURER_SPECIFIC_PROFILE_ID = 0xC2DF # decimal = 49887\n\n\nclass CentraLiteIASSensor(CustomDevice):\n \"\"\"Custom device representing centralite ias sensors.\"\"\"\n\n signature = {\n # <SimpleDescriptor endpoint=1 profile=260 device_type=1026\n # device_version=0\n # input_clusters=[0, 1, 3, 32, 1026, 1280, 2821]\n # output_clusters=[25]>\n MODELS_INFO: [\n (CENTRALITE, \"3300-S\"),\n (CENTRALITE, \"3315-G\"),\n (CENTRALITE, \"3315-L\"),\n (CENTRALITE, \"3315-S\"),\n (CENTRALITE, \"3315-Seu\"),\n (CENTRALITE, \"3315\"),\n (CENTRALITE, \"3320-L\"),\n ],\n ENDPOINTS: {\n 1: {\n PROFILE_ID: zha.PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.IAS_ZONE,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n PowerConfigurationCluster.cluster_id,\n Identify.cluster_id,\n PollControl.cluster_id,\n TemperatureMeasurement.cluster_id,\n IasZone.cluster_id,\n DIAGNOSTICS_CLUSTER_ID,\n ],\n OUTPUT_CLUSTERS: [Ota.cluster_id],\n },\n # <SimpleDescriptor endpoint=2 profile=49887 device_type=12\n # device_version=0\n # input_clusters=[0, 1, 3, 2821, 64527]\n # output_clusters=[3]>\n 2: {\n PROFILE_ID: MANUFACTURER_SPECIFIC_PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.SIMPLE_SENSOR,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n PowerConfigurationCluster.cluster_id,\n Identify.cluster_id,\n DIAGNOSTICS_CLUSTER_ID,\n MANUFACTURER_SPECIFIC_CLUSTER_ID,\n ],\n OUTPUT_CLUSTERS: [Identify.cluster_id],\n },\n },\n }\n\n replacement = {\n ENDPOINTS: {\n 1: {\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n PowerConfigurationCluster,\n Identify.cluster_id,\n PollControl.cluster_id,\n TemperatureMeasurement.cluster_id,\n IasZone.cluster_id,\n DIAGNOSTICS_CLUSTER_ID,\n ],\n OUTPUT_CLUSTERS: [Ota.cluster_id],\n },\n 2: {\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n Identify.cluster_id,\n DIAGNOSTICS_CLUSTER_ID,\n MANUFACTURER_SPECIFIC_CLUSTER_ID,\n ],\n 
OUTPUT_CLUSTERS: [Identify.cluster_id],\n },\n }\n }\n\n\nclass CentraLiteIASSensorV2(CustomDevice):\n \"\"\"Custom device representing centralite ias sensors.\"\"\"\n\n signature = {\n # <SimpleDescriptor endpoint=1 profile=260 device_type=1026\n # device_version=0\n # input_clusters=[0, 1, 3, 32, 1026, 1280, 2821]\n # output_clusters=[25]>\n MODELS_INFO: CentraLiteIASSensor.signature[MODELS_INFO],\n ENDPOINTS: {\n 1: CentraLiteIASSensor.signature[ENDPOINTS][1],\n # <SimpleDescriptor endpoint=2 profile=49887 device_type=12\n # device_version=0\n # input_clusters=[0, 1, 3, 15, 2821, 64527]\n # output_clusters=[3]>\n 2: {\n PROFILE_ID: MANUFACTURER_SPECIFIC_PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.SIMPLE_SENSOR,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n BinaryInput.cluster_id,\n PowerConfigurationCluster.cluster_id,\n Identify.cluster_id,\n DIAGNOSTICS_CLUSTER_ID,\n MANUFACTURER_SPECIFIC_CLUSTER_ID,\n ],\n OUTPUT_CLUSTERS: [Identify.cluster_id],\n },\n },\n }\n\n replacement = CentraLiteIASSensor.replacement\n\n\nclass CentraLiteIASSensorV3(CustomDevice):\n \"\"\"Custom device representing centralite ias sensors.\"\"\"\n\n signature = {\n # <SimpleDescriptor endpoint=1 profile=260 device_type=1026\n # device_version=0\n # input_clusters=[0, 1, 3, 32, 1026, 1280, 2821]\n # output_clusters=[25]>\n MODELS_INFO: CentraLiteIASSensor.signature[MODELS_INFO],\n ENDPOINTS: {\n 1: CentraLiteIASSensor.signature[ENDPOINTS][1],\n # <SimpleDescriptor endpoint=2 profile=49887 device_type=12\n # device_version=0\n # input_clusters=[0, 1, 3, 15, 2821]\n # output_clusters=[3]>\n 2: {\n PROFILE_ID: MANUFACTURER_SPECIFIC_PROFILE_ID,\n DEVICE_TYPE: zha.DeviceType.SIMPLE_SENSOR,\n INPUT_CLUSTERS: [\n Basic.cluster_id,\n BinaryInput.cluster_id,\n PowerConfigurationCluster.cluster_id,\n Identify.cluster_id,\n DIAGNOSTICS_CLUSTER_ID,\n ],\n OUTPUT_CLUSTERS: [Identify.cluster_id],\n },\n },\n }\n\n replacement = CentraLiteIASSensor.replacement\n", "path": "zhaquirks/centralite/ias.py"}]}
3,169
127
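A note on why the one-line zha-device-handlers patch above is enough: zigpy broadly picks a quirk by comparing a joining device's signature, including its (manufacturer, model) pair, against each quirk's declared signature, so listing `(CENTRALITE, "Contact Sensor-A")` in `MODELS_INFO` lets the existing CentraLite IAS quirk claim the Sylvania sensor; the quirk's replacement endpoints then swap in zhaquirks' custom `PowerConfigurationCluster`, which is presumably what converts the raw battery voltage into the percentage the battery entity expects. The sketch below only shows the shape of that matching idea; it is not zigpy's real implementation and every name in it is invented.

```python
# Illustrative only, not zigpy's actual matcher; names are invented.
def quirk_matches(signature, device):
    # Endpoint/cluster checks are omitted; the relevant gate for this issue is
    # the (manufacturer, model) pair.
    return (device.manufacturer, device.model) in signature["models_info"]


signature = {
    "models_info": [
        ("CentraLite", "3320-L"),
        ("CentraLite", "Contact Sensor-A"),  # the pair the patch adds
    ]
}


class FakeDevice:
    manufacturer, model = "CentraLite", "Contact Sensor-A"


assert quirk_matches(signature, FakeDevice())  # matches only with the added pair
```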
gh_patches_debug_11307
rasdani/github-patches
git_diff
ivy-llc__ivy-15802
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> asin </issue> <code> [start of ivy/functional/frontends/paddle/tensor/math.py] 1 # global 2 import ivy 3 from ivy.func_wrapper import with_unsupported_dtypes 4 from ivy.functional.frontends.paddle.func_wrapper import ( 5 to_ivy_arrays_and_back, 6 ) 7 8 9 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle") 10 @to_ivy_arrays_and_back 11 def sin(x, name=None): 12 return ivy.sin(x) 13 14 15 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle") 16 @to_ivy_arrays_and_back 17 def cos(x, name=None): 18 return ivy.cos(x) 19 20 21 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle") 22 @to_ivy_arrays_and_back 23 def acos(x, name=None): 24 return ivy.acos(x) 25 26 27 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle") 28 @to_ivy_arrays_and_back 29 def cosh(x, name=None): 30 return ivy.cosh(x) 31 32 33 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle") 34 @to_ivy_arrays_and_back 35 def tanh(x, name=None): 36 return ivy.tanh(x) 37 38 39 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle") 40 @to_ivy_arrays_and_back 41 def acosh(x, name=None): 42 return ivy.acosh(x) 43 [end of ivy/functional/frontends/paddle/tensor/math.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ivy/functional/frontends/paddle/tensor/math.py b/ivy/functional/frontends/paddle/tensor/math.py --- a/ivy/functional/frontends/paddle/tensor/math.py +++ b/ivy/functional/frontends/paddle/tensor/math.py @@ -1,6 +1,6 @@ # global import ivy -from ivy.func_wrapper import with_unsupported_dtypes +from ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes from ivy.functional.frontends.paddle.func_wrapper import ( to_ivy_arrays_and_back, ) @@ -40,3 +40,9 @@ @to_ivy_arrays_and_back def acosh(x, name=None): return ivy.acosh(x) + + +@with_supported_dtypes({"2.4.2 and below": ("float32", "float64")}, "paddle") +@to_ivy_arrays_and_back +def asin(x, name=None): + return ivy.asin(x)
{"golden_diff": "diff --git a/ivy/functional/frontends/paddle/tensor/math.py b/ivy/functional/frontends/paddle/tensor/math.py\n--- a/ivy/functional/frontends/paddle/tensor/math.py\n+++ b/ivy/functional/frontends/paddle/tensor/math.py\n@@ -1,6 +1,6 @@\n # global\n import ivy\n-from ivy.func_wrapper import with_unsupported_dtypes\n+from ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes\n from ivy.functional.frontends.paddle.func_wrapper import (\n to_ivy_arrays_and_back,\n )\n@@ -40,3 +40,9 @@\n @to_ivy_arrays_and_back\n def acosh(x, name=None):\n return ivy.acosh(x)\n+\n+\n+@with_supported_dtypes({\"2.4.2 and below\": (\"float32\", \"float64\")}, \"paddle\")\n+@to_ivy_arrays_and_back\n+def asin(x, name=None):\n+ return ivy.asin(x)\n", "issue": "asin\n\n", "before_files": [{"content": "# global\nimport ivy\nfrom ivy.func_wrapper import with_unsupported_dtypes\nfrom ivy.functional.frontends.paddle.func_wrapper import (\n to_ivy_arrays_and_back,\n)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef sin(x, name=None):\n return ivy.sin(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef cos(x, name=None):\n return ivy.cos(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef acos(x, name=None):\n return ivy.acos(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef cosh(x, name=None):\n return ivy.cosh(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef tanh(x, name=None):\n return ivy.tanh(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef acosh(x, name=None):\n return ivy.acosh(x)\n", "path": "ivy/functional/frontends/paddle/tensor/math.py"}]}
1,005
223
gh_patches_debug_26207
rasdani/github-patches
git_diff
liqd__a4-opin-663
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Refactor idea to use a4 comments and ratings queryset https://github.com/liqd/adhocracy4/pull/20 </issue> <code> [start of euth/ideas/models.py] 1 from autoslug import AutoSlugField 2 from ckeditor.fields import RichTextField 3 from django.contrib.contenttypes.fields import GenericRelation 4 from django.db import models 5 6 from adhocracy4 import transforms 7 from adhocracy4.comments import models as comment_models 8 from adhocracy4.images import fields 9 from adhocracy4.modules import models as module_models 10 from adhocracy4.ratings import models as rating_models 11 12 13 class IdeaQuerySet(models.QuerySet): 14 15 def _rate_value_condition(self, value): 16 return models.Case( 17 models.When(ratings__value=value, then=models.F('ratings__id')), 18 output_field=models.IntegerField() 19 ) 20 21 def annotate_positive_rating_count(self): 22 return self.annotate( 23 positive_rating_count=models.Count( 24 self._rate_value_condition(1), 25 distinct=True # needed to combine with other count annotations 26 ) 27 ) 28 29 def annotate_negative_rating_count(self): 30 return self.annotate( 31 negative_rating_count=models.Count( 32 self._rate_value_condition(-1), 33 distinct=True # needed to combine with other count annotations 34 ) 35 ) 36 37 def annotate_comment_count(self): 38 return self.annotate( 39 comment_count=models.Count( 40 'comments', 41 distinct=True # needed to combine with other count annotations 42 ) 43 ) 44 45 46 class Idea(module_models.Item): 47 slug = AutoSlugField(populate_from='name', unique=True) 48 name = models.CharField(max_length=120) 49 description = RichTextField() 50 image = fields.ConfiguredImageField( 51 'idea_image', 52 upload_to='ideas/images', 53 blank=True, 54 ) 55 ratings = GenericRelation(rating_models.Rating, 56 related_query_name='idea', 57 object_id_field='object_pk') 58 comments = GenericRelation(comment_models.Comment, 59 related_query_name='idea', 60 object_id_field='object_pk') 61 62 objects = IdeaQuerySet.as_manager() 63 64 def __str__(self): 65 return self.name 66 67 def save(self, *args, **kwargs): 68 self.description = transforms.clean_html_field( 69 self.description) 70 super(Idea, self).save(*args, **kwargs) 71 72 def get_absolute_url(self): 73 from django.core.urlresolvers import reverse 74 return reverse('idea-detail', args=[str(self.slug)]) 75 [end of euth/ideas/models.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/euth/ideas/models.py b/euth/ideas/models.py --- a/euth/ideas/models.py +++ b/euth/ideas/models.py @@ -6,41 +6,13 @@ from adhocracy4 import transforms from adhocracy4.comments import models as comment_models from adhocracy4.images import fields +from adhocracy4.models import query from adhocracy4.modules import models as module_models from adhocracy4.ratings import models as rating_models -class IdeaQuerySet(models.QuerySet): - - def _rate_value_condition(self, value): - return models.Case( - models.When(ratings__value=value, then=models.F('ratings__id')), - output_field=models.IntegerField() - ) - - def annotate_positive_rating_count(self): - return self.annotate( - positive_rating_count=models.Count( - self._rate_value_condition(1), - distinct=True # needed to combine with other count annotations - ) - ) - - def annotate_negative_rating_count(self): - return self.annotate( - negative_rating_count=models.Count( - self._rate_value_condition(-1), - distinct=True # needed to combine with other count annotations - ) - ) - - def annotate_comment_count(self): - return self.annotate( - comment_count=models.Count( - 'comments', - distinct=True # needed to combine with other count annotations - ) - ) +class IdeaQuerySet(query.RateableQuerySet, query.CommentableQuerySet): + pass class Idea(module_models.Item):
{"golden_diff": "diff --git a/euth/ideas/models.py b/euth/ideas/models.py\n--- a/euth/ideas/models.py\n+++ b/euth/ideas/models.py\n@@ -6,41 +6,13 @@\n from adhocracy4 import transforms\n from adhocracy4.comments import models as comment_models\n from adhocracy4.images import fields\n+from adhocracy4.models import query\n from adhocracy4.modules import models as module_models\n from adhocracy4.ratings import models as rating_models\n \n \n-class IdeaQuerySet(models.QuerySet):\n-\n- def _rate_value_condition(self, value):\n- return models.Case(\n- models.When(ratings__value=value, then=models.F('ratings__id')),\n- output_field=models.IntegerField()\n- )\n-\n- def annotate_positive_rating_count(self):\n- return self.annotate(\n- positive_rating_count=models.Count(\n- self._rate_value_condition(1),\n- distinct=True # needed to combine with other count annotations\n- )\n- )\n-\n- def annotate_negative_rating_count(self):\n- return self.annotate(\n- negative_rating_count=models.Count(\n- self._rate_value_condition(-1),\n- distinct=True # needed to combine with other count annotations\n- )\n- )\n-\n- def annotate_comment_count(self):\n- return self.annotate(\n- comment_count=models.Count(\n- 'comments',\n- distinct=True # needed to combine with other count annotations\n- )\n- )\n+class IdeaQuerySet(query.RateableQuerySet, query.CommentableQuerySet):\n+ pass\n \n \n class Idea(module_models.Item):\n", "issue": "Refactor idea to use a4 comments and ratings queryset\nhttps://github.com/liqd/adhocracy4/pull/20\n", "before_files": [{"content": "from autoslug import AutoSlugField\nfrom ckeditor.fields import RichTextField\nfrom django.contrib.contenttypes.fields import GenericRelation\nfrom django.db import models\n\nfrom adhocracy4 import transforms\nfrom adhocracy4.comments import models as comment_models\nfrom adhocracy4.images import fields\nfrom adhocracy4.modules import models as module_models\nfrom adhocracy4.ratings import models as rating_models\n\n\nclass IdeaQuerySet(models.QuerySet):\n\n def _rate_value_condition(self, value):\n return models.Case(\n models.When(ratings__value=value, then=models.F('ratings__id')),\n output_field=models.IntegerField()\n )\n\n def annotate_positive_rating_count(self):\n return self.annotate(\n positive_rating_count=models.Count(\n self._rate_value_condition(1),\n distinct=True # needed to combine with other count annotations\n )\n )\n\n def annotate_negative_rating_count(self):\n return self.annotate(\n negative_rating_count=models.Count(\n self._rate_value_condition(-1),\n distinct=True # needed to combine with other count annotations\n )\n )\n\n def annotate_comment_count(self):\n return self.annotate(\n comment_count=models.Count(\n 'comments',\n distinct=True # needed to combine with other count annotations\n )\n )\n\n\nclass Idea(module_models.Item):\n slug = AutoSlugField(populate_from='name', unique=True)\n name = models.CharField(max_length=120)\n description = RichTextField()\n image = fields.ConfiguredImageField(\n 'idea_image',\n upload_to='ideas/images',\n blank=True,\n )\n ratings = GenericRelation(rating_models.Rating,\n related_query_name='idea',\n object_id_field='object_pk')\n comments = GenericRelation(comment_models.Comment,\n related_query_name='idea',\n object_id_field='object_pk')\n\n objects = IdeaQuerySet.as_manager()\n\n def __str__(self):\n return self.name\n\n def save(self, *args, **kwargs):\n self.description = transforms.clean_html_field(\n self.description)\n super(Idea, self).save(*args, **kwargs)\n\n def 
get_absolute_url(self):\n from django.core.urlresolvers import reverse\n return reverse('idea-detail', args=[str(self.slug)])\n", "path": "euth/ideas/models.py"}]}
1,188
352
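The a4-opin patch above deletes the hand-written annotations in favour of `query.RateableQuerySet` and `query.CommentableQuerySet` from adhocracy4. That `query` module is not shown anywhere in the record, so the sketch below reconstructs a plausible shape for the two mixins from the code the diff removes; treat the class bodies as assumptions rather than adhocracy4's actual source.

```python
# Plausible reconstruction, assuming the mixins simply host the annotations that
# IdeaQuerySet used to define inline.
from django.db import models


class CommentableQuerySet(models.QuerySet):
    def annotate_comment_count(self):
        return self.annotate(
            comment_count=models.Count('comments', distinct=True)
        )


class RateableQuerySet(models.QuerySet):
    def _rate_value_condition(self, value):
        return models.Case(
            models.When(ratings__value=value, then=models.F('ratings__id')),
            output_field=models.IntegerField(),
        )

    def annotate_positive_rating_count(self):
        return self.annotate(
            positive_rating_count=models.Count(
                self._rate_value_condition(1), distinct=True
            )
        )

    def annotate_negative_rating_count(self):
        return self.annotate(
            negative_rating_count=models.Count(
                self._rate_value_condition(-1), distinct=True
            )
        )


# Any model with `ratings` and `comments` relations can then reuse the behaviour:
class IdeaQuerySet(RateableQuerySet, CommentableQuerySet):
    pass
```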
gh_patches_debug_13963
rasdani/github-patches
git_diff
mkdocs__mkdocs-254
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Modify stale files warning to ignore .htaccess, .gitignore and .git I am not sure if this is the same as issue #167 or not, or if there a better workaround than the setup I am using. I have the mkdocs project in one folder, and I am building to a sibling folder ( `site_dir: ../site` ) My site dir contains its own git repo, and .htaccess and .gitignore, which I want to keep there, but I get that stale warning. This causes two issues: 1. Someone may actually run this clean command and delete files I want to keep 2. I cannot run `--clean` Perhaps the behavior of that warning (and of `--clean`) should be changed to accommodate such scenario? If it cannot be solved automatically, then perhaps by a directive in the config? `build_ignore: [file, folder]` </issue> <code> [start of mkdocs/utils.py] 1 # coding: utf-8 2 3 """ 4 Standalone file utils. 5 6 Nothing in this module should have an knowledge of config or the layout 7 and structure of the site and pages in the site. 8 """ 9 10 import os 11 import shutil 12 13 from mkdocs.compat import urlparse 14 15 16 def copy_file(source_path, output_path): 17 """ 18 Copy source_path to output_path, making sure any parent directories exist. 19 """ 20 output_dir = os.path.dirname(output_path) 21 if not os.path.exists(output_dir): 22 os.makedirs(output_dir) 23 shutil.copy(source_path, output_path) 24 25 26 def write_file(content, output_path): 27 """ 28 Write content to output_path, making sure any parent directories exist. 29 """ 30 output_dir = os.path.dirname(output_path) 31 if not os.path.exists(output_dir): 32 os.makedirs(output_dir) 33 open(output_path, 'wb').write(content) 34 35 36 def clean_directory(directory): 37 """ 38 Remove the content of a directory recursively but not the directory itself. 39 """ 40 if os.path.exists(directory): 41 for entry in os.listdir(directory): 42 path = os.path.join(directory, entry) 43 if os.path.isdir(path): 44 shutil.rmtree(path, True) 45 else: 46 os.unlink(path) 47 48 49 def copy_media_files(from_dir, to_dir): 50 """ 51 Recursively copy all files except markdown and HTML into another directory. 52 """ 53 for (source_dir, dirnames, filenames) in os.walk(from_dir): 54 relative_path = os.path.relpath(source_dir, from_dir) 55 output_dir = os.path.normpath(os.path.join(to_dir, relative_path)) 56 57 for filename in filenames: 58 if not is_markdown_file(filename) and not is_html_file(filename): 59 source_path = os.path.join(source_dir, filename) 60 output_path = os.path.join(output_dir, filename) 61 copy_file(source_path, output_path) 62 63 64 def get_html_path(path): 65 """ 66 Map a source file path to an output html path. 67 68 Paths like 'index.md' will be converted to 'index.html' 69 Paths like 'about.md' will be converted to 'about/index.html' 70 Paths like 'api-guide/core.md' will be converted to 'api-guide/core/index.html' 71 """ 72 path = os.path.splitext(path)[0] 73 if os.path.basename(path) == 'index': 74 return path + '.html' 75 return "/".join((path, 'index.html')) 76 77 78 def get_url_path(path, use_directory_urls=True): 79 """ 80 Map a source file path to an output html path. 81 82 Paths like 'index.md' will be converted to '/' 83 Paths like 'about.md' will be converted to '/about/' 84 Paths like 'api-guide/core.md' will be converted to '/api-guide/core/' 85 86 If `use_directory_urls` is `False`, returned URLs will include the a trailing 87 `index.html` rather than just returning the directory path. 
88 """ 89 path = get_html_path(path) 90 url = '/' + path.replace(os.path.sep, '/') 91 if use_directory_urls: 92 return url[:-len('index.html')] 93 return url 94 95 96 def is_homepage(path): 97 return os.path.splitext(path)[0] == 'index' 98 99 100 def is_markdown_file(path): 101 """ 102 Return True if the given file path is a Markdown file. 103 104 http://superuser.com/questions/249436/file-extension-for-markdown-files 105 """ 106 ext = os.path.splitext(path)[1].lower() 107 return ext in [ 108 '.markdown', 109 '.mdown', 110 '.mkdn', 111 '.mkd', 112 '.md', 113 ] 114 115 116 def is_css_file(path): 117 """ 118 Return True if the given file path is a CSS file. 119 """ 120 ext = os.path.splitext(path)[1].lower() 121 return ext in [ 122 '.css', 123 ] 124 125 126 def is_javascript_file(path): 127 """ 128 Return True if the given file path is a Javascript file. 129 """ 130 ext = os.path.splitext(path)[1].lower() 131 return ext in [ 132 '.js', 133 '.javascript' 134 ] 135 136 137 def is_html_file(path): 138 """ 139 Return True if the given file path is an HTML file. 140 """ 141 ext = os.path.splitext(path)[1].lower() 142 return ext in [ 143 '.html', 144 '.htm', 145 ] 146 147 148 def create_media_urls(nav, url_list): 149 """ 150 Return a list of URLs that have been processed correctly for inclusion in a page. 151 """ 152 final_urls = [] 153 for url in url_list: 154 # Allow links to fully qualified URL's 155 parsed = urlparse(url) 156 if parsed.netloc: 157 final_urls.append(url) 158 else: 159 relative_url = '%s/%s' % (nav.url_context.make_relative('/'), url) 160 final_urls.append(relative_url) 161 return final_urls 162 [end of mkdocs/utils.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mkdocs/utils.py b/mkdocs/utils.py --- a/mkdocs/utils.py +++ b/mkdocs/utils.py @@ -54,6 +54,13 @@ relative_path = os.path.relpath(source_dir, from_dir) output_dir = os.path.normpath(os.path.join(to_dir, relative_path)) + # Filter filenames starting with a '.' + filenames = [f for f in filenames if not f.startswith('.')] + + # Filter the dirnames that start with a '.' and update the list in + # place to prevent us walking these. + dirnames[:] = [d for d in dirnames if not d.startswith('.')] + for filename in filenames: if not is_markdown_file(filename) and not is_html_file(filename): source_path = os.path.join(source_dir, filename)
{"golden_diff": "diff --git a/mkdocs/utils.py b/mkdocs/utils.py\n--- a/mkdocs/utils.py\n+++ b/mkdocs/utils.py\n@@ -54,6 +54,13 @@\n relative_path = os.path.relpath(source_dir, from_dir)\n output_dir = os.path.normpath(os.path.join(to_dir, relative_path))\n \n+ # Filter filenames starting with a '.'\n+ filenames = [f for f in filenames if not f.startswith('.')]\n+\n+ # Filter the dirnames that start with a '.' and update the list in\n+ # place to prevent us walking these.\n+ dirnames[:] = [d for d in dirnames if not d.startswith('.')]\n+\n for filename in filenames:\n if not is_markdown_file(filename) and not is_html_file(filename):\n source_path = os.path.join(source_dir, filename)\n", "issue": "Modify stale files warning to ignore .htaccess, .gitignore and .git\nI am not sure if this is the same as issue #167 or not, or if there a better workaround than the setup I am using.\n\nI have the mkdocs project in one folder, and I am building to a sibling folder ( `site_dir: ../site` )\nMy site dir contains its own git repo, and .htaccess and .gitignore, which I want to keep there, but I get that stale warning.\n\nThis causes two issues:\n1. Someone may actually run this clean command and delete files I want to keep\n2. I cannot run `--clean`\n\nPerhaps the behavior of that warning (and of `--clean`) should be changed to accommodate such scenario? \n\nIf it cannot be solved automatically, then perhaps by a directive in the config? `build_ignore: [file, folder]`\n\n", "before_files": [{"content": "# coding: utf-8\n\n\"\"\"\nStandalone file utils.\n\nNothing in this module should have an knowledge of config or the layout\nand structure of the site and pages in the site.\n\"\"\"\n\nimport os\nimport shutil\n\nfrom mkdocs.compat import urlparse\n\n\ndef copy_file(source_path, output_path):\n \"\"\"\n Copy source_path to output_path, making sure any parent directories exist.\n \"\"\"\n output_dir = os.path.dirname(output_path)\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n shutil.copy(source_path, output_path)\n\n\ndef write_file(content, output_path):\n \"\"\"\n Write content to output_path, making sure any parent directories exist.\n \"\"\"\n output_dir = os.path.dirname(output_path)\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n open(output_path, 'wb').write(content)\n\n\ndef clean_directory(directory):\n \"\"\"\n Remove the content of a directory recursively but not the directory itself.\n \"\"\"\n if os.path.exists(directory):\n for entry in os.listdir(directory):\n path = os.path.join(directory, entry)\n if os.path.isdir(path):\n shutil.rmtree(path, True)\n else:\n os.unlink(path)\n\n\ndef copy_media_files(from_dir, to_dir):\n \"\"\"\n Recursively copy all files except markdown and HTML into another directory.\n \"\"\"\n for (source_dir, dirnames, filenames) in os.walk(from_dir):\n relative_path = os.path.relpath(source_dir, from_dir)\n output_dir = os.path.normpath(os.path.join(to_dir, relative_path))\n\n for filename in filenames:\n if not is_markdown_file(filename) and not is_html_file(filename):\n source_path = os.path.join(source_dir, filename)\n output_path = os.path.join(output_dir, filename)\n copy_file(source_path, output_path)\n\n\ndef get_html_path(path):\n \"\"\"\n Map a source file path to an output html path.\n\n Paths like 'index.md' will be converted to 'index.html'\n Paths like 'about.md' will be converted to 'about/index.html'\n Paths like 'api-guide/core.md' will be converted to 'api-guide/core/index.html'\n \"\"\"\n path = 
os.path.splitext(path)[0]\n if os.path.basename(path) == 'index':\n return path + '.html'\n return \"/\".join((path, 'index.html'))\n\n\ndef get_url_path(path, use_directory_urls=True):\n \"\"\"\n Map a source file path to an output html path.\n\n Paths like 'index.md' will be converted to '/'\n Paths like 'about.md' will be converted to '/about/'\n Paths like 'api-guide/core.md' will be converted to '/api-guide/core/'\n\n If `use_directory_urls` is `False`, returned URLs will include the a trailing\n `index.html` rather than just returning the directory path.\n \"\"\"\n path = get_html_path(path)\n url = '/' + path.replace(os.path.sep, '/')\n if use_directory_urls:\n return url[:-len('index.html')]\n return url\n\n\ndef is_homepage(path):\n return os.path.splitext(path)[0] == 'index'\n\n\ndef is_markdown_file(path):\n \"\"\"\n Return True if the given file path is a Markdown file.\n\n http://superuser.com/questions/249436/file-extension-for-markdown-files\n \"\"\"\n ext = os.path.splitext(path)[1].lower()\n return ext in [\n '.markdown',\n '.mdown',\n '.mkdn',\n '.mkd',\n '.md',\n ]\n\n\ndef is_css_file(path):\n \"\"\"\n Return True if the given file path is a CSS file.\n \"\"\"\n ext = os.path.splitext(path)[1].lower()\n return ext in [\n '.css',\n ]\n\n\ndef is_javascript_file(path):\n \"\"\"\n Return True if the given file path is a Javascript file.\n \"\"\"\n ext = os.path.splitext(path)[1].lower()\n return ext in [\n '.js',\n '.javascript'\n ]\n\n\ndef is_html_file(path):\n \"\"\"\n Return True if the given file path is an HTML file.\n \"\"\"\n ext = os.path.splitext(path)[1].lower()\n return ext in [\n '.html',\n '.htm',\n ]\n\n\ndef create_media_urls(nav, url_list):\n \"\"\"\n Return a list of URLs that have been processed correctly for inclusion in a page.\n \"\"\"\n final_urls = []\n for url in url_list:\n # Allow links to fully qualified URL's\n parsed = urlparse(url)\n if parsed.netloc:\n final_urls.append(url)\n else:\n relative_url = '%s/%s' % (nav.url_context.make_relative('/'), url)\n final_urls.append(relative_url)\n return final_urls\n", "path": "mkdocs/utils.py"}]}
2,121
186
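The mkdocs patch above leans on an easy-to-miss detail of `os.walk`: with the default top-down traversal, pruning only works when `dirnames` is mutated in place (slice assignment), because `os.walk` keeps recursing into whatever that same list object contains; rebinding the name with `dirnames = [...]` would have no effect on the walk. A minimal stand-alone sketch of the idiom, with made-up paths:

```python
import os


def iter_visible_files(root):
    """Yield files under `root`, skipping anything whose name starts with a dot."""
    for dirpath, dirnames, filenames in os.walk(root):
        # Slice assignment mutates the very list os.walk will recurse into;
        # plain assignment would only rebind the local name.
        dirnames[:] = [d for d in dirnames if not d.startswith('.')]
        for name in filenames:
            if not name.startswith('.'):
                yield os.path.join(dirpath, name)


# e.g. list(iter_visible_files('docs')) never yields .git/, .gitignore or .htaccess entries.
```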
gh_patches_debug_13000
rasdani/github-patches
git_diff
pyqtgraph__pyqtgraph-1432
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> ExampleApp Syntax Highlighter broken in PyQt5 ### Short description Since merge of #1302, the syntax highlighter in the ExampleApp is broken (CC @j9ac9k ). ### Code to reproduce ```bash cd pyqtgraph\examples python3 . ``` ### Expected behavior Syntax highlighter should work ### Real behavior Following error: ``` Traceback (most recent call last): File "/home/b2/Documents/synced/pyqtgraph/examples/ExampleApp.py", line 298, in highlightBlock format = self.styles[format] AttributeError: 'PythonHighlighter' object has no attribute 'styles' ``` ### Tested environment(s) * PyQtGraph version: 7e57e07068fc8a62e9ae3b68e5556b11b6ef1841 and newer * Qt Python binding: yQt5 5.15.1 Qt 5.15.1 * Python version: Python 3.8.5 * NumPy version: 1.19.2 * Operating system: Linux 5.8.16-2-MANJARO * Installation method: Run in folder (see above) </issue> <code> [start of examples/__main__.py] 1 import sys, os 2 import pyqtgraph as pg 3 from pyqtgraph.Qt import QtCore, QtGui 4 5 6 7 if __name__ == '__main__': 8 if __package__ is None or __package__ == "": 9 parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) 10 sys.path.insert(0, parent_dir) 11 import examples 12 __package__ = "examples" 13 14 from .ExampleApp import main as run 15 run() 16 [end of examples/__main__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/examples/__main__.py b/examples/__main__.py --- a/examples/__main__.py +++ b/examples/__main__.py @@ -1,15 +1,16 @@ import sys, os + +# Set up path to contain pyqtgraph module when run without installation +if __name__ == "__main__" and (__package__ is None or __package__==''): + parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + sys.path.insert(0, parent_dir) + import examples + __package__ = "examples" + import pyqtgraph as pg from pyqtgraph.Qt import QtCore, QtGui - if __name__ == '__main__': - if __package__ is None or __package__ == "": - parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) - sys.path.insert(0, parent_dir) - import examples - __package__ = "examples" - from .ExampleApp import main as run run()
{"golden_diff": "diff --git a/examples/__main__.py b/examples/__main__.py\n--- a/examples/__main__.py\n+++ b/examples/__main__.py\n@@ -1,15 +1,16 @@\n import sys, os\n+\n+# Set up path to contain pyqtgraph module when run without installation\n+if __name__ == \"__main__\" and (__package__ is None or __package__==''):\n+ parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n+ sys.path.insert(0, parent_dir)\n+ import examples\n+ __package__ = \"examples\"\n+\n import pyqtgraph as pg\n from pyqtgraph.Qt import QtCore, QtGui\n \n \n-\n if __name__ == '__main__':\n- if __package__ is None or __package__ == \"\":\n- parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n- sys.path.insert(0, parent_dir)\n- import examples\n- __package__ = \"examples\"\n-\n from .ExampleApp import main as run\n run()\n", "issue": "ExampleApp Syntax Highlighter broken in PyQt5\n### Short description\r\nSince merge of #1302, the syntax highlighter in the ExampleApp is broken (CC @j9ac9k ).\r\n\r\n### Code to reproduce\r\n```bash\r\ncd pyqtgraph\\examples\r\npython3 .\r\n```\r\n\r\n### Expected behavior\r\nSyntax highlighter should work\r\n\r\n### Real behavior\r\nFollowing error:\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/home/b2/Documents/synced/pyqtgraph/examples/ExampleApp.py\", line 298, in highlightBlock\r\n format = self.styles[format]\r\nAttributeError: 'PythonHighlighter' object has no attribute 'styles'\r\n```\r\n\r\n### Tested environment(s)\r\n\r\n * PyQtGraph version: 7e57e07068fc8a62e9ae3b68e5556b11b6ef1841 and newer\r\n * Qt Python binding: yQt5 5.15.1 Qt 5.15.1\r\n * Python version: Python 3.8.5\r\n * NumPy version: 1.19.2\r\n * Operating system: Linux 5.8.16-2-MANJARO\r\n * Installation method: Run in folder (see above)\n", "before_files": [{"content": "import sys, os\nimport pyqtgraph as pg\nfrom pyqtgraph.Qt import QtCore, QtGui\n\n\n\nif __name__ == '__main__':\n if __package__ is None or __package__ == \"\":\n parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n sys.path.insert(0, parent_dir)\n import examples\n __package__ = \"examples\"\n\n from .ExampleApp import main as run\n run()\n", "path": "examples/__main__.py"}]}
933
229
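The pyqtgraph patch above is purely an ordering fix: the `sys.path.insert(0, parent_dir)` bootstrap has to run before the first `import pyqtgraph`, otherwise running the examples from a source checkout quietly imports whatever copy of pyqtgraph is already installed, while the relative `.ExampleApp` import still comes from the checkout; that kind of version mismatch is a plausible source of the missing `styles` attribute in the traceback. The shape of the patched `examples/__main__.py`, with comments spelling out the intent:

```python
import sys, os

# Set up the path *before* importing pyqtgraph, so the checkout is found first
# rather than a previously installed (possibly older) copy.
if __name__ == "__main__" and (__package__ is None or __package__ == ''):
    parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    sys.path.insert(0, parent_dir)
    import examples              # make the package importable for relative imports
    __package__ = "examples"

import pyqtgraph as pg           # now resolved against parent_dir first

if __name__ == '__main__':
    from .ExampleApp import main as run
    run()
```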
gh_patches_debug_25299
rasdani/github-patches
git_diff
facebookresearch__ParlAI-2674
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Problem trying to install Use this to open other questions or issues, and provide context here. Hi, I'm kinda new to this, when I was trying to install it, I got the following error: File "setup.py", line 30 version=f'0.1.{DATE}{BUILD}', ^ SyntaxError: invalid syntax I used the command: git clone https://github.com/facebookresearch/ParlAI.git cd ParlAI; python setup.py develop Any Idea why? Thank you. </issue> <code> [start of setup.py] 1 #!/usr/bin/env python3 2 3 # Copyright (c) Facebook, Inc. and its affiliates. 4 # This source code is licensed under the MIT license found in the 5 # LICENSE file in the root directory of this source tree. 6 7 8 import datetime 9 import sys 10 11 from setuptools import setup, find_packages 12 13 BUILD = '' # if multiple in one day, use "dev0", "dev1", ... 14 DATE = datetime.date.today().isoformat().replace('-', '') 15 16 if sys.version_info < (3, 6): 17 sys.exit('Sorry, Python >=3.6 is required for ParlAI.') 18 19 with open('README.md', encoding="utf8") as f: 20 # strip the header and badges etc 21 readme = f.read().split('--------------------')[-1] 22 23 with open('requirements.txt') as f: 24 reqs = f.read() 25 26 27 if __name__ == '__main__': 28 setup( 29 name='parlai', 30 version=f'0.1.{DATE}{BUILD}', 31 description='Unified platform for dialogue research.', 32 long_description=readme, 33 long_description_content_type='text/markdown', 34 url='http://parl.ai/', 35 python_requires='>=3.6', 36 scripts=['bin/parlai'], 37 packages=find_packages( 38 exclude=('data', 'docs', 'examples', 'tests', 'parlai_internal',) 39 ), 40 install_requires=reqs.strip().split('\n'), 41 include_package_data=True, 42 entry_points={"flake8.extension": ["PAI = parlai.utils.flake8:ParlAIChecker"]}, 43 classifiers=[ 44 "Programming Language :: Python :: 3", 45 "License :: OSI Approved :: MIT License", 46 "Topic :: Scientific/Engineering :: Artificial Intelligence", 47 "Natural Language :: English", 48 ], 49 ) 50 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -10,7 +10,7 @@ from setuptools import setup, find_packages -BUILD = '' # if multiple in one day, use "dev0", "dev1", ... +BUILD = '' # test by setting to ".dev0" if multiple in one day, use ".dev1", ... DATE = datetime.date.today().isoformat().replace('-', '') if sys.version_info < (3, 6): @@ -27,7 +27,7 @@ if __name__ == '__main__': setup( name='parlai', - version=f'0.1.{DATE}{BUILD}', + version='0.1.{DATE}{BUILD}'.format(DATE=DATE, BUILD=BUILD), description='Unified platform for dialogue research.', long_description=readme, long_description_content_type='text/markdown', @@ -39,6 +39,7 @@ ), install_requires=reqs.strip().split('\n'), include_package_data=True, + package_data={'': ['*.txt', '*.md']}, entry_points={"flake8.extension": ["PAI = parlai.utils.flake8:ParlAIChecker"]}, classifiers=[ "Programming Language :: Python :: 3",
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -10,7 +10,7 @@\n \n from setuptools import setup, find_packages\n \n-BUILD = '' # if multiple in one day, use \"dev0\", \"dev1\", ...\n+BUILD = '' # test by setting to \".dev0\" if multiple in one day, use \".dev1\", ...\n DATE = datetime.date.today().isoformat().replace('-', '')\n \n if sys.version_info < (3, 6):\n@@ -27,7 +27,7 @@\n if __name__ == '__main__':\n setup(\n name='parlai',\n- version=f'0.1.{DATE}{BUILD}',\n+ version='0.1.{DATE}{BUILD}'.format(DATE=DATE, BUILD=BUILD),\n description='Unified platform for dialogue research.',\n long_description=readme,\n long_description_content_type='text/markdown',\n@@ -39,6 +39,7 @@\n ),\n install_requires=reqs.strip().split('\\n'),\n include_package_data=True,\n+ package_data={'': ['*.txt', '*.md']},\n entry_points={\"flake8.extension\": [\"PAI = parlai.utils.flake8:ParlAIChecker\"]},\n classifiers=[\n \"Programming Language :: Python :: 3\",\n", "issue": "Problem trying to install \nUse this to open other questions or issues, and provide context here.\r\nHi, I'm kinda new to this, when I was trying to install it, I got the following error: \r\n\r\n File \"setup.py\", line 30\r\n version=f'0.1.{DATE}{BUILD}',\r\n ^\r\nSyntaxError: invalid syntax\r\n\r\nI used the command: \r\n\r\ngit clone https://github.com/facebookresearch/ParlAI.git\r\ncd ParlAI; python setup.py develop\r\n\r\nAny Idea why?\r\n\r\nThank you. \n", "before_files": [{"content": "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n\nimport datetime\nimport sys\n\nfrom setuptools import setup, find_packages\n\nBUILD = '' # if multiple in one day, use \"dev0\", \"dev1\", ...\nDATE = datetime.date.today().isoformat().replace('-', '')\n\nif sys.version_info < (3, 6):\n sys.exit('Sorry, Python >=3.6 is required for ParlAI.')\n\nwith open('README.md', encoding=\"utf8\") as f:\n # strip the header and badges etc\n readme = f.read().split('--------------------')[-1]\n\nwith open('requirements.txt') as f:\n reqs = f.read()\n\n\nif __name__ == '__main__':\n setup(\n name='parlai',\n version=f'0.1.{DATE}{BUILD}',\n description='Unified platform for dialogue research.',\n long_description=readme,\n long_description_content_type='text/markdown',\n url='http://parl.ai/',\n python_requires='>=3.6',\n scripts=['bin/parlai'],\n packages=find_packages(\n exclude=('data', 'docs', 'examples', 'tests', 'parlai_internal',)\n ),\n install_requires=reqs.strip().split('\\n'),\n include_package_data=True,\n entry_points={\"flake8.extension\": [\"PAI = parlai.utils.flake8:ParlAIChecker\"]},\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Natural Language :: English\",\n ],\n )\n", "path": "setup.py"}]}
1,115
287
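The ParlAI failure above is worth spelling out: the `sys.version_info` guard in setup.py never gets the chance to print its friendly message, because the `SyntaxError` from the f-string is raised while the file is being parsed, before any line of it executes. Swapping the f-string for `str.format()` keeps the file parseable on Python 2.7/3.5 so the guard can actually fire. A minimal stand-alone illustration (not ParlAI's full setup.py):

```python
import datetime
import sys

BUILD = ''
DATE = datetime.date.today().isoformat().replace('-', '')

# This check only runs if the whole file parsed successfully, which is exactly why
# an f-string anywhere in setup.py prevented it from ever being reached on Python < 3.6.
if sys.version_info < (3, 6):
    sys.exit('Sorry, Python >=3.6 is required for ParlAI.')

# Parses on Python 2.7/3.5 and is equivalent to f'0.1.{DATE}{BUILD}' on 3.6+:
version = '0.1.{DATE}{BUILD}'.format(DATE=DATE, BUILD=BUILD)
print(version)
```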
gh_patches_debug_61
rasdani/github-patches
git_diff
holoviz__panel-5919
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Plotting styling guides are gone As discussed on Discourse the styling gallery notebooks for Plotly, Altair, Seaborn and Matplotlib have gone missing with Panel 1.0 release. Please re-add. Thx. </issue> <code> [start of panel/models/echarts.py] 1 """ 2 Defines custom bokeh model to render ECharts plots. 3 """ 4 from bokeh.core.properties import ( 5 Any, Dict, Enum, List, Nullable, String, 6 ) 7 from bokeh.events import ModelEvent 8 from bokeh.models import LayoutDOM 9 10 from ..config import config 11 from ..io.resources import bundled_files 12 from ..util import classproperty 13 14 15 class EChartsEvent(ModelEvent): 16 17 event_name = 'echarts_event' 18 19 def __init__(self, model, type=None, data=None, query=None): 20 self.type = type 21 self.data = data 22 self.query = query 23 super().__init__(model=model) 24 25 26 class ECharts(LayoutDOM): 27 """ 28 A Bokeh model that wraps around an ECharts plot and renders it 29 inside a Bokeh. 30 """ 31 32 __javascript_raw__ = [ 33 f"{config.npm_cdn}/[email protected]/dist/echarts.min.js", 34 f"{config.npm_cdn}/[email protected]/dist/echarts-gl.min.js" 35 ] 36 37 @classproperty 38 def __javascript__(cls): 39 return bundled_files(cls) 40 41 @classproperty 42 def __js_skip__(cls): 43 return { 44 'echarts': cls.__javascript__[:1] 45 } 46 47 __js_require__ = { 48 'paths': { 49 "echarts": f"{config.npm_cdn}/[email protected]/dist/echarts.min", 50 "echarts-gl": f"{config.npm_cdn}/[email protected]/dist/echarts-gl.min" 51 }, 52 'exports': {} 53 } 54 55 data = Nullable(Dict(String, Any)) 56 57 options = Nullable(Dict(String, Any)) 58 59 event_config = Dict(String, Any) 60 61 js_events = Dict(String, List(Any)) 62 63 renderer = Enum("canvas", "svg") 64 65 theme = Enum("default", "light", "dark") 66 [end of panel/models/echarts.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/panel/models/echarts.py b/panel/models/echarts.py --- a/panel/models/echarts.py +++ b/panel/models/echarts.py @@ -62,4 +62,4 @@ renderer = Enum("canvas", "svg") - theme = Enum("default", "light", "dark") + theme = String("default")
{"golden_diff": "diff --git a/panel/models/echarts.py b/panel/models/echarts.py\n--- a/panel/models/echarts.py\n+++ b/panel/models/echarts.py\n@@ -62,4 +62,4 @@\n \n renderer = Enum(\"canvas\", \"svg\")\n \n- theme = Enum(\"default\", \"light\", \"dark\")\n+ theme = String(\"default\")\n", "issue": "Plotting styling guides are gone\nAs discussed on Discourse the styling gallery notebooks for Plotly, Altair, Seaborn and Matplotlib have gone missing with Panel 1.0 release.\r\n\r\nPlease re-add. Thx.\n", "before_files": [{"content": "\"\"\"\nDefines custom bokeh model to render ECharts plots.\n\"\"\"\nfrom bokeh.core.properties import (\n Any, Dict, Enum, List, Nullable, String,\n)\nfrom bokeh.events import ModelEvent\nfrom bokeh.models import LayoutDOM\n\nfrom ..config import config\nfrom ..io.resources import bundled_files\nfrom ..util import classproperty\n\n\nclass EChartsEvent(ModelEvent):\n\n event_name = 'echarts_event'\n\n def __init__(self, model, type=None, data=None, query=None):\n self.type = type\n self.data = data\n self.query = query\n super().__init__(model=model)\n\n\nclass ECharts(LayoutDOM):\n \"\"\"\n A Bokeh model that wraps around an ECharts plot and renders it\n inside a Bokeh.\n \"\"\"\n\n __javascript_raw__ = [\n f\"{config.npm_cdn}/[email protected]/dist/echarts.min.js\",\n f\"{config.npm_cdn}/[email protected]/dist/echarts-gl.min.js\"\n ]\n\n @classproperty\n def __javascript__(cls):\n return bundled_files(cls)\n\n @classproperty\n def __js_skip__(cls):\n return {\n 'echarts': cls.__javascript__[:1]\n }\n\n __js_require__ = {\n 'paths': {\n \"echarts\": f\"{config.npm_cdn}/[email protected]/dist/echarts.min\",\n \"echarts-gl\": f\"{config.npm_cdn}/[email protected]/dist/echarts-gl.min\"\n },\n 'exports': {}\n }\n\n data = Nullable(Dict(String, Any))\n\n options = Nullable(Dict(String, Any))\n\n event_config = Dict(String, Any)\n\n js_events = Dict(String, List(Any))\n\n renderer = Enum(\"canvas\", \"svg\")\n\n theme = Enum(\"default\", \"light\", \"dark\")\n", "path": "panel/models/echarts.py"}]}
1,136
86
gh_patches_debug_600
rasdani/github-patches
git_diff
pex-tool__pex-1319
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Release 2.1.39 On the docket: + [x] Running opvault 0.4.9 pex leads to infinite recursion in setup tools #1316 </issue> <code> [start of pex/version.py] 1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). 2 # Licensed under the Apache License, Version 2.0 (see LICENSE). 3 4 __version__ = "2.1.38" 5 [end of pex/version.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pex/version.py b/pex/version.py --- a/pex/version.py +++ b/pex/version.py @@ -1,4 +1,4 @@ # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). -__version__ = "2.1.38" +__version__ = "2.1.39"
{"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.38\"\n+__version__ = \"2.1.39\"\n", "issue": "Release 2.1.39\nOn the docket:\r\n+ [x] Running opvault 0.4.9 pex leads to infinite recursion in setup tools #1316\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.38\"\n", "path": "pex/version.py"}]}
624
96
gh_patches_debug_21533
rasdani/github-patches
git_diff
e-valuation__EvaP-464
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> there is duplicated code in EvaP while browsing the code, i ran across a lot of structurally very similar blocks of code. this is a reminder for me to search for these code parts, collect a few, and find the cause and solutions for them. examples: - [x] https://github.com/fsr-itse/EvaP/commit/ca4b28a6054944859ec4447fff95fc36095a2ffb. - [x] the contributor course view/template vs the fsr course view/template. - [x] student_index and contributor_index (html and view) - [ ] fsr_import and user_import - [x] the multiselect initialization inside the script blocks exists multiple times in different files. one can find it by searching for select[multiple='multiple'] - [x] https://github.com/janno42/EvaP/blob/reward_points/evap/fsr/templates/fsr_reward_point_redemption_event_delete.html - [x] search for $(".twipsify") and $("input[data-datepicker='datepicker']:not([readonly='True'])") - [x] sortable and apply_ordering functions - [x] if ($.fn.button.noConflict != undefined) - [x] $('#id_degree').combobox() and similar - [x] fsr_course_preview.html, fsr_questionnaire_view.html, contributor_course_preview.html - [x] javascript-code of the sortable forms: [contributor/staff]_course_form.html (or rather, evap_course_edit_js.html), staff_faq_index.html and staff_faq_section.html - [x] all the leave.js occurrences </issue> <code> [start of evap/evaluation/management/commands/merge_users.py] 1 from django.core.management.base import BaseCommand 2 3 from evap.evaluation.models import UserProfile 4 from evap.evaluation.merge import merge_model_objects 5 6 7 class Command(BaseCommand): 8 args = '<user ID 1> <user ID 2>' 9 help = 'Merge two users' 10 11 def handle(self, *args, **options): 12 try: 13 user1 = UserProfile.objects.get(pk=int(args[0])) 14 user2 = UserProfile.objects.get(pk=int(args[1])) 15 16 print("Merging user '{1}' into user '{0}'".format(user1, user2)) 17 merge_model_objects(user1, user2) 18 except Exception: 19 import traceback 20 traceback.print_exc() 21 [end of evap/evaluation/management/commands/merge_users.py] [start of evap/staff/templatetags/staff_templatetags.py] 1 from django.template import Library 2 3 from evap.evaluation.models import Semester 4 5 register = Library() 6 7 8 @register.inclusion_tag("staff_semester_menu.html") 9 def include_staff_semester_menu(): 10 return dict(semesters=Semester.objects.all()[:5]) 11 12 @register.inclusion_tag('staff_course_selection_list.html') 13 def include_staff_course_selection_list(course_forms, btn_label, empty_msg): 14 return { 15 'course_forms': course_forms, 16 'btn_label': btn_label, 17 'empty_msg': empty_msg 18 } 19 20 @register.inclusion_tag('staff_user_labels.html') 21 def include_staff_user_labels(user): 22 return dict(user=user) 23 [end of evap/staff/templatetags/staff_templatetags.py] [start of evap/evaluation/templatetags/evaluation_templatetags.py] 1 from django.template import Library 2 from evap.evaluation.models import Semester 3 4 register = Library() 5 6 7 @register.inclusion_tag("user_list_with_links.html") 8 def include_user_list_with_links(users): 9 return dict(users=users) 10 [end of evap/evaluation/templatetags/evaluation_templatetags.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/evap/evaluation/management/commands/merge_users.py b/evap/evaluation/management/commands/merge_users.py --- a/evap/evaluation/management/commands/merge_users.py +++ b/evap/evaluation/management/commands/merge_users.py @@ -5,8 +5,8 @@ class Command(BaseCommand): - args = '<user ID 1> <user ID 2>' - help = 'Merge two users' + args = '<primary user ID> <duplicate user ID>' + help = 'Merges duplicate user into primary user' def handle(self, *args, **options): try: diff --git a/evap/evaluation/templatetags/evaluation_templatetags.py b/evap/evaluation/templatetags/evaluation_templatetags.py --- a/evap/evaluation/templatetags/evaluation_templatetags.py +++ b/evap/evaluation/templatetags/evaluation_templatetags.py @@ -7,3 +7,9 @@ @register.inclusion_tag("user_list_with_links.html") def include_user_list_with_links(users): return dict(users=users) + + [email protected]_tag("sortable_form_js.html") +def include_sortable_form_js(): + return dict() + diff --git a/evap/staff/templatetags/staff_templatetags.py b/evap/staff/templatetags/staff_templatetags.py --- a/evap/staff/templatetags/staff_templatetags.py +++ b/evap/staff/templatetags/staff_templatetags.py @@ -20,3 +20,7 @@ @register.inclusion_tag('staff_user_labels.html') def include_staff_user_labels(user): return dict(user=user) + [email protected]_tag("staff_questionnaire_index_list.html") +def include_staff_questionnaire_index_list(questionnaires, headline): + return dict(questionnaires=questionnaires, headline=headline)
{"golden_diff": "diff --git a/evap/evaluation/management/commands/merge_users.py b/evap/evaluation/management/commands/merge_users.py\n--- a/evap/evaluation/management/commands/merge_users.py\n+++ b/evap/evaluation/management/commands/merge_users.py\n@@ -5,8 +5,8 @@\n \n \n class Command(BaseCommand):\n- args = '<user ID 1> <user ID 2>'\n- help = 'Merge two users'\n+ args = '<primary user ID> <duplicate user ID>'\n+ help = 'Merges duplicate user into primary user'\n \n def handle(self, *args, **options):\n try:\ndiff --git a/evap/evaluation/templatetags/evaluation_templatetags.py b/evap/evaluation/templatetags/evaluation_templatetags.py\n--- a/evap/evaluation/templatetags/evaluation_templatetags.py\n+++ b/evap/evaluation/templatetags/evaluation_templatetags.py\n@@ -7,3 +7,9 @@\n @register.inclusion_tag(\"user_list_with_links.html\")\n def include_user_list_with_links(users):\n return dict(users=users)\n+\n+\[email protected]_tag(\"sortable_form_js.html\")\n+def include_sortable_form_js():\n+ return dict()\n+\ndiff --git a/evap/staff/templatetags/staff_templatetags.py b/evap/staff/templatetags/staff_templatetags.py\n--- a/evap/staff/templatetags/staff_templatetags.py\n+++ b/evap/staff/templatetags/staff_templatetags.py\n@@ -20,3 +20,7 @@\n @register.inclusion_tag('staff_user_labels.html')\n def include_staff_user_labels(user):\n return dict(user=user)\n+\[email protected]_tag(\"staff_questionnaire_index_list.html\")\n+def include_staff_questionnaire_index_list(questionnaires, headline):\n+ return dict(questionnaires=questionnaires, headline=headline)\n", "issue": "there is duplicated code in EvaP\nwhile browsing the code, i ran across a lot of structurally very similar blocks of code. \n\nthis is a reminder for me to search for these code parts, collect a few, and find the cause and solutions for them.\n\nexamples: \n- [x] https://github.com/fsr-itse/EvaP/commit/ca4b28a6054944859ec4447fff95fc36095a2ffb.\n- [x] the contributor course view/template vs the fsr course view/template.\n- [x] student_index and contributor_index (html and view)\n- [ ] fsr_import and user_import\n- [x] the multiselect initialization inside the script blocks exists multiple times in different files. 
one can find it by searching for select[multiple='multiple']\n- [x] https://github.com/janno42/EvaP/blob/reward_points/evap/fsr/templates/fsr_reward_point_redemption_event_delete.html\n- [x] search for $(\".twipsify\") and $(\"input[data-datepicker='datepicker']:not([readonly='True'])\")\n- [x] sortable and apply_ordering functions\n- [x] if ($.fn.button.noConflict != undefined)\n- [x] $('#id_degree').combobox() and similar\n- [x] fsr_course_preview.html, fsr_questionnaire_view.html, contributor_course_preview.html\n- [x] javascript-code of the sortable forms: [contributor/staff]_course_form.html (or rather, evap_course_edit_js.html), staff_faq_index.html and staff_faq_section.html\n- [x] all the leave.js occurrences\n\n", "before_files": [{"content": "from django.core.management.base import BaseCommand\n\nfrom evap.evaluation.models import UserProfile\nfrom evap.evaluation.merge import merge_model_objects\n\n\nclass Command(BaseCommand):\n args = '<user ID 1> <user ID 2>'\n help = 'Merge two users'\n\n def handle(self, *args, **options):\n try:\n user1 = UserProfile.objects.get(pk=int(args[0]))\n user2 = UserProfile.objects.get(pk=int(args[1]))\n\n print(\"Merging user '{1}' into user '{0}'\".format(user1, user2))\n merge_model_objects(user1, user2)\n except Exception:\n import traceback\n traceback.print_exc()\n", "path": "evap/evaluation/management/commands/merge_users.py"}, {"content": "from django.template import Library\n\nfrom evap.evaluation.models import Semester\n\nregister = Library()\n\n\[email protected]_tag(\"staff_semester_menu.html\")\ndef include_staff_semester_menu():\n return dict(semesters=Semester.objects.all()[:5])\n\[email protected]_tag('staff_course_selection_list.html')\ndef include_staff_course_selection_list(course_forms, btn_label, empty_msg):\n return {\n 'course_forms': course_forms,\n 'btn_label': btn_label,\n 'empty_msg': empty_msg\n }\n\[email protected]_tag('staff_user_labels.html')\ndef include_staff_user_labels(user):\n return dict(user=user)\n", "path": "evap/staff/templatetags/staff_templatetags.py"}, {"content": "from django.template import Library\nfrom evap.evaluation.models import Semester\n\nregister = Library()\n\n\[email protected]_tag(\"user_list_with_links.html\")\ndef include_user_list_with_links(users):\n return dict(users=users)\n", "path": "evap/evaluation/templatetags/evaluation_templatetags.py"}]}
1,404
459
gh_patches_debug_1903
rasdani/github-patches
git_diff
systemd__mkosi-1956
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [Meta] declare a policy about adding new distributions Before people start creating issues asking to support their favorite distribution, I think that mkosi should declare its policy regarding new distributions support. The policy should state in which terms (if any) you will be willing to support a new distributions. </issue> <code> [start of mkosi/distributions/__init__.py] 1 # SPDX-License-Identifier: LGPL-2.1+ 2 3 import enum 4 import importlib 5 import re 6 from collections.abc import Sequence 7 from typing import TYPE_CHECKING, Optional, cast 8 9 from mkosi.architecture import Architecture 10 from mkosi.util import StrEnum, read_os_release 11 12 if TYPE_CHECKING: 13 from mkosi.state import MkosiState 14 15 16 class PackageType(StrEnum): 17 none = enum.auto() 18 rpm = enum.auto() 19 deb = enum.auto() 20 pkg = enum.auto() 21 ebuild = enum.auto() 22 23 24 class DistributionInstaller: 25 @classmethod 26 def pretty_name(cls) -> str: 27 raise NotImplementedError 28 29 @classmethod 30 def setup(cls, state: "MkosiState") -> None: 31 raise NotImplementedError 32 33 @classmethod 34 def install(cls, state: "MkosiState") -> None: 35 raise NotImplementedError 36 37 @classmethod 38 def install_packages(cls, state: "MkosiState", packages: Sequence[str]) -> None: 39 raise NotImplementedError 40 41 @classmethod 42 def remove_packages(cls, state: "MkosiState", packages: Sequence[str]) -> None: 43 raise NotImplementedError 44 45 @classmethod 46 def filesystem(cls) -> str: 47 return "ext4" 48 49 @staticmethod 50 def architecture(arch: Architecture) -> str: 51 return str(arch) 52 53 @classmethod 54 def package_type(cls) -> PackageType: 55 return PackageType.none 56 57 @classmethod 58 def default_release(cls) -> str: 59 return "" 60 61 @classmethod 62 def default_tools_tree_distribution(cls) -> Optional["Distribution"]: 63 return None 64 65 @classmethod 66 def tools_tree_repositories(cls) -> list[str]: 67 return [] 68 69 @classmethod 70 def tools_tree_packages(cls) -> list[str]: 71 return [] 72 73 74 class Distribution(StrEnum): 75 fedora = enum.auto() 76 debian = enum.auto() 77 ubuntu = enum.auto() 78 arch = enum.auto() 79 opensuse = enum.auto() 80 mageia = enum.auto() 81 centos = enum.auto() 82 rhel_ubi = enum.auto() 83 openmandriva = enum.auto() 84 rocky = enum.auto() 85 alma = enum.auto() 86 gentoo = enum.auto() 87 custom = enum.auto() 88 89 def is_centos_variant(self) -> bool: 90 return self in (Distribution.centos, Distribution.alma, Distribution.rocky) 91 92 def is_dnf_distribution(self) -> bool: 93 return self in ( 94 Distribution.fedora, 95 Distribution.mageia, 96 Distribution.centos, 97 Distribution.rhel_ubi, 98 Distribution.openmandriva, 99 Distribution.rocky, 100 Distribution.alma, 101 ) 102 103 def is_apt_distribution(self) -> bool: 104 return self in (Distribution.debian, Distribution.ubuntu) 105 106 def setup(self, state: "MkosiState") -> None: 107 return self.installer().setup(state) 108 109 def install(self, state: "MkosiState") -> None: 110 return self.installer().install(state) 111 112 def install_packages(self, state: "MkosiState", packages: Sequence[str]) -> None: 113 return self.installer().install_packages(state, packages) 114 115 def remove_packages(self, state: "MkosiState", packages: Sequence[str]) -> None: 116 return self.installer().remove_packages(state, packages) 117 118 def filesystem(self) -> str: 119 return self.installer().filesystem() 120 121 def 
architecture(self, arch: Architecture) -> str: 122 return self.installer().architecture(arch) 123 124 def package_type(self) -> PackageType: 125 return self.installer().package_type() 126 127 def default_release(self) -> str: 128 return self.installer().default_release() 129 130 def default_tools_tree_distribution(self) -> Optional["Distribution"]: 131 return self.installer().default_tools_tree_distribution() 132 133 def tools_tree_repositories(self) -> list[str]: 134 return self.installer().tools_tree_repositories() 135 136 def tools_tree_packages(self) -> list[str]: 137 return self.installer().tools_tree_packages() 138 139 def installer(self) -> type[DistributionInstaller]: 140 modname = str(self).replace('-', '_') 141 mod = importlib.import_module(f"mkosi.distributions.{modname}") 142 installer = getattr(mod, "Installer") 143 assert issubclass(installer, DistributionInstaller) 144 return cast(type[DistributionInstaller], installer) 145 146 147 def detect_distribution() -> tuple[Optional[Distribution], Optional[str]]: 148 try: 149 os_release = read_os_release() 150 except FileNotFoundError: 151 return None, None 152 153 dist_id = os_release.get("ID", "linux") 154 dist_id_like = os_release.get("ID_LIKE", "").split() 155 version = os_release.get("VERSION", None) 156 version_id = os_release.get("VERSION_ID", None) 157 version_codename = os_release.get("VERSION_CODENAME", None) 158 extracted_codename = None 159 160 if version: 161 # extract Debian release codename 162 m = re.search(r"\((.*?)\)", version) 163 if m: 164 extracted_codename = m.group(1) 165 166 d: Optional[Distribution] = None 167 for the_id in [dist_id, *dist_id_like]: 168 d = Distribution.__members__.get(the_id, None) 169 if d is not None: 170 break 171 172 if d in {Distribution.debian, Distribution.ubuntu} and (version_codename or extracted_codename): 173 version_id = version_codename or extracted_codename 174 175 return d, version_id 176 [end of mkosi/distributions/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mkosi/distributions/__init__.py b/mkosi/distributions/__init__.py --- a/mkosi/distributions/__init__.py +++ b/mkosi/distributions/__init__.py @@ -72,6 +72,8 @@ class Distribution(StrEnum): + # Please consult docs/distribution-policy.md and contact one + # of the mkosi maintainers before implementing a new distribution. fedora = enum.auto() debian = enum.auto() ubuntu = enum.auto()
{"golden_diff": "diff --git a/mkosi/distributions/__init__.py b/mkosi/distributions/__init__.py\n--- a/mkosi/distributions/__init__.py\n+++ b/mkosi/distributions/__init__.py\n@@ -72,6 +72,8 @@\n \n \n class Distribution(StrEnum):\n+ # Please consult docs/distribution-policy.md and contact one\n+ # of the mkosi maintainers before implementing a new distribution.\n fedora = enum.auto()\n debian = enum.auto()\n ubuntu = enum.auto()\n", "issue": "[Meta] declare a policy about adding new distributions\nBefore people start creating issues asking to support their favorite distribution, I think that mkosi should declare its policy regarding new distributions support.\r\n\r\nThe policy should state in which terms (if any) you will be willing to support a new distributions.\n", "before_files": [{"content": "# SPDX-License-Identifier: LGPL-2.1+\n\nimport enum\nimport importlib\nimport re\nfrom collections.abc import Sequence\nfrom typing import TYPE_CHECKING, Optional, cast\n\nfrom mkosi.architecture import Architecture\nfrom mkosi.util import StrEnum, read_os_release\n\nif TYPE_CHECKING:\n from mkosi.state import MkosiState\n\n\nclass PackageType(StrEnum):\n none = enum.auto()\n rpm = enum.auto()\n deb = enum.auto()\n pkg = enum.auto()\n ebuild = enum.auto()\n\n\nclass DistributionInstaller:\n @classmethod\n def pretty_name(cls) -> str:\n raise NotImplementedError\n\n @classmethod\n def setup(cls, state: \"MkosiState\") -> None:\n raise NotImplementedError\n\n @classmethod\n def install(cls, state: \"MkosiState\") -> None:\n raise NotImplementedError\n\n @classmethod\n def install_packages(cls, state: \"MkosiState\", packages: Sequence[str]) -> None:\n raise NotImplementedError\n\n @classmethod\n def remove_packages(cls, state: \"MkosiState\", packages: Sequence[str]) -> None:\n raise NotImplementedError\n\n @classmethod\n def filesystem(cls) -> str:\n return \"ext4\"\n\n @staticmethod\n def architecture(arch: Architecture) -> str:\n return str(arch)\n\n @classmethod\n def package_type(cls) -> PackageType:\n return PackageType.none\n\n @classmethod\n def default_release(cls) -> str:\n return \"\"\n\n @classmethod\n def default_tools_tree_distribution(cls) -> Optional[\"Distribution\"]:\n return None\n\n @classmethod\n def tools_tree_repositories(cls) -> list[str]:\n return []\n\n @classmethod\n def tools_tree_packages(cls) -> list[str]:\n return []\n\n\nclass Distribution(StrEnum):\n fedora = enum.auto()\n debian = enum.auto()\n ubuntu = enum.auto()\n arch = enum.auto()\n opensuse = enum.auto()\n mageia = enum.auto()\n centos = enum.auto()\n rhel_ubi = enum.auto()\n openmandriva = enum.auto()\n rocky = enum.auto()\n alma = enum.auto()\n gentoo = enum.auto()\n custom = enum.auto()\n\n def is_centos_variant(self) -> bool:\n return self in (Distribution.centos, Distribution.alma, Distribution.rocky)\n\n def is_dnf_distribution(self) -> bool:\n return self in (\n Distribution.fedora,\n Distribution.mageia,\n Distribution.centos,\n Distribution.rhel_ubi,\n Distribution.openmandriva,\n Distribution.rocky,\n Distribution.alma,\n )\n\n def is_apt_distribution(self) -> bool:\n return self in (Distribution.debian, Distribution.ubuntu)\n\n def setup(self, state: \"MkosiState\") -> None:\n return self.installer().setup(state)\n\n def install(self, state: \"MkosiState\") -> None:\n return self.installer().install(state)\n\n def install_packages(self, state: \"MkosiState\", packages: Sequence[str]) -> None:\n return self.installer().install_packages(state, packages)\n\n def remove_packages(self, state: 
\"MkosiState\", packages: Sequence[str]) -> None:\n return self.installer().remove_packages(state, packages)\n\n def filesystem(self) -> str:\n return self.installer().filesystem()\n\n def architecture(self, arch: Architecture) -> str:\n return self.installer().architecture(arch)\n\n def package_type(self) -> PackageType:\n return self.installer().package_type()\n\n def default_release(self) -> str:\n return self.installer().default_release()\n\n def default_tools_tree_distribution(self) -> Optional[\"Distribution\"]:\n return self.installer().default_tools_tree_distribution()\n\n def tools_tree_repositories(self) -> list[str]:\n return self.installer().tools_tree_repositories()\n\n def tools_tree_packages(self) -> list[str]:\n return self.installer().tools_tree_packages()\n\n def installer(self) -> type[DistributionInstaller]:\n modname = str(self).replace('-', '_')\n mod = importlib.import_module(f\"mkosi.distributions.{modname}\")\n installer = getattr(mod, \"Installer\")\n assert issubclass(installer, DistributionInstaller)\n return cast(type[DistributionInstaller], installer)\n\n\ndef detect_distribution() -> tuple[Optional[Distribution], Optional[str]]:\n try:\n os_release = read_os_release()\n except FileNotFoundError:\n return None, None\n\n dist_id = os_release.get(\"ID\", \"linux\")\n dist_id_like = os_release.get(\"ID_LIKE\", \"\").split()\n version = os_release.get(\"VERSION\", None)\n version_id = os_release.get(\"VERSION_ID\", None)\n version_codename = os_release.get(\"VERSION_CODENAME\", None)\n extracted_codename = None\n\n if version:\n # extract Debian release codename\n m = re.search(r\"\\((.*?)\\)\", version)\n if m:\n extracted_codename = m.group(1)\n\n d: Optional[Distribution] = None\n for the_id in [dist_id, *dist_id_like]:\n d = Distribution.__members__.get(the_id, None)\n if d is not None:\n break\n\n if d in {Distribution.debian, Distribution.ubuntu} and (version_codename or extracted_codename):\n version_id = version_codename or extracted_codename\n\n return d, version_id\n", "path": "mkosi/distributions/__init__.py"}]}
2,222
117
gh_patches_debug_10128
rasdani/github-patches
git_diff
MycroftAI__mycroft-core-2338
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Skill Shutdown sometimes fails causing duplicate instances of Skill ## Devices: Issue occurred on two separate Debian Linux installations - one running Mycroft-core 19.02.14, - the other running dev branch the date of this issue. ## Steps to reproduce: 1. Load Mycroft and ensure working as expected 2. Make intentional code error in Alarm Skill to raise exception eg `self.log.info(not_a_real_var)` 3. Skill fails on reload with the following error: ``` 12:04:25.758 | INFO | 22386 | mycroft.skills.skill_loader:reload:109 | ATTEMPTING TO RELOAD SKILL: mycroft-alarm.mycroftai 12:04:25.760 | ERROR | 22386 | mycroft.skills.skill_loader:_execute_instance_shutdown:145 | An error occurred while shutting down AlarmSkill Traceback (most recent call last): File "/home/fs-neriahbjato/Documents/Full_Scale/Mycroft/mycroft-core/mycroft/skills/skill_loader.py", line 142, in _execute_instance_shutdown self.instance.default_shutdown() File "/home/fs-neriahbjato/Documents/Full_Scale/Mycroft/mycroft-core/mycroft/skills/mycroft_skill/mycroft_skill.py", line 1162, in default_shutdown self.event_scheduler.shutdown() File "/home/fs-neriahbjato/Documents/Full_Scale/Mycroft/mycroft-core/mycroft/skills/event_scheduler.py", line 433, in shutdown self.events.clear() File "/home/fs-neriahbjato/Documents/Full_Scale/Mycroft/mycroft-core/mycroft/skills/mycroft_skill/event_container.py", line 183, in clear self.bus.remove(e, f) File "/home/fs-neriahbjato/Documents/Full_Scale/Mycroft/mycroft-core/mycroft/messagebus/client/client.py", line 172, in remove self.emitter.remove_listener(event_name, func) File "/home/fs-neriahbjato/Documents/Full_Scale/Mycroft/mycroft-core/mycroft/messagebus/client/threaded_event_emitter.py", line 57, in remove_listener return super().remove_listener(event_name, func) File "/home/fs-neriahbjato/Documents/Full_Scale/Mycroft/mycroft-core/.venv/lib/python3.6/site-packages/pyee/__init__.py", line 205, in remove_listener self._events[event].pop(f) KeyError: <function create_basic_wrapper.<locals>.wrapper at 0x7f2a504781e0> ``` 4. Remove bad code and save to reload Skill ``` 12:04:25.803 | INFO | 22386 | mycroft.skills.skill_loader:_communicate_load_status:273 | Skill mycroft-alarm.mycroftai loaded successfully ``` 5. Trigger the Skill with an utterance eg "what alarms are set" 6. Mycroft responds twice - once from the failed instance of the Skill: "An error occurred while processing a request in Alarm Skill" - once from the new working instance of the Skill: "No alarms have been set" The failed instance of the Skill will remain until Mycroft services are restarted. After restarting, the same steps cause the same error. ## Investigation: This was only occurring for the Alarm Skill, did not get the same behavior in the Date Time Skill. To see what might be causing it, I added some logging to the offending function in [`mycroft/messagebus/client/threaded_event_emitter.py`](https://github.com/MycroftAI/mycroft-core/blob/dev/mycroft/messagebus/client/threaded_event_emitter.py): ``` def remove_listener(self, event_name, func): """ Wrap the remove to translate from function to wrapped function. 
""" for w in self.wrappers[event_name]: if w[0] == func: self.wrappers[event_name].remove(w) return super().remove_listener(event_name, w[1]) # if no wrapper exists try removing the function LOG.info("REMOVING LISTENER") LOG.info("event_name: " + str(event_name)) LOG.info(event_name) LOG.info("func: " + str(func)) LOG.info(func) return super().remove_listener(event_name, func) ``` Then reloaded Mycroft and can no longer trigger the issue. ## Hypothesis - Presumably the `func` was not being found in `self.wrappers` allowing it to proceed to try removing the function itself on [line 57](https://github.com/MycroftAI/mycroft-core/blob/dev/mycroft/messagebus/client/threaded_event_emitter.py#L57) which then failed. - Possibly the function was modified or removed from `self.wrappers` causing it to not match? - Is `self.wrappers` cached between service restarts, and would editing this file have recreated it? As I'm no longer able to reproduce the error it's difficult to continue debugging. So documenting the situation here in case anyone else runs into it and we can pick it back up. Unless anyone else has an idea of what might be happening? </issue> <code> [start of mycroft/skills/mycroft_skill/event_container.py] 1 from inspect import signature 2 3 from mycroft.messagebus.message import Message 4 from mycroft.metrics import Stopwatch, report_timing 5 from mycroft.util.log import LOG 6 7 from ..skill_data import to_alnum 8 9 10 def unmunge_message(message, skill_id): 11 """Restore message keywords by removing the Letterified skill ID. 12 Arguments: 13 message (Message): Intent result message 14 skill_id (str): skill identifier 15 Returns: 16 Message without clear keywords 17 """ 18 if isinstance(message, Message) and isinstance(message.data, dict): 19 skill_id = to_alnum(skill_id) 20 for key in list(message.data.keys()): 21 if key.startswith(skill_id): 22 # replace the munged key with the real one 23 new_key = key[len(skill_id):] 24 message.data[new_key] = message.data.pop(key) 25 26 return message 27 28 29 def get_handler_name(handler): 30 """Name (including class if available) of handler function. 31 32 Arguments: 33 handler (function): Function to be named 34 35 Returns: 36 string: handler name as string 37 """ 38 if '__self__' in dir(handler) and 'name' in dir(handler.__self__): 39 return handler.__self__.name + '.' + handler.__name__ 40 else: 41 return handler.__name__ 42 43 44 def create_wrapper(handler, skill_id, on_start, on_end, on_error): 45 """Create the default skill handler wrapper. 46 47 This wrapper handles things like metrics, reporting handler start/stop 48 and errors. 
49 handler (callable): method/function to call 50 skill_id: skill_id for associated skill 51 on_start (function): function to call before executing the handler 52 on_end (function): function to call after executing the handler 53 on_error (function): function to call for error reporting 54 """ 55 def wrapper(message): 56 stopwatch = Stopwatch() 57 try: 58 message = unmunge_message(message, skill_id) 59 if on_start: 60 on_start(message) 61 62 with stopwatch: 63 if len(signature(handler).parameters) == 0: 64 handler() 65 else: 66 handler(message) 67 68 except Exception as e: 69 if on_error: 70 on_error(e) 71 finally: 72 if on_end: 73 on_end(message) 74 75 # Send timing metrics 76 context = message.context 77 if context and 'ident' in context: 78 report_timing(context['ident'], 'skill_handler', stopwatch, 79 {'handler': handler.__name__, 80 'skill_id': skill_id}) 81 return wrapper 82 83 84 def create_basic_wrapper(handler, on_error=None): 85 """Create the default skill handler wrapper. 86 87 This wrapper handles things like metrics, reporting handler start/stop 88 and errors. 89 90 Arguments: 91 handler (callable): method/function to call 92 on_error (function): function to call to report error. 93 94 Returns: 95 Wrapped callable 96 """ 97 def wrapper(message): 98 try: 99 if len(signature(handler).parameters) == 0: 100 handler() 101 else: 102 handler(message) 103 except Exception as e: 104 if on_error: 105 on_error(e) 106 107 return wrapper 108 109 110 class EventContainer: 111 """Container tracking messagbus handlers. 112 113 This container tracks events added by a skill, allowing unregistering 114 all events on shutdown. 115 """ 116 def __init__(self, bus=None): 117 self.bus = bus 118 self.events = [] 119 120 def set_bus(self, bus): 121 self.bus = bus 122 123 def add(self, name, handler, once=False): 124 """Create event handler for executing intent or other event. 125 126 Arguments: 127 name (string): IntentParser name 128 handler (func): Method to call 129 once (bool, optional): Event handler will be removed after it has 130 been run once. 131 """ 132 def once_wrapper(message): 133 # Remove registered one-time handler before invoking, 134 # allowing them to re-schedule themselves. 135 handler(message) 136 self.remove(name) 137 138 if handler: 139 if once: 140 self.bus.once(name, once_wrapper) 141 else: 142 self.bus.on(name, handler) 143 self.events.append((name, handler)) 144 145 def remove(self, name): 146 """Removes an event from bus emitter and events list. 147 148 Args: 149 name (string): Name of Intent or Scheduler Event 150 Returns: 151 bool: True if found and removed, False if not found 152 """ 153 print("Removing event {}".format(name)) 154 removed = False 155 for _name, _handler in list(self.events): 156 if name == _name: 157 try: 158 self.events.remove((_name, _handler)) 159 except ValueError: 160 LOG.error('Failed to remove event {}'.format(name)) 161 pass 162 removed = True 163 164 # Because of function wrappers, the emitter doesn't always directly 165 # hold the _handler function, it sometimes holds something like 166 # 'wrapper(_handler)'. So a call like: 167 # self.bus.remove(_name, _handler) 168 # will not find it, leaving an event handler with that name left behind 169 # waiting to fire if it is ever re-installed and triggered. 170 # Remove all handlers with the given name, regardless of handler. 
171 if removed: 172 self.bus.remove_all_listeners(name) 173 return removed 174 175 def __iter__(self): 176 return iter(self.events) 177 178 def clear(self): 179 """Unregister all registered handlers and clear the list of registered 180 events. 181 """ 182 for e, f in self.events: 183 self.bus.remove(e, f) 184 self.events = [] # Remove reference to wrappers 185 [end of mycroft/skills/mycroft_skill/event_container.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mycroft/skills/mycroft_skill/event_container.py b/mycroft/skills/mycroft_skill/event_container.py --- a/mycroft/skills/mycroft_skill/event_container.py +++ b/mycroft/skills/mycroft_skill/event_container.py @@ -138,9 +138,12 @@ if handler: if once: self.bus.once(name, once_wrapper) + self.events.append((name, once_wrapper)) else: self.bus.on(name, handler) - self.events.append((name, handler)) + self.events.append((name, handler)) + + LOG.debug('Added event: {}'.format(name)) def remove(self, name): """Removes an event from bus emitter and events list.
{"golden_diff": "diff --git a/mycroft/skills/mycroft_skill/event_container.py b/mycroft/skills/mycroft_skill/event_container.py\n--- a/mycroft/skills/mycroft_skill/event_container.py\n+++ b/mycroft/skills/mycroft_skill/event_container.py\n@@ -138,9 +138,12 @@\n if handler:\n if once:\n self.bus.once(name, once_wrapper)\n+ self.events.append((name, once_wrapper))\n else:\n self.bus.on(name, handler)\n- self.events.append((name, handler))\n+ self.events.append((name, handler))\n+\n+ LOG.debug('Added event: {}'.format(name))\n \n def remove(self, name):\n \"\"\"Removes an event from bus emitter and events list.\n", "issue": "Skill Shutdown sometimes fails causing duplicate instances of Skill\n## Devices:\r\nIssue occurred on two separate Debian Linux installations\r\n- one running Mycroft-core 19.02.14, \r\n- the other running dev branch the date of this issue.\r\n\r\n## Steps to reproduce:\r\n1. Load Mycroft and ensure working as expected\r\n2. Make intentional code error in Alarm Skill to raise exception eg `self.log.info(not_a_real_var)`\r\n3. Skill fails on reload with the following error:\r\n```\r\n 12:04:25.758 | INFO | 22386 | mycroft.skills.skill_loader:reload:109 | ATTEMPTING TO RELOAD SKILL: mycroft-alarm.mycroftai\r\n 12:04:25.760 | ERROR | 22386 | mycroft.skills.skill_loader:_execute_instance_shutdown:145 | An error occurred while shutting down AlarmSkill\r\nTraceback (most recent call last):\r\n File \"/home/fs-neriahbjato/Documents/Full_Scale/Mycroft/mycroft-core/mycroft/skills/skill_loader.py\", line 142, in _execute_instance_shutdown\r\n self.instance.default_shutdown()\r\n File \"/home/fs-neriahbjato/Documents/Full_Scale/Mycroft/mycroft-core/mycroft/skills/mycroft_skill/mycroft_skill.py\", line 1162, in default_shutdown\r\n self.event_scheduler.shutdown()\r\n File \"/home/fs-neriahbjato/Documents/Full_Scale/Mycroft/mycroft-core/mycroft/skills/event_scheduler.py\", line 433, in shutdown\r\n self.events.clear()\r\n File \"/home/fs-neriahbjato/Documents/Full_Scale/Mycroft/mycroft-core/mycroft/skills/mycroft_skill/event_container.py\", line 183, in clear\r\n self.bus.remove(e, f)\r\n File \"/home/fs-neriahbjato/Documents/Full_Scale/Mycroft/mycroft-core/mycroft/messagebus/client/client.py\", line 172, in remove\r\n self.emitter.remove_listener(event_name, func)\r\n File \"/home/fs-neriahbjato/Documents/Full_Scale/Mycroft/mycroft-core/mycroft/messagebus/client/threaded_event_emitter.py\", line 57, in remove_listener\r\n return super().remove_listener(event_name, func)\r\n File \"/home/fs-neriahbjato/Documents/Full_Scale/Mycroft/mycroft-core/.venv/lib/python3.6/site-packages/pyee/__init__.py\", line 205, in remove_listener\r\n self._events[event].pop(f)\r\nKeyError: <function create_basic_wrapper.<locals>.wrapper at 0x7f2a504781e0>\r\n```\r\n4. Remove bad code and save to reload Skill\r\n```\r\n 12:04:25.803 | INFO | 22386 | mycroft.skills.skill_loader:_communicate_load_status:273 | Skill mycroft-alarm.mycroftai loaded successfully\r\n```\r\n5. Trigger the Skill with an utterance eg \"what alarms are set\"\r\n6. 
Mycroft responds twice\r\n - once from the failed instance of the Skill: \"An error occurred while processing a request in Alarm Skill\"\r\n - once from the new working instance of the Skill: \"No alarms have been set\"\r\n\r\nThe failed instance of the Skill will remain until Mycroft services are restarted.\r\nAfter restarting, the same steps cause the same error.\r\n\r\n## Investigation:\r\nThis was only occurring for the Alarm Skill, did not get the same behavior in the Date Time Skill.\r\nTo see what might be causing it, I added some logging to the offending function in [`mycroft/messagebus/client/threaded_event_emitter.py`](https://github.com/MycroftAI/mycroft-core/blob/dev/mycroft/messagebus/client/threaded_event_emitter.py):\r\n```\r\ndef remove_listener(self, event_name, func):\r\n \"\"\" Wrap the remove to translate from function to wrapped\r\n function.\r\n \"\"\"\r\n for w in self.wrappers[event_name]:\r\n if w[0] == func:\r\n self.wrappers[event_name].remove(w)\r\n return super().remove_listener(event_name, w[1])\r\n # if no wrapper exists try removing the function\r\n LOG.info(\"REMOVING LISTENER\")\r\n LOG.info(\"event_name: \" + str(event_name))\r\n LOG.info(event_name)\r\n LOG.info(\"func: \" + str(func))\r\n LOG.info(func)\r\n return super().remove_listener(event_name, func)\r\n```\r\nThen reloaded Mycroft and can no longer trigger the issue.\r\n\r\n## Hypothesis\r\n- Presumably the `func` was not being found in `self.wrappers` allowing it to proceed to try removing the function itself on [line 57](https://github.com/MycroftAI/mycroft-core/blob/dev/mycroft/messagebus/client/threaded_event_emitter.py#L57) which then failed.\r\n- Possibly the function was modified or removed from `self.wrappers` causing it to not match?\r\n- Is `self.wrappers` cached between service restarts, and would editing this file have recreated it?\r\n\r\n\r\nAs I'm no longer able to reproduce the error it's difficult to continue debugging. So documenting the situation here in case anyone else runs into it and we can pick it back up.\r\n\r\nUnless anyone else has an idea of what might be happening?\n", "before_files": [{"content": "from inspect import signature\n\nfrom mycroft.messagebus.message import Message\nfrom mycroft.metrics import Stopwatch, report_timing\nfrom mycroft.util.log import LOG\n\nfrom ..skill_data import to_alnum\n\n\ndef unmunge_message(message, skill_id):\n \"\"\"Restore message keywords by removing the Letterified skill ID.\n Arguments:\n message (Message): Intent result message\n skill_id (str): skill identifier\n Returns:\n Message without clear keywords\n \"\"\"\n if isinstance(message, Message) and isinstance(message.data, dict):\n skill_id = to_alnum(skill_id)\n for key in list(message.data.keys()):\n if key.startswith(skill_id):\n # replace the munged key with the real one\n new_key = key[len(skill_id):]\n message.data[new_key] = message.data.pop(key)\n\n return message\n\n\ndef get_handler_name(handler):\n \"\"\"Name (including class if available) of handler function.\n\n Arguments:\n handler (function): Function to be named\n\n Returns:\n string: handler name as string\n \"\"\"\n if '__self__' in dir(handler) and 'name' in dir(handler.__self__):\n return handler.__self__.name + '.' 
+ handler.__name__\n else:\n return handler.__name__\n\n\ndef create_wrapper(handler, skill_id, on_start, on_end, on_error):\n \"\"\"Create the default skill handler wrapper.\n\n This wrapper handles things like metrics, reporting handler start/stop\n and errors.\n handler (callable): method/function to call\n skill_id: skill_id for associated skill\n on_start (function): function to call before executing the handler\n on_end (function): function to call after executing the handler\n on_error (function): function to call for error reporting\n \"\"\"\n def wrapper(message):\n stopwatch = Stopwatch()\n try:\n message = unmunge_message(message, skill_id)\n if on_start:\n on_start(message)\n\n with stopwatch:\n if len(signature(handler).parameters) == 0:\n handler()\n else:\n handler(message)\n\n except Exception as e:\n if on_error:\n on_error(e)\n finally:\n if on_end:\n on_end(message)\n\n # Send timing metrics\n context = message.context\n if context and 'ident' in context:\n report_timing(context['ident'], 'skill_handler', stopwatch,\n {'handler': handler.__name__,\n 'skill_id': skill_id})\n return wrapper\n\n\ndef create_basic_wrapper(handler, on_error=None):\n \"\"\"Create the default skill handler wrapper.\n\n This wrapper handles things like metrics, reporting handler start/stop\n and errors.\n\n Arguments:\n handler (callable): method/function to call\n on_error (function): function to call to report error.\n\n Returns:\n Wrapped callable\n \"\"\"\n def wrapper(message):\n try:\n if len(signature(handler).parameters) == 0:\n handler()\n else:\n handler(message)\n except Exception as e:\n if on_error:\n on_error(e)\n\n return wrapper\n\n\nclass EventContainer:\n \"\"\"Container tracking messagbus handlers.\n\n This container tracks events added by a skill, allowing unregistering\n all events on shutdown.\n \"\"\"\n def __init__(self, bus=None):\n self.bus = bus\n self.events = []\n\n def set_bus(self, bus):\n self.bus = bus\n\n def add(self, name, handler, once=False):\n \"\"\"Create event handler for executing intent or other event.\n\n Arguments:\n name (string): IntentParser name\n handler (func): Method to call\n once (bool, optional): Event handler will be removed after it has\n been run once.\n \"\"\"\n def once_wrapper(message):\n # Remove registered one-time handler before invoking,\n # allowing them to re-schedule themselves.\n handler(message)\n self.remove(name)\n\n if handler:\n if once:\n self.bus.once(name, once_wrapper)\n else:\n self.bus.on(name, handler)\n self.events.append((name, handler))\n\n def remove(self, name):\n \"\"\"Removes an event from bus emitter and events list.\n\n Args:\n name (string): Name of Intent or Scheduler Event\n Returns:\n bool: True if found and removed, False if not found\n \"\"\"\n print(\"Removing event {}\".format(name))\n removed = False\n for _name, _handler in list(self.events):\n if name == _name:\n try:\n self.events.remove((_name, _handler))\n except ValueError:\n LOG.error('Failed to remove event {}'.format(name))\n pass\n removed = True\n\n # Because of function wrappers, the emitter doesn't always directly\n # hold the _handler function, it sometimes holds something like\n # 'wrapper(_handler)'. 
So a call like:\n # self.bus.remove(_name, _handler)\n # will not find it, leaving an event handler with that name left behind\n # waiting to fire if it is ever re-installed and triggered.\n # Remove all handlers with the given name, regardless of handler.\n if removed:\n self.bus.remove_all_listeners(name)\n return removed\n\n def __iter__(self):\n return iter(self.events)\n\n def clear(self):\n \"\"\"Unregister all registered handlers and clear the list of registered\n events.\n \"\"\"\n for e, f in self.events:\n self.bus.remove(e, f)\n self.events = [] # Remove reference to wrappers\n", "path": "mycroft/skills/mycroft_skill/event_container.py"}]}
3,335
160
gh_patches_debug_33830
rasdani/github-patches
git_diff
scikit-image__scikit-image-4416
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Interpolation error in measure.profile_line ## Description #4206, once merged, exposes interesting behavior for [measure.profile_line](https://scikit-image.org/docs/dev/api/skimage.measure.html#skimage.measure.profile_line). Consider ```python image = np.arange(12).reshape(4, 3) # reduce_func is a new parameter proposed in #4206 prof = profile_line(image, (0, 1), (3, 1), linewidth=3, reduce_func=None) assert_array_equal(image, prof) ``` As I understand it, the returned profile should be identical with the original image but we get this (value at (0, 2) is different): ``` AssertionError: Arrays are not equal Mismatch: 8.33% Max absolute difference: 2 Max relative difference: 0. x: array([[ 0, 1, 2], [ 3, 4, 5], [ 6, 7, 8], [ 9, 10, 11]]) y: array([[ 0, 1, 0], [ 3, 4, 5], [ 6, 7, 8], [ 9, 10, 11]]) ``` This might originate in [scipy.ndimage.map_coordinates](https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.map_coordinates.html#scipy.ndimage.map_coordinates). Looks like a potential bug to me but might be an edge case for the underlying interpolation that can be ignored/is not reasonable to fix. This behavior is already relevant even before merging #4206 because the faulty value will change the mean value of the profile line. https://github.com/scikit-image/scikit-image/pull/4206#issuecomment-544920806 also highlights additional strange behavior. ## Version information Current master, exposed once #4206 is merged. </issue> <code> [start of skimage/measure/profile.py] 1 from warnings import warn 2 import numpy as np 3 from scipy import ndimage as ndi 4 5 6 def profile_line(image, src, dst, linewidth=1, 7 order=None, mode='constant', cval=0.0, 8 *, reduce_func=np.mean): 9 """Return the intensity profile of an image measured along a scan line. 10 11 Parameters 12 ---------- 13 image : numeric array, shape (M, N[, C]) 14 The image, either grayscale (2D array) or multichannel 15 (3D array, where the final axis contains the channel 16 information). 17 src : 2-tuple of numeric scalar (float or int) 18 The start point of the scan line. 19 dst : 2-tuple of numeric scalar (float or int) 20 The end point of the scan line. The destination point is *included* 21 in the profile, in contrast to standard numpy indexing. 22 linewidth : int, optional 23 Width of the scan, perpendicular to the line 24 order : int in {0, 1, 2, 3, 4, 5}, optional 25 The order of the spline interpolation, default is 0 if 26 image.dtype is bool and 1 otherwise. The order has to be in 27 the range 0-5. See `skimage.transform.warp` for detail. 28 mode : {'constant', 'nearest', 'reflect', 'mirror', 'wrap'}, optional 29 How to compute any values falling outside of the image. 30 cval : float, optional 31 If `mode` is 'constant', what constant value to use outside the image. 32 reduce_func : callable, optional 33 Function used to calculate the aggregation of pixel values 34 perpendicular to the profile_line direction when `linewidth` > 1. 35 If set to None the unreduced array will be returned. 36 37 Returns 38 ------- 39 return_value : array 40 The intensity profile along the scan line. The length of the profile 41 is the ceil of the computed length of the scan line. 
42 43 Examples 44 -------- 45 >>> x = np.array([[1, 1, 1, 2, 2, 2]]) 46 >>> img = np.vstack([np.zeros_like(x), x, x, x, np.zeros_like(x)]) 47 >>> img 48 array([[0, 0, 0, 0, 0, 0], 49 [1, 1, 1, 2, 2, 2], 50 [1, 1, 1, 2, 2, 2], 51 [1, 1, 1, 2, 2, 2], 52 [0, 0, 0, 0, 0, 0]]) 53 >>> profile_line(img, (2, 1), (2, 4)) 54 array([1., 1., 2., 2.]) 55 >>> profile_line(img, (1, 0), (1, 6), cval=4) 56 array([1., 1., 1., 2., 2., 2., 4.]) 57 58 The destination point is included in the profile, in contrast to 59 standard numpy indexing. 60 For example: 61 62 >>> profile_line(img, (1, 0), (1, 6)) # The final point is out of bounds 63 array([1., 1., 1., 2., 2., 2., 0.]) 64 >>> profile_line(img, (1, 0), (1, 5)) # This accesses the full first row 65 array([1., 1., 1., 2., 2., 2.]) 66 67 For different reduce_func inputs: 68 69 >>> profile_line(img, (1, 0), (1, 3), linewidth=3, reduce_func=np.mean) 70 array([0.66666667, 0.66666667, 0.66666667, 1.33333333]) 71 >>> profile_line(img, (1, 0), (1, 3), linewidth=3, reduce_func=np.max) 72 array([1, 1, 1, 2]) 73 >>> profile_line(img, (1, 0), (1, 3), linewidth=3, reduce_func=np.sum) 74 array([2, 2, 2, 4]) 75 76 The unreduced array will be returned when `reduce_func` is None or when 77 `reduce_func` acts on each pixel value individually. 78 79 >>> profile_line(img, (1, 2), (4, 2), linewidth=3, order=0, 80 ... reduce_func=None) 81 array([[1, 1, 2], 82 [1, 1, 2], 83 [1, 1, 2], 84 [0, 0, 0]]) 85 >>> profile_line(img, (1, 0), (1, 3), linewidth=3, reduce_func=np.sqrt) 86 array([[1. , 1. , 0. ], 87 [1. , 1. , 0. ], 88 [1. , 1. , 0. ], 89 [1.41421356, 1.41421356, 0. ]]) 90 """ 91 if order is None: 92 order = 0 if image.dtype == bool else 1 93 94 if image.dtype == bool and order != 0: 95 warn("Input image dtype is bool. Interpolation is not defined " 96 "with bool data type. Please set order to 0 or explicitely " 97 "cast input image to another data type. Starting from version " 98 "0.19 a ValueError will be raised instead of this warning.", 99 FutureWarning, stacklevel=2) 100 101 perp_lines = _line_profile_coordinates(src, dst, linewidth=linewidth) 102 if image.ndim == 3: 103 pixels = [ndi.map_coordinates(image[..., i], perp_lines, 104 prefilter=order > 1, 105 order=order, mode=mode, 106 cval=cval) for i in 107 range(image.shape[2])] 108 pixels = np.transpose(np.asarray(pixels), (1, 2, 0)) 109 else: 110 pixels = ndi.map_coordinates(image, perp_lines, prefilter=order > 1, 111 order=order, mode=mode, cval=cval) 112 # The outputted array with reduce_func=None gives an array where the 113 # row values (axis=1) are flipped. Here, we make this consistent. 114 pixels = np.flip(pixels, axis=1) 115 116 if reduce_func is None: 117 intensities = pixels 118 else: 119 try: 120 intensities = reduce_func(pixels, axis=1) 121 except TypeError: # function doesn't allow axis kwarg 122 intensities = np.apply_along_axis(reduce_func, arr=pixels, axis=1) 123 124 return intensities 125 126 127 def _line_profile_coordinates(src, dst, linewidth=1): 128 """Return the coordinates of the profile of an image along a scan line. 129 130 Parameters 131 ---------- 132 src : 2-tuple of numeric scalar (float or int) 133 The start point of the scan line. 134 dst : 2-tuple of numeric scalar (float or int) 135 The end point of the scan line. 136 linewidth : int, optional 137 Width of the scan, perpendicular to the line 138 139 Returns 140 ------- 141 coords : array, shape (2, N, C), float 142 The coordinates of the profile along the scan line. 
The length of the 143 profile is the ceil of the computed length of the scan line. 144 145 Notes 146 ----- 147 This is a utility method meant to be used internally by skimage functions. 148 The destination point is included in the profile, in contrast to 149 standard numpy indexing. 150 """ 151 src_row, src_col = src = np.asarray(src, dtype=float) 152 dst_row, dst_col = dst = np.asarray(dst, dtype=float) 153 d_row, d_col = dst - src 154 theta = np.arctan2(d_row, d_col) 155 156 length = int(np.ceil(np.hypot(d_row, d_col) + 1)) 157 # we add one above because we include the last point in the profile 158 # (in contrast to standard numpy indexing) 159 line_col = np.linspace(src_col, dst_col, length) 160 line_row = np.linspace(src_row, dst_row, length) 161 162 # we subtract 1 from linewidth to change from pixel-counting 163 # (make this line 3 pixels wide) to point distances (the 164 # distance between pixel centers) 165 col_width = (linewidth - 1) * np.sin(-theta) / 2 166 row_width = (linewidth - 1) * np.cos(theta) / 2 167 perp_rows = np.array([np.linspace(row_i - row_width, row_i + row_width, 168 linewidth) for row_i in line_row]) 169 perp_cols = np.array([np.linspace(col_i - col_width, col_i + col_width, 170 linewidth) for col_i in line_col]) 171 return np.array([perp_rows, perp_cols]) 172 [end of skimage/measure/profile.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/skimage/measure/profile.py b/skimage/measure/profile.py --- a/skimage/measure/profile.py +++ b/skimage/measure/profile.py @@ -4,21 +4,22 @@ def profile_line(image, src, dst, linewidth=1, - order=None, mode='constant', cval=0.0, + order=None, mode=None, cval=0.0, *, reduce_func=np.mean): """Return the intensity profile of an image measured along a scan line. Parameters ---------- - image : numeric array, shape (M, N[, C]) + image : ndarray, shape (M, N[, C]) The image, either grayscale (2D array) or multichannel (3D array, where the final axis contains the channel information). - src : 2-tuple of numeric scalar (float or int) - The start point of the scan line. - dst : 2-tuple of numeric scalar (float or int) - The end point of the scan line. The destination point is *included* - in the profile, in contrast to standard numpy indexing. + src : array_like, shape (2, ) + The coordinates of the start point of the scan line. + dst : array_like, shape (2, ) + The coordinates of the end point of the scan + line. The destination point is *included* in the profile, in + contrast to standard numpy indexing. linewidth : int, optional Width of the scan, perpendicular to the line order : int in {0, 1, 2, 3, 4, 5}, optional @@ -97,6 +98,12 @@ "cast input image to another data type. Starting from version " "0.19 a ValueError will be raised instead of this warning.", FutureWarning, stacklevel=2) + if mode is None: + warn("Default out of bounds interpolation mode 'constant' is " + "deprecated. In version 0.19 it will be set to 'reflect'. " + "To avoid this warning, set `mode=` explicitly.", + FutureWarning, stacklevel=2) + mode = 'constant' perp_lines = _line_profile_coordinates(src, dst, linewidth=linewidth) if image.ndim == 3:
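The behavioural detail this diff warns about is easy to miss: because `profile_line` includes the destination point, a scan like `(1, 0) → (1, 6)` on a 6-column image samples one pixel past the edge, and the out-of-bounds fill policy decides what that last value becomes. A small illustrative check, reusing the toy image from the docstring (the `reflect` result is what the future default from the deprecation notice should produce; treat it as a sketch, not library-verified output):

```python
import numpy as np
from skimage.measure import profile_line

x = np.array([[1, 1, 1, 2, 2, 2]])
img = np.vstack([np.zeros_like(x), x, x, x, np.zeros_like(x)])

# 7 sample points for a 6-column image: the last one falls outside the array.
constant = profile_line(img, (1, 0), (1, 6), mode='constant')  # out-of-bounds sample filled with cval
reflect = profile_line(img, (1, 0), (1, 6), mode='reflect')    # out-of-bounds sample mirrors the edge

print(constant)  # [1. 1. 1. 2. 2. 2. 0.]  (value taken from the docstring example)
print(reflect)   # expected: [1. 1. 1. 2. 2. 2. 2.]
```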
{"golden_diff": "diff --git a/skimage/measure/profile.py b/skimage/measure/profile.py\n--- a/skimage/measure/profile.py\n+++ b/skimage/measure/profile.py\n@@ -4,21 +4,22 @@\n \n \n def profile_line(image, src, dst, linewidth=1,\n- order=None, mode='constant', cval=0.0,\n+ order=None, mode=None, cval=0.0,\n *, reduce_func=np.mean):\n \"\"\"Return the intensity profile of an image measured along a scan line.\n \n Parameters\n ----------\n- image : numeric array, shape (M, N[, C])\n+ image : ndarray, shape (M, N[, C])\n The image, either grayscale (2D array) or multichannel\n (3D array, where the final axis contains the channel\n information).\n- src : 2-tuple of numeric scalar (float or int)\n- The start point of the scan line.\n- dst : 2-tuple of numeric scalar (float or int)\n- The end point of the scan line. The destination point is *included*\n- in the profile, in contrast to standard numpy indexing.\n+ src : array_like, shape (2, )\n+ The coordinates of the start point of the scan line.\n+ dst : array_like, shape (2, )\n+ The coordinates of the end point of the scan\n+ line. The destination point is *included* in the profile, in\n+ contrast to standard numpy indexing.\n linewidth : int, optional\n Width of the scan, perpendicular to the line\n order : int in {0, 1, 2, 3, 4, 5}, optional\n@@ -97,6 +98,12 @@\n \"cast input image to another data type. Starting from version \"\n \"0.19 a ValueError will be raised instead of this warning.\",\n FutureWarning, stacklevel=2)\n+ if mode is None:\n+ warn(\"Default out of bounds interpolation mode 'constant' is \"\n+ \"deprecated. In version 0.19 it will be set to 'reflect'. \"\n+ \"To avoid this warning, set `mode=` explicitly.\",\n+ FutureWarning, stacklevel=2)\n+ mode = 'constant'\n \n perp_lines = _line_profile_coordinates(src, dst, linewidth=linewidth)\n if image.ndim == 3:\n", "issue": "Interpolation error in measure.profile_line\n## Description\r\n\r\n#4206, once merged, exposes interesting behavior for [measure.profile_line](https://scikit-image.org/docs/dev/api/skimage.measure.html#skimage.measure.profile_line). Consider\r\n\r\n```python\r\nimage = np.arange(12).reshape(4, 3)\r\n# reduce_func is a new parameter proposed in #4206\r\nprof = profile_line(image, (0, 1), (3, 1), linewidth=3, reduce_func=None)\r\nassert_array_equal(image, prof)\r\n```\r\nAs I understand it, the returned profile should be identical with the original image but we get this (value at (0, 2) is different):\r\n```\r\nAssertionError: \r\nArrays are not equal\r\nMismatch: 8.33%\r\nMax absolute difference: 2\r\nMax relative difference: 0.\r\n x: array([[ 0, 1, 2],\r\n [ 3, 4, 5],\r\n [ 6, 7, 8],\r\n [ 9, 10, 11]])\r\n y: array([[ 0, 1, 0],\r\n [ 3, 4, 5],\r\n [ 6, 7, 8],\r\n [ 9, 10, 11]])\r\n```\r\nThis might originate in [scipy.ndimage.map_coordinates](https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.map_coordinates.html#scipy.ndimage.map_coordinates). Looks like a potential bug to me but might be an edge case for the underlying interpolation that can be ignored/is not reasonable to fix. 
\r\nThis behavior is already relevant even before merging #4206 because the faulty value will change the mean value of the profile line.\r\n\r\nhttps://github.com/scikit-image/scikit-image/pull/4206#issuecomment-544920806 also highlights additional strange behavior.\r\n\r\n## Version information\r\n\r\nCurrent master, exposed once #4206 is merged.\r\n\n", "before_files": [{"content": "from warnings import warn\nimport numpy as np\nfrom scipy import ndimage as ndi\n\n\ndef profile_line(image, src, dst, linewidth=1,\n order=None, mode='constant', cval=0.0,\n *, reduce_func=np.mean):\n \"\"\"Return the intensity profile of an image measured along a scan line.\n\n Parameters\n ----------\n image : numeric array, shape (M, N[, C])\n The image, either grayscale (2D array) or multichannel\n (3D array, where the final axis contains the channel\n information).\n src : 2-tuple of numeric scalar (float or int)\n The start point of the scan line.\n dst : 2-tuple of numeric scalar (float or int)\n The end point of the scan line. The destination point is *included*\n in the profile, in contrast to standard numpy indexing.\n linewidth : int, optional\n Width of the scan, perpendicular to the line\n order : int in {0, 1, 2, 3, 4, 5}, optional\n The order of the spline interpolation, default is 0 if\n image.dtype is bool and 1 otherwise. The order has to be in\n the range 0-5. See `skimage.transform.warp` for detail.\n mode : {'constant', 'nearest', 'reflect', 'mirror', 'wrap'}, optional\n How to compute any values falling outside of the image.\n cval : float, optional\n If `mode` is 'constant', what constant value to use outside the image.\n reduce_func : callable, optional\n Function used to calculate the aggregation of pixel values\n perpendicular to the profile_line direction when `linewidth` > 1.\n If set to None the unreduced array will be returned.\n\n Returns\n -------\n return_value : array\n The intensity profile along the scan line. The length of the profile\n is the ceil of the computed length of the scan line.\n\n Examples\n --------\n >>> x = np.array([[1, 1, 1, 2, 2, 2]])\n >>> img = np.vstack([np.zeros_like(x), x, x, x, np.zeros_like(x)])\n >>> img\n array([[0, 0, 0, 0, 0, 0],\n [1, 1, 1, 2, 2, 2],\n [1, 1, 1, 2, 2, 2],\n [1, 1, 1, 2, 2, 2],\n [0, 0, 0, 0, 0, 0]])\n >>> profile_line(img, (2, 1), (2, 4))\n array([1., 1., 2., 2.])\n >>> profile_line(img, (1, 0), (1, 6), cval=4)\n array([1., 1., 1., 2., 2., 2., 4.])\n\n The destination point is included in the profile, in contrast to\n standard numpy indexing.\n For example:\n\n >>> profile_line(img, (1, 0), (1, 6)) # The final point is out of bounds\n array([1., 1., 1., 2., 2., 2., 0.])\n >>> profile_line(img, (1, 0), (1, 5)) # This accesses the full first row\n array([1., 1., 1., 2., 2., 2.])\n\n For different reduce_func inputs:\n\n >>> profile_line(img, (1, 0), (1, 3), linewidth=3, reduce_func=np.mean)\n array([0.66666667, 0.66666667, 0.66666667, 1.33333333])\n >>> profile_line(img, (1, 0), (1, 3), linewidth=3, reduce_func=np.max)\n array([1, 1, 1, 2])\n >>> profile_line(img, (1, 0), (1, 3), linewidth=3, reduce_func=np.sum)\n array([2, 2, 2, 4])\n\n The unreduced array will be returned when `reduce_func` is None or when\n `reduce_func` acts on each pixel value individually.\n\n >>> profile_line(img, (1, 2), (4, 2), linewidth=3, order=0,\n ... reduce_func=None)\n array([[1, 1, 2],\n [1, 1, 2],\n [1, 1, 2],\n [0, 0, 0]])\n >>> profile_line(img, (1, 0), (1, 3), linewidth=3, reduce_func=np.sqrt)\n array([[1. , 1. , 0. ],\n [1. , 1. , 0. 
],\n [1. , 1. , 0. ],\n [1.41421356, 1.41421356, 0. ]])\n \"\"\"\n if order is None:\n order = 0 if image.dtype == bool else 1\n\n if image.dtype == bool and order != 0:\n warn(\"Input image dtype is bool. Interpolation is not defined \"\n \"with bool data type. Please set order to 0 or explicitely \"\n \"cast input image to another data type. Starting from version \"\n \"0.19 a ValueError will be raised instead of this warning.\",\n FutureWarning, stacklevel=2)\n\n perp_lines = _line_profile_coordinates(src, dst, linewidth=linewidth)\n if image.ndim == 3:\n pixels = [ndi.map_coordinates(image[..., i], perp_lines,\n prefilter=order > 1,\n order=order, mode=mode,\n cval=cval) for i in\n range(image.shape[2])]\n pixels = np.transpose(np.asarray(pixels), (1, 2, 0))\n else:\n pixels = ndi.map_coordinates(image, perp_lines, prefilter=order > 1,\n order=order, mode=mode, cval=cval)\n # The outputted array with reduce_func=None gives an array where the\n # row values (axis=1) are flipped. Here, we make this consistent.\n pixels = np.flip(pixels, axis=1)\n\n if reduce_func is None:\n intensities = pixels\n else:\n try:\n intensities = reduce_func(pixels, axis=1)\n except TypeError: # function doesn't allow axis kwarg\n intensities = np.apply_along_axis(reduce_func, arr=pixels, axis=1)\n\n return intensities\n\n\ndef _line_profile_coordinates(src, dst, linewidth=1):\n \"\"\"Return the coordinates of the profile of an image along a scan line.\n\n Parameters\n ----------\n src : 2-tuple of numeric scalar (float or int)\n The start point of the scan line.\n dst : 2-tuple of numeric scalar (float or int)\n The end point of the scan line.\n linewidth : int, optional\n Width of the scan, perpendicular to the line\n\n Returns\n -------\n coords : array, shape (2, N, C), float\n The coordinates of the profile along the scan line. The length of the\n profile is the ceil of the computed length of the scan line.\n\n Notes\n -----\n This is a utility method meant to be used internally by skimage functions.\n The destination point is included in the profile, in contrast to\n standard numpy indexing.\n \"\"\"\n src_row, src_col = src = np.asarray(src, dtype=float)\n dst_row, dst_col = dst = np.asarray(dst, dtype=float)\n d_row, d_col = dst - src\n theta = np.arctan2(d_row, d_col)\n\n length = int(np.ceil(np.hypot(d_row, d_col) + 1))\n # we add one above because we include the last point in the profile\n # (in contrast to standard numpy indexing)\n line_col = np.linspace(src_col, dst_col, length)\n line_row = np.linspace(src_row, dst_row, length)\n\n # we subtract 1 from linewidth to change from pixel-counting\n # (make this line 3 pixels wide) to point distances (the\n # distance between pixel centers)\n col_width = (linewidth - 1) * np.sin(-theta) / 2\n row_width = (linewidth - 1) * np.cos(theta) / 2\n perp_rows = np.array([np.linspace(row_i - row_width, row_i + row_width,\n linewidth) for row_i in line_row])\n perp_cols = np.array([np.linspace(col_i - col_width, col_i + col_width,\n linewidth) for col_i in line_col])\n return np.array([perp_rows, perp_cols])\n", "path": "skimage/measure/profile.py"}]}
3,400
528
gh_patches_debug_25538
rasdani/github-patches
git_diff
mathesar-foundation__mathesar-2331
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Fix API error response when filtering on an invalid date ## Steps to reproduce 1. Send a request like ```http GET http://localhost:8000/api/db/v0/tables/1/records/?limit=500&offset=0&filter={"equal":[{"column_id":[6]},{"literal":["2"]}]} ``` Here, column with id `6` is a date column, and the value `"2"` is a partially-entered date that should be considered invalid. 1. Expect an error response conforming to our [error standards](https://wiki.mathesar.org/en/engineering/standards/api#errors) 1. Instead, observe an HTTP 500 Django error. </issue> <code> [start of mathesar/api/db/viewsets/records.py] 1 from psycopg2.errors import ForeignKeyViolation 2 from rest_access_policy import AccessViewSetMixin 3 from rest_framework import status, viewsets 4 from rest_framework.exceptions import NotFound 5 from rest_framework.renderers import BrowsableAPIRenderer 6 from rest_framework.response import Response 7 from sqlalchemy.exc import IntegrityError 8 9 from mathesar.api.db.permissions.records import RecordAccessPolicy 10 from mathesar.api.exceptions.error_codes import ErrorCodes 11 import mathesar.api.exceptions.database_exceptions.exceptions as database_api_exceptions 12 import mathesar.api.exceptions.generic_exceptions.base_exceptions as generic_api_exceptions 13 from db.functions.exceptions import ( 14 BadDBFunctionFormat, ReferencedColumnsDontExist, UnknownDBFunctionID, 15 ) 16 from db.records.exceptions import ( 17 BadGroupFormat, GroupFieldNotFound, InvalidGroupType, UndefinedFunction, 18 BadSortFormat, SortFieldNotFound 19 ) 20 from mathesar.api.pagination import TableLimitOffsetPagination 21 from mathesar.api.serializers.records import RecordListParameterSerializer, RecordSerializer 22 from mathesar.api.utils import get_table_or_404 23 from mathesar.functions.operations.convert import rewrite_db_function_spec_column_ids_to_names 24 from mathesar.models.base import Table 25 from mathesar.utils.json import MathesarJSONRenderer 26 27 28 class RecordViewSet(AccessViewSetMixin, viewsets.ViewSet): 29 access_policy = RecordAccessPolicy 30 31 # There is no 'update' method. 32 # We're not supporting PUT requests because there aren't a lot of use cases 33 # where the entire record needs to be replaced, PATCH suffices for updates. 
34 def get_queryset(self): 35 return Table.objects.all().order_by('-created_at') 36 37 renderer_classes = [MathesarJSONRenderer, BrowsableAPIRenderer] 38 39 # For filter parameter formatting, see: 40 # db/functions/operations/deserialize.py::get_db_function_from_ma_function_spec function doc> 41 # For sorting parameter formatting, see: 42 # https://github.com/centerofci/sqlalchemy-filters#sort-format 43 def list(self, request, table_pk=None): 44 paginator = TableLimitOffsetPagination() 45 46 serializer = RecordListParameterSerializer(data=request.GET) 47 serializer.is_valid(raise_exception=True) 48 table = get_table_or_404(table_pk) 49 50 filter_unprocessed = serializer.validated_data['filter'] 51 order_by = serializer.validated_data['order_by'] 52 grouping = serializer.validated_data['grouping'] 53 search_fuzzy = serializer.validated_data['search_fuzzy'] 54 filter_processed = None 55 column_names_to_ids = table.get_column_name_id_bidirectional_map() 56 column_ids_to_names = column_names_to_ids.inverse 57 if filter_unprocessed: 58 filter_processed = rewrite_db_function_spec_column_ids_to_names( 59 column_ids_to_names=column_ids_to_names, 60 spec=filter_unprocessed, 61 ) 62 # Replace column id value used in the `field` property with column name 63 name_converted_group_by = None 64 if grouping: 65 group_by_columns_names = [column_ids_to_names[column_id] for column_id in grouping['columns']] 66 name_converted_group_by = {**grouping, 'columns': group_by_columns_names} 67 name_converted_order_by = [{**column, 'field': column_ids_to_names[column['field']]} for column in order_by] 68 name_converted_search = [{**column, 'column': column_ids_to_names[column['field']]} for column in search_fuzzy] 69 70 try: 71 72 records = paginator.paginate_queryset( 73 self.get_queryset(), request, table, column_names_to_ids, 74 filters=filter_processed, 75 order_by=name_converted_order_by, 76 grouping=name_converted_group_by, 77 search=name_converted_search, 78 duplicate_only=serializer.validated_data['duplicate_only'] 79 ) 80 except (BadDBFunctionFormat, UnknownDBFunctionID, ReferencedColumnsDontExist) as e: 81 raise database_api_exceptions.BadFilterAPIException( 82 e, 83 field='filters', 84 status_code=status.HTTP_400_BAD_REQUEST 85 ) 86 except (BadSortFormat, SortFieldNotFound) as e: 87 raise database_api_exceptions.BadSortAPIException( 88 e, 89 field='order_by', 90 status_code=status.HTTP_400_BAD_REQUEST 91 ) 92 except (BadGroupFormat, GroupFieldNotFound, InvalidGroupType) as e: 93 raise database_api_exceptions.BadGroupAPIException( 94 e, 95 field='grouping', 96 status_code=status.HTTP_400_BAD_REQUEST 97 ) 98 except UndefinedFunction as e: 99 raise database_api_exceptions.UndefinedFunctionAPIException( 100 e, 101 details=e.args[0], 102 status_code=status.HTTP_400_BAD_REQUEST 103 ) 104 serializer = RecordSerializer( 105 records, 106 many=True, 107 context=self.get_serializer_context(table) 108 ) 109 return paginator.get_paginated_response(serializer.data) 110 111 def retrieve(self, request, pk=None, table_pk=None): 112 table = get_table_or_404(table_pk) 113 # TODO refactor to use serializer for more DRY response logic 114 paginator = TableLimitOffsetPagination() 115 record_filters = { 116 "equal": [ 117 {"column_name": [table.primary_key_column_name]}, 118 {"literal": [pk]} 119 ] 120 } 121 column_names_to_ids = table.get_column_name_id_bidirectional_map() 122 records = paginator.paginate_queryset( 123 table, 124 request, 125 table, 126 column_names_to_ids, 127 filters=record_filters 128 ) 129 if not records: 
130 raise NotFound 131 serializer = RecordSerializer( 132 records, 133 many=True, 134 context=self.get_serializer_context(table) 135 ) 136 return paginator.get_paginated_response(serializer.data) 137 138 def create(self, request, table_pk=None): 139 table = get_table_or_404(table_pk) 140 serializer = RecordSerializer(data=request.data, context=self.get_serializer_context(table)) 141 serializer.is_valid(raise_exception=True) 142 serializer.save() 143 # TODO refactor to use serializer for more DRY response logic 144 column_name_id_map = table.get_column_name_id_bidirectional_map() 145 table_pk_column_id = column_name_id_map[table.primary_key_column_name] 146 pk_value = serializer.data[table_pk_column_id] 147 paginator = TableLimitOffsetPagination() 148 record_filters = { 149 "equal": [ 150 {"column_name": [table.primary_key_column_name]}, 151 {"literal": [pk_value]} 152 ] 153 } 154 column_names_to_ids = table.get_column_name_id_bidirectional_map() 155 records = paginator.paginate_queryset( 156 table, 157 request, 158 table, 159 column_names_to_ids, 160 filters=record_filters 161 ) 162 serializer = RecordSerializer( 163 records, 164 many=True, 165 context=self.get_serializer_context(table) 166 ) 167 response = paginator.get_paginated_response(serializer.data) 168 response.status_code = status.HTTP_201_CREATED 169 return response 170 171 def partial_update(self, request, pk=None, table_pk=None): 172 table = get_table_or_404(table_pk) 173 serializer = RecordSerializer( 174 {'id': pk}, 175 data=request.data, 176 context=self.get_serializer_context(table), 177 partial=True 178 ) 179 serializer.is_valid(raise_exception=True) 180 serializer.save() 181 # TODO refactor to use serializer for more DRY response logic 182 paginator = TableLimitOffsetPagination() 183 record_filters = { 184 "equal": [ 185 {"column_name": [table.primary_key_column_name]}, 186 {"literal": [pk]} 187 ] 188 } 189 column_names_to_ids = table.get_column_name_id_bidirectional_map() 190 records = paginator.paginate_queryset( 191 table, 192 request, 193 table, 194 column_names_to_ids, 195 filters=record_filters 196 ) 197 serializer = RecordSerializer( 198 records, 199 many=True, 200 context=self.get_serializer_context(table) 201 ) 202 return paginator.get_paginated_response(serializer.data) 203 204 def destroy(self, request, pk=None, table_pk=None): 205 table = get_table_or_404(table_pk) 206 if table.get_record(pk) is None: 207 raise generic_api_exceptions.NotFoundAPIException( 208 NotFound, 209 error_code=ErrorCodes.RecordNotFound.value, 210 message="Record doesn't exist" 211 ) 212 try: 213 table.delete_record(pk) 214 except IntegrityError as e: 215 if isinstance(e.orig, ForeignKeyViolation): 216 raise database_api_exceptions.ForeignKeyViolationAPIException( 217 e, 218 status_code=status.HTTP_400_BAD_REQUEST, 219 referent_table=table, 220 ) 221 222 return Response(status=status.HTTP_204_NO_CONTENT) 223 224 def get_serializer_context(self, table): 225 columns_map = table.get_column_name_id_bidirectional_map() 226 context = {'columns_map': columns_map, 'table': table} 227 return context 228 [end of mathesar/api/db/viewsets/records.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mathesar/api/db/viewsets/records.py b/mathesar/api/db/viewsets/records.py --- a/mathesar/api/db/viewsets/records.py +++ b/mathesar/api/db/viewsets/records.py @@ -1,10 +1,10 @@ -from psycopg2.errors import ForeignKeyViolation +from psycopg2.errors import ForeignKeyViolation, InvalidDatetimeFormat from rest_access_policy import AccessViewSetMixin from rest_framework import status, viewsets from rest_framework.exceptions import NotFound from rest_framework.renderers import BrowsableAPIRenderer from rest_framework.response import Response -from sqlalchemy.exc import IntegrityError +from sqlalchemy.exc import IntegrityError, DataError from mathesar.api.db.permissions.records import RecordAccessPolicy from mathesar.api.exceptions.error_codes import ErrorCodes @@ -101,6 +101,13 @@ details=e.args[0], status_code=status.HTTP_400_BAD_REQUEST ) + except DataError as e: + if isinstance(e.orig, InvalidDatetimeFormat): + raise database_api_exceptions.InvalidDateFormatAPIException( + e, + status_code=status.HTTP_400_BAD_REQUEST, + ) + serializer = RecordSerializer( records, many=True,
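The fix reuses the translation pattern the viewset already applies to `ForeignKeyViolation`: SQLAlchemy wraps driver errors in `DataError` with the original psycopg2 exception on `.orig`, and the handler re-raises a Mathesar API exception so the client gets a structured 400 instead of a Django 500. A stripped-down sketch of that pattern outside the viewset (the exception class below is a stand-in for `InvalidDateFormatAPIException` from the diff; names and signature are illustrative):

```python
from psycopg2.errors import InvalidDatetimeFormat
from sqlalchemy.exc import DataError


class InvalidDateFormatAPIError(Exception):
    """Stand-in for Mathesar's InvalidDateFormatAPIException (illustrative only)."""

    def __init__(self, cause, status_code=400):
        super().__init__(str(cause))
        self.status_code = status_code


def run_paginated_query(paginate, *args, **kwargs):
    try:
        return paginate(*args, **kwargs)
    except DataError as e:
        # SQLAlchemy keeps the driver-level error on .orig; translate only the
        # bad-date case and let every other DataError propagate unchanged.
        if isinstance(e.orig, InvalidDatetimeFormat):
            raise InvalidDateFormatAPIError(e, status_code=400)
        raise
```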
{"golden_diff": "diff --git a/mathesar/api/db/viewsets/records.py b/mathesar/api/db/viewsets/records.py\n--- a/mathesar/api/db/viewsets/records.py\n+++ b/mathesar/api/db/viewsets/records.py\n@@ -1,10 +1,10 @@\n-from psycopg2.errors import ForeignKeyViolation\n+from psycopg2.errors import ForeignKeyViolation, InvalidDatetimeFormat\n from rest_access_policy import AccessViewSetMixin\n from rest_framework import status, viewsets\n from rest_framework.exceptions import NotFound\n from rest_framework.renderers import BrowsableAPIRenderer\n from rest_framework.response import Response\n-from sqlalchemy.exc import IntegrityError\n+from sqlalchemy.exc import IntegrityError, DataError\n \n from mathesar.api.db.permissions.records import RecordAccessPolicy\n from mathesar.api.exceptions.error_codes import ErrorCodes\n@@ -101,6 +101,13 @@\n details=e.args[0],\n status_code=status.HTTP_400_BAD_REQUEST\n )\n+ except DataError as e:\n+ if isinstance(e.orig, InvalidDatetimeFormat):\n+ raise database_api_exceptions.InvalidDateFormatAPIException(\n+ e,\n+ status_code=status.HTTP_400_BAD_REQUEST,\n+ )\n+\n serializer = RecordSerializer(\n records,\n many=True,\n", "issue": "Fix API error response when filtering on an invalid date\n## Steps to reproduce\n\n1. Send a request like\n\n ```http\n GET http://localhost:8000/api/db/v0/tables/1/records/?limit=500&offset=0&filter={\"equal\":[{\"column_id\":[6]},{\"literal\":[\"2\"]}]}\n ```\n\n Here, column with id `6` is a date column, and the value `\"2\"` is a partially-entered date that should be considered invalid.\n\n1. Expect an error response conforming to our [error standards](https://wiki.mathesar.org/en/engineering/standards/api#errors)\n\n1. Instead, observe an HTTP 500 Django error.\n\n", "before_files": [{"content": "from psycopg2.errors import ForeignKeyViolation\nfrom rest_access_policy import AccessViewSetMixin\nfrom rest_framework import status, viewsets\nfrom rest_framework.exceptions import NotFound\nfrom rest_framework.renderers import BrowsableAPIRenderer\nfrom rest_framework.response import Response\nfrom sqlalchemy.exc import IntegrityError\n\nfrom mathesar.api.db.permissions.records import RecordAccessPolicy\nfrom mathesar.api.exceptions.error_codes import ErrorCodes\nimport mathesar.api.exceptions.database_exceptions.exceptions as database_api_exceptions\nimport mathesar.api.exceptions.generic_exceptions.base_exceptions as generic_api_exceptions\nfrom db.functions.exceptions import (\n BadDBFunctionFormat, ReferencedColumnsDontExist, UnknownDBFunctionID,\n)\nfrom db.records.exceptions import (\n BadGroupFormat, GroupFieldNotFound, InvalidGroupType, UndefinedFunction,\n BadSortFormat, SortFieldNotFound\n)\nfrom mathesar.api.pagination import TableLimitOffsetPagination\nfrom mathesar.api.serializers.records import RecordListParameterSerializer, RecordSerializer\nfrom mathesar.api.utils import get_table_or_404\nfrom mathesar.functions.operations.convert import rewrite_db_function_spec_column_ids_to_names\nfrom mathesar.models.base import Table\nfrom mathesar.utils.json import MathesarJSONRenderer\n\n\nclass RecordViewSet(AccessViewSetMixin, viewsets.ViewSet):\n access_policy = RecordAccessPolicy\n\n # There is no 'update' method.\n # We're not supporting PUT requests because there aren't a lot of use cases\n # where the entire record needs to be replaced, PATCH suffices for updates.\n def get_queryset(self):\n return Table.objects.all().order_by('-created_at')\n\n renderer_classes = [MathesarJSONRenderer, BrowsableAPIRenderer]\n\n # For filter 
parameter formatting, see:\n # db/functions/operations/deserialize.py::get_db_function_from_ma_function_spec function doc>\n # For sorting parameter formatting, see:\n # https://github.com/centerofci/sqlalchemy-filters#sort-format\n def list(self, request, table_pk=None):\n paginator = TableLimitOffsetPagination()\n\n serializer = RecordListParameterSerializer(data=request.GET)\n serializer.is_valid(raise_exception=True)\n table = get_table_or_404(table_pk)\n\n filter_unprocessed = serializer.validated_data['filter']\n order_by = serializer.validated_data['order_by']\n grouping = serializer.validated_data['grouping']\n search_fuzzy = serializer.validated_data['search_fuzzy']\n filter_processed = None\n column_names_to_ids = table.get_column_name_id_bidirectional_map()\n column_ids_to_names = column_names_to_ids.inverse\n if filter_unprocessed:\n filter_processed = rewrite_db_function_spec_column_ids_to_names(\n column_ids_to_names=column_ids_to_names,\n spec=filter_unprocessed,\n )\n # Replace column id value used in the `field` property with column name\n name_converted_group_by = None\n if grouping:\n group_by_columns_names = [column_ids_to_names[column_id] for column_id in grouping['columns']]\n name_converted_group_by = {**grouping, 'columns': group_by_columns_names}\n name_converted_order_by = [{**column, 'field': column_ids_to_names[column['field']]} for column in order_by]\n name_converted_search = [{**column, 'column': column_ids_to_names[column['field']]} for column in search_fuzzy]\n\n try:\n\n records = paginator.paginate_queryset(\n self.get_queryset(), request, table, column_names_to_ids,\n filters=filter_processed,\n order_by=name_converted_order_by,\n grouping=name_converted_group_by,\n search=name_converted_search,\n duplicate_only=serializer.validated_data['duplicate_only']\n )\n except (BadDBFunctionFormat, UnknownDBFunctionID, ReferencedColumnsDontExist) as e:\n raise database_api_exceptions.BadFilterAPIException(\n e,\n field='filters',\n status_code=status.HTTP_400_BAD_REQUEST\n )\n except (BadSortFormat, SortFieldNotFound) as e:\n raise database_api_exceptions.BadSortAPIException(\n e,\n field='order_by',\n status_code=status.HTTP_400_BAD_REQUEST\n )\n except (BadGroupFormat, GroupFieldNotFound, InvalidGroupType) as e:\n raise database_api_exceptions.BadGroupAPIException(\n e,\n field='grouping',\n status_code=status.HTTP_400_BAD_REQUEST\n )\n except UndefinedFunction as e:\n raise database_api_exceptions.UndefinedFunctionAPIException(\n e,\n details=e.args[0],\n status_code=status.HTTP_400_BAD_REQUEST\n )\n serializer = RecordSerializer(\n records,\n many=True,\n context=self.get_serializer_context(table)\n )\n return paginator.get_paginated_response(serializer.data)\n\n def retrieve(self, request, pk=None, table_pk=None):\n table = get_table_or_404(table_pk)\n # TODO refactor to use serializer for more DRY response logic\n paginator = TableLimitOffsetPagination()\n record_filters = {\n \"equal\": [\n {\"column_name\": [table.primary_key_column_name]},\n {\"literal\": [pk]}\n ]\n }\n column_names_to_ids = table.get_column_name_id_bidirectional_map()\n records = paginator.paginate_queryset(\n table,\n request,\n table,\n column_names_to_ids,\n filters=record_filters\n )\n if not records:\n raise NotFound\n serializer = RecordSerializer(\n records,\n many=True,\n context=self.get_serializer_context(table)\n )\n return paginator.get_paginated_response(serializer.data)\n\n def create(self, request, table_pk=None):\n table = get_table_or_404(table_pk)\n serializer = 
RecordSerializer(data=request.data, context=self.get_serializer_context(table))\n serializer.is_valid(raise_exception=True)\n serializer.save()\n # TODO refactor to use serializer for more DRY response logic\n column_name_id_map = table.get_column_name_id_bidirectional_map()\n table_pk_column_id = column_name_id_map[table.primary_key_column_name]\n pk_value = serializer.data[table_pk_column_id]\n paginator = TableLimitOffsetPagination()\n record_filters = {\n \"equal\": [\n {\"column_name\": [table.primary_key_column_name]},\n {\"literal\": [pk_value]}\n ]\n }\n column_names_to_ids = table.get_column_name_id_bidirectional_map()\n records = paginator.paginate_queryset(\n table,\n request,\n table,\n column_names_to_ids,\n filters=record_filters\n )\n serializer = RecordSerializer(\n records,\n many=True,\n context=self.get_serializer_context(table)\n )\n response = paginator.get_paginated_response(serializer.data)\n response.status_code = status.HTTP_201_CREATED\n return response\n\n def partial_update(self, request, pk=None, table_pk=None):\n table = get_table_or_404(table_pk)\n serializer = RecordSerializer(\n {'id': pk},\n data=request.data,\n context=self.get_serializer_context(table),\n partial=True\n )\n serializer.is_valid(raise_exception=True)\n serializer.save()\n # TODO refactor to use serializer for more DRY response logic\n paginator = TableLimitOffsetPagination()\n record_filters = {\n \"equal\": [\n {\"column_name\": [table.primary_key_column_name]},\n {\"literal\": [pk]}\n ]\n }\n column_names_to_ids = table.get_column_name_id_bidirectional_map()\n records = paginator.paginate_queryset(\n table,\n request,\n table,\n column_names_to_ids,\n filters=record_filters\n )\n serializer = RecordSerializer(\n records,\n many=True,\n context=self.get_serializer_context(table)\n )\n return paginator.get_paginated_response(serializer.data)\n\n def destroy(self, request, pk=None, table_pk=None):\n table = get_table_or_404(table_pk)\n if table.get_record(pk) is None:\n raise generic_api_exceptions.NotFoundAPIException(\n NotFound,\n error_code=ErrorCodes.RecordNotFound.value,\n message=\"Record doesn't exist\"\n )\n try:\n table.delete_record(pk)\n except IntegrityError as e:\n if isinstance(e.orig, ForeignKeyViolation):\n raise database_api_exceptions.ForeignKeyViolationAPIException(\n e,\n status_code=status.HTTP_400_BAD_REQUEST,\n referent_table=table,\n )\n\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n def get_serializer_context(self, table):\n columns_map = table.get_column_name_id_bidirectional_map()\n context = {'columns_map': columns_map, 'table': table}\n return context\n", "path": "mathesar/api/db/viewsets/records.py"}]}
3,091
265
gh_patches_debug_23967
rasdani/github-patches
git_diff
pwr-Solaar__Solaar-1856
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add git information in about and console **Information** Installed version of solaar:: 1.1.7+git1897-9c361c4~ubuntu22.10.1 Distribution: Ubuntu 22.10 + https://launchpad.net/~solaar-unifying/+archive/ubuntu/ppa (git) Kernel version: Linux 6.1.0-060100rc5-generic x86_64 GNU/Linux Example: a) ![image](https://user-images.githubusercontent.com/48624942/202720677-1d1257ab-3113-429b-b29c-78765161e1c4.png) b) $ solaar show Solaar version 1.1.7-9c361c4 [...] </issue> <code> [start of setup.py] 1 #!/usr/bin/env python3 2 3 from glob import glob as _glob 4 5 try: 6 from setuptools import setup 7 except ImportError: 8 from distutils.core import setup 9 10 main_ns = {} 11 with open('lib/solaar/__init__.py') as ver_file: 12 exec(ver_file.read(), main_ns) 13 NAME = main_ns['NAME'] 14 __version__ = main_ns['__version__'] 15 16 17 def _data_files(): 18 from os.path import dirname as _dirname 19 20 yield 'share/solaar/icons', _glob('share/solaar/icons/solaar*.svg') 21 yield 'share/solaar/icons', _glob('share/solaar/icons/light_*.png') 22 yield 'share/icons/hicolor/scalable/apps', ['share/solaar/icons/solaar.svg'] 23 24 for mo in _glob('share/locale/*/LC_MESSAGES/solaar.mo'): 25 yield _dirname(mo), [mo] 26 27 yield 'share/applications', ['share/applications/solaar.desktop'] 28 yield 'share/solaar/udev-rules.d', ['rules.d/42-logitech-unify-permissions.rules'] 29 yield 'share/metainfo', ['share/solaar/io.github.pwr_solaar.solaar.metainfo.xml'] 30 31 del _dirname 32 33 34 setup( 35 name=NAME.lower(), 36 version=__version__, 37 description='Linux device manager for Logitech receivers, keyboards, mice, and tablets.', 38 long_description=''' 39 Solaar is a Linux device manager for many Logitech peripherals that connect through 40 Unifying and other receivers or via USB or Bluetooth. 41 Solaar is able to pair/unpair devices with receivers and show and modify some of the 42 modifiable features of devices. 
43 For instructions on installing Solaar see https://pwr-solaar.github.io/Solaar/installation'''.strip(), 44 author='Daniel Pavel', 45 license='GPLv2', 46 url='http://pwr-solaar.github.io/Solaar/', 47 classifiers=[ 48 'Development Status :: 4 - Beta', 49 'Environment :: X11 Applications :: GTK', 50 'Environment :: Console', 51 'Intended Audience :: End Users/Desktop', 52 'License :: DFSG approved', 53 'License :: OSI Approved :: GNU General Public License v2 (GPLv2)', 54 'Natural Language :: English', 55 'Programming Language :: Python :: 3 :: Only', 56 'Operating System :: POSIX :: Linux', 57 'Topic :: Utilities', 58 ], 59 platforms=['linux'], 60 61 # sudo apt install python-gi python3-gi \ 62 # gir1.2-gtk-3.0 gir1.2-notify-0.7 gir1.2-ayatanaappindicator3-0.1 63 # os_requires=['gi.repository.GObject (>= 2.0)', 'gi.repository.Gtk (>= 3.0)'], 64 python_requires='>=3.7', 65 install_requires=[ 66 'evdev (>= 1.1.2)', 67 'pyudev (>= 0.13)', 68 'PyYAML (>= 3.12)', 69 'python-xlib (>= 0.27)', 70 'psutil (>= 5.4.3)', 71 ], 72 extras_require={ 73 'report-descriptor': ['hid-parser'], 74 'desktop-notifications': ['Notify (>= 0.7)'], 75 }, 76 package_dir={'': 'lib'}, 77 packages=['keysyms', 'hidapi', 'logitech_receiver', 'solaar', 'solaar.ui', 'solaar.cli'], 78 data_files=list(_data_files()), 79 scripts=_glob('bin/*'), 80 ) 81 [end of setup.py] [start of lib/solaar/__init__.py] 1 # -*- python-mode -*- 2 3 ## Copyright (C) 2012-2013 Daniel Pavel 4 ## 5 ## This program is free software; you can redistribute it and/or modify 6 ## it under the terms of the GNU General Public License as published by 7 ## the Free Software Foundation; either version 2 of the License, or 8 ## (at your option) any later version. 9 ## 10 ## This program is distributed in the hope that it will be useful, 11 ## but WITHOUT ANY WARRANTY; without even the implied warranty of 12 ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 ## GNU General Public License for more details. 14 ## 15 ## You should have received a copy of the GNU General Public License along 16 ## with this program; if not, write to the Free Software Foundation, Inc., 17 ## 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 18 19 __version__ = '1.1.7' 20 NAME = 'Solaar' 21 [end of lib/solaar/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/lib/solaar/__init__.py b/lib/solaar/__init__.py --- a/lib/solaar/__init__.py +++ b/lib/solaar/__init__.py @@ -16,5 +16,11 @@ ## with this program; if not, write to the Free Software Foundation, Inc., ## 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. -__version__ = '1.1.7' +__version_short__ = '1.1.7' NAME = 'Solaar' + +try: + import gitinfo + __version__ = __version_short__ + '-' + gitinfo.get_git_info()['commit'][:8] +except Exception: + __version__ = __version_short__ diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -11,7 +11,7 @@ with open('lib/solaar/__init__.py') as ver_file: exec(ver_file.read(), main_ns) NAME = main_ns['NAME'] -__version__ = main_ns['__version__'] +__version__ = main_ns['__version_short__'] def _data_files(): @@ -72,6 +72,7 @@ extras_require={ 'report-descriptor': ['hid-parser'], 'desktop-notifications': ['Notify (>= 0.7)'], + 'git-commit': ['python-git-info'], }, package_dir={'': 'lib'}, packages=['keysyms', 'hidapi', 'logitech_receiver', 'solaar', 'solaar.ui', 'solaar.cli'],
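Worth noting about this diff: the commit suffix is best-effort on purpose — `python-git-info` is only an optional extra (`git-commit`), and the lookup is wrapped in a broad `except` so release tarballs and installs outside a git checkout fall back to the plain version string. A rough sketch of the resulting behaviour (that `get_git_info()` returns `None` or raises outside a checkout is an assumption; the broad `except` in the diff covers either case):

```python
__version_short__ = '1.1.7'

try:
    import gitinfo  # provided by the optional 'python-git-info' dependency
    info = gitinfo.get_git_info()  # assumed None/raising outside a git checkout
    __version__ = __version_short__ + '-' + info['commit'][:8]
except Exception:
    __version__ = __version_short__

print(__version__)  # '1.1.7-' + first 8 hex digits of HEAD inside a checkout, plain '1.1.7' otherwise
```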
{"golden_diff": "diff --git a/lib/solaar/__init__.py b/lib/solaar/__init__.py\n--- a/lib/solaar/__init__.py\n+++ b/lib/solaar/__init__.py\n@@ -16,5 +16,11 @@\n ## with this program; if not, write to the Free Software Foundation, Inc.,\n ## 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n \n-__version__ = '1.1.7'\n+__version_short__ = '1.1.7'\n NAME = 'Solaar'\n+\n+try:\n+ import gitinfo\n+ __version__ = __version_short__ + '-' + gitinfo.get_git_info()['commit'][:8]\n+except Exception:\n+ __version__ = __version_short__\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -11,7 +11,7 @@\n with open('lib/solaar/__init__.py') as ver_file:\n exec(ver_file.read(), main_ns)\n NAME = main_ns['NAME']\n-__version__ = main_ns['__version__']\n+__version__ = main_ns['__version_short__']\n \n \n def _data_files():\n@@ -72,6 +72,7 @@\n extras_require={\n 'report-descriptor': ['hid-parser'],\n 'desktop-notifications': ['Notify (>= 0.7)'],\n+ 'git-commit': ['python-git-info'],\n },\n package_dir={'': 'lib'},\n packages=['keysyms', 'hidapi', 'logitech_receiver', 'solaar', 'solaar.ui', 'solaar.cli'],\n", "issue": "Add git information in about and console\n**Information**\r\nInstalled version of solaar:: 1.1.7+git1897-9c361c4~ubuntu22.10.1\r\nDistribution: Ubuntu 22.10 + https://launchpad.net/~solaar-unifying/+archive/ubuntu/ppa (git)\r\nKernel version: Linux 6.1.0-060100rc5-generic x86_64 GNU/Linux\r\n\r\nExample:\r\na)\r\n![image](https://user-images.githubusercontent.com/48624942/202720677-1d1257ab-3113-429b-b29c-78765161e1c4.png)\r\n\r\nb)\r\n$ solaar show\r\nSolaar version 1.1.7-9c361c4\r\n\r\n[...]\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nfrom glob import glob as _glob\n\ntry:\n from setuptools import setup\nexcept ImportError:\n from distutils.core import setup\n\nmain_ns = {}\nwith open('lib/solaar/__init__.py') as ver_file:\n exec(ver_file.read(), main_ns)\nNAME = main_ns['NAME']\n__version__ = main_ns['__version__']\n\n\ndef _data_files():\n from os.path import dirname as _dirname\n\n yield 'share/solaar/icons', _glob('share/solaar/icons/solaar*.svg')\n yield 'share/solaar/icons', _glob('share/solaar/icons/light_*.png')\n yield 'share/icons/hicolor/scalable/apps', ['share/solaar/icons/solaar.svg']\n\n for mo in _glob('share/locale/*/LC_MESSAGES/solaar.mo'):\n yield _dirname(mo), [mo]\n\n yield 'share/applications', ['share/applications/solaar.desktop']\n yield 'share/solaar/udev-rules.d', ['rules.d/42-logitech-unify-permissions.rules']\n yield 'share/metainfo', ['share/solaar/io.github.pwr_solaar.solaar.metainfo.xml']\n\n del _dirname\n\n\nsetup(\n name=NAME.lower(),\n version=__version__,\n description='Linux device manager for Logitech receivers, keyboards, mice, and tablets.',\n long_description='''\nSolaar is a Linux device manager for many Logitech peripherals that connect through\nUnifying and other receivers or via USB or Bluetooth.\nSolaar is able to pair/unpair devices with receivers and show and modify some of the\nmodifiable features of devices.\nFor instructions on installing Solaar see https://pwr-solaar.github.io/Solaar/installation'''.strip(),\n author='Daniel Pavel',\n license='GPLv2',\n url='http://pwr-solaar.github.io/Solaar/',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: X11 Applications :: GTK',\n 'Environment :: Console',\n 'Intended Audience :: End Users/Desktop',\n 'License :: DFSG approved',\n 'License :: OSI Approved :: GNU General Public License v2 (GPLv2)',\n 'Natural Language :: English',\n 
'Programming Language :: Python :: 3 :: Only',\n 'Operating System :: POSIX :: Linux',\n 'Topic :: Utilities',\n ],\n platforms=['linux'],\n\n # sudo apt install python-gi python3-gi \\\n # gir1.2-gtk-3.0 gir1.2-notify-0.7 gir1.2-ayatanaappindicator3-0.1\n # os_requires=['gi.repository.GObject (>= 2.0)', 'gi.repository.Gtk (>= 3.0)'],\n python_requires='>=3.7',\n install_requires=[\n 'evdev (>= 1.1.2)',\n 'pyudev (>= 0.13)',\n 'PyYAML (>= 3.12)',\n 'python-xlib (>= 0.27)',\n 'psutil (>= 5.4.3)',\n ],\n extras_require={\n 'report-descriptor': ['hid-parser'],\n 'desktop-notifications': ['Notify (>= 0.7)'],\n },\n package_dir={'': 'lib'},\n packages=['keysyms', 'hidapi', 'logitech_receiver', 'solaar', 'solaar.ui', 'solaar.cli'],\n data_files=list(_data_files()),\n scripts=_glob('bin/*'),\n)\n", "path": "setup.py"}, {"content": "# -*- python-mode -*-\n\n## Copyright (C) 2012-2013 Daniel Pavel\n##\n## This program is free software; you can redistribute it and/or modify\n## it under the terms of the GNU General Public License as published by\n## the Free Software Foundation; either version 2 of the License, or\n## (at your option) any later version.\n##\n## This program is distributed in the hope that it will be useful,\n## but WITHOUT ANY WARRANTY; without even the implied warranty of\n## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n## GNU General Public License for more details.\n##\n## You should have received a copy of the GNU General Public License along\n## with this program; if not, write to the Free Software Foundation, Inc.,\n## 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n\n__version__ = '1.1.7'\nNAME = 'Solaar'\n", "path": "lib/solaar/__init__.py"}]}
1,929
368
gh_patches_debug_37400
rasdani/github-patches
git_diff
Project-MONAI__MONAI-866
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> sphinx documentation warnings **Describe the bug** building the documentation locally generates a couple of warnings: ```bash building [mo]: targets for 0 po files that are out of date building [html]: targets for 16 source files that are out of date updating environment: [new config] 16 added, 0 changed, 0 removed /usr/local/anaconda3/envs/py36/lib/python3.6/site-packages/recommonmark/parser.py:75: UserWarning: Container node skipped: type=document warn("Container node skipped: type={0}".format(mdnode.t)) /usr/local/anaconda3/envs/py36/lib/python3.6/site-packages/recommonmark/parser.py:75: UserWarning: Container node skipped: type=document warn("Container node skipped: type={0}".format(mdnode.t)) reading sources... [100%] visualize WARNING: autodoc: failed to import class 'handlers.CheckpointLoader' from module 'monai'; the following exception was raised: cannot import name 'RemovableEventHandle' WARNING: autodoc: failed to import class 'handlers.CheckpointSaver' from module 'monai'; the following exception was raised: cannot import name 'RemovableEventHandle' WARNING: autodoc: failed to import class 'handlers.ClassificationSaver' from module 'monai'; the following exception was raised: cannot import name 'RemovableEventHandle' WARNING: autodoc: failed to import class 'handlers.MeanDice' from module 'monai'; the following exception was raised: cannot import name 'RemovableEventHandle' WARNING: autodoc: failed to import class 'handlers.ROCAUC' from module 'monai'; the following exception was raised: cannot import name 'RemovableEventHandle' WARNING: autodoc: failed to import class 'handlers.MetricLogger' from module 'monai'; the following exception was raised: cannot import name 'RemovableEventHandle' WARNING: autodoc: failed to import class 'handlers.SegmentationSaver' from module 'monai'; the following exception was raised: cannot import name 'RemovableEventHandle' WARNING: autodoc: failed to import class 'handlers.StatsHandler' from module 'monai'; the following exception was raised: cannot import name 'RemovableEventHandle' WARNING: autodoc: failed to import class 'handlers.TensorBoardStatsHandler' from module 'monai'; the following exception was raised: cannot import name 'RemovableEventHandle' WARNING: autodoc: failed to import class 'handlers.LrScheduleHandler' from module 'monai'; the following exception was raised: cannot import name 'RemovableEventHandle' WARNING: autodoc: failed to import class 'handlers.ValidationHandler' from module 'monai'; the following exception was raised: cannot import name 'RemovableEventHandle' looking for now-outdated files... none found pickling environment... done checking consistency... done preparing documents... done writing output... [100%] visualize ``` **To Reproduce** Steps to reproduce the behavior: 1. Go to `MONAI/docs` 2. ` pip install -U -r requirements.txt` to install doc deps 3. 
Run `make clean; make html` **Expected behavior** no warnings in the building log **Environment (please complete the following information):** ``` MONAI version: 0.2.0+77.g783f6a1 Python version: 3.6.10 |Anaconda, Inc.| (default, Mar 25 2020, 18:53:43) [GCC 4.2.1 Compatible Clang 4.0.1 (tags/RELEASE_401/final)] Numpy version: 1.19.1 Pytorch version: 1.6.0 Optional dependencies: Pytorch Ignite version: 0.3.0 Nibabel version: 3.1.1 scikit-image version: 0.17.2 Pillow version: 7.2.0 Tensorboard version: 2.3.0 ``` </issue> <code> [start of monai/handlers/metric_logger.py] 1 # Copyright 2020 MONAI Consortium 2 # Licensed under the Apache License, Version 2.0 (the "License"); 3 # you may not use this file except in compliance with the License. 4 # You may obtain a copy of the License at 5 # http://www.apache.org/licenses/LICENSE-2.0 6 # Unless required by applicable law or agreed to in writing, software 7 # distributed under the License is distributed on an "AS IS" BASIS, 8 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 # See the License for the specific language governing permissions and 10 # limitations under the License. 11 12 from collections import defaultdict 13 from typing import TYPE_CHECKING, Callable, DefaultDict, List 14 15 from monai.utils import exact_version, optional_import 16 17 Events, _ = optional_import("ignite.engine", "0.3.0", exact_version, "Events") 18 if TYPE_CHECKING: 19 from ignite.engine import Engine, RemovableEventHandle 20 else: 21 Engine, _ = optional_import("ignite.engine", "0.3.0", exact_version, "Engine") 22 RemovableEventHandle, _ = optional_import("ignite.engine", "0.3.0", exact_version, "RemovableEventHandle") 23 24 25 class MetricLogger: 26 def __init__(self, loss_transform: Callable = lambda x: x, metric_transform: Callable = lambda x: x) -> None: 27 self.loss_transform = loss_transform 28 self.metric_transform = metric_transform 29 self.loss: List = [] 30 self.metrics: DefaultDict = defaultdict(list) 31 32 def attach(self, engine: Engine) -> RemovableEventHandle: 33 """ 34 Args: 35 engine: Ignite Engine, it can be a trainer, validator or evaluator. 36 """ 37 return engine.add_event_handler(Events.ITERATION_COMPLETED, self) 38 39 def __call__(self, engine: Engine) -> None: 40 """ 41 Args: 42 engine: Ignite Engine, it can be a trainer, validator or evaluator. 43 """ 44 self.loss.append(self.loss_transform(engine.state.output)) 45 46 for m, v in engine.state.metrics.items(): 47 v = self.metric_transform(v) 48 # # metrics may not be added on the first timestep, pad the list if this is the case 49 # # so that each metric list is the same length as self.loss 50 # if len(self.metrics[m])==0: 51 # self.metrics[m].append([v[0]]*len(self.loss)) 52 53 self.metrics[m].append(v) 54 55 56 metriclogger = MetricLogger 57 [end of monai/handlers/metric_logger.py] [start of monai/handlers/checkpoint_loader.py] 1 # Copyright 2020 MONAI Consortium 2 # Licensed under the Apache License, Version 2.0 (the "License"); 3 # you may not use this file except in compliance with the License. 4 # You may obtain a copy of the License at 5 # http://www.apache.org/licenses/LICENSE-2.0 6 # Unless required by applicable law or agreed to in writing, software 7 # distributed under the License is distributed on an "AS IS" BASIS, 8 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 # See the License for the specific language governing permissions and 10 # limitations under the License. 
11 12 import logging 13 from typing import TYPE_CHECKING, Dict, Optional 14 15 import torch 16 17 from monai.utils import exact_version, optional_import 18 19 Events, _ = optional_import("ignite.engine", "0.3.0", exact_version, "Events") 20 Checkpoint, _ = optional_import("ignite.handlers", "0.3.0", exact_version, "Checkpoint") 21 if TYPE_CHECKING: 22 from ignite.engine import Engine, RemovableEventHandle 23 else: 24 Engine, _ = optional_import("ignite.engine", "0.3.0", exact_version, "Engine") 25 RemovableEventHandle, _ = optional_import("ignite.engine", "0.3.0", exact_version, "RemovableEventHandle") 26 27 28 class CheckpointLoader: 29 """ 30 CheckpointLoader acts as an Ignite handler to load checkpoint data from file. 31 It can load variables for network, optimizer, lr_scheduler, etc. 32 If saving checkpoint after `torch.nn.DataParallel`, need to save `model.module` instead 33 as PyTorch recommended and then use this loader to load the model. 34 35 Args: 36 load_path: the file path of checkpoint, it should be a PyTorch `pth` file. 37 load_dict: target objects that load checkpoint to. examples:: 38 39 {'network': net, 'optimizer': optimizer, 'lr_scheduler': lr_scheduler} 40 41 name: identifier of logging.logger to use, if None, defaulting to ``engine.logger``. 42 43 """ 44 45 def __init__(self, load_path: str, load_dict: Dict, name: Optional[str] = None) -> None: 46 assert load_path is not None, "must provide clear path to load checkpoint." 47 self.load_path = load_path 48 assert load_dict is not None and len(load_dict) > 0, "must provide target objects to load." 49 self.logger = logging.getLogger(name) 50 for k, v in load_dict.items(): 51 if hasattr(v, "module"): 52 load_dict[k] = v.module 53 self.load_dict = load_dict 54 55 self._name = name 56 57 def attach(self, engine: Engine) -> RemovableEventHandle: 58 """ 59 Args: 60 engine: Ignite Engine, it can be a trainer, validator or evaluator. 61 """ 62 if self._name is None: 63 self.logger = engine.logger 64 return engine.add_event_handler(Events.STARTED, self) 65 66 def __call__(self, engine: Engine) -> None: 67 """ 68 Args: 69 engine: Ignite Engine, it can be a trainer, validator or evaluator. 70 """ 71 checkpoint = torch.load(self.load_path) 72 if len(self.load_dict) == 1: 73 key = list(self.load_dict.keys())[0] 74 if not (key in checkpoint): 75 checkpoint = {key: checkpoint} 76 77 Checkpoint.load_objects(to_load=self.load_dict, checkpoint=checkpoint) 78 self.logger.info(f"Restored all variables from {self.load_path}") 79 [end of monai/handlers/checkpoint_loader.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/monai/handlers/checkpoint_loader.py b/monai/handlers/checkpoint_loader.py --- a/monai/handlers/checkpoint_loader.py +++ b/monai/handlers/checkpoint_loader.py @@ -19,10 +19,9 @@ Events, _ = optional_import("ignite.engine", "0.3.0", exact_version, "Events") Checkpoint, _ = optional_import("ignite.handlers", "0.3.0", exact_version, "Checkpoint") if TYPE_CHECKING: - from ignite.engine import Engine, RemovableEventHandle + from ignite.engine import Engine else: Engine, _ = optional_import("ignite.engine", "0.3.0", exact_version, "Engine") - RemovableEventHandle, _ = optional_import("ignite.engine", "0.3.0", exact_version, "RemovableEventHandle") class CheckpointLoader: @@ -54,14 +53,14 @@ self._name = name - def attach(self, engine: Engine) -> RemovableEventHandle: + def attach(self, engine: Engine) -> None: """ Args: engine: Ignite Engine, it can be a trainer, validator or evaluator. """ if self._name is None: self.logger = engine.logger - return engine.add_event_handler(Events.STARTED, self) + engine.add_event_handler(Events.STARTED, self) def __call__(self, engine: Engine) -> None: """ diff --git a/monai/handlers/metric_logger.py b/monai/handlers/metric_logger.py --- a/monai/handlers/metric_logger.py +++ b/monai/handlers/metric_logger.py @@ -16,10 +16,9 @@ Events, _ = optional_import("ignite.engine", "0.3.0", exact_version, "Events") if TYPE_CHECKING: - from ignite.engine import Engine, RemovableEventHandle + from ignite.engine import Engine else: Engine, _ = optional_import("ignite.engine", "0.3.0", exact_version, "Engine") - RemovableEventHandle, _ = optional_import("ignite.engine", "0.3.0", exact_version, "RemovableEventHandle") class MetricLogger: @@ -29,12 +28,12 @@ self.loss: List = [] self.metrics: DefaultDict = defaultdict(list) - def attach(self, engine: Engine) -> RemovableEventHandle: + def attach(self, engine: Engine) -> None: """ Args: engine: Ignite Engine, it can be a trainer, validator or evaluator. """ - return engine.add_event_handler(Events.ITERATION_COMPLETED, self) + engine.add_event_handler(Events.ITERATION_COMPLETED, self) def __call__(self, engine: Engine) -> None: """
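The failing piece here is the module-level `from ignite.engine import Engine, RemovableEventHandle` inside the `TYPE_CHECKING` branch: the docs build evaluates that branch (presumably because the autodoc type-hint tooling sets the flag), and ignite 0.3.0 does not expose `RemovableEventHandle` from `ignite.engine`, so autodoc cannot import the handler modules at all. Since `attach` only needs to register the handler, the diff returns `None` and drops the name. A minimal sketch of the guarded-import shape the handlers end up with (the handler class is illustrative; the import pattern is taken from the diff):

```python
from typing import TYPE_CHECKING

from monai.utils import exact_version, optional_import

Events, _ = optional_import("ignite.engine", "0.3.0", exact_version, "Events")
if TYPE_CHECKING:
    from ignite.engine import Engine  # keep only names the docs build can resolve
else:
    Engine, _ = optional_import("ignite.engine", "0.3.0", exact_version, "Engine")


class ExampleHandler:
    """Toy handler with the post-fix signature: attach() returns None."""

    def attach(self, engine: Engine) -> None:
        engine.add_event_handler(Events.ITERATION_COMPLETED, self)

    def __call__(self, engine: Engine) -> None:
        print(engine.state.iteration)
```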
{"golden_diff": "diff --git a/monai/handlers/checkpoint_loader.py b/monai/handlers/checkpoint_loader.py\n--- a/monai/handlers/checkpoint_loader.py\n+++ b/monai/handlers/checkpoint_loader.py\n@@ -19,10 +19,9 @@\n Events, _ = optional_import(\"ignite.engine\", \"0.3.0\", exact_version, \"Events\")\n Checkpoint, _ = optional_import(\"ignite.handlers\", \"0.3.0\", exact_version, \"Checkpoint\")\n if TYPE_CHECKING:\n- from ignite.engine import Engine, RemovableEventHandle\n+ from ignite.engine import Engine\n else:\n Engine, _ = optional_import(\"ignite.engine\", \"0.3.0\", exact_version, \"Engine\")\n- RemovableEventHandle, _ = optional_import(\"ignite.engine\", \"0.3.0\", exact_version, \"RemovableEventHandle\")\n \n \n class CheckpointLoader:\n@@ -54,14 +53,14 @@\n \n self._name = name\n \n- def attach(self, engine: Engine) -> RemovableEventHandle:\n+ def attach(self, engine: Engine) -> None:\n \"\"\"\n Args:\n engine: Ignite Engine, it can be a trainer, validator or evaluator.\n \"\"\"\n if self._name is None:\n self.logger = engine.logger\n- return engine.add_event_handler(Events.STARTED, self)\n+ engine.add_event_handler(Events.STARTED, self)\n \n def __call__(self, engine: Engine) -> None:\n \"\"\"\ndiff --git a/monai/handlers/metric_logger.py b/monai/handlers/metric_logger.py\n--- a/monai/handlers/metric_logger.py\n+++ b/monai/handlers/metric_logger.py\n@@ -16,10 +16,9 @@\n \n Events, _ = optional_import(\"ignite.engine\", \"0.3.0\", exact_version, \"Events\")\n if TYPE_CHECKING:\n- from ignite.engine import Engine, RemovableEventHandle\n+ from ignite.engine import Engine\n else:\n Engine, _ = optional_import(\"ignite.engine\", \"0.3.0\", exact_version, \"Engine\")\n- RemovableEventHandle, _ = optional_import(\"ignite.engine\", \"0.3.0\", exact_version, \"RemovableEventHandle\")\n \n \n class MetricLogger:\n@@ -29,12 +28,12 @@\n self.loss: List = []\n self.metrics: DefaultDict = defaultdict(list)\n \n- def attach(self, engine: Engine) -> RemovableEventHandle:\n+ def attach(self, engine: Engine) -> None:\n \"\"\"\n Args:\n engine: Ignite Engine, it can be a trainer, validator or evaluator.\n \"\"\"\n- return engine.add_event_handler(Events.ITERATION_COMPLETED, self)\n+ engine.add_event_handler(Events.ITERATION_COMPLETED, self)\n \n def __call__(self, engine: Engine) -> None:\n \"\"\"\n", "issue": "sphinx documentation warnings\n**Describe the bug**\r\nbuilding the documentation locally generates a couple of warnings:\r\n```bash\r\nbuilding [mo]: targets for 0 po files that are out of date\r\nbuilding [html]: targets for 16 source files that are out of date\r\nupdating environment: [new config] 16 added, 0 changed, 0 removed\r\n/usr/local/anaconda3/envs/py36/lib/python3.6/site-packages/recommonmark/parser.py:75: UserWarning: Container node skipped: type=document\r\n warn(\"Container node skipped: type={0}\".format(mdnode.t))\r\n/usr/local/anaconda3/envs/py36/lib/python3.6/site-packages/recommonmark/parser.py:75: UserWarning: Container node skipped: type=document\r\n warn(\"Container node skipped: type={0}\".format(mdnode.t))\r\nreading sources... 
[100%] visualize \r\nWARNING: autodoc: failed to import class 'handlers.CheckpointLoader' from module 'monai'; the following exception was raised:\r\ncannot import name 'RemovableEventHandle'\r\nWARNING: autodoc: failed to import class 'handlers.CheckpointSaver' from module 'monai'; the following exception was raised:\r\ncannot import name 'RemovableEventHandle'\r\nWARNING: autodoc: failed to import class 'handlers.ClassificationSaver' from module 'monai'; the following exception was raised:\r\ncannot import name 'RemovableEventHandle'\r\nWARNING: autodoc: failed to import class 'handlers.MeanDice' from module 'monai'; the following exception was raised:\r\ncannot import name 'RemovableEventHandle'\r\nWARNING: autodoc: failed to import class 'handlers.ROCAUC' from module 'monai'; the following exception was raised:\r\ncannot import name 'RemovableEventHandle'\r\nWARNING: autodoc: failed to import class 'handlers.MetricLogger' from module 'monai'; the following exception was raised:\r\ncannot import name 'RemovableEventHandle'\r\nWARNING: autodoc: failed to import class 'handlers.SegmentationSaver' from module 'monai'; the following exception was raised:\r\ncannot import name 'RemovableEventHandle'\r\nWARNING: autodoc: failed to import class 'handlers.StatsHandler' from module 'monai'; the following exception was raised:\r\ncannot import name 'RemovableEventHandle'\r\nWARNING: autodoc: failed to import class 'handlers.TensorBoardStatsHandler' from module 'monai'; the following exception was raised:\r\ncannot import name 'RemovableEventHandle'\r\nWARNING: autodoc: failed to import class 'handlers.LrScheduleHandler' from module 'monai'; the following exception was raised:\r\ncannot import name 'RemovableEventHandle'\r\nWARNING: autodoc: failed to import class 'handlers.ValidationHandler' from module 'monai'; the following exception was raised:\r\ncannot import name 'RemovableEventHandle'\r\nlooking for now-outdated files... none found\r\npickling environment... done\r\nchecking consistency... done\r\npreparing documents... done\r\nwriting output... [100%] visualize \r\n```\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Go to `MONAI/docs`\r\n2. ` pip install -U -r requirements.txt` to install doc deps\r\n3. 
Run `make clean; make html`\r\n\r\n**Expected behavior**\r\nno warnings in the building log\r\n\r\n**Environment (please complete the following information):**\r\n```\r\nMONAI version: 0.2.0+77.g783f6a1\r\nPython version: 3.6.10 |Anaconda, Inc.| (default, Mar 25 2020, 18:53:43) [GCC 4.2.1 Compatible Clang 4.0.1 (tags/RELEASE_401/final)]\r\nNumpy version: 1.19.1\r\nPytorch version: 1.6.0\r\n\r\nOptional dependencies:\r\nPytorch Ignite version: 0.3.0\r\nNibabel version: 3.1.1\r\nscikit-image version: 0.17.2\r\nPillow version: 7.2.0\r\nTensorboard version: 2.3.0\r\n```\r\n\n", "before_files": [{"content": "# Copyright 2020 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom collections import defaultdict\nfrom typing import TYPE_CHECKING, Callable, DefaultDict, List\n\nfrom monai.utils import exact_version, optional_import\n\nEvents, _ = optional_import(\"ignite.engine\", \"0.3.0\", exact_version, \"Events\")\nif TYPE_CHECKING:\n from ignite.engine import Engine, RemovableEventHandle\nelse:\n Engine, _ = optional_import(\"ignite.engine\", \"0.3.0\", exact_version, \"Engine\")\n RemovableEventHandle, _ = optional_import(\"ignite.engine\", \"0.3.0\", exact_version, \"RemovableEventHandle\")\n\n\nclass MetricLogger:\n def __init__(self, loss_transform: Callable = lambda x: x, metric_transform: Callable = lambda x: x) -> None:\n self.loss_transform = loss_transform\n self.metric_transform = metric_transform\n self.loss: List = []\n self.metrics: DefaultDict = defaultdict(list)\n\n def attach(self, engine: Engine) -> RemovableEventHandle:\n \"\"\"\n Args:\n engine: Ignite Engine, it can be a trainer, validator or evaluator.\n \"\"\"\n return engine.add_event_handler(Events.ITERATION_COMPLETED, self)\n\n def __call__(self, engine: Engine) -> None:\n \"\"\"\n Args:\n engine: Ignite Engine, it can be a trainer, validator or evaluator.\n \"\"\"\n self.loss.append(self.loss_transform(engine.state.output))\n\n for m, v in engine.state.metrics.items():\n v = self.metric_transform(v)\n # # metrics may not be added on the first timestep, pad the list if this is the case\n # # so that each metric list is the same length as self.loss\n # if len(self.metrics[m])==0:\n # self.metrics[m].append([v[0]]*len(self.loss))\n\n self.metrics[m].append(v)\n\n\nmetriclogger = MetricLogger\n", "path": "monai/handlers/metric_logger.py"}, {"content": "# Copyright 2020 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nfrom typing import TYPE_CHECKING, Dict, Optional\n\nimport 
torch\n\nfrom monai.utils import exact_version, optional_import\n\nEvents, _ = optional_import(\"ignite.engine\", \"0.3.0\", exact_version, \"Events\")\nCheckpoint, _ = optional_import(\"ignite.handlers\", \"0.3.0\", exact_version, \"Checkpoint\")\nif TYPE_CHECKING:\n from ignite.engine import Engine, RemovableEventHandle\nelse:\n Engine, _ = optional_import(\"ignite.engine\", \"0.3.0\", exact_version, \"Engine\")\n RemovableEventHandle, _ = optional_import(\"ignite.engine\", \"0.3.0\", exact_version, \"RemovableEventHandle\")\n\n\nclass CheckpointLoader:\n \"\"\"\n CheckpointLoader acts as an Ignite handler to load checkpoint data from file.\n It can load variables for network, optimizer, lr_scheduler, etc.\n If saving checkpoint after `torch.nn.DataParallel`, need to save `model.module` instead\n as PyTorch recommended and then use this loader to load the model.\n\n Args:\n load_path: the file path of checkpoint, it should be a PyTorch `pth` file.\n load_dict: target objects that load checkpoint to. examples::\n\n {'network': net, 'optimizer': optimizer, 'lr_scheduler': lr_scheduler}\n\n name: identifier of logging.logger to use, if None, defaulting to ``engine.logger``.\n\n \"\"\"\n\n def __init__(self, load_path: str, load_dict: Dict, name: Optional[str] = None) -> None:\n assert load_path is not None, \"must provide clear path to load checkpoint.\"\n self.load_path = load_path\n assert load_dict is not None and len(load_dict) > 0, \"must provide target objects to load.\"\n self.logger = logging.getLogger(name)\n for k, v in load_dict.items():\n if hasattr(v, \"module\"):\n load_dict[k] = v.module\n self.load_dict = load_dict\n\n self._name = name\n\n def attach(self, engine: Engine) -> RemovableEventHandle:\n \"\"\"\n Args:\n engine: Ignite Engine, it can be a trainer, validator or evaluator.\n \"\"\"\n if self._name is None:\n self.logger = engine.logger\n return engine.add_event_handler(Events.STARTED, self)\n\n def __call__(self, engine: Engine) -> None:\n \"\"\"\n Args:\n engine: Ignite Engine, it can be a trainer, validator or evaluator.\n \"\"\"\n checkpoint = torch.load(self.load_path)\n if len(self.load_dict) == 1:\n key = list(self.load_dict.keys())[0]\n if not (key in checkpoint):\n checkpoint = {key: checkpoint}\n\n Checkpoint.load_objects(to_load=self.load_dict, checkpoint=checkpoint)\n self.logger.info(f\"Restored all variables from {self.load_path}\")\n", "path": "monai/handlers/checkpoint_loader.py"}]}
3,012
645
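The MONAI record above centers on the TYPE_CHECKING / optional_import pattern that made Sphinx autodoc fail once `RemovableEventHandle` could not be imported. A minimal sketch of that pattern, assuming only the `monai.utils` helpers and the ignite 0.3.0 usage shown in the record's `before_files` (the handler class itself is illustrative):

```python
from typing import TYPE_CHECKING

from monai.utils import exact_version, optional_import

# Import ignite lazily so MONAI (and Sphinx autodoc) can load without ignite installed.
Events, _ = optional_import("ignite.engine", "0.3.0", exact_version, "Events")
if TYPE_CHECKING:
    from ignite.engine import Engine  # real import only for static analysis
else:
    Engine, _ = optional_import("ignite.engine", "0.3.0", exact_version, "Engine")


class ExampleHandler:
    def attach(self, engine: Engine) -> None:
        # Annotating the return as None (instead of ignite's RemovableEventHandle)
        # avoids referencing a symbol that may not be importable, which is the
        # change the golden diff above makes.
        engine.add_event_handler(Events.STARTED, self)
```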
gh_patches_debug_9534
rasdani/github-patches
git_diff
boto__boto-1824
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Documentation of security_groups argument doesn't mention VPC The documentation for the 'security_groups' parameter for a LaunchConfiguration object could be better. As stated on: http://docs.aws.amazon.com/cli/latest/reference/autoscaling/create-launch-configuration.html "--security-groups (list) The names of the security groups with which to associate Amazon EC2 or Amazon VPC instances. Specify Amazon EC2 security groups using security group names, such as websrv . Specify Amazon VPC security groups using security group IDs, such as sg-12345678 ." In the Boto documentation for creating a launch configuration (http://boto.readthedocs.org/en/latest/ref/autoscale.html#boto.ec2.autoscale.launchconfig.LaunchConfiguration), the comment for the 'security_groups' parameter says: "Names of the security groups with which to associate the EC2 instances." In this comment, there is no mention of VPC instances or security group id's as seen in Amazon's own docs. I lost about a day of productivity figuring this out so I'm hoping that the documentation can be improved. </issue> <code> [start of boto/ec2/autoscale/launchconfig.py] 1 # Copyright (c) 2009 Reza Lotun http://reza.lotun.name/ 2 # Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved 3 # 4 # Permission is hereby granted, free of charge, to any person obtaining a 5 # copy of this software and associated documentation files (the 6 # "Software"), to deal in the Software without restriction, including 7 # without limitation the rights to use, copy, modify, merge, publish, dis- 8 # tribute, sublicense, and/or sell copies of the Software, and to permit 9 # persons to whom the Software is furnished to do so, subject to the fol- 10 # lowing conditions: 11 # 12 # The above copyright notice and this permission notice shall be included 13 # in all copies or substantial portions of the Software. 14 # 15 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 16 # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- 17 # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT 18 # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, 19 # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 # IN THE SOFTWARE. 
22 23 from datetime import datetime 24 from boto.resultset import ResultSet 25 from boto.ec2.elb.listelement import ListElement 26 import boto.utils 27 import base64 28 29 # this should use the corresponding object from boto.ec2 30 31 32 class Ebs(object): 33 def __init__(self, connection=None, snapshot_id=None, volume_size=None): 34 self.connection = connection 35 self.snapshot_id = snapshot_id 36 self.volume_size = volume_size 37 38 def __repr__(self): 39 return 'Ebs(%s, %s)' % (self.snapshot_id, self.volume_size) 40 41 def startElement(self, name, attrs, connection): 42 pass 43 44 def endElement(self, name, value, connection): 45 if name == 'SnapshotId': 46 self.snapshot_id = value 47 elif name == 'VolumeSize': 48 self.volume_size = value 49 50 51 class InstanceMonitoring(object): 52 def __init__(self, connection=None, enabled='false'): 53 self.connection = connection 54 self.enabled = enabled 55 56 def __repr__(self): 57 return 'InstanceMonitoring(%s)' % self.enabled 58 59 def startElement(self, name, attrs, connection): 60 pass 61 62 def endElement(self, name, value, connection): 63 if name == 'Enabled': 64 self.enabled = value 65 66 67 # this should use the BlockDeviceMapping from boto.ec2.blockdevicemapping 68 class BlockDeviceMapping(object): 69 def __init__(self, connection=None, device_name=None, virtual_name=None): 70 self.connection = connection 71 self.device_name = None 72 self.virtual_name = None 73 self.ebs = None 74 75 def __repr__(self): 76 return 'BlockDeviceMapping(%s, %s)' % (self.device_name, 77 self.virtual_name) 78 79 def startElement(self, name, attrs, connection): 80 if name == 'Ebs': 81 self.ebs = Ebs(self) 82 return self.ebs 83 84 def endElement(self, name, value, connection): 85 if name == 'DeviceName': 86 self.device_name = value 87 elif name == 'VirtualName': 88 self.virtual_name = value 89 90 91 class LaunchConfiguration(object): 92 def __init__(self, connection=None, name=None, image_id=None, 93 key_name=None, security_groups=None, user_data=None, 94 instance_type='m1.small', kernel_id=None, 95 ramdisk_id=None, block_device_mappings=None, 96 instance_monitoring=False, spot_price=None, 97 instance_profile_name=None, ebs_optimized=False, 98 associate_public_ip_address=None): 99 """ 100 A launch configuration. 101 102 :type name: str 103 :param name: Name of the launch configuration to create. 104 105 :type image_id: str 106 :param image_id: Unique ID of the Amazon Machine Image (AMI) which was 107 assigned during registration. 108 109 :type key_name: str 110 :param key_name: The name of the EC2 key pair. 111 112 :type security_groups: list 113 :param security_groups: Names of the security groups with which to 114 associate the EC2 instances. 115 116 :type user_data: str 117 :param user_data: The user data available to launched EC2 instances. 118 119 :type instance_type: str 120 :param instance_type: The instance type 121 122 :type kern_id: str 123 :param kern_id: Kernel id for instance 124 125 :type ramdisk_id: str 126 :param ramdisk_id: RAM disk id for instance 127 128 :type block_device_mappings: list 129 :param block_device_mappings: Specifies how block devices are exposed 130 for instances 131 132 :type instance_monitoring: bool 133 :param instance_monitoring: Whether instances in group are launched 134 with detailed monitoring. 135 136 :type spot_price: float 137 :param spot_price: The spot price you are bidding. Only applies 138 if you are building an autoscaling group with spot instances. 
139 140 :type instance_profile_name: string 141 :param instance_profile_name: The name or the Amazon Resource 142 Name (ARN) of the instance profile associated with the IAM 143 role for the instance. 144 145 :type ebs_optimized: bool 146 :param ebs_optimized: Specifies whether the instance is optimized 147 for EBS I/O (true) or not (false). 148 149 :type associate_public_ip_address: bool 150 :param associate_public_ip_address: Used for Auto Scaling groups that launch instances into an Amazon Virtual Private Cloud. 151 Specifies whether to assign a public IP address to each instance launched in a Amazon VPC. 152 """ 153 self.connection = connection 154 self.name = name 155 self.instance_type = instance_type 156 self.block_device_mappings = block_device_mappings 157 self.key_name = key_name 158 sec_groups = security_groups or [] 159 self.security_groups = ListElement(sec_groups) 160 self.image_id = image_id 161 self.ramdisk_id = ramdisk_id 162 self.created_time = None 163 self.kernel_id = kernel_id 164 self.user_data = user_data 165 self.created_time = None 166 self.instance_monitoring = instance_monitoring 167 self.spot_price = spot_price 168 self.instance_profile_name = instance_profile_name 169 self.launch_configuration_arn = None 170 self.ebs_optimized = ebs_optimized 171 self.associate_public_ip_address = associate_public_ip_address 172 173 def __repr__(self): 174 return 'LaunchConfiguration:%s' % self.name 175 176 def startElement(self, name, attrs, connection): 177 if name == 'SecurityGroups': 178 return self.security_groups 179 elif name == 'BlockDeviceMappings': 180 self.block_device_mappings = ResultSet([('member', 181 BlockDeviceMapping)]) 182 return self.block_device_mappings 183 elif name == 'InstanceMonitoring': 184 self.instance_monitoring = InstanceMonitoring(self) 185 return self.instance_monitoring 186 187 def endElement(self, name, value, connection): 188 if name == 'InstanceType': 189 self.instance_type = value 190 elif name == 'LaunchConfigurationName': 191 self.name = value 192 elif name == 'KeyName': 193 self.key_name = value 194 elif name == 'ImageId': 195 self.image_id = value 196 elif name == 'CreatedTime': 197 self.created_time = boto.utils.parse_ts(value) 198 elif name == 'KernelId': 199 self.kernel_id = value 200 elif name == 'RamdiskId': 201 self.ramdisk_id = value 202 elif name == 'UserData': 203 try: 204 self.user_data = base64.b64decode(value) 205 except TypeError: 206 self.user_data = value 207 elif name == 'LaunchConfigurationARN': 208 self.launch_configuration_arn = value 209 elif name == 'InstanceMonitoring': 210 self.instance_monitoring = value 211 elif name == 'SpotPrice': 212 self.spot_price = float(value) 213 elif name == 'IamInstanceProfile': 214 self.instance_profile_name = value 215 elif name == 'EbsOptimized': 216 self.ebs_optimized = True if value.lower() == 'true' else False 217 else: 218 setattr(self, name, value) 219 220 def delete(self): 221 """ Delete this launch configuration. """ 222 return self.connection.delete_launch_configuration(self.name) 223 [end of boto/ec2/autoscale/launchconfig.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/boto/ec2/autoscale/launchconfig.py b/boto/ec2/autoscale/launchconfig.py --- a/boto/ec2/autoscale/launchconfig.py +++ b/boto/ec2/autoscale/launchconfig.py @@ -110,8 +110,9 @@ :param key_name: The name of the EC2 key pair. :type security_groups: list - :param security_groups: Names of the security groups with which to - associate the EC2 instances. + :param security_groups: Names or security group id's of the security + groups with which to associate the EC2 instances or VPC instances, + respectively. :type user_data: str :param user_data: The user data available to launched EC2 instances.
{"golden_diff": "diff --git a/boto/ec2/autoscale/launchconfig.py b/boto/ec2/autoscale/launchconfig.py\n--- a/boto/ec2/autoscale/launchconfig.py\n+++ b/boto/ec2/autoscale/launchconfig.py\n@@ -110,8 +110,9 @@\n :param key_name: The name of the EC2 key pair.\n \n :type security_groups: list\n- :param security_groups: Names of the security groups with which to\n- associate the EC2 instances.\n+ :param security_groups: Names or security group id's of the security\n+ groups with which to associate the EC2 instances or VPC instances,\n+ respectively.\n \n :type user_data: str\n :param user_data: The user data available to launched EC2 instances.\n", "issue": "Documentation of security_groups argument doesn't mention VPC\nThe documentation for the 'security_groups' parameter for a LaunchConfiguration object could be better. As stated on:\n\nhttp://docs.aws.amazon.com/cli/latest/reference/autoscaling/create-launch-configuration.html\n\n\"--security-groups (list)\n\nThe names of the security groups with which to associate Amazon EC2 or Amazon VPC instances. Specify Amazon EC2 security groups using security group names, such as websrv . Specify Amazon VPC security groups using security group IDs, such as sg-12345678 .\"\n\nIn the Boto documentation for creating a launch configuration (http://boto.readthedocs.org/en/latest/ref/autoscale.html#boto.ec2.autoscale.launchconfig.LaunchConfiguration), the comment for the 'security_groups' parameter says:\n\n\"Names of the security groups with which to associate the EC2 instances.\"\n\nIn this comment, there is no mention of VPC instances or security group id's as seen in Amazon's own docs. I lost about a day of productivity figuring this out so I'm hoping that the documentation can be improved.\n\n", "before_files": [{"content": "# Copyright (c) 2009 Reza Lotun http://reza.lotun.name/\n# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish, dis-\n# tribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the fol-\n# lowing conditions:\n#\n# The above copyright notice and this permission notice shall be included\n# in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\n# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-\n# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT\n# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\n# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n# IN THE SOFTWARE.\n\nfrom datetime import datetime\nfrom boto.resultset import ResultSet\nfrom boto.ec2.elb.listelement import ListElement\nimport boto.utils\nimport base64\n\n# this should use the corresponding object from boto.ec2\n\n\nclass Ebs(object):\n def __init__(self, connection=None, snapshot_id=None, volume_size=None):\n self.connection = connection\n self.snapshot_id = snapshot_id\n self.volume_size = volume_size\n\n def __repr__(self):\n return 'Ebs(%s, %s)' % (self.snapshot_id, self.volume_size)\n\n def startElement(self, name, attrs, connection):\n pass\n\n def endElement(self, name, value, connection):\n if name == 'SnapshotId':\n self.snapshot_id = value\n elif name == 'VolumeSize':\n self.volume_size = value\n\n\nclass InstanceMonitoring(object):\n def __init__(self, connection=None, enabled='false'):\n self.connection = connection\n self.enabled = enabled\n\n def __repr__(self):\n return 'InstanceMonitoring(%s)' % self.enabled\n\n def startElement(self, name, attrs, connection):\n pass\n\n def endElement(self, name, value, connection):\n if name == 'Enabled':\n self.enabled = value\n\n\n# this should use the BlockDeviceMapping from boto.ec2.blockdevicemapping\nclass BlockDeviceMapping(object):\n def __init__(self, connection=None, device_name=None, virtual_name=None):\n self.connection = connection\n self.device_name = None\n self.virtual_name = None\n self.ebs = None\n\n def __repr__(self):\n return 'BlockDeviceMapping(%s, %s)' % (self.device_name,\n self.virtual_name)\n\n def startElement(self, name, attrs, connection):\n if name == 'Ebs':\n self.ebs = Ebs(self)\n return self.ebs\n\n def endElement(self, name, value, connection):\n if name == 'DeviceName':\n self.device_name = value\n elif name == 'VirtualName':\n self.virtual_name = value\n\n\nclass LaunchConfiguration(object):\n def __init__(self, connection=None, name=None, image_id=None,\n key_name=None, security_groups=None, user_data=None,\n instance_type='m1.small', kernel_id=None,\n ramdisk_id=None, block_device_mappings=None,\n instance_monitoring=False, spot_price=None,\n instance_profile_name=None, ebs_optimized=False,\n associate_public_ip_address=None):\n \"\"\"\n A launch configuration.\n\n :type name: str\n :param name: Name of the launch configuration to create.\n\n :type image_id: str\n :param image_id: Unique ID of the Amazon Machine Image (AMI) which was\n assigned during registration.\n\n :type key_name: str\n :param key_name: The name of the EC2 key pair.\n\n :type security_groups: list\n :param security_groups: Names of the security groups with which to\n associate the EC2 instances.\n\n :type user_data: str\n :param user_data: The user data available to launched EC2 instances.\n\n :type instance_type: str\n :param instance_type: The instance type\n\n :type kern_id: str\n :param kern_id: Kernel id for instance\n\n :type ramdisk_id: str\n :param ramdisk_id: RAM disk id for instance\n\n :type block_device_mappings: list\n :param block_device_mappings: Specifies how block devices are exposed\n for instances\n\n :type instance_monitoring: bool\n :param instance_monitoring: Whether instances in group are launched\n with detailed monitoring.\n\n :type spot_price: float\n :param spot_price: The spot price you are bidding. 
Only applies\n if you are building an autoscaling group with spot instances.\n\n :type instance_profile_name: string\n :param instance_profile_name: The name or the Amazon Resource\n Name (ARN) of the instance profile associated with the IAM\n role for the instance.\n\n :type ebs_optimized: bool\n :param ebs_optimized: Specifies whether the instance is optimized\n for EBS I/O (true) or not (false).\n\n :type associate_public_ip_address: bool\n :param associate_public_ip_address: Used for Auto Scaling groups that launch instances into an Amazon Virtual Private Cloud. \n Specifies whether to assign a public IP address to each instance launched in a Amazon VPC.\n \"\"\"\n self.connection = connection\n self.name = name\n self.instance_type = instance_type\n self.block_device_mappings = block_device_mappings\n self.key_name = key_name\n sec_groups = security_groups or []\n self.security_groups = ListElement(sec_groups)\n self.image_id = image_id\n self.ramdisk_id = ramdisk_id\n self.created_time = None\n self.kernel_id = kernel_id\n self.user_data = user_data\n self.created_time = None\n self.instance_monitoring = instance_monitoring\n self.spot_price = spot_price\n self.instance_profile_name = instance_profile_name\n self.launch_configuration_arn = None\n self.ebs_optimized = ebs_optimized\n self.associate_public_ip_address = associate_public_ip_address\n\n def __repr__(self):\n return 'LaunchConfiguration:%s' % self.name\n\n def startElement(self, name, attrs, connection):\n if name == 'SecurityGroups':\n return self.security_groups\n elif name == 'BlockDeviceMappings':\n self.block_device_mappings = ResultSet([('member',\n BlockDeviceMapping)])\n return self.block_device_mappings\n elif name == 'InstanceMonitoring':\n self.instance_monitoring = InstanceMonitoring(self)\n return self.instance_monitoring\n\n def endElement(self, name, value, connection):\n if name == 'InstanceType':\n self.instance_type = value\n elif name == 'LaunchConfigurationName':\n self.name = value\n elif name == 'KeyName':\n self.key_name = value\n elif name == 'ImageId':\n self.image_id = value\n elif name == 'CreatedTime':\n self.created_time = boto.utils.parse_ts(value)\n elif name == 'KernelId':\n self.kernel_id = value\n elif name == 'RamdiskId':\n self.ramdisk_id = value\n elif name == 'UserData':\n try:\n self.user_data = base64.b64decode(value)\n except TypeError:\n self.user_data = value\n elif name == 'LaunchConfigurationARN':\n self.launch_configuration_arn = value\n elif name == 'InstanceMonitoring':\n self.instance_monitoring = value\n elif name == 'SpotPrice':\n self.spot_price = float(value)\n elif name == 'IamInstanceProfile':\n self.instance_profile_name = value\n elif name == 'EbsOptimized':\n self.ebs_optimized = True if value.lower() == 'true' else False\n else:\n setattr(self, name, value)\n\n def delete(self):\n \"\"\" Delete this launch configuration. \"\"\"\n return self.connection.delete_launch_configuration(self.name)\n", "path": "boto/ec2/autoscale/launchconfig.py"}]}
3,157
180
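Since the fix in this record is purely a docstring change, a short usage sketch may make the distinction concrete. Assuming the boto 2 `LaunchConfiguration` constructor shown in the code listing (the names and IDs below are placeholders):

```python
from boto.ec2.autoscale import LaunchConfiguration

# EC2-Classic instances: pass security group *names*.
lc_classic = LaunchConfiguration(
    name="web-classic-lc",
    image_id="ami-12345678",
    security_groups=["websrv"],
)

# VPC instances: pass security group *IDs* instead.
lc_vpc = LaunchConfiguration(
    name="web-vpc-lc",
    image_id="ami-12345678",
    security_groups=["sg-12345678"],
)
```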
gh_patches_debug_2662
rasdani/github-patches
git_diff
scikit-image__scikit-image-6343
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> imageIO warnings due to v2 -> v3 migration ## Description As of imageIO 2.16.0 (Feb22) there are now a v2 and v3 namespaces in addition to the top-level namespace. As of 2.16.2 (released Apr22) directly using the top-level namespace results in warnings to either explicitly opt-into the v3 API or opt-out and import the v2. This in turn causes warnings when using `skimage.io.imread`. I suggest that this is a good first issue as there is no API design choices here (at least to start) and only needs the ```python try: import newway except ImportError: import old way ``` dance. The warnings look like (lifted from a test suite): ``` ____________________________________________________________________________ ReaderSequence.test_slice_of_slice ____________________________________________________________________________ pims/tests/test_imseq.py:256: in setUp self.v = self.klass(self.filename, **self.kwargs) pims/image_sequence.py:217: in __init__ with self.reader_cls(self._filepaths[0], **self.kwargs) as reader: pims/image_reader.py:60: in __init__ self._data = Frame(imread(filename, **kwargs), frame_no=0) ../../../../.pybuild/bleeding/lib/python3.11/contextlib.py:155: in __exit__ self.gen.throw(typ, value, traceback) ../../../../.virtualenvs/bleeding/lib/python3.11/site-packages/skimage/io/util.py:43: in file_or_url_context yield resource_name ../../../../.virtualenvs/bleeding/lib/python3.11/site-packages/skimage/io/_io.py:53: in imread img = call_plugin('imread', fname, plugin=plugin, **plugin_args) ../../../../.virtualenvs/bleeding/lib/python3.11/site-packages/skimage/io/manage_plugins.py:207: in call_plugin return func(*args, **kwargs) ../../../../.virtualenvs/bleeding/lib/python3.11/site-packages/skimage/io/_plugins/imageio_plugin.py:10: in imread return np.asarray(imageio_imread(*args, **kwargs)) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ uri = '/home/tcaswell/source/bnl/soft-matter/pims/pims/tests/data/image_sequence3d/file001.png', format = None, kwargs = {} def imread(uri, format=None, **kwargs): """imread(uri, format=None, **kwargs) Reads an image from the specified file. Returns a numpy array, which comes with a dict of meta data at its 'meta' attribute. Note that the image data is returned as-is, and may not always have a dtype of uint8 (and thus may differ from what e.g. PIL returns). Parameters ---------- uri : {str, pathlib.Path, bytes, file} The resource to load the image from, e.g. a filename, pathlib.Path, http address or file object, see the docs for more info. format : str The format to use to read the file. By default imageio selects the appropriate for you based on the filename and its contents. kwargs : ... Further keyword arguments are passed to the reader. See :func:`.help` to see what arguments are available for a particular format. """ > warnings.warn( "Starting with ImageIO v3 the behavior of this function will switch to that of" " iio.v3.imread. To keep the current behavior (and make this warning dissapear)" " use `import imageio.v2 as imageio` or call `imageio.v2.imread` directly.", DeprecationWarning, ) E DeprecationWarning: Starting with ImageIO v3 the behavior of this function will switch to that of iio.v3.imread. 
To keep the current behavior (and make this warning dissapear) use `import imageio.v2 as imageio` or call `imageio.v2.imread` directly. ../../../../.virtualenvs/bleeding/lib/python3.11/site-packages/imageio/__init__.py:89: DeprecationWarning ``` </issue> <code> [start of skimage/io/_plugins/imageio_plugin.py] 1 __all__ = ['imread', 'imsave'] 2 3 from functools import wraps 4 import numpy as np 5 from imageio import imread as imageio_imread, imsave 6 7 8 @wraps(imageio_imread) 9 def imread(*args, **kwargs): 10 return np.asarray(imageio_imread(*args, **kwargs)) 11 [end of skimage/io/_plugins/imageio_plugin.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/skimage/io/_plugins/imageio_plugin.py b/skimage/io/_plugins/imageio_plugin.py --- a/skimage/io/_plugins/imageio_plugin.py +++ b/skimage/io/_plugins/imageio_plugin.py @@ -2,7 +2,12 @@ from functools import wraps import numpy as np -from imageio import imread as imageio_imread, imsave + +try: + # Try using the v2 API directly to avoid a warning from imageio >= 2.16.2 + from imageio.v2 import imread as imageio_imread, imsave +except ImportError: + from imageio import imread as imageio_imread, imsave @wraps(imageio_imread)
{"golden_diff": "diff --git a/skimage/io/_plugins/imageio_plugin.py b/skimage/io/_plugins/imageio_plugin.py\n--- a/skimage/io/_plugins/imageio_plugin.py\n+++ b/skimage/io/_plugins/imageio_plugin.py\n@@ -2,7 +2,12 @@\n \n from functools import wraps\n import numpy as np\n-from imageio import imread as imageio_imread, imsave\n+\n+try:\n+ # Try using the v2 API directly to avoid a warning from imageio >= 2.16.2\n+ from imageio.v2 import imread as imageio_imread, imsave\n+except ImportError:\n+ from imageio import imread as imageio_imread, imsave\n \n \n @wraps(imageio_imread)\n", "issue": "imageIO warnings due to v2 -> v3 migration\n## Description\r\n\r\nAs of imageIO 2.16.0 (Feb22) there are now a v2 and v3 namespaces in addition to the top-level namespace. As of 2.16.2 (released Apr22) directly using the top-level namespace results in warnings to either explicitly opt-into the v3 API or opt-out and import the v2. \r\n\r\nThis in turn causes warnings when using `skimage.io.imread`. \r\n\r\nI suggest that this is a good first issue as there is no API design choices here (at least to start) and only needs the \r\n```python\r\ntry: \r\n import newway\r\nexcept ImportError: \r\n import old way\r\n```\r\ndance.\r\n\r\nThe warnings look like (lifted from a test suite):\r\n\r\n```\r\n____________________________________________________________________________ ReaderSequence.test_slice_of_slice ____________________________________________________________________________\r\npims/tests/test_imseq.py:256: in setUp\r\n self.v = self.klass(self.filename, **self.kwargs)\r\npims/image_sequence.py:217: in __init__\r\n with self.reader_cls(self._filepaths[0], **self.kwargs) as reader:\r\npims/image_reader.py:60: in __init__\r\n self._data = Frame(imread(filename, **kwargs), frame_no=0)\r\n../../../../.pybuild/bleeding/lib/python3.11/contextlib.py:155: in __exit__\r\n self.gen.throw(typ, value, traceback)\r\n../../../../.virtualenvs/bleeding/lib/python3.11/site-packages/skimage/io/util.py:43: in file_or_url_context\r\n yield resource_name\r\n../../../../.virtualenvs/bleeding/lib/python3.11/site-packages/skimage/io/_io.py:53: in imread\r\n img = call_plugin('imread', fname, plugin=plugin, **plugin_args)\r\n../../../../.virtualenvs/bleeding/lib/python3.11/site-packages/skimage/io/manage_plugins.py:207: in call_plugin\r\n return func(*args, **kwargs)\r\n../../../../.virtualenvs/bleeding/lib/python3.11/site-packages/skimage/io/_plugins/imageio_plugin.py:10: in imread\r\n return np.asarray(imageio_imread(*args, **kwargs))\r\n_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ \r\n\r\nuri = '/home/tcaswell/source/bnl/soft-matter/pims/pims/tests/data/image_sequence3d/file001.png', format = None, kwargs = {}\r\n\r\n def imread(uri, format=None, **kwargs):\r\n \"\"\"imread(uri, format=None, **kwargs)\r\n \r\n Reads an image from the specified file. Returns a numpy array, which\r\n comes with a dict of meta data at its 'meta' attribute.\r\n \r\n Note that the image data is returned as-is, and may not always have\r\n a dtype of uint8 (and thus may differ from what e.g. PIL returns).\r\n \r\n Parameters\r\n ----------\r\n uri : {str, pathlib.Path, bytes, file}\r\n The resource to load the image from, e.g. a filename, pathlib.Path,\r\n http address or file object, see the docs for more info.\r\n format : str\r\n The format to use to read the file. 
By default imageio selects\r\n the appropriate for you based on the filename and its contents.\r\n kwargs : ...\r\n Further keyword arguments are passed to the reader. See :func:`.help`\r\n to see what arguments are available for a particular format.\r\n \"\"\"\r\n \r\n> warnings.warn(\r\n \"Starting with ImageIO v3 the behavior of this function will switch to that of\"\r\n \" iio.v3.imread. To keep the current behavior (and make this warning dissapear)\"\r\n \" use `import imageio.v2 as imageio` or call `imageio.v2.imread` directly.\",\r\n DeprecationWarning,\r\n )\r\nE DeprecationWarning: Starting with ImageIO v3 the behavior of this function will switch to that of iio.v3.imread. To keep the current behavior (and make this warning dissapear) use `import imageio.v2 as imageio` or call `imageio.v2.imread` directly.\r\n\r\n../../../../.virtualenvs/bleeding/lib/python3.11/site-packages/imageio/__init__.py:89: DeprecationWarning\r\n\r\n```\n", "before_files": [{"content": "__all__ = ['imread', 'imsave']\n\nfrom functools import wraps\nimport numpy as np\nfrom imageio import imread as imageio_imread, imsave\n\n\n@wraps(imageio_imread)\ndef imread(*args, **kwargs):\n return np.asarray(imageio_imread(*args, **kwargs))\n", "path": "skimage/io/_plugins/imageio_plugin.py"}]}
1,634
165
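The version-guarded import the issue calls the try/except "dance" generalizes beyond this one plugin. A minimal sketch of the same pattern, assuming imageio >= 2.16 exposes the `imageio.v2` namespace (the wrapper name is illustrative, not scikit-image's actual plugin function):

```python
import numpy as np

try:
    # imageio >= 2.16: use the v2 namespace explicitly to silence the
    # DeprecationWarning about the upcoming v3 behavior change.
    from imageio.v2 import imread as imageio_imread
except ImportError:
    # Older imageio: the top-level function already has the v2 behavior.
    from imageio import imread as imageio_imread


def read_image(uri, **kwargs):
    # Return a plain ndarray rather than imageio's Array subclass.
    return np.asarray(imageio_imread(uri, **kwargs))
```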
gh_patches_debug_9072
rasdani/github-patches
git_diff
RedHatInsights__insights-core-1524
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Ensure current working directory is on the python path with insights-run `insights-run` can't invoke arbitrary scripts because it doesn't include the current working directory in the python path like `python -m insights` does. </issue> <code> [start of insights/__init__.py] 1 from __future__ import print_function 2 import logging 3 import pkgutil 4 import os 5 import yaml 6 from .core import Scannable, LogFileOutput, Parser, IniConfigFile # noqa: F401 7 from .core import FileListing, LegacyItemAccess, SysconfigOptions # noqa: F401 8 from .core import YAMLParser, JSONParser, XMLParser, CommandParser # noqa: F401 9 from .core import AttributeDict # noqa: F401 10 from .core import Syslog # noqa: F401 11 from .core.archives import COMPRESSION_TYPES, extract # noqa: F401 12 from .core import dr # noqa: F401 13 from .core.context import ClusterArchiveContext, HostContext, HostArchiveContext # noqa: F401 14 from .core.dr import SkipComponent # noqa: F401 15 from .core.hydration import create_context 16 from .core.plugins import combiner, fact, metadata, parser, rule # noqa: F401 17 from .core.plugins import datasource, condition, incident # noqa: F401 18 from .core.plugins import make_response, make_metadata, make_fingerprint # noqa: F401 19 from .core.plugins import make_pass, make_fail # noqa: F401 20 from .core.filters import add_filter, apply_filters, get_filters # noqa: F401 21 from .formats import get_formatter 22 from .parsers import get_active_lines # noqa: F401 23 from .util import defaults # noqa: F401 24 25 log = logging.getLogger(__name__) 26 27 28 package_info = dict((k, None) for k in ["RELEASE", "COMMIT", "VERSION", "NAME"]) 29 30 31 for name in package_info: 32 package_info[name] = pkgutil.get_data(__name__, name).strip().decode("utf-8") 33 34 35 def get_nvr(): 36 return "{0}-{1}-{2}".format(package_info["NAME"], 37 package_info["VERSION"], 38 package_info["RELEASE"]) 39 40 41 RULES_STATUS = {} 42 """ 43 Mapping of dictionaries containing nvr and commitid for each rule repo included 44 in this instance 45 46 {"rule_repo_1": {"version": nvr(), "commit": sha1}} 47 """ 48 49 50 def add_status(name, nvr, commit): 51 """ 52 Rule repositories should call this method in their package __init__ to 53 register their version information. 54 """ 55 RULES_STATUS[name] = {"version": nvr, "commit": commit} 56 57 58 def process_dir(broker, root, graph, context, use_pandas=False): 59 ctx = create_context(root, context) 60 log.debug("Processing %s with %s" % (root, ctx)) 61 62 if isinstance(ctx, ClusterArchiveContext): 63 from .core.cluster import process_cluster 64 archives = [f for f in ctx.all_files if f.endswith(COMPRESSION_TYPES)] 65 return process_cluster(archives, use_pandas=use_pandas, broker=broker) 66 67 broker[ctx.__class__] = ctx 68 broker = dr.run(graph, broker=broker) 69 return broker 70 71 72 def _run(broker, graph=None, root=None, context=None, use_pandas=False): 73 """ 74 run is a general interface that is meant for stand alone scripts to use 75 when executing insights components. 76 77 Args: 78 root (str): None will causes a host collection in which command and 79 file specs are run. A directory or archive path will cause 80 collection from the directory or archive, and only file type specs 81 or those that depend on `insights.core.context.HostArchiveContext` 82 will execute. 83 component (function or class): The component to execute. 
Will only execute 84 the component and its dependency graph. If None, all components with 85 met dependencies will execute. 86 87 Returns: 88 broker: object containing the result of the evaluation. 89 """ 90 91 if not root: 92 context = context or HostContext 93 broker[context] = context() 94 return dr.run(graph, broker=broker) 95 96 if os.path.isdir(root): 97 return process_dir(broker, root, graph, context, use_pandas) 98 else: 99 with extract(root) as ex: 100 return process_dir(broker, ex.tmp_dir, graph, context, use_pandas) 101 102 103 def apply_configs(configs): 104 """ 105 Configures components. They can be enabled or disabled, have timeouts set 106 if applicable, and have metadata customized. Valid keys are name, enabled, 107 metadata, and timeout. 108 109 Args: 110 configs (list): a list of dictionaries with the following keys: 111 name, enabled, metadata, and timeout. All keys are optional except 112 name. 113 114 name is the prefix or exact name of any loaded component. Any 115 component starting with name will have the associated configuration 116 applied. 117 118 enabled is whether the matching components will execute even if 119 their dependencies are met. Defaults to True. 120 121 timeout sets the class level timeout attribute of any component so 122 long as the attribute already exists. 123 124 metadata is any dictionary that you want to attach to the 125 component. The dictionary can be retrieved by the component at 126 runtime. 127 """ 128 delegate_keys = sorted(dr.DELEGATES, key=dr.get_name) 129 for comp_cfg in configs: 130 name = comp_cfg["name"] 131 for c in delegate_keys: 132 delegate = dr.DELEGATES[c] 133 cname = dr.get_name(c) 134 if cname.startswith(name): 135 dr.ENABLED[c] = comp_cfg.get("enabled", True) 136 delegate.metadata.update(comp_cfg.get("metadata", {})) 137 for k, v in delegate.metadata.items(): 138 if hasattr(c, k): 139 setattr(c, k, v) 140 if hasattr(c, "timeout"): 141 c.timeout = comp_cfg.get("timeout", c.timeout) 142 if cname == name: 143 break 144 145 146 def _load_context(path): 147 if path is None: 148 return 149 150 if "." not in path: 151 path = ".".join(["insights.core.context", path]) 152 return dr.get_component(path) 153 154 155 def run(component=None, root=None, print_summary=False, 156 context=None, use_pandas=False, 157 print_component=None): 158 159 from .core import dr 160 dr.load_components("insights.specs.default") 161 dr.load_components("insights.specs.insights_archive") 162 dr.load_components("insights.specs.sos_archive") 163 dr.load_components("insights.specs.jdr_archive") 164 165 args = None 166 formatter = None 167 if print_summary: 168 import argparse 169 import logging 170 p = argparse.ArgumentParser(add_help=False) 171 p.add_argument("archive", nargs="?", help="Archive or directory to analyze.") 172 p.add_argument("-p", "--plugins", default="", help="Comma-separated list without spaces of package(s) or module(s) containing plugins.") 173 p.add_argument("-c", "--config", help="Configure components.") 174 p.add_argument("-v", "--verbose", help="Verbose output.", action="store_true") 175 p.add_argument("-f", "--format", help="Output format.", default="insights.formats.text") 176 p.add_argument("-D", "--debug", help="Verbose debug output.", action="store_true") 177 p.add_argument("--context", help="Execution Context. 
Defaults to HostContext if an archive isn't passed.") 178 p.add_argument("--pandas", action="store_true", help="Use pandas dataframes with cluster rules.") 179 180 class Args(object): 181 pass 182 183 args = Args() 184 p.parse_known_args(namespace=args) 185 p = argparse.ArgumentParser(parents=[p]) 186 args.format = "insights.formats._json" if args.format == "json" else args.format 187 args.format = "insights.formats._yaml" if args.format == "yaml" else args.format 188 fmt = args.format if "." in args.format else "insights.formats." + args.format 189 Formatter = dr.get_component(fmt) 190 if not Formatter: 191 dr.load_components(fmt, continue_on_error=False) 192 Formatter = get_formatter(fmt) 193 Formatter.configure(p) 194 p.parse_args(namespace=args) 195 formatter = Formatter(args) 196 197 logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO if args.verbose else logging.ERROR) 198 context = _load_context(args.context) or context 199 use_pandas = args.pandas or use_pandas 200 201 root = args.archive or root 202 if root: 203 root = os.path.realpath(root) 204 205 plugins = [] 206 if args.plugins: 207 for path in args.plugins.split(","): 208 path = path.strip() 209 if path.endswith(".py"): 210 path, _ = os.path.splitext(path) 211 path = path.rstrip("/").replace("/", ".") 212 plugins.append(path) 213 214 for p in plugins: 215 dr.load_components(p, continue_on_error=False) 216 217 if args.config: 218 with open(args.config) as f: 219 apply_configs(yaml.load(f)) 220 221 if component is None: 222 component = [] 223 plugins = tuple(plugins) 224 for c in dr.DELEGATES: 225 if c.__module__.startswith(plugins): 226 component.append(c) 227 228 if component: 229 if not isinstance(component, (list, set)): 230 component = [component] 231 graph = {} 232 for c in component: 233 graph.update(dr.get_dependency_graph(c)) 234 else: 235 graph = dr.COMPONENTS[dr.GROUPS.single] 236 237 broker = dr.Broker() 238 239 if formatter: 240 formatter.preprocess(broker) 241 broker = _run(broker, graph, root, context=context, use_pandas=use_pandas) 242 formatter.postprocess(broker) 243 elif print_component: 244 broker = _run(broker, graph, root, context=context, use_pandas=use_pandas) 245 broker.print_component(print_component) 246 else: 247 broker = _run(broker, graph, root, context=context, use_pandas=use_pandas) 248 249 return broker 250 251 252 def main(): 253 run(print_summary=True) 254 255 256 if __name__ == "__main__": 257 main() 258 [end of insights/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/insights/__init__.py b/insights/__init__.py --- a/insights/__init__.py +++ b/insights/__init__.py @@ -2,6 +2,7 @@ import logging import pkgutil import os +import sys import yaml from .core import Scannable, LogFileOutput, Parser, IniConfigFile # noqa: F401 from .core import FileListing, LegacyItemAccess, SysconfigOptions # noqa: F401 @@ -250,6 +251,8 @@ def main(): + if "" not in sys.path: + sys.path.insert(0, "") run(print_summary=True)
{"golden_diff": "diff --git a/insights/__init__.py b/insights/__init__.py\n--- a/insights/__init__.py\n+++ b/insights/__init__.py\n@@ -2,6 +2,7 @@\n import logging\n import pkgutil\n import os\n+import sys\n import yaml\n from .core import Scannable, LogFileOutput, Parser, IniConfigFile # noqa: F401\n from .core import FileListing, LegacyItemAccess, SysconfigOptions # noqa: F401\n@@ -250,6 +251,8 @@\n \n \n def main():\n+ if \"\" not in sys.path:\n+ sys.path.insert(0, \"\")\n run(print_summary=True)\n", "issue": "Ensure current working directory is on the python path with insights-run\n`insights-run` can't invoke arbitrary scripts because it doesn't include the current working directory in the python path like `python -m insights` does.\n", "before_files": [{"content": "from __future__ import print_function\nimport logging\nimport pkgutil\nimport os\nimport yaml\nfrom .core import Scannable, LogFileOutput, Parser, IniConfigFile # noqa: F401\nfrom .core import FileListing, LegacyItemAccess, SysconfigOptions # noqa: F401\nfrom .core import YAMLParser, JSONParser, XMLParser, CommandParser # noqa: F401\nfrom .core import AttributeDict # noqa: F401\nfrom .core import Syslog # noqa: F401\nfrom .core.archives import COMPRESSION_TYPES, extract # noqa: F401\nfrom .core import dr # noqa: F401\nfrom .core.context import ClusterArchiveContext, HostContext, HostArchiveContext # noqa: F401\nfrom .core.dr import SkipComponent # noqa: F401\nfrom .core.hydration import create_context\nfrom .core.plugins import combiner, fact, metadata, parser, rule # noqa: F401\nfrom .core.plugins import datasource, condition, incident # noqa: F401\nfrom .core.plugins import make_response, make_metadata, make_fingerprint # noqa: F401\nfrom .core.plugins import make_pass, make_fail # noqa: F401\nfrom .core.filters import add_filter, apply_filters, get_filters # noqa: F401\nfrom .formats import get_formatter\nfrom .parsers import get_active_lines # noqa: F401\nfrom .util import defaults # noqa: F401\n\nlog = logging.getLogger(__name__)\n\n\npackage_info = dict((k, None) for k in [\"RELEASE\", \"COMMIT\", \"VERSION\", \"NAME\"])\n\n\nfor name in package_info:\n package_info[name] = pkgutil.get_data(__name__, name).strip().decode(\"utf-8\")\n\n\ndef get_nvr():\n return \"{0}-{1}-{2}\".format(package_info[\"NAME\"],\n package_info[\"VERSION\"],\n package_info[\"RELEASE\"])\n\n\nRULES_STATUS = {}\n\"\"\"\nMapping of dictionaries containing nvr and commitid for each rule repo included\nin this instance\n\n{\"rule_repo_1\": {\"version\": nvr(), \"commit\": sha1}}\n\"\"\"\n\n\ndef add_status(name, nvr, commit):\n \"\"\"\n Rule repositories should call this method in their package __init__ to\n register their version information.\n \"\"\"\n RULES_STATUS[name] = {\"version\": nvr, \"commit\": commit}\n\n\ndef process_dir(broker, root, graph, context, use_pandas=False):\n ctx = create_context(root, context)\n log.debug(\"Processing %s with %s\" % (root, ctx))\n\n if isinstance(ctx, ClusterArchiveContext):\n from .core.cluster import process_cluster\n archives = [f for f in ctx.all_files if f.endswith(COMPRESSION_TYPES)]\n return process_cluster(archives, use_pandas=use_pandas, broker=broker)\n\n broker[ctx.__class__] = ctx\n broker = dr.run(graph, broker=broker)\n return broker\n\n\ndef _run(broker, graph=None, root=None, context=None, use_pandas=False):\n \"\"\"\n run is a general interface that is meant for stand alone scripts to use\n when executing insights components.\n\n Args:\n root (str): None will causes a host collection in 
which command and\n file specs are run. A directory or archive path will cause\n collection from the directory or archive, and only file type specs\n or those that depend on `insights.core.context.HostArchiveContext`\n will execute.\n component (function or class): The component to execute. Will only execute\n the component and its dependency graph. If None, all components with\n met dependencies will execute.\n\n Returns:\n broker: object containing the result of the evaluation.\n \"\"\"\n\n if not root:\n context = context or HostContext\n broker[context] = context()\n return dr.run(graph, broker=broker)\n\n if os.path.isdir(root):\n return process_dir(broker, root, graph, context, use_pandas)\n else:\n with extract(root) as ex:\n return process_dir(broker, ex.tmp_dir, graph, context, use_pandas)\n\n\ndef apply_configs(configs):\n \"\"\"\n Configures components. They can be enabled or disabled, have timeouts set\n if applicable, and have metadata customized. Valid keys are name, enabled,\n metadata, and timeout.\n\n Args:\n configs (list): a list of dictionaries with the following keys:\n name, enabled, metadata, and timeout. All keys are optional except\n name.\n\n name is the prefix or exact name of any loaded component. Any\n component starting with name will have the associated configuration\n applied.\n\n enabled is whether the matching components will execute even if\n their dependencies are met. Defaults to True.\n\n timeout sets the class level timeout attribute of any component so\n long as the attribute already exists.\n\n metadata is any dictionary that you want to attach to the\n component. The dictionary can be retrieved by the component at\n runtime.\n \"\"\"\n delegate_keys = sorted(dr.DELEGATES, key=dr.get_name)\n for comp_cfg in configs:\n name = comp_cfg[\"name\"]\n for c in delegate_keys:\n delegate = dr.DELEGATES[c]\n cname = dr.get_name(c)\n if cname.startswith(name):\n dr.ENABLED[c] = comp_cfg.get(\"enabled\", True)\n delegate.metadata.update(comp_cfg.get(\"metadata\", {}))\n for k, v in delegate.metadata.items():\n if hasattr(c, k):\n setattr(c, k, v)\n if hasattr(c, \"timeout\"):\n c.timeout = comp_cfg.get(\"timeout\", c.timeout)\n if cname == name:\n break\n\n\ndef _load_context(path):\n if path is None:\n return\n\n if \".\" not in path:\n path = \".\".join([\"insights.core.context\", path])\n return dr.get_component(path)\n\n\ndef run(component=None, root=None, print_summary=False,\n context=None, use_pandas=False,\n print_component=None):\n\n from .core import dr\n dr.load_components(\"insights.specs.default\")\n dr.load_components(\"insights.specs.insights_archive\")\n dr.load_components(\"insights.specs.sos_archive\")\n dr.load_components(\"insights.specs.jdr_archive\")\n\n args = None\n formatter = None\n if print_summary:\n import argparse\n import logging\n p = argparse.ArgumentParser(add_help=False)\n p.add_argument(\"archive\", nargs=\"?\", help=\"Archive or directory to analyze.\")\n p.add_argument(\"-p\", \"--plugins\", default=\"\", help=\"Comma-separated list without spaces of package(s) or module(s) containing plugins.\")\n p.add_argument(\"-c\", \"--config\", help=\"Configure components.\")\n p.add_argument(\"-v\", \"--verbose\", help=\"Verbose output.\", action=\"store_true\")\n p.add_argument(\"-f\", \"--format\", help=\"Output format.\", default=\"insights.formats.text\")\n p.add_argument(\"-D\", \"--debug\", help=\"Verbose debug output.\", action=\"store_true\")\n p.add_argument(\"--context\", help=\"Execution Context. 
Defaults to HostContext if an archive isn't passed.\")\n p.add_argument(\"--pandas\", action=\"store_true\", help=\"Use pandas dataframes with cluster rules.\")\n\n class Args(object):\n pass\n\n args = Args()\n p.parse_known_args(namespace=args)\n p = argparse.ArgumentParser(parents=[p])\n args.format = \"insights.formats._json\" if args.format == \"json\" else args.format\n args.format = \"insights.formats._yaml\" if args.format == \"yaml\" else args.format\n fmt = args.format if \".\" in args.format else \"insights.formats.\" + args.format\n Formatter = dr.get_component(fmt)\n if not Formatter:\n dr.load_components(fmt, continue_on_error=False)\n Formatter = get_formatter(fmt)\n Formatter.configure(p)\n p.parse_args(namespace=args)\n formatter = Formatter(args)\n\n logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO if args.verbose else logging.ERROR)\n context = _load_context(args.context) or context\n use_pandas = args.pandas or use_pandas\n\n root = args.archive or root\n if root:\n root = os.path.realpath(root)\n\n plugins = []\n if args.plugins:\n for path in args.plugins.split(\",\"):\n path = path.strip()\n if path.endswith(\".py\"):\n path, _ = os.path.splitext(path)\n path = path.rstrip(\"/\").replace(\"/\", \".\")\n plugins.append(path)\n\n for p in plugins:\n dr.load_components(p, continue_on_error=False)\n\n if args.config:\n with open(args.config) as f:\n apply_configs(yaml.load(f))\n\n if component is None:\n component = []\n plugins = tuple(plugins)\n for c in dr.DELEGATES:\n if c.__module__.startswith(plugins):\n component.append(c)\n\n if component:\n if not isinstance(component, (list, set)):\n component = [component]\n graph = {}\n for c in component:\n graph.update(dr.get_dependency_graph(c))\n else:\n graph = dr.COMPONENTS[dr.GROUPS.single]\n\n broker = dr.Broker()\n\n if formatter:\n formatter.preprocess(broker)\n broker = _run(broker, graph, root, context=context, use_pandas=use_pandas)\n formatter.postprocess(broker)\n elif print_component:\n broker = _run(broker, graph, root, context=context, use_pandas=use_pandas)\n broker.print_component(print_component)\n else:\n broker = _run(broker, graph, root, context=context, use_pandas=use_pandas)\n\n return broker\n\n\ndef main():\n run(print_summary=True)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "insights/__init__.py"}]}
3,411
158
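A note on the patch in the entry above: `python -m insights` puts the current working directory on `sys.path` before running, while the `insights-run` console script does not, which is why the fix inserts `""` at the front of the path before dispatching. A minimal standalone sketch of the same idea (the commented-out `run(...)` call is only a placeholder for the real entry point):

```python
import sys


def main():
    # "python -m insights" prepends the current working directory to
    # sys.path; a console-script entry point does not, so add it
    # explicitly before handing control to the real entry point.
    if "" not in sys.path:
        sys.path.insert(0, "")
    # run(print_summary=True)  # placeholder for the real dispatch
```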
gh_patches_debug_16306
rasdani/github-patches
git_diff
ARM-DOE__ACT-653
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Example Incorrect ### Description This example says it's for plotting sounding data but the example uses MET data. We should update this to be what it was originally intended. https://arm-doe.github.io/ACT/source/auto_examples/plotting/plot_sonde.html </issue> <code> [start of examples/plotting/plot_sonde.py] 1 """ 2 Plot a timeseries of sounding data 3 ---------------------------------------------------- 4 5 This is a simple example for how to plot a timeseries of sounding 6 data from the ARM SGP site. 7 8 Author: Robert Jackson 9 """ 10 11 from matplotlib import pyplot as plt 12 13 import act 14 15 files = act.tests.sample_files.EXAMPLE_MET_WILDCARD 16 met_ds = act.io.armfiles.read_netcdf(files) 17 print(met_ds) 18 met_temp = met_ds.temp_mean 19 met_rh = met_ds.rh_mean 20 met_lcl = (20.0 + met_temp / 5.0) * (100.0 - met_rh) / 1000.0 21 met_ds['met_lcl'] = met_lcl * 1000.0 22 met_ds['met_lcl'].attrs['units'] = 'm' 23 met_ds['met_lcl'].attrs['long_name'] = 'LCL Calculated from SGP MET E13' 24 25 # Plot data 26 display = act.plotting.TimeSeriesDisplay(met_ds) 27 display.add_subplots((3,), figsize=(15, 10)) 28 display.plot('wspd_vec_mean', subplot_index=(0,)) 29 display.plot('temp_mean', subplot_index=(1,)) 30 display.plot('rh_mean', subplot_index=(2,)) 31 plt.show() 32 [end of examples/plotting/plot_sonde.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/examples/plotting/plot_sonde.py b/examples/plotting/plot_sonde.py --- a/examples/plotting/plot_sonde.py +++ b/examples/plotting/plot_sonde.py @@ -12,20 +12,14 @@ import act -files = act.tests.sample_files.EXAMPLE_MET_WILDCARD -met_ds = act.io.armfiles.read_netcdf(files) -print(met_ds) -met_temp = met_ds.temp_mean -met_rh = met_ds.rh_mean -met_lcl = (20.0 + met_temp / 5.0) * (100.0 - met_rh) / 1000.0 -met_ds['met_lcl'] = met_lcl * 1000.0 -met_ds['met_lcl'].attrs['units'] = 'm' -met_ds['met_lcl'].attrs['long_name'] = 'LCL Calculated from SGP MET E13' +files = act.tests.sample_files.EXAMPLE_SONDE1 +sonde_ds = act.io.armfiles.read_netcdf(files) +print(sonde_ds) # Plot data -display = act.plotting.TimeSeriesDisplay(met_ds) +display = act.plotting.TimeSeriesDisplay(sonde_ds) display.add_subplots((3,), figsize=(15, 10)) -display.plot('wspd_vec_mean', subplot_index=(0,)) -display.plot('temp_mean', subplot_index=(1,)) -display.plot('rh_mean', subplot_index=(2,)) +display.plot('wspd', subplot_index=(0,)) +display.plot('tdry', subplot_index=(1,)) +display.plot('rh', subplot_index=(2,)) plt.show()
{"golden_diff": "diff --git a/examples/plotting/plot_sonde.py b/examples/plotting/plot_sonde.py\n--- a/examples/plotting/plot_sonde.py\n+++ b/examples/plotting/plot_sonde.py\n@@ -12,20 +12,14 @@\n \n import act\n \n-files = act.tests.sample_files.EXAMPLE_MET_WILDCARD\n-met_ds = act.io.armfiles.read_netcdf(files)\n-print(met_ds)\n-met_temp = met_ds.temp_mean\n-met_rh = met_ds.rh_mean\n-met_lcl = (20.0 + met_temp / 5.0) * (100.0 - met_rh) / 1000.0\n-met_ds['met_lcl'] = met_lcl * 1000.0\n-met_ds['met_lcl'].attrs['units'] = 'm'\n-met_ds['met_lcl'].attrs['long_name'] = 'LCL Calculated from SGP MET E13'\n+files = act.tests.sample_files.EXAMPLE_SONDE1\n+sonde_ds = act.io.armfiles.read_netcdf(files)\n+print(sonde_ds)\n \n # Plot data\n-display = act.plotting.TimeSeriesDisplay(met_ds)\n+display = act.plotting.TimeSeriesDisplay(sonde_ds)\n display.add_subplots((3,), figsize=(15, 10))\n-display.plot('wspd_vec_mean', subplot_index=(0,))\n-display.plot('temp_mean', subplot_index=(1,))\n-display.plot('rh_mean', subplot_index=(2,))\n+display.plot('wspd', subplot_index=(0,))\n+display.plot('tdry', subplot_index=(1,))\n+display.plot('rh', subplot_index=(2,))\n plt.show()\n", "issue": "Example Incorrect\n### Description\r\n\r\nThis example says it's for plotting sounding data but the example uses MET data. We should update this to be what it was originally intended.\r\nhttps://arm-doe.github.io/ACT/source/auto_examples/plotting/plot_sonde.html\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nPlot a timeseries of sounding data\n----------------------------------------------------\n\nThis is a simple example for how to plot a timeseries of sounding\ndata from the ARM SGP site.\n\nAuthor: Robert Jackson\n\"\"\"\n\nfrom matplotlib import pyplot as plt\n\nimport act\n\nfiles = act.tests.sample_files.EXAMPLE_MET_WILDCARD\nmet_ds = act.io.armfiles.read_netcdf(files)\nprint(met_ds)\nmet_temp = met_ds.temp_mean\nmet_rh = met_ds.rh_mean\nmet_lcl = (20.0 + met_temp / 5.0) * (100.0 - met_rh) / 1000.0\nmet_ds['met_lcl'] = met_lcl * 1000.0\nmet_ds['met_lcl'].attrs['units'] = 'm'\nmet_ds['met_lcl'].attrs['long_name'] = 'LCL Calculated from SGP MET E13'\n\n# Plot data\ndisplay = act.plotting.TimeSeriesDisplay(met_ds)\ndisplay.add_subplots((3,), figsize=(15, 10))\ndisplay.plot('wspd_vec_mean', subplot_index=(0,))\ndisplay.plot('temp_mean', subplot_index=(1,))\ndisplay.plot('rh_mean', subplot_index=(2,))\nplt.show()\n", "path": "examples/plotting/plot_sonde.py"}]}
932
379
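The fix in the entry above swaps the MET sample files for the bundled sounding file and renames the plotted variables accordingly (`temp_mean`/`rh_mean` become `tdry`/`rh`). When adapting the example to another datastream, printing the dataset is the quickest way to find the right variable names; a short hedged sketch using the same sample-file constant as the patch:

```python
import act

# EXAMPLE_SONDE1 is the bundled sample sounding file used in the fixed example.
sonde_ds = act.io.armfiles.read_netcdf(act.tests.sample_files.EXAMPLE_SONDE1)
print(sonde_ds.data_vars)  # lists tdry, rh, wspd, ... rather than temp_mean/rh_mean
```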
gh_patches_debug_32364
rasdani/github-patches
git_diff
pwndbg__pwndbg-1158
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> nextproginstr doesn't stop when reaching the program instructions The `nextproginstr` command should stop when it reaches an instruction in the binary, but it seems to reach those instructions and continue executing instead of breaking. </issue> <code> [start of pwndbg/proc.py] 1 """ 2 Provides values which would be available from /proc which 3 are not fulfilled by other modules and some process/gdb flow 4 related information. 5 """ 6 7 import functools 8 import sys 9 from types import ModuleType 10 11 import gdb 12 13 import pwndbg.gdblib.qemu 14 import pwndbg.lib.memoize 15 16 17 class module(ModuleType): 18 @property 19 def pid(self): 20 # QEMU usermode emualtion always returns 42000 for some reason. 21 # In any case, we can't use the info. 22 if pwndbg.gdblib.qemu.is_qemu_usermode(): 23 return pwndbg.gdblib.qemu.pid() 24 25 i = gdb.selected_inferior() 26 if i is not None: 27 return i.pid 28 return 0 29 30 @property 31 def tid(self): 32 if pwndbg.gdblib.qemu.is_qemu_usermode(): 33 return pwndbg.gdblib.qemu.pid() 34 35 i = gdb.selected_thread() 36 if i is not None: 37 return i.ptid[1] 38 39 return self.pid 40 41 @property 42 def alive(self): 43 return gdb.selected_thread() is not None 44 45 @property 46 def thread_is_stopped(self): 47 """ 48 This detects whether selected thread is stopped. 49 It is not stopped in situations when gdb is executing commands 50 that are attached to a breakpoint by `command` command. 51 52 For more info see issue #229 ( https://github.com/pwndbg/pwndbg/issues/299 ) 53 :return: Whether gdb executes commands attached to bp with `command` command. 54 """ 55 return gdb.selected_thread().is_stopped() 56 57 @property 58 def exe(self): 59 """ 60 Returns the debugged file name. 61 62 On remote targets, this may be prefixed with "target:" string. 63 See this by executing those in two terminals: 64 1. gdbserver 127.0.0.1:1234 /bin/ls 65 2. 
gdb -ex "target remote :1234" -ex "pi pwndbg.proc.exe" 66 67 If you need to process the debugged file use: 68 `pwndbg.file.get_file(pwndbg.proc.exe)` 69 """ 70 return gdb.current_progspace().filename 71 72 @property 73 def mem_page(self): 74 return next(p for p in pwndbg.vmmap.get() if p.objfile == self.exe) 75 76 def OnlyWhenRunning(self, func): 77 @functools.wraps(func) 78 def wrapper(*a, **kw): 79 if self.alive: 80 return func(*a, **kw) 81 82 return wrapper 83 84 85 # To prevent garbage collection 86 tether = sys.modules[__name__] 87 88 sys.modules[__name__] = module(__name__, "") 89 [end of pwndbg/proc.py] [start of pwndbg/commands/next.py] 1 """ 2 Stepping until an event occurs 3 """ 4 5 import argparse 6 7 import gdb 8 9 import pwndbg.commands 10 import pwndbg.gdblib.next 11 12 13 @pwndbg.commands.ArgparsedCommand("Breaks at the next jump instruction.", aliases=["nextjump"]) 14 @pwndbg.commands.OnlyWhenRunning 15 def nextjmp(): 16 """Breaks at the next jump instruction""" 17 if pwndbg.gdblib.next.break_next_branch(): 18 pwndbg.commands.context.context() 19 20 21 parser = argparse.ArgumentParser(description="""Breaks at the next call instruction""") 22 parser.add_argument( 23 "symbol_regex", 24 type=str, 25 default=None, 26 nargs="?", 27 help="A regex matching the name of next symbol to be broken on before calling.", 28 ) 29 30 31 @pwndbg.commands.ArgparsedCommand(parser) 32 @pwndbg.commands.OnlyWhenRunning 33 def nextcall(symbol_regex=None): 34 """Breaks at the next call instruction""" 35 if pwndbg.gdblib.next.break_next_call(symbol_regex): 36 pwndbg.commands.context.context() 37 38 39 @pwndbg.commands.ArgparsedCommand("""Breaks at next return-like instruction""") 40 @pwndbg.commands.OnlyWhenRunning 41 def nextret(): 42 """Breaks at next return-like instruction""" 43 if pwndbg.gdblib.next.break_next_ret(): 44 pwndbg.commands.context.context() 45 46 47 @pwndbg.commands.ArgparsedCommand("""Breaks at next return-like instruction by 'stepping' to it""") 48 @pwndbg.commands.OnlyWhenRunning 49 def stepret(): 50 """Breaks at next return-like instruction by 'stepping' to it""" 51 while ( 52 pwndbg.proc.alive 53 and not pwndbg.gdblib.next.break_next_ret() 54 and pwndbg.gdblib.next.break_next_branch() 55 ): 56 # Here we are e.g. on a CALL instruction (temporarily breakpointed by `break_next_branch`) 57 # We need to step so that we take this branch instead of ignoring it 58 gdb.execute("si") 59 continue 60 61 if pwndbg.proc.alive: 62 pwndbg.commands.context.context() 63 64 65 @pwndbg.commands.ArgparsedCommand( 66 """Breaks at the next instruction that belongs to the running program""" 67 ) 68 @pwndbg.commands.OnlyWhenRunning 69 def nextproginstr(): 70 """Breaks at the next instruction that belongs to the running program""" 71 if pwndbg.gdblib.next.break_on_program_code(): 72 pwndbg.commands.context.context() 73 74 75 parser = argparse.ArgumentParser( 76 description="""Sets a breakpoint on the instruction after this one""" 77 ) 78 parser.add_argument("addr", type=int, default=None, nargs="?", help="The address to break after.") 79 80 81 @pwndbg.commands.ArgparsedCommand(parser, aliases=["so"]) 82 @pwndbg.commands.OnlyWhenRunning 83 def stepover(addr=None): 84 """Sets a breakpoint on the instruction after this one""" 85 pwndbg.gdblib.next.break_on_next(addr) 86 87 88 @pwndbg.commands.ArgparsedCommand( 89 "Breaks at the next syscall not taking branches.", aliases=["nextsc"] 90 ) 91 @pwndbg.commands.OnlyWhenRunning 92 def nextsyscall(): 93 """ 94 Breaks at the next syscall not taking branches. 
95 """ 96 while ( 97 pwndbg.proc.alive 98 and not pwndbg.gdblib.next.break_next_interrupt() 99 and pwndbg.gdblib.next.break_next_branch() 100 ): 101 continue 102 103 if pwndbg.proc.alive: 104 pwndbg.commands.context.context() 105 106 107 @pwndbg.commands.ArgparsedCommand( 108 "Breaks at the next syscall by taking branches.", aliases=["stepsc"] 109 ) 110 @pwndbg.commands.OnlyWhenRunning 111 def stepsyscall(): 112 """ 113 Breaks at the next syscall by taking branches. 114 """ 115 while ( 116 pwndbg.proc.alive 117 and not pwndbg.gdblib.next.break_next_interrupt() 118 and pwndbg.gdblib.next.break_next_branch() 119 ): 120 # Here we are e.g. on a CALL instruction (temporarily breakpointed by `break_next_branch`) 121 # We need to step so that we take this branch instead of ignoring it 122 gdb.execute("si") 123 continue 124 125 if pwndbg.proc.alive: 126 pwndbg.commands.context.context() 127 [end of pwndbg/commands/next.py] [start of pwndbg/gdblib/next.py] 1 """ 2 Commands for setting temporary breakpoints on the next 3 instruction of some type (call, branch, etc.) 4 """ 5 6 import re 7 8 import capstone 9 import gdb 10 11 import pwndbg.disasm 12 import pwndbg.gdblib.events 13 import pwndbg.gdblib.regs 14 import pwndbg.proc 15 from pwndbg.color import message 16 17 jumps = set((capstone.CS_GRP_CALL, capstone.CS_GRP_JUMP, capstone.CS_GRP_RET, capstone.CS_GRP_IRET)) 18 19 interrupts = set((capstone.CS_GRP_INT,)) 20 21 22 @pwndbg.gdblib.events.exit 23 def clear_temp_breaks(): 24 if not pwndbg.proc.alive: 25 breakpoints = gdb.breakpoints() 26 if breakpoints: 27 for bp in breakpoints: 28 if ( 29 bp.temporary and not bp.visible 30 ): # visible is used instead of internal because older gdb's don't support internal 31 bp.delete() 32 33 34 def next_int(address=None): 35 """ 36 If there is a syscall in the current basic black, 37 return the instruction of the one closest to $PC. 38 39 Otherwise, return None. 
40 """ 41 if address is None: 42 ins = pwndbg.disasm.one(pwndbg.gdblib.regs.pc) 43 if not ins: 44 return None 45 address = ins.next 46 47 ins = pwndbg.disasm.one(address) 48 while ins: 49 if set(ins.groups) & jumps: 50 return None 51 if set(ins.groups) & interrupts: 52 return ins 53 ins = pwndbg.disasm.one(ins.next) 54 55 return None 56 57 58 def next_branch(address=None): 59 if address is None: 60 ins = pwndbg.disasm.one(pwndbg.gdblib.regs.pc) 61 if not ins: 62 return None 63 address = ins.next 64 65 ins = pwndbg.disasm.one(address) 66 while ins: 67 if set(ins.groups) & jumps: 68 return ins 69 ins = pwndbg.disasm.one(ins.next) 70 71 return None 72 73 74 def break_next_branch(address=None): 75 ins = next_branch(address) 76 77 if ins: 78 gdb.Breakpoint("*%#x" % ins.address, internal=True, temporary=True) 79 gdb.execute("continue", from_tty=False, to_string=True) 80 return ins 81 82 83 def break_next_interrupt(address=None): 84 ins = next_int(address) 85 86 if ins: 87 gdb.Breakpoint("*%#x" % ins.address, internal=True, temporary=True) 88 gdb.execute("continue", from_tty=False, to_string=True) 89 return ins 90 91 92 def break_next_call(symbol_regex=None): 93 while pwndbg.proc.alive: 94 ins = break_next_branch() 95 96 if not ins: 97 break 98 99 # continue if not a call 100 if capstone.CS_GRP_CALL not in ins.groups: 101 continue 102 103 # return call if we don't search for a symbol 104 if not symbol_regex: 105 return ins 106 107 # return call if we match target address 108 if ins.target_const and re.match("%s$" % symbol_regex, hex(ins.target)): 109 return ins 110 111 # return call if we match symbol name 112 if ins.symbol and re.match("%s$" % symbol_regex, ins.symbol): 113 return ins 114 115 116 def break_next_ret(address=None): 117 while pwndbg.proc.alive: 118 ins = break_next_branch(address) 119 120 if not ins: 121 break 122 123 if capstone.CS_GRP_RET in ins.groups: 124 return ins 125 126 127 def break_on_program_code(): 128 """ 129 Breaks on next instruction that belongs to process' objfile code. 130 :return: True for success, False when process ended or when pc is at the code. 131 """ 132 mp = pwndbg.proc.mem_page 133 start = mp.start 134 end = mp.end 135 136 if start <= pwndbg.gdblib.regs.pc < end: 137 print(message.error("The pc is already at the binary objfile code. Not stepping.")) 138 return False 139 140 while pwndbg.proc.alive: 141 gdb.execute("si", from_tty=False, to_string=False) 142 143 addr = pwndbg.gdblib.regs.pc 144 if start <= addr < end: 145 return True 146 147 return False 148 149 150 def break_on_next(address=None): 151 address = address or pwndbg.gdblib.regs.pc 152 ins = pwndbg.disasm.one(address) 153 154 gdb.Breakpoint("*%#x" % (ins.address + ins.size), temporary=True) 155 gdb.execute("continue", from_tty=False, to_string=True) 156 [end of pwndbg/gdblib/next.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pwndbg/commands/next.py b/pwndbg/commands/next.py --- a/pwndbg/commands/next.py +++ b/pwndbg/commands/next.py @@ -67,9 +67,7 @@ ) @pwndbg.commands.OnlyWhenRunning def nextproginstr(): - """Breaks at the next instruction that belongs to the running program""" - if pwndbg.gdblib.next.break_on_program_code(): - pwndbg.commands.context.context() + pwndbg.gdblib.next.break_on_program_code() parser = argparse.ArgumentParser( diff --git a/pwndbg/gdblib/next.py b/pwndbg/gdblib/next.py --- a/pwndbg/gdblib/next.py +++ b/pwndbg/gdblib/next.py @@ -129,20 +129,24 @@ Breaks on next instruction that belongs to process' objfile code. :return: True for success, False when process ended or when pc is at the code. """ - mp = pwndbg.proc.mem_page - start = mp.start - end = mp.end + exe = pwndbg.proc.exe + binary_exec_page_ranges = [ + (p.start, p.end) for p in pwndbg.vmmap.get() if p.objfile == exe and p.execute + ] - if start <= pwndbg.gdblib.regs.pc < end: - print(message.error("The pc is already at the binary objfile code. Not stepping.")) - return False + pc = pwndbg.gdblib.regs.pc + for start, end in binary_exec_page_ranges: + if start <= pc < end: + print(message.error("The pc is already at the binary objfile code. Not stepping.")) + return False while pwndbg.proc.alive: gdb.execute("si", from_tty=False, to_string=False) - addr = pwndbg.gdblib.regs.pc - if start <= addr < end: - return True + pc = pwndbg.gdblib.regs.pc + for start, end in binary_exec_page_ranges: + if start <= pc < end: + return True return False diff --git a/pwndbg/proc.py b/pwndbg/proc.py --- a/pwndbg/proc.py +++ b/pwndbg/proc.py @@ -69,10 +69,6 @@ """ return gdb.current_progspace().filename - @property - def mem_page(self): - return next(p for p in pwndbg.vmmap.get() if p.objfile == self.exe) - def OnlyWhenRunning(self, func): @functools.wraps(func) def wrapper(*a, **kw):
{"golden_diff": "diff --git a/pwndbg/commands/next.py b/pwndbg/commands/next.py\n--- a/pwndbg/commands/next.py\n+++ b/pwndbg/commands/next.py\n@@ -67,9 +67,7 @@\n )\n @pwndbg.commands.OnlyWhenRunning\n def nextproginstr():\n- \"\"\"Breaks at the next instruction that belongs to the running program\"\"\"\n- if pwndbg.gdblib.next.break_on_program_code():\n- pwndbg.commands.context.context()\n+ pwndbg.gdblib.next.break_on_program_code()\n \n \n parser = argparse.ArgumentParser(\ndiff --git a/pwndbg/gdblib/next.py b/pwndbg/gdblib/next.py\n--- a/pwndbg/gdblib/next.py\n+++ b/pwndbg/gdblib/next.py\n@@ -129,20 +129,24 @@\n Breaks on next instruction that belongs to process' objfile code.\n :return: True for success, False when process ended or when pc is at the code.\n \"\"\"\n- mp = pwndbg.proc.mem_page\n- start = mp.start\n- end = mp.end\n+ exe = pwndbg.proc.exe\n+ binary_exec_page_ranges = [\n+ (p.start, p.end) for p in pwndbg.vmmap.get() if p.objfile == exe and p.execute\n+ ]\n \n- if start <= pwndbg.gdblib.regs.pc < end:\n- print(message.error(\"The pc is already at the binary objfile code. Not stepping.\"))\n- return False\n+ pc = pwndbg.gdblib.regs.pc\n+ for start, end in binary_exec_page_ranges:\n+ if start <= pc < end:\n+ print(message.error(\"The pc is already at the binary objfile code. Not stepping.\"))\n+ return False\n \n while pwndbg.proc.alive:\n gdb.execute(\"si\", from_tty=False, to_string=False)\n \n- addr = pwndbg.gdblib.regs.pc\n- if start <= addr < end:\n- return True\n+ pc = pwndbg.gdblib.regs.pc\n+ for start, end in binary_exec_page_ranges:\n+ if start <= pc < end:\n+ return True\n \n return False\n \ndiff --git a/pwndbg/proc.py b/pwndbg/proc.py\n--- a/pwndbg/proc.py\n+++ b/pwndbg/proc.py\n@@ -69,10 +69,6 @@\n \"\"\"\n return gdb.current_progspace().filename\n \n- @property\n- def mem_page(self):\n- return next(p for p in pwndbg.vmmap.get() if p.objfile == self.exe)\n-\n def OnlyWhenRunning(self, func):\n @functools.wraps(func)\n def wrapper(*a, **kw):\n", "issue": "nextproginstr doesn't stop when reaching the program instructions\nThe `nextproginstr` command should stop when it reaches an instruction in the binary, but it seems to reach those instructions and continue executing instead of breaking.\n", "before_files": [{"content": "\"\"\"\nProvides values which would be available from /proc which\nare not fulfilled by other modules and some process/gdb flow\nrelated information.\n\"\"\"\n\nimport functools\nimport sys\nfrom types import ModuleType\n\nimport gdb\n\nimport pwndbg.gdblib.qemu\nimport pwndbg.lib.memoize\n\n\nclass module(ModuleType):\n @property\n def pid(self):\n # QEMU usermode emualtion always returns 42000 for some reason.\n # In any case, we can't use the info.\n if pwndbg.gdblib.qemu.is_qemu_usermode():\n return pwndbg.gdblib.qemu.pid()\n\n i = gdb.selected_inferior()\n if i is not None:\n return i.pid\n return 0\n\n @property\n def tid(self):\n if pwndbg.gdblib.qemu.is_qemu_usermode():\n return pwndbg.gdblib.qemu.pid()\n\n i = gdb.selected_thread()\n if i is not None:\n return i.ptid[1]\n\n return self.pid\n\n @property\n def alive(self):\n return gdb.selected_thread() is not None\n\n @property\n def thread_is_stopped(self):\n \"\"\"\n This detects whether selected thread is stopped.\n It is not stopped in situations when gdb is executing commands\n that are attached to a breakpoint by `command` command.\n\n For more info see issue #229 ( https://github.com/pwndbg/pwndbg/issues/299 )\n :return: Whether gdb executes commands attached to bp with `command` 
command.\n \"\"\"\n return gdb.selected_thread().is_stopped()\n\n @property\n def exe(self):\n \"\"\"\n Returns the debugged file name.\n\n On remote targets, this may be prefixed with \"target:\" string.\n See this by executing those in two terminals:\n 1. gdbserver 127.0.0.1:1234 /bin/ls\n 2. gdb -ex \"target remote :1234\" -ex \"pi pwndbg.proc.exe\"\n\n If you need to process the debugged file use:\n `pwndbg.file.get_file(pwndbg.proc.exe)`\n \"\"\"\n return gdb.current_progspace().filename\n\n @property\n def mem_page(self):\n return next(p for p in pwndbg.vmmap.get() if p.objfile == self.exe)\n\n def OnlyWhenRunning(self, func):\n @functools.wraps(func)\n def wrapper(*a, **kw):\n if self.alive:\n return func(*a, **kw)\n\n return wrapper\n\n\n# To prevent garbage collection\ntether = sys.modules[__name__]\n\nsys.modules[__name__] = module(__name__, \"\")\n", "path": "pwndbg/proc.py"}, {"content": "\"\"\"\nStepping until an event occurs\n\"\"\"\n\nimport argparse\n\nimport gdb\n\nimport pwndbg.commands\nimport pwndbg.gdblib.next\n\n\[email protected](\"Breaks at the next jump instruction.\", aliases=[\"nextjump\"])\[email protected]\ndef nextjmp():\n \"\"\"Breaks at the next jump instruction\"\"\"\n if pwndbg.gdblib.next.break_next_branch():\n pwndbg.commands.context.context()\n\n\nparser = argparse.ArgumentParser(description=\"\"\"Breaks at the next call instruction\"\"\")\nparser.add_argument(\n \"symbol_regex\",\n type=str,\n default=None,\n nargs=\"?\",\n help=\"A regex matching the name of next symbol to be broken on before calling.\",\n)\n\n\[email protected](parser)\[email protected]\ndef nextcall(symbol_regex=None):\n \"\"\"Breaks at the next call instruction\"\"\"\n if pwndbg.gdblib.next.break_next_call(symbol_regex):\n pwndbg.commands.context.context()\n\n\[email protected](\"\"\"Breaks at next return-like instruction\"\"\")\[email protected]\ndef nextret():\n \"\"\"Breaks at next return-like instruction\"\"\"\n if pwndbg.gdblib.next.break_next_ret():\n pwndbg.commands.context.context()\n\n\[email protected](\"\"\"Breaks at next return-like instruction by 'stepping' to it\"\"\")\[email protected]\ndef stepret():\n \"\"\"Breaks at next return-like instruction by 'stepping' to it\"\"\"\n while (\n pwndbg.proc.alive\n and not pwndbg.gdblib.next.break_next_ret()\n and pwndbg.gdblib.next.break_next_branch()\n ):\n # Here we are e.g. 
on a CALL instruction (temporarily breakpointed by `break_next_branch`)\n # We need to step so that we take this branch instead of ignoring it\n gdb.execute(\"si\")\n continue\n\n if pwndbg.proc.alive:\n pwndbg.commands.context.context()\n\n\[email protected](\n \"\"\"Breaks at the next instruction that belongs to the running program\"\"\"\n)\[email protected]\ndef nextproginstr():\n \"\"\"Breaks at the next instruction that belongs to the running program\"\"\"\n if pwndbg.gdblib.next.break_on_program_code():\n pwndbg.commands.context.context()\n\n\nparser = argparse.ArgumentParser(\n description=\"\"\"Sets a breakpoint on the instruction after this one\"\"\"\n)\nparser.add_argument(\"addr\", type=int, default=None, nargs=\"?\", help=\"The address to break after.\")\n\n\[email protected](parser, aliases=[\"so\"])\[email protected]\ndef stepover(addr=None):\n \"\"\"Sets a breakpoint on the instruction after this one\"\"\"\n pwndbg.gdblib.next.break_on_next(addr)\n\n\[email protected](\n \"Breaks at the next syscall not taking branches.\", aliases=[\"nextsc\"]\n)\[email protected]\ndef nextsyscall():\n \"\"\"\n Breaks at the next syscall not taking branches.\n \"\"\"\n while (\n pwndbg.proc.alive\n and not pwndbg.gdblib.next.break_next_interrupt()\n and pwndbg.gdblib.next.break_next_branch()\n ):\n continue\n\n if pwndbg.proc.alive:\n pwndbg.commands.context.context()\n\n\[email protected](\n \"Breaks at the next syscall by taking branches.\", aliases=[\"stepsc\"]\n)\[email protected]\ndef stepsyscall():\n \"\"\"\n Breaks at the next syscall by taking branches.\n \"\"\"\n while (\n pwndbg.proc.alive\n and not pwndbg.gdblib.next.break_next_interrupt()\n and pwndbg.gdblib.next.break_next_branch()\n ):\n # Here we are e.g. on a CALL instruction (temporarily breakpointed by `break_next_branch`)\n # We need to step so that we take this branch instead of ignoring it\n gdb.execute(\"si\")\n continue\n\n if pwndbg.proc.alive:\n pwndbg.commands.context.context()\n", "path": "pwndbg/commands/next.py"}, {"content": "\"\"\"\nCommands for setting temporary breakpoints on the next\ninstruction of some type (call, branch, etc.)\n\"\"\"\n\nimport re\n\nimport capstone\nimport gdb\n\nimport pwndbg.disasm\nimport pwndbg.gdblib.events\nimport pwndbg.gdblib.regs\nimport pwndbg.proc\nfrom pwndbg.color import message\n\njumps = set((capstone.CS_GRP_CALL, capstone.CS_GRP_JUMP, capstone.CS_GRP_RET, capstone.CS_GRP_IRET))\n\ninterrupts = set((capstone.CS_GRP_INT,))\n\n\[email protected]\ndef clear_temp_breaks():\n if not pwndbg.proc.alive:\n breakpoints = gdb.breakpoints()\n if breakpoints:\n for bp in breakpoints:\n if (\n bp.temporary and not bp.visible\n ): # visible is used instead of internal because older gdb's don't support internal\n bp.delete()\n\n\ndef next_int(address=None):\n \"\"\"\n If there is a syscall in the current basic black,\n return the instruction of the one closest to $PC.\n\n Otherwise, return None.\n \"\"\"\n if address is None:\n ins = pwndbg.disasm.one(pwndbg.gdblib.regs.pc)\n if not ins:\n return None\n address = ins.next\n\n ins = pwndbg.disasm.one(address)\n while ins:\n if set(ins.groups) & jumps:\n return None\n if set(ins.groups) & interrupts:\n return ins\n ins = pwndbg.disasm.one(ins.next)\n\n return None\n\n\ndef next_branch(address=None):\n if address is None:\n ins = pwndbg.disasm.one(pwndbg.gdblib.regs.pc)\n if not ins:\n return None\n address = ins.next\n\n ins = pwndbg.disasm.one(address)\n while ins:\n if set(ins.groups) & jumps:\n return ins\n ins = 
pwndbg.disasm.one(ins.next)\n\n return None\n\n\ndef break_next_branch(address=None):\n ins = next_branch(address)\n\n if ins:\n gdb.Breakpoint(\"*%#x\" % ins.address, internal=True, temporary=True)\n gdb.execute(\"continue\", from_tty=False, to_string=True)\n return ins\n\n\ndef break_next_interrupt(address=None):\n ins = next_int(address)\n\n if ins:\n gdb.Breakpoint(\"*%#x\" % ins.address, internal=True, temporary=True)\n gdb.execute(\"continue\", from_tty=False, to_string=True)\n return ins\n\n\ndef break_next_call(symbol_regex=None):\n while pwndbg.proc.alive:\n ins = break_next_branch()\n\n if not ins:\n break\n\n # continue if not a call\n if capstone.CS_GRP_CALL not in ins.groups:\n continue\n\n # return call if we don't search for a symbol\n if not symbol_regex:\n return ins\n\n # return call if we match target address\n if ins.target_const and re.match(\"%s$\" % symbol_regex, hex(ins.target)):\n return ins\n\n # return call if we match symbol name\n if ins.symbol and re.match(\"%s$\" % symbol_regex, ins.symbol):\n return ins\n\n\ndef break_next_ret(address=None):\n while pwndbg.proc.alive:\n ins = break_next_branch(address)\n\n if not ins:\n break\n\n if capstone.CS_GRP_RET in ins.groups:\n return ins\n\n\ndef break_on_program_code():\n \"\"\"\n Breaks on next instruction that belongs to process' objfile code.\n :return: True for success, False when process ended or when pc is at the code.\n \"\"\"\n mp = pwndbg.proc.mem_page\n start = mp.start\n end = mp.end\n\n if start <= pwndbg.gdblib.regs.pc < end:\n print(message.error(\"The pc is already at the binary objfile code. Not stepping.\"))\n return False\n\n while pwndbg.proc.alive:\n gdb.execute(\"si\", from_tty=False, to_string=False)\n\n addr = pwndbg.gdblib.regs.pc\n if start <= addr < end:\n return True\n\n return False\n\n\ndef break_on_next(address=None):\n address = address or pwndbg.gdblib.regs.pc\n ins = pwndbg.disasm.one(address)\n\n gdb.Breakpoint(\"*%#x\" % (ins.address + ins.size), temporary=True)\n gdb.execute(\"continue\", from_tty=False, to_string=True)\n", "path": "pwndbg/gdblib/next.py"}]}
3,955
635
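The core of the patch in the entry above is that `break_on_program_code` now tests the program counter against every executable mapping that belongs to the main binary, instead of the single page previously returned by `proc.mem_page`. A standalone sketch of that check, assuming objects with `start`/`end`/`execute`/`objfile` attributes like pwndbg's vmmap entries (this is not pwndbg API):

```python
def pc_in_program_code(pc, pages, exe_path):
    # Collect every executable mapping backed by the main binary, then
    # test whether pc falls inside any of them.
    exec_ranges = [
        (p.start, p.end) for p in pages if p.objfile == exe_path and p.execute
    ]
    return any(start <= pc < end for start, end in exec_ranges)
```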
gh_patches_debug_13026
rasdani/github-patches
git_diff
mampfes__hacs_waste_collection_schedule-561
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Specify local file path information in docs Neither - https://github.com/mampfes/hacs_waste_collection_schedule/blob/master/doc/faq.md nor - https://github.com/mampfes/hacs_waste_collection_schedule/blob/master/doc/source/ics.md#configuration-variables - https://github.com/mampfes/hacs_waste_collection_schedule/blob/master/doc/source/ics.md#local-file specify, where the file has to be stored. 1. Is "calendar.ics" meant to be the share `/config/calendar.ics`? 2. What would the `file:` parameter need to look like when the file is e. g. located in `/config/customfolder/calendar.ics` ? </issue> <code> [start of custom_components/waste_collection_schedule/waste_collection_schedule/source/ics.py] 1 import datetime 2 import logging 3 from pathlib import Path 4 5 import requests 6 from waste_collection_schedule import Collection # type: ignore[attr-defined] 7 from waste_collection_schedule.service.ICS import ICS 8 from waste_collection_schedule.service.ICS_v1 import ICS_v1 9 10 TITLE = "ICS" 11 DESCRIPTION = "Source for ICS based schedules." 12 URL = None 13 TEST_CASES = { 14 "Dortmund, Dudenstr. 5": { 15 "url": "https://www.edg.de/ical/kalender.ics?Strasse=Dudenstr.&Hausnummer=5&Erinnerung=-1&Abfallart=1,2,3,4" 16 }, 17 "Leipzig, Sandgrubenweg 27": { 18 "url": "https://stadtreinigung-leipzig.de/wir-kommen-zu-ihnen/abfallkalender/ical.ics?position_nos=38296&name=Sandgrubenweg%2027" 19 }, 20 "Ludwigsburg": { 21 "url": "https://www.avl-ludwigsburg.de/fileadmin/Files/Abfallkalender/ICS/Privat/Privat_{%Y}_Ossweil.ics" 22 }, 23 "Esslingen, Bahnhof": { 24 "url": "https://api.abfall.io/?kh=DaA02103019b46345f1998698563DaAd&t=ics&s=1a862df26f6943997cef90233877a4fe" 25 }, 26 "Test File": { 27 # Path is used here to allow to call the Source from any location. 28 # This is not required in a yaml configuration! 29 "file": str(Path(__file__).resolve().parents[1].joinpath("test/test.ics")) 30 }, 31 "Test File (recurring)": { 32 # Path is used here to allow to call the Source from any location. 33 # This is not required in a yaml configuration! 34 "file": str(Path(__file__).resolve().parents[1].joinpath("test/recurring.ics")) 35 }, 36 "München, Bahnstr. 
11": { 37 "url": "https://www.awm-muenchen.de/entsorgen/abfuhrkalender?tx_awmabfuhrkalender_abfuhrkalender%5Bhausnummer%5D=11&tx_awmabfuhrkalender_abfuhrkalender%5Bleerungszyklus%5D%5BB%5D=1%2F2%3BU&tx_awmabfuhrkalender_abfuhrkalender%5Bleerungszyklus%5D%5BP%5D=1%2F2%3BG&tx_awmabfuhrkalender_abfuhrkalender%5Bleerungszyklus%5D%5BR%5D=001%3BU&tx_awmabfuhrkalender_abfuhrkalender%5Bsection%5D=ics&tx_awmabfuhrkalender_abfuhrkalender%5Bsinglestandplatz%5D=false&tx_awmabfuhrkalender_abfuhrkalender%5Bstandplatzwahl%5D=true&tx_awmabfuhrkalender_abfuhrkalender%5Bstellplatz%5D%5Bbio%5D=70024507&tx_awmabfuhrkalender_abfuhrkalender%5Bstellplatz%5D%5Bpapier%5D=70024507&tx_awmabfuhrkalender_abfuhrkalender%5Bstellplatz%5D%5Brestmuell%5D=70024507&tx_awmabfuhrkalender_abfuhrkalender%5Bstrasse%5D=bahnstr.&tx_awmabfuhrkalender_abfuhrkalender%5Byear%5D={%Y}", 38 "version": 1, 39 }, 40 "Buxtehude, Am Berg": { 41 "url": "https://abfall.landkreis-stade.de/api_v2/collection_dates/1/ort/10/strasse/90/hausnummern/1/abfallarten/R02-R04-B02-D04-D12-P04-R12-R14-W0-R22-R24-R31/kalender.ics" 42 }, 43 # "Hausmüllinfo: ASR Chemnitz": { 44 # "url": "https://asc.hausmuell.info/ics/ics.php", 45 # "method": "POST", 46 # "params": { 47 # "hidden_id_egebiet": 439087, 48 # "input_ort": "Chemnitz", 49 # "input_str": "Straße der Nationen", 50 # "input_hnr": 2, 51 # "hidden_send_btn": "ics", 52 # # "hiddenYear": 2021, 53 # "hidden_id_ort": 10, 54 # "hidden_id_ortsteil": 0, 55 # "hidden_id_str": 17814, 56 # "hidden_id_hnr": 5538100, 57 # "hidden_kalenderart": "privat", 58 # "showBinsBio": "on", 59 # "showBinsRest": "on", 60 # "showBinsRest_rc": "on", 61 # "showBinsPapier": "on", 62 # "showBinsOrganic": "on", 63 # "showBinsXmas": "on", 64 # "showBinsDsd": "on", 65 # "showBinsProb": "on", 66 # }, 67 # "year_field": "hiddenYear", 68 # }, 69 "Abfall Zollernalbkreis, Ebingen": { 70 "url": "https://www.abfallkalender-zak.de", 71 "params": { 72 "city": "2,3,4", 73 "street": "3", 74 "types[]": [ 75 "restmuell", 76 "gelbersack", 77 "papiertonne", 78 "biomuell", 79 "gruenabfall", 80 "schadstoffsammlung", 81 "altpapiersammlung", 82 "schrottsammlung", 83 "weihnachtsbaeume", 84 "elektrosammlung", 85 ], 86 "go_ics": "Download", 87 }, 88 "year_field": "year", 89 }, 90 "Detmold": { 91 "url": "https://abfuhrkalender.detmold.de/icsmaker.php", 92 "method": "GET", 93 "params": {"strid": 338}, 94 "year_field": "year", 95 }, 96 "EAW Rheingau Taunus": { 97 "url": "https://www.eaw-rheingau-taunus.de/abfallsammlung/abfuhrtermine/feed.ics?tx_vierwdeaw_garbagecalendarics%5Baction%5D=ics&tx_vierwdeaw_garbagecalendarics%5Bcontroller%5D=GarbageCalendar&tx_vierwdeaw_garbagecalendarics%5Bstreet%5D=38", 98 "split_at": ",", 99 }, 100 "Recollect, Ottawa": { 101 "url": "https://recollect.a.ssl.fastly.net/api/places/BCCDF30E-578B-11E4-AD38-5839C200407A/services/208/events.en.ics", 102 "split_at": "\\, [and ]*", 103 }, 104 "Frankfurt am Main, Achenbachstrasse 3": { 105 "url": "https://www.fes-frankfurt.de/abfallkalender/QWNoZW5iYWNoc3RyLnwzfDYwNTk2.ics" 106 }, 107 "Erlensee, Am Haspel": { 108 "url": "https://sperrmuell.erlensee.de/?type=reminder", 109 "method": "POST", 110 "params": { 111 "street": 8, 112 "eventType[]": [27, 23, 19, 20, 21, 24, 22, 25, 26], 113 "timeframe": 23, 114 "download": "ical", 115 }, 116 }, 117 } 118 119 120 HEADERS = {"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64)"} 121 _LOGGER = logging.getLogger(__name__) 122 123 124 class Source: 125 def __init__( 126 self, 127 url=None, 128 file=None, 129 offset=None, 130 params=None, 131 
year_field=None, 132 method="GET", 133 regex=None, 134 split_at=None, 135 version=2, 136 verify_ssl=True, 137 ): 138 self._url = url 139 self._file = file 140 if bool(self._url is not None) == bool(self._file is not None): 141 raise RuntimeError("Specify either url or file") 142 if version == 1: 143 self._ics = ICS_v1(offset=offset, split_at=split_at, regex=regex) 144 else: 145 self._ics = ICS(offset=offset, split_at=split_at, regex=regex) 146 self._params = params 147 self._year_field = year_field # replace this field in params with current year 148 self._method = method # The method to send the params 149 self._verify_ssl = verify_ssl 150 151 def fetch(self): 152 if self._url is not None: 153 if "{%Y}" in self._url or self._year_field is not None: 154 # url contains wildcard or params contains year field 155 now = datetime.datetime.now() 156 157 # replace year in url 158 url = self._url.replace("{%Y}", str(now.year)) 159 160 # replace year in params 161 if self._year_field is not None: 162 if self._params is None: 163 raise RuntimeError("year_field specified without params") 164 self._params[self._year_field] = str(now.year) 165 166 entries = self.fetch_url(url, self._params) 167 168 if now.month == 12: 169 # also get data for next year if we are already in december 170 url = self._url.replace("{%Y}", str(now.year + 1)) 171 if self._year_field is not None: 172 self._params[self._year_field] = str(now.year + 1) 173 174 try: 175 entries.extend(self.fetch_url(url, self._params)) 176 except Exception: 177 # ignore if fetch for next year fails 178 pass 179 return entries 180 else: 181 return self.fetch_url(self._url, self._params) 182 elif self._file is not None: 183 return self.fetch_file(self._file) 184 185 def fetch_url(self, url, params=None): 186 # get ics file 187 if self._method == "GET": 188 r = requests.get( 189 url, params=params, headers=HEADERS, verify=self._verify_ssl 190 ) 191 elif self._method == "POST": 192 r = requests.post( 193 url, data=params, headers=HEADERS, verify=self._verify_ssl 194 ) 195 else: 196 raise RuntimeError( 197 "Error: unknown method to fetch URL, use GET or POST; got {self._method}" 198 ) 199 r.raise_for_status() 200 201 r.encoding = "utf-8" # requests doesn't guess the encoding correctly 202 return self._convert(r.text) 203 204 def fetch_file(self, file): 205 f = open(file) 206 return self._convert(f.read()) 207 208 def _convert(self, data): 209 dates = self._ics.convert(data) 210 211 entries = [] 212 for d in dates: 213 entries.append(Collection(d[0], d[1])) 214 return entries 215 [end of custom_components/waste_collection_schedule/waste_collection_schedule/source/ics.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/ics.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/ics.py --- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/ics.py +++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/ics.py @@ -1,5 +1,6 @@ import datetime import logging +from os import getcwd from pathlib import Path import requests @@ -202,7 +203,11 @@ return self._convert(r.text) def fetch_file(self, file): - f = open(file) + try: + f = open(file) + except FileNotFoundError as e: + _LOGGER.error(f"Working directory: '{getcwd()}'") + raise return self._convert(f.read()) def _convert(self, data):
{"golden_diff": "diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/ics.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/ics.py\n--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/ics.py\n+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/ics.py\n@@ -1,5 +1,6 @@\n import datetime\n import logging\n+from os import getcwd\n from pathlib import Path\n \n import requests\n@@ -202,7 +203,11 @@\n return self._convert(r.text)\n \n def fetch_file(self, file):\n- f = open(file)\n+ try:\n+ f = open(file)\n+ except FileNotFoundError as e:\n+ _LOGGER.error(f\"Working directory: '{getcwd()}'\")\n+ raise\n return self._convert(f.read())\n \n def _convert(self, data):\n", "issue": "Specify local file path information in docs\nNeither\r\n- https://github.com/mampfes/hacs_waste_collection_schedule/blob/master/doc/faq.md\r\n\r\nnor\r\n- https://github.com/mampfes/hacs_waste_collection_schedule/blob/master/doc/source/ics.md#configuration-variables\r\n- https://github.com/mampfes/hacs_waste_collection_schedule/blob/master/doc/source/ics.md#local-file\r\n\r\nspecify, where the file has to be stored.\r\n\r\n1. Is \"calendar.ics\" meant to be the share `/config/calendar.ics`?\r\n2. What would the `file:` parameter need to look like when the file is e. g. located in `/config/customfolder/calendar.ics` ?\n", "before_files": [{"content": "import datetime\nimport logging\nfrom pathlib import Path\n\nimport requests\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\nfrom waste_collection_schedule.service.ICS import ICS\nfrom waste_collection_schedule.service.ICS_v1 import ICS_v1\n\nTITLE = \"ICS\"\nDESCRIPTION = \"Source for ICS based schedules.\"\nURL = None\nTEST_CASES = {\n \"Dortmund, Dudenstr. 5\": {\n \"url\": \"https://www.edg.de/ical/kalender.ics?Strasse=Dudenstr.&Hausnummer=5&Erinnerung=-1&Abfallart=1,2,3,4\"\n },\n \"Leipzig, Sandgrubenweg 27\": {\n \"url\": \"https://stadtreinigung-leipzig.de/wir-kommen-zu-ihnen/abfallkalender/ical.ics?position_nos=38296&name=Sandgrubenweg%2027\"\n },\n \"Ludwigsburg\": {\n \"url\": \"https://www.avl-ludwigsburg.de/fileadmin/Files/Abfallkalender/ICS/Privat/Privat_{%Y}_Ossweil.ics\"\n },\n \"Esslingen, Bahnhof\": {\n \"url\": \"https://api.abfall.io/?kh=DaA02103019b46345f1998698563DaAd&t=ics&s=1a862df26f6943997cef90233877a4fe\"\n },\n \"Test File\": {\n # Path is used here to allow to call the Source from any location.\n # This is not required in a yaml configuration!\n \"file\": str(Path(__file__).resolve().parents[1].joinpath(\"test/test.ics\"))\n },\n \"Test File (recurring)\": {\n # Path is used here to allow to call the Source from any location.\n # This is not required in a yaml configuration!\n \"file\": str(Path(__file__).resolve().parents[1].joinpath(\"test/recurring.ics\"))\n },\n \"M\u00fcnchen, Bahnstr. 
11\": {\n \"url\": \"https://www.awm-muenchen.de/entsorgen/abfuhrkalender?tx_awmabfuhrkalender_abfuhrkalender%5Bhausnummer%5D=11&tx_awmabfuhrkalender_abfuhrkalender%5Bleerungszyklus%5D%5BB%5D=1%2F2%3BU&tx_awmabfuhrkalender_abfuhrkalender%5Bleerungszyklus%5D%5BP%5D=1%2F2%3BG&tx_awmabfuhrkalender_abfuhrkalender%5Bleerungszyklus%5D%5BR%5D=001%3BU&tx_awmabfuhrkalender_abfuhrkalender%5Bsection%5D=ics&tx_awmabfuhrkalender_abfuhrkalender%5Bsinglestandplatz%5D=false&tx_awmabfuhrkalender_abfuhrkalender%5Bstandplatzwahl%5D=true&tx_awmabfuhrkalender_abfuhrkalender%5Bstellplatz%5D%5Bbio%5D=70024507&tx_awmabfuhrkalender_abfuhrkalender%5Bstellplatz%5D%5Bpapier%5D=70024507&tx_awmabfuhrkalender_abfuhrkalender%5Bstellplatz%5D%5Brestmuell%5D=70024507&tx_awmabfuhrkalender_abfuhrkalender%5Bstrasse%5D=bahnstr.&tx_awmabfuhrkalender_abfuhrkalender%5Byear%5D={%Y}\",\n \"version\": 1,\n },\n \"Buxtehude, Am Berg\": {\n \"url\": \"https://abfall.landkreis-stade.de/api_v2/collection_dates/1/ort/10/strasse/90/hausnummern/1/abfallarten/R02-R04-B02-D04-D12-P04-R12-R14-W0-R22-R24-R31/kalender.ics\"\n },\n # \"Hausm\u00fcllinfo: ASR Chemnitz\": {\n # \"url\": \"https://asc.hausmuell.info/ics/ics.php\",\n # \"method\": \"POST\",\n # \"params\": {\n # \"hidden_id_egebiet\": 439087,\n # \"input_ort\": \"Chemnitz\",\n # \"input_str\": \"Stra\u00dfe der Nationen\",\n # \"input_hnr\": 2,\n # \"hidden_send_btn\": \"ics\",\n # # \"hiddenYear\": 2021,\n # \"hidden_id_ort\": 10,\n # \"hidden_id_ortsteil\": 0,\n # \"hidden_id_str\": 17814,\n # \"hidden_id_hnr\": 5538100,\n # \"hidden_kalenderart\": \"privat\",\n # \"showBinsBio\": \"on\",\n # \"showBinsRest\": \"on\",\n # \"showBinsRest_rc\": \"on\",\n # \"showBinsPapier\": \"on\",\n # \"showBinsOrganic\": \"on\",\n # \"showBinsXmas\": \"on\",\n # \"showBinsDsd\": \"on\",\n # \"showBinsProb\": \"on\",\n # },\n # \"year_field\": \"hiddenYear\",\n # },\n \"Abfall Zollernalbkreis, Ebingen\": {\n \"url\": \"https://www.abfallkalender-zak.de\",\n \"params\": {\n \"city\": \"2,3,4\",\n \"street\": \"3\",\n \"types[]\": [\n \"restmuell\",\n \"gelbersack\",\n \"papiertonne\",\n \"biomuell\",\n \"gruenabfall\",\n \"schadstoffsammlung\",\n \"altpapiersammlung\",\n \"schrottsammlung\",\n \"weihnachtsbaeume\",\n \"elektrosammlung\",\n ],\n \"go_ics\": \"Download\",\n },\n \"year_field\": \"year\",\n },\n \"Detmold\": {\n \"url\": \"https://abfuhrkalender.detmold.de/icsmaker.php\",\n \"method\": \"GET\",\n \"params\": {\"strid\": 338},\n \"year_field\": \"year\",\n },\n \"EAW Rheingau Taunus\": {\n \"url\": \"https://www.eaw-rheingau-taunus.de/abfallsammlung/abfuhrtermine/feed.ics?tx_vierwdeaw_garbagecalendarics%5Baction%5D=ics&tx_vierwdeaw_garbagecalendarics%5Bcontroller%5D=GarbageCalendar&tx_vierwdeaw_garbagecalendarics%5Bstreet%5D=38\",\n \"split_at\": \",\",\n },\n \"Recollect, Ottawa\": {\n \"url\": \"https://recollect.a.ssl.fastly.net/api/places/BCCDF30E-578B-11E4-AD38-5839C200407A/services/208/events.en.ics\",\n \"split_at\": \"\\\\, [and ]*\",\n },\n \"Frankfurt am Main, Achenbachstrasse 3\": {\n \"url\": \"https://www.fes-frankfurt.de/abfallkalender/QWNoZW5iYWNoc3RyLnwzfDYwNTk2.ics\"\n },\n \"Erlensee, Am Haspel\": {\n \"url\": \"https://sperrmuell.erlensee.de/?type=reminder\",\n \"method\": \"POST\",\n \"params\": {\n \"street\": 8,\n \"eventType[]\": [27, 23, 19, 20, 21, 24, 22, 25, 26],\n \"timeframe\": 23,\n \"download\": \"ical\",\n },\n },\n}\n\n\nHEADERS = {\"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64)\"}\n_LOGGER = logging.getLogger(__name__)\n\n\nclass Source:\n 
def __init__(\n self,\n url=None,\n file=None,\n offset=None,\n params=None,\n year_field=None,\n method=\"GET\",\n regex=None,\n split_at=None,\n version=2,\n verify_ssl=True,\n ):\n self._url = url\n self._file = file\n if bool(self._url is not None) == bool(self._file is not None):\n raise RuntimeError(\"Specify either url or file\")\n if version == 1:\n self._ics = ICS_v1(offset=offset, split_at=split_at, regex=regex)\n else:\n self._ics = ICS(offset=offset, split_at=split_at, regex=regex)\n self._params = params\n self._year_field = year_field # replace this field in params with current year\n self._method = method # The method to send the params\n self._verify_ssl = verify_ssl\n\n def fetch(self):\n if self._url is not None:\n if \"{%Y}\" in self._url or self._year_field is not None:\n # url contains wildcard or params contains year field\n now = datetime.datetime.now()\n\n # replace year in url\n url = self._url.replace(\"{%Y}\", str(now.year))\n\n # replace year in params\n if self._year_field is not None:\n if self._params is None:\n raise RuntimeError(\"year_field specified without params\")\n self._params[self._year_field] = str(now.year)\n\n entries = self.fetch_url(url, self._params)\n\n if now.month == 12:\n # also get data for next year if we are already in december\n url = self._url.replace(\"{%Y}\", str(now.year + 1))\n if self._year_field is not None:\n self._params[self._year_field] = str(now.year + 1)\n\n try:\n entries.extend(self.fetch_url(url, self._params))\n except Exception:\n # ignore if fetch for next year fails\n pass\n return entries\n else:\n return self.fetch_url(self._url, self._params)\n elif self._file is not None:\n return self.fetch_file(self._file)\n\n def fetch_url(self, url, params=None):\n # get ics file\n if self._method == \"GET\":\n r = requests.get(\n url, params=params, headers=HEADERS, verify=self._verify_ssl\n )\n elif self._method == \"POST\":\n r = requests.post(\n url, data=params, headers=HEADERS, verify=self._verify_ssl\n )\n else:\n raise RuntimeError(\n \"Error: unknown method to fetch URL, use GET or POST; got {self._method}\"\n )\n r.raise_for_status()\n\n r.encoding = \"utf-8\" # requests doesn't guess the encoding correctly\n return self._convert(r.text)\n\n def fetch_file(self, file):\n f = open(file)\n return self._convert(f.read())\n\n def _convert(self, data):\n dates = self._ics.convert(data)\n\n entries = []\n for d in dates:\n entries.append(Collection(d[0], d[1]))\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/ics.py"}]}
3,918
193
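The patch in the entry above does not answer the documentation question directly; instead it makes the failure easier to diagnose by logging the working directory when the configured `file:` cannot be opened, since a relative path is resolved against exactly that directory. The same pattern in isolation (the function name is illustrative, not the integration's API):

```python
import logging
from os import getcwd

_LOGGER = logging.getLogger(__name__)


def read_local_ics(path):
    # A relative path is resolved against the current working directory,
    # so surface that directory when the file cannot be found.
    try:
        with open(path) as f:
            return f.read()
    except FileNotFoundError:
        _LOGGER.error("Working directory: '%s'", getcwd())
        raise
```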
gh_patches_debug_135
rasdani/github-patches
git_diff
encode__httpx-691
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Version 0.10.0 Let's get squared away what we need for Version 0.10.0. The key here is we need to make sure we've sorted out any API that'd block us from reintroducing the sync API, so... * [x] Let's drop `.read()` from `Request` - It's not documented anywhere, not *actually* required by users, and the content is available on `request.stream` if absolutely needed. #679 * [x] We need `Response.read()` and `Response.close()` to be `Response.aread()` and `Response.aclose()`. The only point where this is relevant is users performing conditional reads inside a `with httpx.stream(method, url) as response` block. #674 * [x] We ought to have `Client.close()` become `Client.aclose()` for consistency. #675 * [x] Good point to get #617 in. * [x] We'll need to change `response.next()`. We could *either* underspecify the return type, and allow it to be either sync or async depending on the context, *or* we use `response.anext()` and `response.next()`. #676 * [ ] ~Good point to address~ #656. * [x] *Potentially* we could introduce an `httpx.AsyncClient` synonm for `httpx.Client`, and advise our users to switch towards that usage, so that there's no breaking changes for them once we fully reintroduce a sync API all the way through. #680 </issue> <code> [start of httpx/__version__.py] 1 __title__ = "httpx" 2 __description__ = "A next generation HTTP client, for Python 3." 3 __version__ = "0.9.5" 4 [end of httpx/__version__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/httpx/__version__.py b/httpx/__version__.py --- a/httpx/__version__.py +++ b/httpx/__version__.py @@ -1,3 +1,3 @@ __title__ = "httpx" __description__ = "A next generation HTTP client, for Python 3." -__version__ = "0.9.5" +__version__ = "0.10.0"
{"golden_diff": "diff --git a/httpx/__version__.py b/httpx/__version__.py\n--- a/httpx/__version__.py\n+++ b/httpx/__version__.py\n@@ -1,3 +1,3 @@\n __title__ = \"httpx\"\n __description__ = \"A next generation HTTP client, for Python 3.\"\n-__version__ = \"0.9.5\"\n+__version__ = \"0.10.0\"\n", "issue": "Version 0.10.0\nLet's get squared away what we need for Version 0.10.0.\r\n\r\nThe key here is we need to make sure we've sorted out any API that'd block us from reintroducing the sync API, so...\r\n\r\n* [x] Let's drop `.read()` from `Request` - It's not documented anywhere, not *actually* required by users, and the content is available on `request.stream` if absolutely needed. #679\r\n* [x] We need `Response.read()` and `Response.close()` to be `Response.aread()` and `Response.aclose()`. The only point where this is relevant is users performing conditional reads inside a `with httpx.stream(method, url) as response` block. #674\r\n* [x] We ought to have `Client.close()` become `Client.aclose()` for consistency. #675\r\n* [x] Good point to get #617 in.\r\n* [x] We'll need to change `response.next()`. We could *either* underspecify the return type, and allow it to be either sync or async depending on the context, *or* we use `response.anext()` and `response.next()`. #676\r\n* [ ] ~Good point to address~ #656.\r\n* [x] *Potentially* we could introduce an `httpx.AsyncClient` synonm for `httpx.Client`, and advise our users to switch towards that usage, so that there's no breaking changes for them once we fully reintroduce a sync API all the way through. #680\n", "before_files": [{"content": "__title__ = \"httpx\"\n__description__ = \"A next generation HTTP client, for Python 3.\"\n__version__ = \"0.9.5\"\n", "path": "httpx/__version__.py"}]}
921
95
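Note on the httpx record above: the golden diff only changes a version string, but the issue text it carries is a checklist of 0.10.0 API renames (aread, aclose, AsyncClient). The sketch below illustrates what those renames mean for calling code; it is based solely on that checklist, the exact 0.10.0-era signatures are not re-verified here, and the URL is a placeholder.

```python
import asyncio

import httpx


async def fetch(url: str) -> bytes:
    # httpx.AsyncClient is the synonym the checklist proposes alongside httpx.Client.
    client = httpx.AsyncClient()
    try:
        async with client.stream("GET", url) as response:
            # Conditional reads inside a stream block use the renamed coroutine:
            # response.aread() rather than the old response.read().
            await response.aread()
            return response.content
    finally:
        # Client.close() becomes Client.aclose() per the checklist.
        await client.aclose()


if __name__ == "__main__":
    asyncio.run(fetch("https://example.org"))  # illustrative URL only
```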
gh_patches_debug_674
rasdani/github-patches
git_diff
pyodide__pyodide-3562
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Error about `--user` and `--target` flag when installing xbuildenv I sometimes get following error while installing xbuild environment: ```bash $ pyodide build . Downloading xbuild environment Installing xbuild environment stderr: ERROR: Can not combine '--user' and '--target' [notice] A new release of pip available: 22.3.1 -> 23.0 [notice] To update, run: /home/gitpod/.pyenv/versions/3.10.2/bin/python -m pip install --upgrade pip ``` It happens here, which installs host site packages: https://github.com/pyodide/pyodide/blob/7cc1058358242a5a9012edbb8163d86a860a1a28/pyodide-build/pyodide_build/install_xbuildenv.py#L50-L57 I think we need to add `--no-user` flag explicitly to prevent this error. </issue> <code> [start of pyodide-build/pyodide_build/install_xbuildenv.py] 1 import argparse 2 import json 3 import shutil 4 import subprocess 5 from pathlib import Path 6 from urllib.request import urlopen, urlretrieve 7 8 from .common import exit_with_stdio, get_make_flag, get_pyodide_root 9 from .create_pypa_index import create_pypa_index 10 from .logger import logger 11 12 13 def make_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: 14 parser.description = ( 15 "Install xbuild env.\n\n" 16 "The installed environment is the same as the one that would result from\n" 17 "`PYODIDE_PACKAGES='scipy' make` except that it is much faster.\n" 18 "The goal is to enable out-of-tree builds for binary packages that depend\n" 19 "on numpy or scipy.\n" 20 "Note: this is a private endpoint that should not be used outside of the Pyodide Makefile." 21 ) 22 parser.add_argument("--download", action="store_true", help="Download xbuild env") 23 parser.add_argument("xbuildenv", type=str, nargs=1) 24 return parser 25 26 27 def download_xbuildenv(version: str, xbuildenv_path: Path) -> None: 28 from shutil import rmtree, unpack_archive 29 from tempfile import NamedTemporaryFile 30 31 logger.info("Downloading xbuild environment") 32 rmtree(xbuildenv_path, ignore_errors=True) 33 with NamedTemporaryFile(suffix=".tar") as f: 34 urlretrieve( 35 f"https://github.com/pyodide/pyodide/releases/download/{version}/xbuildenv-{version}.tar.bz2", 36 f.name, 37 ) 38 unpack_archive(f.name, xbuildenv_path) 39 40 41 def install_xbuildenv(version: str, xbuildenv_path: Path) -> None: 42 logger.info("Installing xbuild environment") 43 xbuildenv_path = xbuildenv_path / "xbuildenv" 44 pyodide_root = get_pyodide_root() 45 xbuildenv_root = xbuildenv_path / "pyodide-root" 46 host_site_packages = xbuildenv_root / Path( 47 get_make_flag("HOSTSITEPACKAGES") 48 ).relative_to(pyodide_root) 49 host_site_packages.mkdir(exist_ok=True, parents=True) 50 result = subprocess.run( 51 [ 52 "pip", 53 "install", 54 "-t", 55 host_site_packages, 56 "-r", 57 xbuildenv_path / "requirements.txt", 58 ], 59 capture_output=True, 60 encoding="utf8", 61 ) 62 if result.returncode != 0: 63 exit_with_stdio(result) 64 # Copy the site-packages-extras (coming from the cross-build-files meta.yaml 65 # key) over the site-packages directory with the newly installed packages. 
66 shutil.copytree( 67 xbuildenv_path / "site-packages-extras", host_site_packages, dirs_exist_ok=True 68 ) 69 cdn_base = f"https://cdn.jsdelivr.net/pyodide/v{version}/full/" 70 if (xbuildenv_root / "repodata.json").exists(): 71 repodata_bytes = (xbuildenv_root / "repodata.json").read_bytes() 72 else: 73 repodata_url = cdn_base + "repodata.json" 74 with urlopen(repodata_url) as response: 75 repodata_bytes = response.read() 76 repodata = json.loads(repodata_bytes) 77 version = repodata["info"]["version"] 78 create_pypa_index(repodata["packages"], xbuildenv_root, cdn_base) 79 80 81 def main(args: argparse.Namespace) -> None: 82 from . import __version__ 83 84 xbuildenv_path = Path(args.xbuildenv[0]) 85 version = __version__ 86 if args.download: 87 download_xbuildenv(version, xbuildenv_path) 88 install_xbuildenv(version, xbuildenv_path) 89 [end of pyodide-build/pyodide_build/install_xbuildenv.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pyodide-build/pyodide_build/install_xbuildenv.py b/pyodide-build/pyodide_build/install_xbuildenv.py
--- a/pyodide-build/pyodide_build/install_xbuildenv.py
+++ b/pyodide-build/pyodide_build/install_xbuildenv.py
@@ -51,6 +51,7 @@
         [
             "pip",
             "install",
+            "--no-user",
             "-t",
             host_site_packages,
             "-r",
{"golden_diff": "diff --git a/pyodide-build/pyodide_build/install_xbuildenv.py b/pyodide-build/pyodide_build/install_xbuildenv.py\n--- a/pyodide-build/pyodide_build/install_xbuildenv.py\n+++ b/pyodide-build/pyodide_build/install_xbuildenv.py\n@@ -51,6 +51,7 @@\n [\n \"pip\",\n \"install\",\n+ \"--no-user\",\n \"-t\",\n host_site_packages,\n \"-r\",\n", "issue": "Error about `--user` and `--target` flag when installing xbuildenv\nI sometimes get following error while installing xbuild environment:\r\n\r\n```bash\r\n$ pyodide build .\r\nDownloading xbuild environment\r\nInstalling xbuild environment\r\n stderr:\r\n ERROR: Can not combine '--user' and '--target'\r\n\r\n [notice] A new release of pip available: 22.3.1 -> 23.0\r\n [notice] To update, run: /home/gitpod/.pyenv/versions/3.10.2/bin/python -m pip install --upgrade pip\r\n```\r\n\r\nIt happens here, which installs host site packages:\r\n\r\nhttps://github.com/pyodide/pyodide/blob/7cc1058358242a5a9012edbb8163d86a860a1a28/pyodide-build/pyodide_build/install_xbuildenv.py#L50-L57\r\n\r\nI think we need to add `--no-user` flag explicitly to prevent this error.\n", "before_files": [{"content": "import argparse\nimport json\nimport shutil\nimport subprocess\nfrom pathlib import Path\nfrom urllib.request import urlopen, urlretrieve\n\nfrom .common import exit_with_stdio, get_make_flag, get_pyodide_root\nfrom .create_pypa_index import create_pypa_index\nfrom .logger import logger\n\n\ndef make_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:\n parser.description = (\n \"Install xbuild env.\\n\\n\"\n \"The installed environment is the same as the one that would result from\\n\"\n \"`PYODIDE_PACKAGES='scipy' make` except that it is much faster.\\n\"\n \"The goal is to enable out-of-tree builds for binary packages that depend\\n\"\n \"on numpy or scipy.\\n\"\n \"Note: this is a private endpoint that should not be used outside of the Pyodide Makefile.\"\n )\n parser.add_argument(\"--download\", action=\"store_true\", help=\"Download xbuild env\")\n parser.add_argument(\"xbuildenv\", type=str, nargs=1)\n return parser\n\n\ndef download_xbuildenv(version: str, xbuildenv_path: Path) -> None:\n from shutil import rmtree, unpack_archive\n from tempfile import NamedTemporaryFile\n\n logger.info(\"Downloading xbuild environment\")\n rmtree(xbuildenv_path, ignore_errors=True)\n with NamedTemporaryFile(suffix=\".tar\") as f:\n urlretrieve(\n f\"https://github.com/pyodide/pyodide/releases/download/{version}/xbuildenv-{version}.tar.bz2\",\n f.name,\n )\n unpack_archive(f.name, xbuildenv_path)\n\n\ndef install_xbuildenv(version: str, xbuildenv_path: Path) -> None:\n logger.info(\"Installing xbuild environment\")\n xbuildenv_path = xbuildenv_path / \"xbuildenv\"\n pyodide_root = get_pyodide_root()\n xbuildenv_root = xbuildenv_path / \"pyodide-root\"\n host_site_packages = xbuildenv_root / Path(\n get_make_flag(\"HOSTSITEPACKAGES\")\n ).relative_to(pyodide_root)\n host_site_packages.mkdir(exist_ok=True, parents=True)\n result = subprocess.run(\n [\n \"pip\",\n \"install\",\n \"-t\",\n host_site_packages,\n \"-r\",\n xbuildenv_path / \"requirements.txt\",\n ],\n capture_output=True,\n encoding=\"utf8\",\n )\n if result.returncode != 0:\n exit_with_stdio(result)\n # Copy the site-packages-extras (coming from the cross-build-files meta.yaml\n # key) over the site-packages directory with the newly installed packages.\n shutil.copytree(\n xbuildenv_path / \"site-packages-extras\", host_site_packages, dirs_exist_ok=True\n )\n cdn_base = 
f\"https://cdn.jsdelivr.net/pyodide/v{version}/full/\"\n if (xbuildenv_root / \"repodata.json\").exists():\n repodata_bytes = (xbuildenv_root / \"repodata.json\").read_bytes()\n else:\n repodata_url = cdn_base + \"repodata.json\"\n with urlopen(repodata_url) as response:\n repodata_bytes = response.read()\n repodata = json.loads(repodata_bytes)\n version = repodata[\"info\"][\"version\"]\n create_pypa_index(repodata[\"packages\"], xbuildenv_root, cdn_base)\n\n\ndef main(args: argparse.Namespace) -> None:\n from . import __version__\n\n xbuildenv_path = Path(args.xbuildenv[0])\n version = __version__\n if args.download:\n download_xbuildenv(version, xbuildenv_path)\n install_xbuildenv(version, xbuildenv_path)\n", "path": "pyodide-build/pyodide_build/install_xbuildenv.py"}]}
1,741
105
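Note on the pyodide record above: the fix resolves the "Can not combine '--user' and '--target'" failure by passing `--no-user` explicitly, so an inherited user-install default (for example `PIP_USER=1` or `user = true` in pip.conf) cannot clash with `-t`. Below is a stand-alone sketch of the corrected invocation; the helper name and the RuntimeError wrapper are illustrative and not the record's `exit_with_stdio` flow.

```python
import subprocess
from pathlib import Path


def install_host_requirements(host_site_packages: Path, requirements_txt: Path) -> None:
    """Install requirements into a --target directory without tripping pip's user mode."""
    host_site_packages.mkdir(exist_ok=True, parents=True)
    result = subprocess.run(
        [
            "pip",
            "install",
            "--no-user",  # explicit, so a user-site default cannot conflict with -t
            "-t",
            str(host_site_packages),
            "-r",
            str(requirements_txt),
        ],
        capture_output=True,
        encoding="utf8",
    )
    if result.returncode != 0:
        raise RuntimeError("pip failed:\n{}\n{}".format(result.stdout, result.stderr))
```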
gh_patches_debug_16969
rasdani/github-patches
git_diff
cal-itp__benefits-430
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Update rendering of Agency index page ## Background In tandem with #366, this ticket moves the bulk of the contextual information from the application index to the agency index. We need more flexibility on this page than the current viewmodel / page template allows for. ![image](https://user-images.githubusercontent.com/1783439/160712671-c67eee3c-7aef-4bba-bf95-c317699b7de3.png) ## Tasks * [x] Create/update text entries in English and Spanish for each paragraph, title, etc. * [x] Can we embed the **bold** font style in the translation? * [x] Create text entries for a pre-button label / subhead * [x] Create a new template `core/agency_index.html` to use for this page * [x] In the view use the new template </issue> <code> [start of benefits/core/views.py] 1 """ 2 The core application: view definition for the root of the webapp. 3 """ 4 from django.http import HttpResponseBadRequest, HttpResponseNotFound, HttpResponseServerError 5 from django.template import loader 6 from django.template.response import TemplateResponse 7 from django.urls import reverse 8 from django.utils.translation import gettext as _ 9 10 from . import middleware, models, session, viewmodels 11 12 13 def PageTemplateResponse(request, page_vm): 14 """Helper returns a TemplateResponse using the common page template.""" 15 return TemplateResponse(request, "core/page.html", page_vm.context_dict()) 16 17 18 def _index_content_title(): 19 """Helper returns the content title for the common index page.""" 20 return _("core.pages.index.content_title") 21 22 23 def _index_paragraphs(): 24 """Helper returns the content paragraphs for the common index page.""" 25 return [_("core.pages.index.p[0]"), _("core.pages.index.p[1]"), _("core.pages.index.p[2]")] 26 27 28 def _index_url(): 29 """Helper computes the index url path.""" 30 return reverse("core:index") 31 32 33 @middleware.pageview_decorator 34 def index(request): 35 """View handler for the main entry page.""" 36 session.reset(request) 37 38 # generate a button to the landing page for each active agency 39 agencies = models.TransitAgency.all_active() 40 buttons = [viewmodels.Button.outline_primary(text=a.short_name, url=a.index_url) for a in agencies] 41 buttons[0].classes.append("mt-3") 42 buttons[0].label = _("core.pages.index.chooseprovider") 43 44 page = viewmodels.Page( 45 content_title=_index_content_title(), 46 paragraphs=_index_paragraphs(), 47 buttons=buttons, 48 classes="home", 49 ) 50 51 return PageTemplateResponse(request, page) 52 53 54 @middleware.pageview_decorator 55 def agency_index(request, agency): 56 """View handler for an agency entry page.""" 57 session.reset(request) 58 session.update(request, agency=agency, origin=agency.index_url) 59 60 page = viewmodels.Page( 61 content_title=_index_content_title(), 62 paragraphs=_index_paragraphs(), 63 button=viewmodels.Button.primary(text=_("core.pages.index.continue"), url=reverse("eligibility:index")), 64 classes="home", 65 ) 66 67 return PageTemplateResponse(request, page) 68 69 70 @middleware.pageview_decorator 71 def help(request): 72 """View handler for the help page.""" 73 if session.active_agency(request): 74 agency = session.agency(request) 75 buttons = viewmodels.Button.agency_contact_links(agency) 76 else: 77 buttons = [btn for a in models.TransitAgency.all_active() for btn in viewmodels.Button.agency_contact_links(a)] 78 79 buttons.append(viewmodels.Button.home(request, _("core.buttons.back"))) 80 81 
page = viewmodels.Page( 82 title=_("core.buttons.help"), 83 content_title=_("core.buttons.help"), 84 buttons=buttons, 85 classes="text-lg-center", 86 noimage=True, 87 ) 88 89 return TemplateResponse(request, "core/help.html", page.context_dict()) 90 91 92 @middleware.pageview_decorator 93 def bad_request(request, exception, template_name="400.html"): 94 """View handler for HTTP 400 Bad Request responses.""" 95 if session.active_agency(request): 96 session.update(request, origin=session.agency(request).index_url) 97 else: 98 session.update(request, origin=_index_url()) 99 100 home = viewmodels.Button.home(request) 101 page = viewmodels.ErrorPage.error(button=home) 102 t = loader.get_template(template_name) 103 104 return HttpResponseBadRequest(t.render(page.context_dict())) 105 106 107 @middleware.pageview_decorator 108 def csrf_failure(request, reason): 109 """ 110 View handler for CSRF_FAILURE_VIEW with custom data. 111 """ 112 if session.active_agency(request): 113 session.update(request, origin=session.agency(request).index_url) 114 else: 115 session.update(request, origin=_index_url()) 116 117 home = viewmodels.Button.home(request) 118 page = viewmodels.ErrorPage.not_found(button=home, path=request.path) 119 t = loader.get_template("400.html") 120 121 return HttpResponseNotFound(t.render(page.context_dict())) 122 123 124 @middleware.pageview_decorator 125 def page_not_found(request, exception, template_name="404.html"): 126 """View handler for HTTP 404 Not Found responses.""" 127 if session.active_agency(request): 128 session.update(request, origin=session.agency(request).index_url) 129 else: 130 session.update(request, origin=_index_url()) 131 132 home = viewmodels.Button.home(request) 133 page = viewmodels.ErrorPage.not_found(button=home, path=request.path) 134 t = loader.get_template(template_name) 135 136 return HttpResponseNotFound(t.render(page.context_dict())) 137 138 139 @middleware.pageview_decorator 140 def server_error(request, template_name="500.html"): 141 """View handler for HTTP 500 Server Error responses.""" 142 if session.active_agency(request): 143 session.update(request, origin=session.agency(request).index_url) 144 else: 145 session.update(request, origin=_index_url()) 146 147 home = viewmodels.Button.home(request) 148 page = viewmodels.ErrorPage.error(button=home) 149 t = loader.get_template(template_name) 150 151 return HttpResponseServerError(t.render(page.context_dict())) 152 [end of benefits/core/views.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/benefits/core/views.py b/benefits/core/views.py
--- a/benefits/core/views.py
+++ b/benefits/core/views.py
@@ -57,14 +57,19 @@
     session.reset(request)
     session.update(request, agency=agency, origin=agency.index_url)
 
+    button = viewmodels.Button.primary(text=_("core.pages.index.continue"), url=reverse("eligibility:index"))
+    button.label = _("core.pages.agency_index.button.label")
+
     page = viewmodels.Page(
-        content_title=_index_content_title(),
-        paragraphs=_index_paragraphs(),
-        button=viewmodels.Button.primary(text=_("core.pages.index.continue"), url=reverse("eligibility:index")),
+        content_title=_("core.pages.agency_index.content_title"),
+        button=button,
         classes="home",
     )
 
-    return PageTemplateResponse(request, page)
+    help_page = reverse("core:help")
+    context_dict = {**page.context_dict(), **{"info_link": f"{help_page}#about"}}
+
+    return TemplateResponse(request, "core/agency_index.html", context_dict)
 
 
 @middleware.pageview_decorator
{"golden_diff": "diff --git a/benefits/core/views.py b/benefits/core/views.py\n--- a/benefits/core/views.py\n+++ b/benefits/core/views.py\n@@ -57,14 +57,19 @@\n session.reset(request)\n session.update(request, agency=agency, origin=agency.index_url)\n \n+ button = viewmodels.Button.primary(text=_(\"core.pages.index.continue\"), url=reverse(\"eligibility:index\"))\n+ button.label = _(\"core.pages.agency_index.button.label\")\n+\n page = viewmodels.Page(\n- content_title=_index_content_title(),\n- paragraphs=_index_paragraphs(),\n- button=viewmodels.Button.primary(text=_(\"core.pages.index.continue\"), url=reverse(\"eligibility:index\")),\n+ content_title=_(\"core.pages.agency_index.content_title\"),\n+ button=button,\n classes=\"home\",\n )\n \n- return PageTemplateResponse(request, page)\n+ help_page = reverse(\"core:help\")\n+ context_dict = {**page.context_dict(), **{\"info_link\": f\"{help_page}#about\"}}\n+\n+ return TemplateResponse(request, \"core/agency_index.html\", context_dict)\n \n \n @middleware.pageview_decorator\n", "issue": "Update rendering of Agency index page\n## Background\r\n\r\nIn tandem with #366, this ticket moves the bulk of the contextual information from the application index to the agency index.\r\n\r\nWe need more flexibility on this page than the current viewmodel / page template allows for.\r\n\r\n![image](https://user-images.githubusercontent.com/1783439/160712671-c67eee3c-7aef-4bba-bf95-c317699b7de3.png)\r\n\r\n## Tasks\r\n\r\n* [x] Create/update text entries in English and Spanish for each paragraph, title, etc.\r\n* [x] Can we embed the **bold** font style in the translation?\r\n* [x] Create text entries for a pre-button label / subhead\r\n* [x] Create a new template `core/agency_index.html` to use for this page\r\n* [x] In the view use the new template\n", "before_files": [{"content": "\"\"\"\nThe core application: view definition for the root of the webapp.\n\"\"\"\nfrom django.http import HttpResponseBadRequest, HttpResponseNotFound, HttpResponseServerError\nfrom django.template import loader\nfrom django.template.response import TemplateResponse\nfrom django.urls import reverse\nfrom django.utils.translation import gettext as _\n\nfrom . 
import middleware, models, session, viewmodels\n\n\ndef PageTemplateResponse(request, page_vm):\n \"\"\"Helper returns a TemplateResponse using the common page template.\"\"\"\n return TemplateResponse(request, \"core/page.html\", page_vm.context_dict())\n\n\ndef _index_content_title():\n \"\"\"Helper returns the content title for the common index page.\"\"\"\n return _(\"core.pages.index.content_title\")\n\n\ndef _index_paragraphs():\n \"\"\"Helper returns the content paragraphs for the common index page.\"\"\"\n return [_(\"core.pages.index.p[0]\"), _(\"core.pages.index.p[1]\"), _(\"core.pages.index.p[2]\")]\n\n\ndef _index_url():\n \"\"\"Helper computes the index url path.\"\"\"\n return reverse(\"core:index\")\n\n\[email protected]_decorator\ndef index(request):\n \"\"\"View handler for the main entry page.\"\"\"\n session.reset(request)\n\n # generate a button to the landing page for each active agency\n agencies = models.TransitAgency.all_active()\n buttons = [viewmodels.Button.outline_primary(text=a.short_name, url=a.index_url) for a in agencies]\n buttons[0].classes.append(\"mt-3\")\n buttons[0].label = _(\"core.pages.index.chooseprovider\")\n\n page = viewmodels.Page(\n content_title=_index_content_title(),\n paragraphs=_index_paragraphs(),\n buttons=buttons,\n classes=\"home\",\n )\n\n return PageTemplateResponse(request, page)\n\n\[email protected]_decorator\ndef agency_index(request, agency):\n \"\"\"View handler for an agency entry page.\"\"\"\n session.reset(request)\n session.update(request, agency=agency, origin=agency.index_url)\n\n page = viewmodels.Page(\n content_title=_index_content_title(),\n paragraphs=_index_paragraphs(),\n button=viewmodels.Button.primary(text=_(\"core.pages.index.continue\"), url=reverse(\"eligibility:index\")),\n classes=\"home\",\n )\n\n return PageTemplateResponse(request, page)\n\n\[email protected]_decorator\ndef help(request):\n \"\"\"View handler for the help page.\"\"\"\n if session.active_agency(request):\n agency = session.agency(request)\n buttons = viewmodels.Button.agency_contact_links(agency)\n else:\n buttons = [btn for a in models.TransitAgency.all_active() for btn in viewmodels.Button.agency_contact_links(a)]\n\n buttons.append(viewmodels.Button.home(request, _(\"core.buttons.back\")))\n\n page = viewmodels.Page(\n title=_(\"core.buttons.help\"),\n content_title=_(\"core.buttons.help\"),\n buttons=buttons,\n classes=\"text-lg-center\",\n noimage=True,\n )\n\n return TemplateResponse(request, \"core/help.html\", page.context_dict())\n\n\[email protected]_decorator\ndef bad_request(request, exception, template_name=\"400.html\"):\n \"\"\"View handler for HTTP 400 Bad Request responses.\"\"\"\n if session.active_agency(request):\n session.update(request, origin=session.agency(request).index_url)\n else:\n session.update(request, origin=_index_url())\n\n home = viewmodels.Button.home(request)\n page = viewmodels.ErrorPage.error(button=home)\n t = loader.get_template(template_name)\n\n return HttpResponseBadRequest(t.render(page.context_dict()))\n\n\[email protected]_decorator\ndef csrf_failure(request, reason):\n \"\"\"\n View handler for CSRF_FAILURE_VIEW with custom data.\n \"\"\"\n if session.active_agency(request):\n session.update(request, origin=session.agency(request).index_url)\n else:\n session.update(request, origin=_index_url())\n\n home = viewmodels.Button.home(request)\n page = viewmodels.ErrorPage.not_found(button=home, path=request.path)\n t = loader.get_template(\"400.html\")\n\n return 
HttpResponseNotFound(t.render(page.context_dict()))\n\n\[email protected]_decorator\ndef page_not_found(request, exception, template_name=\"404.html\"):\n \"\"\"View handler for HTTP 404 Not Found responses.\"\"\"\n if session.active_agency(request):\n session.update(request, origin=session.agency(request).index_url)\n else:\n session.update(request, origin=_index_url())\n\n home = viewmodels.Button.home(request)\n page = viewmodels.ErrorPage.not_found(button=home, path=request.path)\n t = loader.get_template(template_name)\n\n return HttpResponseNotFound(t.render(page.context_dict()))\n\n\[email protected]_decorator\ndef server_error(request, template_name=\"500.html\"):\n \"\"\"View handler for HTTP 500 Server Error responses.\"\"\"\n if session.active_agency(request):\n session.update(request, origin=session.agency(request).index_url)\n else:\n session.update(request, origin=_index_url())\n\n home = viewmodels.Button.home(request)\n page = viewmodels.ErrorPage.error(button=home)\n t = loader.get_template(template_name)\n\n return HttpResponseServerError(t.render(page.context_dict()))\n", "path": "benefits/core/views.py"}]}
2,164
254
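Note on the benefits record above: the change swaps the shared page template for a dedicated `core/agency_index.html` and passes an extra `info_link` into the context. Restated outside the diff, the new view looks roughly like the sketch below; the imports are adapted to absolute form as an assumption, while the msgids, template name, and `#about` anchor come straight from the golden diff.

```python
from django.template.response import TemplateResponse
from django.urls import reverse
from django.utils.translation import gettext as _

from benefits.core import middleware, session, viewmodels


@middleware.pageview_decorator
def agency_index(request, agency):
    """View handler for an agency entry page."""
    session.reset(request)
    session.update(request, agency=agency, origin=agency.index_url)

    # Continue button keeps the shared text but gets an agency-index specific label.
    button = viewmodels.Button.primary(
        text=_("core.pages.index.continue"), url=reverse("eligibility:index")
    )
    button.label = _("core.pages.agency_index.button.label")

    page = viewmodels.Page(
        content_title=_("core.pages.agency_index.content_title"),
        button=button,
        classes="home",
    )

    # Dedicated agency template, plus a link into the "about" section of the help page.
    help_page = reverse("core:help")
    context_dict = {**page.context_dict(), "info_link": f"{help_page}#about"}
    return TemplateResponse(request, "core/agency_index.html", context_dict)
```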
gh_patches_debug_44234
rasdani/github-patches
git_diff
deepset-ai__haystack-3953
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Remove frontmatter and add to `meta` in markdown converter **Is your feature request related to a problem? Please describe.** We would like to use the markdown converter for the `haystack-tutorials` search **Describe the solution you'd like** Frontmatter is often used to store some meta information of markdown files. So it would be a useful feature to be able to auto add these to the `meta` of the created documents It's also a good idea to remove the fronmatter from the created documents too. </issue> <code> [start of haystack/nodes/file_converter/markdown.py] 1 import logging 2 import re 3 from pathlib import Path 4 from typing import Dict, List, Optional, Tuple, Any 5 6 try: 7 from bs4 import BeautifulSoup, NavigableString 8 from markdown import markdown 9 except (ImportError, ModuleNotFoundError) as ie: 10 from haystack.utils.import_utils import _optional_component_not_installed 11 12 _optional_component_not_installed(__name__, "preprocessing", ie) 13 14 from haystack.nodes.file_converter.base import BaseConverter 15 from haystack.schema import Document 16 17 18 logger = logging.getLogger(__name__) 19 20 21 class MarkdownConverter(BaseConverter): 22 def __init__( 23 self, 24 remove_numeric_tables: bool = False, 25 valid_languages: Optional[List[str]] = None, 26 id_hash_keys: Optional[List[str]] = None, 27 progress_bar: bool = True, 28 remove_code_snippets: bool = True, 29 extract_headlines: bool = False, 30 ): 31 """ 32 :param remove_numeric_tables: Not applicable. 33 :param valid_languages: Not applicable. 34 :param id_hash_keys: Generate the document ID from a custom list of strings that refer to the document's 35 attributes. To make sure you don't have duplicate documents in your DocumentStore if texts are 36 not unique, you can modify the metadata and pass for example, `"meta"` to this field ([`"content"`, `"meta"`]). 37 In this case, the ID is generated by using the content and the defined metadata. 38 :param progress_bar: Show a progress bar for the conversion. 39 :param remove_code_snippets: Whether to remove snippets from the markdown file. 40 :param extract_headlines: Whether to extract headings from the markdown file. 41 """ 42 super().__init__( 43 remove_numeric_tables=remove_numeric_tables, 44 valid_languages=valid_languages, 45 id_hash_keys=id_hash_keys, 46 progress_bar=progress_bar, 47 ) 48 49 self.remove_code_snippets = remove_code_snippets 50 self.extract_headlines = extract_headlines 51 52 def convert( 53 self, 54 file_path: Path, 55 meta: Optional[Dict[str, Any]] = None, 56 remove_numeric_tables: Optional[bool] = None, 57 valid_languages: Optional[List[str]] = None, 58 encoding: Optional[str] = "utf-8", 59 id_hash_keys: Optional[List[str]] = None, 60 remove_code_snippets: Optional[bool] = None, 61 extract_headlines: Optional[bool] = None, 62 ) -> List[Document]: 63 """ 64 Reads text from a markdown file and executes optional preprocessing steps. 65 66 :param file_path: path of the file to convert 67 :param meta: dictionary of meta data key-value pairs to append in the returned document. 68 :param encoding: Select the file encoding (default is `utf-8`) 69 :param remove_numeric_tables: Not applicable 70 :param valid_languages: Not applicable 71 :param id_hash_keys: Generate the document id from a custom list of strings that refer to the document's 72 attributes. 
If you want to ensure you don't have duplicate documents in your DocumentStore but texts are 73 not unique, you can modify the metadata and pass e.g. `"meta"` to this field (e.g. [`"content"`, `"meta"`]). 74 In this case the id will be generated by using the content and the defined metadata. 75 :param remove_code_snippets: Whether to remove snippets from the markdown file. 76 :param extract_headlines: Whether to extract headings from the markdown file. 77 """ 78 79 id_hash_keys = id_hash_keys if id_hash_keys is not None else self.id_hash_keys 80 remove_code_snippets = remove_code_snippets if remove_code_snippets is not None else self.remove_code_snippets 81 extract_headlines = extract_headlines if extract_headlines is not None else self.extract_headlines 82 83 with open(file_path, encoding=encoding, errors="ignore") as f: 84 markdown_text = f.read() 85 86 # md -> html -> text since BeautifulSoup can extract text cleanly 87 html = markdown(markdown_text) 88 89 # remove code snippets 90 if remove_code_snippets: 91 html = re.sub(r"<pre>(.*?)</pre>", " ", html, flags=re.DOTALL) 92 html = re.sub(r"<code>(.*?)</code>", " ", html, flags=re.DOTALL) 93 soup = BeautifulSoup(html, "html.parser") 94 95 if extract_headlines: 96 text, headlines = self._extract_text_and_headlines(soup) 97 if meta is None: 98 meta = {} 99 meta["headlines"] = headlines 100 else: 101 text = soup.get_text() 102 103 document = Document(content=text, meta=meta, id_hash_keys=id_hash_keys) 104 return [document] 105 106 @staticmethod 107 def _extract_text_and_headlines(soup: BeautifulSoup) -> Tuple[str, List[Dict]]: 108 """ 109 Extracts text and headings from a soup object. 110 """ 111 headline_tags = {"h1", "h2", "h3", "h4", "h5", "h6"} 112 headlines = [] 113 text = "" 114 for desc in soup.descendants: 115 if desc.name in headline_tags: 116 current_headline = desc.get_text() 117 current_start_idx = len(text) 118 current_level = int(desc.name[-1]) - 1 119 headlines.append({"headline": current_headline, "start_idx": current_start_idx, "level": current_level}) 120 121 if isinstance(desc, NavigableString): 122 text += desc.get_text() 123 124 return text, headlines 125 [end of haystack/nodes/file_converter/markdown.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/haystack/nodes/file_converter/markdown.py b/haystack/nodes/file_converter/markdown.py --- a/haystack/nodes/file_converter/markdown.py +++ b/haystack/nodes/file_converter/markdown.py @@ -4,6 +4,7 @@ from typing import Dict, List, Optional, Tuple, Any try: + import frontmatter from bs4 import BeautifulSoup, NavigableString from markdown import markdown except (ImportError, ModuleNotFoundError) as ie: @@ -27,6 +28,7 @@ progress_bar: bool = True, remove_code_snippets: bool = True, extract_headlines: bool = False, + add_frontmatter_to_meta: bool = False, ): """ :param remove_numeric_tables: Not applicable. @@ -38,6 +40,7 @@ :param progress_bar: Show a progress bar for the conversion. :param remove_code_snippets: Whether to remove snippets from the markdown file. :param extract_headlines: Whether to extract headings from the markdown file. + :param add_frontmatter_to_meta: Whether to add the contents of the frontmatter to `meta`. """ super().__init__( remove_numeric_tables=remove_numeric_tables, @@ -48,6 +51,7 @@ self.remove_code_snippets = remove_code_snippets self.extract_headlines = extract_headlines + self.add_frontmatter_to_meta = add_frontmatter_to_meta def convert( self, @@ -59,6 +63,7 @@ id_hash_keys: Optional[List[str]] = None, remove_code_snippets: Optional[bool] = None, extract_headlines: Optional[bool] = None, + add_frontmatter_to_meta: Optional[bool] = None, ) -> List[Document]: """ Reads text from a markdown file and executes optional preprocessing steps. @@ -74,14 +79,18 @@ In this case the id will be generated by using the content and the defined metadata. :param remove_code_snippets: Whether to remove snippets from the markdown file. :param extract_headlines: Whether to extract headings from the markdown file. + :param add_frontmatter_to_meta: Whether to add the contents of the frontmatter to `meta`. """ id_hash_keys = id_hash_keys if id_hash_keys is not None else self.id_hash_keys remove_code_snippets = remove_code_snippets if remove_code_snippets is not None else self.remove_code_snippets extract_headlines = extract_headlines if extract_headlines is not None else self.extract_headlines + add_frontmatter_to_meta = ( + add_frontmatter_to_meta if add_frontmatter_to_meta is not None else self.add_frontmatter_to_meta + ) with open(file_path, encoding=encoding, errors="ignore") as f: - markdown_text = f.read() + metadata, markdown_text = frontmatter.parse(f.read()) # md -> html -> text since BeautifulSoup can extract text cleanly html = markdown(markdown_text) @@ -92,6 +101,12 @@ html = re.sub(r"<code>(.*?)</code>", " ", html, flags=re.DOTALL) soup = BeautifulSoup(html, "html.parser") + if add_frontmatter_to_meta: + if meta is None: + meta = metadata + else: + meta.update(metadata) + if extract_headlines: text, headlines = self._extract_text_and_headlines(soup) if meta is None:
{"golden_diff": "diff --git a/haystack/nodes/file_converter/markdown.py b/haystack/nodes/file_converter/markdown.py\n--- a/haystack/nodes/file_converter/markdown.py\n+++ b/haystack/nodes/file_converter/markdown.py\n@@ -4,6 +4,7 @@\n from typing import Dict, List, Optional, Tuple, Any\n \n try:\n+ import frontmatter\n from bs4 import BeautifulSoup, NavigableString\n from markdown import markdown\n except (ImportError, ModuleNotFoundError) as ie:\n@@ -27,6 +28,7 @@\n progress_bar: bool = True,\n remove_code_snippets: bool = True,\n extract_headlines: bool = False,\n+ add_frontmatter_to_meta: bool = False,\n ):\n \"\"\"\n :param remove_numeric_tables: Not applicable.\n@@ -38,6 +40,7 @@\n :param progress_bar: Show a progress bar for the conversion.\n :param remove_code_snippets: Whether to remove snippets from the markdown file.\n :param extract_headlines: Whether to extract headings from the markdown file.\n+ :param add_frontmatter_to_meta: Whether to add the contents of the frontmatter to `meta`.\n \"\"\"\n super().__init__(\n remove_numeric_tables=remove_numeric_tables,\n@@ -48,6 +51,7 @@\n \n self.remove_code_snippets = remove_code_snippets\n self.extract_headlines = extract_headlines\n+ self.add_frontmatter_to_meta = add_frontmatter_to_meta\n \n def convert(\n self,\n@@ -59,6 +63,7 @@\n id_hash_keys: Optional[List[str]] = None,\n remove_code_snippets: Optional[bool] = None,\n extract_headlines: Optional[bool] = None,\n+ add_frontmatter_to_meta: Optional[bool] = None,\n ) -> List[Document]:\n \"\"\"\n Reads text from a markdown file and executes optional preprocessing steps.\n@@ -74,14 +79,18 @@\n In this case the id will be generated by using the content and the defined metadata.\n :param remove_code_snippets: Whether to remove snippets from the markdown file.\n :param extract_headlines: Whether to extract headings from the markdown file.\n+ :param add_frontmatter_to_meta: Whether to add the contents of the frontmatter to `meta`.\n \"\"\"\n \n id_hash_keys = id_hash_keys if id_hash_keys is not None else self.id_hash_keys\n remove_code_snippets = remove_code_snippets if remove_code_snippets is not None else self.remove_code_snippets\n extract_headlines = extract_headlines if extract_headlines is not None else self.extract_headlines\n+ add_frontmatter_to_meta = (\n+ add_frontmatter_to_meta if add_frontmatter_to_meta is not None else self.add_frontmatter_to_meta\n+ )\n \n with open(file_path, encoding=encoding, errors=\"ignore\") as f:\n- markdown_text = f.read()\n+ metadata, markdown_text = frontmatter.parse(f.read())\n \n # md -> html -> text since BeautifulSoup can extract text cleanly\n html = markdown(markdown_text)\n@@ -92,6 +101,12 @@\n html = re.sub(r\"<code>(.*?)</code>\", \" \", html, flags=re.DOTALL)\n soup = BeautifulSoup(html, \"html.parser\")\n \n+ if add_frontmatter_to_meta:\n+ if meta is None:\n+ meta = metadata\n+ else:\n+ meta.update(metadata)\n+\n if extract_headlines:\n text, headlines = self._extract_text_and_headlines(soup)\n if meta is None:\n", "issue": "Remove frontmatter and add to `meta` in markdown converter\n**Is your feature request related to a problem? Please describe.**\r\nWe would like to use the markdown converter for the `haystack-tutorials` search\r\n\r\n**Describe the solution you'd like**\r\nFrontmatter is often used to store some meta information of markdown files. So it would be a useful feature to be able to auto add these to the `meta` of the created documents\r\nIt's also a good idea to remove the fronmatter from the created documents too. 
\n", "before_files": [{"content": "import logging\nimport re\nfrom pathlib import Path\nfrom typing import Dict, List, Optional, Tuple, Any\n\ntry:\n from bs4 import BeautifulSoup, NavigableString\n from markdown import markdown\nexcept (ImportError, ModuleNotFoundError) as ie:\n from haystack.utils.import_utils import _optional_component_not_installed\n\n _optional_component_not_installed(__name__, \"preprocessing\", ie)\n\nfrom haystack.nodes.file_converter.base import BaseConverter\nfrom haystack.schema import Document\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass MarkdownConverter(BaseConverter):\n def __init__(\n self,\n remove_numeric_tables: bool = False,\n valid_languages: Optional[List[str]] = None,\n id_hash_keys: Optional[List[str]] = None,\n progress_bar: bool = True,\n remove_code_snippets: bool = True,\n extract_headlines: bool = False,\n ):\n \"\"\"\n :param remove_numeric_tables: Not applicable.\n :param valid_languages: Not applicable.\n :param id_hash_keys: Generate the document ID from a custom list of strings that refer to the document's\n attributes. To make sure you don't have duplicate documents in your DocumentStore if texts are\n not unique, you can modify the metadata and pass for example, `\"meta\"` to this field ([`\"content\"`, `\"meta\"`]).\n In this case, the ID is generated by using the content and the defined metadata.\n :param progress_bar: Show a progress bar for the conversion.\n :param remove_code_snippets: Whether to remove snippets from the markdown file.\n :param extract_headlines: Whether to extract headings from the markdown file.\n \"\"\"\n super().__init__(\n remove_numeric_tables=remove_numeric_tables,\n valid_languages=valid_languages,\n id_hash_keys=id_hash_keys,\n progress_bar=progress_bar,\n )\n\n self.remove_code_snippets = remove_code_snippets\n self.extract_headlines = extract_headlines\n\n def convert(\n self,\n file_path: Path,\n meta: Optional[Dict[str, Any]] = None,\n remove_numeric_tables: Optional[bool] = None,\n valid_languages: Optional[List[str]] = None,\n encoding: Optional[str] = \"utf-8\",\n id_hash_keys: Optional[List[str]] = None,\n remove_code_snippets: Optional[bool] = None,\n extract_headlines: Optional[bool] = None,\n ) -> List[Document]:\n \"\"\"\n Reads text from a markdown file and executes optional preprocessing steps.\n\n :param file_path: path of the file to convert\n :param meta: dictionary of meta data key-value pairs to append in the returned document.\n :param encoding: Select the file encoding (default is `utf-8`)\n :param remove_numeric_tables: Not applicable\n :param valid_languages: Not applicable\n :param id_hash_keys: Generate the document id from a custom list of strings that refer to the document's\n attributes. If you want to ensure you don't have duplicate documents in your DocumentStore but texts are\n not unique, you can modify the metadata and pass e.g. `\"meta\"` to this field (e.g. 
[`\"content\"`, `\"meta\"`]).\n In this case the id will be generated by using the content and the defined metadata.\n :param remove_code_snippets: Whether to remove snippets from the markdown file.\n :param extract_headlines: Whether to extract headings from the markdown file.\n \"\"\"\n\n id_hash_keys = id_hash_keys if id_hash_keys is not None else self.id_hash_keys\n remove_code_snippets = remove_code_snippets if remove_code_snippets is not None else self.remove_code_snippets\n extract_headlines = extract_headlines if extract_headlines is not None else self.extract_headlines\n\n with open(file_path, encoding=encoding, errors=\"ignore\") as f:\n markdown_text = f.read()\n\n # md -> html -> text since BeautifulSoup can extract text cleanly\n html = markdown(markdown_text)\n\n # remove code snippets\n if remove_code_snippets:\n html = re.sub(r\"<pre>(.*?)</pre>\", \" \", html, flags=re.DOTALL)\n html = re.sub(r\"<code>(.*?)</code>\", \" \", html, flags=re.DOTALL)\n soup = BeautifulSoup(html, \"html.parser\")\n\n if extract_headlines:\n text, headlines = self._extract_text_and_headlines(soup)\n if meta is None:\n meta = {}\n meta[\"headlines\"] = headlines\n else:\n text = soup.get_text()\n\n document = Document(content=text, meta=meta, id_hash_keys=id_hash_keys)\n return [document]\n\n @staticmethod\n def _extract_text_and_headlines(soup: BeautifulSoup) -> Tuple[str, List[Dict]]:\n \"\"\"\n Extracts text and headings from a soup object.\n \"\"\"\n headline_tags = {\"h1\", \"h2\", \"h3\", \"h4\", \"h5\", \"h6\"}\n headlines = []\n text = \"\"\n for desc in soup.descendants:\n if desc.name in headline_tags:\n current_headline = desc.get_text()\n current_start_idx = len(text)\n current_level = int(desc.name[-1]) - 1\n headlines.append({\"headline\": current_headline, \"start_idx\": current_start_idx, \"level\": current_level})\n\n if isinstance(desc, NavigableString):\n text += desc.get_text()\n\n return text, headlines\n", "path": "haystack/nodes/file_converter/markdown.py"}]}
2,084
782
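Note on the haystack record above: it wires the `frontmatter` package (python-frontmatter) into the markdown converter. Its core behaviour, strip the frontmatter block and optionally fold its keys into `meta`, can be shown in isolation; the function below is a simplified sketch that returns a plain dict rather than a haystack `Document`.

```python
from typing import Any, Dict, Optional

import frontmatter  # python-frontmatter, the dependency the golden diff imports


def parse_markdown_with_frontmatter(
    file_path: str,
    meta: Optional[Dict[str, Any]] = None,
    add_frontmatter_to_meta: bool = True,
    encoding: str = "utf-8",
) -> Dict[str, Any]:
    """Split YAML frontmatter from a markdown file and merge it into the meta dict."""
    with open(file_path, encoding=encoding, errors="ignore") as f:
        metadata, markdown_text = frontmatter.parse(f.read())

    merged = dict(meta or {})
    if add_frontmatter_to_meta:
        merged.update(metadata)  # mirrors meta.update(metadata) in the golden diff

    # markdown_text no longer contains the frontmatter block.
    return {"content": markdown_text, "meta": merged}
```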
gh_patches_debug_25505
rasdani/github-patches
git_diff
onnx__onnx-1648
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Proposal: Op Registry Once we start getting into more complicated passes having knowledge about operators becomes incredibly useful. What I propose is to maintain a registry of operators mapping to a set of descriptors. For example let's take a look at the [eliminate_nop_monotone_argmax](https://github.com/onnx/onnx/blob/master/onnx/optimizer/passes/eliminate_nop_monotone_argmax.h) pass. Within the pass we have to maintain an unordered set of ops that are monotone. The correct design would be to have a registry which we can ask whether or not if a certain op is monotone. Once such a registry is implemented, passes can be generalized through descriptors, therefore as we add new ops and their respective descriptors our passes naturally scale out. Some possible descriptors we could have are: - monotonic - elementwise independent (op works on an individual element without knowledge of other elements, such as log, but not softmax since it requires calculating a sum over an axis) - elementwise dependent (softmax, logsoftmax, softmin, softplus) - reduction - shape transform (e.g. transpose, view) @houseroad @bddppq What do you guys think? </issue> <code> [start of onnx/optimizer.py] 1 # ATTENTION: The code in this file is highly EXPERIMENTAL. 2 # Adventurous users should note that the APIs will probably change. 3 4 """onnx optimizer 5 6 This enables users to optimize their models. 7 """ 8 from __future__ import absolute_import 9 from __future__ import division 10 from __future__ import print_function 11 from __future__ import unicode_literals 12 13 import onnx 14 import onnx.onnx_cpp2py_export.optimizer as C 15 from onnx import ModelProto 16 from typing import Text, Sequence, Optional 17 18 """Apply the optimization on the serialized ModelProto. 19 20 Arguments: 21 input (ModelProto): model 22 names (list of string): list of optimization names 23 24 Return: 25 return (ModelProto) optimized model 26 27 Supported pass names: 28 -- nop 29 -- eliminate_identity 30 -- eliminate_nop_transpose 31 -- eliminate_nop_pad 32 -- eliminate_unused_initializer 33 -- fuse_consecutive_squeezes 34 -- fuse_consecutive_transposes 35 -- fuse_add_bias_into_conv 36 -- fuse_transpose_into_gemm 37 """ 38 39 get_available_passes = C.get_available_passes 40 41 42 def optimize(model, passes=None, fixed_point=False): # type: (ModelProto, Optional[Sequence[Text]], bool) -> ModelProto 43 if passes is None: 44 passes = ['eliminate_nop_transpose', 45 'eliminate_nop_pad', 46 'fuse_consecutive_transposes', 47 'fuse_transpose_into_gemm'] 48 if not isinstance(model, ModelProto): 49 raise ValueError('Optimizer only accepts ModelProto, incorrect type: {}'.format(type(model))) 50 51 model_str = model.SerializeToString() 52 if fixed_point: 53 optimized_model_str = C.optimize_fixedpoint(model_str, passes) 54 else: 55 optimized_model_str = C.optimize(model_str, passes) 56 57 return onnx.load_from_string(optimized_model_str) 58 [end of onnx/optimizer.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/onnx/optimizer.py b/onnx/optimizer.py
--- a/onnx/optimizer.py
+++ b/onnx/optimizer.py
@@ -26,13 +26,23 @@
 
 Supported pass names:
     -- nop
+    -- eliminate_deadend
     -- eliminate_identity
-    -- eliminate_nop_transpose
+    -- eliminate_nop_dropout
+    -- eliminate_nop_monotone_argmax
     -- eliminate_nop_pad
+    -- eliminate_nop_transpose
     -- eliminate_unused_initializer
+    -- extract_constant_to_initializer
+    -- fuse_add_bias_into_conv
+    -- fuse_bn_into_conv
+    -- fuse_consecutive_concats
+    -- fuse_consecutive_log_softmax
+    -- fuse_consecutive_reduce_unsqueeze
     -- fuse_consecutive_squeezes
     -- fuse_consecutive_transposes
-    -- fuse_add_bias_into_conv
+    -- fuse_matmul_add_bias_into_gemm
+    -- fuse_pad_into_conv
     -- fuse_transpose_into_gemm
 """
 
@@ -45,8 +55,10 @@
                   'eliminate_nop_pad',
                   'fuse_consecutive_transposes',
                   'fuse_transpose_into_gemm']
+
     if not isinstance(model, ModelProto):
-        raise ValueError('Optimizer only accepts ModelProto, incorrect type: {}'.format(type(model)))
+        raise ValueError(
+            'Optimizer only accepts ModelProto, incorrect type: {}'.format(type(model)))
 
     model_str = model.SerializeToString()
     if fixed_point:
{"golden_diff": "diff --git a/onnx/optimizer.py b/onnx/optimizer.py\n--- a/onnx/optimizer.py\n+++ b/onnx/optimizer.py\n@@ -26,13 +26,23 @@\n \n Supported pass names:\n -- nop\n+ -- eliminate_deadend\n -- eliminate_identity\n- -- eliminate_nop_transpose\n+ -- eliminate_nop_dropout\n+ -- eliminate_nop_monotone_argmax\n -- eliminate_nop_pad\n+ -- eliminate_nop_transpose\n -- eliminate_unused_initializer\n+ -- extract_constant_to_initializer\n+ -- fuse_add_bias_into_conv\n+ -- fuse_bn_into_conv\n+ -- fuse_consecutive_concats\n+ -- fuse_consecutive_log_softmax\n+ -- fuse_consecutive_reduce_unsqueeze\n -- fuse_consecutive_squeezes\n -- fuse_consecutive_transposes\n- -- fuse_add_bias_into_conv\n+ -- fuse_matmul_add_bias_into_gemm\n+ -- fuse_pad_into_conv\n -- fuse_transpose_into_gemm\n \"\"\"\n \n@@ -45,8 +55,10 @@\n 'eliminate_nop_pad',\n 'fuse_consecutive_transposes',\n 'fuse_transpose_into_gemm']\n+\n if not isinstance(model, ModelProto):\n- raise ValueError('Optimizer only accepts ModelProto, incorrect type: {}'.format(type(model)))\n+ raise ValueError(\n+ 'Optimizer only accepts ModelProto, incorrect type: {}'.format(type(model)))\n \n model_str = model.SerializeToString()\n if fixed_point:\n", "issue": "Proposal: Op Registry\nOnce we start getting into more complicated passes having knowledge about operators becomes incredibly useful. What I propose is to maintain a registry of operators mapping to a set of descriptors. \r\n\r\nFor example let's take a look at the [eliminate_nop_monotone_argmax](https://github.com/onnx/onnx/blob/master/onnx/optimizer/passes/eliminate_nop_monotone_argmax.h) pass. Within the pass we have to maintain an unordered set of ops that are monotone. The correct design would be to have a registry which we can ask whether or not if a certain op is monotone. Once such a registry is implemented, passes can be generalized through descriptors, therefore as we add new ops and their respective descriptors our passes naturally scale out.\r\n\r\nSome possible descriptors we could have are:\r\n- monotonic\r\n- elementwise independent (op works on an individual element without knowledge of other elements, such as log, but not softmax since it requires calculating a sum over an axis)\r\n- elementwise dependent (softmax, logsoftmax, softmin, softplus)\r\n- reduction\r\n- shape transform (e.g. 
transpose, view)\r\n\r\n@houseroad @bddppq What do you guys think?\n", "before_files": [{"content": "# ATTENTION: The code in this file is highly EXPERIMENTAL.\n# Adventurous users should note that the APIs will probably change.\n\n\"\"\"onnx optimizer\n\nThis enables users to optimize their models.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport onnx\nimport onnx.onnx_cpp2py_export.optimizer as C\nfrom onnx import ModelProto\nfrom typing import Text, Sequence, Optional\n\n\"\"\"Apply the optimization on the serialized ModelProto.\n\nArguments:\n input (ModelProto): model\n names (list of string): list of optimization names\n\nReturn:\n return (ModelProto) optimized model\n\nSupported pass names:\n -- nop\n -- eliminate_identity\n -- eliminate_nop_transpose\n -- eliminate_nop_pad\n -- eliminate_unused_initializer\n -- fuse_consecutive_squeezes\n -- fuse_consecutive_transposes\n -- fuse_add_bias_into_conv\n -- fuse_transpose_into_gemm\n\"\"\"\n\nget_available_passes = C.get_available_passes\n\n\ndef optimize(model, passes=None, fixed_point=False): # type: (ModelProto, Optional[Sequence[Text]], bool) -> ModelProto\n if passes is None:\n passes = ['eliminate_nop_transpose',\n 'eliminate_nop_pad',\n 'fuse_consecutive_transposes',\n 'fuse_transpose_into_gemm']\n if not isinstance(model, ModelProto):\n raise ValueError('Optimizer only accepts ModelProto, incorrect type: {}'.format(type(model)))\n\n model_str = model.SerializeToString()\n if fixed_point:\n optimized_model_str = C.optimize_fixedpoint(model_str, passes)\n else:\n optimized_model_str = C.optimize(model_str, passes)\n\n return onnx.load_from_string(optimized_model_str)\n", "path": "onnx/optimizer.py"}]}
1,289
332
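Note on the onnx record above: the golden diff is mostly a docstring update, but the module it touches already exposes the live pass registry, which is closer to the issue's registry idea than any hard-coded list. Below is a short usage sketch against that legacy `onnx.optimizer` module; the file paths are placeholders and pass-name availability is checked at runtime rather than assumed.

```python
import onnx
from onnx import optimizer

model = onnx.load("model.onnx")  # placeholder path

# Query the registry instead of trusting the docstring list.
available = set(optimizer.get_available_passes())

requested = [
    "eliminate_nop_transpose",
    "eliminate_nop_pad",
    "fuse_consecutive_transposes",
    "fuse_transpose_into_gemm",
]
unknown = [name for name in requested if name not in available]
if unknown:
    raise ValueError("unknown optimizer passes: {}".format(unknown))

optimized = optimizer.optimize(model, passes=requested)
onnx.save(optimized, "model.opt.onnx")  # placeholder output path
```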
gh_patches_debug_21781
rasdani/github-patches
git_diff
cal-itp__benefits-344
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Refactor language files for multiple verifiers ## Background We need message strings for multiple verifiers ## Tasks * [x] Create a new [verifier fixture](https://github.com/cal-itp/benefits/blob/dev/fixtures/02_eligibilityverifier.json) from the existing one * [x] With a new `pk` and `fields.name` * [x] Adjust both to have different `msgid`, e.g. `eligibility.pages.confirm.dmv.*` and `eligibility.pages.confirm.mst.*` * [x] Adjust the `django.po` files with new `msgid` and new sample content for the Courtesy Cards verifier for each language * [x] Wire the above fixture to one of the [sample agencies](https://github.com/cal-itp/benefits/blob/dev/fixtures/04_transitagency.json) ## Testing Once #322 is merged * [x] Remove your local db and local compiled language files, then run `bin/init.sh` * [x] Observe that the migrations run correctly, the fixture data is loaded, and the language files are compiled * [x] Open the app locally and select the agency configured above with multiple verifiers * [x] Observe the `eligibility:index` view with different verifier selections * [x] Select one of the verifiers * [x] Observe the verifier-specific `eligibility:start` view * [x] Confirm verification of a test user * [x] Go back to `eligibility:index` and select the other verifier * [x] Observe the verifier-specific `eligibility:start` view * [x] Confirm verification of a test user </issue> <code> [start of benefits/eligibility/views.py] 1 """ 2 The eligibility application: view definitions for the eligibility verification flow. 3 """ 4 from django.contrib import messages 5 from django.shortcuts import redirect 6 from django.urls import reverse 7 from django.utils.decorators import decorator_from_middleware 8 from django.utils.translation import pgettext, gettext as _ 9 10 from benefits.core import middleware, recaptcha, session, viewmodels 11 from benefits.core.models import EligibilityVerifier 12 from benefits.core.views import PageTemplateResponse, _index_image 13 from . 
import analytics, api, forms 14 15 16 @decorator_from_middleware(middleware.AgencySessionRequired) 17 def index(request): 18 """View handler for the eligibility verifier selection form.""" 19 20 session.update(request, eligibility_types=[], origin=reverse("eligibility:index")) 21 agency = session.agency(request) 22 23 eligibility_start = reverse("eligibility:start") 24 25 page = viewmodels.Page( 26 title=_("eligibility.pages.index.title"), 27 content_title=_("eligibility.pages.index.content_title"), 28 forms=forms.EligibilityVerifierSelectionForm(agency=agency), 29 image=_index_image(), 30 ) 31 32 if request.method == "POST": 33 form = forms.EligibilityVerifierSelectionForm(data=request.POST, agency=agency) 34 35 if form.is_valid(): 36 verifier_id = form.cleaned_data.get("verifier") 37 verifier = EligibilityVerifier.objects.get(id=verifier_id) 38 session.update(request, verifier=verifier) 39 40 response = redirect(eligibility_start) 41 else: 42 # form was not valid, allow for correction/resubmission 43 page.forms = [form] 44 response = PageTemplateResponse(request, page) 45 else: 46 if agency.eligibility_verifiers.count() == 1: 47 verifier = agency.eligibility_verifiers.first() 48 session.update(request, verifier=verifier) 49 response = redirect(eligibility_start) 50 else: 51 response = PageTemplateResponse(request, page) 52 53 return response 54 55 56 @decorator_from_middleware(middleware.AgencySessionRequired) 57 @decorator_from_middleware(middleware.VerifierSessionRequired) 58 def start(request): 59 """View handler for the eligibility verification getting started screen.""" 60 61 verifier = session.verifier(request) 62 63 page = viewmodels.Page( 64 title=_("eligibility.pages.start.title"), 65 content_title=_(verifier.start_content_title), 66 media=[ 67 viewmodels.MediaItem( 68 icon=viewmodels.Icon("idcardcheck", pgettext("image alt text", "core.icons.idcardcheck")), 69 heading=_(verifier.start_item_name), 70 details=_(verifier.start_item_description), 71 ), 72 viewmodels.MediaItem( 73 icon=viewmodels.Icon("bankcardcheck", pgettext("image alt text", "core.icons.bankcardcheck")), 74 heading=_("eligibility.pages.start.items[1].title"), 75 details=_("eligibility.pages.start.items[1].text"), 76 ), 77 ], 78 paragraphs=[_(verifier.start_blurb)], 79 image=_index_image(), 80 button=viewmodels.Button.primary(text=_("eligibility.pages.start.button"), url=reverse("eligibility:confirm")), 81 ) 82 83 return PageTemplateResponse(request, page) 84 85 86 @decorator_from_middleware(middleware.AgencySessionRequired) 87 @decorator_from_middleware(middleware.RateLimit) 88 @decorator_from_middleware(middleware.VerifierSessionRequired) 89 def confirm(request): 90 """View handler for the eligibility verification form.""" 91 92 verifier = session.verifier(request) 93 94 page = viewmodels.Page( 95 title=_(verifier.form_title), 96 content_title=_(verifier.form_content_title), 97 paragraphs=[_(verifier.form_blurb)], 98 form=forms.EligibilityVerificationForm(auto_id=True, label_suffix="", verifier=verifier), 99 classes="text-lg-center", 100 ) 101 102 if request.method == "POST": 103 analytics.started_eligibility(request) 104 105 form = forms.EligibilityVerificationForm(data=request.POST, verifier=verifier) 106 response = _verify(request, form) 107 108 if response is None: 109 # form was not valid, allow for correction/resubmission 110 analytics.returned_error(request, form.errors) 111 page.forms = [form] 112 response = PageTemplateResponse(request, page) 113 elif session.eligible(request): 114 eligibility = 
session.eligibility(request) 115 response = verified(request, [eligibility.name]) 116 else: 117 response = PageTemplateResponse(request, page) 118 119 return response 120 121 122 def _verify(request, form): 123 """Helper calls the eligibility verification API with user input.""" 124 125 if not form.is_valid(): 126 if recaptcha.has_error(form): 127 messages.error(request, "Recaptcha failed. Please try again.") 128 return None 129 130 sub, name = form.cleaned_data.get("sub"), form.cleaned_data.get("name") 131 132 agency = session.agency(request) 133 verifier = session.verifier(request) 134 client = api.Client(agency, verifier) 135 136 response = client.verify(sub, name) 137 138 if response.error and any(response.error): 139 form.add_api_errors(response.error) 140 return None 141 elif any(response.eligibility): 142 return verified(request, response.eligibility) 143 else: 144 return unverified(request) 145 146 147 @decorator_from_middleware(middleware.AgencySessionRequired) 148 def verified(request, verified_types): 149 """View handler for the verified eligibility page.""" 150 151 analytics.returned_success(request) 152 153 enrollment_index = reverse("enrollment:index") 154 session.update(request, eligibility_types=verified_types, origin=enrollment_index) 155 156 return redirect(enrollment_index) 157 158 159 @decorator_from_middleware(middleware.AgencySessionRequired) 160 def unverified(request): 161 """View handler for the unverified eligibility page.""" 162 163 analytics.returned_fail(request) 164 165 # tel: link to agency phone number 166 agency = session.agency(request) 167 buttons = viewmodels.Button.agency_contact_links(agency) 168 169 page = viewmodels.Page( 170 title=_("eligibility.pages.unverified.title"), 171 content_title=_("eligibility.pages.unverified.content_title"), 172 icon=viewmodels.Icon("idcardquestion", pgettext("image alt text", "core.icons.idcardquestion")), 173 paragraphs=[_("eligibility.pages.unverified.p[0]"), _("eligibility.pages.unverified.p[1]")], 174 buttons=buttons, 175 classes="text-lg-center", 176 ) 177 178 return PageTemplateResponse(request, page) 179 [end of benefits/eligibility/views.py] [start of benefits/eligibility/forms.py] 1 """ 2 The eligibility application: Form definition for the eligibility verification flow. 
3 """ 4 import logging 5 6 from django import forms 7 from django.utils.translation import gettext_lazy as _ 8 9 from benefits.core import models, recaptcha, widgets 10 11 12 logger = logging.getLogger(__name__) 13 14 15 class EligibilityVerifierSelectionForm(forms.Form): 16 """Form to capture eligibility verifier selection.""" 17 18 action_url = "eligibility:index" 19 method = "POST" 20 21 verifier = forms.ChoiceField(label="", widget=forms.RadioSelect) 22 23 submit_value = _("eligibility.pages.index.button") 24 25 def __init__(self, agency: models.TransitAgency, *args, **kwargs): 26 super().__init__(*args, **kwargs) 27 choices = [(v.id, v.selection_label) for v in agency.eligibility_verifiers.all()] 28 self.fields["verifier"].choices = choices 29 30 31 class EligibilityVerificationForm(forms.Form): 32 """Form to collect eligibility verification details.""" 33 34 action_url = "eligibility:confirm" 35 method = "POST" 36 37 submit_value = _("eligibility.forms.confirm.submit") 38 submitting_value = _("eligibility.forms.confirm.submitting") 39 40 _error_messages = { 41 "invalid": _("eligibility.forms.confirm.errors.invalid"), 42 "missing": _("eligibility.forms.confirm.errors.missing"), 43 } 44 45 def __init__(self, verifier: models.EligibilityVerifier, *args, **kwargs): 46 super().__init__(*args, **kwargs) 47 48 sub_widget = widgets.FormControlTextInput(placeholder=verifier.form_sub_placeholder) 49 if verifier.form_sub_pattern: 50 sub_widget.attrs.update({"pattern": verifier.form_sub_pattern}) 51 52 self.fields["sub"] = forms.CharField(label=_(verifier.form_sub_label), widget=sub_widget) 53 54 name_widget = widgets.FormControlTextInput(placeholder=verifier.form_name_placeholder) 55 if verifier.form_name_max_length: 56 name_widget.attrs.update({"maxlength": verifier.form_name_max_length}) 57 58 self.fields["name"] = forms.CharField(label=_(verifier.form_name_label), widget=name_widget) 59 60 def add_api_errors(self, form_errors): 61 """Handle errors passed back from API server related to submitted form values.""" 62 63 validation_errors = { 64 field: forms.ValidationError(self._error_messages.get(code, _("core.pages.error.title")), code=code) 65 for (field, code) in form_errors.items() 66 if field in self.fields 67 } 68 69 if len(validation_errors) > 0: 70 logger.warning("Form fields are invalid") 71 72 for (field, err) in validation_errors.items(): 73 self.add_error(field, err) 74 75 def clean(self): 76 if not recaptcha.verify(self.data): 77 raise forms.ValidationError("reCAPTCHA failed") 78 [end of benefits/eligibility/forms.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/benefits/eligibility/forms.py b/benefits/eligibility/forms.py --- a/benefits/eligibility/forms.py +++ b/benefits/eligibility/forms.py @@ -20,11 +20,11 @@ verifier = forms.ChoiceField(label="", widget=forms.RadioSelect) - submit_value = _("eligibility.pages.index.button") + submit_value = _("eligibility.buttons.continue") def __init__(self, agency: models.TransitAgency, *args, **kwargs): super().__init__(*args, **kwargs) - choices = [(v.id, v.selection_label) for v in agency.eligibility_verifiers.all()] + choices = [(v.id, _(v.selection_label)) for v in agency.eligibility_verifiers.all()] self.fields["verifier"].choices = choices diff --git a/benefits/eligibility/views.py b/benefits/eligibility/views.py --- a/benefits/eligibility/views.py +++ b/benefits/eligibility/views.py @@ -77,7 +77,7 @@ ], paragraphs=[_(verifier.start_blurb)], image=_index_image(), - button=viewmodels.Button.primary(text=_("eligibility.pages.start.button"), url=reverse("eligibility:confirm")), + button=viewmodels.Button.primary(text=_("eligibility.buttons.continue"), url=reverse("eligibility:confirm")), ) return PageTemplateResponse(request, page)
{"golden_diff": "diff --git a/benefits/eligibility/forms.py b/benefits/eligibility/forms.py\n--- a/benefits/eligibility/forms.py\n+++ b/benefits/eligibility/forms.py\n@@ -20,11 +20,11 @@\n \n verifier = forms.ChoiceField(label=\"\", widget=forms.RadioSelect)\n \n- submit_value = _(\"eligibility.pages.index.button\")\n+ submit_value = _(\"eligibility.buttons.continue\")\n \n def __init__(self, agency: models.TransitAgency, *args, **kwargs):\n super().__init__(*args, **kwargs)\n- choices = [(v.id, v.selection_label) for v in agency.eligibility_verifiers.all()]\n+ choices = [(v.id, _(v.selection_label)) for v in agency.eligibility_verifiers.all()]\n self.fields[\"verifier\"].choices = choices\n \n \ndiff --git a/benefits/eligibility/views.py b/benefits/eligibility/views.py\n--- a/benefits/eligibility/views.py\n+++ b/benefits/eligibility/views.py\n@@ -77,7 +77,7 @@\n ],\n paragraphs=[_(verifier.start_blurb)],\n image=_index_image(),\n- button=viewmodels.Button.primary(text=_(\"eligibility.pages.start.button\"), url=reverse(\"eligibility:confirm\")),\n+ button=viewmodels.Button.primary(text=_(\"eligibility.buttons.continue\"), url=reverse(\"eligibility:confirm\")),\n )\n \n return PageTemplateResponse(request, page)\n", "issue": "Refactor language files for multiple verifiers\n## Background\r\n\r\nWe need message strings for multiple verifiers\r\n\r\n## Tasks\r\n\r\n* [x] Create a new [verifier fixture](https://github.com/cal-itp/benefits/blob/dev/fixtures/02_eligibilityverifier.json) from the existing one\r\n * [x] With a new `pk` and `fields.name`\r\n * [x] Adjust both to have different `msgid`, e.g. `eligibility.pages.confirm.dmv.*` and `eligibility.pages.confirm.mst.*`\r\n* [x] Adjust the `django.po` files with new `msgid` and new sample content for the Courtesy Cards verifier for each language\r\n* [x] Wire the above fixture to one of the [sample agencies](https://github.com/cal-itp/benefits/blob/dev/fixtures/04_transitagency.json)\r\n\r\n## Testing\r\n\r\nOnce #322 is merged\r\n\r\n* [x] Remove your local db and local compiled language files, then run `bin/init.sh`\r\n * [x] Observe that the migrations run correctly, the fixture data is loaded, and the language files are compiled\r\n* [x] Open the app locally and select the agency configured above with multiple verifiers\r\n * [x] Observe the `eligibility:index` view with different verifier selections\r\n* [x] Select one of the verifiers\r\n * [x] Observe the verifier-specific `eligibility:start` view\r\n * [x] Confirm verification of a test user\r\n* [x] Go back to `eligibility:index` and select the other verifier\r\n * [x] Observe the verifier-specific `eligibility:start` view\r\n * [x] Confirm verification of a test user\n", "before_files": [{"content": "\"\"\"\nThe eligibility application: view definitions for the eligibility verification flow.\n\"\"\"\nfrom django.contrib import messages\nfrom django.shortcuts import redirect\nfrom django.urls import reverse\nfrom django.utils.decorators import decorator_from_middleware\nfrom django.utils.translation import pgettext, gettext as _\n\nfrom benefits.core import middleware, recaptcha, session, viewmodels\nfrom benefits.core.models import EligibilityVerifier\nfrom benefits.core.views import PageTemplateResponse, _index_image\nfrom . 
import analytics, api, forms\n\n\n@decorator_from_middleware(middleware.AgencySessionRequired)\ndef index(request):\n \"\"\"View handler for the eligibility verifier selection form.\"\"\"\n\n session.update(request, eligibility_types=[], origin=reverse(\"eligibility:index\"))\n agency = session.agency(request)\n\n eligibility_start = reverse(\"eligibility:start\")\n\n page = viewmodels.Page(\n title=_(\"eligibility.pages.index.title\"),\n content_title=_(\"eligibility.pages.index.content_title\"),\n forms=forms.EligibilityVerifierSelectionForm(agency=agency),\n image=_index_image(),\n )\n\n if request.method == \"POST\":\n form = forms.EligibilityVerifierSelectionForm(data=request.POST, agency=agency)\n\n if form.is_valid():\n verifier_id = form.cleaned_data.get(\"verifier\")\n verifier = EligibilityVerifier.objects.get(id=verifier_id)\n session.update(request, verifier=verifier)\n\n response = redirect(eligibility_start)\n else:\n # form was not valid, allow for correction/resubmission\n page.forms = [form]\n response = PageTemplateResponse(request, page)\n else:\n if agency.eligibility_verifiers.count() == 1:\n verifier = agency.eligibility_verifiers.first()\n session.update(request, verifier=verifier)\n response = redirect(eligibility_start)\n else:\n response = PageTemplateResponse(request, page)\n\n return response\n\n\n@decorator_from_middleware(middleware.AgencySessionRequired)\n@decorator_from_middleware(middleware.VerifierSessionRequired)\ndef start(request):\n \"\"\"View handler for the eligibility verification getting started screen.\"\"\"\n\n verifier = session.verifier(request)\n\n page = viewmodels.Page(\n title=_(\"eligibility.pages.start.title\"),\n content_title=_(verifier.start_content_title),\n media=[\n viewmodels.MediaItem(\n icon=viewmodels.Icon(\"idcardcheck\", pgettext(\"image alt text\", \"core.icons.idcardcheck\")),\n heading=_(verifier.start_item_name),\n details=_(verifier.start_item_description),\n ),\n viewmodels.MediaItem(\n icon=viewmodels.Icon(\"bankcardcheck\", pgettext(\"image alt text\", \"core.icons.bankcardcheck\")),\n heading=_(\"eligibility.pages.start.items[1].title\"),\n details=_(\"eligibility.pages.start.items[1].text\"),\n ),\n ],\n paragraphs=[_(verifier.start_blurb)],\n image=_index_image(),\n button=viewmodels.Button.primary(text=_(\"eligibility.pages.start.button\"), url=reverse(\"eligibility:confirm\")),\n )\n\n return PageTemplateResponse(request, page)\n\n\n@decorator_from_middleware(middleware.AgencySessionRequired)\n@decorator_from_middleware(middleware.RateLimit)\n@decorator_from_middleware(middleware.VerifierSessionRequired)\ndef confirm(request):\n \"\"\"View handler for the eligibility verification form.\"\"\"\n\n verifier = session.verifier(request)\n\n page = viewmodels.Page(\n title=_(verifier.form_title),\n content_title=_(verifier.form_content_title),\n paragraphs=[_(verifier.form_blurb)],\n form=forms.EligibilityVerificationForm(auto_id=True, label_suffix=\"\", verifier=verifier),\n classes=\"text-lg-center\",\n )\n\n if request.method == \"POST\":\n analytics.started_eligibility(request)\n\n form = forms.EligibilityVerificationForm(data=request.POST, verifier=verifier)\n response = _verify(request, form)\n\n if response is None:\n # form was not valid, allow for correction/resubmission\n analytics.returned_error(request, form.errors)\n page.forms = [form]\n response = PageTemplateResponse(request, page)\n elif session.eligible(request):\n eligibility = session.eligibility(request)\n response = verified(request, 
[eligibility.name])\n else:\n response = PageTemplateResponse(request, page)\n\n return response\n\n\ndef _verify(request, form):\n \"\"\"Helper calls the eligibility verification API with user input.\"\"\"\n\n if not form.is_valid():\n if recaptcha.has_error(form):\n messages.error(request, \"Recaptcha failed. Please try again.\")\n return None\n\n sub, name = form.cleaned_data.get(\"sub\"), form.cleaned_data.get(\"name\")\n\n agency = session.agency(request)\n verifier = session.verifier(request)\n client = api.Client(agency, verifier)\n\n response = client.verify(sub, name)\n\n if response.error and any(response.error):\n form.add_api_errors(response.error)\n return None\n elif any(response.eligibility):\n return verified(request, response.eligibility)\n else:\n return unverified(request)\n\n\n@decorator_from_middleware(middleware.AgencySessionRequired)\ndef verified(request, verified_types):\n \"\"\"View handler for the verified eligibility page.\"\"\"\n\n analytics.returned_success(request)\n\n enrollment_index = reverse(\"enrollment:index\")\n session.update(request, eligibility_types=verified_types, origin=enrollment_index)\n\n return redirect(enrollment_index)\n\n\n@decorator_from_middleware(middleware.AgencySessionRequired)\ndef unverified(request):\n \"\"\"View handler for the unverified eligibility page.\"\"\"\n\n analytics.returned_fail(request)\n\n # tel: link to agency phone number\n agency = session.agency(request)\n buttons = viewmodels.Button.agency_contact_links(agency)\n\n page = viewmodels.Page(\n title=_(\"eligibility.pages.unverified.title\"),\n content_title=_(\"eligibility.pages.unverified.content_title\"),\n icon=viewmodels.Icon(\"idcardquestion\", pgettext(\"image alt text\", \"core.icons.idcardquestion\")),\n paragraphs=[_(\"eligibility.pages.unverified.p[0]\"), _(\"eligibility.pages.unverified.p[1]\")],\n buttons=buttons,\n classes=\"text-lg-center\",\n )\n\n return PageTemplateResponse(request, page)\n", "path": "benefits/eligibility/views.py"}, {"content": "\"\"\"\nThe eligibility application: Form definition for the eligibility verification flow.\n\"\"\"\nimport logging\n\nfrom django import forms\nfrom django.utils.translation import gettext_lazy as _\n\nfrom benefits.core import models, recaptcha, widgets\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass EligibilityVerifierSelectionForm(forms.Form):\n \"\"\"Form to capture eligibility verifier selection.\"\"\"\n\n action_url = \"eligibility:index\"\n method = \"POST\"\n\n verifier = forms.ChoiceField(label=\"\", widget=forms.RadioSelect)\n\n submit_value = _(\"eligibility.pages.index.button\")\n\n def __init__(self, agency: models.TransitAgency, *args, **kwargs):\n super().__init__(*args, **kwargs)\n choices = [(v.id, v.selection_label) for v in agency.eligibility_verifiers.all()]\n self.fields[\"verifier\"].choices = choices\n\n\nclass EligibilityVerificationForm(forms.Form):\n \"\"\"Form to collect eligibility verification details.\"\"\"\n\n action_url = \"eligibility:confirm\"\n method = \"POST\"\n\n submit_value = _(\"eligibility.forms.confirm.submit\")\n submitting_value = _(\"eligibility.forms.confirm.submitting\")\n\n _error_messages = {\n \"invalid\": _(\"eligibility.forms.confirm.errors.invalid\"),\n \"missing\": _(\"eligibility.forms.confirm.errors.missing\"),\n }\n\n def __init__(self, verifier: models.EligibilityVerifier, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n sub_widget = widgets.FormControlTextInput(placeholder=verifier.form_sub_placeholder)\n if 
verifier.form_sub_pattern:\n sub_widget.attrs.update({\"pattern\": verifier.form_sub_pattern})\n\n self.fields[\"sub\"] = forms.CharField(label=_(verifier.form_sub_label), widget=sub_widget)\n\n name_widget = widgets.FormControlTextInput(placeholder=verifier.form_name_placeholder)\n if verifier.form_name_max_length:\n name_widget.attrs.update({\"maxlength\": verifier.form_name_max_length})\n\n self.fields[\"name\"] = forms.CharField(label=_(verifier.form_name_label), widget=name_widget)\n\n def add_api_errors(self, form_errors):\n \"\"\"Handle errors passed back from API server related to submitted form values.\"\"\"\n\n validation_errors = {\n field: forms.ValidationError(self._error_messages.get(code, _(\"core.pages.error.title\")), code=code)\n for (field, code) in form_errors.items()\n if field in self.fields\n }\n\n if len(validation_errors) > 0:\n logger.warning(\"Form fields are invalid\")\n\n for (field, err) in validation_errors.items():\n self.add_error(field, err)\n\n def clean(self):\n if not recaptcha.verify(self.data):\n raise forms.ValidationError(\"reCAPTCHA failed\")\n", "path": "benefits/eligibility/forms.py"}]}
3,388
323
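An illustrative aside on the row above: the one-line change to `choices` in the golden diff works because `selection_label` on the verifier model stores a gettext msgid rather than display text, so it has to pass through the translation function when the form builds its radio options. The sketch below imitates that with a toy catalog; the msgids, `CATALOG`, and `Verifier` class are hypothetical stand-ins, not the project's actual fixtures or models.

```python
# Toy catalogue standing in for the compiled django.po files; msgids are made up.
CATALOG = {
    "eligibility.pages.index.dmv.label": "Verify with a driver's license",
    "eligibility.pages.index.mst.label": "Verify with a Courtesy Card",
}


def _(msgid: str) -> str:
    """Minimal gettext stand-in: fall back to the msgid when no translation exists."""
    return CATALOG.get(msgid, msgid)


class Verifier:
    """Tiny stand-in for EligibilityVerifier: it stores a msgid, not display text."""

    def __init__(self, pk: int, selection_label: str):
        self.id = pk
        self.selection_label = selection_label


verifiers = [
    Verifier(1, "eligibility.pages.index.dmv.label"),
    Verifier(2, "eligibility.pages.index.mst.label"),
]

# Before the patch: the radio buttons would show raw msgids.
raw_choices = [(v.id, v.selection_label) for v in verifiers]

# After the patch: each verifier-specific msgid is translated when choices are built.
translated_choices = [(v.id, _(v.selection_label)) for v in verifiers]

print(raw_choices)
print(translated_choices)
```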
gh_patches_debug_36741
rasdani/github-patches
git_diff
cupy__cupy-3011
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Columnar matrix slice doesn't work properly with ravel When creating a view into a 2d array using `[:, n:]` notation, the resulting view doesn't work properly with `ravel` or functions that depend on it. ``` import cupy as cp a = cp.zeros((3, 3)) b = a[:, 1:] b.ravel()[0] = 1 print(b) b[0, 0] = 2 print(a) ``` ``` output [[0. 0.] [0. 0.] [0. 0.]] [[0. 2. 0.] [0. 0. 0.] [0. 0. 0.]] ``` As you can see, `b[0, 0]` still works as expected, but the array produced by `ravel` is a copy. This is not the case for the array produced with a row-wise slice: ``` c = a[1:, :] c.ravel()[0] = 3 print(a) ``` ``` output array([[0., 2., 0.], [3., 0., 0.], [0., 0., 0.]]) ``` This affects me because I'm trying to fill out a tridiagonal matrix using `cupy`, but the upper diagonal can't be properly referenced: ``` d = cp.zeros((3, 3)) cp.fill_diagonal(d, 3) cp.fill_diagonal(d[1:], 2) cp.fill_diagonal(d[:, 1:], 1) print(d) ``` ``` output [[3. 0. 0.] [2. 3. 0.] [0. 2. 3.]] ``` ``` In [182]: cp.show_config() CuPy Version : 7.1.0 CUDA Root : /usr/local/cuda CUDA Build Version : 10000 CUDA Driver Version : 10000 CUDA Runtime Version : 10000 cuBLAS Version : 10000 cuFFT Version : 10000 cuRAND Version : 10000 cuSOLVER Version : (10, 0, 0) cuSPARSE Version : 10000 NVRTC Version : (10, 0) cuDNN Build Version : 7605 cuDNN Version : 7600 NCCL Build Version : 2406 NCCL Runtime Version : 2406 ``` </issue> <code> [start of cupy/indexing/insert.py] 1 import numpy 2 3 import cupy 4 5 6 def place(arr, mask, vals): 7 """Change elements of an array based on conditional and input values. 8 9 This function uses the first N elements of `vals`, where N is the number 10 of true values in `mask`. 11 12 Args: 13 arr (cupy.ndarray): Array to put data into. 14 mask (array-like): Boolean mask array. Must have the same size as `a`. 15 vals (array-like): Values to put into `a`. Only the first 16 N elements are used, where N is the number of True values in 17 `mask`. If `vals` is smaller than N, it will be repeated, and if 18 elements of `a` are to be masked, this sequence must be non-empty. 19 20 Examples 21 -------- 22 >>> arr = np.arange(6).reshape(2, 3) 23 >>> np.place(arr, arr>2, [44, 55]) 24 >>> arr 25 array([[ 0, 1, 2], 26 [44, 55, 44]]) 27 28 .. warning:: 29 30 This function may synchronize the device. 31 32 .. seealso:: :func:`numpy.place` 33 """ 34 # TODO(niboshi): Avoid nonzero which may synchronize the device. 35 mask = cupy.asarray(mask) 36 if arr.size != mask.size: 37 raise ValueError('Mask and data must be the same size.') 38 vals = cupy.asarray(vals) 39 40 mask_indices = mask.ravel().nonzero()[0] # may synchronize 41 if mask_indices.size == 0: 42 return 43 if vals.size == 0: 44 raise ValueError('Cannot insert from an empty array.') 45 arr.put(mask_indices, vals, mode='wrap') 46 47 48 def put(a, ind, v, mode='wrap'): 49 """Replaces specified elements of an array with given values. 50 51 Args: 52 a (cupy.ndarray): Target array. 53 ind (array-like): Target indices, interpreted as integers. 54 v (array-like): Values to place in `a` at target indices. 55 If `v` is shorter than `ind` it will be repeated as necessary. 56 mode (str): How out-of-bounds indices will behave. Its value must be 57 either `'raise'`, `'wrap'` or `'clip'`. Otherwise, 58 :class:`TypeError` is raised. 59 60 .. note:: 61 Default `mode` is set to `'wrap'` to avoid unintended performance drop. 
62 If you need NumPy's behavior, please pass `mode='raise'` manually. 63 64 .. seealso:: :func:`numpy.put` 65 """ 66 a.put(ind, v, mode=mode) 67 68 69 # TODO(okuta): Implement putmask 70 71 72 def fill_diagonal(a, val, wrap=False): 73 """Fills the main diagonal of the given array of any dimensionality. 74 75 For an array `a` with ``a.ndim > 2``, the diagonal is the list of 76 locations with indices ``a[i, i, ..., i]`` all identical. This function 77 modifies the input array in-place, it does not return a value. 78 79 Args: 80 a (cupy.ndarray): The array, at least 2-D. 81 val (scalar): The value to be written on the diagonal. 82 Its type must be compatible with that of the array a. 83 wrap (bool): If specified, the diagonal is "wrapped" after N columns. 84 This affects only tall matrices. 85 86 Examples 87 -------- 88 >>> a = cupy.zeros((3, 3), int) 89 >>> cupy.fill_diagonal(a, 5) 90 >>> a 91 array([[5, 0, 0], 92 [0, 5, 0], 93 [0, 0, 5]]) 94 95 .. seealso:: :func:`numpy.fill_diagonal` 96 """ 97 # The followings are imported from the original numpy 98 if a.ndim < 2: 99 raise ValueError('array must be at least 2-d') 100 end = None 101 if a.ndim == 2: 102 step = a.shape[1] + 1 103 if not wrap: 104 end = a.shape[1] * a.shape[1] 105 else: 106 if not numpy.alltrue(numpy.diff(a.shape) == 0): 107 raise ValueError('All dimensions of input must be of equal length') 108 step = 1 + numpy.cumprod(a.shape[:-1]).sum() 109 110 # Since the current cupy does not support a.flat, 111 # we use a.ravel() instead of a.flat 112 a.ravel()[:end:step] = val 113 [end of cupy/indexing/insert.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/cupy/indexing/insert.py b/cupy/indexing/insert.py --- a/cupy/indexing/insert.py +++ b/cupy/indexing/insert.py @@ -1,6 +1,12 @@ +import string + import numpy import cupy +from cupy import util +from cupy.core import _carray +from cupy.core import _scalar +from cupy.cuda import device def place(arr, mask, vals): @@ -69,6 +75,33 @@ # TODO(okuta): Implement putmask +_fill_diagonal_template = string.Template(r''' +#include <cupy/complex.cuh> +#include <cupy/carray.cuh> +extern "C" __global__ +void cupy_fill_diagonal(CArray<${type}, ${a_ndim}> a, + CIndexer<${a_ndim}> a_ind, + int start, + int stop, + int step, + CArray<${type}, ${val_ndim}> val, + CIndexer<${val_ndim}> val_ind) { + int n = (stop - start) / step + 1; + CUPY_FOR(i, n) { + a_ind.set(start + i * step); + val_ind.set(i % val_ind.size()); + a[a_ind.get()] = val[val_ind.get()]; + } +}''') + + [email protected](for_each_device=True) +def _fill_diagonal_kernel(type, a_ndim, val_ndim): + code = _fill_diagonal_template.substitute( + type=type, a_ndim=a_ndim, val_ndim=val_ndim) + return cupy.RawKernel(code, 'cupy_fill_diagonal') + + def fill_diagonal(a, val, wrap=False): """Fills the main diagonal of the given array of any dimensionality. @@ -97,7 +130,7 @@ # The followings are imported from the original numpy if a.ndim < 2: raise ValueError('array must be at least 2-d') - end = None + end = a.size if a.ndim == 2: step = a.shape[1] + 1 if not wrap: @@ -107,6 +140,21 @@ raise ValueError('All dimensions of input must be of equal length') step = 1 + numpy.cumprod(a.shape[:-1]).sum() - # Since the current cupy does not support a.flat, - # we use a.ravel() instead of a.flat - a.ravel()[:end:step] = val + val = cupy.asarray(val, dtype=a.dtype) + + dev_id = device.get_device_id() + for arr in [a, val]: + if arr.data.device_id != dev_id: + raise ValueError( + 'Array device must be same as the current ' + 'device: array device = %d while current = %d' + % (arr.data.device_id, dev_id)) + + typename = _scalar.get_typename(a.dtype) + fill_diagonal_kernel = _fill_diagonal_kernel(typename, a.ndim, val.ndim) + + size = end // step + 1 + a_ind = _carray.Indexer(a.shape) + val_ind = _carray.Indexer(val.shape) + fill_diagonal_kernel.kernel.linear_launch( + size, (a, a_ind, 0, end, step, val, val_ind))
{"golden_diff": "diff --git a/cupy/indexing/insert.py b/cupy/indexing/insert.py\n--- a/cupy/indexing/insert.py\n+++ b/cupy/indexing/insert.py\n@@ -1,6 +1,12 @@\n+import string\n+\n import numpy\n \n import cupy\n+from cupy import util\n+from cupy.core import _carray\n+from cupy.core import _scalar\n+from cupy.cuda import device\n \n \n def place(arr, mask, vals):\n@@ -69,6 +75,33 @@\n # TODO(okuta): Implement putmask\n \n \n+_fill_diagonal_template = string.Template(r'''\n+#include <cupy/complex.cuh>\n+#include <cupy/carray.cuh>\n+extern \"C\" __global__\n+void cupy_fill_diagonal(CArray<${type}, ${a_ndim}> a,\n+ CIndexer<${a_ndim}> a_ind,\n+ int start,\n+ int stop,\n+ int step,\n+ CArray<${type}, ${val_ndim}> val,\n+ CIndexer<${val_ndim}> val_ind) {\n+ int n = (stop - start) / step + 1;\n+ CUPY_FOR(i, n) {\n+ a_ind.set(start + i * step);\n+ val_ind.set(i % val_ind.size());\n+ a[a_ind.get()] = val[val_ind.get()];\n+ }\n+}''')\n+\n+\[email protected](for_each_device=True)\n+def _fill_diagonal_kernel(type, a_ndim, val_ndim):\n+ code = _fill_diagonal_template.substitute(\n+ type=type, a_ndim=a_ndim, val_ndim=val_ndim)\n+ return cupy.RawKernel(code, 'cupy_fill_diagonal')\n+\n+\n def fill_diagonal(a, val, wrap=False):\n \"\"\"Fills the main diagonal of the given array of any dimensionality.\n \n@@ -97,7 +130,7 @@\n # The followings are imported from the original numpy\n if a.ndim < 2:\n raise ValueError('array must be at least 2-d')\n- end = None\n+ end = a.size\n if a.ndim == 2:\n step = a.shape[1] + 1\n if not wrap:\n@@ -107,6 +140,21 @@\n raise ValueError('All dimensions of input must be of equal length')\n step = 1 + numpy.cumprod(a.shape[:-1]).sum()\n \n- # Since the current cupy does not support a.flat,\n- # we use a.ravel() instead of a.flat\n- a.ravel()[:end:step] = val\n+ val = cupy.asarray(val, dtype=a.dtype)\n+\n+ dev_id = device.get_device_id()\n+ for arr in [a, val]:\n+ if arr.data.device_id != dev_id:\n+ raise ValueError(\n+ 'Array device must be same as the current '\n+ 'device: array device = %d while current = %d'\n+ % (arr.data.device_id, dev_id))\n+\n+ typename = _scalar.get_typename(a.dtype)\n+ fill_diagonal_kernel = _fill_diagonal_kernel(typename, a.ndim, val.ndim)\n+\n+ size = end // step + 1\n+ a_ind = _carray.Indexer(a.shape)\n+ val_ind = _carray.Indexer(val.shape)\n+ fill_diagonal_kernel.kernel.linear_launch(\n+ size, (a, a_ind, 0, end, step, val, val_ind))\n", "issue": "Columnar matrix slice doesn't work properly with ravel\nWhen creating a view into a 2d array using `[:, n:]` notation, the resulting view doesn't work properly with `ravel` or functions that depend on it.\r\n\r\n```\r\nimport cupy as cp\r\na = cp.zeros((3, 3))\r\nb = a[:, 1:]\r\nb.ravel()[0] = 1\r\nprint(b)\r\nb[0, 0] = 2\r\nprint(a)\r\n```\r\n\r\n``` output\r\n[[0. 0.]\r\n [0. 0.]\r\n [0. 0.]]\r\n[[0. 2. 0.]\r\n [0. 0. 0.]\r\n [0. 0. 0.]]\r\n```\r\n\r\nAs you can see, `b[0, 0]` still works as expected, but the array produced by `ravel` is a copy. This is not the case for the array produced with a row-wise slice:\r\n\r\n```\r\nc = a[1:, :]\r\nc.ravel()[0] = 3\r\nprint(a)\r\n```\r\n\r\n``` output\r\narray([[0., 2., 0.],\r\n [3., 0., 0.],\r\n [0., 0., 0.]])\r\n```\r\n\r\nThis affects me because I'm trying to fill out a tridiagonal matrix using `cupy`, but the upper diagonal can't be properly referenced:\r\n```\r\nd = cp.zeros((3, 3))\r\ncp.fill_diagonal(d, 3)\r\ncp.fill_diagonal(d[1:], 2)\r\ncp.fill_diagonal(d[:, 1:], 1)\r\nprint(d)\r\n```\r\n\r\n``` output\r\n[[3. 0. 0.]\r\n [2. 3. 0.]\r\n [0. 2. 
3.]]\r\n```\r\n\r\n```\r\nIn [182]: cp.show_config()\r\nCuPy Version : 7.1.0\r\nCUDA Root : /usr/local/cuda\r\nCUDA Build Version : 10000\r\nCUDA Driver Version : 10000\r\nCUDA Runtime Version : 10000\r\ncuBLAS Version : 10000\r\ncuFFT Version : 10000\r\ncuRAND Version : 10000\r\ncuSOLVER Version : (10, 0, 0)\r\ncuSPARSE Version : 10000\r\nNVRTC Version : (10, 0)\r\ncuDNN Build Version : 7605\r\ncuDNN Version : 7600\r\nNCCL Build Version : 2406\r\nNCCL Runtime Version : 2406\r\n```\n", "before_files": [{"content": "import numpy\n\nimport cupy\n\n\ndef place(arr, mask, vals):\n \"\"\"Change elements of an array based on conditional and input values.\n\n This function uses the first N elements of `vals`, where N is the number\n of true values in `mask`.\n\n Args:\n arr (cupy.ndarray): Array to put data into.\n mask (array-like): Boolean mask array. Must have the same size as `a`.\n vals (array-like): Values to put into `a`. Only the first\n N elements are used, where N is the number of True values in\n `mask`. If `vals` is smaller than N, it will be repeated, and if\n elements of `a` are to be masked, this sequence must be non-empty.\n\n Examples\n --------\n >>> arr = np.arange(6).reshape(2, 3)\n >>> np.place(arr, arr>2, [44, 55])\n >>> arr\n array([[ 0, 1, 2],\n [44, 55, 44]])\n\n .. warning::\n\n This function may synchronize the device.\n\n .. seealso:: :func:`numpy.place`\n \"\"\"\n # TODO(niboshi): Avoid nonzero which may synchronize the device.\n mask = cupy.asarray(mask)\n if arr.size != mask.size:\n raise ValueError('Mask and data must be the same size.')\n vals = cupy.asarray(vals)\n\n mask_indices = mask.ravel().nonzero()[0] # may synchronize\n if mask_indices.size == 0:\n return\n if vals.size == 0:\n raise ValueError('Cannot insert from an empty array.')\n arr.put(mask_indices, vals, mode='wrap')\n\n\ndef put(a, ind, v, mode='wrap'):\n \"\"\"Replaces specified elements of an array with given values.\n\n Args:\n a (cupy.ndarray): Target array.\n ind (array-like): Target indices, interpreted as integers.\n v (array-like): Values to place in `a` at target indices.\n If `v` is shorter than `ind` it will be repeated as necessary.\n mode (str): How out-of-bounds indices will behave. Its value must be\n either `'raise'`, `'wrap'` or `'clip'`. Otherwise,\n :class:`TypeError` is raised.\n\n .. note::\n Default `mode` is set to `'wrap'` to avoid unintended performance drop.\n If you need NumPy's behavior, please pass `mode='raise'` manually.\n\n .. seealso:: :func:`numpy.put`\n \"\"\"\n a.put(ind, v, mode=mode)\n\n\n# TODO(okuta): Implement putmask\n\n\ndef fill_diagonal(a, val, wrap=False):\n \"\"\"Fills the main diagonal of the given array of any dimensionality.\n\n For an array `a` with ``a.ndim > 2``, the diagonal is the list of\n locations with indices ``a[i, i, ..., i]`` all identical. This function\n modifies the input array in-place, it does not return a value.\n\n Args:\n a (cupy.ndarray): The array, at least 2-D.\n val (scalar): The value to be written on the diagonal.\n Its type must be compatible with that of the array a.\n wrap (bool): If specified, the diagonal is \"wrapped\" after N columns.\n This affects only tall matrices.\n\n Examples\n --------\n >>> a = cupy.zeros((3, 3), int)\n >>> cupy.fill_diagonal(a, 5)\n >>> a\n array([[5, 0, 0],\n [0, 5, 0],\n [0, 0, 5]])\n\n .. 
seealso:: :func:`numpy.fill_diagonal`\n \"\"\"\n # The followings are imported from the original numpy\n if a.ndim < 2:\n raise ValueError('array must be at least 2-d')\n end = None\n if a.ndim == 2:\n step = a.shape[1] + 1\n if not wrap:\n end = a.shape[1] * a.shape[1]\n else:\n if not numpy.alltrue(numpy.diff(a.shape) == 0):\n raise ValueError('All dimensions of input must be of equal length')\n step = 1 + numpy.cumprod(a.shape[:-1]).sum()\n\n # Since the current cupy does not support a.flat,\n # we use a.ravel() instead of a.flat\n a.ravel()[:end:step] = val\n", "path": "cupy/indexing/insert.py"}]}
2,351
786
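A short demonstration of the ndarray behaviour behind the row above, written in NumPy so it runs without a GPU; CuPy mirrors these semantics, which is why the old `fill_diagonal` silently wrote into a copy for column slices. The `fill_diagonal_via_indices` helper is an illustrative workaround based on advanced indexing, not CuPy's API; the actual fix in the diff replaces `ravel()` with an element-wise kernel.

```python
import numpy as np

a = np.zeros((3, 3))
b = a[:, 1:]                      # column slice: still a view, but not C-contiguous
print(b.flags["C_CONTIGUOUS"])    # False

r = b.ravel()                     # ravelling a non-contiguous view must copy
r[0] = 1.0
print(a)                          # unchanged: the write landed in the copy


def fill_diagonal_via_indices(arr, val, wrap=False):
    """Fill the diagonal of a 2-D array (or view) without relying on ravel()."""
    if arr.ndim != 2:
        raise ValueError("this sketch only handles the 2-D case")
    step = arr.shape[1] + 1
    end = arr.size if wrap else arr.shape[1] * arr.shape[1]
    flat = np.arange(0, min(end, arr.size), step)
    rows, cols = np.unravel_index(flat, arr.shape)
    arr[rows, cols] = val         # advanced indexing writes through the view


d = np.zeros((3, 3))
fill_diagonal_via_indices(d, 3)
fill_diagonal_via_indices(d[1:], 2)       # lower diagonal
fill_diagonal_via_indices(d[:, 1:], 1)    # upper diagonal now works too
print(d)                                  # proper tridiagonal matrix
```

Run on the issue's tridiagonal example, this fills the upper diagonal as expected because the assignment goes through the view into the base array instead of through a flattened copy.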
gh_patches_debug_1054
rasdani/github-patches
git_diff
liqd__a4-opin-906
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> styling of categories in dashboard (Safari) When using Safari the styling of categories in the dashboard is broken. ![safari styling issue](https://user-images.githubusercontent.com/15954895/28914159-fe84edde-783a-11e7-8ae4-09f0a6b978cd.png) </issue> <code> [start of euth/ideas/forms.py] 1 from adhocracy4.categories import forms as category_forms 2 3 from . import models 4 5 6 class IdeaForm(category_forms.CategorizableForm): 7 class Meta: 8 model = models.Idea 9 fields = ['name', 'description', 'image', 'category'] 10 [end of euth/ideas/forms.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/euth/ideas/forms.py b/euth/ideas/forms.py --- a/euth/ideas/forms.py +++ b/euth/ideas/forms.py @@ -7,3 +7,7 @@ class Meta: model = models.Idea fields = ['name', 'description', 'image', 'category'] + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.fields['category'].empty_label = '---'
{"golden_diff": "diff --git a/euth/ideas/forms.py b/euth/ideas/forms.py\n--- a/euth/ideas/forms.py\n+++ b/euth/ideas/forms.py\n@@ -7,3 +7,7 @@\n class Meta:\n model = models.Idea\n fields = ['name', 'description', 'image', 'category']\n+\n+ def __init__(self, *args, **kwargs):\n+ super().__init__(*args, **kwargs)\n+ self.fields['category'].empty_label = '---'\n", "issue": "styling of categories in dashboard (Safari)\nWhen using Safari the styling of categories in the dashboard is broken. \r\n\r\n![safari styling issue](https://user-images.githubusercontent.com/15954895/28914159-fe84edde-783a-11e7-8ae4-09f0a6b978cd.png)\r\n\n", "before_files": [{"content": "from adhocracy4.categories import forms as category_forms\n\nfrom . import models\n\n\nclass IdeaForm(category_forms.CategorizableForm):\n class Meta:\n model = models.Idea\n fields = ['name', 'description', 'image', 'category']\n", "path": "euth/ideas/forms.py"}]}
691
111
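A compact sketch of the pattern the patch above uses: adjust an inherited form field inside `__init__` after calling `super().__init__()`. The real `CategorizableForm` comes from adhocracy4 and its `category` field is model-backed, which is why the patch can set `empty_label` directly; the version below substitutes a plain `ChoiceField` so the snippet runs without a project, and the choice labels are invented.

```python
import django
from django.conf import settings

if not settings.configured:
    settings.configure()   # bare defaults are enough for standalone form classes
django.setup()

from django import forms


class CategorizableForm(forms.Form):
    # Stand-in for adhocracy4's CategorizableForm: a select with a blank first row.
    category = forms.ChoiceField(
        choices=[("", "--------"), ("1", "Transport"), ("2", "Housing")],
        required=False,
    )


class IdeaForm(CategorizableForm):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Same idea as the patch: give the empty option a short, explicit label
        # instead of the widget default, so every browser renders the same text.
        others = [c for c in self.fields["category"].choices if c[0] != ""]
        self.fields["category"].choices = [("", "---")] + others


form = IdeaForm()
print(form.fields["category"].choices)  # the blank row now reads '---'
```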
gh_patches_debug_5823
rasdani/github-patches
git_diff
microsoft__botbuilder-python-1207
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [PORT] Remove Slack from the list of channels that support Suggested Actions > Port this change from botbuilder-dotnet/master branch: https://github.com/microsoft/botbuilder-dotnet/pull/4177 Fixes #2291 Removing Slack from the list of channels that do not support suggested actions. # Changed projects * Microsoft.Bot.Builder.Dialogs </issue> <code> [start of libraries/botbuilder-dialogs/botbuilder/dialogs/choices/channel.py] 1 # Copyright (c) Microsoft Corporation. All rights reserved. 2 # Licensed under the MIT License. 3 4 from botbuilder.core import TurnContext 5 from botframework.connector import Channels 6 7 8 class Channel: 9 """ 10 Methods for determining channel-specific functionality. 11 """ 12 13 @staticmethod 14 def supports_suggested_actions(channel_id: str, button_cnt: int = 100) -> bool: 15 """Determine if a number of Suggested Actions are supported by a Channel. 16 17 Args: 18 channel_id (str): The Channel to check the if Suggested Actions are supported in. 19 button_cnt (int, optional): Defaults to 100. The number of Suggested Actions to check for the Channel. 20 21 Returns: 22 bool: True if the Channel supports the button_cnt total Suggested Actions, False if the Channel does not 23 support that number of Suggested Actions. 24 """ 25 26 max_actions = { 27 # https://developers.facebook.com/docs/messenger-platform/send-messages/quick-replies 28 Channels.facebook: 10, 29 Channels.skype: 10, 30 # https://developers.line.biz/en/reference/messaging-api/#items-object 31 Channels.line: 13, 32 # https://dev.kik.com/#/docs/messaging#text-response-object 33 Channels.kik: 20, 34 Channels.telegram: 100, 35 Channels.slack: 100, 36 Channels.emulator: 100, 37 Channels.direct_line: 100, 38 Channels.webchat: 100, 39 } 40 return ( 41 button_cnt <= max_actions[channel_id] 42 if channel_id in max_actions 43 else False 44 ) 45 46 @staticmethod 47 def supports_card_actions(channel_id: str, button_cnt: int = 100) -> bool: 48 """Determine if a number of Card Actions are supported by a Channel. 49 50 Args: 51 channel_id (str): The Channel to check if the Card Actions are supported in. 52 button_cnt (int, optional): Defaults to 100. The number of Card Actions to check for the Channel. 53 54 Returns: 55 bool: True if the Channel supports the button_cnt total Card Actions, False if the Channel does not support 56 that number of Card Actions. 57 """ 58 59 max_actions = { 60 Channels.facebook: 3, 61 Channels.skype: 3, 62 Channels.ms_teams: 3, 63 Channels.line: 99, 64 Channels.slack: 100, 65 Channels.emulator: 100, 66 Channels.direct_line: 100, 67 Channels.webchat: 100, 68 Channels.cortana: 100, 69 } 70 return ( 71 button_cnt <= max_actions[channel_id] 72 if channel_id in max_actions 73 else False 74 ) 75 76 @staticmethod 77 def has_message_feed(channel_id: str) -> bool: 78 """Determine if a Channel has a Message Feed. 79 80 Args: 81 channel_id (str): The Channel to check for Message Feed. 82 83 Returns: 84 bool: True if the Channel has a Message Feed, False if it does not. 85 """ 86 87 return not channel_id == Channels.cortana 88 89 @staticmethod 90 def max_action_title_length( # pylint: disable=unused-argument 91 channel_id: str, 92 ) -> int: 93 """Maximum length allowed for Action Titles. 94 95 Args: 96 channel_id (str): The Channel to determine Maximum Action Title Length. 97 98 Returns: 99 int: The total number of characters allowed for an Action Title on a specific Channel. 
100 """ 101 102 return 20 103 104 @staticmethod 105 def get_channel_id(turn_context: TurnContext) -> str: 106 """Get the Channel Id from the current Activity on the Turn Context. 107 108 Args: 109 turn_context (TurnContext): The Turn Context to retrieve the Activity's Channel Id from. 110 111 Returns: 112 str: The Channel Id from the Turn Context's Activity. 113 """ 114 115 if turn_context.activity.channel_id is None: 116 return "" 117 118 return turn_context.activity.channel_id 119 [end of libraries/botbuilder-dialogs/botbuilder/dialogs/choices/channel.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/libraries/botbuilder-dialogs/botbuilder/dialogs/choices/channel.py b/libraries/botbuilder-dialogs/botbuilder/dialogs/choices/channel.py --- a/libraries/botbuilder-dialogs/botbuilder/dialogs/choices/channel.py +++ b/libraries/botbuilder-dialogs/botbuilder/dialogs/choices/channel.py @@ -32,7 +32,6 @@ # https://dev.kik.com/#/docs/messaging#text-response-object Channels.kik: 20, Channels.telegram: 100, - Channels.slack: 100, Channels.emulator: 100, Channels.direct_line: 100, Channels.webchat: 100,
{"golden_diff": "diff --git a/libraries/botbuilder-dialogs/botbuilder/dialogs/choices/channel.py b/libraries/botbuilder-dialogs/botbuilder/dialogs/choices/channel.py\n--- a/libraries/botbuilder-dialogs/botbuilder/dialogs/choices/channel.py\n+++ b/libraries/botbuilder-dialogs/botbuilder/dialogs/choices/channel.py\n@@ -32,7 +32,6 @@\n # https://dev.kik.com/#/docs/messaging#text-response-object\n Channels.kik: 20,\n Channels.telegram: 100,\n- Channels.slack: 100,\n Channels.emulator: 100,\n Channels.direct_line: 100,\n Channels.webchat: 100,\n", "issue": "[PORT] Remove Slack from the list of channels that support Suggested Actions\n> Port this change from botbuilder-dotnet/master branch:\nhttps://github.com/microsoft/botbuilder-dotnet/pull/4177\n\nFixes #2291 \r\n\r\nRemoving Slack from the list of channels that do not support suggested actions.\n\n\r\n# Changed projects\r\n* Microsoft.Bot.Builder.Dialogs\r\n\r\n\r\n\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\nfrom botbuilder.core import TurnContext\nfrom botframework.connector import Channels\n\n\nclass Channel:\n \"\"\"\n Methods for determining channel-specific functionality.\n \"\"\"\n\n @staticmethod\n def supports_suggested_actions(channel_id: str, button_cnt: int = 100) -> bool:\n \"\"\"Determine if a number of Suggested Actions are supported by a Channel.\n\n Args:\n channel_id (str): The Channel to check the if Suggested Actions are supported in.\n button_cnt (int, optional): Defaults to 100. The number of Suggested Actions to check for the Channel.\n\n Returns:\n bool: True if the Channel supports the button_cnt total Suggested Actions, False if the Channel does not\n support that number of Suggested Actions.\n \"\"\"\n\n max_actions = {\n # https://developers.facebook.com/docs/messenger-platform/send-messages/quick-replies\n Channels.facebook: 10,\n Channels.skype: 10,\n # https://developers.line.biz/en/reference/messaging-api/#items-object\n Channels.line: 13,\n # https://dev.kik.com/#/docs/messaging#text-response-object\n Channels.kik: 20,\n Channels.telegram: 100,\n Channels.slack: 100,\n Channels.emulator: 100,\n Channels.direct_line: 100,\n Channels.webchat: 100,\n }\n return (\n button_cnt <= max_actions[channel_id]\n if channel_id in max_actions\n else False\n )\n\n @staticmethod\n def supports_card_actions(channel_id: str, button_cnt: int = 100) -> bool:\n \"\"\"Determine if a number of Card Actions are supported by a Channel.\n\n Args:\n channel_id (str): The Channel to check if the Card Actions are supported in.\n button_cnt (int, optional): Defaults to 100. 
The number of Card Actions to check for the Channel.\n\n Returns:\n bool: True if the Channel supports the button_cnt total Card Actions, False if the Channel does not support\n that number of Card Actions.\n \"\"\"\n\n max_actions = {\n Channels.facebook: 3,\n Channels.skype: 3,\n Channels.ms_teams: 3,\n Channels.line: 99,\n Channels.slack: 100,\n Channels.emulator: 100,\n Channels.direct_line: 100,\n Channels.webchat: 100,\n Channels.cortana: 100,\n }\n return (\n button_cnt <= max_actions[channel_id]\n if channel_id in max_actions\n else False\n )\n\n @staticmethod\n def has_message_feed(channel_id: str) -> bool:\n \"\"\"Determine if a Channel has a Message Feed.\n\n Args:\n channel_id (str): The Channel to check for Message Feed.\n\n Returns:\n bool: True if the Channel has a Message Feed, False if it does not.\n \"\"\"\n\n return not channel_id == Channels.cortana\n\n @staticmethod\n def max_action_title_length( # pylint: disable=unused-argument\n channel_id: str,\n ) -> int:\n \"\"\"Maximum length allowed for Action Titles.\n\n Args:\n channel_id (str): The Channel to determine Maximum Action Title Length.\n\n Returns:\n int: The total number of characters allowed for an Action Title on a specific Channel.\n \"\"\"\n\n return 20\n\n @staticmethod\n def get_channel_id(turn_context: TurnContext) -> str:\n \"\"\"Get the Channel Id from the current Activity on the Turn Context.\n\n Args:\n turn_context (TurnContext): The Turn Context to retrieve the Activity's Channel Id from.\n\n Returns:\n str: The Channel Id from the Turn Context's Activity.\n \"\"\"\n\n if turn_context.activity.channel_id is None:\n return \"\"\n\n return turn_context.activity.channel_id\n", "path": "libraries/botbuilder-dialogs/botbuilder/dialogs/choices/channel.py"}]}
1,761
165
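The capability check in the row above is a plain dictionary lookup with a `False` fallback, so dropping a channel from the table is enough to mark it as unsupported. A minimal stand-in is sketched below; the channel ids are plain strings rather than the real `Channels` enum values.

```python
MAX_SUGGESTED_ACTIONS = {
    "facebook": 10,
    "skype": 10,
    "line": 13,
    "kik": 20,
    "telegram": 100,
    "emulator": 100,
    "directline": 100,
    "webchat": 100,
    # "slack" is intentionally absent: channels missing from the table fall
    # through to False, which is how the patch disables suggested actions there.
}


def supports_suggested_actions(channel_id: str, button_cnt: int = 100) -> bool:
    limit = MAX_SUGGESTED_ACTIONS.get(channel_id)
    return limit is not None and button_cnt <= limit


print(supports_suggested_actions("telegram", 25))  # True
print(supports_suggested_actions("slack", 3))      # False once slack is removed
```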
gh_patches_debug_20621
rasdani/github-patches
git_diff
lightly-ai__lightly-537
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> NNCLR Hi,thanks for your work, In NNCLR's model: " # forward pass of first input x0 f0 = self.backbone(x0).flatten(start_dim=1) z0 = self.projection_mlp(f0) p0 = self.prediction_mlp(f0) " I think it's wrong, it should be that : " # forward pass of first input x0 f0 = self.backbone(x0).flatten(start_dim=1) z0 = self.projection_mlp(f0) p0 = self.prediction_mlp(z0) " . </issue> <code> [start of lightly/models/nnclr.py] 1 """ NNCLR Model """ 2 3 # Copyright (c) 2021. Lightly AG and its affiliates. 4 # All Rights Reserved 5 6 import warnings 7 8 import torch 9 import torch.nn as nn 10 11 from lightly.models.modules import NNCLRProjectionHead 12 from lightly.models.modules import NNCLRPredictionHead 13 14 15 def _prediction_mlp(in_dims: int, 16 h_dims: int, 17 out_dims: int) -> nn.Sequential: 18 """Prediction MLP. The original paper's implementation has 2 layers, with 19 BN applied to its hidden fc layers but no BN or ReLU on the output fc layer. 20 21 Note that the hidden dimensions should be smaller than the input/output 22 dimensions (bottleneck structure). The default implementation using a 23 ResNet50 backbone has an input dimension of 2048, hidden dimension of 512, 24 and output dimension of 2048 25 26 Args: 27 in_dims: 28 Input dimension of the first linear layer. 29 h_dims: 30 Hidden dimension of all the fully connected layers (should be a 31 bottleneck!) 32 out_dims: 33 Output Dimension of the final linear layer. 34 35 Returns: 36 nn.Sequential: 37 The projection head. 38 """ 39 l1 = nn.Sequential(nn.Linear(in_dims, h_dims), 40 nn.BatchNorm1d(h_dims), 41 nn.ReLU(inplace=True)) 42 43 l2 = nn.Linear(h_dims, out_dims) 44 45 prediction = nn.Sequential(l1, l2) 46 return prediction 47 48 49 def _projection_mlp(num_ftrs: int, 50 h_dims: int, 51 out_dim: int, 52 num_layers: int = 3) -> nn.Sequential: 53 """Projection MLP. The original paper's implementation has 3 layers, with 54 BN applied to its hidden fc layers but no ReLU on the output fc layer. 55 The CIFAR-10 study used a MLP with only two layers. 56 57 Args: 58 in_dims: 59 Input dimension of the first linear layer. 60 h_dims: 61 Hidden dimension of all the fully connected layers. 62 out_dims: 63 Output Dimension of the final linear layer. 64 num_layers: 65 Controls the number of layers; must be 2 or 3. Defaults to 3. 66 67 Returns: 68 nn.Sequential: 69 The projection head. 70 """ 71 l1 = nn.Sequential(nn.Linear(num_ftrs, h_dims), 72 nn.BatchNorm1d(h_dims), 73 nn.ReLU(inplace=True)) 74 75 l2 = nn.Sequential(nn.Linear(h_dims, h_dims), 76 nn.BatchNorm1d(h_dims), 77 nn.ReLU(inplace=True)) 78 79 l3 = nn.Sequential(nn.Linear(h_dims, out_dim), 80 nn.BatchNorm1d(out_dim)) 81 82 if num_layers == 3: 83 projection = nn.Sequential(l1, l2, l3) 84 elif num_layers == 2: 85 projection = nn.Sequential(l1, l3) 86 else: 87 raise NotImplementedError("Only MLPs with 2 and 3 layers are implemented.") 88 89 return projection 90 91 92 class NNCLR(nn.Module): 93 """Implementation of the NNCLR[0] architecture 94 95 Recommended loss: :py:class:`lightly.loss.ntx_ent_loss.NTXentLoss` 96 Recommended module: :py:class:`lightly.models.modules.nn_memory_bank.NNmemoryBankModule` 97 98 [0] NNCLR, 2021, https://arxiv.org/abs/2104.14548 99 100 Attributes: 101 backbone: 102 Backbone model to extract features from images. 103 num_ftrs: 104 Dimension of the embedding (before the projection head). 
105 proj_hidden_dim: 106 Dimension of the hidden layer of the projection head. 107 pred_hidden_dim: 108 Dimension of the hidden layer of the predicion head. 109 out_dim: 110 Dimension of the output (after the projection head). 111 num_mlp_layers: 112 Number of linear layers for MLP. 113 114 Examples: 115 >>> model = NNCLR(backbone) 116 >>> criterion = NTXentLoss(temperature=0.1) 117 >>> 118 >>> nn_replacer = NNmemoryBankModule(size=2 ** 16) 119 >>> 120 >>> # forward pass 121 >>> (z0, p0), (z1, p1) = model(x0, x1) 122 >>> z0 = nn_replacer(z0.detach(), update=False) 123 >>> z1 = nn_replacer(z1.detach(), update=True) 124 >>> 125 >>> loss = 0.5 * (criterion(z0, p1) + criterion(z1, p0)) 126 127 """ 128 129 def __init__(self, 130 backbone: nn.Module, 131 num_ftrs: int = 512, 132 proj_hidden_dim: int = 2048, 133 pred_hidden_dim: int = 4096, 134 out_dim: int = 256): 135 136 super(NNCLR, self).__init__() 137 138 self.backbone = backbone 139 self.num_ftrs = num_ftrs 140 self.proj_hidden_dim = proj_hidden_dim 141 self.pred_hidden_dim = pred_hidden_dim 142 self.out_dim = out_dim 143 144 self.projection_mlp = NNCLRProjectionHead( 145 num_ftrs, 146 proj_hidden_dim, 147 out_dim, 148 ) 149 150 self.prediction_mlp = NNCLRPredictionHead( 151 num_ftrs, 152 pred_hidden_dim, 153 out_dim, 154 ) 155 156 warnings.warn(Warning( 157 'The high-level building block NNCLR will be deprecated in version 1.2.0. ' 158 + 'Use low-level building blocks instead. ' 159 + 'See https://docs.lightly.ai/lightly.models.html for more information'), 160 PendingDeprecationWarning) 161 162 def forward(self, 163 x0: torch.Tensor, 164 x1: torch.Tensor = None, 165 return_features: bool = False): 166 """Embeds and projects the input images. 167 168 Extracts features with the backbone and applies the projection 169 head to the output space. If both x0 and x1 are not None, both will be 170 passed through the backbone and projection head. If x1 is None, only 171 x0 will be forwarded. 172 173 Args: 174 x0: 175 Tensor of shape bsz x channels x W x H. 176 x1: 177 Tensor of shape bsz x channels x W x H. 178 return_features: 179 Whether or not to return the intermediate features backbone(x). 180 181 Returns: 182 The output projection of x0 and (if x1 is not None) the output 183 projection of x1. If return_features is True, the output for each x 184 is a tuple (out, f) where f are the features before the projection 185 head. 
186 187 Examples: 188 >>> # single input, single output 189 >>> out = model(x) 190 >>> 191 >>> # single input with return_features=True 192 >>> out, f = model(x, return_features=True) 193 >>> 194 >>> # two inputs, two outputs 195 >>> out0, out1 = model(x0, x1) 196 >>> 197 >>> # two inputs, two outputs with return_features=True 198 >>> (out0, f0), (out1, f1) = model(x0, x1, return_features=True) 199 200 """ 201 202 # forward pass of first input x0 203 f0 = self.backbone(x0).flatten(start_dim=1) 204 z0 = self.projection_mlp(f0) 205 p0 = self.prediction_mlp(f0) 206 207 out0 = (z0, p0) 208 209 # append features if requested 210 if return_features: 211 out0 = (out0, f0) 212 213 # return out0 if x1 is None 214 if x1 is None: 215 return out0 216 217 # forward pass of second input x1 218 f1 = self.backbone(x1).flatten(start_dim=1) 219 z1 = self.projection_mlp(f1) 220 p1 = self.prediction_mlp(f1) 221 222 out1 = (z1, p1) 223 224 # append features if requested 225 if return_features: 226 out1 = (out1, f1) 227 228 # return both outputs 229 return out0, out1 230 [end of lightly/models/nnclr.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/lightly/models/nnclr.py b/lightly/models/nnclr.py --- a/lightly/models/nnclr.py +++ b/lightly/models/nnclr.py @@ -148,7 +148,7 @@ ) self.prediction_mlp = NNCLRPredictionHead( - num_ftrs, + out_dim, pred_hidden_dim, out_dim, ) @@ -202,7 +202,7 @@ # forward pass of first input x0 f0 = self.backbone(x0).flatten(start_dim=1) z0 = self.projection_mlp(f0) - p0 = self.prediction_mlp(f0) + p0 = self.prediction_mlp(z0) out0 = (z0, p0) @@ -217,7 +217,7 @@ # forward pass of second input x1 f1 = self.backbone(x1).flatten(start_dim=1) z1 = self.projection_mlp(f1) - p1 = self.prediction_mlp(f1) + p1 = self.prediction_mlp(z1) out1 = (z1, p1)
{"golden_diff": "diff --git a/lightly/models/nnclr.py b/lightly/models/nnclr.py\n--- a/lightly/models/nnclr.py\n+++ b/lightly/models/nnclr.py\n@@ -148,7 +148,7 @@\n )\n \n self.prediction_mlp = NNCLRPredictionHead(\n- num_ftrs,\n+ out_dim,\n pred_hidden_dim,\n out_dim,\n )\n@@ -202,7 +202,7 @@\n # forward pass of first input x0\n f0 = self.backbone(x0).flatten(start_dim=1)\n z0 = self.projection_mlp(f0)\n- p0 = self.prediction_mlp(f0)\n+ p0 = self.prediction_mlp(z0)\n \n out0 = (z0, p0)\n \n@@ -217,7 +217,7 @@\n # forward pass of second input x1\n f1 = self.backbone(x1).flatten(start_dim=1)\n z1 = self.projection_mlp(f1)\n- p1 = self.prediction_mlp(f1)\n+ p1 = self.prediction_mlp(z1)\n \n out1 = (z1, p1)\n", "issue": "NNCLR\nHi,thanks for your work,\r\nIn NNCLR's model:\r\n \"\r\n# forward pass of first input x0\r\n f0 = self.backbone(x0).flatten(start_dim=1)\r\n z0 = self.projection_mlp(f0)\r\n p0 = self.prediction_mlp(f0)\r\n \"\r\n\r\nI think it's wrong, it should be that :\r\n\"\r\n # forward pass of first input x0\r\n f0 = self.backbone(x0).flatten(start_dim=1)\r\n z0 = self.projection_mlp(f0)\r\n p0 = self.prediction_mlp(z0)\r\n\" .\n", "before_files": [{"content": "\"\"\" NNCLR Model \"\"\"\n\n# Copyright (c) 2021. Lightly AG and its affiliates.\n# All Rights Reserved\n\nimport warnings\n\nimport torch\nimport torch.nn as nn\n\nfrom lightly.models.modules import NNCLRProjectionHead\nfrom lightly.models.modules import NNCLRPredictionHead\n\n\ndef _prediction_mlp(in_dims: int, \n h_dims: int, \n out_dims: int) -> nn.Sequential:\n \"\"\"Prediction MLP. The original paper's implementation has 2 layers, with \n BN applied to its hidden fc layers but no BN or ReLU on the output fc layer.\n\n Note that the hidden dimensions should be smaller than the input/output \n dimensions (bottleneck structure). The default implementation using a \n ResNet50 backbone has an input dimension of 2048, hidden dimension of 512, \n and output dimension of 2048\n\n Args:\n in_dims:\n Input dimension of the first linear layer.\n h_dims: \n Hidden dimension of all the fully connected layers (should be a\n bottleneck!)\n out_dims: \n Output Dimension of the final linear layer.\n\n Returns:\n nn.Sequential:\n The projection head.\n \"\"\"\n l1 = nn.Sequential(nn.Linear(in_dims, h_dims),\n nn.BatchNorm1d(h_dims),\n nn.ReLU(inplace=True))\n\n l2 = nn.Linear(h_dims, out_dims)\n\n prediction = nn.Sequential(l1, l2)\n return prediction\n\n\ndef _projection_mlp(num_ftrs: int,\n h_dims: int, \n out_dim: int, \n num_layers: int = 3) -> nn.Sequential:\n \"\"\"Projection MLP. The original paper's implementation has 3 layers, with \n BN applied to its hidden fc layers but no ReLU on the output fc layer. \n The CIFAR-10 study used a MLP with only two layers.\n\n Args:\n in_dims:\n Input dimension of the first linear layer.\n h_dims: \n Hidden dimension of all the fully connected layers.\n out_dims: \n Output Dimension of the final linear layer.\n num_layers:\n Controls the number of layers; must be 2 or 3. 
Defaults to 3.\n\n Returns:\n nn.Sequential:\n The projection head.\n \"\"\"\n l1 = nn.Sequential(nn.Linear(num_ftrs, h_dims),\n nn.BatchNorm1d(h_dims),\n nn.ReLU(inplace=True))\n\n l2 = nn.Sequential(nn.Linear(h_dims, h_dims),\n nn.BatchNorm1d(h_dims),\n nn.ReLU(inplace=True))\n\n l3 = nn.Sequential(nn.Linear(h_dims, out_dim),\n nn.BatchNorm1d(out_dim))\n\n if num_layers == 3:\n projection = nn.Sequential(l1, l2, l3)\n elif num_layers == 2:\n projection = nn.Sequential(l1, l3)\n else:\n raise NotImplementedError(\"Only MLPs with 2 and 3 layers are implemented.\")\n\n return projection\n\n\nclass NNCLR(nn.Module):\n \"\"\"Implementation of the NNCLR[0] architecture\n\n Recommended loss: :py:class:`lightly.loss.ntx_ent_loss.NTXentLoss`\n Recommended module: :py:class:`lightly.models.modules.nn_memory_bank.NNmemoryBankModule`\n\n [0] NNCLR, 2021, https://arxiv.org/abs/2104.14548\n\n Attributes:\n backbone:\n Backbone model to extract features from images.\n num_ftrs:\n Dimension of the embedding (before the projection head).\n proj_hidden_dim: \n Dimension of the hidden layer of the projection head.\n pred_hidden_dim:\n Dimension of the hidden layer of the predicion head.\n out_dim:\n Dimension of the output (after the projection head).\n num_mlp_layers:\n Number of linear layers for MLP.\n\n Examples:\n >>> model = NNCLR(backbone)\n >>> criterion = NTXentLoss(temperature=0.1)\n >>> \n >>> nn_replacer = NNmemoryBankModule(size=2 ** 16)\n >>>\n >>> # forward pass\n >>> (z0, p0), (z1, p1) = model(x0, x1)\n >>> z0 = nn_replacer(z0.detach(), update=False)\n >>> z1 = nn_replacer(z1.detach(), update=True)\n >>>\n >>> loss = 0.5 * (criterion(z0, p1) + criterion(z1, p0))\n\n \"\"\"\n\n def __init__(self,\n backbone: nn.Module,\n num_ftrs: int = 512,\n proj_hidden_dim: int = 2048,\n pred_hidden_dim: int = 4096,\n out_dim: int = 256):\n\n super(NNCLR, self).__init__()\n\n self.backbone = backbone\n self.num_ftrs = num_ftrs\n self.proj_hidden_dim = proj_hidden_dim\n self.pred_hidden_dim = pred_hidden_dim\n self.out_dim = out_dim\n\n self.projection_mlp = NNCLRProjectionHead(\n num_ftrs,\n proj_hidden_dim,\n out_dim,\n )\n \n self.prediction_mlp = NNCLRPredictionHead(\n num_ftrs,\n pred_hidden_dim,\n out_dim,\n )\n\n warnings.warn(Warning(\n 'The high-level building block NNCLR will be deprecated in version 1.2.0. '\n + 'Use low-level building blocks instead. '\n + 'See https://docs.lightly.ai/lightly.models.html for more information'),\n PendingDeprecationWarning)\n\n def forward(self,\n x0: torch.Tensor,\n x1: torch.Tensor = None,\n return_features: bool = False):\n \"\"\"Embeds and projects the input images.\n\n Extracts features with the backbone and applies the projection\n head to the output space. If both x0 and x1 are not None, both will be\n passed through the backbone and projection head. If x1 is None, only\n x0 will be forwarded.\n\n Args:\n x0:\n Tensor of shape bsz x channels x W x H.\n x1:\n Tensor of shape bsz x channels x W x H.\n return_features:\n Whether or not to return the intermediate features backbone(x).\n\n Returns:\n The output projection of x0 and (if x1 is not None) the output\n projection of x1. 
If return_features is True, the output for each x\n is a tuple (out, f) where f are the features before the projection\n head.\n\n Examples:\n >>> # single input, single output\n >>> out = model(x) \n >>> \n >>> # single input with return_features=True\n >>> out, f = model(x, return_features=True)\n >>>\n >>> # two inputs, two outputs\n >>> out0, out1 = model(x0, x1)\n >>>\n >>> # two inputs, two outputs with return_features=True\n >>> (out0, f0), (out1, f1) = model(x0, x1, return_features=True)\n\n \"\"\"\n \n # forward pass of first input x0\n f0 = self.backbone(x0).flatten(start_dim=1)\n z0 = self.projection_mlp(f0)\n p0 = self.prediction_mlp(f0)\n\n out0 = (z0, p0)\n\n # append features if requested\n if return_features:\n out0 = (out0, f0)\n\n # return out0 if x1 is None\n if x1 is None:\n return out0\n\n # forward pass of second input x1\n f1 = self.backbone(x1).flatten(start_dim=1)\n z1 = self.projection_mlp(f1)\n p1 = self.prediction_mlp(f1)\n\n out1 = (z1, p1)\n\n # append features if requested\n if return_features:\n out1 = (out1, f1)\n\n # return both outputs\n return out0, out1\n", "path": "lightly/models/nnclr.py"}]}
num_tokens_prompt: 3,026
num_tokens_diff: 274
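The NNCLR record above (issue plus golden diff) boils down to one change: the prediction head must sit on top of the projection output, so its input dimension is `out_dim` and it consumes `z0`/`z1` rather than the raw backbone features `f0`/`f1`. Below is a minimal, self-contained sketch of that corrected data flow; the stand-in backbone, layer sizes, and module names are illustrative assumptions, not the lightly library's actual classes.

```python
import torch
import torch.nn as nn

num_ftrs, out_dim, pred_hidden_dim = 512, 256, 4096

# Stand-in backbone producing num_ftrs-dimensional features.
backbone = nn.Sequential(nn.Flatten(), nn.Linear(3 * 32 * 32, num_ftrs))

projection_mlp = nn.Sequential(
    nn.Linear(num_ftrs, 2048), nn.BatchNorm1d(2048), nn.ReLU(inplace=True),
    nn.Linear(2048, out_dim), nn.BatchNorm1d(out_dim),
)

# Input dimension is out_dim: the prediction head operates on z, not on f.
prediction_mlp = nn.Sequential(
    nn.Linear(out_dim, pred_hidden_dim), nn.BatchNorm1d(pred_hidden_dim), nn.ReLU(inplace=True),
    nn.Linear(pred_hidden_dim, out_dim),
)

x0 = torch.randn(8, 3, 32, 32)
f0 = backbone(x0)        # (8, 512) backbone features
z0 = projection_mlp(f0)  # (8, 256) projection
p0 = prediction_mlp(z0)  # (8, 256) prediction computed from z0, not f0
```

With a second input `x1` the same flow is repeated, giving the `(z, p)` pairs that the NNCLR loss in the record's docstring consumes.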
problem_id: gh_patches_debug_8267
source: rasdani/github-patches
task_type: git_diff
in_source_id: ipython__ipython-7852
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Unknown error with no read permissions If you do not have read permission on a notebook (e.g., it has `-rw-------` and you are not the owner) and you try to open it, rather than saying permission denied, you get an error that implies there's something wrong with the notebook format: ![image](https://cloud.githubusercontent.com/assets/83444/6356023/53956f6e-bc0f-11e4-884b-aea89261a9e2.png) In the notebook server, there is an unhandled permission denied traceback: ``` [E 11:09:31.650 NotebookApp] Unhandled error in API request Traceback (most recent call last): File "/Users/jhamrick/project/tools/ipython/IPython/html/base/handlers.py", line 365, in wrapper result = yield gen.maybe_future(method(self, *args, **kwargs)) File "/usr/local/lib/python2.7/site-packages/tornado/gen.py", line 807, in run value = future.result() File "/usr/local/lib/python2.7/site-packages/tornado/concurrent.py", line 209, in result raise_exc_info(self._exc_info) File "/usr/local/lib/python2.7/site-packages/tornado/gen.py", line 212, in wrapper yielded = next(result) File "/Users/jhamrick/project/tools/ipython/IPython/html/services/contents/handlers.py", line 126, in get path=path, type=type, format=format, content=content, File "/Users/jhamrick/project/tools/ipython/IPython/html/services/contents/filemanager.py", line 347, in get model = self._notebook_model(path, content=content) File "/Users/jhamrick/project/tools/ipython/IPython/html/services/contents/filemanager.py", line 306, in _notebook_model nb = self._read_notebook(os_path, as_version=4) File "/Users/jhamrick/project/tools/ipython/IPython/html/services/contents/fileio.py", line 104, in _read_notebook with self.open(os_path, 'r', encoding='utf-8') as f: File "/usr/local/Cellar/python/2.7.9/Frameworks/Python.framework/Versions/2.7/lib/python2.7/contextlib.py", line 17, in __enter__ return self.gen.next() File "/Users/jhamrick/project/tools/ipython/IPython/html/services/contents/fileio.py", line 49, in open with io.open(os_path, *args, **kwargs) as f: IOError: [Errno 13] Permission denied: '/Users/jhamrick/project/ipynb/Untitled9.ipynb' ``` Perhaps it would be possible to return a more informative generic error message, like just the last line of the traceback or something along those lines? </issue> <code> [start of IPython/html/services/contents/fileio.py] 1 """ 2 Utilities for file-based Contents/Checkpoints managers. 3 """ 4 5 # Copyright (c) IPython Development Team. 6 # Distributed under the terms of the Modified BSD License. 7 8 import base64 9 from contextlib import contextmanager 10 import errno 11 import io 12 import os 13 import shutil 14 15 from tornado.web import HTTPError 16 17 from IPython.html.utils import ( 18 to_api_path, 19 to_os_path, 20 ) 21 from IPython import nbformat 22 from IPython.utils.io import atomic_writing 23 from IPython.utils.py3compat import str_to_unicode 24 25 26 class FileManagerMixin(object): 27 """ 28 Mixin for ContentsAPI classes that interact with the filesystem. 29 30 Provides facilities for reading, writing, and copying both notebooks and 31 generic files. 32 33 Shared by FileContentsManager and FileCheckpoints. 34 35 Note 36 ---- 37 Classes using this mixin must provide the following attributes: 38 39 root_dir : unicode 40 A directory against against which API-style paths are to be resolved. 
41 42 log : logging.Logger 43 """ 44 45 @contextmanager 46 def open(self, os_path, *args, **kwargs): 47 """wrapper around io.open that turns permission errors into 403""" 48 with self.perm_to_403(os_path): 49 with io.open(os_path, *args, **kwargs) as f: 50 yield f 51 52 @contextmanager 53 def atomic_writing(self, os_path, *args, **kwargs): 54 """wrapper around atomic_writing that turns permission errors to 403""" 55 with self.perm_to_403(os_path): 56 with atomic_writing(os_path, *args, **kwargs) as f: 57 yield f 58 59 @contextmanager 60 def perm_to_403(self, os_path=''): 61 """context manager for turning permission errors into 403.""" 62 try: 63 yield 64 except OSError as e: 65 if e.errno in {errno.EPERM, errno.EACCES}: 66 # make 403 error message without root prefix 67 # this may not work perfectly on unicode paths on Python 2, 68 # but nobody should be doing that anyway. 69 if not os_path: 70 os_path = str_to_unicode(e.filename or 'unknown file') 71 path = to_api_path(os_path, root=self.root_dir) 72 raise HTTPError(403, u'Permission denied: %s' % path) 73 else: 74 raise 75 76 def _copy(self, src, dest): 77 """copy src to dest 78 79 like shutil.copy2, but log errors in copystat 80 """ 81 shutil.copyfile(src, dest) 82 try: 83 shutil.copystat(src, dest) 84 except OSError: 85 self.log.debug("copystat on %s failed", dest, exc_info=True) 86 87 def _get_os_path(self, path): 88 """Given an API path, return its file system path. 89 90 Parameters 91 ---------- 92 path : string 93 The relative API path to the named file. 94 95 Returns 96 ------- 97 path : string 98 Native, absolute OS path to for a file. 99 """ 100 return to_os_path(path, self.root_dir) 101 102 def _read_notebook(self, os_path, as_version=4): 103 """Read a notebook from an os path.""" 104 with self.open(os_path, 'r', encoding='utf-8') as f: 105 try: 106 return nbformat.read(f, as_version=as_version) 107 except Exception as e: 108 raise HTTPError( 109 400, 110 u"Unreadable Notebook: %s %r" % (os_path, e), 111 ) 112 113 def _save_notebook(self, os_path, nb): 114 """Save a notebook to an os_path.""" 115 with self.atomic_writing(os_path, encoding='utf-8') as f: 116 nbformat.write(nb, f, version=nbformat.NO_CONVERT) 117 118 def _read_file(self, os_path, format): 119 """Read a non-notebook file. 120 121 os_path: The path to be read. 122 format: 123 If 'text', the contents will be decoded as UTF-8. 124 If 'base64', the raw bytes contents will be encoded as base64. 125 If not specified, try to decode as UTF-8, and fall back to base64 126 """ 127 if not os.path.isfile(os_path): 128 raise HTTPError(400, "Cannot read non-file %s" % os_path) 129 130 with self.open(os_path, 'rb') as f: 131 bcontent = f.read() 132 133 if format is None or format == 'text': 134 # Try to interpret as unicode if format is unknown or if unicode 135 # was explicitly requested. 
136 try: 137 return bcontent.decode('utf8'), 'text' 138 except UnicodeError: 139 if format == 'text': 140 raise HTTPError( 141 400, 142 "%s is not UTF-8 encoded" % os_path, 143 reason='bad format', 144 ) 145 return base64.encodestring(bcontent).decode('ascii'), 'base64' 146 147 def _save_file(self, os_path, content, format): 148 """Save content of a generic file.""" 149 if format not in {'text', 'base64'}: 150 raise HTTPError( 151 400, 152 "Must specify format of file contents as 'text' or 'base64'", 153 ) 154 try: 155 if format == 'text': 156 bcontent = content.encode('utf8') 157 else: 158 b64_bytes = content.encode('ascii') 159 bcontent = base64.decodestring(b64_bytes) 160 except Exception as e: 161 raise HTTPError( 162 400, u'Encoding error saving %s: %s' % (os_path, e) 163 ) 164 165 with self.atomic_writing(os_path, text=False) as f: 166 f.write(bcontent) 167 [end of IPython/html/services/contents/fileio.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/IPython/html/services/contents/fileio.py b/IPython/html/services/contents/fileio.py --- a/IPython/html/services/contents/fileio.py +++ b/IPython/html/services/contents/fileio.py @@ -61,7 +61,7 @@ """context manager for turning permission errors into 403.""" try: yield - except OSError as e: + except (OSError, IOError) as e: if e.errno in {errno.EPERM, errno.EACCES}: # make 403 error message without root prefix # this may not work perfectly on unicode paths on Python 2,
{"golden_diff": "diff --git a/IPython/html/services/contents/fileio.py b/IPython/html/services/contents/fileio.py\n--- a/IPython/html/services/contents/fileio.py\n+++ b/IPython/html/services/contents/fileio.py\n@@ -61,7 +61,7 @@\n \"\"\"context manager for turning permission errors into 403.\"\"\"\n try:\n yield\n- except OSError as e:\n+ except (OSError, IOError) as e:\n if e.errno in {errno.EPERM, errno.EACCES}:\n # make 403 error message without root prefix\n # this may not work perfectly on unicode paths on Python 2,\n", "issue": "Unknown error with no read permissions\nIf you do not have read permission on a notebook (e.g., it has `-rw-------` and you are not the owner) and you try to open it, rather than saying permission denied, you get an error that implies there's something wrong with the notebook format:\n\n![image](https://cloud.githubusercontent.com/assets/83444/6356023/53956f6e-bc0f-11e4-884b-aea89261a9e2.png)\n\nIn the notebook server, there is an unhandled permission denied traceback:\n\n```\n[E 11:09:31.650 NotebookApp] Unhandled error in API request\n Traceback (most recent call last):\n File \"/Users/jhamrick/project/tools/ipython/IPython/html/base/handlers.py\", line 365, in wrapper\n result = yield gen.maybe_future(method(self, *args, **kwargs))\n File \"/usr/local/lib/python2.7/site-packages/tornado/gen.py\", line 807, in run\n value = future.result()\n File \"/usr/local/lib/python2.7/site-packages/tornado/concurrent.py\", line 209, in result\n raise_exc_info(self._exc_info)\n File \"/usr/local/lib/python2.7/site-packages/tornado/gen.py\", line 212, in wrapper\n yielded = next(result)\n File \"/Users/jhamrick/project/tools/ipython/IPython/html/services/contents/handlers.py\", line 126, in get\n path=path, type=type, format=format, content=content,\n File \"/Users/jhamrick/project/tools/ipython/IPython/html/services/contents/filemanager.py\", line 347, in get\n model = self._notebook_model(path, content=content)\n File \"/Users/jhamrick/project/tools/ipython/IPython/html/services/contents/filemanager.py\", line 306, in _notebook_model\n nb = self._read_notebook(os_path, as_version=4)\n File \"/Users/jhamrick/project/tools/ipython/IPython/html/services/contents/fileio.py\", line 104, in _read_notebook\n with self.open(os_path, 'r', encoding='utf-8') as f:\n File \"/usr/local/Cellar/python/2.7.9/Frameworks/Python.framework/Versions/2.7/lib/python2.7/contextlib.py\", line 17, in __enter__\n return self.gen.next()\n File \"/Users/jhamrick/project/tools/ipython/IPython/html/services/contents/fileio.py\", line 49, in open\n with io.open(os_path, *args, **kwargs) as f:\n IOError: [Errno 13] Permission denied: '/Users/jhamrick/project/ipynb/Untitled9.ipynb'\n```\n\nPerhaps it would be possible to return a more informative generic error message, like just the last line of the traceback or something along those lines?\n\n", "before_files": [{"content": "\"\"\"\nUtilities for file-based Contents/Checkpoints managers.\n\"\"\"\n\n# Copyright (c) IPython Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nimport base64\nfrom contextlib import contextmanager\nimport errno\nimport io\nimport os\nimport shutil\n\nfrom tornado.web import HTTPError\n\nfrom IPython.html.utils import (\n to_api_path,\n to_os_path,\n)\nfrom IPython import nbformat\nfrom IPython.utils.io import atomic_writing\nfrom IPython.utils.py3compat import str_to_unicode\n\n\nclass FileManagerMixin(object):\n \"\"\"\n Mixin for ContentsAPI classes that interact with the filesystem.\n\n 
Provides facilities for reading, writing, and copying both notebooks and\n generic files.\n\n Shared by FileContentsManager and FileCheckpoints.\n\n Note\n ----\n Classes using this mixin must provide the following attributes:\n\n root_dir : unicode\n A directory against against which API-style paths are to be resolved.\n\n log : logging.Logger\n \"\"\"\n\n @contextmanager\n def open(self, os_path, *args, **kwargs):\n \"\"\"wrapper around io.open that turns permission errors into 403\"\"\"\n with self.perm_to_403(os_path):\n with io.open(os_path, *args, **kwargs) as f:\n yield f\n\n @contextmanager\n def atomic_writing(self, os_path, *args, **kwargs):\n \"\"\"wrapper around atomic_writing that turns permission errors to 403\"\"\"\n with self.perm_to_403(os_path):\n with atomic_writing(os_path, *args, **kwargs) as f:\n yield f\n\n @contextmanager\n def perm_to_403(self, os_path=''):\n \"\"\"context manager for turning permission errors into 403.\"\"\"\n try:\n yield\n except OSError as e:\n if e.errno in {errno.EPERM, errno.EACCES}:\n # make 403 error message without root prefix\n # this may not work perfectly on unicode paths on Python 2,\n # but nobody should be doing that anyway.\n if not os_path:\n os_path = str_to_unicode(e.filename or 'unknown file')\n path = to_api_path(os_path, root=self.root_dir)\n raise HTTPError(403, u'Permission denied: %s' % path)\n else:\n raise\n\n def _copy(self, src, dest):\n \"\"\"copy src to dest\n\n like shutil.copy2, but log errors in copystat\n \"\"\"\n shutil.copyfile(src, dest)\n try:\n shutil.copystat(src, dest)\n except OSError:\n self.log.debug(\"copystat on %s failed\", dest, exc_info=True)\n\n def _get_os_path(self, path):\n \"\"\"Given an API path, return its file system path.\n\n Parameters\n ----------\n path : string\n The relative API path to the named file.\n\n Returns\n -------\n path : string\n Native, absolute OS path to for a file.\n \"\"\"\n return to_os_path(path, self.root_dir)\n\n def _read_notebook(self, os_path, as_version=4):\n \"\"\"Read a notebook from an os path.\"\"\"\n with self.open(os_path, 'r', encoding='utf-8') as f:\n try:\n return nbformat.read(f, as_version=as_version)\n except Exception as e:\n raise HTTPError(\n 400,\n u\"Unreadable Notebook: %s %r\" % (os_path, e),\n )\n\n def _save_notebook(self, os_path, nb):\n \"\"\"Save a notebook to an os_path.\"\"\"\n with self.atomic_writing(os_path, encoding='utf-8') as f:\n nbformat.write(nb, f, version=nbformat.NO_CONVERT)\n\n def _read_file(self, os_path, format):\n \"\"\"Read a non-notebook file.\n\n os_path: The path to be read.\n format:\n If 'text', the contents will be decoded as UTF-8.\n If 'base64', the raw bytes contents will be encoded as base64.\n If not specified, try to decode as UTF-8, and fall back to base64\n \"\"\"\n if not os.path.isfile(os_path):\n raise HTTPError(400, \"Cannot read non-file %s\" % os_path)\n\n with self.open(os_path, 'rb') as f:\n bcontent = f.read()\n\n if format is None or format == 'text':\n # Try to interpret as unicode if format is unknown or if unicode\n # was explicitly requested.\n try:\n return bcontent.decode('utf8'), 'text'\n except UnicodeError:\n if format == 'text':\n raise HTTPError(\n 400,\n \"%s is not UTF-8 encoded\" % os_path,\n reason='bad format',\n )\n return base64.encodestring(bcontent).decode('ascii'), 'base64'\n\n def _save_file(self, os_path, content, format):\n \"\"\"Save content of a generic file.\"\"\"\n if format not in {'text', 'base64'}:\n raise HTTPError(\n 400,\n \"Must specify format of file 
contents as 'text' or 'base64'\",\n )\n try:\n if format == 'text':\n bcontent = content.encode('utf8')\n else:\n b64_bytes = content.encode('ascii')\n bcontent = base64.decodestring(b64_bytes)\n except Exception as e:\n raise HTTPError(\n 400, u'Encoding error saving %s: %s' % (os_path, e)\n )\n\n with self.atomic_writing(os_path, text=False) as f:\n f.write(bcontent)\n", "path": "IPython/html/services/contents/fileio.py"}]}
num_tokens_prompt: 2,871
num_tokens_diff: 145
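The fileio diff above works because `io.open` raises `IOError` for a permission failure on Python 2, and `IOError` is not a subclass of `OSError` there (the two only become aliases in Python 3.3+), so the original `except OSError` never fired. A rough sketch of the resulting pattern is below, using a stand-in `HTTPError` class rather than Tornado's real one.

```python
import errno
import io
from contextlib import contextmanager


class HTTPError(Exception):
    """Stand-in for tornado.web.HTTPError, for illustration only."""

    def __init__(self, status_code, log_message):
        super(HTTPError, self).__init__(log_message)
        self.status_code = status_code


@contextmanager
def perm_to_403(os_path):
    """Turn EPERM/EACCES filesystem errors into an HTTP 403."""
    try:
        yield
    except (OSError, IOError) as e:  # IOError is a distinct class on Python 2
        if e.errno in (errno.EPERM, errno.EACCES):
            raise HTTPError(403, u"Permission denied: %s" % os_path)
        raise


def read_notebook(os_path):
    with perm_to_403(os_path):
        with io.open(os_path, "r", encoding="utf-8") as f:
            return f.read()
```

Wrapping the open call this way is what turns the unhandled 500 traceback shown in the issue into a clean "Permission denied" 403 response.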
problem_id: gh_patches_debug_62449
source: rasdani/github-patches
task_type: git_diff
in_source_id: getsentry__sentry-python-296
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Tornado integration does not capture formdata There is no obvious way to get just formdata from Tornado's request object (not mixed with other request params), but I haven't done a lot of research on that. </issue> <code> [start of sentry_sdk/integrations/tornado.py] 1 import sys 2 import weakref 3 from inspect import iscoroutinefunction 4 5 from sentry_sdk.hub import Hub, _should_send_default_pii 6 from sentry_sdk.utils import ( 7 event_from_exception, 8 capture_internal_exceptions, 9 transaction_from_function, 10 ) 11 from sentry_sdk.integrations import Integration 12 from sentry_sdk.integrations._wsgi_common import ( 13 RequestExtractor, 14 _filter_headers, 15 _is_json_content_type, 16 ) 17 from sentry_sdk.integrations.logging import ignore_logger 18 19 from tornado.web import RequestHandler, HTTPError # type: ignore 20 from tornado.gen import coroutine # type: ignore 21 22 if False: 23 from typing import Any 24 from typing import List 25 from typing import Optional 26 from typing import Dict 27 from typing import Callable 28 29 30 class TornadoIntegration(Integration): 31 identifier = "tornado" 32 33 @staticmethod 34 def setup_once(): 35 # type: () -> None 36 import tornado # type: ignore 37 38 tornado_version = getattr(tornado, "version_info", None) 39 if tornado_version is None or tornado_version < (5, 0): 40 raise RuntimeError("Tornado 5+ required") 41 42 if sys.version_info < (3, 7): 43 # Tornado is async. We better have contextvars or we're going to leak 44 # state between requests. 45 raise RuntimeError( 46 "The tornado integration for Sentry requires Python 3.7+" 47 ) 48 49 ignore_logger("tornado.application") 50 ignore_logger("tornado.access") 51 52 old_execute = RequestHandler._execute 53 54 awaitable = iscoroutinefunction(old_execute) 55 56 if awaitable: 57 # Starting Tornado 6 RequestHandler._execute method is a standard Python coroutine (async/await) 58 # In that case our method should be a coroutine function too 59 async def sentry_execute_request_handler(self, *args, **kwargs): 60 # type: (Any, *List, **Any) -> Any 61 hub = Hub.current 62 integration = hub.get_integration(TornadoIntegration) 63 if integration is None: 64 return await old_execute(self, *args, **kwargs) 65 66 weak_handler = weakref.ref(self) 67 68 with Hub(hub) as hub: 69 with hub.configure_scope() as scope: 70 scope.add_event_processor(_make_event_processor(weak_handler)) 71 return await old_execute(self, *args, **kwargs) 72 73 else: 74 75 @coroutine # type: ignore 76 def sentry_execute_request_handler(self, *args, **kwargs): 77 hub = Hub.current 78 integration = hub.get_integration(TornadoIntegration) 79 if integration is None: 80 return old_execute(self, *args, **kwargs) 81 82 weak_handler = weakref.ref(self) 83 84 with Hub(hub) as hub: 85 with hub.configure_scope() as scope: 86 scope.add_event_processor(_make_event_processor(weak_handler)) 87 result = yield from old_execute(self, *args, **kwargs) 88 return result 89 90 RequestHandler._execute = sentry_execute_request_handler 91 92 old_log_exception = RequestHandler.log_exception 93 94 def sentry_log_exception(self, ty, value, tb, *args, **kwargs): 95 # type: (Any, type, BaseException, Any, *Any, **Any) -> Optional[Any] 96 _capture_exception(ty, value, tb) 97 return old_log_exception(self, ty, value, tb, *args, **kwargs) 98 99 RequestHandler.log_exception = sentry_log_exception 100 101 102 def _capture_exception(ty, value, tb): 103 # type: (type, 
BaseException, Any) -> None 104 hub = Hub.current 105 if hub.get_integration(TornadoIntegration) is None: 106 return 107 if isinstance(value, HTTPError): 108 return 109 110 event, hint = event_from_exception( 111 (ty, value, tb), 112 client_options=hub.client.options, 113 mechanism={"type": "tornado", "handled": False}, 114 ) 115 116 hub.capture_event(event, hint=hint) 117 118 119 def _make_event_processor(weak_handler): 120 # type: (Callable[[], RequestHandler]) -> Callable 121 def tornado_processor(event, hint): 122 # type: (Dict[str, Any], Dict[str, Any]) -> Dict[str, Any] 123 handler = weak_handler() 124 if handler is None: 125 return event 126 127 request = handler.request 128 129 with capture_internal_exceptions(): 130 method = getattr(handler, handler.request.method.lower()) 131 event["transaction"] = transaction_from_function(method) 132 133 with capture_internal_exceptions(): 134 extractor = TornadoRequestExtractor(request) 135 extractor.extract_into_event(event) 136 137 request_info = event["request"] 138 139 request_info["url"] = "%s://%s%s" % ( 140 request.protocol, 141 request.host, 142 request.path, 143 ) 144 145 request_info["query_string"] = request.query 146 request_info["method"] = request.method 147 request_info["env"] = {"REMOTE_ADDR": request.remote_ip} 148 request_info["headers"] = _filter_headers(dict(request.headers)) 149 150 with capture_internal_exceptions(): 151 if handler.current_user and _should_send_default_pii(): 152 event.setdefault("user", {})["is_authenticated"] = True 153 154 return event 155 156 return tornado_processor 157 158 159 class TornadoRequestExtractor(RequestExtractor): 160 def content_length(self): 161 # type: () -> int 162 if self.request.body is None: 163 return 0 164 return len(self.request.body) 165 166 def cookies(self): 167 # type: () -> Dict 168 return {k: v.value for k, v in self.request.cookies.items()} 169 170 def raw_data(self): 171 # type: () -> bytes 172 return self.request.body 173 174 def form(self): 175 # type: () -> Optional[Any] 176 # TODO: Where to get formdata and nothing else? 177 return None 178 179 def is_json(self): 180 # type: () -> bool 181 return _is_json_content_type(self.request.headers.get("content-type")) 182 183 def files(self): 184 # type: () -> Dict 185 return {k: v[0] for k, v in self.request.files.items() if v} 186 187 def size_of_file(self, file): 188 return len(file.body or ()) 189 [end of sentry_sdk/integrations/tornado.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/sentry_sdk/integrations/tornado.py b/sentry_sdk/integrations/tornado.py --- a/sentry_sdk/integrations/tornado.py +++ b/sentry_sdk/integrations/tornado.py @@ -173,8 +173,10 @@ def form(self): # type: () -> Optional[Any] - # TODO: Where to get formdata and nothing else? - return None + return { + k: [v.decode("latin1", "replace") for v in vs] + for k, vs in self.request.body_arguments.items() + } def is_json(self): # type: () -> bool
{"golden_diff": "diff --git a/sentry_sdk/integrations/tornado.py b/sentry_sdk/integrations/tornado.py\n--- a/sentry_sdk/integrations/tornado.py\n+++ b/sentry_sdk/integrations/tornado.py\n@@ -173,8 +173,10 @@\n \n def form(self):\n # type: () -> Optional[Any]\n- # TODO: Where to get formdata and nothing else?\n- return None\n+ return {\n+ k: [v.decode(\"latin1\", \"replace\") for v in vs]\n+ for k, vs in self.request.body_arguments.items()\n+ }\n \n def is_json(self):\n # type: () -> bool\n", "issue": "Tornado integration does not capture formdata\nThere is no obvious way to get just formdata from Tornado's request object (not mixed with other request params), but I haven't done a lot of research on that.\n", "before_files": [{"content": "import sys\nimport weakref\nfrom inspect import iscoroutinefunction\n\nfrom sentry_sdk.hub import Hub, _should_send_default_pii\nfrom sentry_sdk.utils import (\n event_from_exception,\n capture_internal_exceptions,\n transaction_from_function,\n)\nfrom sentry_sdk.integrations import Integration\nfrom sentry_sdk.integrations._wsgi_common import (\n RequestExtractor,\n _filter_headers,\n _is_json_content_type,\n)\nfrom sentry_sdk.integrations.logging import ignore_logger\n\nfrom tornado.web import RequestHandler, HTTPError # type: ignore\nfrom tornado.gen import coroutine # type: ignore\n\nif False:\n from typing import Any\n from typing import List\n from typing import Optional\n from typing import Dict\n from typing import Callable\n\n\nclass TornadoIntegration(Integration):\n identifier = \"tornado\"\n\n @staticmethod\n def setup_once():\n # type: () -> None\n import tornado # type: ignore\n\n tornado_version = getattr(tornado, \"version_info\", None)\n if tornado_version is None or tornado_version < (5, 0):\n raise RuntimeError(\"Tornado 5+ required\")\n\n if sys.version_info < (3, 7):\n # Tornado is async. 
We better have contextvars or we're going to leak\n # state between requests.\n raise RuntimeError(\n \"The tornado integration for Sentry requires Python 3.7+\"\n )\n\n ignore_logger(\"tornado.application\")\n ignore_logger(\"tornado.access\")\n\n old_execute = RequestHandler._execute\n\n awaitable = iscoroutinefunction(old_execute)\n\n if awaitable:\n # Starting Tornado 6 RequestHandler._execute method is a standard Python coroutine (async/await)\n # In that case our method should be a coroutine function too\n async def sentry_execute_request_handler(self, *args, **kwargs):\n # type: (Any, *List, **Any) -> Any\n hub = Hub.current\n integration = hub.get_integration(TornadoIntegration)\n if integration is None:\n return await old_execute(self, *args, **kwargs)\n\n weak_handler = weakref.ref(self)\n\n with Hub(hub) as hub:\n with hub.configure_scope() as scope:\n scope.add_event_processor(_make_event_processor(weak_handler))\n return await old_execute(self, *args, **kwargs)\n\n else:\n\n @coroutine # type: ignore\n def sentry_execute_request_handler(self, *args, **kwargs):\n hub = Hub.current\n integration = hub.get_integration(TornadoIntegration)\n if integration is None:\n return old_execute(self, *args, **kwargs)\n\n weak_handler = weakref.ref(self)\n\n with Hub(hub) as hub:\n with hub.configure_scope() as scope:\n scope.add_event_processor(_make_event_processor(weak_handler))\n result = yield from old_execute(self, *args, **kwargs)\n return result\n\n RequestHandler._execute = sentry_execute_request_handler\n\n old_log_exception = RequestHandler.log_exception\n\n def sentry_log_exception(self, ty, value, tb, *args, **kwargs):\n # type: (Any, type, BaseException, Any, *Any, **Any) -> Optional[Any]\n _capture_exception(ty, value, tb)\n return old_log_exception(self, ty, value, tb, *args, **kwargs)\n\n RequestHandler.log_exception = sentry_log_exception\n\n\ndef _capture_exception(ty, value, tb):\n # type: (type, BaseException, Any) -> None\n hub = Hub.current\n if hub.get_integration(TornadoIntegration) is None:\n return\n if isinstance(value, HTTPError):\n return\n\n event, hint = event_from_exception(\n (ty, value, tb),\n client_options=hub.client.options,\n mechanism={\"type\": \"tornado\", \"handled\": False},\n )\n\n hub.capture_event(event, hint=hint)\n\n\ndef _make_event_processor(weak_handler):\n # type: (Callable[[], RequestHandler]) -> Callable\n def tornado_processor(event, hint):\n # type: (Dict[str, Any], Dict[str, Any]) -> Dict[str, Any]\n handler = weak_handler()\n if handler is None:\n return event\n\n request = handler.request\n\n with capture_internal_exceptions():\n method = getattr(handler, handler.request.method.lower())\n event[\"transaction\"] = transaction_from_function(method)\n\n with capture_internal_exceptions():\n extractor = TornadoRequestExtractor(request)\n extractor.extract_into_event(event)\n\n request_info = event[\"request\"]\n\n request_info[\"url\"] = \"%s://%s%s\" % (\n request.protocol,\n request.host,\n request.path,\n )\n\n request_info[\"query_string\"] = request.query\n request_info[\"method\"] = request.method\n request_info[\"env\"] = {\"REMOTE_ADDR\": request.remote_ip}\n request_info[\"headers\"] = _filter_headers(dict(request.headers))\n\n with capture_internal_exceptions():\n if handler.current_user and _should_send_default_pii():\n event.setdefault(\"user\", {})[\"is_authenticated\"] = True\n\n return event\n\n return tornado_processor\n\n\nclass TornadoRequestExtractor(RequestExtractor):\n def content_length(self):\n # type: () -> int\n 
if self.request.body is None:\n return 0\n return len(self.request.body)\n\n def cookies(self):\n # type: () -> Dict\n return {k: v.value for k, v in self.request.cookies.items()}\n\n def raw_data(self):\n # type: () -> bytes\n return self.request.body\n\n def form(self):\n # type: () -> Optional[Any]\n # TODO: Where to get formdata and nothing else?\n return None\n\n def is_json(self):\n # type: () -> bool\n return _is_json_content_type(self.request.headers.get(\"content-type\"))\n\n def files(self):\n # type: () -> Dict\n return {k: v[0] for k, v in self.request.files.items() if v}\n\n def size_of_file(self, file):\n return len(file.body or ())\n", "path": "sentry_sdk/integrations/tornado.py"}]}
num_tokens_prompt: 2,386
num_tokens_diff: 151
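The Tornado diff above answers the issue's question by reading `request.body_arguments`, which holds only body-parsed form fields (as lists of byte strings) and so is not mixed with query-string parameters. A small illustrative sketch follows; the request class here is a fake for demonstration, not Tornado's `HTTPServerRequest`.

```python
class FakeRequest(object):
    """Illustrative stand-in for a Tornado request object."""

    # body_arguments maps field name -> list of byte-string values.
    body_arguments = {"user": [b"alice"], "tags": [b"a", b"b"]}


def extract_form(request):
    # Decode each value the same way the diff above does.
    return {
        k: [v.decode("latin1", "replace") for v in vs]
        for k, vs in request.body_arguments.items()
    }


print(extract_form(FakeRequest()))  # {'user': ['alice'], 'tags': ['a', 'b']}
```

Because the extraction never touches `request.query`, the reported form data no longer carries the "mixed with other request params" caveat raised in the issue.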
problem_id: gh_patches_debug_39438
source: rasdani/github-patches
task_type: git_diff
in_source_id: aws-powertools__powertools-lambda-python-2004
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> `BaseSegment` defines `put_annotation` as having return type `NoReturn` ### Expected Behaviour The return type should be `None`. `NoReturn` means that the function never returns (e.g. it always throws an exception) and makes type checkers (e.g. mypy) mark all code following `put_annotation` as unreachable. Also applies to `put_metadata`, `patch`, and `patch_all`. ### Current Behaviour These methods breaks type checking in any modules they're used because all code following a call to e.g. `put_annotation` is marked as unreachable. ### Code snippet ```python with Tracer().provider.in_subsegment('test') as subsegment: subsegment.put_metadata('x', 1) print("hi") # marked as unreachable ``` ### Possible Solution Change the return type to `None`. ### Steps to Reproduce Use the code snippet above and run `mypy` on the file. ### AWS Lambda Powertools for Python version latest ### AWS Lambda function runtime 3.9 ### Packaging format used PyPi ### Debugging logs _No response_ </issue> <code> [start of aws_lambda_powertools/tracing/base.py] 1 import abc 2 import numbers 3 import traceback 4 from contextlib import contextmanager 5 from typing import Any, Generator, List, NoReturn, Optional, Sequence, Union 6 7 8 class BaseSegment(abc.ABC): 9 """Holds common properties and methods on segment and subsegment.""" 10 11 @abc.abstractmethod 12 def close(self, end_time: Optional[int] = None): 13 """Close the trace entity by setting `end_time` 14 and flip the in progress flag to False. 15 16 Parameters 17 ---------- 18 end_time: int 19 Time in epoch seconds, by default current time will be used. 20 """ 21 22 @abc.abstractmethod 23 def add_subsegment(self, subsegment: Any): 24 """Add input subsegment as a child subsegment.""" 25 26 @abc.abstractmethod 27 def remove_subsegment(self, subsegment: Any): 28 """Remove input subsegment from child subsegments.""" 29 30 @abc.abstractmethod 31 def put_annotation(self, key: str, value: Union[str, numbers.Number, bool]) -> NoReturn: 32 """Annotate segment or subsegment with a key-value pair. 33 34 Note: Annotations will be indexed for later search query. 35 36 Parameters 37 ---------- 38 key: str 39 Metadata key 40 value: Union[str, numbers.Number, bool] 41 Annotation value 42 """ 43 44 @abc.abstractmethod 45 def put_metadata(self, key: str, value: Any, namespace: str = "default") -> NoReturn: 46 """Add metadata to segment or subsegment. Metadata is not indexed 47 but can be later retrieved by BatchGetTraces API. 48 49 Parameters 50 ---------- 51 key: str 52 Metadata key 53 value: Any 54 Any object that can be serialized into a JSON string 55 namespace: Set[str] 56 Metadata namespace, by default 'default' 57 """ 58 59 @abc.abstractmethod 60 def add_exception(self, exception: BaseException, stack: List[traceback.StackSummary], remote: bool = False): 61 """Add an exception to trace entities. 62 63 Parameters 64 ---------- 65 exception: Exception 66 Caught exception 67 stack: List[traceback.StackSummary] 68 List of traceback summaries 69 70 Output from `traceback.extract_stack()`. 71 remote: bool 72 Whether it's a client error (False) or downstream service error (True), by default False 73 """ 74 75 76 class BaseProvider(abc.ABC): 77 @abc.abstractmethod 78 @contextmanager 79 def in_subsegment(self, name=None, **kwargs) -> Generator[BaseSegment, None, None]: 80 """Return a subsegment context manger. 
81 82 Parameters 83 ---------- 84 name: str 85 Subsegment name 86 kwargs: Optional[dict] 87 Optional parameters to be propagated to segment 88 """ 89 90 @abc.abstractmethod 91 @contextmanager 92 def in_subsegment_async(self, name=None, **kwargs) -> Generator[BaseSegment, None, None]: 93 """Return a subsegment async context manger. 94 95 Parameters 96 ---------- 97 name: str 98 Subsegment name 99 kwargs: Optional[dict] 100 Optional parameters to be propagated to segment 101 """ 102 103 @abc.abstractmethod 104 def put_annotation(self, key: str, value: Union[str, numbers.Number, bool]) -> NoReturn: 105 """Annotate current active trace entity with a key-value pair. 106 107 Note: Annotations will be indexed for later search query. 108 109 Parameters 110 ---------- 111 key: str 112 Metadata key 113 value: Union[str, numbers.Number, bool] 114 Annotation value 115 """ 116 117 @abc.abstractmethod 118 def put_metadata(self, key: str, value: Any, namespace: str = "default") -> NoReturn: 119 """Add metadata to the current active trace entity. 120 121 Note: Metadata is not indexed but can be later retrieved by BatchGetTraces API. 122 123 Parameters 124 ---------- 125 key: str 126 Metadata key 127 value: Any 128 Any object that can be serialized into a JSON string 129 namespace: Set[str] 130 Metadata namespace, by default 'default' 131 """ 132 133 @abc.abstractmethod 134 def patch(self, modules: Sequence[str]) -> NoReturn: 135 """Instrument a set of supported libraries 136 137 Parameters 138 ---------- 139 modules: Set[str] 140 Set of modules to be patched 141 """ 142 143 @abc.abstractmethod 144 def patch_all(self) -> NoReturn: 145 """Instrument all supported libraries""" 146 [end of aws_lambda_powertools/tracing/base.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/aws_lambda_powertools/tracing/base.py b/aws_lambda_powertools/tracing/base.py --- a/aws_lambda_powertools/tracing/base.py +++ b/aws_lambda_powertools/tracing/base.py @@ -2,7 +2,7 @@ import numbers import traceback from contextlib import contextmanager -from typing import Any, Generator, List, NoReturn, Optional, Sequence, Union +from typing import Any, Generator, List, Optional, Sequence, Union class BaseSegment(abc.ABC): @@ -28,7 +28,7 @@ """Remove input subsegment from child subsegments.""" @abc.abstractmethod - def put_annotation(self, key: str, value: Union[str, numbers.Number, bool]) -> NoReturn: + def put_annotation(self, key: str, value: Union[str, numbers.Number, bool]) -> None: """Annotate segment or subsegment with a key-value pair. Note: Annotations will be indexed for later search query. @@ -42,7 +42,7 @@ """ @abc.abstractmethod - def put_metadata(self, key: str, value: Any, namespace: str = "default") -> NoReturn: + def put_metadata(self, key: str, value: Any, namespace: str = "default") -> None: """Add metadata to segment or subsegment. Metadata is not indexed but can be later retrieved by BatchGetTraces API. @@ -101,7 +101,7 @@ """ @abc.abstractmethod - def put_annotation(self, key: str, value: Union[str, numbers.Number, bool]) -> NoReturn: + def put_annotation(self, key: str, value: Union[str, numbers.Number, bool]) -> None: """Annotate current active trace entity with a key-value pair. Note: Annotations will be indexed for later search query. @@ -115,7 +115,7 @@ """ @abc.abstractmethod - def put_metadata(self, key: str, value: Any, namespace: str = "default") -> NoReturn: + def put_metadata(self, key: str, value: Any, namespace: str = "default") -> None: """Add metadata to the current active trace entity. Note: Metadata is not indexed but can be later retrieved by BatchGetTraces API. @@ -131,7 +131,7 @@ """ @abc.abstractmethod - def patch(self, modules: Sequence[str]) -> NoReturn: + def patch(self, modules: Sequence[str]) -> None: """Instrument a set of supported libraries Parameters @@ -141,5 +141,5 @@ """ @abc.abstractmethod - def patch_all(self) -> NoReturn: + def patch_all(self) -> None: """Instrument all supported libraries"""
{"golden_diff": "diff --git a/aws_lambda_powertools/tracing/base.py b/aws_lambda_powertools/tracing/base.py\n--- a/aws_lambda_powertools/tracing/base.py\n+++ b/aws_lambda_powertools/tracing/base.py\n@@ -2,7 +2,7 @@\n import numbers\n import traceback\n from contextlib import contextmanager\n-from typing import Any, Generator, List, NoReturn, Optional, Sequence, Union\n+from typing import Any, Generator, List, Optional, Sequence, Union\n \n \n class BaseSegment(abc.ABC):\n@@ -28,7 +28,7 @@\n \"\"\"Remove input subsegment from child subsegments.\"\"\"\n \n @abc.abstractmethod\n- def put_annotation(self, key: str, value: Union[str, numbers.Number, bool]) -> NoReturn:\n+ def put_annotation(self, key: str, value: Union[str, numbers.Number, bool]) -> None:\n \"\"\"Annotate segment or subsegment with a key-value pair.\n \n Note: Annotations will be indexed for later search query.\n@@ -42,7 +42,7 @@\n \"\"\"\n \n @abc.abstractmethod\n- def put_metadata(self, key: str, value: Any, namespace: str = \"default\") -> NoReturn:\n+ def put_metadata(self, key: str, value: Any, namespace: str = \"default\") -> None:\n \"\"\"Add metadata to segment or subsegment. Metadata is not indexed\n but can be later retrieved by BatchGetTraces API.\n \n@@ -101,7 +101,7 @@\n \"\"\"\n \n @abc.abstractmethod\n- def put_annotation(self, key: str, value: Union[str, numbers.Number, bool]) -> NoReturn:\n+ def put_annotation(self, key: str, value: Union[str, numbers.Number, bool]) -> None:\n \"\"\"Annotate current active trace entity with a key-value pair.\n \n Note: Annotations will be indexed for later search query.\n@@ -115,7 +115,7 @@\n \"\"\"\n \n @abc.abstractmethod\n- def put_metadata(self, key: str, value: Any, namespace: str = \"default\") -> NoReturn:\n+ def put_metadata(self, key: str, value: Any, namespace: str = \"default\") -> None:\n \"\"\"Add metadata to the current active trace entity.\n \n Note: Metadata is not indexed but can be later retrieved by BatchGetTraces API.\n@@ -131,7 +131,7 @@\n \"\"\"\n \n @abc.abstractmethod\n- def patch(self, modules: Sequence[str]) -> NoReturn:\n+ def patch(self, modules: Sequence[str]) -> None:\n \"\"\"Instrument a set of supported libraries\n \n Parameters\n@@ -141,5 +141,5 @@\n \"\"\"\n \n @abc.abstractmethod\n- def patch_all(self) -> NoReturn:\n+ def patch_all(self) -> None:\n \"\"\"Instrument all supported libraries\"\"\"\n", "issue": "`BaseSegment` defines `put_annotation` as having return type `NoReturn`\n### Expected Behaviour\r\n\r\nThe return type should be `None`. `NoReturn` means that the function never returns (e.g. it always throws an exception) and makes type checkers (e.g. mypy) mark all code following `put_annotation` as unreachable.\r\n\r\nAlso applies to `put_metadata`, `patch`, and `patch_all`.\r\n\r\n\r\n### Current Behaviour\r\n\r\nThese methods breaks type checking in any modules they're used because all code following a call to e.g. 
`put_annotation` is marked as unreachable.\r\n\r\n### Code snippet\r\n\r\n```python\r\nwith Tracer().provider.in_subsegment('test') as subsegment:\r\n subsegment.put_metadata('x', 1)\r\n print(\"hi\") # marked as unreachable\r\n```\r\n\r\n\r\n### Possible Solution\r\n\r\nChange the return type to `None`.\r\n\r\n### Steps to Reproduce\r\n\r\nUse the code snippet above and run `mypy` on the file.\r\n\r\n### AWS Lambda Powertools for Python version\r\n\r\nlatest\r\n\r\n### AWS Lambda function runtime\r\n\r\n3.9\r\n\r\n### Packaging format used\r\n\r\nPyPi\r\n\r\n### Debugging logs\r\n\r\n_No response_\n", "before_files": [{"content": "import abc\nimport numbers\nimport traceback\nfrom contextlib import contextmanager\nfrom typing import Any, Generator, List, NoReturn, Optional, Sequence, Union\n\n\nclass BaseSegment(abc.ABC):\n \"\"\"Holds common properties and methods on segment and subsegment.\"\"\"\n\n @abc.abstractmethod\n def close(self, end_time: Optional[int] = None):\n \"\"\"Close the trace entity by setting `end_time`\n and flip the in progress flag to False.\n\n Parameters\n ----------\n end_time: int\n Time in epoch seconds, by default current time will be used.\n \"\"\"\n\n @abc.abstractmethod\n def add_subsegment(self, subsegment: Any):\n \"\"\"Add input subsegment as a child subsegment.\"\"\"\n\n @abc.abstractmethod\n def remove_subsegment(self, subsegment: Any):\n \"\"\"Remove input subsegment from child subsegments.\"\"\"\n\n @abc.abstractmethod\n def put_annotation(self, key: str, value: Union[str, numbers.Number, bool]) -> NoReturn:\n \"\"\"Annotate segment or subsegment with a key-value pair.\n\n Note: Annotations will be indexed for later search query.\n\n Parameters\n ----------\n key: str\n Metadata key\n value: Union[str, numbers.Number, bool]\n Annotation value\n \"\"\"\n\n @abc.abstractmethod\n def put_metadata(self, key: str, value: Any, namespace: str = \"default\") -> NoReturn:\n \"\"\"Add metadata to segment or subsegment. 
Metadata is not indexed\n but can be later retrieved by BatchGetTraces API.\n\n Parameters\n ----------\n key: str\n Metadata key\n value: Any\n Any object that can be serialized into a JSON string\n namespace: Set[str]\n Metadata namespace, by default 'default'\n \"\"\"\n\n @abc.abstractmethod\n def add_exception(self, exception: BaseException, stack: List[traceback.StackSummary], remote: bool = False):\n \"\"\"Add an exception to trace entities.\n\n Parameters\n ----------\n exception: Exception\n Caught exception\n stack: List[traceback.StackSummary]\n List of traceback summaries\n\n Output from `traceback.extract_stack()`.\n remote: bool\n Whether it's a client error (False) or downstream service error (True), by default False\n \"\"\"\n\n\nclass BaseProvider(abc.ABC):\n @abc.abstractmethod\n @contextmanager\n def in_subsegment(self, name=None, **kwargs) -> Generator[BaseSegment, None, None]:\n \"\"\"Return a subsegment context manger.\n\n Parameters\n ----------\n name: str\n Subsegment name\n kwargs: Optional[dict]\n Optional parameters to be propagated to segment\n \"\"\"\n\n @abc.abstractmethod\n @contextmanager\n def in_subsegment_async(self, name=None, **kwargs) -> Generator[BaseSegment, None, None]:\n \"\"\"Return a subsegment async context manger.\n\n Parameters\n ----------\n name: str\n Subsegment name\n kwargs: Optional[dict]\n Optional parameters to be propagated to segment\n \"\"\"\n\n @abc.abstractmethod\n def put_annotation(self, key: str, value: Union[str, numbers.Number, bool]) -> NoReturn:\n \"\"\"Annotate current active trace entity with a key-value pair.\n\n Note: Annotations will be indexed for later search query.\n\n Parameters\n ----------\n key: str\n Metadata key\n value: Union[str, numbers.Number, bool]\n Annotation value\n \"\"\"\n\n @abc.abstractmethod\n def put_metadata(self, key: str, value: Any, namespace: str = \"default\") -> NoReturn:\n \"\"\"Add metadata to the current active trace entity.\n\n Note: Metadata is not indexed but can be later retrieved by BatchGetTraces API.\n\n Parameters\n ----------\n key: str\n Metadata key\n value: Any\n Any object that can be serialized into a JSON string\n namespace: Set[str]\n Metadata namespace, by default 'default'\n \"\"\"\n\n @abc.abstractmethod\n def patch(self, modules: Sequence[str]) -> NoReturn:\n \"\"\"Instrument a set of supported libraries\n\n Parameters\n ----------\n modules: Set[str]\n Set of modules to be patched\n \"\"\"\n\n @abc.abstractmethod\n def patch_all(self) -> NoReturn:\n \"\"\"Instrument all supported libraries\"\"\"\n", "path": "aws_lambda_powertools/tracing/base.py"}]}
num_tokens_prompt: 2,060
num_tokens_diff: 638
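The change in the record above is purely about typing semantics: `NoReturn` declares that a call never returns normally, so a checker such as mypy marks everything after the call as unreachable, while `None` is the correct annotation for a method that simply returns nothing. A tiny self-contained illustration (the function names here are made up):

```python
from typing import NoReturn


def always_fails() -> NoReturn:
    # NoReturn is only correct when the function can never return normally.
    raise RuntimeError("boom")


def put_annotation(key: str, value: str) -> None:
    # Returns nothing, so the right annotation is None, not NoReturn.
    print("annotated %s=%s" % (key, value))


put_annotation("ColdStart", "true")
print("a type checker treats this line as reachable")
# Had put_annotation been annotated "-> NoReturn", mypy would flag the
# print() above as unreachable even though it runs fine at runtime.
```

This is exactly why, in the issue's snippet, everything after `subsegment.put_metadata('x', 1)` was reported as unreachable.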
problem_id: gh_patches_debug_18192
source: rasdani/github-patches
task_type: git_diff
in_source_id: qtile__qtile-2748
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> CryptoTicker migration is incomplete I previously had: ``` widget.CryptoTicker(format="BTC: {avg}", **widget_defaults) ``` but it seems avg is not an allowed key any more per: ``` 2021-09-01 14:42:17,968 ERROR libqtile generic_poll_text.py:poll():L90 got exception polling widget Traceback (most recent call last): File "/home/tycho/.local/lib/python3.9/site-packages/libqtile/widget/generic_poll_text.py", line 88, in poll text = self.parse(body) File "/home/tycho/.local/lib/python3.9/site-packages/libqtile/widget/crypto_ticker.py", line 90, in parse return self.format.format(**variables) ``` /cc @Graeme22 CryptoTicker migration is incomplete I previously had: ``` widget.CryptoTicker(format="BTC: {avg}", **widget_defaults) ``` but it seems avg is not an allowed key any more per: ``` 2021-09-01 14:42:17,968 ERROR libqtile generic_poll_text.py:poll():L90 got exception polling widget Traceback (most recent call last): File "/home/tycho/.local/lib/python3.9/site-packages/libqtile/widget/generic_poll_text.py", line 88, in poll text = self.parse(body) File "/home/tycho/.local/lib/python3.9/site-packages/libqtile/widget/crypto_ticker.py", line 90, in parse return self.format.format(**variables) ``` /cc @Graeme22 </issue> <code> [start of libqtile/scripts/migrate.py] 1 # Copyright (c) 2021, Tycho Andersen. All rights reserved. 2 # 3 # Permission is hereby granted, free of charge, to any person obtaining a copy 4 # of this software and associated documentation files (the "Software"), to deal 5 # in the Software without restriction, including without limitation the rights 6 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 # copies of the Software, and to permit persons to whom the Software is 8 # furnished to do so, subject to the following conditions: 9 # 10 # The above copyright notice and this permission notice shall be included in 11 # all copies or substantial portions of the Software. 12 # 13 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 19 import filecmp 20 import os 21 import os.path 22 import shutil 23 import sys 24 from glob import glob 25 26 BACKUP_SUFFIX = ".migrate.bak" 27 28 try: 29 import bowler 30 except ImportError: 31 pass 32 33 34 def rename_hook(query, fro, to): 35 # could match on dotted_name< 'hook' '.' 'subscribe' '.' '{name}' > 36 # but the replacement gets more complicated... 
37 selector = "'{name}'".format(name=fro) 38 q = query.select_pattern(selector) 39 q.current.kwargs["name"] = fro 40 return q.rename(to) 41 42 43 def client_name_updated(query): 44 """ Rename window_name_change -> client_name_updated""" 45 return rename_hook(query, "window_name_change", "client_name_updated") 46 47 48 def tile_master_windows_rename(query): 49 return ( 50 query 51 .select_function("Tile") 52 .modify_argument("masterWindows", "master_length") 53 ) 54 55 56 def threaded_poll_text_rename(query): 57 return ( 58 query 59 .select_class("ThreadedPollText") 60 .rename("ThreadPoolText") 61 ) 62 63 64 def pacman_to_checkupdates(query): 65 return ( 66 query 67 .select_class("Pacman") 68 .rename("CheckUpdates") 69 ) 70 71 72 def bitcoin_to_crypto(query): 73 return ( 74 query 75 .select_class("BitcoinTicker") 76 .rename("CryptoTicker") 77 ) 78 79 80 def hook_main_function(query): 81 def modify_main(node, capture, filename): 82 main = capture.get("function_def") 83 if main.prev_sibling: 84 for leaf in main.prev_sibling.leaves(): 85 if "startup" == leaf.value: 86 return 87 args = capture.get("function_arguments") 88 if args: 89 args[0].remove() 90 main.prefix += "from libqtile import hook, qtile\n" 91 main.prefix += "@hook.subscribe.startup\n" 92 93 return ( 94 query 95 .select_function("main") 96 .is_def() 97 .modify(modify_main) 98 ) 99 100 101 # Deprecated new_at_current key replaced by new_client_position. 102 # In the node, we want to change the key name 103 # and adapts its value depending of the previous value : 104 # new_at_current=True => new_client_position=before_current 105 # new_at_current<>True => new_client_position=after_current 106 def update_node_nac(node, capture, filename): 107 key = capture.get("k") 108 key.value = "new_client_position" 109 val = capture.get("v") 110 if val.value == "True": 111 val.value = "'before_current'" 112 else: 113 val.value = "'after_current'" 114 115 116 def new_at_current_to_new_client_position(query): 117 old_pattern = """ 118 argument< k="new_at_current" "=" v=any > 119 """ 120 return ( 121 query 122 .select(old_pattern) 123 .modify(update_node_nac) 124 ) 125 126 127 MIGRATIONS = [ 128 client_name_updated, 129 tile_master_windows_rename, 130 threaded_poll_text_rename, 131 pacman_to_checkupdates, 132 bitcoin_to_crypto, 133 hook_main_function, 134 new_at_current_to_new_client_position, 135 ] 136 137 138 MODULE_RENAMES = [ 139 ("libqtile.command_graph", "libqtile.command.graph"), 140 ("libqtile.command_client", "libqtile.command.client"), 141 ("libqtile.command_interface", "libqtile.command.interface"), 142 ("libqtile.command_object", "libqtile.command.base"), 143 ("libqtile.window", "libqtile.backend.x11.window"), 144 ] 145 146 for (fro, to) in MODULE_RENAMES: 147 def f(query, fro=fro, to=to): 148 return ( 149 query 150 .select_module(fro) 151 .rename(to) 152 ) 153 MIGRATIONS.append(f) 154 155 156 def file_and_backup(config_dir): 157 for py in glob(os.path.join(config_dir, "*.py")): 158 backup = py + BACKUP_SUFFIX 159 yield py, backup 160 161 162 def do_migrate(args): 163 if "bowler" not in sys.modules: 164 print("bowler can't be found, not migrating config file") 165 print("install it and try again") 166 sys.exit(1) 167 168 config_dir = os.path.dirname(args.config) 169 for py, backup in file_and_backup(config_dir): 170 shutil.copyfile(py, backup) 171 172 for m in MIGRATIONS: 173 q = bowler.Query(config_dir) 174 m(q).execute(interactive=not args.yes, write=True) 175 176 changed = False 177 for py, backup in file_and_backup(config_dir): 178 
backup = py + BACKUP_SUFFIX 179 if not filecmp.cmp(py, backup, shallow=False): 180 changed = True 181 break 182 183 if not changed: 184 print("Config unchanged.") 185 for _, backup in file_and_backup(config_dir): 186 os.remove(backup) 187 188 189 def add_subcommand(subparsers, parents): 190 parser = subparsers.add_parser( 191 "migrate", 192 parents=parents, 193 help="Migrate a configuration file to the current API" 194 ) 195 parser.add_argument( 196 "-c", 197 "--config", 198 action="store", 199 default=os.path.expanduser( 200 os.path.join(os.getenv("XDG_CONFIG_HOME", "~/.config"), "qtile", "config.py") 201 ), 202 help="Use the specified configuration file (migrates every .py file in this directory)", 203 ) 204 parser.add_argument( 205 "--yes", 206 action="store_true", 207 help="Automatically apply diffs with no confirmation", 208 ) 209 parser.set_defaults(func=do_migrate) 210 [end of libqtile/scripts/migrate.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/libqtile/scripts/migrate.py b/libqtile/scripts/migrate.py --- a/libqtile/scripts/migrate.py +++ b/libqtile/scripts/migrate.py @@ -69,10 +69,32 @@ ) +def reset_format(node, capture, filename): + args = capture.get("class_arguments") + if args: + if args[0].type == 260: # argument list + n_children = len(args[0].children) + for i in range(n_children): + # we only want to remove the format argument + if 'format' in str(args[0].children[i]): + # remove the argument and the trailing or preceeding comma + if i == n_children - 1: # last argument + args[0].children[i - 1].remove() + args[0].children[i - 1].remove() + else: + args[0].children[i].remove() + args[0].children[i].remove() + + break + else: # there's only one argument + args[0].remove() + + def bitcoin_to_crypto(query): return ( query .select_class("BitcoinTicker") + .modify(reset_format) .rename("CryptoTicker") )
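To make the failure concrete: `CryptoTicker.parse()` formats `self.format` with whatever keys the widget now supplies, and `avg` is no longer among them, so the old format string fails on a missing key. Below is a minimal, self-contained sketch of that behaviour; the key names are placeholders, not the widget's real variables.

```python
# Stand-in for the variables CryptoTicker.parse() passes to str.format();
# the real widget supplies its own keys, "avg" just isn't one of them anymore.
variables = {"symbol": "BTC", "amount": 48000.0}

old_format = "BTC: {avg}"      # pre-migration format string from the issue
new_format = "BTC: {amount}"   # only keys that are actually supplied may appear

try:
    print(old_format.format(**variables))
except KeyError as missing_key:
    print(f"old format string fails: missing key {missing_key}")

print(new_format.format(**variables))
```

The patch above therefore strips the `format=` argument during migration rather than trying to translate old keys into new ones.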
{"golden_diff": "diff --git a/libqtile/scripts/migrate.py b/libqtile/scripts/migrate.py\n--- a/libqtile/scripts/migrate.py\n+++ b/libqtile/scripts/migrate.py\n@@ -69,10 +69,32 @@\n )\n \n \n+def reset_format(node, capture, filename):\n+ args = capture.get(\"class_arguments\")\n+ if args:\n+ if args[0].type == 260: # argument list\n+ n_children = len(args[0].children)\n+ for i in range(n_children):\n+ # we only want to remove the format argument\n+ if 'format' in str(args[0].children[i]):\n+ # remove the argument and the trailing or preceeding comma\n+ if i == n_children - 1: # last argument\n+ args[0].children[i - 1].remove()\n+ args[0].children[i - 1].remove()\n+ else:\n+ args[0].children[i].remove()\n+ args[0].children[i].remove()\n+\n+ break\n+ else: # there's only one argument\n+ args[0].remove()\n+\n+\n def bitcoin_to_crypto(query):\n return (\n query\n .select_class(\"BitcoinTicker\")\n+ .modify(reset_format)\n .rename(\"CryptoTicker\")\n )\n", "issue": "CryptoTicker migration is incomplete\nI previously had:\r\n\r\n```\r\nwidget.CryptoTicker(format=\"BTC: {avg}\", **widget_defaults)\r\n```\r\n\r\nbut it seems avg is not an allowed key any more per:\r\n\r\n```\r\n2021-09-01 14:42:17,968 ERROR libqtile generic_poll_text.py:poll():L90 got exception polling widget\r\nTraceback (most recent call last):\r\n File \"/home/tycho/.local/lib/python3.9/site-packages/libqtile/widget/generic_poll_text.py\", line 88, in poll\r\n text = self.parse(body)\r\n File \"/home/tycho/.local/lib/python3.9/site-packages/libqtile/widget/crypto_ticker.py\", line 90, in parse\r\n return self.format.format(**variables)\r\n```\r\n\r\n/cc @Graeme22 \nCryptoTicker migration is incomplete\nI previously had:\r\n\r\n```\r\nwidget.CryptoTicker(format=\"BTC: {avg}\", **widget_defaults)\r\n```\r\n\r\nbut it seems avg is not an allowed key any more per:\r\n\r\n```\r\n2021-09-01 14:42:17,968 ERROR libqtile generic_poll_text.py:poll():L90 got exception polling widget\r\nTraceback (most recent call last):\r\n File \"/home/tycho/.local/lib/python3.9/site-packages/libqtile/widget/generic_poll_text.py\", line 88, in poll\r\n text = self.parse(body)\r\n File \"/home/tycho/.local/lib/python3.9/site-packages/libqtile/widget/crypto_ticker.py\", line 90, in parse\r\n return self.format.format(**variables)\r\n```\r\n\r\n/cc @Graeme22 \n", "before_files": [{"content": "# Copyright (c) 2021, Tycho Andersen. All rights reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nimport filecmp\nimport os\nimport os.path\nimport shutil\nimport sys\nfrom glob import glob\n\nBACKUP_SUFFIX = \".migrate.bak\"\n\ntry:\n import bowler\nexcept ImportError:\n pass\n\n\ndef rename_hook(query, fro, to):\n # could match on dotted_name< 'hook' '.' 'subscribe' '.' '{name}' >\n # but the replacement gets more complicated...\n selector = \"'{name}'\".format(name=fro)\n q = query.select_pattern(selector)\n q.current.kwargs[\"name\"] = fro\n return q.rename(to)\n\n\ndef client_name_updated(query):\n \"\"\" Rename window_name_change -> client_name_updated\"\"\"\n return rename_hook(query, \"window_name_change\", \"client_name_updated\")\n\n\ndef tile_master_windows_rename(query):\n return (\n query\n .select_function(\"Tile\")\n .modify_argument(\"masterWindows\", \"master_length\")\n )\n\n\ndef threaded_poll_text_rename(query):\n return (\n query\n .select_class(\"ThreadedPollText\")\n .rename(\"ThreadPoolText\")\n )\n\n\ndef pacman_to_checkupdates(query):\n return (\n query\n .select_class(\"Pacman\")\n .rename(\"CheckUpdates\")\n )\n\n\ndef bitcoin_to_crypto(query):\n return (\n query\n .select_class(\"BitcoinTicker\")\n .rename(\"CryptoTicker\")\n )\n\n\ndef hook_main_function(query):\n def modify_main(node, capture, filename):\n main = capture.get(\"function_def\")\n if main.prev_sibling:\n for leaf in main.prev_sibling.leaves():\n if \"startup\" == leaf.value:\n return\n args = capture.get(\"function_arguments\")\n if args:\n args[0].remove()\n main.prefix += \"from libqtile import hook, qtile\\n\"\n main.prefix += \"@hook.subscribe.startup\\n\"\n\n return (\n query\n .select_function(\"main\")\n .is_def()\n .modify(modify_main)\n )\n\n\n# Deprecated new_at_current key replaced by new_client_position.\n# In the node, we want to change the key name\n# and adapts its value depending of the previous value :\n# new_at_current=True => new_client_position=before_current\n# new_at_current<>True => new_client_position=after_current\ndef update_node_nac(node, capture, filename):\n key = capture.get(\"k\")\n key.value = \"new_client_position\"\n val = capture.get(\"v\")\n if val.value == \"True\":\n val.value = \"'before_current'\"\n else:\n val.value = \"'after_current'\"\n\n\ndef new_at_current_to_new_client_position(query):\n old_pattern = \"\"\"\n argument< k=\"new_at_current\" \"=\" v=any >\n \"\"\"\n return (\n query\n .select(old_pattern)\n .modify(update_node_nac)\n )\n\n\nMIGRATIONS = [\n client_name_updated,\n tile_master_windows_rename,\n threaded_poll_text_rename,\n pacman_to_checkupdates,\n bitcoin_to_crypto,\n hook_main_function,\n new_at_current_to_new_client_position,\n]\n\n\nMODULE_RENAMES = [\n (\"libqtile.command_graph\", \"libqtile.command.graph\"),\n (\"libqtile.command_client\", \"libqtile.command.client\"),\n (\"libqtile.command_interface\", \"libqtile.command.interface\"),\n (\"libqtile.command_object\", \"libqtile.command.base\"),\n (\"libqtile.window\", \"libqtile.backend.x11.window\"),\n]\n\nfor (fro, to) in MODULE_RENAMES:\n def f(query, fro=fro, to=to):\n return (\n query\n .select_module(fro)\n .rename(to)\n )\n MIGRATIONS.append(f)\n\n\ndef file_and_backup(config_dir):\n for py in glob(os.path.join(config_dir, \"*.py\")):\n backup = py + BACKUP_SUFFIX\n yield py, backup\n\n\ndef do_migrate(args):\n if 
\"bowler\" not in sys.modules:\n print(\"bowler can't be found, not migrating config file\")\n print(\"install it and try again\")\n sys.exit(1)\n\n config_dir = os.path.dirname(args.config)\n for py, backup in file_and_backup(config_dir):\n shutil.copyfile(py, backup)\n\n for m in MIGRATIONS:\n q = bowler.Query(config_dir)\n m(q).execute(interactive=not args.yes, write=True)\n\n changed = False\n for py, backup in file_and_backup(config_dir):\n backup = py + BACKUP_SUFFIX\n if not filecmp.cmp(py, backup, shallow=False):\n changed = True\n break\n\n if not changed:\n print(\"Config unchanged.\")\n for _, backup in file_and_backup(config_dir):\n os.remove(backup)\n\n\ndef add_subcommand(subparsers, parents):\n parser = subparsers.add_parser(\n \"migrate\",\n parents=parents,\n help=\"Migrate a configuration file to the current API\"\n )\n parser.add_argument(\n \"-c\",\n \"--config\",\n action=\"store\",\n default=os.path.expanduser(\n os.path.join(os.getenv(\"XDG_CONFIG_HOME\", \"~/.config\"), \"qtile\", \"config.py\")\n ),\n help=\"Use the specified configuration file (migrates every .py file in this directory)\",\n )\n parser.add_argument(\n \"--yes\",\n action=\"store_true\",\n help=\"Automatically apply diffs with no confirmation\",\n )\n parser.set_defaults(func=do_migrate)\n", "path": "libqtile/scripts/migrate.py"}]}
2,856
298
gh_patches_debug_43024
rasdani/github-patches
git_diff
comic__grand-challenge.org-2087
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Move `wait_for_build_completion` to acks late micro short queue and use delays `wait_for_build_completion` uses a countdown that causes the same issues described in https://github.com/comic/grand-challenge.org/blob/ce130d451034377b0f3dfd83590268e48a66d518/app/grandchallenge/components/tasks.py#L255-L271. This task is not resource-intensive so: - If the task is not idempotent it should be made so - It should be run on the acks-late-micro-short queue - Retries should use the same strategy as https://github.com/comic/grand-challenge.org/blob/ce130d451034377b0f3dfd83590268e48a66d518/app/grandchallenge/components/tasks.py#L323-L327 </issue> <code> [start of app/grandchallenge/codebuild/models.py] 1 import gzip 2 from tempfile import NamedTemporaryFile 3 4 import boto3 5 from django.conf import settings 6 from django.core import files 7 from django.db import models 8 from django.utils.translation import gettext_lazy as _ 9 10 from grandchallenge.algorithms.models import AlgorithmImage 11 from grandchallenge.core.models import UUIDModel 12 from grandchallenge.core.storage import private_s3_storage 13 from grandchallenge.github.models import GitHubWebhookMessage 14 15 16 class BuildStatusChoices(models.TextChoices): 17 """From https://docs.aws.amazon.com/codebuild/latest/APIReference/API_Build.html""" 18 19 SUCCEEDED = "SUCCEEDED", _("Succeeded") 20 FAILED = "FAILED", _("Failed") 21 FAULT = "FAULT", _("Fault") 22 TIMED_OUT = "TIMED_OUT", _("Timed Out") 23 IN_PROGRESS = "IN_PROGRESS", _("In Progress") 24 STOPPED = "STOPPED", _("Stopped") 25 26 27 class Build(UUIDModel): 28 webhook_message = models.ForeignKey( 29 GitHubWebhookMessage, on_delete=models.SET_NULL, null=True 30 ) 31 algorithm_image = models.OneToOneField( 32 AlgorithmImage, on_delete=models.SET_NULL, null=True 33 ) 34 build_config = models.JSONField() 35 build_id = models.CharField(max_length=1024) 36 status = models.CharField( 37 choices=BuildStatusChoices.choices, max_length=11 38 ) 39 build_log = models.TextField(blank=True) 40 41 BuildStatusChoices = BuildStatusChoices 42 __client = None 43 44 @property 45 def client(self): 46 if self.__client is None: 47 self.__client = boto3.client("codebuild") 48 return self.__client 49 50 @property 51 def build_number(self): 52 return self.build_id.split(":")[-1] 53 54 def refresh_status(self): 55 build_statuses = self.client.batch_get_builds(ids=[self.build_id]) 56 self.status = build_statuses["builds"][0]["buildStatus"] 57 58 def refresh_logs(self): 59 try: 60 with private_s3_storage.open( 61 f"codebuild/logs/{self.build_number}.gz" 62 ) as file: 63 self.build_log = gzip.open(file).read().decode("utf-8") 64 except FileNotFoundError: 65 self.build_log = "Log file not available." 
66 67 def add_image_to_algorithm(self): 68 with private_s3_storage.open( 69 f"codebuild/artifacts/{self.build_number}/{self.build_config['projectName']}/container-image.tar.gz" 70 ) as file: 71 with NamedTemporaryFile(delete=True) as tmp_file: 72 with open(tmp_file.name, "wb") as fd: 73 for chunk in file.chunks(): 74 fd.write(chunk) 75 76 tmp_file.flush() 77 temp_file = files.File( 78 tmp_file, name=f"{str(self.pk)}.tar.gz", 79 ) 80 81 self.algorithm_image.image = temp_file 82 self.algorithm_image.save() 83 84 def _create_build(self): 85 self.build_config = { 86 "projectName": settings.CODEBUILD_PROJECT_NAME, 87 "sourceLocationOverride": f"{settings.PRIVATE_S3_STORAGE_KWARGS['bucket_name']}/{self.webhook_message.zipfile.name}", 88 "sourceTypeOverride": "S3", 89 "environmentVariablesOverride": [ 90 { 91 "name": "IMAGE_REPO_NAME", 92 "value": f"{AlgorithmImage._meta.app_label}/{AlgorithmImage._meta.model_name}", 93 }, 94 {"name": "IMAGE_TAG", "value": str(self.algorithm_image.pk)}, 95 ], 96 } 97 98 build_data = self.client.start_build(**self.build_config) 99 100 self.build_id = build_data["build"]["id"] 101 self.status = build_data["build"]["buildStatus"] 102 103 def save(self, *args, **kwargs): 104 if self._state.adding: 105 self._create_build() 106 107 super().save(*args, **kwargs) 108 109 @property 110 def animate(self): 111 return self.status == BuildStatusChoices.IN_PROGRESS 112 113 @property 114 def status_context(self): 115 if self.status == BuildStatusChoices.SUCCEEDED: 116 return "success" 117 elif self.status in {BuildStatusChoices.STOPPED}: 118 return "warning" 119 elif self.status in { 120 BuildStatusChoices.FAILED, 121 BuildStatusChoices.FAULT, 122 BuildStatusChoices.TIMED_OUT, 123 }: 124 return "danger" 125 elif self.status in {BuildStatusChoices.IN_PROGRESS}: 126 return "info" 127 else: 128 return "secondary" 129 130 class Meta: 131 indexes = [ 132 models.Index(fields=["build_id"]), 133 ] 134 [end of app/grandchallenge/codebuild/models.py] [start of app/grandchallenge/codebuild/tasks.py] 1 from celery import shared_task 2 from django.apps import apps 3 from django.core.exceptions import ObjectDoesNotExist 4 from django.db.transaction import on_commit 5 6 from grandchallenge.algorithms.models import Algorithm, AlgorithmImage 7 8 9 @shared_task() 10 def create_codebuild_build(*, pk): 11 GitHubWebhookMessage = apps.get_model( # noqa: N806 12 app_label="github", model_name="GitHubWebhookMessage" 13 ) 14 ghwm = GitHubWebhookMessage.objects.get(pk=pk) 15 16 Build = apps.get_model( # noqa: N806 17 app_label="codebuild", model_name="Build" 18 ) 19 20 if Build.objects.filter(webhook_message=ghwm).exists(): 21 # Build already exists 22 return 23 24 try: 25 algorithm = Algorithm.objects.get( 26 repo_name=ghwm.payload["repository"]["full_name"] 27 ) 28 except ObjectDoesNotExist: 29 # Repository is not linked to algorithm 30 return 31 32 algorithm_image = AlgorithmImage.objects.create( 33 algorithm=algorithm, 34 requires_gpu=algorithm.image_requires_gpu, 35 requires_memory_gb=algorithm.image_requires_memory_gb, 36 ) 37 38 build = Build.objects.create( 39 webhook_message=ghwm, algorithm_image=algorithm_image, 40 ) 41 42 on_commit( 43 lambda: wait_for_build_completion.apply_async( 44 kwargs={"build_pk": str(build.pk)} 45 ) 46 ) 47 48 49 @shared_task(bind=True, max_retries=120) 50 def wait_for_build_completion(self, *, build_pk): 51 Build = apps.get_model( # noqa: N806 52 app_label="codebuild", model_name="Build" 53 ) 54 55 build = Build.objects.get(pk=build_pk) 56 build.refresh_status() 57 
58 if build.status == build.BuildStatusChoices.IN_PROGRESS: 59 self.retry(countdown=30) 60 else: 61 build.refresh_logs() 62 build.save() 63 if build.status == build.BuildStatusChoices.SUCCEEDED: 64 on_commit( 65 lambda: add_image_to_algorithm.apply_async( 66 kwargs={"build_pk": str(build_pk)} 67 ) 68 ) 69 70 71 @shared_task 72 def add_image_to_algorithm(*, build_pk): 73 Build = apps.get_model( # noqa: N806 74 app_label="codebuild", model_name="Build" 75 ) 76 77 build = Build.objects.get(pk=build_pk) 78 build.add_image_to_algorithm() 79 [end of app/grandchallenge/codebuild/tasks.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/app/grandchallenge/codebuild/models.py b/app/grandchallenge/codebuild/models.py
--- a/app/grandchallenge/codebuild/models.py
+++ b/app/grandchallenge/codebuild/models.py
@@ -65,6 +65,7 @@
         self.build_log = "Log file not available."
 
     def add_image_to_algorithm(self):
+        # TODO, this would be much faster using S3 copy, can then run on a smaller queue
         with private_s3_storage.open(
             f"codebuild/artifacts/{self.build_number}/{self.build_config['projectName']}/container-image.tar.gz"
         ) as file:
diff --git a/app/grandchallenge/codebuild/tasks.py b/app/grandchallenge/codebuild/tasks.py
--- a/app/grandchallenge/codebuild/tasks.py
+++ b/app/grandchallenge/codebuild/tasks.py
@@ -1,12 +1,15 @@
 from celery import shared_task
 from django.apps import apps
+from django.conf import settings
 from django.core.exceptions import ObjectDoesNotExist
+from django.db import transaction
 from django.db.transaction import on_commit
 
 from grandchallenge.algorithms.models import Algorithm, AlgorithmImage
+from grandchallenge.components.tasks import _retry
 
 
-@shared_task()
+@shared_task(**settings.CELERY_TASK_DECORATOR_KWARGS["acks-late-micro-short"])
 def create_codebuild_build(*, pk):
     GitHubWebhookMessage = apps.get_model(  # noqa: N806
         app_label="github", model_name="GitHubWebhookMessage"
@@ -29,50 +32,59 @@
         # Repository is not linked to algorithm
         return
 
-    algorithm_image = AlgorithmImage.objects.create(
-        algorithm=algorithm,
-        requires_gpu=algorithm.image_requires_gpu,
-        requires_memory_gb=algorithm.image_requires_memory_gb,
-    )
-
-    build = Build.objects.create(
-        webhook_message=ghwm, algorithm_image=algorithm_image,
-    )
+    with transaction.atomic():
+        algorithm_image = AlgorithmImage.objects.create(
+            algorithm=algorithm,
+            requires_gpu=algorithm.image_requires_gpu,
+            requires_memory_gb=algorithm.image_requires_memory_gb,
+        )
+        build = Build.objects.create(
+            webhook_message=ghwm, algorithm_image=algorithm_image,
+        )
 
-    on_commit(
-        lambda: wait_for_build_completion.apply_async(
-            kwargs={"build_pk": str(build.pk)}
+        on_commit(
+            lambda: wait_for_build_completion.apply_async(
+                kwargs={"build_pk": str(build.pk)}
+            )
         )
-    )
 
 
-@shared_task(bind=True, max_retries=120)
-def wait_for_build_completion(self, *, build_pk):
+@shared_task(**settings.CELERY_TASK_DECORATOR_KWARGS["acks-late-micro-short"])
+def wait_for_build_completion(*, build_pk, retries=0):
     Build = apps.get_model(  # noqa: N806
         app_label="codebuild", model_name="Build"
     )
 
     build = Build.objects.get(pk=build_pk)
-    build.refresh_status()
-
-    if build.status == build.BuildStatusChoices.IN_PROGRESS:
-        self.retry(countdown=30)
-    else:
-        build.refresh_logs()
-        build.save()
-        if build.status == build.BuildStatusChoices.SUCCEEDED:
-            on_commit(
-                lambda: add_image_to_algorithm.apply_async(
-                    kwargs={"build_pk": str(build_pk)}
-                )
+
+    with transaction.atomic():
+        build.refresh_status()
+
+        if build.status == build.BuildStatusChoices.IN_PROGRESS:
+            _retry(
+                task=wait_for_build_completion,
+                signature_kwargs={"kwargs": {"build_pk": build_pk}},
+                retries=retries,
+            )
+            return
+        else:
+            build.refresh_logs()
+            build.save()
+            if build.status == build.BuildStatusChoices.SUCCEEDED:
+                on_commit(
+                    lambda: add_image_to_algorithm.apply_async(
+                        kwargs={"build_pk": str(build_pk)}
+                    )
+                )
 
 
-@shared_task
+@shared_task(**settings.CELERY_TASK_DECORATOR_KWARGS["acks-late-2xlarge"])
 def add_image_to_algorithm(*, build_pk):
     Build = apps.get_model(  # noqa: N806
         app_label="codebuild", model_name="Build"
     )
 
     build = Build.objects.get(pk=build_pk)
-    build.add_image_to_algorithm()
+
+    if not build.algorithm_image.image:
+        build.add_image_to_algorithm()
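The heart of the change is replacing `self.retry(countdown=30)` with the `_retry` helper from `grandchallenge.components.tasks`, whose real implementation is not shown here. A simplified sketch of the pattern follows: re-enqueue a fresh, idempotent message that carries its own `retries` counter, instead of leaving an unacked message waiting out a countdown. The queue wiring and delayed-delivery handling of the real helper are omitted, and all names below except `_retry`'s call signature are made up for illustration.

```python
from celery import shared_task

MAX_RETRIES = 120  # mirrors the old max_retries=120 budget


def _retry_sketch(*, task, signature_kwargs, retries):
    """Simplified stand-in for grandchallenge.components.tasks._retry:
    enqueue a brand-new message (with an incremented retries counter)
    so the current message can be acked immediately."""
    if retries >= MAX_RETRIES:
        raise RuntimeError("build did not finish within the retry budget")
    kwargs = dict(signature_kwargs.get("kwargs", {}), retries=retries + 1)
    task.apply_async(kwargs=kwargs)


@shared_task(acks_late=True)
def poll_build_status(*, build_pk, retries=0):
    # Idempotent: everything the task needs travels in the message, so a
    # redelivered copy simply repeats a cheap status check.
    if build_still_running(build_pk):
        _retry_sketch(
            task=poll_build_status,
            signature_kwargs={"kwargs": {"build_pk": build_pk}},
            retries=retries,
        )


def build_still_running(build_pk):
    # Placeholder so the sketch imports cleanly; the real task asks
    # CodeBuild / the database for the current status.
    return False
```

Note that the actual patch also wraps the status refresh and save in `transaction.atomic()`, so the `on_commit` hand-off to `add_image_to_algorithm` only happens once the updated build row is committed.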
{"golden_diff": "diff --git a/app/grandchallenge/codebuild/models.py b/app/grandchallenge/codebuild/models.py\n--- a/app/grandchallenge/codebuild/models.py\n+++ b/app/grandchallenge/codebuild/models.py\n@@ -65,6 +65,7 @@\n self.build_log = \"Log file not available.\"\n \n def add_image_to_algorithm(self):\n+ # TODO, this would be much faster using S3 copy, can then run on a smaller queue\n with private_s3_storage.open(\n f\"codebuild/artifacts/{self.build_number}/{self.build_config['projectName']}/container-image.tar.gz\"\n ) as file:\ndiff --git a/app/grandchallenge/codebuild/tasks.py b/app/grandchallenge/codebuild/tasks.py\n--- a/app/grandchallenge/codebuild/tasks.py\n+++ b/app/grandchallenge/codebuild/tasks.py\n@@ -1,12 +1,15 @@\n from celery import shared_task\n from django.apps import apps\n+from django.conf import settings\n from django.core.exceptions import ObjectDoesNotExist\n+from django.db import transaction\n from django.db.transaction import on_commit\n \n from grandchallenge.algorithms.models import Algorithm, AlgorithmImage\n+from grandchallenge.components.tasks import _retry\n \n \n-@shared_task()\n+@shared_task(**settings.CELERY_TASK_DECORATOR_KWARGS[\"acks-late-micro-short\"])\n def create_codebuild_build(*, pk):\n GitHubWebhookMessage = apps.get_model( # noqa: N806\n app_label=\"github\", model_name=\"GitHubWebhookMessage\"\n@@ -29,50 +32,59 @@\n # Repository is not linked to algorithm\n return\n \n- algorithm_image = AlgorithmImage.objects.create(\n- algorithm=algorithm,\n- requires_gpu=algorithm.image_requires_gpu,\n- requires_memory_gb=algorithm.image_requires_memory_gb,\n- )\n-\n- build = Build.objects.create(\n- webhook_message=ghwm, algorithm_image=algorithm_image,\n- )\n+ with transaction.atomic():\n+ algorithm_image = AlgorithmImage.objects.create(\n+ algorithm=algorithm,\n+ requires_gpu=algorithm.image_requires_gpu,\n+ requires_memory_gb=algorithm.image_requires_memory_gb,\n+ )\n+ build = Build.objects.create(\n+ webhook_message=ghwm, algorithm_image=algorithm_image,\n+ )\n \n- on_commit(\n- lambda: wait_for_build_completion.apply_async(\n- kwargs={\"build_pk\": str(build.pk)}\n+ on_commit(\n+ lambda: wait_for_build_completion.apply_async(\n+ kwargs={\"build_pk\": str(build.pk)}\n+ )\n )\n- )\n \n \n-@shared_task(bind=True, max_retries=120)\n-def wait_for_build_completion(self, *, build_pk):\n+@shared_task(**settings.CELERY_TASK_DECORATOR_KWARGS[\"acks-late-micro-short\"])\n+def wait_for_build_completion(*, build_pk, retries=0):\n Build = apps.get_model( # noqa: N806\n app_label=\"codebuild\", model_name=\"Build\"\n )\n \n build = Build.objects.get(pk=build_pk)\n- build.refresh_status()\n-\n- if build.status == build.BuildStatusChoices.IN_PROGRESS:\n- self.retry(countdown=30)\n- else:\n- build.refresh_logs()\n- build.save()\n- if build.status == build.BuildStatusChoices.SUCCEEDED:\n- on_commit(\n- lambda: add_image_to_algorithm.apply_async(\n- kwargs={\"build_pk\": str(build_pk)}\n- )\n+\n+ with transaction.atomic():\n+ build.refresh_status()\n+\n+ if build.status == build.BuildStatusChoices.IN_PROGRESS:\n+ _retry(\n+ task=wait_for_build_completion,\n+ signature_kwargs={\"kwargs\": {\"build_pk\": build_pk}},\n+ retries=retries,\n )\n+ return\n+ else:\n+ build.refresh_logs()\n+ build.save()\n+ if build.status == build.BuildStatusChoices.SUCCEEDED:\n+ on_commit(\n+ lambda: add_image_to_algorithm.apply_async(\n+ kwargs={\"build_pk\": str(build_pk)}\n+ )\n+ )\n \n \n-@shared_task\n+@shared_task(**settings.CELERY_TASK_DECORATOR_KWARGS[\"acks-late-2xlarge\"])\n def 
add_image_to_algorithm(*, build_pk):\n Build = apps.get_model( # noqa: N806\n app_label=\"codebuild\", model_name=\"Build\"\n )\n \n build = Build.objects.get(pk=build_pk)\n- build.add_image_to_algorithm()\n+\n+ if not build.algorithm_image.image:\n+ build.add_image_to_algorithm()\n", "issue": "Move `wait_for_build_completion` to acks late micro short queue and use delays\n`wait_for_build_completion` uses a countdown that causes the same issues described in https://github.com/comic/grand-challenge.org/blob/ce130d451034377b0f3dfd83590268e48a66d518/app/grandchallenge/components/tasks.py#L255-L271. \r\n\r\nThis task is not resource-intensive so:\r\n- If the task is not idempotent it should be made so\r\n- It should be run on the acks-late-micro-short queue\r\n- Retries should use the same strategy as https://github.com/comic/grand-challenge.org/blob/ce130d451034377b0f3dfd83590268e48a66d518/app/grandchallenge/components/tasks.py#L323-L327\n", "before_files": [{"content": "import gzip\nfrom tempfile import NamedTemporaryFile\n\nimport boto3\nfrom django.conf import settings\nfrom django.core import files\nfrom django.db import models\nfrom django.utils.translation import gettext_lazy as _\n\nfrom grandchallenge.algorithms.models import AlgorithmImage\nfrom grandchallenge.core.models import UUIDModel\nfrom grandchallenge.core.storage import private_s3_storage\nfrom grandchallenge.github.models import GitHubWebhookMessage\n\n\nclass BuildStatusChoices(models.TextChoices):\n \"\"\"From https://docs.aws.amazon.com/codebuild/latest/APIReference/API_Build.html\"\"\"\n\n SUCCEEDED = \"SUCCEEDED\", _(\"Succeeded\")\n FAILED = \"FAILED\", _(\"Failed\")\n FAULT = \"FAULT\", _(\"Fault\")\n TIMED_OUT = \"TIMED_OUT\", _(\"Timed Out\")\n IN_PROGRESS = \"IN_PROGRESS\", _(\"In Progress\")\n STOPPED = \"STOPPED\", _(\"Stopped\")\n\n\nclass Build(UUIDModel):\n webhook_message = models.ForeignKey(\n GitHubWebhookMessage, on_delete=models.SET_NULL, null=True\n )\n algorithm_image = models.OneToOneField(\n AlgorithmImage, on_delete=models.SET_NULL, null=True\n )\n build_config = models.JSONField()\n build_id = models.CharField(max_length=1024)\n status = models.CharField(\n choices=BuildStatusChoices.choices, max_length=11\n )\n build_log = models.TextField(blank=True)\n\n BuildStatusChoices = BuildStatusChoices\n __client = None\n\n @property\n def client(self):\n if self.__client is None:\n self.__client = boto3.client(\"codebuild\")\n return self.__client\n\n @property\n def build_number(self):\n return self.build_id.split(\":\")[-1]\n\n def refresh_status(self):\n build_statuses = self.client.batch_get_builds(ids=[self.build_id])\n self.status = build_statuses[\"builds\"][0][\"buildStatus\"]\n\n def refresh_logs(self):\n try:\n with private_s3_storage.open(\n f\"codebuild/logs/{self.build_number}.gz\"\n ) as file:\n self.build_log = gzip.open(file).read().decode(\"utf-8\")\n except FileNotFoundError:\n self.build_log = \"Log file not available.\"\n\n def add_image_to_algorithm(self):\n with private_s3_storage.open(\n f\"codebuild/artifacts/{self.build_number}/{self.build_config['projectName']}/container-image.tar.gz\"\n ) as file:\n with NamedTemporaryFile(delete=True) as tmp_file:\n with open(tmp_file.name, \"wb\") as fd:\n for chunk in file.chunks():\n fd.write(chunk)\n\n tmp_file.flush()\n temp_file = files.File(\n tmp_file, name=f\"{str(self.pk)}.tar.gz\",\n )\n\n self.algorithm_image.image = temp_file\n self.algorithm_image.save()\n\n def _create_build(self):\n self.build_config = {\n \"projectName\": 
settings.CODEBUILD_PROJECT_NAME,\n \"sourceLocationOverride\": f\"{settings.PRIVATE_S3_STORAGE_KWARGS['bucket_name']}/{self.webhook_message.zipfile.name}\",\n \"sourceTypeOverride\": \"S3\",\n \"environmentVariablesOverride\": [\n {\n \"name\": \"IMAGE_REPO_NAME\",\n \"value\": f\"{AlgorithmImage._meta.app_label}/{AlgorithmImage._meta.model_name}\",\n },\n {\"name\": \"IMAGE_TAG\", \"value\": str(self.algorithm_image.pk)},\n ],\n }\n\n build_data = self.client.start_build(**self.build_config)\n\n self.build_id = build_data[\"build\"][\"id\"]\n self.status = build_data[\"build\"][\"buildStatus\"]\n\n def save(self, *args, **kwargs):\n if self._state.adding:\n self._create_build()\n\n super().save(*args, **kwargs)\n\n @property\n def animate(self):\n return self.status == BuildStatusChoices.IN_PROGRESS\n\n @property\n def status_context(self):\n if self.status == BuildStatusChoices.SUCCEEDED:\n return \"success\"\n elif self.status in {BuildStatusChoices.STOPPED}:\n return \"warning\"\n elif self.status in {\n BuildStatusChoices.FAILED,\n BuildStatusChoices.FAULT,\n BuildStatusChoices.TIMED_OUT,\n }:\n return \"danger\"\n elif self.status in {BuildStatusChoices.IN_PROGRESS}:\n return \"info\"\n else:\n return \"secondary\"\n\n class Meta:\n indexes = [\n models.Index(fields=[\"build_id\"]),\n ]\n", "path": "app/grandchallenge/codebuild/models.py"}, {"content": "from celery import shared_task\nfrom django.apps import apps\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.db.transaction import on_commit\n\nfrom grandchallenge.algorithms.models import Algorithm, AlgorithmImage\n\n\n@shared_task()\ndef create_codebuild_build(*, pk):\n GitHubWebhookMessage = apps.get_model( # noqa: N806\n app_label=\"github\", model_name=\"GitHubWebhookMessage\"\n )\n ghwm = GitHubWebhookMessage.objects.get(pk=pk)\n\n Build = apps.get_model( # noqa: N806\n app_label=\"codebuild\", model_name=\"Build\"\n )\n\n if Build.objects.filter(webhook_message=ghwm).exists():\n # Build already exists\n return\n\n try:\n algorithm = Algorithm.objects.get(\n repo_name=ghwm.payload[\"repository\"][\"full_name\"]\n )\n except ObjectDoesNotExist:\n # Repository is not linked to algorithm\n return\n\n algorithm_image = AlgorithmImage.objects.create(\n algorithm=algorithm,\n requires_gpu=algorithm.image_requires_gpu,\n requires_memory_gb=algorithm.image_requires_memory_gb,\n )\n\n build = Build.objects.create(\n webhook_message=ghwm, algorithm_image=algorithm_image,\n )\n\n on_commit(\n lambda: wait_for_build_completion.apply_async(\n kwargs={\"build_pk\": str(build.pk)}\n )\n )\n\n\n@shared_task(bind=True, max_retries=120)\ndef wait_for_build_completion(self, *, build_pk):\n Build = apps.get_model( # noqa: N806\n app_label=\"codebuild\", model_name=\"Build\"\n )\n\n build = Build.objects.get(pk=build_pk)\n build.refresh_status()\n\n if build.status == build.BuildStatusChoices.IN_PROGRESS:\n self.retry(countdown=30)\n else:\n build.refresh_logs()\n build.save()\n if build.status == build.BuildStatusChoices.SUCCEEDED:\n on_commit(\n lambda: add_image_to_algorithm.apply_async(\n kwargs={\"build_pk\": str(build_pk)}\n )\n )\n\n\n@shared_task\ndef add_image_to_algorithm(*, build_pk):\n Build = apps.get_model( # noqa: N806\n app_label=\"codebuild\", model_name=\"Build\"\n )\n\n build = Build.objects.get(pk=build_pk)\n build.add_image_to_algorithm()\n", "path": "app/grandchallenge/codebuild/tasks.py"}]}
2,681
991
gh_patches_debug_30096
rasdani/github-patches
git_diff
SeldonIO__MLServer-464
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Install requirements system-wide when building custom images Currently, custom dependencies coming from a `requirements.txt` get installed on the base environment for user `1000` (i.e. the default non-`root` user in the image). Therefore, if the image is loaded with a different user ID, these custom dependencies are not available. To work around this, we should install these custom deps under `root`, so that they are installed system-wide and accessible by any user. </issue> <code> [start of runtimes/sklearn/mlserver_sklearn/encoding.py] 1 from dataclasses import dataclass 2 from typing import List, Union 3 4 import numpy as np 5 import pandas as pd 6 7 from mlserver.codecs import NumpyCodec, PandasCodec 8 from mlserver.errors import InferenceError 9 from mlserver.types import ResponseOutput 10 11 # TODO: more support! 12 SKLearnResponse = Union[np.ndarray, pd.DataFrame] 13 14 15 @dataclass 16 class SKLearnPayload: 17 """Class for keeping track of requested outputs 18 and corresponding model responses.""" 19 20 requested_output: ResponseOutput 21 model_output: SKLearnResponse 22 23 24 def to_outputs(sklearn_payloads: List[SKLearnPayload]) -> List[ResponseOutput]: 25 """ 26 Encodes a list of SKLearn payloads into a list of proto-able ResponseOutputs. 27 28 :param sklearn_payloads: List of requested outputs + the responses from the 29 SKLearn model 30 :return: response_outputs: List of encoded response outputs 31 :raises: InferenceError if multiple columnar responses were returned by the model 32 """ 33 response_outputs = [] 34 35 all_output_names = [p.requested_output.name for p in sklearn_payloads] 36 37 for payload in sklearn_payloads: 38 if _is_columnar_data(payload) and len(sklearn_payloads) > 1: 39 raise InferenceError( 40 f"{payload.requested_output.name} returned columnar data of type" 41 f" {type(payload.model_output)} and {all_output_names} were" 42 f" requested. Cannot encode multiple columnar data responses" 43 f" one response." 44 ) 45 46 if isinstance(payload.model_output, pd.DataFrame): 47 # Immediately return the outputs of columnar data encoding, 48 # don't try to jam more outputs together in one response. 
49 return PandasCodec.encode("some-model", payload.model_output).outputs 50 51 response_output = NumpyCodec.encode( 52 name=payload.requested_output.name, payload=payload.model_output 53 ) 54 response_outputs.append(response_output) 55 56 return response_outputs 57 58 59 def _is_columnar_data(payload: SKLearnPayload) -> bool: 60 return isinstance(payload.model_output, pd.DataFrame) 61 [end of runtimes/sklearn/mlserver_sklearn/encoding.py] [start of runtimes/sklearn/mlserver_sklearn/sklearn.py] 1 from typing import List 2 3 import joblib 4 from mlserver_sklearn import encoding 5 from mlserver_sklearn.encoding import SKLearnPayload 6 from sklearn.pipeline import Pipeline 7 8 from mlserver import types 9 from mlserver.codecs import NumpyRequestCodec 10 from mlserver.errors import InferenceError 11 from mlserver.model import MLModel 12 from mlserver.types import InferenceResponse 13 from mlserver.utils import get_model_uri 14 15 PREDICT_OUTPUT = "predict" 16 PREDICT_PROBA_OUTPUT = "predict_proba" 17 VALID_OUTPUTS = [PREDICT_OUTPUT, PREDICT_PROBA_OUTPUT] 18 19 WELLKNOWN_MODEL_FILENAMES = ["model.joblib", "model.pickle", "model.pkl"] 20 21 22 class SKLearnModel(MLModel): 23 """ 24 Implementation of the MLModel interface to load and serve `scikit-learn` 25 models persisted with `joblib`. 26 """ 27 28 async def load(self) -> bool: 29 # TODO: Log info message 30 model_uri = await get_model_uri( 31 self._settings, wellknown_filenames=WELLKNOWN_MODEL_FILENAMES 32 ) 33 self._model = joblib.load(model_uri) 34 35 self.ready = True 36 return self.ready 37 38 async def predict(self, payload: types.InferenceRequest) -> types.InferenceResponse: 39 payload = self._check_request(payload) 40 41 model_responses = self._get_model_outputs(payload) 42 43 return InferenceResponse( 44 model_name=self.name, 45 model_version=self.version, 46 outputs=encoding.to_outputs(sklearn_payloads=model_responses), 47 ) 48 49 def _check_request(self, payload: types.InferenceRequest) -> types.InferenceRequest: 50 if not payload.outputs: 51 # By default, only return the result of `predict()` 52 payload.outputs = [types.RequestOutput(name=PREDICT_OUTPUT)] 53 else: 54 for request_output in payload.outputs: 55 if request_output.name not in VALID_OUTPUTS: 56 raise InferenceError( 57 f"SKLearnModel only supports '{PREDICT_OUTPUT}' and " 58 f"'{PREDICT_PROBA_OUTPUT}' as outputs " 59 f"({request_output.name} was received)" 60 ) 61 62 # Regression models do not support `predict_proba` 63 if PREDICT_PROBA_OUTPUT in [o.name for o in payload.outputs]: 64 # Ensure model supports it 65 maybe_regressor = self._model 66 if isinstance(self._model, Pipeline): 67 maybe_regressor = maybe_regressor.steps[-1][-1] 68 69 if not hasattr(maybe_regressor, PREDICT_PROBA_OUTPUT): 70 raise InferenceError( 71 f"{type(maybe_regressor)} models do not support " 72 f"'{PREDICT_PROBA_OUTPUT}" 73 ) 74 75 return payload 76 77 def _get_model_outputs( 78 self, payload: types.InferenceRequest 79 ) -> List[SKLearnPayload]: 80 decoded_request = self.decode_request(payload, default_codec=NumpyRequestCodec) 81 82 outputs = [] 83 for request_output in payload.outputs: # type: ignore 84 predict_fn = getattr(self._model, request_output.name) 85 y = predict_fn(decoded_request) 86 payload = SKLearnPayload(requested_output=request_output, model_output=y) 87 outputs.append(payload) 88 89 return outputs 90 [end of runtimes/sklearn/mlserver_sklearn/sklearn.py] [start of mlserver/cli/constants.py] 1 DockerfileName = "Dockerfile" 2 DockerfileTemplate = """ 3 FROM 
continuumio/miniconda3:4.10.3 AS env-builder 4 SHELL ["/bin/bash", "-c"] 5 6 ARG MLSERVER_ENV_NAME="mlserver-custom-env" \\ 7 MLSERVER_ENV_TARBALL="./envs/base.tar.gz" 8 9 RUN conda config --add channels conda-forge && \\ 10 conda install conda-pack 11 12 # The `[]` character range will ensure that Docker doesn't complain if the 13 # files don't exist: 14 # https://stackoverflow.com/a/65138098/5015573 15 COPY \\ 16 ./environment.ym[l] \\ 17 ./environment.yam[l] \\ 18 ./conda.ym[l] \\ 19 ./conda.yam[l] \\ 20 . 21 RUN mkdir $(dirname $MLSERVER_ENV_TARBALL); \\ 22 for envFile in environment.yml environment.yaml conda.yml conda.yaml; do \\ 23 if [[ -f $envFile ]]; then \\ 24 conda env create \ 25 --name $MLSERVER_ENV_NAME \\ 26 --file $envFile; \\ 27 conda-pack \ 28 -n $MLSERVER_ENV_NAME \\ 29 -o $MLSERVER_ENV_TARBALL; \\ 30 fi \\ 31 done; \\ 32 chmod -R 776 $(dirname $MLSERVER_ENV_TARBALL) 33 34 FROM seldonio/mlserver:{version}-slim 35 SHELL ["/bin/bash", "-c"] 36 37 # Copy all potential sources for custom environments 38 COPY \\ 39 --chown=1000 \\ 40 --from=env-builder \\ 41 /envs/base.tar.g[z] \\ 42 ./envs/base.tar.gz 43 COPY \\ 44 ./settings.jso[n] \\ 45 ./model-settings.jso[n] \\ 46 ./requirements.tx[t] \\ 47 . 48 49 RUN ./hack/build-env.sh . ./envs/base 50 51 # Copy everything else 52 COPY . . 53 54 # Override MLServer's own `CMD` to activate the embedded environment 55 # (optionally activating the hot-loaded one as well). 56 CMD source ./hack/activate-env.sh ./envs/base.tar.gz ./envs/base && \\ 57 mlserver start $MLSERVER_MODELS_DIR 58 """ 59 60 DockerignoreName = ".dockerignore" 61 Dockerignore = """ 62 # Binaries for programs and plugins 63 *.exe 64 *.exe~ 65 *.dll 66 *.so 67 *.dylib 68 *.pyc 69 *.pyo 70 *.pyd 71 bin 72 73 # Mac file system 74 **/.DS_Store 75 76 # Python dev 77 __pycache__ 78 .Python 79 env 80 pip-log.txt 81 pip-delete-this-directory.txt 82 .mypy_cache 83 eggs/ 84 .eggs/ 85 *.egg-info/ 86 ./pytest_cache 87 .tox 88 build/ 89 dist/ 90 91 # Notebook Checkpoints 92 .ipynb_checkpoints 93 94 .coverage 95 .coverage.* 96 .cache 97 nosetests.xml 98 coverage.xml 99 *,cover 100 *.log 101 .git 102 """ 103 [end of mlserver/cli/constants.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mlserver/cli/constants.py b/mlserver/cli/constants.py --- a/mlserver/cli/constants.py +++ b/mlserver/cli/constants.py @@ -46,7 +46,13 @@ ./requirements.tx[t] \\ . -RUN ./hack/build-env.sh . ./envs/base +USER root +# Install dependencies system-wide, to ensure that they are available for every +# user +RUN ./hack/build-env.sh . ./envs/base && \ + chown -R 1000:0 ./envs/base && \\ + chmod -R 776 ./envs/base +USER 1000 # Copy everything else COPY . . diff --git a/runtimes/sklearn/mlserver_sklearn/encoding.py b/runtimes/sklearn/mlserver_sklearn/encoding.py --- a/runtimes/sklearn/mlserver_sklearn/encoding.py +++ b/runtimes/sklearn/mlserver_sklearn/encoding.py @@ -6,7 +6,7 @@ from mlserver.codecs import NumpyCodec, PandasCodec from mlserver.errors import InferenceError -from mlserver.types import ResponseOutput +from mlserver.types import RequestOutput, ResponseOutput # TODO: more support! SKLearnResponse = Union[np.ndarray, pd.DataFrame] @@ -17,7 +17,7 @@ """Class for keeping track of requested outputs and corresponding model responses.""" - requested_output: ResponseOutput + requested_output: RequestOutput model_output: SKLearnResponse diff --git a/runtimes/sklearn/mlserver_sklearn/sklearn.py b/runtimes/sklearn/mlserver_sklearn/sklearn.py --- a/runtimes/sklearn/mlserver_sklearn/sklearn.py +++ b/runtimes/sklearn/mlserver_sklearn/sklearn.py @@ -83,7 +83,7 @@ for request_output in payload.outputs: # type: ignore predict_fn = getattr(self._model, request_output.name) y = predict_fn(decoded_request) - payload = SKLearnPayload(requested_output=request_output, model_output=y) - outputs.append(payload) + output = SKLearnPayload(requested_output=request_output, model_output=y) + outputs.append(output) return outputs
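Besides the Dockerfile change (installing as `root`, then handing ownership of the environment back to UID 1000), the patch also fixes a variable-shadowing problem in `_get_model_outputs`, where the loop rebound `payload` and hid the function's own request argument. A generic, runnable illustration of why that rename matters follows; the names here are invented, not MLServer's.

```python
def collect_outputs(payload, requested_names):
    """Anti-pattern: rebinding the parameter name inside the loop means any
    later use of `payload` sees the last loop value, not the original request."""
    outputs = []
    for name in requested_names:
        payload = {"name": name, "length": len(name)}  # shadows the argument
        outputs.append(payload)
    return outputs


def collect_outputs_fixed(payload, requested_names):
    outputs = []
    for name in requested_names:
        output = {"name": name, "length": len(name)}  # distinct name, no shadowing
        outputs.append(output)
    return outputs


if __name__ == "__main__":
    request = {"inputs": ["dummy"]}
    print(collect_outputs(request, ["predict", "predict_proba"]))
    print(collect_outputs_fixed(request, ["predict", "predict_proba"]))
```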
{"golden_diff": "diff --git a/mlserver/cli/constants.py b/mlserver/cli/constants.py\n--- a/mlserver/cli/constants.py\n+++ b/mlserver/cli/constants.py\n@@ -46,7 +46,13 @@\n ./requirements.tx[t] \\\\\n .\n \n-RUN ./hack/build-env.sh . ./envs/base\n+USER root\n+# Install dependencies system-wide, to ensure that they are available for every\n+# user\n+RUN ./hack/build-env.sh . ./envs/base && \\\n+ chown -R 1000:0 ./envs/base && \\\\\n+ chmod -R 776 ./envs/base\n+USER 1000\n \n # Copy everything else\n COPY . .\ndiff --git a/runtimes/sklearn/mlserver_sklearn/encoding.py b/runtimes/sklearn/mlserver_sklearn/encoding.py\n--- a/runtimes/sklearn/mlserver_sklearn/encoding.py\n+++ b/runtimes/sklearn/mlserver_sklearn/encoding.py\n@@ -6,7 +6,7 @@\n \n from mlserver.codecs import NumpyCodec, PandasCodec\n from mlserver.errors import InferenceError\n-from mlserver.types import ResponseOutput\n+from mlserver.types import RequestOutput, ResponseOutput\n \n # TODO: more support!\n SKLearnResponse = Union[np.ndarray, pd.DataFrame]\n@@ -17,7 +17,7 @@\n \"\"\"Class for keeping track of requested outputs\n and corresponding model responses.\"\"\"\n \n- requested_output: ResponseOutput\n+ requested_output: RequestOutput\n model_output: SKLearnResponse\n \n \ndiff --git a/runtimes/sklearn/mlserver_sklearn/sklearn.py b/runtimes/sklearn/mlserver_sklearn/sklearn.py\n--- a/runtimes/sklearn/mlserver_sklearn/sklearn.py\n+++ b/runtimes/sklearn/mlserver_sklearn/sklearn.py\n@@ -83,7 +83,7 @@\n for request_output in payload.outputs: # type: ignore\n predict_fn = getattr(self._model, request_output.name)\n y = predict_fn(decoded_request)\n- payload = SKLearnPayload(requested_output=request_output, model_output=y)\n- outputs.append(payload)\n+ output = SKLearnPayload(requested_output=request_output, model_output=y)\n+ outputs.append(output)\n \n return outputs\n", "issue": "Install requirements system-wide when building custom images\nCurrently, custom dependencies coming from a `requirements.txt` get installed on the base environment for user `1000` (i.e. the default non-`root` user in the image). 
Therefore, if the image is loaded with a different user ID, these custom dependencies are not available.\r\n\r\nTo work around this, we should install these custom deps under `root`, so that they are installed system-wide and accessible by any user.\n", "before_files": [{"content": "from dataclasses import dataclass\nfrom typing import List, Union\n\nimport numpy as np\nimport pandas as pd\n\nfrom mlserver.codecs import NumpyCodec, PandasCodec\nfrom mlserver.errors import InferenceError\nfrom mlserver.types import ResponseOutput\n\n# TODO: more support!\nSKLearnResponse = Union[np.ndarray, pd.DataFrame]\n\n\n@dataclass\nclass SKLearnPayload:\n \"\"\"Class for keeping track of requested outputs\n and corresponding model responses.\"\"\"\n\n requested_output: ResponseOutput\n model_output: SKLearnResponse\n\n\ndef to_outputs(sklearn_payloads: List[SKLearnPayload]) -> List[ResponseOutput]:\n \"\"\"\n Encodes a list of SKLearn payloads into a list of proto-able ResponseOutputs.\n\n :param sklearn_payloads: List of requested outputs + the responses from the\n SKLearn model\n :return: response_outputs: List of encoded response outputs\n :raises: InferenceError if multiple columnar responses were returned by the model\n \"\"\"\n response_outputs = []\n\n all_output_names = [p.requested_output.name for p in sklearn_payloads]\n\n for payload in sklearn_payloads:\n if _is_columnar_data(payload) and len(sklearn_payloads) > 1:\n raise InferenceError(\n f\"{payload.requested_output.name} returned columnar data of type\"\n f\" {type(payload.model_output)} and {all_output_names} were\"\n f\" requested. Cannot encode multiple columnar data responses\"\n f\" one response.\"\n )\n\n if isinstance(payload.model_output, pd.DataFrame):\n # Immediately return the outputs of columnar data encoding,\n # don't try to jam more outputs together in one response.\n return PandasCodec.encode(\"some-model\", payload.model_output).outputs\n\n response_output = NumpyCodec.encode(\n name=payload.requested_output.name, payload=payload.model_output\n )\n response_outputs.append(response_output)\n\n return response_outputs\n\n\ndef _is_columnar_data(payload: SKLearnPayload) -> bool:\n return isinstance(payload.model_output, pd.DataFrame)\n", "path": "runtimes/sklearn/mlserver_sklearn/encoding.py"}, {"content": "from typing import List\n\nimport joblib\nfrom mlserver_sklearn import encoding\nfrom mlserver_sklearn.encoding import SKLearnPayload\nfrom sklearn.pipeline import Pipeline\n\nfrom mlserver import types\nfrom mlserver.codecs import NumpyRequestCodec\nfrom mlserver.errors import InferenceError\nfrom mlserver.model import MLModel\nfrom mlserver.types import InferenceResponse\nfrom mlserver.utils import get_model_uri\n\nPREDICT_OUTPUT = \"predict\"\nPREDICT_PROBA_OUTPUT = \"predict_proba\"\nVALID_OUTPUTS = [PREDICT_OUTPUT, PREDICT_PROBA_OUTPUT]\n\nWELLKNOWN_MODEL_FILENAMES = [\"model.joblib\", \"model.pickle\", \"model.pkl\"]\n\n\nclass SKLearnModel(MLModel):\n \"\"\"\n Implementation of the MLModel interface to load and serve `scikit-learn`\n models persisted with `joblib`.\n \"\"\"\n\n async def load(self) -> bool:\n # TODO: Log info message\n model_uri = await get_model_uri(\n self._settings, wellknown_filenames=WELLKNOWN_MODEL_FILENAMES\n )\n self._model = joblib.load(model_uri)\n\n self.ready = True\n return self.ready\n\n async def predict(self, payload: types.InferenceRequest) -> types.InferenceResponse:\n payload = self._check_request(payload)\n\n model_responses = self._get_model_outputs(payload)\n\n return 
InferenceResponse(\n model_name=self.name,\n model_version=self.version,\n outputs=encoding.to_outputs(sklearn_payloads=model_responses),\n )\n\n def _check_request(self, payload: types.InferenceRequest) -> types.InferenceRequest:\n if not payload.outputs:\n # By default, only return the result of `predict()`\n payload.outputs = [types.RequestOutput(name=PREDICT_OUTPUT)]\n else:\n for request_output in payload.outputs:\n if request_output.name not in VALID_OUTPUTS:\n raise InferenceError(\n f\"SKLearnModel only supports '{PREDICT_OUTPUT}' and \"\n f\"'{PREDICT_PROBA_OUTPUT}' as outputs \"\n f\"({request_output.name} was received)\"\n )\n\n # Regression models do not support `predict_proba`\n if PREDICT_PROBA_OUTPUT in [o.name for o in payload.outputs]:\n # Ensure model supports it\n maybe_regressor = self._model\n if isinstance(self._model, Pipeline):\n maybe_regressor = maybe_regressor.steps[-1][-1]\n\n if not hasattr(maybe_regressor, PREDICT_PROBA_OUTPUT):\n raise InferenceError(\n f\"{type(maybe_regressor)} models do not support \"\n f\"'{PREDICT_PROBA_OUTPUT}\"\n )\n\n return payload\n\n def _get_model_outputs(\n self, payload: types.InferenceRequest\n ) -> List[SKLearnPayload]:\n decoded_request = self.decode_request(payload, default_codec=NumpyRequestCodec)\n\n outputs = []\n for request_output in payload.outputs: # type: ignore\n predict_fn = getattr(self._model, request_output.name)\n y = predict_fn(decoded_request)\n payload = SKLearnPayload(requested_output=request_output, model_output=y)\n outputs.append(payload)\n\n return outputs\n", "path": "runtimes/sklearn/mlserver_sklearn/sklearn.py"}, {"content": "DockerfileName = \"Dockerfile\"\nDockerfileTemplate = \"\"\"\nFROM continuumio/miniconda3:4.10.3 AS env-builder\nSHELL [\"/bin/bash\", \"-c\"]\n\nARG MLSERVER_ENV_NAME=\"mlserver-custom-env\" \\\\\n MLSERVER_ENV_TARBALL=\"./envs/base.tar.gz\"\n\nRUN conda config --add channels conda-forge && \\\\\n conda install conda-pack\n\n# The `[]` character range will ensure that Docker doesn't complain if the\n# files don't exist:\n# https://stackoverflow.com/a/65138098/5015573\nCOPY \\\\\n ./environment.ym[l] \\\\\n ./environment.yam[l] \\\\\n ./conda.ym[l] \\\\\n ./conda.yam[l] \\\\\n .\nRUN mkdir $(dirname $MLSERVER_ENV_TARBALL); \\\\\n for envFile in environment.yml environment.yaml conda.yml conda.yaml; do \\\\\n if [[ -f $envFile ]]; then \\\\\n conda env create \\\n --name $MLSERVER_ENV_NAME \\\\\n --file $envFile; \\\\\n conda-pack \\\n -n $MLSERVER_ENV_NAME \\\\\n -o $MLSERVER_ENV_TARBALL; \\\\\n fi \\\\\n done; \\\\\n chmod -R 776 $(dirname $MLSERVER_ENV_TARBALL)\n\nFROM seldonio/mlserver:{version}-slim\nSHELL [\"/bin/bash\", \"-c\"]\n\n# Copy all potential sources for custom environments\nCOPY \\\\\n --chown=1000 \\\\\n --from=env-builder \\\\\n /envs/base.tar.g[z] \\\\\n ./envs/base.tar.gz\nCOPY \\\\\n ./settings.jso[n] \\\\\n ./model-settings.jso[n] \\\\\n ./requirements.tx[t] \\\\\n .\n\nRUN ./hack/build-env.sh . ./envs/base\n\n# Copy everything else\nCOPY . 
.\n\n# Override MLServer's own `CMD` to activate the embedded environment\n# (optionally activating the hot-loaded one as well).\nCMD source ./hack/activate-env.sh ./envs/base.tar.gz ./envs/base && \\\\\n mlserver start $MLSERVER_MODELS_DIR\n\"\"\"\n\nDockerignoreName = \".dockerignore\"\nDockerignore = \"\"\"\n# Binaries for programs and plugins\n*.exe\n*.exe~\n*.dll\n*.so\n*.dylib\n*.pyc\n*.pyo\n*.pyd\nbin\n\n# Mac file system\n**/.DS_Store\n\n# Python dev\n__pycache__\n.Python\nenv\npip-log.txt\npip-delete-this-directory.txt\n.mypy_cache\neggs/\n.eggs/\n*.egg-info/\n./pytest_cache\n.tox\nbuild/\ndist/\n\n# Notebook Checkpoints\n.ipynb_checkpoints\n\n.coverage\n.coverage.*\n.cache\nnosetests.xml\ncoverage.xml\n*,cover\n*.log\n.git\n\"\"\"\n", "path": "mlserver/cli/constants.py"}]}
2,943
508
gh_patches_debug_11273
rasdani/github-patches
git_diff
pre-commit__pre-commit-387
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> do not recommend `pre-commit autoupdate` on failure of `pre-commit autoupdate` It would be preferable to recommend something that has chance of fixing the problem instruction to reproduce in #385 </issue> <code> [start of pre_commit/repository.py] 1 from __future__ import unicode_literals 2 3 import io 4 import json 5 import logging 6 import os 7 import shutil 8 from collections import defaultdict 9 10 import pkg_resources 11 from cached_property import cached_property 12 13 from pre_commit import five 14 from pre_commit import git 15 from pre_commit.clientlib.validate_config import is_local_hooks 16 from pre_commit.clientlib.validate_manifest import MANIFEST_JSON_SCHEMA 17 from pre_commit.jsonschema_extensions import apply_defaults 18 from pre_commit.languages.all import languages 19 from pre_commit.languages.helpers import environment_dir 20 from pre_commit.manifest import Manifest 21 from pre_commit.prefixed_command_runner import PrefixedCommandRunner 22 23 24 logger = logging.getLogger('pre_commit') 25 26 _pre_commit_version = pkg_resources.parse_version( 27 pkg_resources.get_distribution('pre-commit').version 28 ) 29 30 # Bump when installation changes in a backwards / forwards incompatible way 31 INSTALLED_STATE_VERSION = '1' 32 33 34 class Repository(object): 35 def __init__(self, repo_config, repo_path_getter): 36 self.repo_config = repo_config 37 self.repo_path_getter = repo_path_getter 38 self.__installed = False 39 40 @classmethod 41 def create(cls, config, store): 42 if is_local_hooks(config): 43 return LocalRepository(config) 44 else: 45 repo_path_getter = store.get_repo_path_getter( 46 config['repo'], config['sha'] 47 ) 48 return cls(config, repo_path_getter) 49 50 @cached_property 51 def repo_url(self): 52 return self.repo_config['repo'] 53 54 @cached_property 55 def sha(self): 56 return self.repo_config['sha'] 57 58 @cached_property 59 def languages(self): 60 return set( 61 (hook['language'], hook['language_version']) 62 for _, hook in self.hooks 63 ) 64 65 @cached_property 66 def additional_dependencies(self): 67 dep_dict = defaultdict(lambda: defaultdict(set)) 68 for _, hook in self.hooks: 69 dep_dict[hook['language']][hook['language_version']].update( 70 hook.get('additional_dependencies', []), 71 ) 72 return dep_dict 73 74 @cached_property 75 def hooks(self): 76 for hook in self.repo_config['hooks']: 77 if hook['id'] not in self.manifest.hooks: 78 logger.error( 79 '`{0}` is not present in repository {1}. ' 80 'Typo? Perhaps it is introduced in a newer version? ' 81 'Often `pre-commit autoupdate` fixes this.'.format( 82 hook['id'], self.repo_config['repo'], 83 ) 84 ) 85 exit(1) 86 hook_version = pkg_resources.parse_version( 87 self.manifest.hooks[hook['id']]['minimum_pre_commit_version'], 88 ) 89 if hook_version > _pre_commit_version: 90 logger.error( 91 'The hook `{0}` requires pre-commit version {1} but ' 92 'version {2} is installed. 
' 93 'Perhaps run `pip install --upgrade pre-commit`.'.format( 94 hook['id'], hook_version, _pre_commit_version, 95 ) 96 ) 97 exit(1) 98 return tuple( 99 (hook['id'], dict(self.manifest.hooks[hook['id']], **hook)) 100 for hook in self.repo_config['hooks'] 101 ) 102 103 @cached_property 104 def manifest(self): 105 return Manifest(self.repo_path_getter) 106 107 @cached_property 108 def cmd_runner(self): 109 return PrefixedCommandRunner(self.repo_path_getter.repo_path) 110 111 def require_installed(self): 112 if self.__installed: 113 return 114 115 self.install() 116 self.__installed = True 117 118 def install(self): 119 """Install the hook repository.""" 120 def state(language_name, language_version): 121 return { 122 'additional_dependencies': sorted( 123 self.additional_dependencies[ 124 language_name 125 ][language_version], 126 ) 127 } 128 129 def state_filename(venv, suffix=''): 130 return self.cmd_runner.path( 131 venv, '.install_state_v' + INSTALLED_STATE_VERSION + suffix, 132 ) 133 134 def read_state(venv): 135 if not os.path.exists(state_filename(venv)): 136 return None 137 else: 138 return json.loads(io.open(state_filename(venv)).read()) 139 140 def write_state(venv, language_name, language_version): 141 with io.open( 142 state_filename(venv, suffix='staging'), 'w', 143 ) as state_file: 144 state_file.write(five.to_text(json.dumps( 145 state(language_name, language_version), 146 ))) 147 # Move the file into place atomically to indicate we've installed 148 os.rename( 149 state_filename(venv, suffix='staging'), 150 state_filename(venv), 151 ) 152 153 def language_is_installed(language_name, language_version): 154 language = languages[language_name] 155 venv = environment_dir(language.ENVIRONMENT_DIR, language_version) 156 return ( 157 venv is None or 158 read_state(venv) == state(language_name, language_version) 159 ) 160 161 if not all( 162 language_is_installed(language_name, language_version) 163 for language_name, language_version in self.languages 164 ): 165 logger.info( 166 'Installing environment for {0}.'.format(self.repo_url) 167 ) 168 logger.info('Once installed this environment will be reused.') 169 logger.info('This may take a few minutes...') 170 171 for language_name, language_version in self.languages: 172 if language_is_installed(language_name, language_version): 173 continue 174 175 language = languages[language_name] 176 venv = environment_dir(language.ENVIRONMENT_DIR, language_version) 177 178 # There's potentially incomplete cleanup from previous runs 179 # Clean it up! 180 if self.cmd_runner.exists(venv): 181 shutil.rmtree(self.cmd_runner.path(venv)) 182 183 language.install_environment( 184 self.cmd_runner, language_version, 185 self.additional_dependencies[language_name][language_version], 186 ) 187 # Write our state to indicate we're installed 188 write_state(venv, language_name, language_version) 189 190 def run_hook(self, hook, file_args): 191 """Run a hook. 
192 193 Args: 194 hook - Hook dictionary 195 file_args - List of files to run 196 """ 197 self.require_installed() 198 return languages[hook['language']].run_hook( 199 self.cmd_runner, hook, file_args, 200 ) 201 202 203 class LocalRepository(Repository): 204 def __init__(self, repo_config): 205 super(LocalRepository, self).__init__(repo_config, None) 206 207 @cached_property 208 def hooks(self): 209 return tuple( 210 (hook['id'], apply_defaults(hook, MANIFEST_JSON_SCHEMA['items'])) 211 for hook in self.repo_config['hooks'] 212 ) 213 214 @cached_property 215 def cmd_runner(self): 216 return PrefixedCommandRunner(git.get_root()) 217 218 @cached_property 219 def sha(self): 220 raise NotImplementedError 221 222 @cached_property 223 def manifest(self): 224 raise NotImplementedError 225 [end of pre_commit/repository.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pre_commit/repository.py b/pre_commit/repository.py --- a/pre_commit/repository.py +++ b/pre_commit/repository.py @@ -78,7 +78,9 @@ logger.error( '`{0}` is not present in repository {1}. ' 'Typo? Perhaps it is introduced in a newer version? ' - 'Often `pre-commit autoupdate` fixes this.'.format( + 'Often you can fix this by removing the hook, running ' + '`pre-commit autoupdate`, ' + 'and then adding the hook.'.format( hook['id'], self.repo_config['repo'], ) )
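A minimal, self-contained sketch (not taken from the pre-commit codebase; the manifest contents and hook id are invented) of the lookup that the diff above rewords. Per the issue, the same error also fires while `pre-commit autoupdate` itself runs, so the old advice could not resolve it; the new message spells out the remove, autoupdate, re-add sequence instead.

```python
# Hypothetical manifest and config data; only the lookup-and-report logic follows the diff.
manifest_hooks = {"flake8": {}, "trailing-whitespace": {}}
configured_hooks = [{"id": "flake9", "repo": "https://github.com/pre-commit/pre-commit-hooks"}]

for hook in configured_hooks:
    if hook["id"] not in manifest_hooks:
        print(
            "`{0}` is not present in repository {1}. "
            "Typo? Perhaps it is introduced in a newer version? "
            "Often you can fix this by removing the hook, running "
            "`pre-commit autoupdate`, and then adding the hook.".format(hook["id"], hook["repo"])
        )
```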
{"golden_diff": "diff --git a/pre_commit/repository.py b/pre_commit/repository.py\n--- a/pre_commit/repository.py\n+++ b/pre_commit/repository.py\n@@ -78,7 +78,9 @@\n logger.error(\n '`{0}` is not present in repository {1}. '\n 'Typo? Perhaps it is introduced in a newer version? '\n- 'Often `pre-commit autoupdate` fixes this.'.format(\n+ 'Often you can fix this by removing the hook, running '\n+ '`pre-commit autoupdate`, '\n+ 'and then adding the hook.'.format(\n hook['id'], self.repo_config['repo'],\n )\n )\n", "issue": "do not recommend `pre-commit autoupdate` on failure of `pre-commit autoupdate` \nIt would be preferable to recommend something that has chance of fixing the problem\n\ninstruction to reproduce in #385\n\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport io\nimport json\nimport logging\nimport os\nimport shutil\nfrom collections import defaultdict\n\nimport pkg_resources\nfrom cached_property import cached_property\n\nfrom pre_commit import five\nfrom pre_commit import git\nfrom pre_commit.clientlib.validate_config import is_local_hooks\nfrom pre_commit.clientlib.validate_manifest import MANIFEST_JSON_SCHEMA\nfrom pre_commit.jsonschema_extensions import apply_defaults\nfrom pre_commit.languages.all import languages\nfrom pre_commit.languages.helpers import environment_dir\nfrom pre_commit.manifest import Manifest\nfrom pre_commit.prefixed_command_runner import PrefixedCommandRunner\n\n\nlogger = logging.getLogger('pre_commit')\n\n_pre_commit_version = pkg_resources.parse_version(\n pkg_resources.get_distribution('pre-commit').version\n)\n\n# Bump when installation changes in a backwards / forwards incompatible way\nINSTALLED_STATE_VERSION = '1'\n\n\nclass Repository(object):\n def __init__(self, repo_config, repo_path_getter):\n self.repo_config = repo_config\n self.repo_path_getter = repo_path_getter\n self.__installed = False\n\n @classmethod\n def create(cls, config, store):\n if is_local_hooks(config):\n return LocalRepository(config)\n else:\n repo_path_getter = store.get_repo_path_getter(\n config['repo'], config['sha']\n )\n return cls(config, repo_path_getter)\n\n @cached_property\n def repo_url(self):\n return self.repo_config['repo']\n\n @cached_property\n def sha(self):\n return self.repo_config['sha']\n\n @cached_property\n def languages(self):\n return set(\n (hook['language'], hook['language_version'])\n for _, hook in self.hooks\n )\n\n @cached_property\n def additional_dependencies(self):\n dep_dict = defaultdict(lambda: defaultdict(set))\n for _, hook in self.hooks:\n dep_dict[hook['language']][hook['language_version']].update(\n hook.get('additional_dependencies', []),\n )\n return dep_dict\n\n @cached_property\n def hooks(self):\n for hook in self.repo_config['hooks']:\n if hook['id'] not in self.manifest.hooks:\n logger.error(\n '`{0}` is not present in repository {1}. '\n 'Typo? Perhaps it is introduced in a newer version? '\n 'Often `pre-commit autoupdate` fixes this.'.format(\n hook['id'], self.repo_config['repo'],\n )\n )\n exit(1)\n hook_version = pkg_resources.parse_version(\n self.manifest.hooks[hook['id']]['minimum_pre_commit_version'],\n )\n if hook_version > _pre_commit_version:\n logger.error(\n 'The hook `{0}` requires pre-commit version {1} but '\n 'version {2} is installed. 
'\n 'Perhaps run `pip install --upgrade pre-commit`.'.format(\n hook['id'], hook_version, _pre_commit_version,\n )\n )\n exit(1)\n return tuple(\n (hook['id'], dict(self.manifest.hooks[hook['id']], **hook))\n for hook in self.repo_config['hooks']\n )\n\n @cached_property\n def manifest(self):\n return Manifest(self.repo_path_getter)\n\n @cached_property\n def cmd_runner(self):\n return PrefixedCommandRunner(self.repo_path_getter.repo_path)\n\n def require_installed(self):\n if self.__installed:\n return\n\n self.install()\n self.__installed = True\n\n def install(self):\n \"\"\"Install the hook repository.\"\"\"\n def state(language_name, language_version):\n return {\n 'additional_dependencies': sorted(\n self.additional_dependencies[\n language_name\n ][language_version],\n )\n }\n\n def state_filename(venv, suffix=''):\n return self.cmd_runner.path(\n venv, '.install_state_v' + INSTALLED_STATE_VERSION + suffix,\n )\n\n def read_state(venv):\n if not os.path.exists(state_filename(venv)):\n return None\n else:\n return json.loads(io.open(state_filename(venv)).read())\n\n def write_state(venv, language_name, language_version):\n with io.open(\n state_filename(venv, suffix='staging'), 'w',\n ) as state_file:\n state_file.write(five.to_text(json.dumps(\n state(language_name, language_version),\n )))\n # Move the file into place atomically to indicate we've installed\n os.rename(\n state_filename(venv, suffix='staging'),\n state_filename(venv),\n )\n\n def language_is_installed(language_name, language_version):\n language = languages[language_name]\n venv = environment_dir(language.ENVIRONMENT_DIR, language_version)\n return (\n venv is None or\n read_state(venv) == state(language_name, language_version)\n )\n\n if not all(\n language_is_installed(language_name, language_version)\n for language_name, language_version in self.languages\n ):\n logger.info(\n 'Installing environment for {0}.'.format(self.repo_url)\n )\n logger.info('Once installed this environment will be reused.')\n logger.info('This may take a few minutes...')\n\n for language_name, language_version in self.languages:\n if language_is_installed(language_name, language_version):\n continue\n\n language = languages[language_name]\n venv = environment_dir(language.ENVIRONMENT_DIR, language_version)\n\n # There's potentially incomplete cleanup from previous runs\n # Clean it up!\n if self.cmd_runner.exists(venv):\n shutil.rmtree(self.cmd_runner.path(venv))\n\n language.install_environment(\n self.cmd_runner, language_version,\n self.additional_dependencies[language_name][language_version],\n )\n # Write our state to indicate we're installed\n write_state(venv, language_name, language_version)\n\n def run_hook(self, hook, file_args):\n \"\"\"Run a hook.\n\n Args:\n hook - Hook dictionary\n file_args - List of files to run\n \"\"\"\n self.require_installed()\n return languages[hook['language']].run_hook(\n self.cmd_runner, hook, file_args,\n )\n\n\nclass LocalRepository(Repository):\n def __init__(self, repo_config):\n super(LocalRepository, self).__init__(repo_config, None)\n\n @cached_property\n def hooks(self):\n return tuple(\n (hook['id'], apply_defaults(hook, MANIFEST_JSON_SCHEMA['items']))\n for hook in self.repo_config['hooks']\n )\n\n @cached_property\n def cmd_runner(self):\n return PrefixedCommandRunner(git.get_root())\n\n @cached_property\n def sha(self):\n raise NotImplementedError\n\n @cached_property\n def manifest(self):\n raise NotImplementedError\n", "path": "pre_commit/repository.py"}]}
2,602
142
gh_patches_debug_36558
rasdani/github-patches
git_diff
getsentry__sentry-23023
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Create comment when linking to existing Jira issue ## Summary It would be nice if when linking to an existing Jira issue Sentry created a comment with a link back to the Sentry issue. ## Motivation - Sentry already creates a backlink when creating a _new_ Jira issue - Being able to navigate from Jira issue to the original Sentry issue is just as important and useful as navigating from Sentry to Jira. ## Additional Context When creating a new Jira issue, there is a backlink: ![image](https://user-images.githubusercontent.com/52021/98541556-ae574a00-22a0-11eb-9711-4c7f1ef3dbc6.png) ![image](https://user-images.githubusercontent.com/52021/98541575-b31bfe00-22a0-11eb-89dc-0c057ee1519b.png) ![image](https://user-images.githubusercontent.com/52021/98541590-b911df00-22a0-11eb-902f-49981ccbf1bb.png) When linking to an existing Jira issue, there is no comment and no backlink. We are using on-premises Jira Server. </issue> <code> [start of src/sentry/integrations/jira_server/integration.py] 1 import logging 2 3 from cryptography.hazmat.primitives.serialization import load_pem_private_key 4 from cryptography.hazmat.backends import default_backend 5 from django import forms 6 from django.core.urlresolvers import reverse 7 from django.core.validators import URLValidator 8 from django.utils.translation import ugettext as _ 9 from django.views.decorators.csrf import csrf_exempt 10 from urllib.parse import urlparse 11 12 from sentry.integrations import ( 13 IntegrationFeatures, 14 IntegrationProvider, 15 IntegrationMetadata, 16 FeatureDescription, 17 ) 18 from sentry.shared_integrations.exceptions import IntegrationError, ApiError 19 from sentry.integrations.jira import JiraIntegration 20 from sentry.pipeline import PipelineView 21 from sentry.utils.hashlib import sha1_text 22 from sentry.utils.decorators import classproperty 23 from sentry.web.helpers import render_to_response 24 from .client import JiraServer, JiraServerSetupClient, JiraServerClient 25 26 27 logger = logging.getLogger("sentry.integrations.jira_server") 28 29 DESCRIPTION = """ 30 Connect your Sentry organization into one or more of your Jira Server instances. 31 Get started streamlining your bug squashing workflow by unifying your Sentry and 32 Jira instances together. 33 """ 34 35 FEATURE_DESCRIPTIONS = [ 36 FeatureDescription( 37 """ 38 Create and link Sentry issue groups directly to a Jira ticket in any of your 39 projects, providing a quick way to jump from Sentry bug to tracked ticket! 40 """, 41 IntegrationFeatures.ISSUE_BASIC, 42 ), 43 FeatureDescription( 44 """ 45 Automatically synchronize assignees to and from Jira. Don't get confused 46 who's fixing what, let us handle ensuring your issues and tickets match up 47 to your Sentry and Jira assignees. 48 """, 49 IntegrationFeatures.ISSUE_SYNC, 50 ), 51 FeatureDescription( 52 """ 53 Synchronize Comments on Sentry Issues directly to the linked Jira ticket. 54 """, 55 IntegrationFeatures.ISSUE_SYNC, 56 ), 57 ] 58 59 setup_alert = { 60 "type": "warning", 61 "icon": "icon-warning-sm", 62 "text": "Your Jira instance must be able to communicate with Sentry." 
63 " Sentry makes outbound requests from a [static set of IP" 64 " addresses](https://docs.sentry.io/ip-ranges/) that you may wish" 65 " to allow in your firewall to support this integration.", 66 } 67 68 69 metadata = IntegrationMetadata( 70 description=_(DESCRIPTION.strip()), 71 features=FEATURE_DESCRIPTIONS, 72 author="The Sentry Team", 73 noun=_("Installation"), 74 issue_url="https://github.com/getsentry/sentry/issues/new?assignees=&labels=Component:%20Integrations&template=bug_report.md&title=Jira%20Server%20Integration%20Problem", 75 source_url="https://github.com/getsentry/sentry/tree/master/src/sentry/integrations/jira_server", 76 aspects={"alerts": [setup_alert]}, 77 ) 78 79 80 class InstallationForm(forms.Form): 81 url = forms.CharField( 82 label=_("Jira URL"), 83 help_text=_("The base URL for your Jira Server instance, including the host and protocol."), 84 widget=forms.TextInput(attrs={"placeholder": "https://jira.example.com"}), 85 validators=[URLValidator()], 86 ) 87 verify_ssl = forms.BooleanField( 88 label=_("Verify SSL"), 89 help_text=_( 90 "By default, we verify SSL certificates " "when making requests to your Jira instance." 91 ), 92 widget=forms.CheckboxInput(), 93 required=False, 94 initial=True, 95 ) 96 consumer_key = forms.CharField( 97 label=_("Jira Consumer Key"), 98 widget=forms.TextInput(attrs={"placeholder": "sentry-consumer-key"}), 99 ) 100 private_key = forms.CharField( 101 label=_("Jira Consumer Private Key"), 102 widget=forms.Textarea( 103 attrs={ 104 "placeholder": "-----BEGIN RSA PRIVATE KEY-----\n...\n-----END RSA PRIVATE KEY-----" 105 } 106 ), 107 ) 108 109 def clean_url(self): 110 """Strip off trailing / as they cause invalid URLs downstream""" 111 return self.cleaned_data["url"].rstrip("/") 112 113 def clean_private_key(self): 114 data = self.cleaned_data["private_key"] 115 116 try: 117 load_pem_private_key(data.encode("utf-8"), None, default_backend()) 118 except Exception: 119 raise forms.ValidationError( 120 "Private key must be a valid SSH private key encoded in a PEM format." 121 ) 122 return data 123 124 def clean_consumer_key(self): 125 data = self.cleaned_data["consumer_key"] 126 if len(data) > 200: 127 raise forms.ValidationError("Consumer key is limited to 200 characters.") 128 return data 129 130 131 class InstallationConfigView(PipelineView): 132 """ 133 Collect the OAuth client credentials from the user. 134 """ 135 136 def dispatch(self, request, pipeline): 137 if request.method == "POST": 138 form = InstallationForm(request.POST) 139 if form.is_valid(): 140 form_data = form.cleaned_data 141 142 pipeline.bind_state("installation_data", form_data) 143 return pipeline.next_step() 144 else: 145 form = InstallationForm() 146 147 return render_to_response( 148 template="sentry/integrations/jira-server-config.html", 149 context={"form": form}, 150 request=request, 151 ) 152 153 154 class OAuthLoginView(PipelineView): 155 """ 156 Start the OAuth dance by creating a request token 157 and redirecting the user to approve it. 
158 """ 159 160 @csrf_exempt 161 def dispatch(self, request, pipeline): 162 if "oauth_token" in request.GET: 163 return pipeline.next_step() 164 165 config = pipeline.fetch_state("installation_data") 166 client = JiraServerSetupClient( 167 config.get("url"), 168 config.get("consumer_key"), 169 config.get("private_key"), 170 config.get("verify_ssl"), 171 ) 172 try: 173 request_token = client.get_request_token() 174 pipeline.bind_state("request_token", request_token) 175 authorize_url = client.get_authorize_url(request_token) 176 177 return self.redirect(authorize_url) 178 except ApiError as error: 179 logger.info( 180 "identity.jira-server.request-token", 181 extra={"url": config.get("url"), "error": error}, 182 ) 183 return pipeline.error("Could not fetch a request token from Jira. %s" % error) 184 185 186 class OAuthCallbackView(PipelineView): 187 """ 188 Complete the OAuth dance by exchanging our request token 189 into an access token. 190 """ 191 192 @csrf_exempt 193 def dispatch(self, request, pipeline): 194 config = pipeline.fetch_state("installation_data") 195 client = JiraServerSetupClient( 196 config.get("url"), 197 config.get("consumer_key"), 198 config.get("private_key"), 199 config.get("verify_ssl"), 200 ) 201 202 try: 203 access_token = client.get_access_token( 204 pipeline.fetch_state("request_token"), request.GET["oauth_token"] 205 ) 206 pipeline.bind_state("access_token", access_token) 207 208 return pipeline.next_step() 209 except ApiError as error: 210 logger.info("identity.jira-server.access-token", extra={"error": error}) 211 return pipeline.error("Could not fetch an access token from Jira") 212 213 214 class JiraServerIntegration(JiraIntegration): 215 """ 216 IntegrationInstallation implementation for Jira-Server 217 """ 218 219 default_identity = None 220 221 @classproperty 222 def use_email_scope(cls): 223 # jira server doesn't need the email scope since it's not restricted by GDPR 224 return False 225 226 def get_client(self): 227 if self.default_identity is None: 228 self.default_identity = self.get_default_identity() 229 230 return JiraServerClient( 231 self.model.metadata["base_url"], 232 JiraServer(self.default_identity.data), 233 self.model.metadata["verify_ssl"], 234 ) 235 236 def get_link_issue_config(self, group, **kwargs): 237 fields = super(JiraIntegration, self).get_link_issue_config(group, **kwargs) 238 org = group.organization 239 autocomplete_url = reverse( 240 "sentry-extensions-jiraserver-search", args=[org.slug, self.model.id] 241 ) 242 for field in fields: 243 if field["name"] == "externalIssue": 244 field["url"] = autocomplete_url 245 field["type"] = "select" 246 return fields 247 248 def search_url(self, org_slug): 249 return reverse("sentry-extensions-jiraserver-search", args=[org_slug, self.model.id]) 250 251 252 class JiraServerIntegrationProvider(IntegrationProvider): 253 key = "jira_server" 254 name = "Jira Server" 255 metadata = metadata 256 integration_cls = JiraServerIntegration 257 258 needs_default_identity = True 259 260 can_add = True 261 262 features = frozenset([IntegrationFeatures.ISSUE_BASIC, IntegrationFeatures.ISSUE_SYNC]) 263 264 setup_dialog_config = {"width": 1030, "height": 1000} 265 266 def get_pipeline_views(self): 267 return [InstallationConfigView(), OAuthLoginView(), OAuthCallbackView()] 268 269 def build_integration(self, state): 270 install = state["installation_data"] 271 access_token = state["access_token"] 272 273 webhook_secret = sha1_text(install["private_key"]).hexdigest() 274 275 hostname = 
urlparse(install["url"]).netloc 276 external_id = "{}:{}".format(hostname, install["consumer_key"])[:64] 277 278 credentials = { 279 "consumer_key": install["consumer_key"], 280 "private_key": install["private_key"], 281 "access_token": access_token["oauth_token"], 282 "access_token_secret": access_token["oauth_token_secret"], 283 } 284 # Create the webhook before the integration record exists 285 # so that if it fails we don't persist a broken integration. 286 self.create_webhook(external_id, webhook_secret, install, credentials) 287 288 return { 289 "name": install["consumer_key"], 290 "provider": "jira_server", 291 "external_id": external_id, 292 "metadata": { 293 "base_url": install["url"], 294 "domain_name": hostname, 295 "verify_ssl": install["verify_ssl"], 296 "webhook_secret": webhook_secret, 297 }, 298 "user_identity": { 299 "type": "jira_server", 300 "external_id": external_id, 301 "scopes": [], 302 "data": credentials, 303 }, 304 } 305 306 def create_webhook(self, external_id, webhook_secret, install, credentials): 307 client = JiraServerSetupClient( 308 install["url"], install["consumer_key"], install["private_key"], install["verify_ssl"] 309 ) 310 try: 311 client.create_issue_webhook(external_id, webhook_secret, credentials) 312 except ApiError as err: 313 logger.info( 314 "jira-server.webhook.failed", 315 extra={"error": str(err), "external_id": external_id}, 316 ) 317 try: 318 details = next(x for x in err.json["messages"][0].values()) 319 except (KeyError, TypeError, StopIteration): 320 details = "" 321 message = f"Could not create issue webhook in Jira. {details}" 322 raise IntegrationError(message) 323 [end of src/sentry/integrations/jira_server/integration.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/sentry/integrations/jira_server/integration.py b/src/sentry/integrations/jira_server/integration.py --- a/src/sentry/integrations/jira_server/integration.py +++ b/src/sentry/integrations/jira_server/integration.py @@ -18,8 +18,9 @@ from sentry.shared_integrations.exceptions import IntegrationError, ApiError from sentry.integrations.jira import JiraIntegration from sentry.pipeline import PipelineView -from sentry.utils.hashlib import sha1_text from sentry.utils.decorators import classproperty +from sentry.utils.hashlib import sha1_text +from sentry.utils.http import absolute_uri from sentry.web.helpers import render_to_response from .client import JiraServer, JiraServerSetupClient, JiraServerClient @@ -234,7 +235,8 @@ ) def get_link_issue_config(self, group, **kwargs): - fields = super(JiraIntegration, self).get_link_issue_config(group, **kwargs) + fields = super().get_link_issue_config(group, **kwargs) + org = group.organization autocomplete_url = reverse( "sentry-extensions-jiraserver-search", args=[org.slug, self.model.id] @@ -243,11 +245,35 @@ if field["name"] == "externalIssue": field["url"] = autocomplete_url field["type"] = "select" + + default_comment = "Linked Sentry Issue: [{}|{}]".format( + group.qualified_short_id, + absolute_uri(group.get_absolute_url(params={"referrer": "jira_server"})), + ) + fields.append( + { + "name": "comment", + "label": "Comment", + "default": default_comment, + "type": "textarea", + "autosize": True, + "maxRows": 10, + } + ) + return fields def search_url(self, org_slug): return reverse("sentry-extensions-jiraserver-search", args=[org_slug, self.model.id]) + def after_link_issue(self, external_issue, data=None, **kwargs): + super().after_link_issue(external_issue, **kwargs) + + if data: + comment = data.get("comment") + if comment: + self.get_client().create_comment(external_issue.key, comment) + class JiraServerIntegrationProvider(IntegrationProvider): key = "jira_server"
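A reduced illustration of the default back-link comment introduced by the diff above; the short id and URL are invented stand-ins for `group.qualified_short_id` and the absolute issue URL built with `absolute_uri(group.get_absolute_url(params={"referrer": "jira_server"}))`. Jira wiki markup renders `[text|url]` as a hyperlink, so when `after_link_issue` posts this string through `create_comment`, the linked Jira Server issue gains a navigable reference back to Sentry, matching what issue creation already provides.

```python
# Invented example values; the format string is the one the diff uses as the field default.
qualified_short_id = "PROJ-123"
issue_url = "https://sentry.example.com/organizations/acme/issues/42/?referrer=jira_server"

comment = "Linked Sentry Issue: [{}|{}]".format(qualified_short_id, issue_url)
print(comment)
```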
{"golden_diff": "diff --git a/src/sentry/integrations/jira_server/integration.py b/src/sentry/integrations/jira_server/integration.py\n--- a/src/sentry/integrations/jira_server/integration.py\n+++ b/src/sentry/integrations/jira_server/integration.py\n@@ -18,8 +18,9 @@\n from sentry.shared_integrations.exceptions import IntegrationError, ApiError\n from sentry.integrations.jira import JiraIntegration\n from sentry.pipeline import PipelineView\n-from sentry.utils.hashlib import sha1_text\n from sentry.utils.decorators import classproperty\n+from sentry.utils.hashlib import sha1_text\n+from sentry.utils.http import absolute_uri\n from sentry.web.helpers import render_to_response\n from .client import JiraServer, JiraServerSetupClient, JiraServerClient\n \n@@ -234,7 +235,8 @@\n )\n \n def get_link_issue_config(self, group, **kwargs):\n- fields = super(JiraIntegration, self).get_link_issue_config(group, **kwargs)\n+ fields = super().get_link_issue_config(group, **kwargs)\n+\n org = group.organization\n autocomplete_url = reverse(\n \"sentry-extensions-jiraserver-search\", args=[org.slug, self.model.id]\n@@ -243,11 +245,35 @@\n if field[\"name\"] == \"externalIssue\":\n field[\"url\"] = autocomplete_url\n field[\"type\"] = \"select\"\n+\n+ default_comment = \"Linked Sentry Issue: [{}|{}]\".format(\n+ group.qualified_short_id,\n+ absolute_uri(group.get_absolute_url(params={\"referrer\": \"jira_server\"})),\n+ )\n+ fields.append(\n+ {\n+ \"name\": \"comment\",\n+ \"label\": \"Comment\",\n+ \"default\": default_comment,\n+ \"type\": \"textarea\",\n+ \"autosize\": True,\n+ \"maxRows\": 10,\n+ }\n+ )\n+\n return fields\n \n def search_url(self, org_slug):\n return reverse(\"sentry-extensions-jiraserver-search\", args=[org_slug, self.model.id])\n \n+ def after_link_issue(self, external_issue, data=None, **kwargs):\n+ super().after_link_issue(external_issue, **kwargs)\n+\n+ if data:\n+ comment = data.get(\"comment\")\n+ if comment:\n+ self.get_client().create_comment(external_issue.key, comment)\n+\n \n class JiraServerIntegrationProvider(IntegrationProvider):\n key = \"jira_server\"\n", "issue": "Create comment when linking to existing Jira issue\n## Summary\r\n\r\nIt would be nice if when linking to an existing Jira issue Sentry created a comment with a link back to the Sentry issue.\r\n\r\n## Motivation\r\n\r\n- Sentry already creates a backlink when creating a _new_ Jira issue\r\n- Being able to navigate from Jira issue to the original Sentry issue is just as important and useful as navigating from Sentry to Jira.\r\n\r\n## Additional Context\r\n\r\nWhen creating a new Jira issue, there is a backlink:\r\n\r\n![image](https://user-images.githubusercontent.com/52021/98541556-ae574a00-22a0-11eb-9711-4c7f1ef3dbc6.png)\r\n\r\n![image](https://user-images.githubusercontent.com/52021/98541575-b31bfe00-22a0-11eb-89dc-0c057ee1519b.png)\r\n\r\n![image](https://user-images.githubusercontent.com/52021/98541590-b911df00-22a0-11eb-902f-49981ccbf1bb.png)\r\n\r\nWhen linking to an existing Jira issue, there is no comment and no backlink.\r\n\r\nWe are using on-premises Jira Server.\n", "before_files": [{"content": "import logging\n\nfrom cryptography.hazmat.primitives.serialization import load_pem_private_key\nfrom cryptography.hazmat.backends import default_backend\nfrom django import forms\nfrom django.core.urlresolvers import reverse\nfrom django.core.validators import URLValidator\nfrom django.utils.translation import ugettext as _\nfrom django.views.decorators.csrf import csrf_exempt\nfrom urllib.parse import 
urlparse\n\nfrom sentry.integrations import (\n IntegrationFeatures,\n IntegrationProvider,\n IntegrationMetadata,\n FeatureDescription,\n)\nfrom sentry.shared_integrations.exceptions import IntegrationError, ApiError\nfrom sentry.integrations.jira import JiraIntegration\nfrom sentry.pipeline import PipelineView\nfrom sentry.utils.hashlib import sha1_text\nfrom sentry.utils.decorators import classproperty\nfrom sentry.web.helpers import render_to_response\nfrom .client import JiraServer, JiraServerSetupClient, JiraServerClient\n\n\nlogger = logging.getLogger(\"sentry.integrations.jira_server\")\n\nDESCRIPTION = \"\"\"\nConnect your Sentry organization into one or more of your Jira Server instances.\nGet started streamlining your bug squashing workflow by unifying your Sentry and\nJira instances together.\n\"\"\"\n\nFEATURE_DESCRIPTIONS = [\n FeatureDescription(\n \"\"\"\n Create and link Sentry issue groups directly to a Jira ticket in any of your\n projects, providing a quick way to jump from Sentry bug to tracked ticket!\n \"\"\",\n IntegrationFeatures.ISSUE_BASIC,\n ),\n FeatureDescription(\n \"\"\"\n Automatically synchronize assignees to and from Jira. Don't get confused\n who's fixing what, let us handle ensuring your issues and tickets match up\n to your Sentry and Jira assignees.\n \"\"\",\n IntegrationFeatures.ISSUE_SYNC,\n ),\n FeatureDescription(\n \"\"\"\n Synchronize Comments on Sentry Issues directly to the linked Jira ticket.\n \"\"\",\n IntegrationFeatures.ISSUE_SYNC,\n ),\n]\n\nsetup_alert = {\n \"type\": \"warning\",\n \"icon\": \"icon-warning-sm\",\n \"text\": \"Your Jira instance must be able to communicate with Sentry.\"\n \" Sentry makes outbound requests from a [static set of IP\"\n \" addresses](https://docs.sentry.io/ip-ranges/) that you may wish\"\n \" to allow in your firewall to support this integration.\",\n}\n\n\nmetadata = IntegrationMetadata(\n description=_(DESCRIPTION.strip()),\n features=FEATURE_DESCRIPTIONS,\n author=\"The Sentry Team\",\n noun=_(\"Installation\"),\n issue_url=\"https://github.com/getsentry/sentry/issues/new?assignees=&labels=Component:%20Integrations&template=bug_report.md&title=Jira%20Server%20Integration%20Problem\",\n source_url=\"https://github.com/getsentry/sentry/tree/master/src/sentry/integrations/jira_server\",\n aspects={\"alerts\": [setup_alert]},\n)\n\n\nclass InstallationForm(forms.Form):\n url = forms.CharField(\n label=_(\"Jira URL\"),\n help_text=_(\"The base URL for your Jira Server instance, including the host and protocol.\"),\n widget=forms.TextInput(attrs={\"placeholder\": \"https://jira.example.com\"}),\n validators=[URLValidator()],\n )\n verify_ssl = forms.BooleanField(\n label=_(\"Verify SSL\"),\n help_text=_(\n \"By default, we verify SSL certificates \" \"when making requests to your Jira instance.\"\n ),\n widget=forms.CheckboxInput(),\n required=False,\n initial=True,\n )\n consumer_key = forms.CharField(\n label=_(\"Jira Consumer Key\"),\n widget=forms.TextInput(attrs={\"placeholder\": \"sentry-consumer-key\"}),\n )\n private_key = forms.CharField(\n label=_(\"Jira Consumer Private Key\"),\n widget=forms.Textarea(\n attrs={\n \"placeholder\": \"-----BEGIN RSA PRIVATE KEY-----\\n...\\n-----END RSA PRIVATE KEY-----\"\n }\n ),\n )\n\n def clean_url(self):\n \"\"\"Strip off trailing / as they cause invalid URLs downstream\"\"\"\n return self.cleaned_data[\"url\"].rstrip(\"/\")\n\n def clean_private_key(self):\n data = self.cleaned_data[\"private_key\"]\n\n try:\n load_pem_private_key(data.encode(\"utf-8\"), 
None, default_backend())\n except Exception:\n raise forms.ValidationError(\n \"Private key must be a valid SSH private key encoded in a PEM format.\"\n )\n return data\n\n def clean_consumer_key(self):\n data = self.cleaned_data[\"consumer_key\"]\n if len(data) > 200:\n raise forms.ValidationError(\"Consumer key is limited to 200 characters.\")\n return data\n\n\nclass InstallationConfigView(PipelineView):\n \"\"\"\n Collect the OAuth client credentials from the user.\n \"\"\"\n\n def dispatch(self, request, pipeline):\n if request.method == \"POST\":\n form = InstallationForm(request.POST)\n if form.is_valid():\n form_data = form.cleaned_data\n\n pipeline.bind_state(\"installation_data\", form_data)\n return pipeline.next_step()\n else:\n form = InstallationForm()\n\n return render_to_response(\n template=\"sentry/integrations/jira-server-config.html\",\n context={\"form\": form},\n request=request,\n )\n\n\nclass OAuthLoginView(PipelineView):\n \"\"\"\n Start the OAuth dance by creating a request token\n and redirecting the user to approve it.\n \"\"\"\n\n @csrf_exempt\n def dispatch(self, request, pipeline):\n if \"oauth_token\" in request.GET:\n return pipeline.next_step()\n\n config = pipeline.fetch_state(\"installation_data\")\n client = JiraServerSetupClient(\n config.get(\"url\"),\n config.get(\"consumer_key\"),\n config.get(\"private_key\"),\n config.get(\"verify_ssl\"),\n )\n try:\n request_token = client.get_request_token()\n pipeline.bind_state(\"request_token\", request_token)\n authorize_url = client.get_authorize_url(request_token)\n\n return self.redirect(authorize_url)\n except ApiError as error:\n logger.info(\n \"identity.jira-server.request-token\",\n extra={\"url\": config.get(\"url\"), \"error\": error},\n )\n return pipeline.error(\"Could not fetch a request token from Jira. 
%s\" % error)\n\n\nclass OAuthCallbackView(PipelineView):\n \"\"\"\n Complete the OAuth dance by exchanging our request token\n into an access token.\n \"\"\"\n\n @csrf_exempt\n def dispatch(self, request, pipeline):\n config = pipeline.fetch_state(\"installation_data\")\n client = JiraServerSetupClient(\n config.get(\"url\"),\n config.get(\"consumer_key\"),\n config.get(\"private_key\"),\n config.get(\"verify_ssl\"),\n )\n\n try:\n access_token = client.get_access_token(\n pipeline.fetch_state(\"request_token\"), request.GET[\"oauth_token\"]\n )\n pipeline.bind_state(\"access_token\", access_token)\n\n return pipeline.next_step()\n except ApiError as error:\n logger.info(\"identity.jira-server.access-token\", extra={\"error\": error})\n return pipeline.error(\"Could not fetch an access token from Jira\")\n\n\nclass JiraServerIntegration(JiraIntegration):\n \"\"\"\n IntegrationInstallation implementation for Jira-Server\n \"\"\"\n\n default_identity = None\n\n @classproperty\n def use_email_scope(cls):\n # jira server doesn't need the email scope since it's not restricted by GDPR\n return False\n\n def get_client(self):\n if self.default_identity is None:\n self.default_identity = self.get_default_identity()\n\n return JiraServerClient(\n self.model.metadata[\"base_url\"],\n JiraServer(self.default_identity.data),\n self.model.metadata[\"verify_ssl\"],\n )\n\n def get_link_issue_config(self, group, **kwargs):\n fields = super(JiraIntegration, self).get_link_issue_config(group, **kwargs)\n org = group.organization\n autocomplete_url = reverse(\n \"sentry-extensions-jiraserver-search\", args=[org.slug, self.model.id]\n )\n for field in fields:\n if field[\"name\"] == \"externalIssue\":\n field[\"url\"] = autocomplete_url\n field[\"type\"] = \"select\"\n return fields\n\n def search_url(self, org_slug):\n return reverse(\"sentry-extensions-jiraserver-search\", args=[org_slug, self.model.id])\n\n\nclass JiraServerIntegrationProvider(IntegrationProvider):\n key = \"jira_server\"\n name = \"Jira Server\"\n metadata = metadata\n integration_cls = JiraServerIntegration\n\n needs_default_identity = True\n\n can_add = True\n\n features = frozenset([IntegrationFeatures.ISSUE_BASIC, IntegrationFeatures.ISSUE_SYNC])\n\n setup_dialog_config = {\"width\": 1030, \"height\": 1000}\n\n def get_pipeline_views(self):\n return [InstallationConfigView(), OAuthLoginView(), OAuthCallbackView()]\n\n def build_integration(self, state):\n install = state[\"installation_data\"]\n access_token = state[\"access_token\"]\n\n webhook_secret = sha1_text(install[\"private_key\"]).hexdigest()\n\n hostname = urlparse(install[\"url\"]).netloc\n external_id = \"{}:{}\".format(hostname, install[\"consumer_key\"])[:64]\n\n credentials = {\n \"consumer_key\": install[\"consumer_key\"],\n \"private_key\": install[\"private_key\"],\n \"access_token\": access_token[\"oauth_token\"],\n \"access_token_secret\": access_token[\"oauth_token_secret\"],\n }\n # Create the webhook before the integration record exists\n # so that if it fails we don't persist a broken integration.\n self.create_webhook(external_id, webhook_secret, install, credentials)\n\n return {\n \"name\": install[\"consumer_key\"],\n \"provider\": \"jira_server\",\n \"external_id\": external_id,\n \"metadata\": {\n \"base_url\": install[\"url\"],\n \"domain_name\": hostname,\n \"verify_ssl\": install[\"verify_ssl\"],\n \"webhook_secret\": webhook_secret,\n },\n \"user_identity\": {\n \"type\": \"jira_server\",\n \"external_id\": external_id,\n \"scopes\": [],\n \"data\": 
credentials,\n },\n }\n\n def create_webhook(self, external_id, webhook_secret, install, credentials):\n client = JiraServerSetupClient(\n install[\"url\"], install[\"consumer_key\"], install[\"private_key\"], install[\"verify_ssl\"]\n )\n try:\n client.create_issue_webhook(external_id, webhook_secret, credentials)\n except ApiError as err:\n logger.info(\n \"jira-server.webhook.failed\",\n extra={\"error\": str(err), \"external_id\": external_id},\n )\n try:\n details = next(x for x in err.json[\"messages\"][0].values())\n except (KeyError, TypeError, StopIteration):\n details = \"\"\n message = f\"Could not create issue webhook in Jira. {details}\"\n raise IntegrationError(message)\n", "path": "src/sentry/integrations/jira_server/integration.py"}]}
4,032
556
gh_patches_debug_25290
rasdani/github-patches
git_diff
mkdocs__mkdocs-166
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Exit gracefully when misconfigured Currently if `mkdocs.yaml` does not exist or if `pages`/`project_name` are not set, we'll exit with an exception. Really we should print a simple error to stderr and exit with an appropriate error code. </issue> <code> [start of mkdocs/config.py] 1 # coding: utf-8 2 3 from mkdocs import utils 4 from mkdocs.compat import urlparse 5 import os 6 import yaml 7 8 DEFAULT_CONFIG = { 9 'site_name': None, 10 'pages': None, 11 12 'site_url': None, 13 'site_description': None, 14 'site_author': None, 15 'site_favicon': None, 16 17 'theme': 'mkdocs', 18 'docs_dir': 'docs', 19 'site_dir': 'site', 20 'theme_dir': None, 21 22 'copyright': None, 23 'google-analytics': None, 24 25 # The address on which to serve the livereloading docs server. 26 'dev_addr': '127.0.0.1:8000', 27 28 # If `True`, use `<page_name>/index.hmtl` style files with hyperlinks to the directory. 29 # If `False`, use `<page_name>.html style file with hyperlinks to the file. 30 # True generates nicer URLs, but False is useful if browsing the output on a filesystem. 31 'use_directory_urls': True, 32 33 # Specify a link to the project source repo to be included 34 # in the documentation pages. 35 'repo_url': None, 36 37 # A name to use for the link to the project source repo. 38 # Default: If repo_url is unset then None, otherwise 39 # "GitHub" or "Bitbucket" for known url or Hostname for unknown urls. 40 'repo_name': None, 41 42 # Specify which css or javascript files from the docs 43 # directionary should be additionally included in the site. 44 # Default: List of all .css and .js files in the docs dir. 45 'extra_css': None, 46 'extra_javascript': None, 47 48 # Determine if the site should include the nav and next/prev elements. 49 # Default: True if the site has more than one page, False otherwise. 50 'include_nav': None, 51 'include_next_prev': None, 52 53 # PyMarkdown extension names. 54 'markdown_extensions': (), 55 56 # Determine if the site should generate a json search index and include 57 # search elements in the theme. - TODO 58 'include_search': False, 59 60 # Determine if the site should include a 404.html page. 61 # TODO: Implment this. Make this None, have it True if a 404.html 62 # template exists in the theme or docs dir. 63 'include_404': False, 64 65 # Determine if the site should include a sitemap.xml page. 66 # TODO: Implement this. Make this None, have it True if a sitemap.xml 67 # template exists in the theme or docs dir. 68 'include_sitemap': False, 69 } 70 71 72 def load_config(filename='mkdocs.yml', options=None): 73 options = options or {} 74 if 'config' in options: 75 filename = options['config'] 76 assert os.path.exists(filename), "Config file '%s' does not exist." % filename 77 with open(filename, 'r') as fp: 78 user_config = yaml.load(fp) 79 user_config.update(options) 80 return validate_config(user_config) 81 82 83 def validate_config(user_config): 84 config = DEFAULT_CONFIG.copy() 85 config.update(user_config) 86 87 assert config['site_name'], "Config must contain 'site_name' setting." 88 89 # If not specified, then the 'pages' config simply includes all 90 # markdown files in the docs dir, without generating any header items 91 # for them. 
92 pages = [] 93 extra_css = [] 94 extra_javascript = [] 95 for (dirpath, dirnames, filenames) in os.walk(config['docs_dir']): 96 for filename in sorted(filenames): 97 fullpath = os.path.join(dirpath, filename) 98 relpath = os.path.relpath(fullpath, config['docs_dir']) 99 100 if utils.is_markdown_file(filename): 101 # index pages should always be the first listed page. 102 if os.path.splitext(relpath)[0] == 'index': 103 pages.insert(0, relpath) 104 else: 105 pages.append(relpath) 106 elif utils.is_css_file(filename): 107 extra_css.append(relpath) 108 elif utils.is_javascript_file(filename): 109 extra_javascript.append(relpath) 110 111 if config['pages'] is None: 112 config['pages'] = pages 113 114 if config['extra_css'] is None: 115 config['extra_css'] = extra_css 116 117 if config['extra_javascript'] is None: 118 config['extra_javascript'] = extra_javascript 119 120 package_dir = os.path.dirname(__file__) 121 theme_dir = [os.path.join(package_dir, 'themes', config['theme'])] 122 123 if config['theme_dir'] is not None: 124 theme_dir.insert(0, config['theme_dir']) 125 126 config['theme_dir'] = theme_dir 127 128 if config['repo_url'] is not None and config['repo_name'] is None: 129 repo_host = urlparse(config['repo_url']).netloc.lower() 130 if repo_host == 'github.com': 131 config['repo_name'] = 'GitHub' 132 elif repo_host == 'bitbucket.com': 133 config['repo_name'] = 'Bitbucket' 134 else: 135 config['repo_name'] = repo_host.split('.')[0].title() 136 137 if config['include_next_prev'] is None: 138 config['include_next_prev'] = len(config['pages']) > 1 139 140 if config['include_nav'] is None: 141 config['include_nav'] = len(config['pages']) > 1 142 143 # To Do: 144 145 # The docs dir must exist. 146 # The theme dir must exist. 147 # Ensure 'theme' is one of 'mkdocs', 'readthedocs', 'custom' 148 # A homepage 'index' must exist. 149 # The theme 'base.html' file must exist. 150 # Cannot set repo_name without setting repo_url. 151 # Cannot set 'include_next_prev: true' when only one page exists. 152 # Cannot set 'include_nav: true' when only one page exists. 153 # Error if any config keys provided that are not in the DEFAULT_CONFIG. 154 155 return config 156 [end of mkdocs/config.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mkdocs/config.py b/mkdocs/config.py --- a/mkdocs/config.py +++ b/mkdocs/config.py @@ -2,7 +2,9 @@ from mkdocs import utils from mkdocs.compat import urlparse +import errno import os +import sys import yaml DEFAULT_CONFIG = { @@ -73,7 +75,9 @@ options = options or {} if 'config' in options: filename = options['config'] - assert os.path.exists(filename), "Config file '%s' does not exist." % filename + if not os.path.exists(filename): + sys.stderr.write("Config file '%s' does not exist." % filename) + sys.exit(errno.ENOENT) with open(filename, 'r') as fp: user_config = yaml.load(fp) user_config.update(options) @@ -84,7 +88,9 @@ config = DEFAULT_CONFIG.copy() config.update(user_config) - assert config['site_name'], "Config must contain 'site_name' setting." + if not config['site_name']: + sys.stderr.write("Config must contain 'site_name' setting.") + sys.exit(errno.EINVAL) # If not specified, then the 'pages' config simply includes all # markdown files in the docs dir, without generating any header items
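A stripped-down sketch of the pattern the diff above applies; the function name is illustrative while the message and exit code follow the diff. Writing to stderr and exiting with a conventional `errno` value replaces the bare `assert`, which would otherwise surface as an `AssertionError` traceback (and would be skipped entirely under `python -O`). The numeric codes (`errno.ENOENT` is 2, `errno.EINVAL` is 22) also let calling shell scripts distinguish the failure modes.

```python
import errno
import os
import sys


def require_config(filename="mkdocs.yml"):
    # Mirrors the replaced assert: missing config file -> message on stderr, exit with ENOENT.
    if not os.path.exists(filename):
        sys.stderr.write("Config file '%s' does not exist.\n" % filename)
        sys.exit(errno.ENOENT)


require_config()
```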
{"golden_diff": "diff --git a/mkdocs/config.py b/mkdocs/config.py\n--- a/mkdocs/config.py\n+++ b/mkdocs/config.py\n@@ -2,7 +2,9 @@\n \n from mkdocs import utils\n from mkdocs.compat import urlparse\n+import errno\n import os\n+import sys\n import yaml\n \n DEFAULT_CONFIG = {\n@@ -73,7 +75,9 @@\n options = options or {}\n if 'config' in options:\n filename = options['config']\n- assert os.path.exists(filename), \"Config file '%s' does not exist.\" % filename\n+ if not os.path.exists(filename):\n+ sys.stderr.write(\"Config file '%s' does not exist.\" % filename)\n+ sys.exit(errno.ENOENT)\n with open(filename, 'r') as fp:\n user_config = yaml.load(fp)\n user_config.update(options)\n@@ -84,7 +88,9 @@\n config = DEFAULT_CONFIG.copy()\n config.update(user_config)\n \n- assert config['site_name'], \"Config must contain 'site_name' setting.\"\n+ if not config['site_name']:\n+ sys.stderr.write(\"Config must contain 'site_name' setting.\")\n+ sys.exit(errno.EINVAL)\n \n # If not specified, then the 'pages' config simply includes all\n # markdown files in the docs dir, without generating any header items\n", "issue": "Exit gracefully when misconfigured\nCurrently if `mkdocs.yaml` does not exist or if `pages`/`project_name` are not set, we'll exit with an exception. Really we should print a simple error to stderr and exit with an appropriate error code.\n\n", "before_files": [{"content": "# coding: utf-8\n\nfrom mkdocs import utils\nfrom mkdocs.compat import urlparse\nimport os\nimport yaml\n\nDEFAULT_CONFIG = {\n 'site_name': None,\n 'pages': None,\n\n 'site_url': None,\n 'site_description': None,\n 'site_author': None,\n 'site_favicon': None,\n\n 'theme': 'mkdocs',\n 'docs_dir': 'docs',\n 'site_dir': 'site',\n 'theme_dir': None,\n\n 'copyright': None,\n 'google-analytics': None,\n\n # The address on which to serve the livereloading docs server.\n 'dev_addr': '127.0.0.1:8000',\n\n # If `True`, use `<page_name>/index.hmtl` style files with hyperlinks to the directory.\n # If `False`, use `<page_name>.html style file with hyperlinks to the file.\n # True generates nicer URLs, but False is useful if browsing the output on a filesystem.\n 'use_directory_urls': True,\n\n # Specify a link to the project source repo to be included\n # in the documentation pages.\n 'repo_url': None,\n\n # A name to use for the link to the project source repo.\n # Default: If repo_url is unset then None, otherwise\n # \"GitHub\" or \"Bitbucket\" for known url or Hostname for unknown urls.\n 'repo_name': None,\n\n # Specify which css or javascript files from the docs\n # directionary should be additionally included in the site.\n # Default: List of all .css and .js files in the docs dir.\n 'extra_css': None,\n 'extra_javascript': None,\n\n # Determine if the site should include the nav and next/prev elements.\n # Default: True if the site has more than one page, False otherwise.\n 'include_nav': None,\n 'include_next_prev': None,\n\n # PyMarkdown extension names.\n 'markdown_extensions': (),\n\n # Determine if the site should generate a json search index and include\n # search elements in the theme. - TODO\n 'include_search': False,\n\n # Determine if the site should include a 404.html page.\n # TODO: Implment this. Make this None, have it True if a 404.html\n # template exists in the theme or docs dir.\n 'include_404': False,\n\n # Determine if the site should include a sitemap.xml page.\n # TODO: Implement this. 
Make this None, have it True if a sitemap.xml\n # template exists in the theme or docs dir.\n 'include_sitemap': False,\n}\n\n\ndef load_config(filename='mkdocs.yml', options=None):\n options = options or {}\n if 'config' in options:\n filename = options['config']\n assert os.path.exists(filename), \"Config file '%s' does not exist.\" % filename\n with open(filename, 'r') as fp:\n user_config = yaml.load(fp)\n user_config.update(options)\n return validate_config(user_config)\n\n\ndef validate_config(user_config):\n config = DEFAULT_CONFIG.copy()\n config.update(user_config)\n\n assert config['site_name'], \"Config must contain 'site_name' setting.\"\n\n # If not specified, then the 'pages' config simply includes all\n # markdown files in the docs dir, without generating any header items\n # for them.\n pages = []\n extra_css = []\n extra_javascript = []\n for (dirpath, dirnames, filenames) in os.walk(config['docs_dir']):\n for filename in sorted(filenames):\n fullpath = os.path.join(dirpath, filename)\n relpath = os.path.relpath(fullpath, config['docs_dir'])\n\n if utils.is_markdown_file(filename):\n # index pages should always be the first listed page.\n if os.path.splitext(relpath)[0] == 'index':\n pages.insert(0, relpath)\n else:\n pages.append(relpath)\n elif utils.is_css_file(filename):\n extra_css.append(relpath)\n elif utils.is_javascript_file(filename):\n extra_javascript.append(relpath)\n\n if config['pages'] is None:\n config['pages'] = pages\n\n if config['extra_css'] is None:\n config['extra_css'] = extra_css\n\n if config['extra_javascript'] is None:\n config['extra_javascript'] = extra_javascript\n\n package_dir = os.path.dirname(__file__)\n theme_dir = [os.path.join(package_dir, 'themes', config['theme'])]\n\n if config['theme_dir'] is not None:\n theme_dir.insert(0, config['theme_dir'])\n\n config['theme_dir'] = theme_dir\n\n if config['repo_url'] is not None and config['repo_name'] is None:\n repo_host = urlparse(config['repo_url']).netloc.lower()\n if repo_host == 'github.com':\n config['repo_name'] = 'GitHub'\n elif repo_host == 'bitbucket.com':\n config['repo_name'] = 'Bitbucket'\n else:\n config['repo_name'] = repo_host.split('.')[0].title()\n\n if config['include_next_prev'] is None:\n config['include_next_prev'] = len(config['pages']) > 1\n\n if config['include_nav'] is None:\n config['include_nav'] = len(config['pages']) > 1\n\n # To Do:\n\n # The docs dir must exist.\n # The theme dir must exist.\n # Ensure 'theme' is one of 'mkdocs', 'readthedocs', 'custom'\n # A homepage 'index' must exist.\n # The theme 'base.html' file must exist.\n # Cannot set repo_name without setting repo_url.\n # Cannot set 'include_next_prev: true' when only one page exists.\n # Cannot set 'include_nav: true' when only one page exists.\n # Error if any config keys provided that are not in the DEFAULT_CONFIG.\n\n return config\n", "path": "mkdocs/config.py"}]}
2,263
297
gh_patches_debug_33574
rasdani/github-patches
git_diff
getnikola__nikola-3501
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> dispatch PANDOC_OPTIONS base on input extensions <!-- Before creating an issue: * make sure you are using an up-to-date version of Nikola * search for existing issues that might be related Describe your requested features as precisely as possible. --> # Summary Currently, `COMPILERS` only has a key `pandoc`, and `PANDOC_OPTIONS` is set for all the extensions specified there. The proposed feature simply suggests to have configurable dispatch logic to allow multiple extensions calling pandoc with a different set of `PANDOC_OPTIONS`. # Example use case My personal use for this would be to convert one of my site currently using `.md` and `.markdown` to mean a different target format. Both will be converted to HTML, but one of them is slideshow, another is normal "article"/post. Another possible use case is, if one want to specify markdown extensions in pandoc, say `-t markdown+...-...`, then `COMPILERS['pandoc'] = ['.rst', '.md']` won't work with `PANDOC_OPTIONS = ['-t', 'markdown+...-...']` because that would be wrong input format for `.rst`. # Suggested implementation Probably the natural way is to promote ```py PANDOC_OPTIONS: Union[list, Dict[str, list]] ``` where the keys are those in `COMPILERS['pandoc']`. If it makes sense I can send a PR doing that. It seems that it only requires changes in <https://github.com/getnikola/nikola/blob/master/nikola/plugins/compile/pandoc.py#L52-L56>, is it true? </issue> <code> [start of nikola/plugins/compile/pandoc.py] 1 # -*- coding: utf-8 -*- 2 3 # Copyright Β© 2012-2021 Roberto Alsina and others. 4 5 # Permission is hereby granted, free of charge, to any 6 # person obtaining a copy of this software and associated 7 # documentation files (the "Software"), to deal in the 8 # Software without restriction, including without limitation 9 # the rights to use, copy, modify, merge, publish, 10 # distribute, sublicense, and/or sell copies of the 11 # Software, and to permit persons to whom the Software is 12 # furnished to do so, subject to the following conditions: 13 # 14 # The above copyright notice and this permission notice 15 # shall be included in all copies or substantial portions of 16 # the Software. 17 # 18 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY 19 # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE 20 # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR 21 # PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS 22 # OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR 23 # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 24 # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 25 # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 26 27 """Page compiler plugin for pandoc. 
28 29 You will need, of course, to install pandoc 30 """ 31 32 33 import io 34 import os 35 import subprocess 36 37 from nikola.plugin_categories import PageCompiler 38 from nikola.utils import req_missing, makedirs, write_metadata 39 40 41 class CompilePandoc(PageCompiler): 42 """Compile markups into HTML using pandoc.""" 43 44 name = "pandoc" 45 friendly_name = "pandoc" 46 47 def set_site(self, site): 48 """Set Nikola site.""" 49 self.config_dependencies = [str(site.config['PANDOC_OPTIONS'])] 50 super().set_site(site) 51 52 def compile(self, source, dest, is_two_file=True, post=None, lang=None): 53 """Compile the source file into HTML and save as dest.""" 54 makedirs(os.path.dirname(dest)) 55 try: 56 subprocess.check_call(['pandoc', '-o', dest, source] + self.site.config['PANDOC_OPTIONS']) 57 with open(dest, 'r', encoding='utf-8-sig') as inf: 58 output, shortcode_deps = self.site.apply_shortcodes(inf.read()) 59 with open(dest, 'w', encoding='utf-8') as outf: 60 outf.write(output) 61 if post is None: 62 if shortcode_deps: 63 self.logger.error( 64 "Cannot save dependencies for post {0} (post unknown)", 65 source) 66 else: 67 post._depfile[dest] += shortcode_deps 68 except OSError as e: 69 if e.strreror == 'No such file or directory': 70 req_missing(['pandoc'], 'build this site (compile with pandoc)', python=False) 71 72 def compile_string(self, data, source_path=None, is_two_file=True, post=None, lang=None): 73 """Compile into HTML strings.""" 74 raise ValueError("Pandoc compiler does not support compile_string due to multiple output formats") 75 76 def create_post(self, path, **kw): 77 """Create a new post.""" 78 content = kw.pop('content', None) 79 onefile = kw.pop('onefile', False) 80 # is_page is not used by create_post as of now. 81 kw.pop('is_page', False) 82 metadata = {} 83 metadata.update(self.default_metadata) 84 metadata.update(kw) 85 makedirs(os.path.dirname(path)) 86 if not content.endswith('\n'): 87 content += '\n' 88 with io.open(path, "w+", encoding="utf8") as fd: 89 if onefile: 90 fd.write(write_metadata(metadata, comment_wrap=True, site=self.site, compiler=self)) 91 fd.write(content) 92 [end of nikola/plugins/compile/pandoc.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/nikola/plugins/compile/pandoc.py b/nikola/plugins/compile/pandoc.py --- a/nikola/plugins/compile/pandoc.py +++ b/nikola/plugins/compile/pandoc.py @@ -33,6 +33,8 @@ import io import os import subprocess +from typing import List +from pathlib import Path from nikola.plugin_categories import PageCompiler from nikola.utils import req_missing, makedirs, write_metadata @@ -49,11 +51,30 @@ self.config_dependencies = [str(site.config['PANDOC_OPTIONS'])] super().set_site(site) + def _get_pandoc_options(self, source: str) -> List[str]: + """Obtain pandoc args from config depending on type and file extensions.""" + # Union[List[str], Dict[str, List[str]]] + config_options = self.site.config['PANDOC_OPTIONS'] + type_ = type(config_options) + if type_ is list: + pandoc_options = config_options + elif type_ is dict: + ext = Path(source).suffix + try: + pandoc_options = config_options[ext] + except KeyError: + self.logger.warn('Setting PANDOC_OPTIONS to [], because extension {} is not defined in PANDOC_OPTIONS: {}.'.format(ext, config_options)) + pandoc_options = [] + else: + self.logger.warn('Setting PANDOC_OPTIONS to [], because PANDOC_OPTIONS is expected to be of type Union[List[str], Dict[str, List[str]]] but this is not: {}'.format(config_options)) + pandoc_options = [] + return pandoc_options + def compile(self, source, dest, is_two_file=True, post=None, lang=None): """Compile the source file into HTML and save as dest.""" makedirs(os.path.dirname(dest)) try: - subprocess.check_call(['pandoc', '-o', dest, source] + self.site.config['PANDOC_OPTIONS']) + subprocess.check_call(['pandoc', '-o', dest, source] + self._get_pandoc_options(source)) with open(dest, 'r', encoding='utf-8-sig') as inf: output, shortcode_deps = self.site.apply_shortcodes(inf.read()) with open(dest, 'w', encoding='utf-8') as outf:
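For orientation before the verification record repeats the same material: the patch above makes `PANDOC_OPTIONS` accept either a flat list or a dict keyed by source-file suffix. The sketch below shows how a site configuration could drive that, assuming a Nikola `conf.py`; the extensions and pandoc flags are illustrative choices, not values taken from this record.

```python
# conf.py -- hypothetical site configuration exercising the patched compiler.
COMPILERS = {
    "pandoc": [".rst", ".md", ".markdown"],
}

# With the patch, PANDOC_OPTIONS may stay a single list (same flags for every
# extension) or become a dict keyed by suffix, e.g. regular posts from .md and
# reveal.js slideshows from .markdown.
PANDOC_OPTIONS = {
    ".rst": ["--mathjax"],
    ".md": ["--mathjax", "--toc"],
    ".markdown": ["-t", "revealjs", "-s"],
}
```

Per the patch's `KeyError` branch, a suffix missing from the dict falls back to an empty option list and logs a warning.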
{"golden_diff": "diff --git a/nikola/plugins/compile/pandoc.py b/nikola/plugins/compile/pandoc.py\n--- a/nikola/plugins/compile/pandoc.py\n+++ b/nikola/plugins/compile/pandoc.py\n@@ -33,6 +33,8 @@\n import io\n import os\n import subprocess\n+from typing import List\n+from pathlib import Path\n \n from nikola.plugin_categories import PageCompiler\n from nikola.utils import req_missing, makedirs, write_metadata\n@@ -49,11 +51,30 @@\n self.config_dependencies = [str(site.config['PANDOC_OPTIONS'])]\n super().set_site(site)\n \n+ def _get_pandoc_options(self, source: str) -> List[str]:\n+ \"\"\"Obtain pandoc args from config depending on type and file extensions.\"\"\"\n+ # Union[List[str], Dict[str, List[str]]]\n+ config_options = self.site.config['PANDOC_OPTIONS']\n+ type_ = type(config_options)\n+ if type_ is list:\n+ pandoc_options = config_options\n+ elif type_ is dict:\n+ ext = Path(source).suffix\n+ try:\n+ pandoc_options = config_options[ext]\n+ except KeyError:\n+ self.logger.warn('Setting PANDOC_OPTIONS to [], because extension {} is not defined in PANDOC_OPTIONS: {}.'.format(ext, config_options))\n+ pandoc_options = []\n+ else:\n+ self.logger.warn('Setting PANDOC_OPTIONS to [], because PANDOC_OPTIONS is expected to be of type Union[List[str], Dict[str, List[str]]] but this is not: {}'.format(config_options))\n+ pandoc_options = []\n+ return pandoc_options\n+\n def compile(self, source, dest, is_two_file=True, post=None, lang=None):\n \"\"\"Compile the source file into HTML and save as dest.\"\"\"\n makedirs(os.path.dirname(dest))\n try:\n- subprocess.check_call(['pandoc', '-o', dest, source] + self.site.config['PANDOC_OPTIONS'])\n+ subprocess.check_call(['pandoc', '-o', dest, source] + self._get_pandoc_options(source))\n with open(dest, 'r', encoding='utf-8-sig') as inf:\n output, shortcode_deps = self.site.apply_shortcodes(inf.read())\n with open(dest, 'w', encoding='utf-8') as outf:\n", "issue": "dispatch PANDOC_OPTIONS base on input extensions\n<!--\r\nBefore creating an issue:\r\n* make sure you are using an up-to-date version of Nikola\r\n* search for existing issues that might be related\r\n\r\nDescribe your requested features as precisely as possible. -->\r\n\r\n# Summary\r\n\r\nCurrently, `COMPILERS` only has a key `pandoc`, and `PANDOC_OPTIONS` is set for all the extensions specified there. The proposed feature simply suggests to have configurable dispatch logic to allow multiple extensions calling pandoc with a different set of `PANDOC_OPTIONS`.\r\n\r\n# Example use case\r\n\r\nMy personal use for this would be to convert one of my site currently using `.md` and `.markdown` to mean a different target format. Both will be converted to HTML, but one of them is slideshow, another is normal \"article\"/post.\r\n\r\nAnother possible use case is, if one want to specify markdown extensions in pandoc, say `-t markdown+...-...`, then `COMPILERS['pandoc'] = ['.rst', '.md']` won't work with `PANDOC_OPTIONS = ['-t', 'markdown+...-...']` because that would be wrong input format for `.rst`.\r\n\r\n# Suggested implementation\r\n\r\nProbably the natural way is to promote\r\n\r\n```py\r\nPANDOC_OPTIONS: Union[list, Dict[str, list]]\r\n```\r\n\r\nwhere the keys are those in `COMPILERS['pandoc']`.\r\n\r\nIf it makes sense I can send a PR doing that. 
It seems that it only requires changes in <https://github.com/getnikola/nikola/blob/master/nikola/plugins/compile/pandoc.py#L52-L56>, is it true?\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2012-2021 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\"\"\"Page compiler plugin for pandoc.\n\nYou will need, of course, to install pandoc\n\"\"\"\n\n\nimport io\nimport os\nimport subprocess\n\nfrom nikola.plugin_categories import PageCompiler\nfrom nikola.utils import req_missing, makedirs, write_metadata\n\n\nclass CompilePandoc(PageCompiler):\n \"\"\"Compile markups into HTML using pandoc.\"\"\"\n\n name = \"pandoc\"\n friendly_name = \"pandoc\"\n\n def set_site(self, site):\n \"\"\"Set Nikola site.\"\"\"\n self.config_dependencies = [str(site.config['PANDOC_OPTIONS'])]\n super().set_site(site)\n\n def compile(self, source, dest, is_two_file=True, post=None, lang=None):\n \"\"\"Compile the source file into HTML and save as dest.\"\"\"\n makedirs(os.path.dirname(dest))\n try:\n subprocess.check_call(['pandoc', '-o', dest, source] + self.site.config['PANDOC_OPTIONS'])\n with open(dest, 'r', encoding='utf-8-sig') as inf:\n output, shortcode_deps = self.site.apply_shortcodes(inf.read())\n with open(dest, 'w', encoding='utf-8') as outf:\n outf.write(output)\n if post is None:\n if shortcode_deps:\n self.logger.error(\n \"Cannot save dependencies for post {0} (post unknown)\",\n source)\n else:\n post._depfile[dest] += shortcode_deps\n except OSError as e:\n if e.strreror == 'No such file or directory':\n req_missing(['pandoc'], 'build this site (compile with pandoc)', python=False)\n\n def compile_string(self, data, source_path=None, is_two_file=True, post=None, lang=None):\n \"\"\"Compile into HTML strings.\"\"\"\n raise ValueError(\"Pandoc compiler does not support compile_string due to multiple output formats\")\n\n def create_post(self, path, **kw):\n \"\"\"Create a new post.\"\"\"\n content = kw.pop('content', None)\n onefile = kw.pop('onefile', False)\n # is_page is not used by create_post as of now.\n kw.pop('is_page', False)\n metadata = {}\n metadata.update(self.default_metadata)\n metadata.update(kw)\n makedirs(os.path.dirname(path))\n if not content.endswith('\\n'):\n content += '\\n'\n with io.open(path, \"w+\", encoding=\"utf8\") as fd:\n if onefile:\n fd.write(write_metadata(metadata, comment_wrap=True, site=self.site, compiler=self))\n fd.write(content)\n", "path": 
"nikola/plugins/compile/pandoc.py"}]}
1,877
526
gh_patches_debug_3348
rasdani/github-patches
git_diff
Mailu__Mailu-811
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Dovecot service takes long to start ``` docker stack ps mailu ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS yh6u3jrckt32 mailu_redis.1 redis:alpine prod02.usrpro.com Running Running 4 minutes ago otpt6os15sy3 mailu_smtp.1 mailu/postfix:master prod01.usrpro.com Running Running 4 minutes ago artmzgamypoy mailu_webdav.1 mailu/radicale:master prod03.usrpro.com Running Running 4 minutes ago x5ynw6h5ap3p mailu_imap.1 usrpro/dovecot:workaround-lucene prod03.usrpro.com Running Starting 4 minutes ago n674xs649jlw mailu_admin.1 mailu/admin:master prod01.usrpro.com Running Running 4 minutes ago xrmri5cqjzqn mailu_antivirus.1 mailu/clamav:master prod02.usrpro.com Running Running 2 minutes ago koiuyxlj2cpt mailu_antispam.1 mailu/rspamd:master prod03.usrpro.com Running Running 4 minutes ago hdpu4gq182ha mailu_webmail.1 mailu/rainloop:master prod01.usrpro.com Running Running 4 minutes ago wgqkwnz6wyxp mailu_front.1 mailu/nginx:master prod01.usrpro.com Running Running 4 minutes ago v2eznworzs0g mailu_front.2 mailu/nginx:master prod02.usrpro.com Running Running 4 minutes ago nckm82d8aryd mailu_front.3 mailu/nginx:master prod03.usrpro.com Running Running 4 minutes ago ``` In `start.py`, most probably it is stuck on: ``` multiprocessing.Process(target=start_podop).start() ``` Have to confirm with writing some logging, like requested in issue #588 . </issue> <code> [start of core/dovecot/start.py] 1 #!/usr/bin/python3 2 3 import jinja2 4 import os 5 import socket 6 import glob 7 import multiprocessing 8 import tenacity 9 10 from tenacity import retry 11 from podop import run_server 12 13 14 def start_podop(): 15 os.setuid(8) 16 run_server(3 if "DEBUG" in os.environ else 0, "dovecot", "/tmp/podop.socket", [ 17 ("quota", "url", "http://admin/internal/dovecot/Β§"), 18 ("auth", "url", "http://admin/internal/dovecot/Β§"), 19 ("sieve", "url", "http://admin/internal/dovecot/Β§"), 20 ]) 21 22 convert = lambda src, dst: open(dst, "w").write(jinja2.Template(open(src).read()).render(**os.environ)) 23 24 # Actual startup script 25 resolve = retry(socket.gethostbyname, stop=tenacity.stop_after_attempt(100), wait=tenacity.wait_random(min=2, max=5)) 26 os.environ["FRONT_ADDRESS"] = resolve(os.environ.get("FRONT_ADDRESS", "front")) 27 os.environ["REDIS_ADDRESS"] = resolve(os.environ.get("REDIS_ADDRESS", "redis")) 28 if os.environ["WEBMAIL"] != "none": 29 os.environ["WEBMAIL_ADDRESS"] = resolve(os.environ.get("WEBMAIL_ADDRESS", "webmail")) 30 31 for dovecot_file in glob.glob("/conf/*.conf"): 32 convert(dovecot_file, os.path.join("/etc/dovecot", os.path.basename(dovecot_file))) 33 34 # Run Podop, then postfix 35 multiprocessing.Process(target=start_podop).start() 36 os.system("chown -R mail:mail /mail /var/lib/dovecot /conf") 37 os.execv("/usr/sbin/dovecot", ["dovecot", "-c", "/etc/dovecot/dovecot.conf", "-F"]) 38 [end of core/dovecot/start.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/core/dovecot/start.py b/core/dovecot/start.py
--- a/core/dovecot/start.py
+++ b/core/dovecot/start.py
@@ -33,5 +33,6 @@
 
 # Run Podop, then postfix
 multiprocessing.Process(target=start_podop).start()
-os.system("chown -R mail:mail /mail /var/lib/dovecot /conf")
+os.system("chown mail:mail /mail")
+os.system("chown -R mail:mail /var/lib/dovecot /conf")
 os.execv("/usr/sbin/dovecot", ["dovecot", "-c", "/etc/dovecot/dovecot.conf", "-F"])
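A side note on why the patch splits the `chown` call: the recursive walk over `/mail` scales with the size of the mail store, which is what kept the imap container stuck in `Starting`. The snippet below is a hypothetical plain-Python illustration of the same idea; it is not code from the Mailu image.

```python
# Change ownership of the mail root without walking the (potentially huge)
# mail store, and recurse only over the small state/config directories.
import grp
import os
import pwd

uid = pwd.getpwnam("mail").pw_uid
gid = grp.getgrnam("mail").gr_gid

os.chown("/mail", uid, gid)  # one syscall, independent of mailbox count

for root in ("/var/lib/dovecot", "/conf"):
    for dirpath, _dirnames, filenames in os.walk(root):
        os.chown(dirpath, uid, gid)
        for name in filenames:
            os.chown(os.path.join(dirpath, name), uid, gid)
```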
{"golden_diff": "diff --git a/core/dovecot/start.py b/core/dovecot/start.py\n--- a/core/dovecot/start.py\n+++ b/core/dovecot/start.py\n@@ -33,5 +33,6 @@\n \n # Run Podop, then postfix\n multiprocessing.Process(target=start_podop).start()\n-os.system(\"chown -R mail:mail /mail /var/lib/dovecot /conf\")\n+os.system(\"chown mail:mail /mail\")\n+os.system(\"chown -R mail:mail /var/lib/dovecot /conf\")\n os.execv(\"/usr/sbin/dovecot\", [\"dovecot\", \"-c\", \"/etc/dovecot/dovecot.conf\", \"-F\"])\n", "issue": "Dovecot service takes long to start\n```\r\ndocker stack ps mailu\r\nID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS\r\nyh6u3jrckt32 mailu_redis.1 redis:alpine prod02.usrpro.com Running Running 4 minutes ago \r\notpt6os15sy3 mailu_smtp.1 mailu/postfix:master prod01.usrpro.com Running Running 4 minutes ago \r\nartmzgamypoy mailu_webdav.1 mailu/radicale:master prod03.usrpro.com Running Running 4 minutes ago \r\nx5ynw6h5ap3p mailu_imap.1 usrpro/dovecot:workaround-lucene prod03.usrpro.com Running Starting 4 minutes ago \r\nn674xs649jlw mailu_admin.1 mailu/admin:master prod01.usrpro.com Running Running 4 minutes ago \r\nxrmri5cqjzqn mailu_antivirus.1 mailu/clamav:master prod02.usrpro.com Running Running 2 minutes ago \r\nkoiuyxlj2cpt mailu_antispam.1 mailu/rspamd:master prod03.usrpro.com Running Running 4 minutes ago \r\nhdpu4gq182ha mailu_webmail.1 mailu/rainloop:master prod01.usrpro.com Running Running 4 minutes ago \r\nwgqkwnz6wyxp mailu_front.1 mailu/nginx:master prod01.usrpro.com Running Running 4 minutes ago \r\nv2eznworzs0g mailu_front.2 mailu/nginx:master prod02.usrpro.com Running Running 4 minutes ago \r\nnckm82d8aryd mailu_front.3 mailu/nginx:master prod03.usrpro.com Running Running 4 minutes ago \r\n```\r\nIn `start.py`, most probably it is stuck on:\r\n```\r\nmultiprocessing.Process(target=start_podop).start()\r\n```\r\n\r\nHave to confirm with writing some logging, like requested in issue #588 .\n", "before_files": [{"content": "#!/usr/bin/python3\n\nimport jinja2\nimport os\nimport socket\nimport glob\nimport multiprocessing\nimport tenacity\n\nfrom tenacity import retry\nfrom podop import run_server\n\n\ndef start_podop():\n os.setuid(8)\n run_server(3 if \"DEBUG\" in os.environ else 0, \"dovecot\", \"/tmp/podop.socket\", [\n\t\t(\"quota\", \"url\", \"http://admin/internal/dovecot/\u00a7\"),\n\t\t(\"auth\", \"url\", \"http://admin/internal/dovecot/\u00a7\"),\n\t\t(\"sieve\", \"url\", \"http://admin/internal/dovecot/\u00a7\"),\n ])\n\nconvert = lambda src, dst: open(dst, \"w\").write(jinja2.Template(open(src).read()).render(**os.environ))\n\n# Actual startup script\nresolve = retry(socket.gethostbyname, stop=tenacity.stop_after_attempt(100), wait=tenacity.wait_random(min=2, max=5))\nos.environ[\"FRONT_ADDRESS\"] = resolve(os.environ.get(\"FRONT_ADDRESS\", \"front\"))\nos.environ[\"REDIS_ADDRESS\"] = resolve(os.environ.get(\"REDIS_ADDRESS\", \"redis\"))\nif os.environ[\"WEBMAIL\"] != \"none\":\n os.environ[\"WEBMAIL_ADDRESS\"] = resolve(os.environ.get(\"WEBMAIL_ADDRESS\", \"webmail\"))\n\nfor dovecot_file in glob.glob(\"/conf/*.conf\"):\n convert(dovecot_file, os.path.join(\"/etc/dovecot\", os.path.basename(dovecot_file)))\n\n# Run Podop, then postfix\nmultiprocessing.Process(target=start_podop).start()\nos.system(\"chown -R mail:mail /mail /var/lib/dovecot /conf\")\nos.execv(\"/usr/sbin/dovecot\", [\"dovecot\", \"-c\", \"/etc/dovecot/dovecot.conf\", \"-F\"])\n", "path": "core/dovecot/start.py"}]}
1,513
150
gh_patches_debug_6841
rasdani/github-patches
git_diff
activeloopai__deeplake-1738
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [FEATURE]Do not overwrite python logging configuration ## 🚨🚨 Feature Request Be able to use both `logging` and `hub` in the same python script ### Current behavior I found the following code has undesired behavior: when adding the line `import hub`, the logging configuration will be overwritten. ```python import logging import hub # this will overwrite the logging config logging.basicConfig(filename = "sample.log", level = logging.DEBUG, format = '%(asctime)s: %(filename)s: %(levelname)s: %(message)s') logger = logging.getLogger(__name__) logger.debug("this is a debug message") logger.info("this is an info message") logger.warning("this is a warning message") logger.error("this is an error message") ``` </issue> <code> [start of hub/client/log.py] 1 """ 2 License: 3 This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. 4 If a copy of the MPL was not distributed with this file, You can obtain one at https://mozilla.org/MPL/2.0/. 5 """ 6 7 import logging 8 import sys 9 10 logger = logging.getLogger("hub") 11 12 13 def configure_logger(debug=0): 14 log_level = logging.DEBUG if debug == 1 else logging.INFO 15 logging.basicConfig(format="%(message)s", level=log_level, stream=sys.stdout) 16 17 18 configure_logger(0) 19 [end of hub/client/log.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/hub/client/log.py b/hub/client/log.py
--- a/hub/client/log.py
+++ b/hub/client/log.py
@@ -12,7 +12,17 @@
 
 def configure_logger(debug=0):
     log_level = logging.DEBUG if debug == 1 else logging.INFO
-    logging.basicConfig(format="%(message)s", level=log_level, stream=sys.stdout)
+    logger.setLevel(log_level)
+
+    stream_handler = logging.StreamHandler(stream=sys.stdout)
+    stream_handler.setLevel(log_level)
+
+    formatter = logging.Formatter("%(message)s")
+
+    stream_handler.setFormatter(formatter)
+
+    logger.addHandler(stream_handler)
+    logger.propagate = False
 
 
 configure_logger(0)
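The fix is the standard "library logger" pattern: attach a handler to the package's named logger and stop propagation, so `logging.basicConfig` in application code keeps control of the root logger. A generic sketch of the pattern, with the logger name `mylib` invented for illustration:

```python
import logging
import sys

logger = logging.getLogger("mylib")  # named logger, not the root logger


def configure_logger(debug=0):
    log_level = logging.DEBUG if debug == 1 else logging.INFO
    logger.setLevel(log_level)

    handler = logging.StreamHandler(stream=sys.stdout)
    handler.setLevel(log_level)
    handler.setFormatter(logging.Formatter("%(message)s"))

    logger.addHandler(handler)
    logger.propagate = False  # keep records away from the root logger


configure_logger(0)

# Application code is now free to configure the root logger itself, e.g.
# logging.basicConfig(filename="sample.log", level=logging.DEBUG)
```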
{"golden_diff": "diff --git a/hub/client/log.py b/hub/client/log.py\n--- a/hub/client/log.py\n+++ b/hub/client/log.py\n@@ -12,7 +12,17 @@\n \n def configure_logger(debug=0):\n log_level = logging.DEBUG if debug == 1 else logging.INFO\n- logging.basicConfig(format=\"%(message)s\", level=log_level, stream=sys.stdout)\n+ logger.setLevel(log_level)\n+\n+ stream_handler = logging.StreamHandler(stream=sys.stdout)\n+ stream_handler.setLevel(log_level)\n+\n+ formatter = logging.Formatter(\"%(message)s\")\n+\n+ stream_handler.setFormatter(formatter)\n+\n+ logger.addHandler(stream_handler)\n+ logger.propagate = False\n \n \n configure_logger(0)\n", "issue": "[FEATURE]Do not overwrite python logging configuration\n## \ud83d\udea8\ud83d\udea8 Feature Request\r\n\r\nBe able to use both `logging` and `hub` in the same python script\r\n\r\n### Current behavior\r\n\r\nI found the following code has undesired behavior: when adding the line `import hub`, the logging configuration will be overwritten.\r\n\r\n```python\r\nimport logging\r\nimport hub # this will overwrite the logging config\r\n\r\nlogging.basicConfig(filename = \"sample.log\", level = logging.DEBUG,\r\n format = '%(asctime)s: %(filename)s: %(levelname)s: %(message)s')\r\n\r\nlogger = logging.getLogger(__name__)\r\n\r\nlogger.debug(\"this is a debug message\")\r\nlogger.info(\"this is an info message\")\r\nlogger.warning(\"this is a warning message\")\r\nlogger.error(\"this is an error message\")\r\n\r\n```\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nLicense:\nThis Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0.\nIf a copy of the MPL was not distributed with this file, You can obtain one at https://mozilla.org/MPL/2.0/.\n\"\"\"\n\nimport logging\nimport sys\n\nlogger = logging.getLogger(\"hub\")\n\n\ndef configure_logger(debug=0):\n log_level = logging.DEBUG if debug == 1 else logging.INFO\n logging.basicConfig(format=\"%(message)s\", level=log_level, stream=sys.stdout)\n\n\nconfigure_logger(0)\n", "path": "hub/client/log.py"}]}
842
157
gh_patches_debug_24390
rasdani/github-patches
git_diff
pulp__pulpcore-4188
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Reclaim space for repository fails with Cannot delete some instances of model 'Artifact' because they are referenced through protected foreign keys: 'ContentArtifact.artifact'." **Version** 3.16, but probably all versions **Describe the bug** - Reclaim space for repository fails with the following error. ~~~ Task paused with error: "("Cannot delete some instances of model 'Artifact' because they are referenced through protected foreign keys: 'ContentArtifact.artifact'.", {<ContentArtifact: pk=452959ad-c045-4e85-bf9f-6651ba37f57d>})" ~~~ **To Reproduce** See BZ **Additional context** https://bugzilla.redhat.com/show_bug.cgi?id=2169322 </issue> <code> [start of pulpcore/app/tasks/reclaim_space.py] 1 from pulpcore.app.models import ( 2 Artifact, 3 Content, 4 ContentArtifact, 5 ProgressReport, 6 PublishedMetadata, 7 Repository, 8 RepositoryVersion, 9 ) 10 from pulpcore.app.util import get_domain 11 12 13 def reclaim_space(repo_pks, keeplist_rv_pks=None, force=False): 14 """ 15 This task frees-up disk space by removing Artifact files from the filesystem for Content 16 exclusive to the list of provided repos. 17 18 Note: content marked as `proctected` will be excluded from the reclaim disk space. 19 20 Kwargs: 21 repo_pks (list): A list of repo pks the disk reclaim space is performed on. 22 keeplist_rv_pks (list): A list of repo version pks that will be excluded from the reclaim 23 disk space. 24 force (bool): If True, uploaded content will be taken into account. 25 26 """ 27 reclaimed_repos = Repository.objects.filter(pk__in=repo_pks) 28 for repo in reclaimed_repos: 29 repo.invalidate_cache(everything=True) 30 31 domain = get_domain() 32 rest_of_repos = Repository.objects.filter(pulp_domain=domain).exclude(pk__in=repo_pks) 33 c_keep_qs = Content.objects.filter(repositories__in=rest_of_repos) 34 c_reclaim_qs = Content.objects.filter(repositories__in=repo_pks) 35 c_reclaim_qs = c_reclaim_qs.exclude( 36 pk__in=c_keep_qs, pulp_type=PublishedMetadata.get_pulp_type() 37 ) 38 39 if keeplist_rv_pks: 40 rv_qs = RepositoryVersion.objects.filter(pk__in=keeplist_rv_pks) 41 rv_content = Content.objects.none() 42 for rv in rv_qs.iterator(): 43 rv_content |= rv.content 44 c_reclaim_qs = c_reclaim_qs.exclude(pk__in=rv_content) 45 46 content_distinct = c_reclaim_qs.distinct("pulp_type") 47 unprotected = [] 48 for content in content_distinct: 49 if not content.cast().PROTECTED_FROM_RECLAIM: 50 unprotected.append(content.pulp_type) 51 52 ca_qs = ContentArtifact.objects.select_related("content", "artifact").filter( 53 content__in=c_reclaim_qs.values("pk"), artifact__isnull=False 54 ) 55 if not force: 56 ca_qs = ca_qs.filter(remoteartifact__isnull=False) 57 artifact_pks = set() 58 ca_to_update = [] 59 for ca in ca_qs.iterator(): 60 if ca.content.pulp_type in unprotected: 61 artifact_pks.add(ca.artifact.pk) 62 ca.artifact = None 63 ca_to_update.append(ca) 64 65 ContentArtifact.objects.bulk_update(objs=ca_to_update, fields=["artifact"], batch_size=1000) 66 artifacts_to_delete = Artifact.objects.filter(pk__in=artifact_pks) 67 progress_bar = ProgressReport( 68 message="Reclaim disk space", 69 total=artifacts_to_delete.count(), 70 code="reclaim-space.artifact", 71 done=0, 72 state="running", 73 ) 74 progress_bar.save() 75 76 counter = 0 77 interval = 100 78 for artifact in artifacts_to_delete.iterator(): 79 # we need to manually call delete() because it cleans up the file on the filesystem 80 artifact.delete() 
81 progress_bar.done += 1 82 counter += 1 83 84 if counter >= interval: 85 progress_bar.save() 86 counter = 0 87 88 progress_bar.state = "completed" 89 progress_bar.save() 90 [end of pulpcore/app/tasks/reclaim_space.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pulpcore/app/tasks/reclaim_space.py b/pulpcore/app/tasks/reclaim_space.py --- a/pulpcore/app/tasks/reclaim_space.py +++ b/pulpcore/app/tasks/reclaim_space.py @@ -1,3 +1,7 @@ +from logging import getLogger + +from django.db.models.deletion import ProtectedError + from pulpcore.app.models import ( Artifact, Content, @@ -9,6 +13,8 @@ ) from pulpcore.app.util import get_domain +log = getLogger(__name__) + def reclaim_space(repo_pks, keeplist_rv_pks=None, force=False): """ @@ -76,10 +82,16 @@ counter = 0 interval = 100 for artifact in artifacts_to_delete.iterator(): - # we need to manually call delete() because it cleans up the file on the filesystem - artifact.delete() - progress_bar.done += 1 - counter += 1 + try: + # we need to manually call delete() because it cleans up the file on the filesystem + artifact.delete() + except ProtectedError as e: + # Rarely artifact could be shared between to different content units. + # Just log and skip the artifact deletion in this case + log.info(e) + else: + progress_bar.done += 1 + counter += 1 if counter >= interval: progress_bar.save()
{"golden_diff": "diff --git a/pulpcore/app/tasks/reclaim_space.py b/pulpcore/app/tasks/reclaim_space.py\n--- a/pulpcore/app/tasks/reclaim_space.py\n+++ b/pulpcore/app/tasks/reclaim_space.py\n@@ -1,3 +1,7 @@\n+from logging import getLogger\n+\n+from django.db.models.deletion import ProtectedError\n+\n from pulpcore.app.models import (\n Artifact,\n Content,\n@@ -9,6 +13,8 @@\n )\n from pulpcore.app.util import get_domain\n \n+log = getLogger(__name__)\n+\n \n def reclaim_space(repo_pks, keeplist_rv_pks=None, force=False):\n \"\"\"\n@@ -76,10 +82,16 @@\n counter = 0\n interval = 100\n for artifact in artifacts_to_delete.iterator():\n- # we need to manually call delete() because it cleans up the file on the filesystem\n- artifact.delete()\n- progress_bar.done += 1\n- counter += 1\n+ try:\n+ # we need to manually call delete() because it cleans up the file on the filesystem\n+ artifact.delete()\n+ except ProtectedError as e:\n+ # Rarely artifact could be shared between to different content units.\n+ # Just log and skip the artifact deletion in this case\n+ log.info(e)\n+ else:\n+ progress_bar.done += 1\n+ counter += 1\n \n if counter >= interval:\n progress_bar.save()\n", "issue": "Reclaim space for repository fails with Cannot delete some instances of model 'Artifact' because they are referenced through protected foreign keys: 'ContentArtifact.artifact'.\"\n**Version**\r\n3.16, but probably all versions\r\n\r\n**Describe the bug**\r\n\r\n- Reclaim space for repository fails with the following error.\r\n\r\n ~~~\r\n Task paused with error: \"(\"Cannot delete some instances of model 'Artifact' because they are referenced through protected foreign keys: 'ContentArtifact.artifact'.\", {<ContentArtifact: pk=452959ad-c045-4e85-bf9f-6651ba37f57d>})\"\r\n ~~~\r\n\r\n**To Reproduce**\r\nSee BZ\r\n\r\n**Additional context**\r\nhttps://bugzilla.redhat.com/show_bug.cgi?id=2169322\r\n\n", "before_files": [{"content": "from pulpcore.app.models import (\n Artifact,\n Content,\n ContentArtifact,\n ProgressReport,\n PublishedMetadata,\n Repository,\n RepositoryVersion,\n)\nfrom pulpcore.app.util import get_domain\n\n\ndef reclaim_space(repo_pks, keeplist_rv_pks=None, force=False):\n \"\"\"\n This task frees-up disk space by removing Artifact files from the filesystem for Content\n exclusive to the list of provided repos.\n\n Note: content marked as `proctected` will be excluded from the reclaim disk space.\n\n Kwargs:\n repo_pks (list): A list of repo pks the disk reclaim space is performed on.\n keeplist_rv_pks (list): A list of repo version pks that will be excluded from the reclaim\n disk space.\n force (bool): If True, uploaded content will be taken into account.\n\n \"\"\"\n reclaimed_repos = Repository.objects.filter(pk__in=repo_pks)\n for repo in reclaimed_repos:\n repo.invalidate_cache(everything=True)\n\n domain = get_domain()\n rest_of_repos = Repository.objects.filter(pulp_domain=domain).exclude(pk__in=repo_pks)\n c_keep_qs = Content.objects.filter(repositories__in=rest_of_repos)\n c_reclaim_qs = Content.objects.filter(repositories__in=repo_pks)\n c_reclaim_qs = c_reclaim_qs.exclude(\n pk__in=c_keep_qs, pulp_type=PublishedMetadata.get_pulp_type()\n )\n\n if keeplist_rv_pks:\n rv_qs = RepositoryVersion.objects.filter(pk__in=keeplist_rv_pks)\n rv_content = Content.objects.none()\n for rv in rv_qs.iterator():\n rv_content |= rv.content\n c_reclaim_qs = c_reclaim_qs.exclude(pk__in=rv_content)\n\n content_distinct = c_reclaim_qs.distinct(\"pulp_type\")\n unprotected = []\n for content in 
content_distinct:\n if not content.cast().PROTECTED_FROM_RECLAIM:\n unprotected.append(content.pulp_type)\n\n ca_qs = ContentArtifact.objects.select_related(\"content\", \"artifact\").filter(\n content__in=c_reclaim_qs.values(\"pk\"), artifact__isnull=False\n )\n if not force:\n ca_qs = ca_qs.filter(remoteartifact__isnull=False)\n artifact_pks = set()\n ca_to_update = []\n for ca in ca_qs.iterator():\n if ca.content.pulp_type in unprotected:\n artifact_pks.add(ca.artifact.pk)\n ca.artifact = None\n ca_to_update.append(ca)\n\n ContentArtifact.objects.bulk_update(objs=ca_to_update, fields=[\"artifact\"], batch_size=1000)\n artifacts_to_delete = Artifact.objects.filter(pk__in=artifact_pks)\n progress_bar = ProgressReport(\n message=\"Reclaim disk space\",\n total=artifacts_to_delete.count(),\n code=\"reclaim-space.artifact\",\n done=0,\n state=\"running\",\n )\n progress_bar.save()\n\n counter = 0\n interval = 100\n for artifact in artifacts_to_delete.iterator():\n # we need to manually call delete() because it cleans up the file on the filesystem\n artifact.delete()\n progress_bar.done += 1\n counter += 1\n\n if counter >= interval:\n progress_bar.save()\n counter = 0\n\n progress_bar.state = \"completed\"\n progress_bar.save()\n", "path": "pulpcore/app/tasks/reclaim_space.py"}]}
1,627
322
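One remark on the reclaim-space record above before the next entry: the core of the fix is tolerating Django's `ProtectedError` for the rare artifact that is still referenced by another content unit. A generic sketch of that pattern, with the helper name invented for illustration:

```python
from logging import getLogger

from django.db.models.deletion import ProtectedError

log = getLogger(__name__)


def delete_tolerating_protected(queryset):
    """Delete what can be deleted; log and skip rows protected by a FK."""
    deleted = skipped = 0
    for obj in queryset.iterator():
        try:
            obj.delete()
        except ProtectedError as exc:
            log.info(exc)  # still referenced elsewhere; leave it in place
            skipped += 1
        else:
            deleted += 1
    return deleted, skipped
```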
gh_patches_debug_42862
rasdani/github-patches
git_diff
Textualize__textual-3830
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Make Title of a Collapsible Widget Editable Can not update title property of the Collapsible widget , looking at CollapsibleTitle class it seems to only set at init Looks like this was an intentional choice ? But I think it would be helpful to make the title property editable , Eg: changing content in the collapsed state and don't want to toggle it open to get a quick status update, so write something in .title... ~~~ from textual.app import App, ComposeResult from textual.containers import Container, Horizontal, VerticalScroll from textual.widgets import Header, Footer, Collapsible, Placeholder class AnApp(App): TITLE = "Collapsible Titles Should Update" BINDINGS = [("d", "toggle_dark", "Update"), ("c", "collapse_or_expand(True)", "Collapse"), ("e", "collapse_or_expand(False)", "Expand")] def action_collapse_or_expand(self, collapse: bool) -> None: for child in self.walk_children(Collapsible): child.collapsed = collapse def compose(self) -> ComposeResult: """Create child widgets for the app.""" yield Header(show_clock=True) yield VerticalScroll( Collapsible( Horizontal( Placeholder(variant="text"), Placeholder(variant="size"), Placeholder(variant="text"), ), title="Top_Stuff",id="top" ) ) yield Footer() def action_toggle_dark(self) -> None: """An action to toggle dark mode.""" top_collapsible_widget = self.query_one("#top") top_collapsible_widget.title = "UPDATED_Top_Stuff" self.app.title = "UPDATED_App_Title" self.dark = not self.dark if __name__ == "__main__": app = AnApp() app.run() ~~~ </issue> <code> [start of src/textual/widgets/_collapsible.py] 1 from __future__ import annotations 2 3 from rich.console import RenderableType 4 from rich.text import Text 5 6 from .. import events 7 from ..app import ComposeResult 8 from ..binding import Binding 9 from ..containers import Container 10 from ..css.query import NoMatches 11 from ..message import Message 12 from ..reactive import reactive 13 from ..widget import Widget 14 15 __all__ = ["Collapsible", "CollapsibleTitle"] 16 17 18 class CollapsibleTitle(Widget, can_focus=True): 19 """Title and symbol for the Collapsible.""" 20 21 DEFAULT_CSS = """ 22 CollapsibleTitle { 23 width: auto; 24 height: auto; 25 padding: 0 1 0 1; 26 } 27 28 CollapsibleTitle:hover { 29 background: $foreground 10%; 30 color: $text; 31 } 32 33 CollapsibleTitle:focus { 34 background: $accent; 35 color: $text; 36 } 37 """ 38 39 BINDINGS = [Binding("enter", "toggle", "Toggle collapsible", show=False)] 40 """ 41 | Key(s) | Description | 42 | :- | :- | 43 | enter | Toggle the collapsible. 
| 44 """ 45 46 collapsed = reactive(True) 47 48 def __init__( 49 self, 50 *, 51 label: str, 52 collapsed_symbol: str, 53 expanded_symbol: str, 54 collapsed: bool, 55 ) -> None: 56 super().__init__() 57 self.collapsed_symbol = collapsed_symbol 58 self.expanded_symbol = expanded_symbol 59 self.label = label 60 self.collapse = collapsed 61 62 class Toggle(Message): 63 """Request toggle.""" 64 65 async def _on_click(self, event: events.Click) -> None: 66 """Inform ancestor we want to toggle.""" 67 event.stop() 68 self.post_message(self.Toggle()) 69 70 def action_toggle(self) -> None: 71 """Toggle the state of the parent collapsible.""" 72 self.post_message(self.Toggle()) 73 74 def render(self) -> RenderableType: 75 """Compose right/down arrow and label.""" 76 if self.collapsed: 77 return Text(f"{self.collapsed_symbol} {self.label}") 78 else: 79 return Text(f"{self.expanded_symbol} {self.label}") 80 81 82 class Collapsible(Widget): 83 """A collapsible container.""" 84 85 collapsed = reactive(True) 86 87 DEFAULT_CSS = """ 88 Collapsible { 89 width: 1fr; 90 height: auto; 91 background: $boost; 92 border-top: hkey $background; 93 padding-bottom: 1; 94 padding-left: 1; 95 } 96 97 Collapsible.-collapsed > Contents { 98 display: none; 99 } 100 """ 101 102 class Toggled(Message): 103 """Parent class subclassed by `Collapsible` messages. 104 105 Can be handled with `on(Collapsible.Toggled)` if you want to handle expansions 106 and collapsed in the same way, or you can handle the specific events individually. 107 """ 108 109 def __init__(self, collapsible: Collapsible) -> None: 110 """Create an instance of the message. 111 112 Args: 113 collapsible: The `Collapsible` widget that was toggled. 114 """ 115 self.collapsible: Collapsible = collapsible 116 """The collapsible that was toggled.""" 117 super().__init__() 118 119 @property 120 def control(self) -> Collapsible: 121 """An alias for [Toggled.collapsible][textual.widgets.Collapsible.Toggled.collapsible].""" 122 return self.collapsible 123 124 class Expanded(Toggled): 125 """Event sent when the `Collapsible` widget is expanded. 126 127 Can be handled using `on_collapsible_expanded` in a subclass of 128 [`Collapsible`][textual.widgets.Collapsible] or in a parent widget in the DOM. 129 """ 130 131 class Collapsed(Toggled): 132 """Event sent when the `Collapsible` widget is collapsed. 133 134 Can be handled using `on_collapsible_collapsed` in a subclass of 135 [`Collapsible`][textual.widgets.Collapsible] or in a parent widget in the DOM. 136 """ 137 138 class Contents(Container): 139 DEFAULT_CSS = """ 140 Contents { 141 width: 100%; 142 height: auto; 143 padding: 1 0 0 3; 144 } 145 """ 146 147 def __init__( 148 self, 149 *children: Widget, 150 title: str = "Toggle", 151 collapsed: bool = True, 152 collapsed_symbol: str = "β–Ά", 153 expanded_symbol: str = "β–Ό", 154 name: str | None = None, 155 id: str | None = None, 156 classes: str | None = None, 157 disabled: bool = False, 158 ) -> None: 159 """Initialize a Collapsible widget. 160 161 Args: 162 *children: Contents that will be collapsed/expanded. 163 title: Title of the collapsed/expanded contents. 164 collapsed: Default status of the contents. 165 collapsed_symbol: Collapsed symbol before the title. 166 expanded_symbol: Expanded symbol before the title. 167 name: The name of the collapsible. 168 id: The ID of the collapsible in the DOM. 169 classes: The CSS classes of the collapsible. 170 disabled: Whether the collapsible is disabled or not. 
171 """ 172 self._title = CollapsibleTitle( 173 label=title, 174 collapsed_symbol=collapsed_symbol, 175 expanded_symbol=expanded_symbol, 176 collapsed=collapsed, 177 ) 178 self._contents_list: list[Widget] = list(children) 179 super().__init__(name=name, id=id, classes=classes, disabled=disabled) 180 self.collapsed = collapsed 181 182 def _on_collapsible_title_toggle(self, event: CollapsibleTitle.Toggle) -> None: 183 event.stop() 184 self.collapsed = not self.collapsed 185 if self.collapsed: 186 self.post_message(self.Collapsed(self)) 187 else: 188 self.post_message(self.Expanded(self)) 189 190 def _watch_collapsed(self, collapsed: bool) -> None: 191 """Update collapsed state when reactive is changed.""" 192 self._update_collapsed(collapsed) 193 194 def _update_collapsed(self, collapsed: bool) -> None: 195 """Update children to match collapsed state.""" 196 try: 197 self._title.collapsed = collapsed 198 self.set_class(collapsed, "-collapsed") 199 except NoMatches: 200 pass 201 202 def _on_mount(self, event: events.Mount) -> None: 203 """Initialise collapsed state.""" 204 self._update_collapsed(self.collapsed) 205 206 def compose(self) -> ComposeResult: 207 yield self._title 208 yield self.Contents(*self._contents_list) 209 210 def compose_add_child(self, widget: Widget) -> None: 211 """When using the context manager compose syntax, we want to attach nodes to the contents. 212 213 Args: 214 widget: A Widget to add. 215 """ 216 self._contents_list.append(widget) 217 [end of src/textual/widgets/_collapsible.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/textual/widgets/_collapsible.py b/src/textual/widgets/_collapsible.py --- a/src/textual/widgets/_collapsible.py +++ b/src/textual/widgets/_collapsible.py @@ -1,8 +1,5 @@ from __future__ import annotations -from rich.console import RenderableType -from rich.text import Text - from .. import events from ..app import ComposeResult from ..binding import Binding @@ -11,11 +8,12 @@ from ..message import Message from ..reactive import reactive from ..widget import Widget +from ..widgets import Static __all__ = ["Collapsible", "CollapsibleTitle"] -class CollapsibleTitle(Widget, can_focus=True): +class CollapsibleTitle(Static, can_focus=True): """Title and symbol for the Collapsible.""" DEFAULT_CSS = """ @@ -44,6 +42,7 @@ """ collapsed = reactive(True) + label = reactive("Toggle") def __init__( self, @@ -57,7 +56,9 @@ self.collapsed_symbol = collapsed_symbol self.expanded_symbol = expanded_symbol self.label = label - self.collapse = collapsed + self.collapsed = collapsed + self._collapsed_label = f"{collapsed_symbol} {label}" + self._expanded_label = f"{expanded_symbol} {label}" class Toggle(Message): """Request toggle.""" @@ -71,18 +72,26 @@ """Toggle the state of the parent collapsible.""" self.post_message(self.Toggle()) - def render(self) -> RenderableType: - """Compose right/down arrow and label.""" + def _watch_label(self, label: str) -> None: + self._collapsed_label = f"{self.collapsed_symbol} {label}" + self._expanded_label = f"{self.expanded_symbol} {label}" if self.collapsed: - return Text(f"{self.collapsed_symbol} {self.label}") + self.update(self._collapsed_label) + else: + self.update(self._expanded_label) + + def _watch_collapsed(self, collapsed: bool) -> None: + if collapsed: + self.update(self._collapsed_label) else: - return Text(f"{self.expanded_symbol} {self.label}") + self.update(self._expanded_label) class Collapsible(Widget): """A collapsible container.""" collapsed = reactive(True) + title = reactive("Toggle") DEFAULT_CSS = """ Collapsible { @@ -169,14 +178,15 @@ classes: The CSS classes of the collapsible. disabled: Whether the collapsible is disabled or not. """ + super().__init__(name=name, id=id, classes=classes, disabled=disabled) self._title = CollapsibleTitle( label=title, collapsed_symbol=collapsed_symbol, expanded_symbol=expanded_symbol, collapsed=collapsed, ) + self.title = title self._contents_list: list[Widget] = list(children) - super().__init__(name=name, id=id, classes=classes, disabled=disabled) self.collapsed = collapsed def _on_collapsible_title_toggle(self, event: CollapsibleTitle.Toggle) -> None: @@ -214,3 +224,6 @@ widget: A Widget to add. """ self._contents_list.append(widget) + + def _watch_title(self, title: str) -> None: + self._title.label = title
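Assuming the patch above is applied (both `title` and `label` become reactives with watchers), retitling a collapsible at runtime reduces to assigning the attribute. The minimal sketch below is illustrative only; the app, widget id, and key binding are invented:

```python
from textual.app import App, ComposeResult
from textual.widgets import Collapsible, Label


class TitleDemo(App):
    BINDINGS = [("u", "rename", "Rename")]

    def compose(self) -> ComposeResult:
        yield Collapsible(Label("status: pending"), title="Top_Stuff", id="top")

    def action_rename(self) -> None:
        # The reactive `title` and its watcher re-render the CollapsibleTitle
        # in place, even while the widget stays collapsed.
        self.query_one("#top", Collapsible).title = "UPDATED_Top_Stuff"


if __name__ == "__main__":
    TitleDemo().run()
```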
{"golden_diff": "diff --git a/src/textual/widgets/_collapsible.py b/src/textual/widgets/_collapsible.py\n--- a/src/textual/widgets/_collapsible.py\n+++ b/src/textual/widgets/_collapsible.py\n@@ -1,8 +1,5 @@\n from __future__ import annotations\n \n-from rich.console import RenderableType\n-from rich.text import Text\n-\n from .. import events\n from ..app import ComposeResult\n from ..binding import Binding\n@@ -11,11 +8,12 @@\n from ..message import Message\n from ..reactive import reactive\n from ..widget import Widget\n+from ..widgets import Static\n \n __all__ = [\"Collapsible\", \"CollapsibleTitle\"]\n \n \n-class CollapsibleTitle(Widget, can_focus=True):\n+class CollapsibleTitle(Static, can_focus=True):\n \"\"\"Title and symbol for the Collapsible.\"\"\"\n \n DEFAULT_CSS = \"\"\"\n@@ -44,6 +42,7 @@\n \"\"\"\n \n collapsed = reactive(True)\n+ label = reactive(\"Toggle\")\n \n def __init__(\n self,\n@@ -57,7 +56,9 @@\n self.collapsed_symbol = collapsed_symbol\n self.expanded_symbol = expanded_symbol\n self.label = label\n- self.collapse = collapsed\n+ self.collapsed = collapsed\n+ self._collapsed_label = f\"{collapsed_symbol} {label}\"\n+ self._expanded_label = f\"{expanded_symbol} {label}\"\n \n class Toggle(Message):\n \"\"\"Request toggle.\"\"\"\n@@ -71,18 +72,26 @@\n \"\"\"Toggle the state of the parent collapsible.\"\"\"\n self.post_message(self.Toggle())\n \n- def render(self) -> RenderableType:\n- \"\"\"Compose right/down arrow and label.\"\"\"\n+ def _watch_label(self, label: str) -> None:\n+ self._collapsed_label = f\"{self.collapsed_symbol} {label}\"\n+ self._expanded_label = f\"{self.expanded_symbol} {label}\"\n if self.collapsed:\n- return Text(f\"{self.collapsed_symbol} {self.label}\")\n+ self.update(self._collapsed_label)\n+ else:\n+ self.update(self._expanded_label)\n+\n+ def _watch_collapsed(self, collapsed: bool) -> None:\n+ if collapsed:\n+ self.update(self._collapsed_label)\n else:\n- return Text(f\"{self.expanded_symbol} {self.label}\")\n+ self.update(self._expanded_label)\n \n \n class Collapsible(Widget):\n \"\"\"A collapsible container.\"\"\"\n \n collapsed = reactive(True)\n+ title = reactive(\"Toggle\")\n \n DEFAULT_CSS = \"\"\"\n Collapsible {\n@@ -169,14 +178,15 @@\n classes: The CSS classes of the collapsible.\n disabled: Whether the collapsible is disabled or not.\n \"\"\"\n+ super().__init__(name=name, id=id, classes=classes, disabled=disabled)\n self._title = CollapsibleTitle(\n label=title,\n collapsed_symbol=collapsed_symbol,\n expanded_symbol=expanded_symbol,\n collapsed=collapsed,\n )\n+ self.title = title\n self._contents_list: list[Widget] = list(children)\n- super().__init__(name=name, id=id, classes=classes, disabled=disabled)\n self.collapsed = collapsed\n \n def _on_collapsible_title_toggle(self, event: CollapsibleTitle.Toggle) -> None:\n@@ -214,3 +224,6 @@\n widget: A Widget to add.\n \"\"\"\n self._contents_list.append(widget)\n+\n+ def _watch_title(self, title: str) -> None:\n+ self._title.label = title\n", "issue": "Make Title of a Collapsible Widget Editable\nCan not update title property of the Collapsible widget , looking at CollapsibleTitle class it seems to only set at init\r\nLooks like this was an intentional choice ?\r\nBut I think it would be helpful to make the title property editable ,\r\nEg: changing content in the collapsed state and don't want to toggle it open to get a quick status update, so write something in .title... 
\r\n\r\n~~~\r\nfrom textual.app import App, ComposeResult\r\nfrom textual.containers import Container, Horizontal, VerticalScroll\r\nfrom textual.widgets import Header, Footer, Collapsible, Placeholder\r\n\r\nclass AnApp(App):\r\n\r\n TITLE = \"Collapsible Titles Should Update\"\r\n\r\n BINDINGS = [(\"d\", \"toggle_dark\", \"Update\"),\r\n (\"c\", \"collapse_or_expand(True)\", \"Collapse\"),\r\n (\"e\", \"collapse_or_expand(False)\", \"Expand\")]\r\n \r\n def action_collapse_or_expand(self, collapse: bool) -> None:\r\n for child in self.walk_children(Collapsible):\r\n child.collapsed = collapse\r\n\r\n def compose(self) -> ComposeResult:\r\n \"\"\"Create child widgets for the app.\"\"\"\r\n yield Header(show_clock=True)\r\n yield VerticalScroll(\r\n Collapsible(\r\n Horizontal(\r\n Placeholder(variant=\"text\"),\r\n Placeholder(variant=\"size\"),\r\n Placeholder(variant=\"text\"),\r\n ),\r\n title=\"Top_Stuff\",id=\"top\"\r\n )\r\n )\r\n yield Footer()\r\n\r\n def action_toggle_dark(self) -> None:\r\n \"\"\"An action to toggle dark mode.\"\"\"\r\n top_collapsible_widget = self.query_one(\"#top\")\r\n top_collapsible_widget.title = \"UPDATED_Top_Stuff\"\r\n self.app.title = \"UPDATED_App_Title\"\r\n self.dark = not self.dark\r\n\r\n\r\nif __name__ == \"__main__\":\r\n app = AnApp()\r\n app.run()\r\n~~~\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom rich.console import RenderableType\nfrom rich.text import Text\n\nfrom .. import events\nfrom ..app import ComposeResult\nfrom ..binding import Binding\nfrom ..containers import Container\nfrom ..css.query import NoMatches\nfrom ..message import Message\nfrom ..reactive import reactive\nfrom ..widget import Widget\n\n__all__ = [\"Collapsible\", \"CollapsibleTitle\"]\n\n\nclass CollapsibleTitle(Widget, can_focus=True):\n \"\"\"Title and symbol for the Collapsible.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n CollapsibleTitle {\n width: auto;\n height: auto;\n padding: 0 1 0 1;\n }\n\n CollapsibleTitle:hover {\n background: $foreground 10%;\n color: $text;\n }\n\n CollapsibleTitle:focus {\n background: $accent;\n color: $text;\n }\n \"\"\"\n\n BINDINGS = [Binding(\"enter\", \"toggle\", \"Toggle collapsible\", show=False)]\n \"\"\"\n | Key(s) | Description |\n | :- | :- |\n | enter | Toggle the collapsible. 
|\n \"\"\"\n\n collapsed = reactive(True)\n\n def __init__(\n self,\n *,\n label: str,\n collapsed_symbol: str,\n expanded_symbol: str,\n collapsed: bool,\n ) -> None:\n super().__init__()\n self.collapsed_symbol = collapsed_symbol\n self.expanded_symbol = expanded_symbol\n self.label = label\n self.collapse = collapsed\n\n class Toggle(Message):\n \"\"\"Request toggle.\"\"\"\n\n async def _on_click(self, event: events.Click) -> None:\n \"\"\"Inform ancestor we want to toggle.\"\"\"\n event.stop()\n self.post_message(self.Toggle())\n\n def action_toggle(self) -> None:\n \"\"\"Toggle the state of the parent collapsible.\"\"\"\n self.post_message(self.Toggle())\n\n def render(self) -> RenderableType:\n \"\"\"Compose right/down arrow and label.\"\"\"\n if self.collapsed:\n return Text(f\"{self.collapsed_symbol} {self.label}\")\n else:\n return Text(f\"{self.expanded_symbol} {self.label}\")\n\n\nclass Collapsible(Widget):\n \"\"\"A collapsible container.\"\"\"\n\n collapsed = reactive(True)\n\n DEFAULT_CSS = \"\"\"\n Collapsible {\n width: 1fr;\n height: auto;\n background: $boost;\n border-top: hkey $background;\n padding-bottom: 1;\n padding-left: 1;\n }\n\n Collapsible.-collapsed > Contents {\n display: none;\n }\n \"\"\"\n\n class Toggled(Message):\n \"\"\"Parent class subclassed by `Collapsible` messages.\n\n Can be handled with `on(Collapsible.Toggled)` if you want to handle expansions\n and collapsed in the same way, or you can handle the specific events individually.\n \"\"\"\n\n def __init__(self, collapsible: Collapsible) -> None:\n \"\"\"Create an instance of the message.\n\n Args:\n collapsible: The `Collapsible` widget that was toggled.\n \"\"\"\n self.collapsible: Collapsible = collapsible\n \"\"\"The collapsible that was toggled.\"\"\"\n super().__init__()\n\n @property\n def control(self) -> Collapsible:\n \"\"\"An alias for [Toggled.collapsible][textual.widgets.Collapsible.Toggled.collapsible].\"\"\"\n return self.collapsible\n\n class Expanded(Toggled):\n \"\"\"Event sent when the `Collapsible` widget is expanded.\n\n Can be handled using `on_collapsible_expanded` in a subclass of\n [`Collapsible`][textual.widgets.Collapsible] or in a parent widget in the DOM.\n \"\"\"\n\n class Collapsed(Toggled):\n \"\"\"Event sent when the `Collapsible` widget is collapsed.\n\n Can be handled using `on_collapsible_collapsed` in a subclass of\n [`Collapsible`][textual.widgets.Collapsible] or in a parent widget in the DOM.\n \"\"\"\n\n class Contents(Container):\n DEFAULT_CSS = \"\"\"\n Contents {\n width: 100%;\n height: auto;\n padding: 1 0 0 3;\n }\n \"\"\"\n\n def __init__(\n self,\n *children: Widget,\n title: str = \"Toggle\",\n collapsed: bool = True,\n collapsed_symbol: str = \"\u25b6\",\n expanded_symbol: str = \"\u25bc\",\n name: str | None = None,\n id: str | None = None,\n classes: str | None = None,\n disabled: bool = False,\n ) -> None:\n \"\"\"Initialize a Collapsible widget.\n\n Args:\n *children: Contents that will be collapsed/expanded.\n title: Title of the collapsed/expanded contents.\n collapsed: Default status of the contents.\n collapsed_symbol: Collapsed symbol before the title.\n expanded_symbol: Expanded symbol before the title.\n name: The name of the collapsible.\n id: The ID of the collapsible in the DOM.\n classes: The CSS classes of the collapsible.\n disabled: Whether the collapsible is disabled or not.\n \"\"\"\n self._title = CollapsibleTitle(\n label=title,\n collapsed_symbol=collapsed_symbol,\n expanded_symbol=expanded_symbol,\n collapsed=collapsed,\n )\n 
self._contents_list: list[Widget] = list(children)\n super().__init__(name=name, id=id, classes=classes, disabled=disabled)\n self.collapsed = collapsed\n\n def _on_collapsible_title_toggle(self, event: CollapsibleTitle.Toggle) -> None:\n event.stop()\n self.collapsed = not self.collapsed\n if self.collapsed:\n self.post_message(self.Collapsed(self))\n else:\n self.post_message(self.Expanded(self))\n\n def _watch_collapsed(self, collapsed: bool) -> None:\n \"\"\"Update collapsed state when reactive is changed.\"\"\"\n self._update_collapsed(collapsed)\n\n def _update_collapsed(self, collapsed: bool) -> None:\n \"\"\"Update children to match collapsed state.\"\"\"\n try:\n self._title.collapsed = collapsed\n self.set_class(collapsed, \"-collapsed\")\n except NoMatches:\n pass\n\n def _on_mount(self, event: events.Mount) -> None:\n \"\"\"Initialise collapsed state.\"\"\"\n self._update_collapsed(self.collapsed)\n\n def compose(self) -> ComposeResult:\n yield self._title\n yield self.Contents(*self._contents_list)\n\n def compose_add_child(self, widget: Widget) -> None:\n \"\"\"When using the context manager compose syntax, we want to attach nodes to the contents.\n\n Args:\n widget: A Widget to add.\n \"\"\"\n self._contents_list.append(widget)\n", "path": "src/textual/widgets/_collapsible.py"}]}
2,906
781
gh_patches_debug_53978
rasdani/github-patches
git_diff
scikit-hep__pyhf-1091
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Add CodeMeta JSON-LD Context File # Description As part of an IRIS-HEP initiative to improve the citation quality/ease for all software it has been recommended by @danielskatz that projects: - Track authors/contributors to software as it is developed - Create a citation metadata file in the software repo and keep it up to date - Link Github and Zenodo to get DOIs for major releases - In the README and docs clearly says how to cite the software `pyhf` already does all of this, as: - [x] There is an [`AUTHORS` file](https://github.com/scikit-hep/pyhf/blob/31d34a0edd91593d75181cc3ad71563a52600793/AUTHORS) as well as a [listing of the authors in the docs](https://scikit-hep.org/pyhf/index.html#authors) - [x] There is a [`.zenodo.json`](https://github.com/scikit-hep/pyhf/blob/2b0a1e2ba41a2aa1233511524e3aa78e696ead34/.zenodo.json) that formats Zenodo with the correct metadata and adds the ORCID of the authors - [x] There is a Zenodo DOI that is kept in sync with our release schedule (almost) fully automatically (almost as it requires 1 button click). [![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.1169739.svg)](https://doi.org/10.5281/zenodo.1169739) - [x] There is a Citation section in both the [`README`](https://github.com/scikit-hep/pyhf/blob/2b0a1e2ba41a2aa1233511524e3aa78e696ead34/README.rst#citation) and the [docs webpage](https://scikit-hep.org/pyhf/citations.html) with the preferred citation It has been pointed out though that we should also look into adding a [CodeMeta Project JSON-LD Context File](https://codemeta.github.io/jsonld/) as with it > zenodo will read [it] and will be basically the same as [schema.org](http://schema.org/) so that this will help google index software, and tools be able to use the metadata file in [softwareheritage.org](http://softwareheritage.org/) to automatically build a citation for a particular commit. 
(cc @gordonwatts as this might be of interest) </issue> <code> [start of setup.py] 1 from setuptools import setup 2 3 extras_require = { 4 'shellcomplete': ['click_completion'], 5 'tensorflow': [ 6 'tensorflow~=2.2.0', # TensorFlow minor releases are as volatile as major 7 'tensorflow-probability~=0.10.0', 8 ], 9 'torch': ['torch~=1.2'], 10 'jax': ['jax~=0.1,>0.1.51', 'jaxlib~=0.1,>0.1.33'], 11 'xmlio': ['uproot~=3.6'], # Future proof against uproot4 API changes 12 'minuit': ['iminuit~=1.4.3'], # v1.5.0 breaks pyhf for 32b TensorFlow and PyTorch 13 } 14 extras_require['backends'] = sorted( 15 set( 16 extras_require['tensorflow'] 17 + extras_require['torch'] 18 + extras_require['jax'] 19 + extras_require['minuit'] 20 ) 21 ) 22 extras_require['contrib'] = sorted(set(['matplotlib', 'requests'])) 23 extras_require['lint'] = sorted(set(['pyflakes', 'black'])) 24 25 extras_require['test'] = sorted( 26 set( 27 extras_require['backends'] 28 + extras_require['xmlio'] 29 + extras_require['contrib'] 30 + extras_require['shellcomplete'] 31 + [ 32 'pytest~=6.0', 33 'pytest-cov>=2.5.1', 34 'pytest-mock', 35 'pytest-benchmark[histogram]', 36 'pytest-console-scripts', 37 'pytest-mpl', 38 'pydocstyle', 39 'coverage>=4.0', # coveralls 40 'papermill~=2.0', 41 'nteract-scrapbook~=0.2', 42 'jupyter', 43 'uproot~=3.3', 44 'graphviz', 45 'jsonpatch', 46 ] 47 ) 48 ) 49 extras_require['docs'] = sorted( 50 set( 51 [ 52 'sphinx>=3.1.2', 53 'sphinxcontrib-bibtex', 54 'sphinx-click', 55 'sphinx_rtd_theme', 56 'nbsphinx', 57 'ipywidgets', 58 'sphinx-issues', 59 'sphinx-copybutton>0.2.9', 60 ] 61 ) 62 ) 63 extras_require['develop'] = sorted( 64 set( 65 extras_require['docs'] 66 + extras_require['lint'] 67 + extras_require['test'] 68 + ['nbdime', 'bump2version', 'ipython', 'pre-commit', 'check-manifest', 'twine'] 69 ) 70 ) 71 extras_require['complete'] = sorted(set(sum(extras_require.values(), []))) 72 73 74 setup( 75 extras_require=extras_require, 76 use_scm_version=lambda: {'local_scheme': lambda version: ''}, 77 ) 78 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -65,7 +65,15 @@ extras_require['docs'] + extras_require['lint'] + extras_require['test'] - + ['nbdime', 'bump2version', 'ipython', 'pre-commit', 'check-manifest', 'twine'] + + [ + 'nbdime', + 'bump2version', + 'ipython', + 'pre-commit', + 'check-manifest', + 'codemetapy>=0.3.4', + 'twine', + ] ) ) extras_require['complete'] = sorted(set(sum(extras_require.values(), [])))
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -65,7 +65,15 @@\n extras_require['docs']\n + extras_require['lint']\n + extras_require['test']\n- + ['nbdime', 'bump2version', 'ipython', 'pre-commit', 'check-manifest', 'twine']\n+ + [\n+ 'nbdime',\n+ 'bump2version',\n+ 'ipython',\n+ 'pre-commit',\n+ 'check-manifest',\n+ 'codemetapy>=0.3.4',\n+ 'twine',\n+ ]\n )\n )\n extras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n", "issue": "Add CodeMeta JSON-LD Context File\n# Description\r\n\r\nAs part of an IRIS-HEP initiative to improve the citation quality/ease for all software it has been recommended by @danielskatz that projects:\r\n- Track authors/contributors to software as it is developed\r\n- Create a citation metadata file in the software repo and keep it up to date\r\n- Link Github and Zenodo to get DOIs for major releases\r\n- In the README and docs clearly says how to cite the software\r\n\r\n`pyhf` already does all of this, as:\r\n- [x] There is an [`AUTHORS` file](https://github.com/scikit-hep/pyhf/blob/31d34a0edd91593d75181cc3ad71563a52600793/AUTHORS) as well as a [listing of the authors in the docs](https://scikit-hep.org/pyhf/index.html#authors)\r\n- [x] There is a [`.zenodo.json`](https://github.com/scikit-hep/pyhf/blob/2b0a1e2ba41a2aa1233511524e3aa78e696ead34/.zenodo.json) that formats Zenodo with the correct metadata and adds the ORCID of the authors\r\n- [x] There is a Zenodo DOI that is kept in sync with our release schedule (almost) fully automatically (almost as it requires 1 button click). [![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.1169739.svg)](https://doi.org/10.5281/zenodo.1169739)\r\n- [x] There is a Citation section in both the [`README`](https://github.com/scikit-hep/pyhf/blob/2b0a1e2ba41a2aa1233511524e3aa78e696ead34/README.rst#citation) and the [docs webpage](https://scikit-hep.org/pyhf/citations.html) with the preferred citation\r\n\r\nIt has been pointed out though that we should also look into adding a [CodeMeta Project JSON-LD Context File](https://codemeta.github.io/jsonld/) as with it\r\n\r\n> zenodo will read [it] and will be basically the same as [schema.org](http://schema.org/) so that this will help google index software, and tools be able to use the metadata file in [softwareheritage.org](http://softwareheritage.org/) to automatically build a citation for a particular commit.\r\n\r\n(cc @gordonwatts as this might be of interest)\n", "before_files": [{"content": "from setuptools import setup\n\nextras_require = {\n 'shellcomplete': ['click_completion'],\n 'tensorflow': [\n 'tensorflow~=2.2.0', # TensorFlow minor releases are as volatile as major\n 'tensorflow-probability~=0.10.0',\n ],\n 'torch': ['torch~=1.2'],\n 'jax': ['jax~=0.1,>0.1.51', 'jaxlib~=0.1,>0.1.33'],\n 'xmlio': ['uproot~=3.6'], # Future proof against uproot4 API changes\n 'minuit': ['iminuit~=1.4.3'], # v1.5.0 breaks pyhf for 32b TensorFlow and PyTorch\n}\nextras_require['backends'] = sorted(\n set(\n extras_require['tensorflow']\n + extras_require['torch']\n + extras_require['jax']\n + extras_require['minuit']\n )\n)\nextras_require['contrib'] = sorted(set(['matplotlib', 'requests']))\nextras_require['lint'] = sorted(set(['pyflakes', 'black']))\n\nextras_require['test'] = sorted(\n set(\n extras_require['backends']\n + extras_require['xmlio']\n + extras_require['contrib']\n + extras_require['shellcomplete']\n + [\n 'pytest~=6.0',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'pytest-benchmark[histogram]',\n 
'pytest-console-scripts',\n 'pytest-mpl',\n 'pydocstyle',\n 'coverage>=4.0', # coveralls\n 'papermill~=2.0',\n 'nteract-scrapbook~=0.2',\n 'jupyter',\n 'uproot~=3.3',\n 'graphviz',\n 'jsonpatch',\n ]\n )\n)\nextras_require['docs'] = sorted(\n set(\n [\n 'sphinx>=3.1.2',\n 'sphinxcontrib-bibtex',\n 'sphinx-click',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'ipywidgets',\n 'sphinx-issues',\n 'sphinx-copybutton>0.2.9',\n ]\n )\n)\nextras_require['develop'] = sorted(\n set(\n extras_require['docs']\n + extras_require['lint']\n + extras_require['test']\n + ['nbdime', 'bump2version', 'ipython', 'pre-commit', 'check-manifest', 'twine']\n )\n)\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\n\nsetup(\n extras_require=extras_require,\n use_scm_version=lambda: {'local_scheme': lambda version: ''},\n)\n", "path": "setup.py"}]}
1,864
163
gh_patches_debug_9416
rasdani/github-patches
git_diff
searxng__searxng-1686
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Bug: bing videos engine **Version of SearXNG, commit number if you are using on master branch and stipulate if you forked SearXNG** Repository: https://github.com/searxng/searxng Branch: master Version: 2022.08.14-649d1a14 <!-- Check if these values are correct --> **How did you install SearXNG?** searxng-docker **What happened?** bing video search crashed **How To Reproduce** search for anything **Expected behavior** <!-- A clear and concise description of what you expected to happen. --> **Screenshots & Logs** <!-- If applicable, add screenshots, logs to help explain your problem. --> **Additional context** <!-- Add any other context about the problem here. --> **Technical report** Error * Error: IndexError * Percentage: 100 * Parameters: `()` * File name: `searx/engines/bing_videos.py:84` * Function: `response` * Code: `metadata = loads(result.xpath('.//div[@class="vrhdata"]/@vrhm')[0])` </issue> <code> [start of searx/engines/bing_videos.py] 1 # SPDX-License-Identifier: AGPL-3.0-or-later 2 # lint: pylint 3 """Bing (Videos) 4 5 """ 6 7 from json import loads 8 from urllib.parse import urlencode 9 10 from lxml import html 11 12 from searx.utils import match_language 13 from searx.engines.bing import language_aliases 14 15 from searx.engines.bing import ( # pylint: disable=unused-import 16 _fetch_supported_languages, 17 supported_languages_url, 18 ) 19 20 about = { 21 "website": 'https://www.bing.com/videos', 22 "wikidata_id": 'Q4914152', 23 "official_api_documentation": 'https://www.microsoft.com/en-us/bing/apis/bing-video-search-api', 24 "use_official_api": False, 25 "require_api_key": False, 26 "results": 'HTML', 27 } 28 29 categories = ['videos', 'web'] 30 paging = True 31 safesearch = True 32 time_range_support = True 33 send_accept_language_header = True 34 number_of_results = 28 35 36 base_url = 'https://www.bing.com/' 37 search_string = ( 38 # fmt: off 39 'videos/search' 40 '?{query}' 41 '&count={count}' 42 '&first={first}' 43 '&scope=video' 44 '&FORM=QBLH' 45 # fmt: on 46 ) 47 time_range_string = '&qft=+filterui:videoage-lt{interval}' 48 time_range_dict = {'day': '1440', 'week': '10080', 'month': '43200', 'year': '525600'} 49 50 # safesearch definitions 51 safesearch_types = {2: 'STRICT', 1: 'DEMOTE', 0: 'OFF'} 52 53 54 # do search-request 55 def request(query, params): 56 offset = ((params['pageno'] - 1) * number_of_results) + 1 57 58 search_path = search_string.format(query=urlencode({'q': query}), count=number_of_results, first=offset) 59 60 # safesearch cookie 61 params['cookies']['SRCHHPGUSR'] = 'ADLT=' + safesearch_types.get(params['safesearch'], 'DEMOTE') 62 63 # language cookie 64 language = match_language(params['language'], supported_languages, language_aliases).lower() 65 params['cookies']['_EDGE_S'] = 'mkt=' + language + '&F=1' 66 67 # query and paging 68 params['url'] = base_url + search_path 69 70 # time range 71 if params['time_range'] in time_range_dict: 72 params['url'] += time_range_string.format(interval=time_range_dict[params['time_range']]) 73 74 return params 75 76 77 # get response from search-request 78 def response(resp): 79 results = [] 80 81 dom = html.fromstring(resp.text) 82 83 for result in dom.xpath('//div[@class="dg_u"]'): 84 metadata = loads(result.xpath('.//div[@class="vrhdata"]/@vrhm')[0]) 85 info = ' - '.join(result.xpath('.//div[@class="mc_vtvc_meta_block"]//span/text()')).strip() 86 content = '{0} - {1}'.format(metadata['du'], info) 
87 thumbnail = '{0}th?id={1}'.format(base_url, metadata['thid']) 88 results.append( 89 { 90 'url': metadata['murl'], 91 'thumbnail': thumbnail, 92 'title': metadata.get('vt', ''), 93 'content': content, 94 'template': 'videos.html', 95 } 96 ) 97 98 return results 99 [end of searx/engines/bing_videos.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/searx/engines/bing_videos.py b/searx/engines/bing_videos.py --- a/searx/engines/bing_videos.py +++ b/searx/engines/bing_videos.py @@ -80,7 +80,7 @@ dom = html.fromstring(resp.text) - for result in dom.xpath('//div[@class="dg_u"]'): + for result in dom.xpath('//div[@class="dg_u"]/div[contains(@class, "mc_vtvc")]'): metadata = loads(result.xpath('.//div[@class="vrhdata"]/@vrhm')[0]) info = ' - '.join(result.xpath('.//div[@class="mc_vtvc_meta_block"]//span/text()')).strip() content = '{0} - {1}'.format(metadata['du'], info)
{"golden_diff": "diff --git a/searx/engines/bing_videos.py b/searx/engines/bing_videos.py\n--- a/searx/engines/bing_videos.py\n+++ b/searx/engines/bing_videos.py\n@@ -80,7 +80,7 @@\n \n dom = html.fromstring(resp.text)\n \n- for result in dom.xpath('//div[@class=\"dg_u\"]'):\n+ for result in dom.xpath('//div[@class=\"dg_u\"]/div[contains(@class, \"mc_vtvc\")]'):\n metadata = loads(result.xpath('.//div[@class=\"vrhdata\"]/@vrhm')[0])\n info = ' - '.join(result.xpath('.//div[@class=\"mc_vtvc_meta_block\"]//span/text()')).strip()\n content = '{0} - {1}'.format(metadata['du'], info)\n", "issue": "Bug: bing videos engine\n**Version of SearXNG, commit number if you are using on master branch and stipulate if you forked SearXNG**\r\nRepository: https://github.com/searxng/searxng\r\nBranch: master\r\nVersion: 2022.08.14-649d1a14\r\n<!-- Check if these values are correct -->\r\n\r\n**How did you install SearXNG?**\r\nsearxng-docker\r\n\r\n**What happened?**\r\nbing video search crashed\r\n\r\n**How To Reproduce**\r\nsearch for anything\r\n\r\n**Expected behavior**\r\n<!-- A clear and concise description of what you expected to happen. -->\r\n\r\n**Screenshots & Logs**\r\n<!-- If applicable, add screenshots, logs to help explain your problem. -->\r\n\r\n**Additional context**\r\n<!-- Add any other context about the problem here. -->\r\n\r\n**Technical report**\r\n\r\nError\r\n * Error: IndexError\r\n * Percentage: 100\r\n * Parameters: `()`\r\n * File name: `searx/engines/bing_videos.py:84`\r\n * Function: `response`\r\n * Code: `metadata = loads(result.xpath('.//div[@class=\"vrhdata\"]/@vrhm')[0])`\r\n\r\n\n", "before_files": [{"content": "# SPDX-License-Identifier: AGPL-3.0-or-later\n# lint: pylint\n\"\"\"Bing (Videos)\n\n\"\"\"\n\nfrom json import loads\nfrom urllib.parse import urlencode\n\nfrom lxml import html\n\nfrom searx.utils import match_language\nfrom searx.engines.bing import language_aliases\n\nfrom searx.engines.bing import ( # pylint: disable=unused-import\n _fetch_supported_languages,\n supported_languages_url,\n)\n\nabout = {\n \"website\": 'https://www.bing.com/videos',\n \"wikidata_id\": 'Q4914152',\n \"official_api_documentation\": 'https://www.microsoft.com/en-us/bing/apis/bing-video-search-api',\n \"use_official_api\": False,\n \"require_api_key\": False,\n \"results\": 'HTML',\n}\n\ncategories = ['videos', 'web']\npaging = True\nsafesearch = True\ntime_range_support = True\nsend_accept_language_header = True\nnumber_of_results = 28\n\nbase_url = 'https://www.bing.com/'\nsearch_string = (\n # fmt: off\n 'videos/search'\n '?{query}'\n '&count={count}'\n '&first={first}'\n '&scope=video'\n '&FORM=QBLH'\n # fmt: on\n)\ntime_range_string = '&qft=+filterui:videoage-lt{interval}'\ntime_range_dict = {'day': '1440', 'week': '10080', 'month': '43200', 'year': '525600'}\n\n# safesearch definitions\nsafesearch_types = {2: 'STRICT', 1: 'DEMOTE', 0: 'OFF'}\n\n\n# do search-request\ndef request(query, params):\n offset = ((params['pageno'] - 1) * number_of_results) + 1\n\n search_path = search_string.format(query=urlencode({'q': query}), count=number_of_results, first=offset)\n\n # safesearch cookie\n params['cookies']['SRCHHPGUSR'] = 'ADLT=' + safesearch_types.get(params['safesearch'], 'DEMOTE')\n\n # language cookie\n language = match_language(params['language'], supported_languages, language_aliases).lower()\n params['cookies']['_EDGE_S'] = 'mkt=' + language + '&F=1'\n\n # query and paging\n params['url'] = base_url + search_path\n\n # time range\n if params['time_range'] in 
time_range_dict:\n params['url'] += time_range_string.format(interval=time_range_dict[params['time_range']])\n\n return params\n\n\n# get response from search-request\ndef response(resp):\n results = []\n\n dom = html.fromstring(resp.text)\n\n for result in dom.xpath('//div[@class=\"dg_u\"]'):\n metadata = loads(result.xpath('.//div[@class=\"vrhdata\"]/@vrhm')[0])\n info = ' - '.join(result.xpath('.//div[@class=\"mc_vtvc_meta_block\"]//span/text()')).strip()\n content = '{0} - {1}'.format(metadata['du'], info)\n thumbnail = '{0}th?id={1}'.format(base_url, metadata['thid'])\n results.append(\n {\n 'url': metadata['murl'],\n 'thumbnail': thumbnail,\n 'title': metadata.get('vt', ''),\n 'content': content,\n 'template': 'videos.html',\n }\n )\n\n return results\n", "path": "searx/engines/bing_videos.py"}]}
1,773
186
gh_patches_debug_24839
rasdani/github-patches
git_diff
kymatio__kymatio-312
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> BUG scattering3d_qm7.py It seems that line 180 should be modified as follows: Current: order_0, order_1, order_2 = [], [], [] Should be: order_0, orders_1_and_2 = [], [] </issue> <code> [start of examples/3d/scattering3d_qm7.py] 1 """ 2 3D scattering quantum chemistry regression 3 ========================================== 4 This uses the 3D scattering on a standard dataset. 5 """ 6 7 import numpy as np 8 import time 9 import torch 10 import os 11 12 from sklearn import linear_model, model_selection, preprocessing, pipeline 13 from kymatio.scattering3d import HarmonicScattering3D 14 from kymatio.scattering3d.utils import compute_integrals, generate_weighted_sum_of_gaussians 15 from kymatio.datasets import fetch_qm7 16 from kymatio.caching import get_cache_dir 17 from scipy.spatial.distance import pdist 18 19 20 def evaluate_linear_regression(X, y, n_folds=5): 21 """ 22 Evaluates linear ridge regression predictions of y using X. 23 24 Parameters 25 ---------- 26 X: numpy array 27 input features, shape (N, D) 28 y: numpy array 29 target value, shape (N, 1) 30 31 """ 32 n_datapoints = X.shape[0] 33 P = np.random.permutation(n_datapoints).reshape((n_folds, -1)) 34 cross_val_folds = [] 35 36 for i_fold in range(n_folds): 37 fold = (np.concatenate(P[np.arange(n_folds) != i_fold], axis=0), P[i_fold]) 38 cross_val_folds.append(fold) 39 40 alphas = 10.**(-np.arange(0, 10)) 41 for i, alpha in enumerate(alphas): 42 regressor = pipeline.make_pipeline( 43 preprocessing.StandardScaler(), linear_model.Ridge(alpha=alpha)) 44 y_prediction = model_selection.cross_val_predict( 45 regressor, X=X, y=y, cv=cross_val_folds) 46 MAE = np.mean(np.abs(y_prediction - y)) 47 RMSE = np.sqrt(np.mean((y_prediction - y)**2)) 48 print('Ridge regression, alpha: {}, MAE: {}, RMSE: {}'.format( 49 alpha, MAE, RMSE)) 50 51 52 def get_valence(charges): 53 """ 54 Returns the number valence electrons of a particle given the 55 nuclear charge. 56 57 Parameters 58 ---------- 59 charges: numpy array 60 array containing the nuclear charges, arbitrary size 61 62 Returns 63 ------- 64 valence_charges : numpy array 65 same size as the input 66 """ 67 return ( 68 charges * (charges <= 2) + 69 (charges - 2) * np.logical_and(charges > 2, charges <= 10) + 70 (charges - 10) * np.logical_and(charges > 10, charges <= 18)) 71 72 73 def get_qm7_energies(): 74 """ 75 Loads the energies of the molecules of the QM7 dataset. 76 77 Returns 78 ------- 79 energies: numpy array 80 array containing the energies of the molecules 81 """ 82 qm7 = fetch_qm7() 83 return qm7['energies'] 84 85 86 87 def get_qm7_positions_and_charges(sigma, overlapping_precision=1e-1): 88 """ 89 Loads the positions and charges of the molecules of the QM7 dataset. 90 QM7 is a dataset of 7165 organic molecules with up to 7 non-hydrogen 91 atoms, whose energies were computed with a quantun chemistry 92 computational method named Density Functional Theory. 93 This dataset has been made available to train machine learning models 94 to predict these energies. 95 96 Parameters 97 ---------- 98 sigma : float 99 width parameter of the Gaussian that represents a particle 100 101 overlapping_precision : float, optional 102 affects the scaling of the positions. 
The positions are re-scaled 103 such that two Gaussian functions of width sigma centerd at the qm7 104 positions overlapp with amplitude <= the overlapping_precision 105 106 Returns 107 ------- 108 positions, charges, valence_charges: torch arrays 109 array containing the positions, charges and valence charges 110 of the QM7 database molecules 111 """ 112 qm7 = fetch_qm7(align=True) 113 positions = qm7['positions'] 114 charges = qm7['charges'].astype('float32') 115 valence_charges = get_valence(charges) 116 117 # normalize positions 118 min_dist = np.inf 119 for i in range(positions.shape[0]): 120 n_atoms = np.sum(charges[i] != 0) 121 pos = positions[i, :n_atoms, :] 122 min_dist = min(min_dist, pdist(pos).min()) 123 delta = sigma * np.sqrt(-8 * np.log(overlapping_precision)) 124 positions = positions * delta / min_dist 125 126 return (torch.from_numpy(positions), 127 torch.from_numpy(charges), 128 torch.from_numpy(valence_charges)) 129 130 131 def compute_qm7_solid_harmonic_scattering_coefficients( 132 M=192, N=128, O=96, sigma=2., J=2, L=3, 133 integral_powers=(0.5, 1., 2., 3.), batch_size=16): 134 """ 135 Computes the scattering coefficients of the molecules of the 136 QM7 database. Channels used are full charges, valence charges 137 and core charges. Linear regression of the qm7 energies with 138 the given values gives MAE 2.75, RMSE 4.18 (kcal.mol-1). 139 140 Parameters 141 ---------- 142 M, N, O: int 143 dimensions of the numerical grid 144 sigma : float 145 width parameter of the Gaussian that represents a particle 146 J: int 147 maximal scale of the solid harmonic wavelets 148 L: int 149 maximal first order of the solid harmonic wavelets 150 integral_powers: list of int 151 powers for the integrals 152 batch_size: int 153 size of the batch for computations 154 155 Returns 156 ------- 157 order_0: torch tensor 158 array containing zeroth-order scattering coefficients 159 orders_1_and_2: torch tensor 160 array containing first- and second-order scattering coefficients 161 """ 162 cuda = torch.cuda.is_available() 163 grid = torch.from_numpy( 164 np.fft.ifftshift( 165 np.mgrid[-M//2:-M//2+M, -N//2:-N//2+N, -O//2:-O//2+O].astype('float32'), 166 axes=(1, 2, 3))) 167 pos, full_charges, valence_charges = get_qm7_positions_and_charges(sigma) 168 169 n_molecules = pos.size(0) 170 n_batches = np.ceil(n_molecules / batch_size).astype(int) 171 172 scattering = HarmonicScattering3D(J=J, shape=(M, N, O), L=L, sigma_0=sigma) 173 174 if cuda: 175 grid = grid.cuda() 176 pos = pos.cuda() 177 full_charges = full_charges.cuda() 178 valence_charges = valence_charges.cuda() 179 scattering.cuda() 180 181 order_0, order_1, order_2 = [], [], [] 182 print('Computing solid harmonic scattering coefficients of {} molecules ' 183 'of QM7 database on {}'.format(pos.size(0), 'GPU' if cuda else 'CPU')) 184 print('sigma: {}, L: {}, J: {}, integral powers: {}'.format(sigma, L, J, integral_powers)) 185 186 this_time = None 187 last_time = None 188 for i in range(n_batches): 189 this_time = time.time() 190 if last_time is not None: 191 dt = this_time - last_time 192 print("Iteration {} ETA: [{:02}:{:02}:{:02}]".format( 193 i + 1, int(((n_batches - i - 1) * dt) // 3600), 194 int((((n_batches - i - 1) * dt) // 60) % 60), 195 int(((n_batches - i - 1) * dt) % 60)), end='\r') 196 else: 197 print("Iteration {} ETA: {}".format(i + 1,'-'),end='\r') 198 last_time = this_time 199 time.sleep(1) 200 201 start, end = i * batch_size, min((i + 1) * batch_size, n_molecules) 202 203 pos_batch = pos[start:end] 204 full_batch = 
full_charges[start:end] 205 val_batch = valence_charges[start:end] 206 207 full_density_batch = generate_weighted_sum_of_gaussians( 208 grid, pos_batch, full_batch, sigma, cuda=cuda) 209 full_order_0 = compute_integrals(full_density_batch, integral_powers) 210 scattering.max_order = 2 211 scattering.method = 'integral' 212 scattering.integral_powers = integral_powers 213 full_scattering = scattering(full_density_batch) 214 215 val_density_batch = generate_weighted_sum_of_gaussians( 216 grid, pos_batch, val_batch, sigma, cuda=cuda) 217 val_order_0 = compute_integrals(val_density_batch, integral_powers) 218 val_scattering= scattering(val_density_batch) 219 220 core_density_batch = full_density_batch - val_density_batch 221 core_order_0 = compute_integrals(core_density_batch, integral_powers) 222 core_scattering = scattering(core_density_batch) 223 224 225 order_0.append( 226 torch.stack([full_order_0, val_order_0, core_order_0], dim=-1)) 227 orders_1_and_2.append( 228 torch.stack( 229 [full_scattering, val_scattering, core_scattering], dim=-1)) 230 231 order_0 = torch.cat(order_0, dim=0) 232 orders_1_and_2 = torch.cat(orders_1_and_2, dim=0) 233 234 return order_0, orders_1_and_2 235 236 M, N, O, J, L = 192, 128, 96, 2, 3 237 integral_powers = [0.5, 1., 2., 3.] 238 sigma = 2. 239 240 order_0, orders_1_and_2 = compute_qm7_solid_harmonic_scattering_coefficients( 241 M=M, N=N, O=O, J=J, L=L, integral_powers=integral_powers, 242 sigma=sigma, batch_size=8) 243 244 n_molecules = order_0.size(0) 245 246 np_order_0 = order_0.numpy().reshape((n_molecules, -1)) 247 np_orders_1_and_2 = orders_1_and_2.numpy().reshape((n_molecules, -1)) 248 249 basename = 'qm7_L_{}_J_{}_sigma_{}_MNO_{}_powers_{}.npy'.format( 250 L, J, sigma, (M, N, O), integral_powers) 251 cachedir = get_cache_dir("qm7/experiments") 252 np.save(os.path.join(cachedir, 'order_0_' + basename), np_order_0) 253 np.save(os.path.join( 254 cachedir, 'orders_1_and_2_' + basename), np_orders_1_and_2) 255 256 scattering_coef = np.concatenate([np_order_0, np_orders_1_and_2], axis=1) 257 target = get_qm7_energies() 258 259 evaluate_linear_regression(scattering_coef, target) 260 [end of examples/3d/scattering3d_qm7.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/examples/3d/scattering3d_qm7.py b/examples/3d/scattering3d_qm7.py --- a/examples/3d/scattering3d_qm7.py +++ b/examples/3d/scattering3d_qm7.py @@ -88,7 +88,7 @@ """ Loads the positions and charges of the molecules of the QM7 dataset. QM7 is a dataset of 7165 organic molecules with up to 7 non-hydrogen - atoms, whose energies were computed with a quantun chemistry + atoms, whose energies were computed with a quantum chemistry computational method named Density Functional Theory. This dataset has been made available to train machine learning models to predict these energies. @@ -178,7 +178,7 @@ valence_charges = valence_charges.cuda() scattering.cuda() - order_0, order_1, order_2 = [], [], [] + order_0, orders_1_and_2 = [], [] print('Computing solid harmonic scattering coefficients of {} molecules ' 'of QM7 database on {}'.format(pos.size(0), 'GPU' if cuda else 'CPU')) print('sigma: {}, L: {}, J: {}, integral powers: {}'.format(sigma, L, J, integral_powers))
{"golden_diff": "diff --git a/examples/3d/scattering3d_qm7.py b/examples/3d/scattering3d_qm7.py\n--- a/examples/3d/scattering3d_qm7.py\n+++ b/examples/3d/scattering3d_qm7.py\n@@ -88,7 +88,7 @@\n \"\"\"\n Loads the positions and charges of the molecules of the QM7 dataset.\n QM7 is a dataset of 7165 organic molecules with up to 7 non-hydrogen\n- atoms, whose energies were computed with a quantun chemistry\n+ atoms, whose energies were computed with a quantum chemistry\n computational method named Density Functional Theory.\n This dataset has been made available to train machine learning models\n to predict these energies.\n@@ -178,7 +178,7 @@\n valence_charges = valence_charges.cuda()\n scattering.cuda()\n \n- order_0, order_1, order_2 = [], [], []\n+ order_0, orders_1_and_2 = [], []\n print('Computing solid harmonic scattering coefficients of {} molecules '\n 'of QM7 database on {}'.format(pos.size(0), 'GPU' if cuda else 'CPU'))\n print('sigma: {}, L: {}, J: {}, integral powers: {}'.format(sigma, L, J, integral_powers))\n", "issue": "BUG scattering3d_qm7.py\nIt seems that line 180 should be modified as follows:\r\nCurrent:\r\norder_0, order_1, order_2 = [], [], []\r\nShould be:\r\norder_0, orders_1_and_2 = [], []\n", "before_files": [{"content": "\"\"\"\n3D scattering quantum chemistry regression\n==========================================\nThis uses the 3D scattering on a standard dataset.\n\"\"\"\n\nimport numpy as np\nimport time\nimport torch\nimport os\n\nfrom sklearn import linear_model, model_selection, preprocessing, pipeline\nfrom kymatio.scattering3d import HarmonicScattering3D\nfrom kymatio.scattering3d.utils import compute_integrals, generate_weighted_sum_of_gaussians\nfrom kymatio.datasets import fetch_qm7\nfrom kymatio.caching import get_cache_dir\nfrom scipy.spatial.distance import pdist\n\n\ndef evaluate_linear_regression(X, y, n_folds=5):\n \"\"\"\n Evaluates linear ridge regression predictions of y using X.\n\n Parameters\n ----------\n X: numpy array\n input features, shape (N, D)\n y: numpy array\n target value, shape (N, 1)\n\n \"\"\"\n n_datapoints = X.shape[0]\n P = np.random.permutation(n_datapoints).reshape((n_folds, -1))\n cross_val_folds = []\n\n for i_fold in range(n_folds):\n fold = (np.concatenate(P[np.arange(n_folds) != i_fold], axis=0), P[i_fold])\n cross_val_folds.append(fold)\n\n alphas = 10.**(-np.arange(0, 10))\n for i, alpha in enumerate(alphas):\n regressor = pipeline.make_pipeline(\n preprocessing.StandardScaler(), linear_model.Ridge(alpha=alpha))\n y_prediction = model_selection.cross_val_predict(\n regressor, X=X, y=y, cv=cross_val_folds)\n MAE = np.mean(np.abs(y_prediction - y))\n RMSE = np.sqrt(np.mean((y_prediction - y)**2))\n print('Ridge regression, alpha: {}, MAE: {}, RMSE: {}'.format(\n alpha, MAE, RMSE))\n\n\ndef get_valence(charges):\n \"\"\"\n Returns the number valence electrons of a particle given the\n nuclear charge.\n\n Parameters\n ----------\n charges: numpy array\n array containing the nuclear charges, arbitrary size\n\n Returns\n -------\n valence_charges : numpy array\n same size as the input\n \"\"\"\n return (\n charges * (charges <= 2) +\n (charges - 2) * np.logical_and(charges > 2, charges <= 10) +\n (charges - 10) * np.logical_and(charges > 10, charges <= 18))\n\n\ndef get_qm7_energies():\n \"\"\"\n Loads the energies of the molecules of the QM7 dataset.\n\n Returns\n -------\n energies: numpy array\n array containing the energies of the molecules\n \"\"\"\n qm7 = fetch_qm7()\n return qm7['energies']\n\n\n\ndef 
get_qm7_positions_and_charges(sigma, overlapping_precision=1e-1):\n \"\"\"\n Loads the positions and charges of the molecules of the QM7 dataset.\n QM7 is a dataset of 7165 organic molecules with up to 7 non-hydrogen\n atoms, whose energies were computed with a quantun chemistry\n computational method named Density Functional Theory.\n This dataset has been made available to train machine learning models\n to predict these energies.\n\n Parameters\n ----------\n sigma : float\n width parameter of the Gaussian that represents a particle\n\n overlapping_precision : float, optional\n affects the scaling of the positions. The positions are re-scaled\n such that two Gaussian functions of width sigma centerd at the qm7\n positions overlapp with amplitude <= the overlapping_precision\n\n Returns\n -------\n positions, charges, valence_charges: torch arrays\n array containing the positions, charges and valence charges\n of the QM7 database molecules\n \"\"\"\n qm7 = fetch_qm7(align=True)\n positions = qm7['positions']\n charges = qm7['charges'].astype('float32')\n valence_charges = get_valence(charges)\n\n # normalize positions\n min_dist = np.inf\n for i in range(positions.shape[0]):\n n_atoms = np.sum(charges[i] != 0)\n pos = positions[i, :n_atoms, :]\n min_dist = min(min_dist, pdist(pos).min())\n delta = sigma * np.sqrt(-8 * np.log(overlapping_precision))\n positions = positions * delta / min_dist\n\n return (torch.from_numpy(positions),\n torch.from_numpy(charges),\n torch.from_numpy(valence_charges))\n\n\ndef compute_qm7_solid_harmonic_scattering_coefficients(\n M=192, N=128, O=96, sigma=2., J=2, L=3,\n integral_powers=(0.5, 1., 2., 3.), batch_size=16):\n \"\"\"\n Computes the scattering coefficients of the molecules of the\n QM7 database. Channels used are full charges, valence charges\n and core charges. 
Linear regression of the qm7 energies with\n the given values gives MAE 2.75, RMSE 4.18 (kcal.mol-1).\n\n Parameters\n ----------\n M, N, O: int\n dimensions of the numerical grid\n sigma : float\n width parameter of the Gaussian that represents a particle\n J: int\n maximal scale of the solid harmonic wavelets\n L: int\n maximal first order of the solid harmonic wavelets\n integral_powers: list of int\n powers for the integrals\n batch_size: int\n size of the batch for computations\n\n Returns\n -------\n order_0: torch tensor\n array containing zeroth-order scattering coefficients\n orders_1_and_2: torch tensor\n array containing first- and second-order scattering coefficients\n \"\"\"\n cuda = torch.cuda.is_available()\n grid = torch.from_numpy(\n np.fft.ifftshift(\n np.mgrid[-M//2:-M//2+M, -N//2:-N//2+N, -O//2:-O//2+O].astype('float32'),\n axes=(1, 2, 3)))\n pos, full_charges, valence_charges = get_qm7_positions_and_charges(sigma)\n\n n_molecules = pos.size(0)\n n_batches = np.ceil(n_molecules / batch_size).astype(int)\n\n scattering = HarmonicScattering3D(J=J, shape=(M, N, O), L=L, sigma_0=sigma)\n\n if cuda:\n grid = grid.cuda()\n pos = pos.cuda()\n full_charges = full_charges.cuda()\n valence_charges = valence_charges.cuda()\n scattering.cuda()\n\n order_0, order_1, order_2 = [], [], []\n print('Computing solid harmonic scattering coefficients of {} molecules '\n 'of QM7 database on {}'.format(pos.size(0), 'GPU' if cuda else 'CPU'))\n print('sigma: {}, L: {}, J: {}, integral powers: {}'.format(sigma, L, J, integral_powers))\n\n this_time = None\n last_time = None\n for i in range(n_batches):\n this_time = time.time()\n if last_time is not None:\n dt = this_time - last_time\n print(\"Iteration {} ETA: [{:02}:{:02}:{:02}]\".format(\n i + 1, int(((n_batches - i - 1) * dt) // 3600),\n int((((n_batches - i - 1) * dt) // 60) % 60),\n int(((n_batches - i - 1) * dt) % 60)), end='\\r')\n else:\n print(\"Iteration {} ETA: {}\".format(i + 1,'-'),end='\\r')\n last_time = this_time\n time.sleep(1)\n\n start, end = i * batch_size, min((i + 1) * batch_size, n_molecules)\n\n pos_batch = pos[start:end]\n full_batch = full_charges[start:end]\n val_batch = valence_charges[start:end]\n\n full_density_batch = generate_weighted_sum_of_gaussians(\n grid, pos_batch, full_batch, sigma, cuda=cuda)\n full_order_0 = compute_integrals(full_density_batch, integral_powers)\n scattering.max_order = 2\n scattering.method = 'integral'\n scattering.integral_powers = integral_powers\n full_scattering = scattering(full_density_batch)\n\n val_density_batch = generate_weighted_sum_of_gaussians(\n grid, pos_batch, val_batch, sigma, cuda=cuda)\n val_order_0 = compute_integrals(val_density_batch, integral_powers)\n val_scattering= scattering(val_density_batch)\n\n core_density_batch = full_density_batch - val_density_batch\n core_order_0 = compute_integrals(core_density_batch, integral_powers)\n core_scattering = scattering(core_density_batch)\n\n\n order_0.append(\n torch.stack([full_order_0, val_order_0, core_order_0], dim=-1))\n orders_1_and_2.append(\n torch.stack(\n [full_scattering, val_scattering, core_scattering], dim=-1))\n\n order_0 = torch.cat(order_0, dim=0)\n orders_1_and_2 = torch.cat(orders_1_and_2, dim=0)\n\n return order_0, orders_1_and_2\n\nM, N, O, J, L = 192, 128, 96, 2, 3\nintegral_powers = [0.5, 1., 2., 3.]\nsigma = 2.\n\norder_0, orders_1_and_2 = compute_qm7_solid_harmonic_scattering_coefficients(\n M=M, N=N, O=O, J=J, L=L, integral_powers=integral_powers,\n sigma=sigma, batch_size=8)\n\nn_molecules = 
order_0.size(0)\n\nnp_order_0 = order_0.numpy().reshape((n_molecules, -1))\nnp_orders_1_and_2 = orders_1_and_2.numpy().reshape((n_molecules, -1))\n\nbasename = 'qm7_L_{}_J_{}_sigma_{}_MNO_{}_powers_{}.npy'.format(\n L, J, sigma, (M, N, O), integral_powers)\ncachedir = get_cache_dir(\"qm7/experiments\")\nnp.save(os.path.join(cachedir, 'order_0_' + basename), np_order_0)\nnp.save(os.path.join(\n cachedir, 'orders_1_and_2_' + basename), np_orders_1_and_2)\n\nscattering_coef = np.concatenate([np_order_0, np_orders_1_and_2], axis=1)\ntarget = get_qm7_energies()\n\nevaluate_linear_regression(scattering_coef, target)\n", "path": "examples/3d/scattering3d_qm7.py"}]}
3,710
296
gh_patches_debug_22806
rasdani/github-patches
git_diff
sopel-irc__sopel-1859
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> announce: Batch up to MAXTARGETS channels at a time if available The simple loop in `announce.py` is kind of...meh. The number of lines it actually sends to the server could be reduced as much as 75% depending on how many channels Sopel is in when `.announce` is called. Some IRC servers (Hybrid, for example) send a `MAXTARGETS` field in the `ISUPPORT` properties when a client connects. If this is present, it would be useful for `announce.py` to have that information and send its announcement messages to `MAXTARGETS` channels at a time. Long-term thing, more of an optimization than anything. </issue> <code> [start of sopel/modules/announce.py] 1 # coding=utf-8 2 """ 3 announce.py - Sopel Announcement Plugin 4 Sends announcements to all channels the bot has joined. 5 Copyright Β© 2013, Elad Alfassa, <[email protected]> 6 Licensed under the Eiffel Forum License 2. 7 8 https://sopel.chat 9 """ 10 from __future__ import absolute_import, division, print_function, unicode_literals 11 12 from sopel import plugin 13 14 15 @plugin.command('announce') 16 @plugin.example('.announce Some important message here') 17 @plugin.require_admin('Sorry, I can\'t let you do that', reply=True) 18 @plugin.output_prefix('[ANNOUNCEMENT] ') 19 def announce(bot, trigger): 20 """Send an announcement to all channels the bot is in""" 21 for channel in bot.channels: 22 bot.say(trigger.group(2), channel) 23 bot.reply('Announce complete.') 24 [end of sopel/modules/announce.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/sopel/modules/announce.py b/sopel/modules/announce.py --- a/sopel/modules/announce.py +++ b/sopel/modules/announce.py @@ -12,12 +12,30 @@ from sopel import plugin +def _chunks(items, size): + """Break a list of items into groups. + + :param items: the collection of items to chunk + :type items: :term:`iterable` + :param int size: the size of each chunk + """ + # from https://stackoverflow.com/a/312464/5991 with modified names for readability + for delim in range(0, len(items), size): + yield items[delim:delim + size] + + @plugin.command('announce') @plugin.example('.announce Some important message here') @plugin.require_admin('Sorry, I can\'t let you do that', reply=True) @plugin.output_prefix('[ANNOUNCEMENT] ') def announce(bot, trigger): - """Send an announcement to all channels the bot is in""" - for channel in bot.channels: - bot.say(trigger.group(2), channel) + """Send an announcement to all channels the bot is in.""" + if trigger.group(2) is None: + bot.reply('Announce what? I need a message to say.') + return + + size = bot.isupport.get('TARGMAX', {}).get('PRIVMSG', 1) + channels = _chunks(bot.channels.keys(), size) + for cgroup in channels: + bot.say(trigger.group(2), ','.join(cgroup)) bot.reply('Announce complete.')
{"golden_diff": "diff --git a/sopel/modules/announce.py b/sopel/modules/announce.py\n--- a/sopel/modules/announce.py\n+++ b/sopel/modules/announce.py\n@@ -12,12 +12,30 @@\n from sopel import plugin\n \n \n+def _chunks(items, size):\n+ \"\"\"Break a list of items into groups.\n+\n+ :param items: the collection of items to chunk\n+ :type items: :term:`iterable`\n+ :param int size: the size of each chunk\n+ \"\"\"\n+ # from https://stackoverflow.com/a/312464/5991 with modified names for readability\n+ for delim in range(0, len(items), size):\n+ yield items[delim:delim + size]\n+\n+\n @plugin.command('announce')\n @plugin.example('.announce Some important message here')\n @plugin.require_admin('Sorry, I can\\'t let you do that', reply=True)\n @plugin.output_prefix('[ANNOUNCEMENT] ')\n def announce(bot, trigger):\n- \"\"\"Send an announcement to all channels the bot is in\"\"\"\n- for channel in bot.channels:\n- bot.say(trigger.group(2), channel)\n+ \"\"\"Send an announcement to all channels the bot is in.\"\"\"\n+ if trigger.group(2) is None:\n+ bot.reply('Announce what? I need a message to say.')\n+ return\n+\n+ size = bot.isupport.get('TARGMAX', {}).get('PRIVMSG', 1)\n+ channels = _chunks(bot.channels.keys(), size)\n+ for cgroup in channels:\n+ bot.say(trigger.group(2), ','.join(cgroup))\n bot.reply('Announce complete.')\n", "issue": "announce: Batch up to MAXTARGETS channels at a time if available\nThe simple loop in `announce.py` is kind of...meh. The number of lines it actually sends to the server could be reduced as much as 75% depending on how many channels Sopel is in when `.announce` is called.\r\n\r\nSome IRC servers (Hybrid, for example) send a `MAXTARGETS` field in the `ISUPPORT` properties when a client connects. If this is present, it would be useful for `announce.py` to have that information and send its announcement messages to `MAXTARGETS` channels at a time.\r\n\r\nLong-term thing, more of an optimization than anything.\n", "before_files": [{"content": "# coding=utf-8\n\"\"\"\nannounce.py - Sopel Announcement Plugin\nSends announcements to all channels the bot has joined.\nCopyright \u00a9 2013, Elad Alfassa, <[email protected]>\nLicensed under the Eiffel Forum License 2.\n\nhttps://sopel.chat\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom sopel import plugin\n\n\[email protected]('announce')\[email protected]('.announce Some important message here')\[email protected]_admin('Sorry, I can\\'t let you do that', reply=True)\[email protected]_prefix('[ANNOUNCEMENT] ')\ndef announce(bot, trigger):\n \"\"\"Send an announcement to all channels the bot is in\"\"\"\n for channel in bot.channels:\n bot.say(trigger.group(2), channel)\n bot.reply('Announce complete.')\n", "path": "sopel/modules/announce.py"}]}
901
373
gh_patches_debug_40819
rasdani/github-patches
git_diff
bridgecrewio__checkov-4393
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> GitLab SAST encounters error parsing the report **Describe the issue** `gitlab_sast` was added to the output in #4315. GitLab SAST produced the following error when attempting to parse the report. ``` [Schema] property '/vulnerabilities/0/identifiers/0/url' is not of type: string [Schema] property '/vulnerabilities/0/links/0/url' is not of type: string ``` I found that `url` value is set to `null` which is not supported e.g. ```json "vulnerabilities": [ { "id": "..." "identifiers": [ { "name": "CKV_AWS_283", "type": "checkov", "url": null, "value": "CKV_AWS_283" } ], "links": [ { "url": null } ], ... ] ``` `url` is not a required field so instead of setting the value to `null`, drop the key altogether schema reference: https://gitlab.com/gitlab-org/security-products/security-report-schemas/-/blob/master/dist/sast-report-format.json </issue> <code> [start of checkov/common/output/gitlab_sast.py] 1 from __future__ import annotations 2 3 from datetime import datetime 4 from typing import TYPE_CHECKING, Any 5 from uuid import uuid4 6 7 from checkov.common.output.cyclonedx_consts import SCA_CHECKTYPES 8 from checkov.version import version 9 10 if TYPE_CHECKING: 11 from checkov.common.output.record import Record 12 from checkov.common.output.report import Report 13 14 DEFAULT_SEVERITY_GITLAB_LEVEL = "Unknown" 15 SEVERITY_TO_GITLAB_LEVEL = { 16 "critical": "Critical", 17 "high": "High", 18 "medium": "Medium", 19 "low": "Low", 20 "none": "Info", 21 } 22 23 24 class GitLabSast: 25 def __init__(self, reports: list[Report]): 26 self.reports = reports 27 28 self.sast_json = self.create_sast_json() 29 30 def create_sast_json(self) -> dict[str, Any]: 31 return { 32 "schema": "https://gitlab.com/gitlab-org/security-products/security-report-schemas/-/raw/v15.0.4/dist/sast-report-format.json", 33 "version": "15.0.4", 34 "scan": self._create_scan(), 35 "vulnerabilities": self._create_vulnerabilities(), 36 } 37 38 def _create_scan(self) -> dict[str, Any]: 39 current_datetime = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S") 40 scanner = { 41 "id": "checkov", 42 "name": "Checkov", 43 "url": "https://www.checkov.io/", 44 "vendor": { 45 "name": "Bridgecrew", 46 }, 47 "version": version, 48 } 49 50 return { 51 "start_time": current_datetime, # needs to be done properly in a later stage 52 "end_time": current_datetime, 53 "analyzer": scanner, # same for us 54 "scanner": scanner, 55 "status": "success", 56 "type": "sast", 57 } 58 59 def _create_vulnerabilities(self) -> list[dict[str, Any]]: 60 vulnerabilities = [] 61 62 for report in self.reports: 63 if report.check_type in SCA_CHECKTYPES: 64 for check in report.failed_checks: 65 vulnerability = None 66 if check.check_id.startswith("BC_LIC"): 67 vulnerability = self._create_license_vulnerability(record=check) 68 elif check.check_id.startswith(("BC_VUL", "CKV_CVE")): 69 vulnerability = self._create_cve_vulnerability(record=check) 70 71 if vulnerability: 72 vulnerabilities.append(vulnerability) 73 else: 74 for check in report.failed_checks: 75 vulnerabilities.append(self._create_iac_vulnerability(record=check)) 76 77 return vulnerabilities 78 79 def _create_iac_vulnerability(self, record: Record) -> dict[str, Any]: 80 severity = record.severity.name.lower() if record.severity else "" 81 82 return { 83 "id": str(uuid4()), 84 "identifiers": [ 85 { 86 "name": record.check_id, 87 "type": "checkov", 88 "url": record.guideline, 89 "value": record.check_id, 90 } 91 
], 92 "links": [ 93 { 94 "url": record.guideline, 95 } 96 ], 97 "location": { 98 "file": record.repo_file_path.lstrip("/"), 99 "start_line": record.file_line_range[0], 100 "end_line": record.file_line_range[1], 101 }, 102 "name": record.check_name, 103 "description": f"Further info can be found {record.guideline}", 104 "severity": SEVERITY_TO_GITLAB_LEVEL.get(severity, DEFAULT_SEVERITY_GITLAB_LEVEL), 105 "solution": f"Further info can be found {record.guideline}", 106 } 107 108 def _create_cve_vulnerability(self, record: Record) -> dict[str, Any] | None: 109 details = record.vulnerability_details 110 if not details: 111 # this shouldn't happen 112 return None 113 114 severity = record.severity.name.lower() if record.severity else "" 115 116 return { 117 "id": str(uuid4()), 118 "identifiers": [ 119 { 120 "name": record.short_description, 121 "type": "cve", 122 "url": details.get("link"), 123 "value": details["id"], 124 } 125 ], 126 "links": [ 127 { 128 "url": details.get("link"), 129 } 130 ], 131 "location": { 132 "file": record.repo_file_path.lstrip("/"), 133 }, 134 "name": record.short_description, 135 "description": details.get("description"), 136 "severity": SEVERITY_TO_GITLAB_LEVEL.get(severity, DEFAULT_SEVERITY_GITLAB_LEVEL), 137 "solution": details.get("status"), 138 } 139 140 def _create_license_vulnerability(self, record: Record) -> dict[str, Any] | None: 141 details = record.vulnerability_details 142 if not details: 143 # this shouldn't happen 144 return None 145 146 return { 147 "id": str(uuid4()), 148 "identifiers": [ 149 { 150 "name": record.check_id, 151 "type": "license", 152 "value": record.check_id, 153 } 154 ], 155 "location": { 156 "file": record.repo_file_path.lstrip("/"), 157 }, 158 "name": record.short_description, 159 "description": f"Package {details['package_name']}@{details['package_version']} has license {details['license']}", 160 } 161 [end of checkov/common/output/gitlab_sast.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/checkov/common/output/gitlab_sast.py b/checkov/common/output/gitlab_sast.py --- a/checkov/common/output/gitlab_sast.py +++ b/checkov/common/output/gitlab_sast.py @@ -79,21 +79,15 @@ def _create_iac_vulnerability(self, record: Record) -> dict[str, Any]: severity = record.severity.name.lower() if record.severity else "" - return { + vulnerability: "dict[str, Any]" = { "id": str(uuid4()), "identifiers": [ { "name": record.check_id, "type": "checkov", - "url": record.guideline, "value": record.check_id, } ], - "links": [ - { - "url": record.guideline, - } - ], "location": { "file": record.repo_file_path.lstrip("/"), "start_line": record.file_line_range[0], @@ -105,6 +99,17 @@ "solution": f"Further info can be found {record.guideline}", } + if record.guideline: + # url can't be None + vulnerability["identifiers"][0]["url"] = record.guideline + vulnerability["links"] = [ + { + "url": record.guideline, + } + ] + + return vulnerability + def _create_cve_vulnerability(self, record: Record) -> dict[str, Any] | None: details = record.vulnerability_details if not details: @@ -113,21 +118,15 @@ severity = record.severity.name.lower() if record.severity else "" - return { + vulnerability: "dict[str, Any]" = { "id": str(uuid4()), "identifiers": [ { "name": record.short_description, "type": "cve", - "url": details.get("link"), "value": details["id"], } ], - "links": [ - { - "url": details.get("link"), - } - ], "location": { "file": record.repo_file_path.lstrip("/"), }, @@ -137,6 +136,18 @@ "solution": details.get("status"), } + link = details.get("link") + if link: + # url can't be None + vulnerability["identifiers"][0]["url"] = link + vulnerability["links"] = [ + { + "url": link, + } + ] + + return vulnerability + def _create_license_vulnerability(self, record: Record) -> dict[str, Any] | None: details = record.vulnerability_details if not details:
{"golden_diff": "diff --git a/checkov/common/output/gitlab_sast.py b/checkov/common/output/gitlab_sast.py\n--- a/checkov/common/output/gitlab_sast.py\n+++ b/checkov/common/output/gitlab_sast.py\n@@ -79,21 +79,15 @@\n def _create_iac_vulnerability(self, record: Record) -> dict[str, Any]:\n severity = record.severity.name.lower() if record.severity else \"\"\n \n- return {\n+ vulnerability: \"dict[str, Any]\" = {\n \"id\": str(uuid4()),\n \"identifiers\": [\n {\n \"name\": record.check_id,\n \"type\": \"checkov\",\n- \"url\": record.guideline,\n \"value\": record.check_id,\n }\n ],\n- \"links\": [\n- {\n- \"url\": record.guideline,\n- }\n- ],\n \"location\": {\n \"file\": record.repo_file_path.lstrip(\"/\"),\n \"start_line\": record.file_line_range[0],\n@@ -105,6 +99,17 @@\n \"solution\": f\"Further info can be found {record.guideline}\",\n }\n \n+ if record.guideline:\n+ # url can't be None\n+ vulnerability[\"identifiers\"][0][\"url\"] = record.guideline\n+ vulnerability[\"links\"] = [\n+ {\n+ \"url\": record.guideline,\n+ }\n+ ]\n+\n+ return vulnerability\n+\n def _create_cve_vulnerability(self, record: Record) -> dict[str, Any] | None:\n details = record.vulnerability_details\n if not details:\n@@ -113,21 +118,15 @@\n \n severity = record.severity.name.lower() if record.severity else \"\"\n \n- return {\n+ vulnerability: \"dict[str, Any]\" = {\n \"id\": str(uuid4()),\n \"identifiers\": [\n {\n \"name\": record.short_description,\n \"type\": \"cve\",\n- \"url\": details.get(\"link\"),\n \"value\": details[\"id\"],\n }\n ],\n- \"links\": [\n- {\n- \"url\": details.get(\"link\"),\n- }\n- ],\n \"location\": {\n \"file\": record.repo_file_path.lstrip(\"/\"),\n },\n@@ -137,6 +136,18 @@\n \"solution\": details.get(\"status\"),\n }\n \n+ link = details.get(\"link\")\n+ if link:\n+ # url can't be None\n+ vulnerability[\"identifiers\"][0][\"url\"] = link\n+ vulnerability[\"links\"] = [\n+ {\n+ \"url\": link,\n+ }\n+ ]\n+\n+ return vulnerability\n+\n def _create_license_vulnerability(self, record: Record) -> dict[str, Any] | None:\n details = record.vulnerability_details\n if not details:\n", "issue": "GitLab SAST encounters error parsing the report\n**Describe the issue**\r\n`gitlab_sast` was added to the output in #4315. 
GitLab SAST produced the following error when attempting to parse the report.\r\n\r\n```\r\n[Schema] property '/vulnerabilities/0/identifiers/0/url' is not of type: string\r\n[Schema] property '/vulnerabilities/0/links/0/url' is not of type: string\r\n```\r\n\r\nI found that `url` value is set to `null` which is not supported e.g.\r\n\r\n```json\r\n \"vulnerabilities\": [\r\n {\r\n \"id\": \"...\"\r\n \"identifiers\": [\r\n {\r\n \"name\": \"CKV_AWS_283\",\r\n \"type\": \"checkov\",\r\n \"url\": null,\r\n \"value\": \"CKV_AWS_283\"\r\n }\r\n ],\r\n \"links\": [\r\n {\r\n \"url\": null\r\n }\r\n ],\r\n ...\r\n ]\r\n```\r\n\r\n`url` is not a required field so instead of setting the value to `null`, drop the key altogether\r\n\r\nschema reference: https://gitlab.com/gitlab-org/security-products/security-report-schemas/-/blob/master/dist/sast-report-format.json\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom datetime import datetime\nfrom typing import TYPE_CHECKING, Any\nfrom uuid import uuid4\n\nfrom checkov.common.output.cyclonedx_consts import SCA_CHECKTYPES\nfrom checkov.version import version\n\nif TYPE_CHECKING:\n from checkov.common.output.record import Record\n from checkov.common.output.report import Report\n\nDEFAULT_SEVERITY_GITLAB_LEVEL = \"Unknown\"\nSEVERITY_TO_GITLAB_LEVEL = {\n \"critical\": \"Critical\",\n \"high\": \"High\",\n \"medium\": \"Medium\",\n \"low\": \"Low\",\n \"none\": \"Info\",\n}\n\n\nclass GitLabSast:\n def __init__(self, reports: list[Report]):\n self.reports = reports\n\n self.sast_json = self.create_sast_json()\n\n def create_sast_json(self) -> dict[str, Any]:\n return {\n \"schema\": \"https://gitlab.com/gitlab-org/security-products/security-report-schemas/-/raw/v15.0.4/dist/sast-report-format.json\",\n \"version\": \"15.0.4\",\n \"scan\": self._create_scan(),\n \"vulnerabilities\": self._create_vulnerabilities(),\n }\n\n def _create_scan(self) -> dict[str, Any]:\n current_datetime = datetime.utcnow().strftime(\"%Y-%m-%dT%H:%M:%S\")\n scanner = {\n \"id\": \"checkov\",\n \"name\": \"Checkov\",\n \"url\": \"https://www.checkov.io/\",\n \"vendor\": {\n \"name\": \"Bridgecrew\",\n },\n \"version\": version,\n }\n\n return {\n \"start_time\": current_datetime, # needs to be done properly in a later stage\n \"end_time\": current_datetime,\n \"analyzer\": scanner, # same for us\n \"scanner\": scanner,\n \"status\": \"success\",\n \"type\": \"sast\",\n }\n\n def _create_vulnerabilities(self) -> list[dict[str, Any]]:\n vulnerabilities = []\n\n for report in self.reports:\n if report.check_type in SCA_CHECKTYPES:\n for check in report.failed_checks:\n vulnerability = None\n if check.check_id.startswith(\"BC_LIC\"):\n vulnerability = self._create_license_vulnerability(record=check)\n elif check.check_id.startswith((\"BC_VUL\", \"CKV_CVE\")):\n vulnerability = self._create_cve_vulnerability(record=check)\n\n if vulnerability:\n vulnerabilities.append(vulnerability)\n else:\n for check in report.failed_checks:\n vulnerabilities.append(self._create_iac_vulnerability(record=check))\n\n return vulnerabilities\n\n def _create_iac_vulnerability(self, record: Record) -> dict[str, Any]:\n severity = record.severity.name.lower() if record.severity else \"\"\n\n return {\n \"id\": str(uuid4()),\n \"identifiers\": [\n {\n \"name\": record.check_id,\n \"type\": \"checkov\",\n \"url\": record.guideline,\n \"value\": record.check_id,\n }\n ],\n \"links\": [\n {\n \"url\": record.guideline,\n }\n ],\n \"location\": {\n \"file\": 
record.repo_file_path.lstrip(\"/\"),\n \"start_line\": record.file_line_range[0],\n \"end_line\": record.file_line_range[1],\n },\n \"name\": record.check_name,\n \"description\": f\"Further info can be found {record.guideline}\",\n \"severity\": SEVERITY_TO_GITLAB_LEVEL.get(severity, DEFAULT_SEVERITY_GITLAB_LEVEL),\n \"solution\": f\"Further info can be found {record.guideline}\",\n }\n\n def _create_cve_vulnerability(self, record: Record) -> dict[str, Any] | None:\n details = record.vulnerability_details\n if not details:\n # this shouldn't happen\n return None\n\n severity = record.severity.name.lower() if record.severity else \"\"\n\n return {\n \"id\": str(uuid4()),\n \"identifiers\": [\n {\n \"name\": record.short_description,\n \"type\": \"cve\",\n \"url\": details.get(\"link\"),\n \"value\": details[\"id\"],\n }\n ],\n \"links\": [\n {\n \"url\": details.get(\"link\"),\n }\n ],\n \"location\": {\n \"file\": record.repo_file_path.lstrip(\"/\"),\n },\n \"name\": record.short_description,\n \"description\": details.get(\"description\"),\n \"severity\": SEVERITY_TO_GITLAB_LEVEL.get(severity, DEFAULT_SEVERITY_GITLAB_LEVEL),\n \"solution\": details.get(\"status\"),\n }\n\n def _create_license_vulnerability(self, record: Record) -> dict[str, Any] | None:\n details = record.vulnerability_details\n if not details:\n # this shouldn't happen\n return None\n\n return {\n \"id\": str(uuid4()),\n \"identifiers\": [\n {\n \"name\": record.check_id,\n \"type\": \"license\",\n \"value\": record.check_id,\n }\n ],\n \"location\": {\n \"file\": record.repo_file_path.lstrip(\"/\"),\n },\n \"name\": record.short_description,\n \"description\": f\"Package {details['package_name']}@{details['package_version']} has license {details['license']}\",\n }\n", "path": "checkov/common/output/gitlab_sast.py"}]}
2,354
636
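The change captured in the record above comes down to one pattern: optional schema fields such as `url` and `links` are emitted only when a real link exists, because the GitLab SAST schema rejects `null` for them. Below is a minimal sketch of that pattern with made-up inputs, not checkov's actual `Record` objects.

```python
from __future__ import annotations

from typing import Any
from uuid import uuid4


def build_vulnerability(check_id: str, guideline: str | None) -> dict[str, Any]:
    """Return a finding where optional link fields appear only when a link exists."""
    identifier: dict[str, Any] = {"name": check_id, "type": "checkov", "value": check_id}
    vulnerability: dict[str, Any] = {"id": str(uuid4()), "identifiers": [identifier]}
    if guideline:  # the schema allows these keys to be absent, but not null
        identifier["url"] = guideline
        vulnerability["links"] = [{"url": guideline}]
    return vulnerability


if __name__ == "__main__":
    # With a guideline the optional keys appear; without one they are simply omitted.
    print(build_vulnerability("CKV_AWS_283", "https://docs.example.com/CKV_AWS_283"))
    print(build_vulnerability("CKV_AWS_283", None))
```

Dropping the key entirely is what keeps the report schema-valid; substituting an empty string would satisfy the type check but mislead downstream consumers.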
gh_patches_debug_16507
rasdani/github-patches
git_diff
azavea__raster-vision-725
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Cannot handle CRS w/o EPSG ``` Checking for existing output [####################################] 100% Saving command configuration to /opt/data/rv_root/chip/xview-object_detection/command-config.json... Saving command configuration to /opt/data/rv_root/train/xview-object-detection-mobilenet/command-config.json... Saving command configuration to /opt/data/rv_root/bundle/xview-object-detection-mobilenet/command-config.json... Saving command configuration to /opt/data/rv_root/predict/xview-object-detection-mobilenet/command-config.json... Saving command configuration to /opt/data/rv_root/eval/xview-object-detection-mobilenet/command-config.json... python -m rastervision run_command /opt/data/rv_root/chip/xview-object_detection/command-config.json Making training chips... 2019-03-20 16:00:47:rastervision.utils.files: INFO - Downloading s3://azavea-nyc-ml/cogs/000227.tif to /tmp/tmp98w0p7cl/tmpypmuuz6t/tmpqbscq03c/s3/azavea-nyc-ml/cogs/000227.tif Traceback (most recent call last): File "/usr/lib/python3.5/runpy.py", line 184, in _run_module_as_main "__main__", mod_spec) File "/usr/lib/python3.5/runpy.py", line 85, in _run_code exec(code, run_globals) File "/opt/src/rastervision/__main__.py", line 17, in <module> rv.main() File "/usr/local/lib/python3.5/dist-packages/click/core.py", line 722, in __call__ return self.main(*args, **kwargs) File "/usr/local/lib/python3.5/dist-packages/click/core.py", line 697, in main rv = self.invoke(ctx) File "/usr/local/lib/python3.5/dist-packages/click/core.py", line 1066, in invoke return _process_result(sub_ctx.command.invoke(sub_ctx)) File "/usr/local/lib/python3.5/dist-packages/click/core.py", line 895, in invoke return ctx.invoke(self.callback, **ctx.params) File "/usr/local/lib/python3.5/dist-packages/click/core.py", line 535, in invoke return callback(*args, **kwargs) File "/opt/src/rastervision/cli/main.py", line 253, in run_command rv.runner.CommandRunner.run(command_config_uri) File "/opt/src/rastervision/runner/command_runner.py", line 11, in run CommandRunner.run_from_proto(msg) File "/opt/src/rastervision/runner/command_runner.py", line 17, in run_from_proto command.run() File "/opt/src/rastervision/command/chip_command.py", line 22, in run map(lambda s: s.create_scene(cc.task, tmp_dir), cc.train_scenes)) File "/opt/src/rastervision/command/chip_command.py", line 22, in <lambda> map(lambda s: s.create_scene(cc.task, tmp_dir), cc.train_scenes)) File "/opt/src/rastervision/data/scene_config.py", line 36, in create_scene raster_source = self.raster_source.create_source(tmp_dir) File "/opt/src/rastervision/data/raster_source/geotiff_source_config.py", line 70, in create_source y_shift_meters=y_shift_meters) File "/opt/src/rastervision/data/raster_source/geotiff_source.py", line 46, in __init__ super().__init__(raster_transformers, temp_dir, channel_order) File "/opt/src/rastervision/data/raster_source/rasterio_source.py", line 47, in __init__ with self.activate(): File "/opt/src/rastervision/data/activate_mixin.py", line 21, in __enter__ self.activate() File "/opt/src/rastervision/data/activate_mixin.py", line 54, in do_activate self._activate() File "/opt/src/rastervision/data/raster_source/geotiff_source.py", line 104, in _activate super()._activate() File "/opt/src/rastervision/data/raster_source/rasterio_source.py", line 106, in _activate self._set_crs_transformer() File "/opt/src/rastervision/data/raster_source/geotiff_source.py", line 
56, in _set_crs_transformer self.image_dataset) File "/opt/src/rastervision/data/crs_transformer/rasterio_crs_transformer.py", line 59, in from_dataset image_crs = dataset.crs['init'] File "/usr/lib/python3.5/collections/__init__.py", line 986, in __getitem__ raise KeyError(key) KeyError: 'init' /tmp/tmpkon0yhf2/tmpyysq6a82/Makefile:6: recipe for target '0' failed make: *** [0] Error 1 ``` </issue> <code> [start of rastervision/data/crs_transformer/rasterio_crs_transformer.py] 1 import pyproj 2 3 from rasterio.transform import (rowcol, xy) 4 5 from rastervision.data.crs_transformer import (CRSTransformer, 6 IdentityCRSTransformer) 7 8 9 class RasterioCRSTransformer(CRSTransformer): 10 """Transformer for a RasterioRasterSource.""" 11 12 def __init__(self, transform, image_crs, map_crs='epsg:4326'): 13 """Construct transformer. 14 15 Args: 16 image_dataset: Rasterio DatasetReader 17 map_crs: CRS code 18 """ 19 self.map_proj = pyproj.Proj(init=map_crs) 20 self.image_proj = pyproj.Proj(init=image_crs) 21 22 super().__init__(image_crs, map_crs, transform) 23 24 def map_to_pixel(self, map_point): 25 """Transform point from map to pixel-based coordinates. 26 27 Args: 28 map_point: (x, y) tuple in map coordinates 29 30 Returns: 31 (x, y) tuple in pixel coordinates 32 """ 33 image_point = pyproj.transform(self.map_proj, self.image_proj, 34 map_point[0], map_point[1]) 35 pixel_point = rowcol(self.transform, image_point[0], image_point[1]) 36 pixel_point = (pixel_point[1], pixel_point[0]) 37 return pixel_point 38 39 def pixel_to_map(self, pixel_point): 40 """Transform point from pixel to map-based coordinates. 41 42 Args: 43 pixel_point: (x, y) tuple in pixel coordinates 44 45 Returns: 46 (x, y) tuple in map coordinates 47 """ 48 image_point = xy(self.transform, int(pixel_point[1]), 49 int(pixel_point[0])) 50 map_point = pyproj.transform(self.image_proj, self.map_proj, 51 image_point[0], image_point[1]) 52 return map_point 53 54 @classmethod 55 def from_dataset(cls, dataset, map_crs='epsg:4326'): 56 if dataset.crs is None: 57 return IdentityCRSTransformer() 58 transform = dataset.transform 59 image_crs = dataset.crs['init'] 60 return cls(transform, image_crs, map_crs) 61 62 def get_affine_transform(self): 63 return self.transform 64 [end of rastervision/data/crs_transformer/rasterio_crs_transformer.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/rastervision/data/crs_transformer/rasterio_crs_transformer.py b/rastervision/data/crs_transformer/rasterio_crs_transformer.py --- a/rastervision/data/crs_transformer/rasterio_crs_transformer.py +++ b/rastervision/data/crs_transformer/rasterio_crs_transformer.py @@ -17,7 +17,7 @@ map_crs: CRS code """ self.map_proj = pyproj.Proj(init=map_crs) - self.image_proj = pyproj.Proj(init=image_crs) + self.image_proj = pyproj.Proj(image_crs) super().__init__(image_crs, map_crs, transform) @@ -56,7 +56,7 @@ if dataset.crs is None: return IdentityCRSTransformer() transform = dataset.transform - image_crs = dataset.crs['init'] + image_crs = dataset.crs return cls(transform, image_crs, map_crs) def get_affine_transform(self):
{"golden_diff": "diff --git a/rastervision/data/crs_transformer/rasterio_crs_transformer.py b/rastervision/data/crs_transformer/rasterio_crs_transformer.py\n--- a/rastervision/data/crs_transformer/rasterio_crs_transformer.py\n+++ b/rastervision/data/crs_transformer/rasterio_crs_transformer.py\n@@ -17,7 +17,7 @@\n map_crs: CRS code\n \"\"\"\n self.map_proj = pyproj.Proj(init=map_crs)\n- self.image_proj = pyproj.Proj(init=image_crs)\n+ self.image_proj = pyproj.Proj(image_crs)\n \n super().__init__(image_crs, map_crs, transform)\n \n@@ -56,7 +56,7 @@\n if dataset.crs is None:\n return IdentityCRSTransformer()\n transform = dataset.transform\n- image_crs = dataset.crs['init']\n+ image_crs = dataset.crs\n return cls(transform, image_crs, map_crs)\n \n def get_affine_transform(self):\n", "issue": "Cannot handle CRS w/o EPSG\n```\r\nChecking for existing output [####################################] 100%\r\nSaving command configuration to /opt/data/rv_root/chip/xview-object_detection/command-config.json...\r\nSaving command configuration to /opt/data/rv_root/train/xview-object-detection-mobilenet/command-config.json...\r\nSaving command configuration to /opt/data/rv_root/bundle/xview-object-detection-mobilenet/command-config.json...\r\nSaving command configuration to /opt/data/rv_root/predict/xview-object-detection-mobilenet/command-config.json...\r\nSaving command configuration to /opt/data/rv_root/eval/xview-object-detection-mobilenet/command-config.json...\r\npython -m rastervision run_command /opt/data/rv_root/chip/xview-object_detection/command-config.json\r\nMaking training chips...\r\n2019-03-20 16:00:47:rastervision.utils.files: INFO - Downloading s3://azavea-nyc-ml/cogs/000227.tif to /tmp/tmp98w0p7cl/tmpypmuuz6t/tmpqbscq03c/s3/azavea-nyc-ml/cogs/000227.tif\r\nTraceback (most recent call last):\r\n File \"/usr/lib/python3.5/runpy.py\", line 184, in _run_module_as_main\r\n \"__main__\", mod_spec)\r\n File \"/usr/lib/python3.5/runpy.py\", line 85, in _run_code\r\n exec(code, run_globals)\r\n File \"/opt/src/rastervision/__main__.py\", line 17, in <module>\r\n rv.main()\r\n File \"/usr/local/lib/python3.5/dist-packages/click/core.py\", line 722, in __call__\r\n return self.main(*args, **kwargs)\r\n File \"/usr/local/lib/python3.5/dist-packages/click/core.py\", line 697, in main\r\n rv = self.invoke(ctx)\r\n File \"/usr/local/lib/python3.5/dist-packages/click/core.py\", line 1066, in invoke\r\n return _process_result(sub_ctx.command.invoke(sub_ctx))\r\n File \"/usr/local/lib/python3.5/dist-packages/click/core.py\", line 895, in invoke\r\n return ctx.invoke(self.callback, **ctx.params)\r\n File \"/usr/local/lib/python3.5/dist-packages/click/core.py\", line 535, in invoke\r\n return callback(*args, **kwargs)\r\n File \"/opt/src/rastervision/cli/main.py\", line 253, in run_command\r\n rv.runner.CommandRunner.run(command_config_uri)\r\n File \"/opt/src/rastervision/runner/command_runner.py\", line 11, in run\r\n CommandRunner.run_from_proto(msg)\r\n File \"/opt/src/rastervision/runner/command_runner.py\", line 17, in run_from_proto\r\n command.run()\r\n File \"/opt/src/rastervision/command/chip_command.py\", line 22, in run\r\n map(lambda s: s.create_scene(cc.task, tmp_dir), cc.train_scenes))\r\n File \"/opt/src/rastervision/command/chip_command.py\", line 22, in <lambda>\r\n map(lambda s: s.create_scene(cc.task, tmp_dir), cc.train_scenes))\r\n File \"/opt/src/rastervision/data/scene_config.py\", line 36, in create_scene\r\n raster_source = self.raster_source.create_source(tmp_dir)\r\n File 
\"/opt/src/rastervision/data/raster_source/geotiff_source_config.py\", line 70, in create_source\r\n y_shift_meters=y_shift_meters)\r\n File \"/opt/src/rastervision/data/raster_source/geotiff_source.py\", line 46, in __init__\r\n super().__init__(raster_transformers, temp_dir, channel_order)\r\n File \"/opt/src/rastervision/data/raster_source/rasterio_source.py\", line 47, in __init__\r\n with self.activate():\r\n File \"/opt/src/rastervision/data/activate_mixin.py\", line 21, in __enter__\r\n self.activate()\r\n File \"/opt/src/rastervision/data/activate_mixin.py\", line 54, in do_activate\r\n self._activate()\r\n File \"/opt/src/rastervision/data/raster_source/geotiff_source.py\", line 104, in _activate\r\n super()._activate()\r\n File \"/opt/src/rastervision/data/raster_source/rasterio_source.py\", line 106, in _activate\r\n self._set_crs_transformer()\r\n File \"/opt/src/rastervision/data/raster_source/geotiff_source.py\", line 56, in _set_crs_transformer\r\n self.image_dataset)\r\n File \"/opt/src/rastervision/data/crs_transformer/rasterio_crs_transformer.py\", line 59, in from_dataset\r\n image_crs = dataset.crs['init']\r\n File \"/usr/lib/python3.5/collections/__init__.py\", line 986, in __getitem__\r\n raise KeyError(key)\r\nKeyError: 'init'\r\n/tmp/tmpkon0yhf2/tmpyysq6a82/Makefile:6: recipe for target '0' failed\r\nmake: *** [0] Error 1\r\n```\n", "before_files": [{"content": "import pyproj\n\nfrom rasterio.transform import (rowcol, xy)\n\nfrom rastervision.data.crs_transformer import (CRSTransformer,\n IdentityCRSTransformer)\n\n\nclass RasterioCRSTransformer(CRSTransformer):\n \"\"\"Transformer for a RasterioRasterSource.\"\"\"\n\n def __init__(self, transform, image_crs, map_crs='epsg:4326'):\n \"\"\"Construct transformer.\n\n Args:\n image_dataset: Rasterio DatasetReader\n map_crs: CRS code\n \"\"\"\n self.map_proj = pyproj.Proj(init=map_crs)\n self.image_proj = pyproj.Proj(init=image_crs)\n\n super().__init__(image_crs, map_crs, transform)\n\n def map_to_pixel(self, map_point):\n \"\"\"Transform point from map to pixel-based coordinates.\n\n Args:\n map_point: (x, y) tuple in map coordinates\n\n Returns:\n (x, y) tuple in pixel coordinates\n \"\"\"\n image_point = pyproj.transform(self.map_proj, self.image_proj,\n map_point[0], map_point[1])\n pixel_point = rowcol(self.transform, image_point[0], image_point[1])\n pixel_point = (pixel_point[1], pixel_point[0])\n return pixel_point\n\n def pixel_to_map(self, pixel_point):\n \"\"\"Transform point from pixel to map-based coordinates.\n\n Args:\n pixel_point: (x, y) tuple in pixel coordinates\n\n Returns:\n (x, y) tuple in map coordinates\n \"\"\"\n image_point = xy(self.transform, int(pixel_point[1]),\n int(pixel_point[0]))\n map_point = pyproj.transform(self.image_proj, self.map_proj,\n image_point[0], image_point[1])\n return map_point\n\n @classmethod\n def from_dataset(cls, dataset, map_crs='epsg:4326'):\n if dataset.crs is None:\n return IdentityCRSTransformer()\n transform = dataset.transform\n image_crs = dataset.crs['init']\n return cls(transform, image_crs, map_crs)\n\n def get_affine_transform(self):\n return self.transform\n", "path": "rastervision/data/crs_transformer/rasterio_crs_transformer.py"}]}
2,331
241
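The raster-vision record above fails because the source assumed every CRS exposes an `init` entry with an EPSG code. The merged diff hands the whole CRS object to pyproj instead; the sketch below shows a closely related way to cope with EPSG-less coordinate systems using only documented rasterio and pyproj calls (prefer `to_epsg()`, fall back to WKT). The sample CRS values are illustrative, and this is not the project's exact fix.

```python
from pyproj import CRS
from rasterio.crs import CRS as RioCRS


def to_pyproj_crs(image_crs: RioCRS) -> CRS:
    """Prefer a derivable EPSG code, otherwise round-trip through WKT."""
    epsg = image_crs.to_epsg()
    if epsg is not None:
        return CRS.from_epsg(epsg)
    # No EPSG code available (e.g. a custom projection): WKT still carries it.
    return CRS.from_wkt(image_crs.to_wkt())


if __name__ == "__main__":
    with_epsg = RioCRS.from_epsg(32618)
    without_epsg = RioCRS.from_proj4(
        "+proj=aea +lat_1=29.5 +lat_2=45.5 +datum=WGS84 +units=m"
    )
    for crs in (with_epsg, without_epsg):
        print(to_pyproj_crs(crs).name)
```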
gh_patches_debug_3302
rasdani/github-patches
git_diff
Project-MONAI__MONAI-2093
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Enhance the log of SegmentationSaver **Is your feature request related to a problem? Please describe.** We need to print out more info when saving segmentation results, now it's only: ``` saved all the model outputs into files. ``` </issue> <code> [start of monai/data/nifti_saver.py] 1 # Copyright 2020 - 2021 MONAI Consortium 2 # Licensed under the Apache License, Version 2.0 (the "License"); 3 # you may not use this file except in compliance with the License. 4 # You may obtain a copy of the License at 5 # http://www.apache.org/licenses/LICENSE-2.0 6 # Unless required by applicable law or agreed to in writing, software 7 # distributed under the License is distributed on an "AS IS" BASIS, 8 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 9 # See the License for the specific language governing permissions and 10 # limitations under the License. 11 12 from typing import Dict, Optional, Union 13 14 import numpy as np 15 import torch 16 17 from monai.config import DtypeLike 18 from monai.data.nifti_writer import write_nifti 19 from monai.data.utils import create_file_basename 20 from monai.utils import GridSampleMode, GridSamplePadMode 21 from monai.utils import ImageMetaKey as Key 22 23 24 class NiftiSaver: 25 """ 26 Save the data as NIfTI file, it can support single data content or a batch of data. 27 Typically, the data can be segmentation predictions, call `save` for single data 28 or call `save_batch` to save a batch of data together. If no meta data provided, 29 use index from 0 as the filename prefix. 30 31 NB: image should include channel dimension: [B],C,H,W,[D]. 32 """ 33 34 def __init__( 35 self, 36 output_dir: str = "./", 37 output_postfix: str = "seg", 38 output_ext: str = ".nii.gz", 39 resample: bool = True, 40 mode: Union[GridSampleMode, str] = GridSampleMode.BILINEAR, 41 padding_mode: Union[GridSamplePadMode, str] = GridSamplePadMode.BORDER, 42 align_corners: bool = False, 43 dtype: DtypeLike = np.float64, 44 output_dtype: DtypeLike = np.float32, 45 squeeze_end_dims: bool = True, 46 data_root_dir: str = "", 47 ) -> None: 48 """ 49 Args: 50 output_dir: output image directory. 51 output_postfix: a string appended to all output file names. 52 output_ext: output file extension name. 53 resample: whether to resample before saving the data array. 54 mode: {``"bilinear"``, ``"nearest"``} 55 This option is used when ``resample = True``. 56 Interpolation mode to calculate output values. Defaults to ``"bilinear"``. 57 See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample 58 padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``} 59 This option is used when ``resample = True``. 60 Padding mode for outside grid values. Defaults to ``"border"``. 61 See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample 62 align_corners: Geometrically, we consider the pixels of the input as squares rather than points. 63 See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample 64 dtype: data type for resampling computation. Defaults to ``np.float64`` for best precision. 65 If None, use the data type of input data. 66 output_dtype: data type for saving data. Defaults to ``np.float32``. 67 squeeze_end_dims: if True, any trailing singleton dimensions will be removed (after the channel 68 has been moved to the end). 
So if input is (C,H,W,D), this will be altered to (H,W,D,C), and 69 then if C==1, it will be saved as (H,W,D). If D also ==1, it will be saved as (H,W). If false, 70 image will always be saved as (H,W,D,C). 71 data_root_dir: if not empty, it specifies the beginning parts of the input file's 72 absolute path. it's used to compute `input_file_rel_path`, the relative path to the file from 73 `data_root_dir` to preserve folder structure when saving in case there are files in different 74 folders with the same file names. for example: 75 input_file_name: /foo/bar/test1/image.nii, 76 postfix: seg 77 output_ext: nii.gz 78 output_dir: /output, 79 data_root_dir: /foo/bar, 80 output will be: /output/test1/image/image_seg.nii.gz 81 82 """ 83 self.output_dir = output_dir 84 self.output_postfix = output_postfix 85 self.output_ext = output_ext 86 self.resample = resample 87 self.mode: GridSampleMode = GridSampleMode(mode) 88 self.padding_mode: GridSamplePadMode = GridSamplePadMode(padding_mode) 89 self.align_corners = align_corners 90 self.dtype = dtype 91 self.output_dtype = output_dtype 92 self._data_index = 0 93 self.squeeze_end_dims = squeeze_end_dims 94 self.data_root_dir = data_root_dir 95 96 def save(self, data: Union[torch.Tensor, np.ndarray], meta_data: Optional[Dict] = None) -> None: 97 """ 98 Save data into a Nifti file. 99 The meta_data could optionally have the following keys: 100 101 - ``'filename_or_obj'`` -- for output file name creation, corresponding to filename or object. 102 - ``'original_affine'`` -- for data orientation handling, defaulting to an identity matrix. 103 - ``'affine'`` -- for data output affine, defaulting to an identity matrix. 104 - ``'spatial_shape'`` -- for data output shape. 105 - ``'patch_index'`` -- if the data is a patch of big image, append the patch index to filename. 106 107 When meta_data is specified, the saver will try to resample batch data from the space 108 defined by "affine" to the space defined by "original_affine". 109 110 If meta_data is None, use the default index (starting from 0) as the filename. 111 112 Args: 113 data: target data content that to be saved as a NIfTI format file. 114 Assuming the data shape starts with a channel dimension and followed by spatial dimensions. 115 meta_data: the meta data information corresponding to the data. 
116 117 See Also 118 :py:meth:`monai.data.nifti_writer.write_nifti` 119 """ 120 filename = meta_data[Key.FILENAME_OR_OBJ] if meta_data else str(self._data_index) 121 self._data_index += 1 122 original_affine = meta_data.get("original_affine", None) if meta_data else None 123 affine = meta_data.get("affine", None) if meta_data else None 124 spatial_shape = meta_data.get("spatial_shape", None) if meta_data else None 125 patch_index = meta_data.get(Key.PATCH_INDEX, None) if meta_data else None 126 127 if isinstance(data, torch.Tensor): 128 data = data.detach().cpu().numpy() 129 130 path = create_file_basename(self.output_postfix, filename, self.output_dir, self.data_root_dir, patch_index) 131 path = f"{path}{self.output_ext}" 132 # change data shape to be (channel, h, w, d) 133 while len(data.shape) < 4: 134 data = np.expand_dims(data, -1) 135 # change data to "channel last" format and write to nifti format file 136 data = np.moveaxis(np.asarray(data), 0, -1) 137 138 # if desired, remove trailing singleton dimensions 139 if self.squeeze_end_dims: 140 while data.shape[-1] == 1: 141 data = np.squeeze(data, -1) 142 143 write_nifti( 144 data, 145 file_name=path, 146 affine=affine, 147 target_affine=original_affine, 148 resample=self.resample, 149 output_spatial_shape=spatial_shape, 150 mode=self.mode, 151 padding_mode=self.padding_mode, 152 align_corners=self.align_corners, 153 dtype=self.dtype, 154 output_dtype=self.output_dtype, 155 ) 156 157 def save_batch(self, batch_data: Union[torch.Tensor, np.ndarray], meta_data: Optional[Dict] = None) -> None: 158 """ 159 Save a batch of data into Nifti format files. 160 161 Spatially it supports up to three dimensions, that is, H, HW, HWD for 162 1D, 2D, 3D respectively (with resampling supports for 2D and 3D only). 163 164 When saving multiple time steps or multiple channels `batch_data`, 165 time and/or modality axes should be appended after the batch dimensions. 166 For example, the shape of a batch of 2D eight-class 167 segmentation probabilities to be saved could be `(batch, 8, 64, 64)`; 168 in this case each item in the batch will be saved as (64, 64, 1, 8) 169 NIfTI file (the third dimension is reserved as a spatial dimension). 170 171 Args: 172 batch_data: target batch data content that save into NIfTI format. 173 meta_data: every key-value in the meta_data is corresponding to a batch of data. 174 175 """ 176 for i, data in enumerate(batch_data): # save a batch of files 177 self.save(data=data, meta_data={k: meta_data[k][i] for k in meta_data} if meta_data is not None else None) 178 [end of monai/data/nifti_saver.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/monai/data/nifti_saver.py b/monai/data/nifti_saver.py --- a/monai/data/nifti_saver.py +++ b/monai/data/nifti_saver.py @@ -153,6 +153,7 @@ dtype=self.dtype, output_dtype=self.output_dtype, ) + print(f"file written: {path}.") def save_batch(self, batch_data: Union[torch.Tensor, np.ndarray], meta_data: Optional[Dict] = None) -> None: """
{"golden_diff": "diff --git a/monai/data/nifti_saver.py b/monai/data/nifti_saver.py\n--- a/monai/data/nifti_saver.py\n+++ b/monai/data/nifti_saver.py\n@@ -153,6 +153,7 @@\n dtype=self.dtype,\n output_dtype=self.output_dtype,\n )\n+ print(f\"file written: {path}.\")\n \n def save_batch(self, batch_data: Union[torch.Tensor, np.ndarray], meta_data: Optional[Dict] = None) -> None:\n \"\"\"\n", "issue": "Enhance the log of SegmentationSaver\n**Is your feature request related to a problem? Please describe.**\r\nWe need to print out more info when saving segmentation results, now it's only:\r\n```\r\nsaved all the model outputs into files.\r\n```\r\n\n", "before_files": [{"content": "# Copyright 2020 - 2021 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Dict, Optional, Union\n\nimport numpy as np\nimport torch\n\nfrom monai.config import DtypeLike\nfrom monai.data.nifti_writer import write_nifti\nfrom monai.data.utils import create_file_basename\nfrom monai.utils import GridSampleMode, GridSamplePadMode\nfrom monai.utils import ImageMetaKey as Key\n\n\nclass NiftiSaver:\n \"\"\"\n Save the data as NIfTI file, it can support single data content or a batch of data.\n Typically, the data can be segmentation predictions, call `save` for single data\n or call `save_batch` to save a batch of data together. If no meta data provided,\n use index from 0 as the filename prefix.\n\n NB: image should include channel dimension: [B],C,H,W,[D].\n \"\"\"\n\n def __init__(\n self,\n output_dir: str = \"./\",\n output_postfix: str = \"seg\",\n output_ext: str = \".nii.gz\",\n resample: bool = True,\n mode: Union[GridSampleMode, str] = GridSampleMode.BILINEAR,\n padding_mode: Union[GridSamplePadMode, str] = GridSamplePadMode.BORDER,\n align_corners: bool = False,\n dtype: DtypeLike = np.float64,\n output_dtype: DtypeLike = np.float32,\n squeeze_end_dims: bool = True,\n data_root_dir: str = \"\",\n ) -> None:\n \"\"\"\n Args:\n output_dir: output image directory.\n output_postfix: a string appended to all output file names.\n output_ext: output file extension name.\n resample: whether to resample before saving the data array.\n mode: {``\"bilinear\"``, ``\"nearest\"``}\n This option is used when ``resample = True``.\n Interpolation mode to calculate output values. Defaults to ``\"bilinear\"``.\n See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample\n padding_mode: {``\"zeros\"``, ``\"border\"``, ``\"reflection\"``}\n This option is used when ``resample = True``.\n Padding mode for outside grid values. Defaults to ``\"border\"``.\n See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample\n align_corners: Geometrically, we consider the pixels of the input as squares rather than points.\n See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample\n dtype: data type for resampling computation. Defaults to ``np.float64`` for best precision.\n If None, use the data type of input data.\n output_dtype: data type for saving data. 
Defaults to ``np.float32``.\n squeeze_end_dims: if True, any trailing singleton dimensions will be removed (after the channel\n has been moved to the end). So if input is (C,H,W,D), this will be altered to (H,W,D,C), and\n then if C==1, it will be saved as (H,W,D). If D also ==1, it will be saved as (H,W). If false,\n image will always be saved as (H,W,D,C).\n data_root_dir: if not empty, it specifies the beginning parts of the input file's\n absolute path. it's used to compute `input_file_rel_path`, the relative path to the file from\n `data_root_dir` to preserve folder structure when saving in case there are files in different\n folders with the same file names. for example:\n input_file_name: /foo/bar/test1/image.nii,\n postfix: seg\n output_ext: nii.gz\n output_dir: /output,\n data_root_dir: /foo/bar,\n output will be: /output/test1/image/image_seg.nii.gz\n\n \"\"\"\n self.output_dir = output_dir\n self.output_postfix = output_postfix\n self.output_ext = output_ext\n self.resample = resample\n self.mode: GridSampleMode = GridSampleMode(mode)\n self.padding_mode: GridSamplePadMode = GridSamplePadMode(padding_mode)\n self.align_corners = align_corners\n self.dtype = dtype\n self.output_dtype = output_dtype\n self._data_index = 0\n self.squeeze_end_dims = squeeze_end_dims\n self.data_root_dir = data_root_dir\n\n def save(self, data: Union[torch.Tensor, np.ndarray], meta_data: Optional[Dict] = None) -> None:\n \"\"\"\n Save data into a Nifti file.\n The meta_data could optionally have the following keys:\n\n - ``'filename_or_obj'`` -- for output file name creation, corresponding to filename or object.\n - ``'original_affine'`` -- for data orientation handling, defaulting to an identity matrix.\n - ``'affine'`` -- for data output affine, defaulting to an identity matrix.\n - ``'spatial_shape'`` -- for data output shape.\n - ``'patch_index'`` -- if the data is a patch of big image, append the patch index to filename.\n\n When meta_data is specified, the saver will try to resample batch data from the space\n defined by \"affine\" to the space defined by \"original_affine\".\n\n If meta_data is None, use the default index (starting from 0) as the filename.\n\n Args:\n data: target data content that to be saved as a NIfTI format file.\n Assuming the data shape starts with a channel dimension and followed by spatial dimensions.\n meta_data: the meta data information corresponding to the data.\n\n See Also\n :py:meth:`monai.data.nifti_writer.write_nifti`\n \"\"\"\n filename = meta_data[Key.FILENAME_OR_OBJ] if meta_data else str(self._data_index)\n self._data_index += 1\n original_affine = meta_data.get(\"original_affine\", None) if meta_data else None\n affine = meta_data.get(\"affine\", None) if meta_data else None\n spatial_shape = meta_data.get(\"spatial_shape\", None) if meta_data else None\n patch_index = meta_data.get(Key.PATCH_INDEX, None) if meta_data else None\n\n if isinstance(data, torch.Tensor):\n data = data.detach().cpu().numpy()\n\n path = create_file_basename(self.output_postfix, filename, self.output_dir, self.data_root_dir, patch_index)\n path = f\"{path}{self.output_ext}\"\n # change data shape to be (channel, h, w, d)\n while len(data.shape) < 4:\n data = np.expand_dims(data, -1)\n # change data to \"channel last\" format and write to nifti format file\n data = np.moveaxis(np.asarray(data), 0, -1)\n\n # if desired, remove trailing singleton dimensions\n if self.squeeze_end_dims:\n while data.shape[-1] == 1:\n data = np.squeeze(data, -1)\n\n write_nifti(\n data,\n 
file_name=path,\n affine=affine,\n target_affine=original_affine,\n resample=self.resample,\n output_spatial_shape=spatial_shape,\n mode=self.mode,\n padding_mode=self.padding_mode,\n align_corners=self.align_corners,\n dtype=self.dtype,\n output_dtype=self.output_dtype,\n )\n\n def save_batch(self, batch_data: Union[torch.Tensor, np.ndarray], meta_data: Optional[Dict] = None) -> None:\n \"\"\"\n Save a batch of data into Nifti format files.\n\n Spatially it supports up to three dimensions, that is, H, HW, HWD for\n 1D, 2D, 3D respectively (with resampling supports for 2D and 3D only).\n\n When saving multiple time steps or multiple channels `batch_data`,\n time and/or modality axes should be appended after the batch dimensions.\n For example, the shape of a batch of 2D eight-class\n segmentation probabilities to be saved could be `(batch, 8, 64, 64)`;\n in this case each item in the batch will be saved as (64, 64, 1, 8)\n NIfTI file (the third dimension is reserved as a spatial dimension).\n\n Args:\n batch_data: target batch data content that save into NIfTI format.\n meta_data: every key-value in the meta_data is corresponding to a batch of data.\n\n \"\"\"\n for i, data in enumerate(batch_data): # save a batch of files\n self.save(data=data, meta_data={k: meta_data[k][i] for k in meta_data} if meta_data is not None else None)\n", "path": "monai/data/nifti_saver.py"}]}
3,057
125
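The MONAI record above only asks for more informative output when a file is saved, and the merged diff prints the written path. The sketch below is a generic stand-in rather than MONAI's `NiftiSaver`: the `ArraySaver` class and its naming scheme are invented for illustration, and it routes the same "report every written path" message through the standard logging module instead of a bare print.

```python
import logging
import os

logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)


class ArraySaver:
    """Hypothetical saver that reports the destination of every file it writes."""

    def __init__(self, output_dir: str = "./out", output_ext: str = ".nii.gz") -> None:
        self.output_dir = output_dir
        self.output_ext = output_ext

    def save(self, name: str, data: bytes) -> str:
        os.makedirs(self.output_dir, exist_ok=True)
        path = os.path.join(self.output_dir, f"{name}{self.output_ext}")
        with open(path, "wb") as handle:
            handle.write(data)
        logger.info("file written: %s", path)  # the extra detail the issue asks for
        return path


if __name__ == "__main__":
    ArraySaver().save("image_seg", b"\x00" * 16)
```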
gh_patches_debug_23949
rasdani/github-patches
git_diff
pypa__virtualenv-1866
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> .gitignore added to places it should not Refs #1806 Virtualenv now drops a .gitignore file. While I understand where you are coming from, it is a mistake to assume virtualenv owns the target directory. For example, I and many people I work with typically do this: ``` git clone [email protected]:stefanholek/foo cd foo virtualenv . ./bin/pip install -e . ./bin/python -m unittest ``` Recent versions of virtualenv add a .gitignore file into my sandbox. To make matters worse it even **overwrites** an existing .gitignore file! This is very clearly wrong. I suggest to not write the .gitignore file unless virtualenv has created the target directory, or at least don't write it if the target directory already contains a .git directory and/or a .gitignore file. Thanks! </issue> <code> [start of src/virtualenv/create/creator.py] 1 from __future__ import absolute_import, print_function, unicode_literals 2 3 import json 4 import logging 5 import os 6 import sys 7 from abc import ABCMeta, abstractmethod 8 from argparse import ArgumentTypeError 9 from ast import literal_eval 10 from collections import OrderedDict 11 from textwrap import dedent 12 13 from six import add_metaclass 14 15 from virtualenv.discovery.cached_py_info import LogCmd 16 from virtualenv.info import WIN_CPYTHON_2 17 from virtualenv.util.path import Path, safe_delete 18 from virtualenv.util.six import ensure_str, ensure_text 19 from virtualenv.util.subprocess import run_cmd 20 from virtualenv.version import __version__ 21 22 from .pyenv_cfg import PyEnvCfg 23 24 HERE = Path(os.path.abspath(__file__)).parent 25 DEBUG_SCRIPT = HERE / "debug.py" 26 27 28 class CreatorMeta(object): 29 def __init__(self): 30 self.error = None 31 32 33 @add_metaclass(ABCMeta) 34 class Creator(object): 35 """A class that given a python Interpreter creates a virtual environment""" 36 37 def __init__(self, options, interpreter): 38 """Construct a new virtual environment creator. 39 40 :param options: the CLI option as parsed from :meth:`add_parser_arguments` 41 :param interpreter: the interpreter to create virtual environment from 42 """ 43 self.interpreter = interpreter 44 self._debug = None 45 self.dest = Path(options.dest) 46 self.clear = options.clear 47 self.pyenv_cfg = PyEnvCfg.from_folder(self.dest) 48 self.app_data = options.app_data 49 50 def __repr__(self): 51 return ensure_str(self.__unicode__()) 52 53 def __unicode__(self): 54 return "{}({})".format(self.__class__.__name__, ", ".join("{}={}".format(k, v) for k, v in self._args())) 55 56 def _args(self): 57 return [ 58 ("dest", ensure_text(str(self.dest))), 59 ("clear", self.clear), 60 ] 61 62 @classmethod 63 def can_create(cls, interpreter): 64 """Determine if we can create a virtual environment. 65 66 :param interpreter: the interpreter in question 67 :return: ``None`` if we can't create, any other object otherwise that will be forwarded to \ 68 :meth:`add_parser_arguments` 69 """ 70 return True 71 72 @classmethod 73 def add_parser_arguments(cls, parser, interpreter, meta, app_data): 74 """Add CLI arguments for the creator. 
75 76 :param parser: the CLI parser 77 :param app_data: the application data folder 78 :param interpreter: the interpreter we're asked to create virtual environment for 79 :param meta: value as returned by :meth:`can_create` 80 """ 81 parser.add_argument( 82 "dest", help="directory to create virtualenv at", type=cls.validate_dest, 83 ) 84 parser.add_argument( 85 "--clear", 86 dest="clear", 87 action="store_true", 88 help="remove the destination directory if exist before starting (will overwrite files otherwise)", 89 default=False, 90 ) 91 92 @abstractmethod 93 def create(self): 94 """Perform the virtual environment creation.""" 95 raise NotImplementedError 96 97 @classmethod 98 def validate_dest(cls, raw_value): 99 """No path separator in the path, valid chars and must be write-able""" 100 101 def non_write_able(dest, value): 102 common = Path(*os.path.commonprefix([value.parts, dest.parts])) 103 raise ArgumentTypeError( 104 "the destination {} is not write-able at {}".format(dest.relative_to(common), common), 105 ) 106 107 # the file system must be able to encode 108 # note in newer CPython this is always utf-8 https://www.python.org/dev/peps/pep-0529/ 109 encoding = sys.getfilesystemencoding() 110 refused = OrderedDict() 111 kwargs = {"errors": "ignore"} if encoding != "mbcs" else {} 112 for char in ensure_text(raw_value): 113 try: 114 trip = char.encode(encoding, **kwargs).decode(encoding) 115 if trip == char: 116 continue 117 raise ValueError(trip) 118 except ValueError: 119 refused[char] = None 120 if refused: 121 raise ArgumentTypeError( 122 "the file system codec ({}) cannot handle characters {!r} within {!r}".format( 123 encoding, "".join(refused.keys()), raw_value, 124 ), 125 ) 126 if os.pathsep in raw_value: 127 raise ArgumentTypeError( 128 "destination {!r} must not contain the path separator ({}) as this would break " 129 "the activation scripts".format(raw_value, os.pathsep), 130 ) 131 132 value = Path(raw_value) 133 if value.exists() and value.is_file(): 134 raise ArgumentTypeError("the destination {} already exists and is a file".format(value)) 135 if (3, 3) <= sys.version_info <= (3, 6): 136 # pre 3.6 resolve is always strict, aka must exists, sidestep by using os.path operation 137 dest = Path(os.path.realpath(raw_value)) 138 else: 139 dest = Path(os.path.abspath(str(value))).resolve() # on Windows absolute does not imply resolve so use both 140 value = dest 141 while dest: 142 if dest.exists(): 143 if os.access(ensure_text(str(dest)), os.W_OK): 144 break 145 else: 146 non_write_able(dest, value) 147 base, _ = dest.parent, dest.name 148 if base == dest: 149 non_write_able(dest, value) # pragma: no cover 150 dest = base 151 return str(value) 152 153 def run(self): 154 if self.dest.exists() and self.clear: 155 logging.debug("delete %s", self.dest) 156 safe_delete(self.dest) 157 self.create() 158 self.set_pyenv_cfg() 159 self.setup_ignore_vcs() 160 161 def set_pyenv_cfg(self): 162 self.pyenv_cfg.content = OrderedDict() 163 self.pyenv_cfg["home"] = self.interpreter.system_exec_prefix 164 self.pyenv_cfg["implementation"] = self.interpreter.implementation 165 self.pyenv_cfg["version_info"] = ".".join(str(i) for i in self.interpreter.version_info) 166 self.pyenv_cfg["virtualenv"] = __version__ 167 168 def setup_ignore_vcs(self): 169 """Generate ignore instructions for version control systems.""" 170 # mark this folder to be ignored by VCS, handle https://www.python.org/dev/peps/pep-0610/#registered-vcs 171 (self.dest / ".gitignore").write_text( 172 dedent( 173 """ 174 # created 
by virtualenv automatically 175 * 176 """, 177 ).lstrip(), 178 ) 179 # Mercurial - does not support the .hgignore file inside a subdirectory directly, but only if included via the 180 # subinclude directive from root, at which point on might as well ignore the directory itself, see 181 # https://www.selenic.com/mercurial/hgignore.5.html for more details 182 # Bazaar - does not support ignore files in sub-directories, only at root level via .bzrignore 183 # Subversion - does not support ignore files, requires direct manipulation with the svn tool 184 185 @property 186 def debug(self): 187 """ 188 :return: debug information about the virtual environment (only valid after :meth:`create` has run) 189 """ 190 if self._debug is None and self.exe is not None: 191 self._debug = get_env_debug_info(self.exe, self.debug_script(), self.app_data) 192 return self._debug 193 194 # noinspection PyMethodMayBeStatic 195 def debug_script(self): 196 return DEBUG_SCRIPT 197 198 199 def get_env_debug_info(env_exe, debug_script, app_data): 200 env = os.environ.copy() 201 env.pop(str("PYTHONPATH"), None) 202 203 with app_data.ensure_extracted(debug_script) as debug_script: 204 cmd = [str(env_exe), str(debug_script)] 205 if WIN_CPYTHON_2: 206 cmd = [ensure_text(i) for i in cmd] 207 logging.debug(str("debug via %r"), LogCmd(cmd)) 208 code, out, err = run_cmd(cmd) 209 210 # noinspection PyBroadException 211 try: 212 if code != 0: 213 result = literal_eval(out) 214 else: 215 result = json.loads(out) 216 if err: 217 result["err"] = err 218 except Exception as exception: 219 return {"out": out, "err": err, "returncode": code, "exception": repr(exception)} 220 if "sys" in result and "path" in result["sys"]: 221 del result["sys"]["path"][0] 222 return result 223 [end of src/virtualenv/create/creator.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/virtualenv/create/creator.py b/src/virtualenv/create/creator.py --- a/src/virtualenv/create/creator.py +++ b/src/virtualenv/create/creator.py @@ -168,14 +168,16 @@ def setup_ignore_vcs(self): """Generate ignore instructions for version control systems.""" # mark this folder to be ignored by VCS, handle https://www.python.org/dev/peps/pep-0610/#registered-vcs - (self.dest / ".gitignore").write_text( - dedent( - """ - # created by virtualenv automatically - * - """, - ).lstrip(), - ) + git_ignore = self.dest / ".gitignore" + if not git_ignore.exists(): + git_ignore.write_text( + dedent( + """ + # created by virtualenv automatically + * + """, + ).lstrip(), + ) # Mercurial - does not support the .hgignore file inside a subdirectory directly, but only if included via the # subinclude directive from root, at which point on might as well ignore the directory itself, see # https://www.selenic.com/mercurial/hgignore.5.html for more details
{"golden_diff": "diff --git a/src/virtualenv/create/creator.py b/src/virtualenv/create/creator.py\n--- a/src/virtualenv/create/creator.py\n+++ b/src/virtualenv/create/creator.py\n@@ -168,14 +168,16 @@\n def setup_ignore_vcs(self):\n \"\"\"Generate ignore instructions for version control systems.\"\"\"\n # mark this folder to be ignored by VCS, handle https://www.python.org/dev/peps/pep-0610/#registered-vcs\n- (self.dest / \".gitignore\").write_text(\n- dedent(\n- \"\"\"\n- # created by virtualenv automatically\n- *\n- \"\"\",\n- ).lstrip(),\n- )\n+ git_ignore = self.dest / \".gitignore\"\n+ if not git_ignore.exists():\n+ git_ignore.write_text(\n+ dedent(\n+ \"\"\"\n+ # created by virtualenv automatically\n+ *\n+ \"\"\",\n+ ).lstrip(),\n+ )\n # Mercurial - does not support the .hgignore file inside a subdirectory directly, but only if included via the\n # subinclude directive from root, at which point on might as well ignore the directory itself, see\n # https://www.selenic.com/mercurial/hgignore.5.html for more details\n", "issue": ".gitignore added to places it should not\nRefs #1806\r\n\r\nVirtualenv now drops a .gitignore file. While I understand where you are coming from, it is a mistake to assume virtualenv owns the target directory. For example, I and many people I work with typically do this:\r\n\r\n```\r\ngit clone [email protected]:stefanholek/foo\r\ncd foo\r\nvirtualenv .\r\n./bin/pip install -e .\r\n./bin/python -m unittest\r\n```\r\n\r\nRecent versions of virtualenv add a .gitignore file into my sandbox. To make matters worse it even **overwrites** an existing .gitignore file! This is very clearly wrong.\r\n\r\nI suggest to not write the .gitignore file unless virtualenv has created the target directory, or at least don't write it if the target directory already contains a .git directory and/or a .gitignore file.\r\n\r\nThanks!\n", "before_files": [{"content": "from __future__ import absolute_import, print_function, unicode_literals\n\nimport json\nimport logging\nimport os\nimport sys\nfrom abc import ABCMeta, abstractmethod\nfrom argparse import ArgumentTypeError\nfrom ast import literal_eval\nfrom collections import OrderedDict\nfrom textwrap import dedent\n\nfrom six import add_metaclass\n\nfrom virtualenv.discovery.cached_py_info import LogCmd\nfrom virtualenv.info import WIN_CPYTHON_2\nfrom virtualenv.util.path import Path, safe_delete\nfrom virtualenv.util.six import ensure_str, ensure_text\nfrom virtualenv.util.subprocess import run_cmd\nfrom virtualenv.version import __version__\n\nfrom .pyenv_cfg import PyEnvCfg\n\nHERE = Path(os.path.abspath(__file__)).parent\nDEBUG_SCRIPT = HERE / \"debug.py\"\n\n\nclass CreatorMeta(object):\n def __init__(self):\n self.error = None\n\n\n@add_metaclass(ABCMeta)\nclass Creator(object):\n \"\"\"A class that given a python Interpreter creates a virtual environment\"\"\"\n\n def __init__(self, options, interpreter):\n \"\"\"Construct a new virtual environment creator.\n\n :param options: the CLI option as parsed from :meth:`add_parser_arguments`\n :param interpreter: the interpreter to create virtual environment from\n \"\"\"\n self.interpreter = interpreter\n self._debug = None\n self.dest = Path(options.dest)\n self.clear = options.clear\n self.pyenv_cfg = PyEnvCfg.from_folder(self.dest)\n self.app_data = options.app_data\n\n def __repr__(self):\n return ensure_str(self.__unicode__())\n\n def __unicode__(self):\n return \"{}({})\".format(self.__class__.__name__, \", \".join(\"{}={}\".format(k, v) for k, v in self._args()))\n\n def 
_args(self):\n return [\n (\"dest\", ensure_text(str(self.dest))),\n (\"clear\", self.clear),\n ]\n\n @classmethod\n def can_create(cls, interpreter):\n \"\"\"Determine if we can create a virtual environment.\n\n :param interpreter: the interpreter in question\n :return: ``None`` if we can't create, any other object otherwise that will be forwarded to \\\n :meth:`add_parser_arguments`\n \"\"\"\n return True\n\n @classmethod\n def add_parser_arguments(cls, parser, interpreter, meta, app_data):\n \"\"\"Add CLI arguments for the creator.\n\n :param parser: the CLI parser\n :param app_data: the application data folder\n :param interpreter: the interpreter we're asked to create virtual environment for\n :param meta: value as returned by :meth:`can_create`\n \"\"\"\n parser.add_argument(\n \"dest\", help=\"directory to create virtualenv at\", type=cls.validate_dest,\n )\n parser.add_argument(\n \"--clear\",\n dest=\"clear\",\n action=\"store_true\",\n help=\"remove the destination directory if exist before starting (will overwrite files otherwise)\",\n default=False,\n )\n\n @abstractmethod\n def create(self):\n \"\"\"Perform the virtual environment creation.\"\"\"\n raise NotImplementedError\n\n @classmethod\n def validate_dest(cls, raw_value):\n \"\"\"No path separator in the path, valid chars and must be write-able\"\"\"\n\n def non_write_able(dest, value):\n common = Path(*os.path.commonprefix([value.parts, dest.parts]))\n raise ArgumentTypeError(\n \"the destination {} is not write-able at {}\".format(dest.relative_to(common), common),\n )\n\n # the file system must be able to encode\n # note in newer CPython this is always utf-8 https://www.python.org/dev/peps/pep-0529/\n encoding = sys.getfilesystemencoding()\n refused = OrderedDict()\n kwargs = {\"errors\": \"ignore\"} if encoding != \"mbcs\" else {}\n for char in ensure_text(raw_value):\n try:\n trip = char.encode(encoding, **kwargs).decode(encoding)\n if trip == char:\n continue\n raise ValueError(trip)\n except ValueError:\n refused[char] = None\n if refused:\n raise ArgumentTypeError(\n \"the file system codec ({}) cannot handle characters {!r} within {!r}\".format(\n encoding, \"\".join(refused.keys()), raw_value,\n ),\n )\n if os.pathsep in raw_value:\n raise ArgumentTypeError(\n \"destination {!r} must not contain the path separator ({}) as this would break \"\n \"the activation scripts\".format(raw_value, os.pathsep),\n )\n\n value = Path(raw_value)\n if value.exists() and value.is_file():\n raise ArgumentTypeError(\"the destination {} already exists and is a file\".format(value))\n if (3, 3) <= sys.version_info <= (3, 6):\n # pre 3.6 resolve is always strict, aka must exists, sidestep by using os.path operation\n dest = Path(os.path.realpath(raw_value))\n else:\n dest = Path(os.path.abspath(str(value))).resolve() # on Windows absolute does not imply resolve so use both\n value = dest\n while dest:\n if dest.exists():\n if os.access(ensure_text(str(dest)), os.W_OK):\n break\n else:\n non_write_able(dest, value)\n base, _ = dest.parent, dest.name\n if base == dest:\n non_write_able(dest, value) # pragma: no cover\n dest = base\n return str(value)\n\n def run(self):\n if self.dest.exists() and self.clear:\n logging.debug(\"delete %s\", self.dest)\n safe_delete(self.dest)\n self.create()\n self.set_pyenv_cfg()\n self.setup_ignore_vcs()\n\n def set_pyenv_cfg(self):\n self.pyenv_cfg.content = OrderedDict()\n self.pyenv_cfg[\"home\"] = self.interpreter.system_exec_prefix\n self.pyenv_cfg[\"implementation\"] = 
self.interpreter.implementation\n self.pyenv_cfg[\"version_info\"] = \".\".join(str(i) for i in self.interpreter.version_info)\n self.pyenv_cfg[\"virtualenv\"] = __version__\n\n def setup_ignore_vcs(self):\n \"\"\"Generate ignore instructions for version control systems.\"\"\"\n # mark this folder to be ignored by VCS, handle https://www.python.org/dev/peps/pep-0610/#registered-vcs\n (self.dest / \".gitignore\").write_text(\n dedent(\n \"\"\"\n # created by virtualenv automatically\n *\n \"\"\",\n ).lstrip(),\n )\n # Mercurial - does not support the .hgignore file inside a subdirectory directly, but only if included via the\n # subinclude directive from root, at which point on might as well ignore the directory itself, see\n # https://www.selenic.com/mercurial/hgignore.5.html for more details\n # Bazaar - does not support ignore files in sub-directories, only at root level via .bzrignore\n # Subversion - does not support ignore files, requires direct manipulation with the svn tool\n\n @property\n def debug(self):\n \"\"\"\n :return: debug information about the virtual environment (only valid after :meth:`create` has run)\n \"\"\"\n if self._debug is None and self.exe is not None:\n self._debug = get_env_debug_info(self.exe, self.debug_script(), self.app_data)\n return self._debug\n\n # noinspection PyMethodMayBeStatic\n def debug_script(self):\n return DEBUG_SCRIPT\n\n\ndef get_env_debug_info(env_exe, debug_script, app_data):\n env = os.environ.copy()\n env.pop(str(\"PYTHONPATH\"), None)\n\n with app_data.ensure_extracted(debug_script) as debug_script:\n cmd = [str(env_exe), str(debug_script)]\n if WIN_CPYTHON_2:\n cmd = [ensure_text(i) for i in cmd]\n logging.debug(str(\"debug via %r\"), LogCmd(cmd))\n code, out, err = run_cmd(cmd)\n\n # noinspection PyBroadException\n try:\n if code != 0:\n result = literal_eval(out)\n else:\n result = json.loads(out)\n if err:\n result[\"err\"] = err\n except Exception as exception:\n return {\"out\": out, \"err\": err, \"returncode\": code, \"exception\": repr(exception)}\n if \"sys\" in result and \"path\" in result[\"sys\"]:\n del result[\"sys\"][\"path\"][0]\n return result\n", "path": "src/virtualenv/create/creator.py"}]}
3,113
291
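The virtualenv record above is resolved with a guard: the `.gitignore` marker is written only when the destination does not already have one, so a pre-existing project file is never clobbered. A standalone sketch of that guard with `pathlib` follows; it is not the `Creator` class itself.

```python
from pathlib import Path
from textwrap import dedent


def setup_ignore_vcs(dest: Path) -> None:
    """Drop a VCS ignore marker, but never overwrite a file the user already owns."""
    git_ignore = dest / ".gitignore"
    if git_ignore.exists():
        return
    git_ignore.write_text(
        dedent(
            """
            # created by virtualenv automatically
            *
            """
        ).lstrip()
    )


if __name__ == "__main__":
    import tempfile

    target = Path(tempfile.mkdtemp())
    setup_ignore_vcs(target)                       # first call writes the marker
    (target / ".gitignore").write_text("build/\n")  # simulate a user-managed file
    setup_ignore_vcs(target)                       # second call is a no-op
    print((target / ".gitignore").read_text())     # still "build/"
```

The same `exists()` check naturally covers the cloned-repository case from the issue, since a checked-in `.gitignore` is already on disk before the environment is created.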
gh_patches_debug_4156
rasdani/github-patches
git_diff
mampfes__hacs_waste_collection_schedule-190
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Berlin Recycling broken due to SSL misconfiguration Berlin Recycling is currently broken and fails with an `SSLCertVerificationError`: > Logger: waste_collection_schedule.scraper > Source: custom_components/waste_collection_schedule/waste_collection_schedule/scraper.py:118 > Integration: waste_collection_schedule (documentation) > First occurred: 22. MΓ€rz 2022, 16:39:19 (2 occurrences) > Last logged: 01:12:00 > > fetch failed for source Berline Recycling: Traceback (most recent call last): File "/usr/local/lib/python3.9/site-packages/urllib3/connectionpool.py", line 703, in urlopen httplib_response = self._make_request( File "/usr/local/lib/python3.9/site-packages/urllib3/connectionpool.py", line 386, in _make_request self._validate_conn(conn) File "/usr/local/lib/python3.9/site-packages/urllib3/connectionpool.py", line 1040, in _validate_conn conn.connect() File "/usr/local/lib/python3.9/site-packages/urllib3/connection.py", line 414, in connect self.sock = ssl_wrap_socket( File "/usr/local/lib/python3.9/site-packages/urllib3/util/ssl_.py", line 449, in ssl_wrap_socket ssl_sock = _ssl_wrap_socket_impl( File "/usr/local/lib/python3.9/site-packages/urllib3/util/ssl_.py", line 493, in _ssl_wrap_socket_impl return ssl_context.wrap_socket(sock, server_hostname=server_hostname) File "/usr/local/lib/python3.9/ssl.py", line 500, in wrap_socket return self.sslsocket_class._create( File "/usr/local/lib/python3.9/ssl.py", line 1040, in _create self.do_handshake() File "/usr/local/lib/python3.9/ssl.py", line 1309, in do_handshake self._sslobj.do_handshake() ssl.SSLCertVerificationError: [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1129) During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/usr/local/lib/python3.9/site-packages/requests/adapters.py", line 440, in send resp = conn.urlopen( File "/usr/local/lib/python3.9/site-packages/urllib3/connectionpool.py", line 785, in urlopen retries = retries.increment( File "/usr/local/lib/python3.9/site-packages/urllib3/util/retry.py", line 592, in increment raise MaxRetryError(_pool, url, error or ResponseError(cause)) urllib3.exceptions.MaxRetryError: HTTPSConnectionPool(host='kundenportal.berlin-recycling.de', port=443): Max retries exceeded with url: / (Caused by SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1129)'))) During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/config/custom_components/waste_collection_schedule/waste_collection_schedule/scraper.py", line 116, in fetch entries = self._source.fetch() File "/config/custom_components/waste_collection_schedule/waste_collection_schedule/source/berlin_recycling_de.py", line 48, in fetch r = session.get(SERVICE_URL, allow_redirects=False) File "/usr/local/lib/python3.9/site-packages/requests/sessions.py", line 542, in get return self.request('GET', url, **kwargs) File "/usr/local/lib/python3.9/site-packages/requests/sessions.py", line 529, in request resp = self.send(prep, **send_kwargs) File "/usr/local/lib/python3.9/site-packages/requests/sessions.py", line 645, in send r = adapter.send(request, **kwargs) File "/usr/local/lib/python3.9/site-packages/requests/adapters.py", line 517, in send raise SSLError(e, request=request) 
requests.exceptions.SSLError: HTTPSConnectionPool(host='kundenportal.berlin-recycling.de', port=443): Max retries exceeded with url: / (Caused by SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1129)'))) Seems the reason is https://kundenportal.berlin-recycling.de has a misconfigured HTTPS server [which is not sending the intermediate certificate](https://www.ssllabs.com/ssltest/analyze.html?d=kundenportal.berlin-recycling.de) ([Starfield Secure Certificate Authority - G2](https://certs.starfieldtech.com/repository/)). Unlike browsers, python [does not support](https://bugs.python.org/issue18617) automatic fetching of missing intermediates. I think an option to avoid setting `verify = False` on the request would be pinning the CAs and shipping the certificates with the project. </issue> <code> [start of custom_components/waste_collection_schedule/waste_collection_schedule/source/berlin_recycling_de.py] 1 import json 2 from datetime import datetime 3 from html.parser import HTMLParser 4 5 import requests 6 from waste_collection_schedule import Collection # type: ignore[attr-defined] 7 8 TITLE = "Berline Recycling" 9 DESCRIPTION = "Source for Berlin Recycling waste collection." 10 URL = "https://berlin-recycling.de" 11 TEST_CASES = { 12 "Germanenstrasse": { 13 "username": "!secret berlin_recycling_username", 14 "password": "!secret berlin_recycling_password", 15 }, 16 } 17 18 19 # Parser for HTML input (hidden) text 20 class HiddenInputParser(HTMLParser): 21 def __init__(self): 22 super().__init__() 23 self._args = {} 24 25 @property 26 def args(self): 27 return self._args 28 29 def handle_starttag(self, tag, attrs): 30 if tag == "input": 31 d = dict(attrs) 32 if str(d["type"]).lower() == "hidden": 33 self._args[d["name"]] = d["value"] if "value" in d else "" 34 35 36 SERVICE_URL = "https://kundenportal.berlin-recycling.de/" 37 38 39 class Source: 40 def __init__(self, username, password): 41 self._username = username 42 self._password = password 43 44 def fetch(self): 45 session = requests.session() 46 47 # first get returns session specific url 48 r = session.get(SERVICE_URL, allow_redirects=False) 49 50 # get session id's 51 r = session.get(r.url) 52 53 parser = HiddenInputParser() 54 parser.feed(r.text) 55 args = parser.args 56 args["__EVENTTARGET"] = "btnLog" 57 args["__EVENTARGUMENT"] = None 58 args["Username"] = self._username 59 args["Password"] = self._password 60 61 # login 62 r = session.post(r.url, data=args) 63 serviceUrl = r.url 64 65 request_data = {"withhtml": "true"} 66 r = session.post(serviceUrl + "/GetDashboard", json=request_data) 67 68 request_data = {"datasettable": "ENWIS_ABFUHRKALENDER"} 69 r = session.post(serviceUrl + "/ChangeDatasetTable", json=request_data) 70 71 request_data = { 72 "datasettablecode": "ENWIS_ABFUHRKALENDER", 73 "startindex": 0, 74 "searchtext": "", 75 "rangefilter": "", 76 "ordername": "", 77 "orderdir": "", 78 "ClientParameters": "", 79 "headrecid": "", 80 } 81 r = session.post(serviceUrl + "/GetDatasetTableHead", json=request_data) 82 83 data = json.loads(r.text) 84 # load json again, because response is double coded 85 data = json.loads(data["d"]) 86 87 entries = [] 88 for d in data["data"]: 89 date = datetime.strptime(d["Task Date"], "%Y-%m-%d").date() 90 entries.append(Collection(date, d["Material Description"])) 91 return entries 92 [end of custom_components/waste_collection_schedule/waste_collection_schedule/source/berlin_recycling_de.py] 
</code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/berlin_recycling_de.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/berlin_recycling_de.py --- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/berlin_recycling_de.py +++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/berlin_recycling_de.py @@ -45,7 +45,7 @@ session = requests.session() # first get returns session specific url - r = session.get(SERVICE_URL, allow_redirects=False) + r = session.get(SERVICE_URL, allow_redirects=False, verify=False) # get session id's r = session.get(r.url)
{"golden_diff": "diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/berlin_recycling_de.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/berlin_recycling_de.py\n--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/berlin_recycling_de.py\n+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/berlin_recycling_de.py\n@@ -45,7 +45,7 @@\n session = requests.session()\n \n # first get returns session specific url\n- r = session.get(SERVICE_URL, allow_redirects=False)\n+ r = session.get(SERVICE_URL, allow_redirects=False, verify=False)\n \n # get session id's\n r = session.get(r.url)\n", "issue": "Berlin Recycling broken due to SSL misconfiguration\nBerlin Recycling is currently broken and fails with an `SSLCertVerificationError`:\r\n\r\n> Logger: waste_collection_schedule.scraper\r\n> Source: custom_components/waste_collection_schedule/waste_collection_schedule/scraper.py:118\r\n> Integration: waste_collection_schedule (documentation)\r\n> First occurred: 22. M\u00e4rz 2022, 16:39:19 (2 occurrences)\r\n> Last logged: 01:12:00\r\n> \r\n> fetch failed for source Berline Recycling: Traceback (most recent call last): File \"/usr/local/lib/python3.9/site-packages/urllib3/connectionpool.py\", line 703, in urlopen httplib_response = self._make_request( File \"/usr/local/lib/python3.9/site-packages/urllib3/connectionpool.py\", line 386, in _make_request self._validate_conn(conn) File \"/usr/local/lib/python3.9/site-packages/urllib3/connectionpool.py\", line 1040, in _validate_conn conn.connect() File \"/usr/local/lib/python3.9/site-packages/urllib3/connection.py\", line 414, in connect self.sock = ssl_wrap_socket( File \"/usr/local/lib/python3.9/site-packages/urllib3/util/ssl_.py\", line 449, in ssl_wrap_socket ssl_sock = _ssl_wrap_socket_impl( File \"/usr/local/lib/python3.9/site-packages/urllib3/util/ssl_.py\", line 493, in _ssl_wrap_socket_impl return ssl_context.wrap_socket(sock, server_hostname=server_hostname) File \"/usr/local/lib/python3.9/ssl.py\", line 500, in wrap_socket return self.sslsocket_class._create( File \"/usr/local/lib/python3.9/ssl.py\", line 1040, in _create self.do_handshake() File \"/usr/local/lib/python3.9/ssl.py\", line 1309, in do_handshake self._sslobj.do_handshake() ssl.SSLCertVerificationError: [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1129) During handling of the above exception, another exception occurred: Traceback (most recent call last): File \"/usr/local/lib/python3.9/site-packages/requests/adapters.py\", line 440, in send resp = conn.urlopen( File \"/usr/local/lib/python3.9/site-packages/urllib3/connectionpool.py\", line 785, in urlopen retries = retries.increment( File \"/usr/local/lib/python3.9/site-packages/urllib3/util/retry.py\", line 592, in increment raise MaxRetryError(_pool, url, error or ResponseError(cause)) urllib3.exceptions.MaxRetryError: HTTPSConnectionPool(host='kundenportal.berlin-recycling.de', port=443): Max retries exceeded with url: / (Caused by SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1129)'))) During handling of the above exception, another exception occurred: Traceback (most recent call last): File \"/config/custom_components/waste_collection_schedule/waste_collection_schedule/scraper.py\", line 116, in fetch entries = self._source.fetch() File 
\"/config/custom_components/waste_collection_schedule/waste_collection_schedule/source/berlin_recycling_de.py\", line 48, in fetch r = session.get(SERVICE_URL, allow_redirects=False) File \"/usr/local/lib/python3.9/site-packages/requests/sessions.py\", line 542, in get return self.request('GET', url, **kwargs) File \"/usr/local/lib/python3.9/site-packages/requests/sessions.py\", line 529, in request resp = self.send(prep, **send_kwargs) File \"/usr/local/lib/python3.9/site-packages/requests/sessions.py\", line 645, in send r = adapter.send(request, **kwargs) File \"/usr/local/lib/python3.9/site-packages/requests/adapters.py\", line 517, in send raise SSLError(e, request=request) requests.exceptions.SSLError: HTTPSConnectionPool(host='kundenportal.berlin-recycling.de', port=443): Max retries exceeded with url: / (Caused by SSLError(SSLCertVerificationError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate (_ssl.c:1129)')))\r\n\r\nSeems the reason is https://kundenportal.berlin-recycling.de has a misconfigured HTTPS server [which is not sending the intermediate certificate](https://www.ssllabs.com/ssltest/analyze.html?d=kundenportal.berlin-recycling.de) ([Starfield Secure Certificate Authority - G2](https://certs.starfieldtech.com/repository/)). Unlike browsers, python [does not support](https://bugs.python.org/issue18617) automatic fetching of missing intermediates.\r\n\r\nI think an option to avoid setting `verify = False` on the request would be pinning the CAs and shipping the certificates with the project.\n", "before_files": [{"content": "import json\nfrom datetime import datetime\nfrom html.parser import HTMLParser\n\nimport requests\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\n\nTITLE = \"Berline Recycling\"\nDESCRIPTION = \"Source for Berlin Recycling waste collection.\"\nURL = \"https://berlin-recycling.de\"\nTEST_CASES = {\n \"Germanenstrasse\": {\n \"username\": \"!secret berlin_recycling_username\",\n \"password\": \"!secret berlin_recycling_password\",\n },\n}\n\n\n# Parser for HTML input (hidden) text\nclass HiddenInputParser(HTMLParser):\n def __init__(self):\n super().__init__()\n self._args = {}\n\n @property\n def args(self):\n return self._args\n\n def handle_starttag(self, tag, attrs):\n if tag == \"input\":\n d = dict(attrs)\n if str(d[\"type\"]).lower() == \"hidden\":\n self._args[d[\"name\"]] = d[\"value\"] if \"value\" in d else \"\"\n\n\nSERVICE_URL = \"https://kundenportal.berlin-recycling.de/\"\n\n\nclass Source:\n def __init__(self, username, password):\n self._username = username\n self._password = password\n\n def fetch(self):\n session = requests.session()\n\n # first get returns session specific url\n r = session.get(SERVICE_URL, allow_redirects=False)\n\n # get session id's\n r = session.get(r.url)\n\n parser = HiddenInputParser()\n parser.feed(r.text)\n args = parser.args\n args[\"__EVENTTARGET\"] = \"btnLog\"\n args[\"__EVENTARGUMENT\"] = None\n args[\"Username\"] = self._username\n args[\"Password\"] = self._password\n\n # login\n r = session.post(r.url, data=args)\n serviceUrl = r.url\n\n request_data = {\"withhtml\": \"true\"}\n r = session.post(serviceUrl + \"/GetDashboard\", json=request_data)\n\n request_data = {\"datasettable\": \"ENWIS_ABFUHRKALENDER\"}\n r = session.post(serviceUrl + \"/ChangeDatasetTable\", json=request_data)\n\n request_data = {\n \"datasettablecode\": \"ENWIS_ABFUHRKALENDER\",\n \"startindex\": 0,\n \"searchtext\": \"\",\n \"rangefilter\": 
\"\",\n \"ordername\": \"\",\n \"orderdir\": \"\",\n \"ClientParameters\": \"\",\n \"headrecid\": \"\",\n }\n r = session.post(serviceUrl + \"/GetDatasetTableHead\", json=request_data)\n\n data = json.loads(r.text)\n # load json again, because response is double coded\n data = json.loads(data[\"d\"])\n\n entries = []\n for d in data[\"data\"]:\n date = datetime.strptime(d[\"Task Date\"], \"%Y-%m-%d\").date()\n entries.append(Collection(date, d[\"Material Description\"]))\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/berlin_recycling_de.py"}]}
2,493
161
gh_patches_debug_16342
rasdani/github-patches
git_diff
learningequality__kolibri-8880
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> User on SouD should not be able to edit their user profile From @jamalex in bug bash: The FacilityUser object is read-only for single-user certificates, so these changes will not be synced back. We had a spec doc that laid out "what should be allowed on a SoUD", I think this was documented in there β€” we should do an audit to make sure other cases are covered as well. https://www.notion.so/learningequality/CRITICAL-FEATURE-BUG-Users-on-SoUD-should-not-be-allowed-to-edit-their-user-profile-e5765979e7ed4fcaa694cfe7932ac4df </issue> <code> [start of kolibri/plugins/user_profile/kolibri_plugin.py] 1 from __future__ import absolute_import 2 from __future__ import print_function 3 from __future__ import unicode_literals 4 5 from kolibri.core.hooks import NavigationHook 6 from kolibri.core.webpack import hooks as webpack_hooks 7 from kolibri.plugins import KolibriPluginBase 8 from kolibri.plugins.hooks import register_hook 9 10 11 class UserProfile(KolibriPluginBase): 12 translated_view_urls = "urls" 13 14 @property 15 def url_slug(self): 16 return "profile" 17 18 19 @register_hook 20 class UserAuthAsset(webpack_hooks.WebpackBundleHook): 21 bundle_id = "app" 22 23 24 @register_hook 25 class ProfileNavAction(NavigationHook): 26 bundle_id = "user_profile_side_nav" 27 [end of kolibri/plugins/user_profile/kolibri_plugin.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/kolibri/plugins/user_profile/kolibri_plugin.py b/kolibri/plugins/user_profile/kolibri_plugin.py --- a/kolibri/plugins/user_profile/kolibri_plugin.py +++ b/kolibri/plugins/user_profile/kolibri_plugin.py @@ -2,6 +2,7 @@ from __future__ import print_function from __future__ import unicode_literals +from kolibri.core.device.utils import get_device_setting from kolibri.core.hooks import NavigationHook from kolibri.core.webpack import hooks as webpack_hooks from kolibri.plugins import KolibriPluginBase @@ -20,6 +21,10 @@ class UserAuthAsset(webpack_hooks.WebpackBundleHook): bundle_id = "app" + @property + def plugin_data(self): + return {"isSubsetOfUsersDevice": get_device_setting("subset_of_users_device")} + @register_hook class ProfileNavAction(NavigationHook):
{"golden_diff": "diff --git a/kolibri/plugins/user_profile/kolibri_plugin.py b/kolibri/plugins/user_profile/kolibri_plugin.py\n--- a/kolibri/plugins/user_profile/kolibri_plugin.py\n+++ b/kolibri/plugins/user_profile/kolibri_plugin.py\n@@ -2,6 +2,7 @@\n from __future__ import print_function\n from __future__ import unicode_literals\n \n+from kolibri.core.device.utils import get_device_setting\n from kolibri.core.hooks import NavigationHook\n from kolibri.core.webpack import hooks as webpack_hooks\n from kolibri.plugins import KolibriPluginBase\n@@ -20,6 +21,10 @@\n class UserAuthAsset(webpack_hooks.WebpackBundleHook):\n bundle_id = \"app\"\n \n+ @property\n+ def plugin_data(self):\n+ return {\"isSubsetOfUsersDevice\": get_device_setting(\"subset_of_users_device\")}\n+\n \n @register_hook\n class ProfileNavAction(NavigationHook):\n", "issue": "User on SouD should not be able to edit their user profile\nFrom @jamalex in bug bash: \r\nThe FacilityUser object is read-only for single-user certificates, so these changes will not be synced back. We had a spec doc that laid out \"what should be allowed on a SoUD\", I think this was documented in there \u2014 we should do an audit to make sure other cases are covered as well.\r\n\r\nhttps://www.notion.so/learningequality/CRITICAL-FEATURE-BUG-Users-on-SoUD-should-not-be-allowed-to-edit-their-user-profile-e5765979e7ed4fcaa694cfe7932ac4df\n", "before_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nfrom kolibri.core.hooks import NavigationHook\nfrom kolibri.core.webpack import hooks as webpack_hooks\nfrom kolibri.plugins import KolibriPluginBase\nfrom kolibri.plugins.hooks import register_hook\n\n\nclass UserProfile(KolibriPluginBase):\n translated_view_urls = \"urls\"\n\n @property\n def url_slug(self):\n return \"profile\"\n\n\n@register_hook\nclass UserAuthAsset(webpack_hooks.WebpackBundleHook):\n bundle_id = \"app\"\n\n\n@register_hook\nclass ProfileNavAction(NavigationHook):\n bundle_id = \"user_profile_side_nav\"\n", "path": "kolibri/plugins/user_profile/kolibri_plugin.py"}]}
886
204
gh_patches_debug_18363
rasdani/github-patches
git_diff
pytorch__pytorch-78948
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Functional API for FileLister ### πŸš€ The feature, motivation and pitch Similar to https://github.com/pytorch/data/issues/387 This allows for ```python IterableWrapper([...]).list_file() ``` ### Alternatives _No response_ ### Additional context _No response_ cc @VitalyFedyunin @ejguan @NivekT </issue> <code> [start of torch/utils/data/datapipes/iter/filelister.py] 1 from typing import Iterator, List, Sequence, Union 2 3 from torch.utils.data.datapipes.datapipe import IterDataPipe 4 from torch.utils.data.datapipes.iter import IterableWrapper 5 from torch.utils.data.datapipes.utils.common import get_file_pathnames_from_root 6 7 __all__ = ["FileListerIterDataPipe", ] 8 9 10 class FileListerIterDataPipe(IterDataPipe[str]): 11 r""" 12 Given path(s) to the root directory, yields file pathname(s) (path + filename) of files within the root directory. 13 Multiple root directories can be provided. 14 15 Args: 16 root: Root directory or a sequence of root directories 17 masks: Unix style filter string or string list for filtering file name(s) 18 recursive: Whether to return pathname from nested directories or not 19 abspath: Whether to return relative pathname or absolute pathname 20 non_deterministic: Whether to return pathname in sorted order or not. 21 If ``False``, the results yielded from each root directory will be sorted 22 length: Nominal length of the datapipe 23 24 Example: 25 >>> from torchdata.datapipes.iter import FileLister 26 >>> dp = FileLister(root=".", recursive=True) 27 >>> list(dp) 28 ['example.py', './data/data.tar'] 29 """ 30 31 def __init__( 32 self, 33 root: Union[str, Sequence[str], IterDataPipe] = '.', 34 masks: Union[str, List[str]] = '', 35 *, 36 recursive: bool = False, 37 abspath: bool = False, 38 non_deterministic: bool = False, 39 length: int = -1 40 ) -> None: 41 super().__init__() 42 if isinstance(root, str): 43 root = [root, ] 44 if not isinstance(root, IterDataPipe): 45 root = IterableWrapper(root) 46 self.datapipe: IterDataPipe = root 47 self.masks: Union[str, List[str]] = masks 48 self.recursive: bool = recursive 49 self.abspath: bool = abspath 50 self.non_deterministic: bool = non_deterministic 51 self.length: int = length 52 53 def __iter__(self) -> Iterator[str] : 54 for path in self.datapipe: 55 yield from get_file_pathnames_from_root(path, self.masks, self.recursive, self.abspath, self.non_deterministic) 56 57 def __len__(self): 58 if self.length == -1: 59 raise TypeError("{} instance doesn't have valid length".format(type(self).__name__)) 60 return self.length 61 [end of torch/utils/data/datapipes/iter/filelister.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/torch/utils/data/datapipes/iter/filelister.py b/torch/utils/data/datapipes/iter/filelister.py --- a/torch/utils/data/datapipes/iter/filelister.py +++ b/torch/utils/data/datapipes/iter/filelister.py @@ -1,5 +1,8 @@ from typing import Iterator, List, Sequence, Union + +from torch.utils.data.datapipes._decorator import functional_datapipe + from torch.utils.data.datapipes.datapipe import IterDataPipe from torch.utils.data.datapipes.iter import IterableWrapper from torch.utils.data.datapipes.utils.common import get_file_pathnames_from_root @@ -7,6 +10,7 @@ __all__ = ["FileListerIterDataPipe", ] +@functional_datapipe("list_files") class FileListerIterDataPipe(IterDataPipe[str]): r""" Given path(s) to the root directory, yields file pathname(s) (path + filename) of files within the root directory.
{"golden_diff": "diff --git a/torch/utils/data/datapipes/iter/filelister.py b/torch/utils/data/datapipes/iter/filelister.py\n--- a/torch/utils/data/datapipes/iter/filelister.py\n+++ b/torch/utils/data/datapipes/iter/filelister.py\n@@ -1,5 +1,8 @@\n from typing import Iterator, List, Sequence, Union\n \n+\n+from torch.utils.data.datapipes._decorator import functional_datapipe\n+\n from torch.utils.data.datapipes.datapipe import IterDataPipe\n from torch.utils.data.datapipes.iter import IterableWrapper\n from torch.utils.data.datapipes.utils.common import get_file_pathnames_from_root\n@@ -7,6 +10,7 @@\n __all__ = [\"FileListerIterDataPipe\", ]\n \n \n+@functional_datapipe(\"list_files\")\n class FileListerIterDataPipe(IterDataPipe[str]):\n r\"\"\"\n Given path(s) to the root directory, yields file pathname(s) (path + filename) of files within the root directory.\n", "issue": "Functional API for FileLister\n### \ud83d\ude80 The feature, motivation and pitch\n\nSimilar to https://github.com/pytorch/data/issues/387\r\n\r\nThis allows for \r\n```python\r\nIterableWrapper([...]).list_file()\r\n```\n\n### Alternatives\n\n_No response_\n\n### Additional context\n\n_No response_\n\ncc @VitalyFedyunin @ejguan @NivekT\n", "before_files": [{"content": "from typing import Iterator, List, Sequence, Union\n\nfrom torch.utils.data.datapipes.datapipe import IterDataPipe\nfrom torch.utils.data.datapipes.iter import IterableWrapper\nfrom torch.utils.data.datapipes.utils.common import get_file_pathnames_from_root\n\n__all__ = [\"FileListerIterDataPipe\", ]\n\n\nclass FileListerIterDataPipe(IterDataPipe[str]):\n r\"\"\"\n Given path(s) to the root directory, yields file pathname(s) (path + filename) of files within the root directory.\n Multiple root directories can be provided.\n\n Args:\n root: Root directory or a sequence of root directories\n masks: Unix style filter string or string list for filtering file name(s)\n recursive: Whether to return pathname from nested directories or not\n abspath: Whether to return relative pathname or absolute pathname\n non_deterministic: Whether to return pathname in sorted order or not.\n If ``False``, the results yielded from each root directory will be sorted\n length: Nominal length of the datapipe\n\n Example:\n >>> from torchdata.datapipes.iter import FileLister\n >>> dp = FileLister(root=\".\", recursive=True)\n >>> list(dp)\n ['example.py', './data/data.tar']\n \"\"\"\n\n def __init__(\n self,\n root: Union[str, Sequence[str], IterDataPipe] = '.',\n masks: Union[str, List[str]] = '',\n *,\n recursive: bool = False,\n abspath: bool = False,\n non_deterministic: bool = False,\n length: int = -1\n ) -> None:\n super().__init__()\n if isinstance(root, str):\n root = [root, ]\n if not isinstance(root, IterDataPipe):\n root = IterableWrapper(root)\n self.datapipe: IterDataPipe = root\n self.masks: Union[str, List[str]] = masks\n self.recursive: bool = recursive\n self.abspath: bool = abspath\n self.non_deterministic: bool = non_deterministic\n self.length: int = length\n\n def __iter__(self) -> Iterator[str] :\n for path in self.datapipe:\n yield from get_file_pathnames_from_root(path, self.masks, self.recursive, self.abspath, self.non_deterministic)\n\n def __len__(self):\n if self.length == -1:\n raise TypeError(\"{} instance doesn't have valid length\".format(type(self).__name__))\n return self.length\n", "path": "torch/utils/data/datapipes/iter/filelister.py"}]}
1,281
226
gh_patches_debug_59556
rasdani/github-patches
git_diff
elastic__apm-agent-python-1076
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> FastAPI: Calls to undefined routes result in 500 **Description** When using the apm agent with FastAPI, calls to any undefined route results in a 500 internal server error. Without apm, 404 not found is returned correctly. **To Reproduce** Run the following simple FastAPI app: ```python import uvicorn from elasticapm.contrib.starlette import ElasticAPM, make_apm_client from fastapi import FastAPI app = FastAPI() apm = make_apm_client( { "SERVICE_NAME": "undefined-route", } ) app.add_middleware(ElasticAPM, client=apm) @app.get("/hi") async def hi(): return "hi" if __name__ == "__main__": uvicorn.run(app, host="0.0.0.0", port=80) ``` **Observed behavior** Calling `http://localhost/bye` results in a 500 internal server error. **Expected behavior** Calling `http://localhost/bye` results in a 404 not found error. **Environment** - OS: - Client: Windows - Server: Ubuntu - Python version: `3.7.3` - Framework and version: `fastapi==0.61.2` - APM Server version: docker image `elasticsearch/elasticsearch:7.10.2` - Agent version: `elastic-apm==master` </issue> <code> [start of elasticapm/contrib/starlette/__init__.py] 1 # BSD 3-Clause License 2 # 3 # Copyright (c) 2012, the Sentry Team, see AUTHORS for more details 4 # Copyright (c) 2019, Elasticsearch BV 5 # All rights reserved. 6 # 7 # Redistribution and use in source and binary forms, with or without 8 # modification, are permitted provided that the following conditions are met: 9 # 10 # * Redistributions of source code must retain the above copyright notice, this 11 # list of conditions and the following disclaimer. 12 # 13 # * Redistributions in binary form must reproduce the above copyright notice, 14 # this list of conditions and the following disclaimer in the documentation 15 # and/or other materials provided with the distribution. 16 # 17 # * Neither the name of the copyright holder nor the names of its 18 # contributors may be used to endorse or promote products derived from 19 # this software without specific prior written permission. 20 # 21 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 22 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 24 # DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 25 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 26 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 27 # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 28 # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 29 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 30 31 32 from __future__ import absolute_import 33 34 import starlette 35 from starlette.middleware.base import BaseHTTPMiddleware, RequestResponseEndpoint 36 from starlette.requests import Request 37 from starlette.responses import Response 38 from starlette.routing import Match, Mount 39 from starlette.types import ASGIApp 40 41 import elasticapm 42 import elasticapm.instrumentation.control 43 from elasticapm.base import Client 44 from elasticapm.conf import constants 45 from elasticapm.contrib.asyncio.traces import set_context 46 from elasticapm.contrib.starlette.utils import get_body, get_data_from_request, get_data_from_response 47 from elasticapm.utils.disttracing import TraceParent 48 from elasticapm.utils.logging import get_logger 49 50 logger = get_logger("elasticapm.errors.client") 51 52 53 def make_apm_client(config: dict, client_cls=Client, **defaults) -> Client: 54 """Builds ElasticAPM client. 55 56 Args: 57 config (dict): Dictionary of Client configuration. All keys must be uppercase. See `elasticapm.conf.Config`. 58 client_cls (Client): Must be Client or its child. 59 **defaults: Additional parameters for Client. See `elasticapm.base.Client` 60 61 Returns: 62 Client 63 """ 64 if "framework_name" not in defaults: 65 defaults["framework_name"] = "starlette" 66 defaults["framework_version"] = starlette.__version__ 67 68 return client_cls(config, **defaults) 69 70 71 class ElasticAPM(BaseHTTPMiddleware): 72 """ 73 Starlette / FastAPI middleware for Elastic APM capturing. 74 75 >>> elasticapm = make_apm_client({ 76 >>> 'SERVICE_NAME': 'myapp', 77 >>> 'DEBUG': True, 78 >>> 'SERVER_URL': 'http://localhost:8200', 79 >>> 'CAPTURE_HEADERS': True, 80 >>> 'CAPTURE_BODY': 'all' 81 >>> }) 82 83 >>> app.add_middleware(ElasticAPM, client=elasticapm) 84 85 Pass an arbitrary APP_NAME and SECRET_TOKEN:: 86 87 >>> elasticapm = ElasticAPM(app, service_name='myapp', secret_token='asdasdasd') 88 89 Pass an explicit client:: 90 91 >>> elasticapm = ElasticAPM(app, client=client) 92 93 Automatically configure logging:: 94 95 >>> elasticapm = ElasticAPM(app, logging=True) 96 97 Capture an exception:: 98 99 >>> try: 100 >>> 1 / 0 101 >>> except ZeroDivisionError: 102 >>> elasticapm.capture_exception() 103 104 Capture a message:: 105 106 >>> elasticapm.capture_message('hello, world!') 107 """ 108 109 def __init__(self, app: ASGIApp, client: Client): 110 """ 111 112 Args: 113 app (ASGIApp): Starlette app 114 client (Client): ElasticAPM Client 115 """ 116 self.client = client 117 118 if self.client.config.instrument and self.client.config.enabled: 119 elasticapm.instrumentation.control.instrument() 120 121 super().__init__(app) 122 123 async def dispatch(self, request: Request, call_next: RequestResponseEndpoint) -> Response: 124 """Processes the whole request APM capturing. 125 126 Args: 127 request (Request) 128 call_next (RequestResponseEndpoint): Next request process in Starlette. 
129 130 Returns: 131 Response 132 """ 133 await self._request_started(request) 134 135 try: 136 response = await call_next(request) 137 elasticapm.set_transaction_outcome(constants.OUTCOME.SUCCESS, override=False) 138 except Exception: 139 await self.capture_exception( 140 context={"request": await get_data_from_request(request, self.client.config, constants.ERROR)} 141 ) 142 elasticapm.set_transaction_result("HTTP 5xx", override=False) 143 elasticapm.set_transaction_outcome(constants.OUTCOME.FAILURE, override=False) 144 elasticapm.set_context({"status_code": 500}, "response") 145 146 raise 147 else: 148 await self._request_finished(response) 149 finally: 150 self.client.end_transaction() 151 152 return response 153 154 async def capture_exception(self, *args, **kwargs): 155 """Captures your exception. 156 157 Args: 158 *args: 159 **kwargs: 160 """ 161 self.client.capture_exception(*args, **kwargs) 162 163 async def capture_message(self, *args, **kwargs): 164 """Captures your message. 165 166 Args: 167 *args: Whatever 168 **kwargs: Whatever 169 """ 170 self.client.capture_message(*args, **kwargs) 171 172 async def _request_started(self, request: Request): 173 """Captures the begin of the request processing to APM. 174 175 Args: 176 request (Request) 177 """ 178 # When we consume the body, we replace the streaming mechanism with 179 # a mocked version -- this workaround came from 180 # https://github.com/encode/starlette/issues/495#issuecomment-513138055 181 # and we call the workaround here to make sure that regardless of 182 # `capture_body` settings, we will have access to the body if we need it. 183 if self.client.config.capture_body != "off": 184 await get_body(request) 185 186 if not self.client.should_ignore_url(request.url.path): 187 trace_parent = TraceParent.from_headers(dict(request.headers)) 188 self.client.begin_transaction("request", trace_parent=trace_parent) 189 190 await set_context( 191 lambda: get_data_from_request(request, self.client.config, constants.TRANSACTION), "request" 192 ) 193 transaction_name = self.get_route_name(request) or request.url.path 194 elasticapm.set_transaction_name("{} {}".format(request.method, transaction_name), override=False) 195 196 async def _request_finished(self, response: Response): 197 """Captures the end of the request processing to APM. 198 199 Args: 200 response (Response) 201 """ 202 await set_context( 203 lambda: get_data_from_response(response, self.client.config, constants.TRANSACTION), "response" 204 ) 205 206 result = "HTTP {}xx".format(response.status_code // 100) 207 elasticapm.set_transaction_result(result, override=False) 208 209 def get_route_name(self, request: Request) -> str: 210 app = request.app 211 scope = request.scope 212 routes = app.routes 213 route_name = self._get_route_name(scope, routes) 214 215 # Starlette magically redirects requests if the path matches a route name with a trailing slash 216 # appended or removed. 
To not spam the transaction names list, we do the same here and put these 217 # redirects all in the same "redirect trailing slashes" transaction name 218 if not route_name and app.router.redirect_slashes and scope["path"] != "/": 219 redirect_scope = dict(scope) 220 if scope["path"].endswith("/"): 221 redirect_scope["path"] = scope["path"][:-1] 222 trim = True 223 else: 224 redirect_scope["path"] = scope["path"] + "/" 225 trim = False 226 227 route_name = self._get_route_name(redirect_scope, routes) 228 route_name = route_name + "/" if trim else route_name[:-1] 229 return route_name 230 231 def _get_route_name(self, scope, routes, route_name=None): 232 for route in routes: 233 match, child_scope = route.matches(scope) 234 if match == Match.FULL: 235 route_name = route.path 236 child_scope = {**scope, **child_scope} 237 if isinstance(route, Mount): 238 child_route_name = self._get_route_name(child_scope, route.routes, route_name) 239 if child_route_name is None: 240 route_name = None 241 else: 242 route_name += child_route_name 243 return route_name 244 elif match == Match.PARTIAL and route_name is None: 245 route_name = route.path 246 [end of elasticapm/contrib/starlette/__init__.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/elasticapm/contrib/starlette/__init__.py b/elasticapm/contrib/starlette/__init__.py --- a/elasticapm/contrib/starlette/__init__.py +++ b/elasticapm/contrib/starlette/__init__.py @@ -225,7 +225,8 @@ trim = False route_name = self._get_route_name(redirect_scope, routes) - route_name = route_name + "/" if trim else route_name[:-1] + if route_name is not None: + route_name = route_name + "/" if trim else route_name[:-1] return route_name def _get_route_name(self, scope, routes, route_name=None):
{"golden_diff": "diff --git a/elasticapm/contrib/starlette/__init__.py b/elasticapm/contrib/starlette/__init__.py\n--- a/elasticapm/contrib/starlette/__init__.py\n+++ b/elasticapm/contrib/starlette/__init__.py\n@@ -225,7 +225,8 @@\n trim = False\n \n route_name = self._get_route_name(redirect_scope, routes)\n- route_name = route_name + \"/\" if trim else route_name[:-1]\n+ if route_name is not None:\n+ route_name = route_name + \"/\" if trim else route_name[:-1]\n return route_name\n \n def _get_route_name(self, scope, routes, route_name=None):\n", "issue": "FastAPI: Calls to undefined routes result in 500\n**Description**\r\nWhen using the apm agent with FastAPI, calls to any undefined route results in a 500 internal server error. Without apm, 404 not found is returned correctly.\r\n\r\n**To Reproduce**\r\n\r\nRun the following simple FastAPI app:\r\n```python\r\nimport uvicorn\r\nfrom elasticapm.contrib.starlette import ElasticAPM, make_apm_client\r\nfrom fastapi import FastAPI\r\n\r\napp = FastAPI()\r\n\r\napm = make_apm_client(\r\n {\r\n \"SERVICE_NAME\": \"undefined-route\",\r\n }\r\n)\r\n\r\napp.add_middleware(ElasticAPM, client=apm)\r\n\r\[email protected](\"/hi\")\r\nasync def hi():\r\n return \"hi\"\r\n\r\nif __name__ == \"__main__\":\r\n uvicorn.run(app, host=\"0.0.0.0\", port=80)\r\n```\r\n\r\n**Observed behavior**\r\nCalling `http://localhost/bye` results in a 500 internal server error.\r\n\r\n**Expected behavior**\r\nCalling `http://localhost/bye` results in a 404 not found error.\r\n\r\n**Environment**\r\n\r\n- OS:\r\n - Client: Windows\r\n - Server: Ubuntu\r\n- Python version: `3.7.3`\r\n- Framework and version: `fastapi==0.61.2`\r\n- APM Server version: docker image `elasticsearch/elasticsearch:7.10.2`\r\n- Agent version: `elastic-apm==master`\n", "before_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2012, the Sentry Team, see AUTHORS for more details\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n\n\nfrom __future__ import absolute_import\n\nimport starlette\nfrom starlette.middleware.base import BaseHTTPMiddleware, RequestResponseEndpoint\nfrom starlette.requests import Request\nfrom starlette.responses import Response\nfrom starlette.routing import Match, Mount\nfrom starlette.types import ASGIApp\n\nimport elasticapm\nimport elasticapm.instrumentation.control\nfrom elasticapm.base import Client\nfrom elasticapm.conf import constants\nfrom elasticapm.contrib.asyncio.traces import set_context\nfrom elasticapm.contrib.starlette.utils import get_body, get_data_from_request, get_data_from_response\nfrom elasticapm.utils.disttracing import TraceParent\nfrom elasticapm.utils.logging import get_logger\n\nlogger = get_logger(\"elasticapm.errors.client\")\n\n\ndef make_apm_client(config: dict, client_cls=Client, **defaults) -> Client:\n \"\"\"Builds ElasticAPM client.\n\n Args:\n config (dict): Dictionary of Client configuration. All keys must be uppercase. See `elasticapm.conf.Config`.\n client_cls (Client): Must be Client or its child.\n **defaults: Additional parameters for Client. See `elasticapm.base.Client`\n\n Returns:\n Client\n \"\"\"\n if \"framework_name\" not in defaults:\n defaults[\"framework_name\"] = \"starlette\"\n defaults[\"framework_version\"] = starlette.__version__\n\n return client_cls(config, **defaults)\n\n\nclass ElasticAPM(BaseHTTPMiddleware):\n \"\"\"\n Starlette / FastAPI middleware for Elastic APM capturing.\n\n >>> elasticapm = make_apm_client({\n >>> 'SERVICE_NAME': 'myapp',\n >>> 'DEBUG': True,\n >>> 'SERVER_URL': 'http://localhost:8200',\n >>> 'CAPTURE_HEADERS': True,\n >>> 'CAPTURE_BODY': 'all'\n >>> })\n\n >>> app.add_middleware(ElasticAPM, client=elasticapm)\n\n Pass an arbitrary APP_NAME and SECRET_TOKEN::\n\n >>> elasticapm = ElasticAPM(app, service_name='myapp', secret_token='asdasdasd')\n\n Pass an explicit client::\n\n >>> elasticapm = ElasticAPM(app, client=client)\n\n Automatically configure logging::\n\n >>> elasticapm = ElasticAPM(app, logging=True)\n\n Capture an exception::\n\n >>> try:\n >>> 1 / 0\n >>> except ZeroDivisionError:\n >>> elasticapm.capture_exception()\n\n Capture a message::\n\n >>> elasticapm.capture_message('hello, world!')\n \"\"\"\n\n def __init__(self, app: ASGIApp, client: Client):\n \"\"\"\n\n Args:\n app (ASGIApp): Starlette app\n client (Client): ElasticAPM Client\n \"\"\"\n self.client = client\n\n if self.client.config.instrument and self.client.config.enabled:\n elasticapm.instrumentation.control.instrument()\n\n super().__init__(app)\n\n async def dispatch(self, request: Request, call_next: RequestResponseEndpoint) -> Response:\n \"\"\"Processes the whole request APM capturing.\n\n Args:\n request (Request)\n call_next (RequestResponseEndpoint): Next request process in Starlette.\n\n Returns:\n Response\n \"\"\"\n await self._request_started(request)\n\n try:\n response = await call_next(request)\n elasticapm.set_transaction_outcome(constants.OUTCOME.SUCCESS, override=False)\n except Exception:\n await self.capture_exception(\n context={\"request\": await 
get_data_from_request(request, self.client.config, constants.ERROR)}\n )\n elasticapm.set_transaction_result(\"HTTP 5xx\", override=False)\n elasticapm.set_transaction_outcome(constants.OUTCOME.FAILURE, override=False)\n elasticapm.set_context({\"status_code\": 500}, \"response\")\n\n raise\n else:\n await self._request_finished(response)\n finally:\n self.client.end_transaction()\n\n return response\n\n async def capture_exception(self, *args, **kwargs):\n \"\"\"Captures your exception.\n\n Args:\n *args:\n **kwargs:\n \"\"\"\n self.client.capture_exception(*args, **kwargs)\n\n async def capture_message(self, *args, **kwargs):\n \"\"\"Captures your message.\n\n Args:\n *args: Whatever\n **kwargs: Whatever\n \"\"\"\n self.client.capture_message(*args, **kwargs)\n\n async def _request_started(self, request: Request):\n \"\"\"Captures the begin of the request processing to APM.\n\n Args:\n request (Request)\n \"\"\"\n # When we consume the body, we replace the streaming mechanism with\n # a mocked version -- this workaround came from\n # https://github.com/encode/starlette/issues/495#issuecomment-513138055\n # and we call the workaround here to make sure that regardless of\n # `capture_body` settings, we will have access to the body if we need it.\n if self.client.config.capture_body != \"off\":\n await get_body(request)\n\n if not self.client.should_ignore_url(request.url.path):\n trace_parent = TraceParent.from_headers(dict(request.headers))\n self.client.begin_transaction(\"request\", trace_parent=trace_parent)\n\n await set_context(\n lambda: get_data_from_request(request, self.client.config, constants.TRANSACTION), \"request\"\n )\n transaction_name = self.get_route_name(request) or request.url.path\n elasticapm.set_transaction_name(\"{} {}\".format(request.method, transaction_name), override=False)\n\n async def _request_finished(self, response: Response):\n \"\"\"Captures the end of the request processing to APM.\n\n Args:\n response (Response)\n \"\"\"\n await set_context(\n lambda: get_data_from_response(response, self.client.config, constants.TRANSACTION), \"response\"\n )\n\n result = \"HTTP {}xx\".format(response.status_code // 100)\n elasticapm.set_transaction_result(result, override=False)\n\n def get_route_name(self, request: Request) -> str:\n app = request.app\n scope = request.scope\n routes = app.routes\n route_name = self._get_route_name(scope, routes)\n\n # Starlette magically redirects requests if the path matches a route name with a trailing slash\n # appended or removed. 
To not spam the transaction names list, we do the same here and put these\n # redirects all in the same \"redirect trailing slashes\" transaction name\n if not route_name and app.router.redirect_slashes and scope[\"path\"] != \"/\":\n redirect_scope = dict(scope)\n if scope[\"path\"].endswith(\"/\"):\n redirect_scope[\"path\"] = scope[\"path\"][:-1]\n trim = True\n else:\n redirect_scope[\"path\"] = scope[\"path\"] + \"/\"\n trim = False\n\n route_name = self._get_route_name(redirect_scope, routes)\n route_name = route_name + \"/\" if trim else route_name[:-1]\n return route_name\n\n def _get_route_name(self, scope, routes, route_name=None):\n for route in routes:\n match, child_scope = route.matches(scope)\n if match == Match.FULL:\n route_name = route.path\n child_scope = {**scope, **child_scope}\n if isinstance(route, Mount):\n child_route_name = self._get_route_name(child_scope, route.routes, route_name)\n if child_route_name is None:\n route_name = None\n else:\n route_name += child_route_name\n return route_name\n elif match == Match.PARTIAL and route_name is None:\n route_name = route.path\n", "path": "elasticapm/contrib/starlette/__init__.py"}]}
3,471
158
gh_patches_debug_13893
rasdani/github-patches
git_diff
litestar-org__litestar-1370
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Bug: msgspec version needs updating ### Description Starlite currently needs msgspec >= 0.11.0 from here https://github.com/starlite-api/starlite/blob/8a49aff754d5b409397183eb395cac5a44193d72/pyproject.toml#L52. Starlite uses msgspec.inspect: https://github.com/starlite-api/starlite/blob/41089a2d5dff696a25c78a58322d3a8a08ebcac3/starlite/constants.py#L3 msgspec.inspect wasn't introduced in msgspec until 0.12.0: https://github.com/jcrist/msgspec/releases/tag/0.12.0 ### URL to code causing the issue _No response_ ### MCVE _No response_ ### Steps to reproduce ```bash 1. In a new environment, pip install starlite msgspec==0.11.0 2. Try to import starlite ``` ### Screenshots _No response_ ### Logs _No response_ ### Starlite Version 1.51.7 ### Platform - [ ] Linux - [ ] Mac - [ ] Windows - [ ] Other (Please specify in the description above) </issue> <code> [start of starlite/constants.py] 1 from inspect import Signature 2 3 from msgspec.inspect import UNSET 4 from pydantic.fields import Undefined 5 6 from starlite.types import Empty 7 8 DEFAULT_ALLOWED_CORS_HEADERS = {"Accept", "Accept-Language", "Content-Language", "Content-Type"} 9 DEFAULT_CHUNK_SIZE = 1024 * 128 # 128KB 10 HTTP_RESPONSE_BODY = "http.response.body" 11 HTTP_RESPONSE_START = "http.response.start" 12 ONE_MEGABYTE = 1024 * 1024 13 REDIRECT_STATUS_CODES = {301, 302, 303, 307, 308} 14 RESERVED_KWARGS = {"state", "headers", "cookies", "request", "socket", "data", "query", "scope", "body"} 15 SCOPE_STATE_DEPENDENCY_CACHE = "dependency_cache" 16 SCOPE_STATE_NAMESPACE = "__starlite__" 17 SCOPE_STATE_RESPONSE_COMPRESSED = "response_compressed" 18 UNDEFINED_SENTINELS = {Undefined, Signature.empty, UNSET, Empty, Ellipsis} 19 SKIP_VALIDATION_NAMES = {"request", "socket", "scope", "receive", "send"} 20 [end of starlite/constants.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/starlite/constants.py b/starlite/constants.py --- a/starlite/constants.py +++ b/starlite/constants.py @@ -1,6 +1,5 @@ from inspect import Signature -from msgspec.inspect import UNSET from pydantic.fields import Undefined from starlite.types import Empty @@ -15,5 +14,5 @@ SCOPE_STATE_DEPENDENCY_CACHE = "dependency_cache" SCOPE_STATE_NAMESPACE = "__starlite__" SCOPE_STATE_RESPONSE_COMPRESSED = "response_compressed" -UNDEFINED_SENTINELS = {Undefined, Signature.empty, UNSET, Empty, Ellipsis} +UNDEFINED_SENTINELS = {Undefined, Signature.empty, Empty, Ellipsis} SKIP_VALIDATION_NAMES = {"request", "socket", "scope", "receive", "send"}
{"golden_diff": "diff --git a/starlite/constants.py b/starlite/constants.py\n--- a/starlite/constants.py\n+++ b/starlite/constants.py\n@@ -1,6 +1,5 @@\n from inspect import Signature\n \n-from msgspec.inspect import UNSET\n from pydantic.fields import Undefined\n \n from starlite.types import Empty\n@@ -15,5 +14,5 @@\n SCOPE_STATE_DEPENDENCY_CACHE = \"dependency_cache\"\n SCOPE_STATE_NAMESPACE = \"__starlite__\"\n SCOPE_STATE_RESPONSE_COMPRESSED = \"response_compressed\"\n-UNDEFINED_SENTINELS = {Undefined, Signature.empty, UNSET, Empty, Ellipsis}\n+UNDEFINED_SENTINELS = {Undefined, Signature.empty, Empty, Ellipsis}\n SKIP_VALIDATION_NAMES = {\"request\", \"socket\", \"scope\", \"receive\", \"send\"}\n", "issue": "Bug: msgspec version needs updating\n### Description\n\nStarlite currently needs msgspec >= 0.11.0 from here https://github.com/starlite-api/starlite/blob/8a49aff754d5b409397183eb395cac5a44193d72/pyproject.toml#L52.\r\n\r\nStarlite uses msgspec.inspect:\r\nhttps://github.com/starlite-api/starlite/blob/41089a2d5dff696a25c78a58322d3a8a08ebcac3/starlite/constants.py#L3\r\n\r\nmsgspec.inspect wasn't introduced in msgspec until 0.12.0:\r\nhttps://github.com/jcrist/msgspec/releases/tag/0.12.0\n\n### URL to code causing the issue\n\n_No response_\n\n### MCVE\n\n_No response_\n\n### Steps to reproduce\n\n```bash\n1. In a new environment, pip install starlite msgspec==0.11.0\r\n2. Try to import starlite\n```\n\n\n### Screenshots\n\n_No response_\n\n### Logs\n\n_No response_\n\n### Starlite Version\n\n1.51.7\n\n### Platform\n\n- [ ] Linux\n- [ ] Mac\n- [ ] Windows\n- [ ] Other (Please specify in the description above)\n", "before_files": [{"content": "from inspect import Signature\n\nfrom msgspec.inspect import UNSET\nfrom pydantic.fields import Undefined\n\nfrom starlite.types import Empty\n\nDEFAULT_ALLOWED_CORS_HEADERS = {\"Accept\", \"Accept-Language\", \"Content-Language\", \"Content-Type\"}\nDEFAULT_CHUNK_SIZE = 1024 * 128 # 128KB\nHTTP_RESPONSE_BODY = \"http.response.body\"\nHTTP_RESPONSE_START = \"http.response.start\"\nONE_MEGABYTE = 1024 * 1024\nREDIRECT_STATUS_CODES = {301, 302, 303, 307, 308}\nRESERVED_KWARGS = {\"state\", \"headers\", \"cookies\", \"request\", \"socket\", \"data\", \"query\", \"scope\", \"body\"}\nSCOPE_STATE_DEPENDENCY_CACHE = \"dependency_cache\"\nSCOPE_STATE_NAMESPACE = \"__starlite__\"\nSCOPE_STATE_RESPONSE_COMPRESSED = \"response_compressed\"\nUNDEFINED_SENTINELS = {Undefined, Signature.empty, UNSET, Empty, Ellipsis}\nSKIP_VALIDATION_NAMES = {\"request\", \"socket\", \"scope\", \"receive\", \"send\"}\n", "path": "starlite/constants.py"}]}
1,101
170
gh_patches_debug_7811
rasdani/github-patches
git_diff
open-mmlab__mmpose-970
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> [Bug] Possible bug in `mmpose/apis/train.py` https://github.com/open-mmlab/mmpose/blob/8d668a578f1580373d90a1eea88f23f6816df60e/mmpose/apis/train.py#L140 Here is the code that may have problem. And I think the default value for `workers_per_gpu` should be an integer but not a dict. </issue> <code> [start of mmpose/apis/train.py] 1 # Copyright (c) OpenMMLab. All rights reserved. 2 import warnings 3 4 import torch 5 from mmcv.parallel import MMDataParallel, MMDistributedDataParallel 6 from mmcv.runner import DistSamplerSeedHook, EpochBasedRunner, OptimizerHook 7 8 from mmpose.core import DistEvalHook, EvalHook, build_optimizers 9 from mmpose.core.distributed_wrapper import DistributedDataParallelWrapper 10 from mmpose.datasets import build_dataloader, build_dataset 11 from mmpose.utils import get_root_logger 12 13 try: 14 from mmcv.runner import Fp16OptimizerHook 15 except ImportError: 16 warnings.warn( 17 'Fp16OptimizerHook from mmpose will be deprecated from ' 18 'v0.15.0. Please install mmcv>=1.1.4', DeprecationWarning) 19 from mmpose.core import Fp16OptimizerHook 20 21 22 def train_model(model, 23 dataset, 24 cfg, 25 distributed=False, 26 validate=False, 27 timestamp=None, 28 meta=None): 29 """Train model entry function. 30 31 Args: 32 model (nn.Module): The model to be trained. 33 dataset (Dataset): Train dataset. 34 cfg (dict): The config dict for training. 35 distributed (bool): Whether to use distributed training. 36 Default: False. 37 validate (bool): Whether to do evaluation. Default: False. 38 timestamp (str | None): Local time for runner. Default: None. 39 meta (dict | None): Meta dict to record some important information. 40 Default: None 41 """ 42 logger = get_root_logger(cfg.log_level) 43 44 # prepare data loaders 45 dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset] 46 # step 1: give default values and override (if exist) from cfg.data 47 loader_cfg = { 48 **dict( 49 seed=cfg.get('seed'), 50 drop_last=False, 51 dist=distributed, 52 num_gpus=len(cfg.gpu_ids)), 53 **({} if torch.__version__ != 'parrots' else dict( 54 prefetch_num=2, 55 pin_memory=False, 56 )), 57 **dict((k, cfg.data[k]) for k in [ 58 'samples_per_gpu', 59 'workers_per_gpu', 60 'shuffle', 61 'seed', 62 'drop_last', 63 'prefetch_num', 64 'pin_memory', 65 'persistent_workers', 66 ] if k in cfg.data) 67 } 68 69 # step 2: cfg.data.train_dataloader has highest priority 70 train_loader_cfg = dict(loader_cfg, **cfg.data.get('train_dataloader', {})) 71 72 data_loaders = [build_dataloader(ds, **train_loader_cfg) for ds in dataset] 73 74 # determine wether use adversarial training precess or not 75 use_adverserial_train = cfg.get('use_adversarial_train', False) 76 77 # put model on gpus 78 if distributed: 79 find_unused_parameters = cfg.get('find_unused_parameters', True) 80 # Sets the `find_unused_parameters` parameter in 81 # torch.nn.parallel.DistributedDataParallel 82 83 if use_adverserial_train: 84 # Use DistributedDataParallelWrapper for adversarial training 85 model = DistributedDataParallelWrapper( 86 model, 87 device_ids=[torch.cuda.current_device()], 88 broadcast_buffers=False, 89 find_unused_parameters=find_unused_parameters) 90 else: 91 model = MMDistributedDataParallel( 92 model.cuda(), 93 device_ids=[torch.cuda.current_device()], 94 broadcast_buffers=False, 95 find_unused_parameters=find_unused_parameters) 96 else: 97 model = MMDataParallel( 98 model.cuda(cfg.gpu_ids[0]), 
device_ids=cfg.gpu_ids) 99 100 # build runner 101 optimizer = build_optimizers(model, cfg.optimizer) 102 103 runner = EpochBasedRunner( 104 model, 105 optimizer=optimizer, 106 work_dir=cfg.work_dir, 107 logger=logger, 108 meta=meta) 109 # an ugly workaround to make .log and .log.json filenames the same 110 runner.timestamp = timestamp 111 112 if use_adverserial_train: 113 # The optimizer step process is included in the train_step function 114 # of the model, so the runner should NOT include optimizer hook. 115 optimizer_config = None 116 else: 117 # fp16 setting 118 fp16_cfg = cfg.get('fp16', None) 119 if fp16_cfg is not None: 120 optimizer_config = Fp16OptimizerHook( 121 **cfg.optimizer_config, **fp16_cfg, distributed=distributed) 122 elif distributed and 'type' not in cfg.optimizer_config: 123 optimizer_config = OptimizerHook(**cfg.optimizer_config) 124 else: 125 optimizer_config = cfg.optimizer_config 126 127 # register hooks 128 runner.register_training_hooks(cfg.lr_config, optimizer_config, 129 cfg.checkpoint_config, cfg.log_config, 130 cfg.get('momentum_config', None)) 131 if distributed: 132 runner.register_hook(DistSamplerSeedHook()) 133 134 # register eval hooks 135 if validate: 136 eval_cfg = cfg.get('evaluation', {}) 137 val_dataset = build_dataset(cfg.data.val, dict(test_mode=True)) 138 dataloader_setting = dict( 139 samples_per_gpu=1, 140 workers_per_gpu=cfg.data.get('workers_per_gpu', {}), 141 # cfg.gpus will be ignored if distributed 142 num_gpus=len(cfg.gpu_ids), 143 dist=distributed, 144 drop_last=False, 145 shuffle=False) 146 dataloader_setting = dict(dataloader_setting, 147 **cfg.data.get('val_dataloader', {})) 148 val_dataloader = build_dataloader(val_dataset, **dataloader_setting) 149 eval_hook = DistEvalHook if distributed else EvalHook 150 runner.register_hook(eval_hook(val_dataloader, **eval_cfg)) 151 152 if cfg.resume_from: 153 runner.resume(cfg.resume_from) 154 elif cfg.load_from: 155 runner.load_checkpoint(cfg.load_from) 156 runner.run(data_loaders, cfg.workflow, cfg.total_epochs) 157 [end of mmpose/apis/train.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/mmpose/apis/train.py b/mmpose/apis/train.py --- a/mmpose/apis/train.py +++ b/mmpose/apis/train.py @@ -137,7 +137,7 @@ val_dataset = build_dataset(cfg.data.val, dict(test_mode=True)) dataloader_setting = dict( samples_per_gpu=1, - workers_per_gpu=cfg.data.get('workers_per_gpu', {}), + workers_per_gpu=cfg.data.get('workers_per_gpu', 1), # cfg.gpus will be ignored if distributed num_gpus=len(cfg.gpu_ids), dist=distributed,
{"golden_diff": "diff --git a/mmpose/apis/train.py b/mmpose/apis/train.py\n--- a/mmpose/apis/train.py\n+++ b/mmpose/apis/train.py\n@@ -137,7 +137,7 @@\n val_dataset = build_dataset(cfg.data.val, dict(test_mode=True))\n dataloader_setting = dict(\n samples_per_gpu=1,\n- workers_per_gpu=cfg.data.get('workers_per_gpu', {}),\n+ workers_per_gpu=cfg.data.get('workers_per_gpu', 1),\n # cfg.gpus will be ignored if distributed\n num_gpus=len(cfg.gpu_ids),\n dist=distributed,\n", "issue": "[Bug] Possible bug in `mmpose/apis/train.py`\nhttps://github.com/open-mmlab/mmpose/blob/8d668a578f1580373d90a1eea88f23f6816df60e/mmpose/apis/train.py#L140\r\n\r\nHere is the code that may have problem. And I think the default value for `workers_per_gpu` should be an integer but not a dict. \n", "before_files": [{"content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\n\nimport torch\nfrom mmcv.parallel import MMDataParallel, MMDistributedDataParallel\nfrom mmcv.runner import DistSamplerSeedHook, EpochBasedRunner, OptimizerHook\n\nfrom mmpose.core import DistEvalHook, EvalHook, build_optimizers\nfrom mmpose.core.distributed_wrapper import DistributedDataParallelWrapper\nfrom mmpose.datasets import build_dataloader, build_dataset\nfrom mmpose.utils import get_root_logger\n\ntry:\n from mmcv.runner import Fp16OptimizerHook\nexcept ImportError:\n warnings.warn(\n 'Fp16OptimizerHook from mmpose will be deprecated from '\n 'v0.15.0. Please install mmcv>=1.1.4', DeprecationWarning)\n from mmpose.core import Fp16OptimizerHook\n\n\ndef train_model(model,\n dataset,\n cfg,\n distributed=False,\n validate=False,\n timestamp=None,\n meta=None):\n \"\"\"Train model entry function.\n\n Args:\n model (nn.Module): The model to be trained.\n dataset (Dataset): Train dataset.\n cfg (dict): The config dict for training.\n distributed (bool): Whether to use distributed training.\n Default: False.\n validate (bool): Whether to do evaluation. Default: False.\n timestamp (str | None): Local time for runner. 
Default: None.\n meta (dict | None): Meta dict to record some important information.\n Default: None\n \"\"\"\n logger = get_root_logger(cfg.log_level)\n\n # prepare data loaders\n dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset]\n # step 1: give default values and override (if exist) from cfg.data\n loader_cfg = {\n **dict(\n seed=cfg.get('seed'),\n drop_last=False,\n dist=distributed,\n num_gpus=len(cfg.gpu_ids)),\n **({} if torch.__version__ != 'parrots' else dict(\n prefetch_num=2,\n pin_memory=False,\n )),\n **dict((k, cfg.data[k]) for k in [\n 'samples_per_gpu',\n 'workers_per_gpu',\n 'shuffle',\n 'seed',\n 'drop_last',\n 'prefetch_num',\n 'pin_memory',\n 'persistent_workers',\n ] if k in cfg.data)\n }\n\n # step 2: cfg.data.train_dataloader has highest priority\n train_loader_cfg = dict(loader_cfg, **cfg.data.get('train_dataloader', {}))\n\n data_loaders = [build_dataloader(ds, **train_loader_cfg) for ds in dataset]\n\n # determine wether use adversarial training precess or not\n use_adverserial_train = cfg.get('use_adversarial_train', False)\n\n # put model on gpus\n if distributed:\n find_unused_parameters = cfg.get('find_unused_parameters', True)\n # Sets the `find_unused_parameters` parameter in\n # torch.nn.parallel.DistributedDataParallel\n\n if use_adverserial_train:\n # Use DistributedDataParallelWrapper for adversarial training\n model = DistributedDataParallelWrapper(\n model,\n device_ids=[torch.cuda.current_device()],\n broadcast_buffers=False,\n find_unused_parameters=find_unused_parameters)\n else:\n model = MMDistributedDataParallel(\n model.cuda(),\n device_ids=[torch.cuda.current_device()],\n broadcast_buffers=False,\n find_unused_parameters=find_unused_parameters)\n else:\n model = MMDataParallel(\n model.cuda(cfg.gpu_ids[0]), device_ids=cfg.gpu_ids)\n\n # build runner\n optimizer = build_optimizers(model, cfg.optimizer)\n\n runner = EpochBasedRunner(\n model,\n optimizer=optimizer,\n work_dir=cfg.work_dir,\n logger=logger,\n meta=meta)\n # an ugly workaround to make .log and .log.json filenames the same\n runner.timestamp = timestamp\n\n if use_adverserial_train:\n # The optimizer step process is included in the train_step function\n # of the model, so the runner should NOT include optimizer hook.\n optimizer_config = None\n else:\n # fp16 setting\n fp16_cfg = cfg.get('fp16', None)\n if fp16_cfg is not None:\n optimizer_config = Fp16OptimizerHook(\n **cfg.optimizer_config, **fp16_cfg, distributed=distributed)\n elif distributed and 'type' not in cfg.optimizer_config:\n optimizer_config = OptimizerHook(**cfg.optimizer_config)\n else:\n optimizer_config = cfg.optimizer_config\n\n # register hooks\n runner.register_training_hooks(cfg.lr_config, optimizer_config,\n cfg.checkpoint_config, cfg.log_config,\n cfg.get('momentum_config', None))\n if distributed:\n runner.register_hook(DistSamplerSeedHook())\n\n # register eval hooks\n if validate:\n eval_cfg = cfg.get('evaluation', {})\n val_dataset = build_dataset(cfg.data.val, dict(test_mode=True))\n dataloader_setting = dict(\n samples_per_gpu=1,\n workers_per_gpu=cfg.data.get('workers_per_gpu', {}),\n # cfg.gpus will be ignored if distributed\n num_gpus=len(cfg.gpu_ids),\n dist=distributed,\n drop_last=False,\n shuffle=False)\n dataloader_setting = dict(dataloader_setting,\n **cfg.data.get('val_dataloader', {}))\n val_dataloader = build_dataloader(val_dataset, **dataloader_setting)\n eval_hook = DistEvalHook if distributed else EvalHook\n runner.register_hook(eval_hook(val_dataloader, 
**eval_cfg))\n\n if cfg.resume_from:\n runner.resume(cfg.resume_from)\n elif cfg.load_from:\n runner.load_checkpoint(cfg.load_from)\n runner.run(data_loaders, cfg.workflow, cfg.total_epochs)\n", "path": "mmpose/apis/train.py"}]}
2,254
137
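Note on the open-mmlab__mmpose-970 row above: the golden_diff only changes the `workers_per_gpu` fallback from `{}` to the integer `1`. Below is a minimal standalone sketch of the patched validation-loader settings; `build_val_loader_cfg` is a hypothetical helper name, and `cfg`, `gpu_ids`, and `distributed` are assumed to have the shapes used in the row's code.

```python
def build_val_loader_cfg(cfg, gpu_ids, distributed):
    """Return validation dataloader kwargs with an integer worker fallback."""
    return dict(
        samples_per_gpu=1,
        # The fallback must be a worker count (int), not a dict -- the bug in the row above.
        workers_per_gpu=cfg.data.get('workers_per_gpu', 1),
        num_gpus=len(gpu_ids),  # ignored when running distributed
        dist=distributed,
        drop_last=False,
        shuffle=False,
    )
```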
gh_patches_debug_6437
rasdani/github-patches
git_diff
liqd__a4-meinberlin-532
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Participated projects visible for all on user profile If I write a comment on platform meinBerlin and take part in a participation process, everyone can see my profile and in which participation process I took part, even I'm not logged in. It's not anonymous enough. </issue> <code> [start of apps/users/views.py] 1 from django.views.generic.detail import DetailView 2 3 from adhocracy4.projects.models import Project 4 5 from . import models 6 7 8 class ProfileView(DetailView): 9 model = models.User 10 slug_field = 'username' 11 12 @property 13 def get_participated_projects(self): 14 user = self.object 15 16 qs = Project.objects.filter( 17 action__actor=user 18 ).distinct() 19 20 return qs 21 [end of apps/users/views.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/apps/users/views.py b/apps/users/views.py --- a/apps/users/views.py +++ b/apps/users/views.py @@ -1,20 +1,8 @@ from django.views.generic.detail import DetailView -from adhocracy4.projects.models import Project - from . import models class ProfileView(DetailView): model = models.User slug_field = 'username' - - @property - def get_participated_projects(self): - user = self.object - - qs = Project.objects.filter( - action__actor=user - ).distinct() - - return qs
{"golden_diff": "diff --git a/apps/users/views.py b/apps/users/views.py\n--- a/apps/users/views.py\n+++ b/apps/users/views.py\n@@ -1,20 +1,8 @@\n from django.views.generic.detail import DetailView\n \n-from adhocracy4.projects.models import Project\n-\n from . import models\n \n \n class ProfileView(DetailView):\n model = models.User\n slug_field = 'username'\n-\n- @property\n- def get_participated_projects(self):\n- user = self.object\n-\n- qs = Project.objects.filter(\n- action__actor=user\n- ).distinct()\n-\n- return qs\n", "issue": "Participated projects visible for all on user profile\nIf I write a comment on platform meinBerlin and take part in a participation process, everyone can see my profile and in which participation process I took part, even I'm not logged in. It's not anonymous enough.\n", "before_files": [{"content": "from django.views.generic.detail import DetailView\n\nfrom adhocracy4.projects.models import Project\n\nfrom . import models\n\n\nclass ProfileView(DetailView):\n model = models.User\n slug_field = 'username'\n\n @property\n def get_participated_projects(self):\n user = self.object\n\n qs = Project.objects.filter(\n action__actor=user\n ).distinct()\n\n return qs\n", "path": "apps/users/views.py"}]}
712
135
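Note on the liqd__a4-meinberlin-532 row above: applying the golden_diff leaves `apps/users/views.py` as the short module below (the `Project` import and the `get_participated_projects` property are removed, so a profile page no longer exposes participation). This is simply the post-patch file implied by the diff, shown in one piece for readability.

```python
from django.views.generic.detail import DetailView

from . import models


class ProfileView(DetailView):
    model = models.User
    slug_field = 'username'
```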
gh_patches_debug_32831
rasdani/github-patches
git_diff
pymeasure__pymeasure-866
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> `TypeError` on SerialAdapter `read_bytes(-1)` When I use `self.read_bytes(-1)` from my instrument using a `SerialAdapter`, I get the error: > C:\Program Files\Python310\lib\ctypes\__init__.py:66: TypeError With the traceback: ``` ..\..\..\pymeasure\instruments\velleman\velleman_k8090.py:247: in read response = self.read_bytes(-1) ..\..\..\pymeasure\instruments\instrument.py:153: in read_bytes return self.adapter.read_bytes(count, **kwargs) ..\..\..\pymeasure\adapters\adapter.py:123: in read_bytes read = self._read_bytes(count, break_on_termchar, **kwargs) ..\..\..\pymeasure\adapters\serial.py:108: in _read_bytes return self.connection.read(1e99 if count == -1 else count, **kwargs) ..\..\..\venv\lib\site-packages\serial\serialwin32.py:278: in read buf = ctypes.create_string_buffer(n) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ init = 1e+99, size = None def create_string_buffer(init, size=None): """create_string_buffer(aBytes) -> character array create_string_buffer(anInteger) -> character array create_string_buffer(aBytes, anInteger) -> character array """ if isinstance(init, bytes): if size is None: size = len(init)+1 _sys.audit("ctypes.create_string_buffer", init, size) buftype = c_char * size buf = buftype() buf.value = init return buf elif isinstance(init, int): _sys.audit("ctypes.create_string_buffer", None, init) buftype = c_char * init buf = buftype() return buf > raise TypeError(init) E TypeError: 1e+99 C:\Program Files\Python310\lib\ctypes\__init__.py:66: TypeError ``` It looks like reading 1e99 bytes no (longer?) works. </issue> <code> [start of pymeasure/adapters/serial.py] 1 # 2 # This file is part of the PyMeasure package. 3 # 4 # Copyright (c) 2013-2023 PyMeasure Developers 5 # 6 # Permission is hereby granted, free of charge, to any person obtaining a copy 7 # of this software and associated documentation files (the "Software"), to deal 8 # in the Software without restriction, including without limitation the rights 9 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 # copies of the Software, and to permit persons to whom the Software is 11 # furnished to do so, subject to the following conditions: 12 # 13 # The above copyright notice and this permission notice shall be included in 14 # all copies or substantial portions of the Software. 15 # 16 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 22 # THE SOFTWARE. 23 # 24 25 import logging 26 27 import serial 28 from .adapter import Adapter 29 30 log = logging.getLogger(__name__) 31 log.addHandler(logging.NullHandler()) 32 33 34 class SerialAdapter(Adapter): 35 """ Adapter class for using the Python Serial package to allow 36 serial communication to instrument 37 38 :param port: Serial port 39 :param preprocess_reply: An optional callable used to preprocess strings 40 received from the instrument. The callable returns the processed string. 41 42 .. deprecated:: 0.11 43 Implement it in the instrument's `read` method instead. 
44 45 :param write_termination: String appended to messages before writing them. 46 :param read_termination: String expected at end of read message and removed. 47 :param \\**kwargs: Any valid key-word argument for serial.Serial 48 """ 49 50 def __init__(self, port, preprocess_reply=None, 51 write_termination="", read_termination="", 52 **kwargs): 53 super().__init__(preprocess_reply=preprocess_reply) 54 if isinstance(port, serial.SerialBase): 55 self.connection = port 56 else: 57 self.connection = serial.Serial(port, **kwargs) 58 self.write_termination = write_termination 59 self.read_termination = read_termination 60 61 def _write(self, command, **kwargs): 62 """Write a string command to the instrument appending `write_termination`. 63 64 :param str command: Command string to be sent to the instrument 65 (without termination). 66 :param \\**kwargs: Keyword arguments for the connection itself. 67 """ 68 command += self.write_termination 69 self._write_bytes(command.encode(), **kwargs) 70 71 def _write_bytes(self, content, **kwargs): 72 """Write the bytes `content` to the instrument. 73 74 :param bytes content: The bytes to write to the instrument. 75 :param \\**kwargs: Keyword arguments for the connection itself. 76 """ 77 self.connection.write(content, **kwargs) 78 79 def _read(self, **kwargs): 80 """Read up to (excluding) `read_termination` or the whole read buffer. 81 82 :param \\**kwargs: Keyword arguments for the connection itself. 83 :returns str: ASCII response of the instrument (read_termination is removed first). 84 """ 85 read = self._read_bytes(-1, break_on_termchar=True, **kwargs).decode() 86 # Python>3.8 this shorter form is possible: 87 # self._read_bytes(-1).decode().removesuffix(self.read_termination) 88 if self.read_termination: 89 return read.split(self.read_termination)[0] 90 else: 91 return read 92 93 def _read_bytes(self, count, break_on_termchar, **kwargs): 94 """Read a certain number of bytes from the instrument. 95 96 :param int count: Number of bytes to read. A value of -1 indicates to 97 read from the whole read buffer. 98 :param bool break_on_termchar: Stop reading at a termination character. 99 :param \\**kwargs: Keyword arguments for the connection itself. 100 :returns bytes: Bytes response of the instrument (including termination). 101 """ 102 if break_on_termchar and self.read_termination: 103 return self.connection.read_until(self.read_termination.encode(), 104 count if count > 0 else None, 105 **kwargs) 106 else: 107 # At -1 we read a very large number of bytes, which can be considered the whole buffer. 108 return self.connection.read(1e99 if count == -1 else count, **kwargs) 109 110 def flush_read_buffer(self): 111 """Flush and discard the input buffer.""" 112 self.connection.reset_input_buffer() 113 114 def __repr__(self): 115 return "<SerialAdapter(port='%s')>" % self.connection.port 116 [end of pymeasure/adapters/serial.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pymeasure/adapters/serial.py b/pymeasure/adapters/serial.py --- a/pymeasure/adapters/serial.py +++ b/pymeasure/adapters/serial.py @@ -94,7 +94,7 @@ """Read a certain number of bytes from the instrument. :param int count: Number of bytes to read. A value of -1 indicates to - read from the whole read buffer. + read from the whole read buffer (waits for timeout). :param bool break_on_termchar: Stop reading at a termination character. :param \\**kwargs: Keyword arguments for the connection itself. :returns bytes: Bytes response of the instrument (including termination). @@ -103,9 +103,25 @@ return self.connection.read_until(self.read_termination.encode(), count if count > 0 else None, **kwargs) + elif count >= 0: + return self.connection.read(count, **kwargs) else: - # At -1 we read a very large number of bytes, which can be considered the whole buffer. - return self.connection.read(1e99 if count == -1 else count, **kwargs) + # For -1 we empty the buffer completely + return self._read_bytes_until_timeout() + + def _read_bytes_until_timeout(self, chunk_size=256, **kwargs): + """Read from the serial until a timeout occurs, regardless of the number of bytes. + + :chunk_size: The number of bytes attempted to in a single transaction. + Multiple of these transactions will occur. + """ + # `Serial.readlines()` has an unpredictable timeout, see PR #866 + data = bytes() + while True: + chunk = self.connection.read(chunk_size, **kwargs) + data += chunk + if len(chunk) < chunk_size: # If fewer bytes got returned, we had a timeout + return data def flush_read_buffer(self): """Flush and discard the input buffer."""
{"golden_diff": "diff --git a/pymeasure/adapters/serial.py b/pymeasure/adapters/serial.py\n--- a/pymeasure/adapters/serial.py\n+++ b/pymeasure/adapters/serial.py\n@@ -94,7 +94,7 @@\n \"\"\"Read a certain number of bytes from the instrument.\n \n :param int count: Number of bytes to read. A value of -1 indicates to\n- read from the whole read buffer.\n+ read from the whole read buffer (waits for timeout).\n :param bool break_on_termchar: Stop reading at a termination character.\n :param \\\\**kwargs: Keyword arguments for the connection itself.\n :returns bytes: Bytes response of the instrument (including termination).\n@@ -103,9 +103,25 @@\n return self.connection.read_until(self.read_termination.encode(),\n count if count > 0 else None,\n **kwargs)\n+ elif count >= 0:\n+ return self.connection.read(count, **kwargs)\n else:\n- # At -1 we read a very large number of bytes, which can be considered the whole buffer.\n- return self.connection.read(1e99 if count == -1 else count, **kwargs)\n+ # For -1 we empty the buffer completely\n+ return self._read_bytes_until_timeout()\n+\n+ def _read_bytes_until_timeout(self, chunk_size=256, **kwargs):\n+ \"\"\"Read from the serial until a timeout occurs, regardless of the number of bytes.\n+\n+ :chunk_size: The number of bytes attempted to in a single transaction.\n+ Multiple of these transactions will occur.\n+ \"\"\"\n+ # `Serial.readlines()` has an unpredictable timeout, see PR #866\n+ data = bytes()\n+ while True:\n+ chunk = self.connection.read(chunk_size, **kwargs)\n+ data += chunk\n+ if len(chunk) < chunk_size: # If fewer bytes got returned, we had a timeout\n+ return data\n \n def flush_read_buffer(self):\n \"\"\"Flush and discard the input buffer.\"\"\"\n", "issue": "`TypeError` on SerialAdapter `read_bytes(-1)`\nWhen I use `self.read_bytes(-1)` from my instrument using a `SerialAdapter`, I get the error:\r\n\r\n> C:\\Program Files\\Python310\\lib\\ctypes\\__init__.py:66: TypeError\r\n\r\nWith the traceback:\r\n\r\n```\r\n..\\..\\..\\pymeasure\\instruments\\velleman\\velleman_k8090.py:247: in read\r\n response = self.read_bytes(-1)\r\n..\\..\\..\\pymeasure\\instruments\\instrument.py:153: in read_bytes\r\n return self.adapter.read_bytes(count, **kwargs)\r\n..\\..\\..\\pymeasure\\adapters\\adapter.py:123: in read_bytes\r\n read = self._read_bytes(count, break_on_termchar, **kwargs)\r\n..\\..\\..\\pymeasure\\adapters\\serial.py:108: in _read_bytes\r\n return self.connection.read(1e99 if count == -1 else count, **kwargs)\r\n..\\..\\..\\venv\\lib\\site-packages\\serial\\serialwin32.py:278: in read\r\n buf = ctypes.create_string_buffer(n)\r\n_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _\r\n\r\ninit = 1e+99, size = None\r\n\r\n def create_string_buffer(init, size=None):\r\n \"\"\"create_string_buffer(aBytes) -> character array\r\n create_string_buffer(anInteger) -> character array\r\n create_string_buffer(aBytes, anInteger) -> character array\r\n \"\"\"\r\n if isinstance(init, bytes):\r\n if size is None:\r\n size = len(init)+1\r\n _sys.audit(\"ctypes.create_string_buffer\", init, size)\r\n buftype = c_char * size\r\n buf = buftype()\r\n buf.value = init\r\n return buf\r\n elif isinstance(init, int):\r\n _sys.audit(\"ctypes.create_string_buffer\", None, init)\r\n buftype = c_char * init\r\n buf = buftype()\r\n return buf\r\n> raise TypeError(init)\r\nE TypeError: 1e+99\r\n\r\nC:\\Program Files\\Python310\\lib\\ctypes\\__init__.py:66: TypeError\r\n\r\n```\r\n\r\nIt looks like reading 1e99 bytes no (longer?) 
works.\n", "before_files": [{"content": "#\n# This file is part of the PyMeasure package.\n#\n# Copyright (c) 2013-2023 PyMeasure Developers\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n\nimport logging\n\nimport serial\nfrom .adapter import Adapter\n\nlog = logging.getLogger(__name__)\nlog.addHandler(logging.NullHandler())\n\n\nclass SerialAdapter(Adapter):\n \"\"\" Adapter class for using the Python Serial package to allow\n serial communication to instrument\n\n :param port: Serial port\n :param preprocess_reply: An optional callable used to preprocess strings\n received from the instrument. The callable returns the processed string.\n\n .. deprecated:: 0.11\n Implement it in the instrument's `read` method instead.\n\n :param write_termination: String appended to messages before writing them.\n :param read_termination: String expected at end of read message and removed.\n :param \\\\**kwargs: Any valid key-word argument for serial.Serial\n \"\"\"\n\n def __init__(self, port, preprocess_reply=None,\n write_termination=\"\", read_termination=\"\",\n **kwargs):\n super().__init__(preprocess_reply=preprocess_reply)\n if isinstance(port, serial.SerialBase):\n self.connection = port\n else:\n self.connection = serial.Serial(port, **kwargs)\n self.write_termination = write_termination\n self.read_termination = read_termination\n\n def _write(self, command, **kwargs):\n \"\"\"Write a string command to the instrument appending `write_termination`.\n\n :param str command: Command string to be sent to the instrument\n (without termination).\n :param \\\\**kwargs: Keyword arguments for the connection itself.\n \"\"\"\n command += self.write_termination\n self._write_bytes(command.encode(), **kwargs)\n\n def _write_bytes(self, content, **kwargs):\n \"\"\"Write the bytes `content` to the instrument.\n\n :param bytes content: The bytes to write to the instrument.\n :param \\\\**kwargs: Keyword arguments for the connection itself.\n \"\"\"\n self.connection.write(content, **kwargs)\n\n def _read(self, **kwargs):\n \"\"\"Read up to (excluding) `read_termination` or the whole read buffer.\n\n :param \\\\**kwargs: Keyword arguments for the connection itself.\n :returns str: ASCII response of the instrument (read_termination is removed first).\n \"\"\"\n read = self._read_bytes(-1, break_on_termchar=True, **kwargs).decode()\n # Python>3.8 this shorter form is possible:\n # self._read_bytes(-1).decode().removesuffix(self.read_termination)\n if self.read_termination:\n return 
read.split(self.read_termination)[0]\n else:\n return read\n\n def _read_bytes(self, count, break_on_termchar, **kwargs):\n \"\"\"Read a certain number of bytes from the instrument.\n\n :param int count: Number of bytes to read. A value of -1 indicates to\n read from the whole read buffer.\n :param bool break_on_termchar: Stop reading at a termination character.\n :param \\\\**kwargs: Keyword arguments for the connection itself.\n :returns bytes: Bytes response of the instrument (including termination).\n \"\"\"\n if break_on_termchar and self.read_termination:\n return self.connection.read_until(self.read_termination.encode(),\n count if count > 0 else None,\n **kwargs)\n else:\n # At -1 we read a very large number of bytes, which can be considered the whole buffer.\n return self.connection.read(1e99 if count == -1 else count, **kwargs)\n\n def flush_read_buffer(self):\n \"\"\"Flush and discard the input buffer.\"\"\"\n self.connection.reset_input_buffer()\n\n def __repr__(self):\n return \"<SerialAdapter(port='%s')>\" % self.connection.port\n", "path": "pymeasure/adapters/serial.py"}]}
2,337
449
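Note on the pymeasure__pymeasure-866 row above: the golden_diff drops the `read(1e99)` call, which fails because `ctypes.create_string_buffer` rejects a float, and drains the buffer in fixed-size chunks instead. A self-contained sketch of that loop follows; `connection` is assumed to be any pyserial-style object whose `read(n)` returns fewer than `n` bytes once the timeout elapses.

```python
def read_bytes_until_timeout(connection, chunk_size=256):
    """Drain the read buffer by reading fixed-size chunks until a short
    (timed-out) read occurs, mirroring the helper added in the diff above."""
    data = bytes()
    while True:
        chunk = connection.read(chunk_size)
        data += chunk
        if len(chunk) < chunk_size:  # short read means the timeout was hit
            return data
```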
gh_patches_debug_35950
rasdani/github-patches
git_diff
dask__dask-1325
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> NotImplementedError: Unknown protocol hdfs After running into issue #1232, I upgraded to the the source version. hdfs3 is installed and working, but ``` import hdfs3 from dask import dataframe as dd namenode = 'mynode.com' port = 8020 hdfs = hdfs3.HDFileSystem(namenode, port) hdfs.list('/path/to/file.csv') # returns file node information data = df.read_csv("hdfs://{0}/path/to/file.csv".format(namenode)) ``` This produces the following error ``` NotImplementedErrorTraceback (most recent call last) <ipython-input-30-2d58fa3887f0> in <module>() ----> 1 data = dd.read_csv('hdfs://{0}/path.to/file.csv'.format(namenode)) /usr/local/share/virtualenvs/cartman/lib/python2.7/site-packages/dask/dataframe/csv.pyc in read_csv(filename, blocksize, chunkbytes, collection, lineterminator, compression, sample, enforce, storage_options, **kwargs) /usr/local/share/virtualenvs/cartman/lib/python2.7/site-packages/dask/bytes/core.pyc in read_bytes(path, delimiter, not_zero, blocksize, sample, compression, **kwargs) NotImplementedError: Unknown protocol hdfs://mynode.com/path/to/file.csv ``` </issue> <code> [start of dask/bytes/core.py] 1 from __future__ import print_function, division, absolute_import 2 3 import io 4 5 from toolz import merge 6 7 from .compression import seekable_files, files as compress_files 8 from .utils import SeekableFile 9 from ..compatibility import PY2 10 from ..delayed import delayed 11 from ..utils import infer_storage_options, system_encoding 12 13 delayed = delayed(pure=True) 14 15 # Global registration dictionaries for backend storage functions 16 # See docstrings to functions below for more information 17 _read_bytes = dict() 18 _open_files = dict() 19 _open_text_files = dict() 20 21 22 def read_bytes(urlpath, delimiter=None, not_zero=False, blocksize=2**27, 23 sample=True, compression=None, **kwargs): 24 """ Convert path to a list of delayed values 25 26 The path may be a filename like ``'2015-01-01.csv'`` or a globstring 27 like ``'2015-*-*.csv'``. 28 29 The path may be preceded by a protocol, like ``s3://`` or ``hdfs://`` if 30 those libraries are installed. 31 32 This cleanly breaks data by a delimiter if given, so that block boundaries 33 start directly after a delimiter and end on the delimiter. 34 35 Parameters 36 ---------- 37 urlpath: string 38 Absolute or relative filepath, URL (may include protocols like 39 ``s3://``), or globstring pointing to data. 40 delimiter: bytes 41 An optional delimiter, like ``b'\n'`` on which to split blocks of bytes 42 not_zero: force seek of start-of-file delimiter, discarding header 43 blocksize: int (=128MB) 44 Chunk size 45 compression: string or None 46 String like 'gzip' or 'xz'. Must support efficient random access. 47 sample: bool, int 48 Whether or not to return a sample from the first 10k bytes 49 **kwargs: dict 50 Extra options that make sense to a particular storage connection, e.g. 51 host, port, username, password, etc. 52 53 Examples 54 -------- 55 >>> sample, blocks = read_bytes('2015-*-*.csv', delimiter=b'\\n') # doctest: +SKIP 56 >>> sample, blocks = read_bytes('s3://bucket/2015-*-*.csv', delimiter=b'\\n') # doctest: +SKIP 57 58 Returns 59 ------- 60 10kB sample header and list of ``dask.Delayed`` objects or list of lists of 61 delayed objects if ``fn`` is a globstring. 
62 """ 63 if compression is not None and compression not in compress_files: 64 raise ValueError("Compression type %s not supported" % compression) 65 66 storage_options = infer_storage_options(urlpath, 67 inherit_storage_options=kwargs) 68 protocol = storage_options.pop('protocol') 69 try: 70 read_bytes = _read_bytes[protocol] 71 except KeyError: 72 raise NotImplementedError("Unknown protocol %s (%s)" % 73 (protocol, urlpath)) 74 75 return read_bytes(storage_options.pop('path'), delimiter=delimiter, 76 not_zero=not_zero, blocksize=blocksize, sample=sample, 77 compression=compression, **storage_options) 78 79 80 def open_files_by(open_files_backend, path, compression=None, **kwargs): 81 """ Given open files backend and path return dask.delayed file-like objects 82 83 NOTE: This is an internal helper function, please refer to 84 :func:`open_files` documentation for more details. 85 86 Parameters 87 ---------- 88 path: string 89 Filepath or globstring 90 compression: string 91 Compression to use. See ``dask.bytes.compression.files`` for options. 92 **kwargs: dict 93 Extra options that make sense to a particular storage connection, e.g. 94 host, port, username, password, etc. 95 96 Returns 97 ------- 98 List of ``dask.delayed`` objects that compute to file-like objects 99 """ 100 files = open_files_backend(path, **kwargs) 101 102 if compression: 103 decompress = merge(seekable_files, compress_files)[compression] 104 if PY2: 105 files = [delayed(SeekableFile)(file) for file in files] 106 files = [delayed(decompress)(file) for file in files] 107 108 return files 109 110 111 def open_files(urlpath, compression=None, **kwargs): 112 """ Given path return dask.delayed file-like objects 113 114 Parameters 115 ---------- 116 urlpath: string 117 Absolute or relative filepath, URL (may include protocols like 118 ``s3://``), or globstring pointing to data. 119 compression: string 120 Compression to use. See ``dask.bytes.compression.files`` for options. 121 **kwargs: dict 122 Extra options that make sense to a particular storage connection, e.g. 123 host, port, username, password, etc. 124 125 Examples 126 -------- 127 >>> files = open_files('2015-*-*.csv') # doctest: +SKIP 128 >>> files = open_files('s3://bucket/2015-*-*.csv.gz', compression='gzip') # doctest: +SKIP 129 130 Returns 131 ------- 132 List of ``dask.delayed`` objects that compute to file-like objects 133 """ 134 if compression is not None and compression not in compress_files: 135 raise ValueError("Compression type %s not supported" % compression) 136 137 storage_options = infer_storage_options(urlpath, 138 inherit_storage_options=kwargs) 139 protocol = storage_options.pop('protocol') 140 try: 141 open_files_backend = _open_files[protocol] 142 except KeyError: 143 raise NotImplementedError("Unknown protocol %s (%s)" % 144 (protocol, urlpath)) 145 146 return open_files_by(open_files_backend, storage_options.pop('path'), 147 compression=compression, **storage_options) 148 149 150 def open_text_files(urlpath, encoding=system_encoding, errors='strict', 151 compression=None, **kwargs): 152 """ Given path return dask.delayed file-like objects in text mode 153 154 Parameters 155 ---------- 156 urlpath: string 157 Absolute or relative filepath, URL (may include protocols like 158 ``s3://``), or globstring pointing to data. 159 encoding: string 160 errors: string 161 compression: string 162 Compression to use. See ``dask.bytes.compression.files`` for options. 
163 **kwargs: dict 164 Extra options that make sense to a particular storage connection, e.g. 165 host, port, username, password, etc. 166 167 Examples 168 -------- 169 >>> files = open_text_files('2015-*-*.csv', encoding='utf-8') # doctest: +SKIP 170 >>> files = open_text_files('s3://bucket/2015-*-*.csv') # doctest: +SKIP 171 172 Returns 173 ------- 174 List of ``dask.delayed`` objects that compute to text file-like objects 175 """ 176 if compression is not None and compression not in compress_files: 177 raise ValueError("Compression type %s not supported" % compression) 178 179 storage_options = infer_storage_options(urlpath, 180 inherit_storage_options=kwargs) 181 path = storage_options.pop('path') 182 protocol = storage_options.pop('protocol') 183 if protocol in _open_text_files and compression is None: 184 return _open_text_files[protocol](path, 185 encoding=encoding, 186 errors=errors, 187 **storage_options) 188 elif protocol in _open_files: 189 files = open_files_by(_open_files[protocol], 190 path, 191 compression=compression, 192 **storage_options) 193 if PY2: 194 files = [delayed(SeekableFile)(file) for file in files] 195 return [delayed(io.TextIOWrapper)(file, encoding=encoding, 196 errors=errors) for file in files] 197 else: 198 raise NotImplementedError("Unknown protocol %s (%s)" % 199 (protocol, urlpath)) 200 [end of dask/bytes/core.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/dask/bytes/core.py b/dask/bytes/core.py --- a/dask/bytes/core.py +++ b/dask/bytes/core.py @@ -66,6 +66,7 @@ storage_options = infer_storage_options(urlpath, inherit_storage_options=kwargs) protocol = storage_options.pop('protocol') + ensure_protocol(protocol) try: read_bytes = _read_bytes[protocol] except KeyError: @@ -137,6 +138,7 @@ storage_options = infer_storage_options(urlpath, inherit_storage_options=kwargs) protocol = storage_options.pop('protocol') + ensure_protocol(protocol) try: open_files_backend = _open_files[protocol] except KeyError: @@ -180,6 +182,7 @@ inherit_storage_options=kwargs) path = storage_options.pop('path') protocol = storage_options.pop('protocol') + ensure_protocol(protocol) if protocol in _open_text_files and compression is None: return _open_text_files[protocol](path, encoding=encoding, @@ -197,3 +200,28 @@ else: raise NotImplementedError("Unknown protocol %s (%s)" % (protocol, urlpath)) + + +def ensure_protocol(protocol): + if protocol in _read_bytes or protocol in _open_files: + return + + if protocol == 's3': + try: + import dask.s3 + except ImportError: + raise ImportError("Need to install `s3fs` library for s3 support\n" + " conda install s3fs -c conda-forge\n" + " or\n" + " pip install s3fs") + + elif protocol == 'hdfs': + try: + import distributed.hdfs + except ImportError: + raise ImportError("Need to install `distributed` and `hdfs3` " + "for HDFS support\n" + " conda install distributed hdfs3 -c conda-forge") + + else: + raise ValueError("Unknown protocol %s" % protocol)
{"golden_diff": "diff --git a/dask/bytes/core.py b/dask/bytes/core.py\n--- a/dask/bytes/core.py\n+++ b/dask/bytes/core.py\n@@ -66,6 +66,7 @@\n storage_options = infer_storage_options(urlpath,\n inherit_storage_options=kwargs)\n protocol = storage_options.pop('protocol')\n+ ensure_protocol(protocol)\n try:\n read_bytes = _read_bytes[protocol]\n except KeyError:\n@@ -137,6 +138,7 @@\n storage_options = infer_storage_options(urlpath,\n inherit_storage_options=kwargs)\n protocol = storage_options.pop('protocol')\n+ ensure_protocol(protocol)\n try:\n open_files_backend = _open_files[protocol]\n except KeyError:\n@@ -180,6 +182,7 @@\n inherit_storage_options=kwargs)\n path = storage_options.pop('path')\n protocol = storage_options.pop('protocol')\n+ ensure_protocol(protocol)\n if protocol in _open_text_files and compression is None:\n return _open_text_files[protocol](path,\n encoding=encoding,\n@@ -197,3 +200,28 @@\n else:\n raise NotImplementedError(\"Unknown protocol %s (%s)\" %\n (protocol, urlpath))\n+\n+\n+def ensure_protocol(protocol):\n+ if protocol in _read_bytes or protocol in _open_files:\n+ return\n+\n+ if protocol == 's3':\n+ try:\n+ import dask.s3\n+ except ImportError:\n+ raise ImportError(\"Need to install `s3fs` library for s3 support\\n\"\n+ \" conda install s3fs -c conda-forge\\n\"\n+ \" or\\n\"\n+ \" pip install s3fs\")\n+\n+ elif protocol == 'hdfs':\n+ try:\n+ import distributed.hdfs\n+ except ImportError:\n+ raise ImportError(\"Need to install `distributed` and `hdfs3` \"\n+ \"for HDFS support\\n\"\n+ \" conda install distributed hdfs3 -c conda-forge\")\n+\n+ else:\n+ raise ValueError(\"Unknown protocol %s\" % protocol)\n", "issue": "NotImplementedError: Unknown protocol hdfs\nAfter running into issue #1232, I upgraded to the the source version. 
hdfs3 is installed and working, but\n\n```\nimport hdfs3\nfrom dask import dataframe as dd\n\nnamenode = 'mynode.com'\nport = 8020\nhdfs = hdfs3.HDFileSystem(namenode, port)\nhdfs.list('/path/to/file.csv')\n# returns file node information\ndata = df.read_csv(\"hdfs://{0}/path/to/file.csv\".format(namenode))\n```\n\nThis produces the following error\n\n```\nNotImplementedErrorTraceback (most recent call last)\n<ipython-input-30-2d58fa3887f0> in <module>()\n----> 1 data = dd.read_csv('hdfs://{0}/path.to/file.csv'.format(namenode))\n\n/usr/local/share/virtualenvs/cartman/lib/python2.7/site-packages/dask/dataframe/csv.pyc in read_csv(filename, blocksize, chunkbytes, collection, lineterminator, compression, sample, enforce, storage_options, **kwargs)\n\n/usr/local/share/virtualenvs/cartman/lib/python2.7/site-packages/dask/bytes/core.pyc in read_bytes(path, delimiter, not_zero, blocksize, sample, compression, **kwargs)\n\nNotImplementedError: Unknown protocol hdfs://mynode.com/path/to/file.csv\n```\n\n", "before_files": [{"content": "from __future__ import print_function, division, absolute_import\n\nimport io\n\nfrom toolz import merge\n\nfrom .compression import seekable_files, files as compress_files\nfrom .utils import SeekableFile\nfrom ..compatibility import PY2\nfrom ..delayed import delayed\nfrom ..utils import infer_storage_options, system_encoding\n\ndelayed = delayed(pure=True)\n\n# Global registration dictionaries for backend storage functions\n# See docstrings to functions below for more information\n_read_bytes = dict()\n_open_files = dict()\n_open_text_files = dict()\n\n\ndef read_bytes(urlpath, delimiter=None, not_zero=False, blocksize=2**27,\n sample=True, compression=None, **kwargs):\n \"\"\" Convert path to a list of delayed values\n\n The path may be a filename like ``'2015-01-01.csv'`` or a globstring\n like ``'2015-*-*.csv'``.\n\n The path may be preceded by a protocol, like ``s3://`` or ``hdfs://`` if\n those libraries are installed.\n\n This cleanly breaks data by a delimiter if given, so that block boundaries\n start directly after a delimiter and end on the delimiter.\n\n Parameters\n ----------\n urlpath: string\n Absolute or relative filepath, URL (may include protocols like\n ``s3://``), or globstring pointing to data.\n delimiter: bytes\n An optional delimiter, like ``b'\\n'`` on which to split blocks of bytes\n not_zero: force seek of start-of-file delimiter, discarding header\n blocksize: int (=128MB)\n Chunk size\n compression: string or None\n String like 'gzip' or 'xz'. 
Must support efficient random access.\n sample: bool, int\n Whether or not to return a sample from the first 10k bytes\n **kwargs: dict\n Extra options that make sense to a particular storage connection, e.g.\n host, port, username, password, etc.\n\n Examples\n --------\n >>> sample, blocks = read_bytes('2015-*-*.csv', delimiter=b'\\\\n') # doctest: +SKIP\n >>> sample, blocks = read_bytes('s3://bucket/2015-*-*.csv', delimiter=b'\\\\n') # doctest: +SKIP\n\n Returns\n -------\n 10kB sample header and list of ``dask.Delayed`` objects or list of lists of\n delayed objects if ``fn`` is a globstring.\n \"\"\"\n if compression is not None and compression not in compress_files:\n raise ValueError(\"Compression type %s not supported\" % compression)\n\n storage_options = infer_storage_options(urlpath,\n inherit_storage_options=kwargs)\n protocol = storage_options.pop('protocol')\n try:\n read_bytes = _read_bytes[protocol]\n except KeyError:\n raise NotImplementedError(\"Unknown protocol %s (%s)\" %\n (protocol, urlpath))\n\n return read_bytes(storage_options.pop('path'), delimiter=delimiter,\n not_zero=not_zero, blocksize=blocksize, sample=sample,\n compression=compression, **storage_options)\n\n\ndef open_files_by(open_files_backend, path, compression=None, **kwargs):\n \"\"\" Given open files backend and path return dask.delayed file-like objects\n\n NOTE: This is an internal helper function, please refer to\n :func:`open_files` documentation for more details.\n\n Parameters\n ----------\n path: string\n Filepath or globstring\n compression: string\n Compression to use. See ``dask.bytes.compression.files`` for options.\n **kwargs: dict\n Extra options that make sense to a particular storage connection, e.g.\n host, port, username, password, etc.\n\n Returns\n -------\n List of ``dask.delayed`` objects that compute to file-like objects\n \"\"\"\n files = open_files_backend(path, **kwargs)\n\n if compression:\n decompress = merge(seekable_files, compress_files)[compression]\n if PY2:\n files = [delayed(SeekableFile)(file) for file in files]\n files = [delayed(decompress)(file) for file in files]\n\n return files\n\n\ndef open_files(urlpath, compression=None, **kwargs):\n \"\"\" Given path return dask.delayed file-like objects\n\n Parameters\n ----------\n urlpath: string\n Absolute or relative filepath, URL (may include protocols like\n ``s3://``), or globstring pointing to data.\n compression: string\n Compression to use. 
See ``dask.bytes.compression.files`` for options.\n **kwargs: dict\n Extra options that make sense to a particular storage connection, e.g.\n host, port, username, password, etc.\n\n Examples\n --------\n >>> files = open_files('2015-*-*.csv') # doctest: +SKIP\n >>> files = open_files('s3://bucket/2015-*-*.csv.gz', compression='gzip') # doctest: +SKIP\n\n Returns\n -------\n List of ``dask.delayed`` objects that compute to file-like objects\n \"\"\"\n if compression is not None and compression not in compress_files:\n raise ValueError(\"Compression type %s not supported\" % compression)\n\n storage_options = infer_storage_options(urlpath,\n inherit_storage_options=kwargs)\n protocol = storage_options.pop('protocol')\n try:\n open_files_backend = _open_files[protocol]\n except KeyError:\n raise NotImplementedError(\"Unknown protocol %s (%s)\" %\n (protocol, urlpath))\n\n return open_files_by(open_files_backend, storage_options.pop('path'),\n compression=compression, **storage_options)\n\n\ndef open_text_files(urlpath, encoding=system_encoding, errors='strict',\n compression=None, **kwargs):\n \"\"\" Given path return dask.delayed file-like objects in text mode\n\n Parameters\n ----------\n urlpath: string\n Absolute or relative filepath, URL (may include protocols like\n ``s3://``), or globstring pointing to data.\n encoding: string\n errors: string\n compression: string\n Compression to use. See ``dask.bytes.compression.files`` for options.\n **kwargs: dict\n Extra options that make sense to a particular storage connection, e.g.\n host, port, username, password, etc.\n\n Examples\n --------\n >>> files = open_text_files('2015-*-*.csv', encoding='utf-8') # doctest: +SKIP\n >>> files = open_text_files('s3://bucket/2015-*-*.csv') # doctest: +SKIP\n\n Returns\n -------\n List of ``dask.delayed`` objects that compute to text file-like objects\n \"\"\"\n if compression is not None and compression not in compress_files:\n raise ValueError(\"Compression type %s not supported\" % compression)\n\n storage_options = infer_storage_options(urlpath,\n inherit_storage_options=kwargs)\n path = storage_options.pop('path')\n protocol = storage_options.pop('protocol')\n if protocol in _open_text_files and compression is None:\n return _open_text_files[protocol](path,\n encoding=encoding,\n errors=errors,\n **storage_options)\n elif protocol in _open_files:\n files = open_files_by(_open_files[protocol],\n path,\n compression=compression,\n **storage_options)\n if PY2:\n files = [delayed(SeekableFile)(file) for file in files]\n return [delayed(io.TextIOWrapper)(file, encoding=encoding,\n errors=errors) for file in files]\n else:\n raise NotImplementedError(\"Unknown protocol %s (%s)\" %\n (protocol, urlpath))\n", "path": "dask/bytes/core.py"}]}
3,030
470
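Note on the dask__dask-1325 row above: the golden_diff adds an `ensure_protocol` check that lazily imports the `s3` or `hdfs` backend and otherwise raises an actionable error. The sketch below restates that idea; `registered` stands in for dask's internal `_read_bytes`/`_open_files` registries and is an assumption of this snippet.

```python
def ensure_protocol(protocol, registered):
    """Fail early with a helpful message when a URL scheme has no backend."""
    if protocol in registered:
        return
    if protocol == 's3':
        try:
            import dask.s3  # noqa: F401  (registers the s3 backend, per the diff above)
        except ImportError:
            raise ImportError("Need to install `s3fs` for s3:// support")
    elif protocol == 'hdfs':
        try:
            import distributed.hdfs  # noqa: F401  (registers the hdfs backend)
        except ImportError:
            raise ImportError("Need `distributed` and `hdfs3` for hdfs:// support")
    else:
        raise ValueError("Unknown protocol %s" % protocol)
```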
gh_patches_debug_7817
rasdani/github-patches
git_diff
lhotse-speech__lhotse-1197
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> The 'speaker' field in the stcmds dataset is incorrect The file 20170001P00001I0001.wav is spoken by a female, and the file 20170001P00001A0001.wav is spoken by a male. But they have the same speaker ID P00001 </issue> <code> [start of lhotse/recipes/stcmds.py] 1 """ 2 Stcmds is an open-source Chinese Mandarin corpus by Surfingtech (www.surfing.ai), containing utterances from 855 speakers, 102600 utterances; 3 Publicly available on https://www.openslr.org/resources/38 4 ST-CMDS (110 hours) 5 6 """ 7 import logging 8 import os 9 import shutil 10 import tarfile 11 from collections import defaultdict 12 from pathlib import Path 13 from typing import Dict, Optional, Union 14 15 from tqdm.auto import tqdm 16 17 from lhotse import fix_manifests, validate_recordings_and_supervisions 18 from lhotse.audio import Recording, RecordingSet 19 from lhotse.supervision import SupervisionSegment, SupervisionSet 20 from lhotse.utils import Pathlike, resumable_download, safe_extract 21 22 23 def text_normalize(line: str): 24 """ 25 Modified from https://github.com/wenet-e2e/wenet/blob/main/examples/multi_cn/s0/local/stcmds_data_prep.sh#L42 26 paste -d' ' $data/utt.list $data/text.list |\ 27 sed 's/,//g' |\ 28 tr '[a-z]' '[A-Z]' |\ 29 awk '{if (NF > 1) print $0;}' > $data/train/text 30 """ 31 line = line.replace(",", "") 32 line = line.upper() 33 return line 34 35 36 def download_stcmds( 37 target_dir: Pathlike = ".", 38 force_download: bool = False, 39 base_url: str = "http://www.openslr.org/resources", 40 ) -> Path: 41 """ 42 Downdload and untar the dataset 43 :param target_dir: Pathlike, the path of the dir to storage the dataset. 44 :param force_download: Bool, if True, download the tars no matter if the tars exist. 45 :param base_url: str, the url of the OpenSLR resources. 46 :return: the path to downloaded and extracted directory with data. 47 """ 48 url = f"{base_url}/38" 49 target_dir = Path(target_dir) 50 target_dir.mkdir(parents=True, exist_ok=True) 51 corpus_dir = target_dir / "stcmds" 52 dataset_tar_name = "ST-CMDS-20170001_1-OS.tar.gz" 53 for tar_name in [dataset_tar_name]: 54 tar_path = target_dir / tar_name 55 extracted_dir = corpus_dir / tar_name[:-7] 56 completed_detector = extracted_dir / ".completed" 57 if completed_detector.is_file(): 58 logging.info(f"Skipping download of because {completed_detector} exists.") 59 continue 60 resumable_download( 61 f"{url}/{tar_name}", filename=tar_path, force_download=force_download 62 ) 63 shutil.rmtree(extracted_dir, ignore_errors=True) 64 with tarfile.open(tar_path) as tar: 65 safe_extract(tar, path=corpus_dir) 66 completed_detector.touch() 67 68 return corpus_dir 69 70 71 def prepare_stcmds( 72 corpus_dir: Pathlike, output_dir: Optional[Pathlike] = None 73 ) -> Dict[str, Dict[str, Union[RecordingSet, SupervisionSet]]]: 74 """ 75 Returns the manifests which consist of the Recordings and Supervisions 76 :param corpus_dir: Pathlike, the path of the data dir. 77 :param output_dir: Pathlike, the path where to write the manifests. 78 :return: a Dict whose key is the dataset part, and the value is Dicts with the keys 'recordings' and 'supervisions'. 
79 """ 80 corpus_dir = Path(corpus_dir) 81 assert corpus_dir.is_dir(), f"No such directory: {corpus_dir}" 82 if output_dir is not None: 83 output_dir = Path(output_dir) 84 output_dir.mkdir(parents=True, exist_ok=True) 85 86 path = corpus_dir / "ST-CMDS-20170001_1-OS" 87 transcript_dict = {} 88 for text_path in path.rglob("**/*.txt"): 89 idx = text_path.stem 90 logging.info(f"processing stcmds transcript {text_path}") 91 with open(text_path, "r", encoding="utf-8") as f: 92 for line in f: 93 line = text_normalize(line) 94 transcript_dict[idx] = line 95 96 manifests = defaultdict(dict) 97 dataset_parts = ["train"] 98 for part in tqdm( 99 dataset_parts, 100 desc="process stcmds audio, it needs waste about 2169 seconds time.", 101 ): 102 logging.info(f"Processing stcmds {part}") 103 recordings = [] 104 supervisions = [] 105 for audio_path in path.rglob("**/*.wav"): 106 idx = audio_path.stem 107 speaker = "".join(list(idx)[8:14]) 108 if idx not in transcript_dict: 109 logging.warning(f"No transcript: {idx}") 110 logging.warning(f"{audio_path} has no transcript") 111 continue 112 text = transcript_dict[idx] 113 if not audio_path.is_file(): 114 logging.warning(f"No such file: {audio_path}") 115 continue 116 recording = Recording.from_file(audio_path) 117 recordings.append(recording) 118 segment = SupervisionSegment( 119 id=idx, 120 recording_id=idx, 121 start=0.0, 122 duration=recording.duration, 123 channel=0, 124 language="Chinese", 125 speaker=speaker, 126 text=text.strip(), 127 ) 128 supervisions.append(segment) 129 130 recording_set = RecordingSet.from_recordings(recordings) 131 supervision_set = SupervisionSet.from_segments(supervisions) 132 133 recording_set, supervision_set = fix_manifests(recording_set, supervision_set) 134 validate_recordings_and_supervisions(recording_set, supervision_set) 135 136 if output_dir is not None: 137 supervision_set.to_file(output_dir / f"stcmds_supervisions_{part}.jsonl.gz") 138 recording_set.to_file(output_dir / f"stcmds_recordings_{part}.jsonl.gz") 139 140 manifests[part] = {"recordings": recording_set, "supervisions": supervision_set} 141 142 return manifests 143 [end of lhotse/recipes/stcmds.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/lhotse/recipes/stcmds.py b/lhotse/recipes/stcmds.py --- a/lhotse/recipes/stcmds.py +++ b/lhotse/recipes/stcmds.py @@ -104,7 +104,7 @@ supervisions = [] for audio_path in path.rglob("**/*.wav"): idx = audio_path.stem - speaker = "".join(list(idx)[8:14]) + speaker = "".join(list(idx)[8:15]) if idx not in transcript_dict: logging.warning(f"No transcript: {idx}") logging.warning(f"{audio_path} has no transcript")
{"golden_diff": "diff --git a/lhotse/recipes/stcmds.py b/lhotse/recipes/stcmds.py\n--- a/lhotse/recipes/stcmds.py\n+++ b/lhotse/recipes/stcmds.py\n@@ -104,7 +104,7 @@\n supervisions = []\n for audio_path in path.rglob(\"**/*.wav\"):\n idx = audio_path.stem\n- speaker = \"\".join(list(idx)[8:14])\n+ speaker = \"\".join(list(idx)[8:15])\n if idx not in transcript_dict:\n logging.warning(f\"No transcript: {idx}\")\n logging.warning(f\"{audio_path} has no transcript\")\n", "issue": "The 'speaker' field in the stcmds dataset is incorrect\nThe file 20170001P00001I0001.wav is spoken by a female, \r\nand the file 20170001P00001A0001.wav is spoken by a male.\r\nBut they have the same speaker ID P00001\n", "before_files": [{"content": "\"\"\"\nStcmds is an open-source Chinese Mandarin corpus by Surfingtech (www.surfing.ai), containing utterances from 855 speakers, 102600 utterances;\nPublicly available on https://www.openslr.org/resources/38\nST-CMDS (110 hours)\n\n\"\"\"\nimport logging\nimport os\nimport shutil\nimport tarfile\nfrom collections import defaultdict\nfrom pathlib import Path\nfrom typing import Dict, Optional, Union\n\nfrom tqdm.auto import tqdm\n\nfrom lhotse import fix_manifests, validate_recordings_and_supervisions\nfrom lhotse.audio import Recording, RecordingSet\nfrom lhotse.supervision import SupervisionSegment, SupervisionSet\nfrom lhotse.utils import Pathlike, resumable_download, safe_extract\n\n\ndef text_normalize(line: str):\n \"\"\"\n Modified from https://github.com/wenet-e2e/wenet/blob/main/examples/multi_cn/s0/local/stcmds_data_prep.sh#L42\n paste -d' ' $data/utt.list $data/text.list |\\\n sed 's/\uff0c//g' |\\\n tr '[a-z]' '[A-Z]' |\\\n awk '{if (NF > 1) print $0;}' > $data/train/text\n \"\"\"\n line = line.replace(\"\uff0c\", \"\")\n line = line.upper()\n return line\n\n\ndef download_stcmds(\n target_dir: Pathlike = \".\",\n force_download: bool = False,\n base_url: str = \"http://www.openslr.org/resources\",\n) -> Path:\n \"\"\"\n Downdload and untar the dataset\n :param target_dir: Pathlike, the path of the dir to storage the dataset.\n :param force_download: Bool, if True, download the tars no matter if the tars exist.\n :param base_url: str, the url of the OpenSLR resources.\n :return: the path to downloaded and extracted directory with data.\n \"\"\"\n url = f\"{base_url}/38\"\n target_dir = Path(target_dir)\n target_dir.mkdir(parents=True, exist_ok=True)\n corpus_dir = target_dir / \"stcmds\"\n dataset_tar_name = \"ST-CMDS-20170001_1-OS.tar.gz\"\n for tar_name in [dataset_tar_name]:\n tar_path = target_dir / tar_name\n extracted_dir = corpus_dir / tar_name[:-7]\n completed_detector = extracted_dir / \".completed\"\n if completed_detector.is_file():\n logging.info(f\"Skipping download of because {completed_detector} exists.\")\n continue\n resumable_download(\n f\"{url}/{tar_name}\", filename=tar_path, force_download=force_download\n )\n shutil.rmtree(extracted_dir, ignore_errors=True)\n with tarfile.open(tar_path) as tar:\n safe_extract(tar, path=corpus_dir)\n completed_detector.touch()\n\n return corpus_dir\n\n\ndef prepare_stcmds(\n corpus_dir: Pathlike, output_dir: Optional[Pathlike] = None\n) -> Dict[str, Dict[str, Union[RecordingSet, SupervisionSet]]]:\n \"\"\"\n Returns the manifests which consist of the Recordings and Supervisions\n :param corpus_dir: Pathlike, the path of the data dir.\n :param output_dir: Pathlike, the path where to write the manifests.\n :return: a Dict whose key is the dataset part, and the value is Dicts with the keys 'recordings' 
and 'supervisions'.\n \"\"\"\n corpus_dir = Path(corpus_dir)\n assert corpus_dir.is_dir(), f\"No such directory: {corpus_dir}\"\n if output_dir is not None:\n output_dir = Path(output_dir)\n output_dir.mkdir(parents=True, exist_ok=True)\n\n path = corpus_dir / \"ST-CMDS-20170001_1-OS\"\n transcript_dict = {}\n for text_path in path.rglob(\"**/*.txt\"):\n idx = text_path.stem\n logging.info(f\"processing stcmds transcript {text_path}\")\n with open(text_path, \"r\", encoding=\"utf-8\") as f:\n for line in f:\n line = text_normalize(line)\n transcript_dict[idx] = line\n\n manifests = defaultdict(dict)\n dataset_parts = [\"train\"]\n for part in tqdm(\n dataset_parts,\n desc=\"process stcmds audio, it needs waste about 2169 seconds time.\",\n ):\n logging.info(f\"Processing stcmds {part}\")\n recordings = []\n supervisions = []\n for audio_path in path.rglob(\"**/*.wav\"):\n idx = audio_path.stem\n speaker = \"\".join(list(idx)[8:14])\n if idx not in transcript_dict:\n logging.warning(f\"No transcript: {idx}\")\n logging.warning(f\"{audio_path} has no transcript\")\n continue\n text = transcript_dict[idx]\n if not audio_path.is_file():\n logging.warning(f\"No such file: {audio_path}\")\n continue\n recording = Recording.from_file(audio_path)\n recordings.append(recording)\n segment = SupervisionSegment(\n id=idx,\n recording_id=idx,\n start=0.0,\n duration=recording.duration,\n channel=0,\n language=\"Chinese\",\n speaker=speaker,\n text=text.strip(),\n )\n supervisions.append(segment)\n\n recording_set = RecordingSet.from_recordings(recordings)\n supervision_set = SupervisionSet.from_segments(supervisions)\n\n recording_set, supervision_set = fix_manifests(recording_set, supervision_set)\n validate_recordings_and_supervisions(recording_set, supervision_set)\n\n if output_dir is not None:\n supervision_set.to_file(output_dir / f\"stcmds_supervisions_{part}.jsonl.gz\")\n recording_set.to_file(output_dir / f\"stcmds_recordings_{part}.jsonl.gz\")\n\n manifests[part] = {\"recordings\": recording_set, \"supervisions\": supervision_set}\n\n return manifests\n", "path": "lhotse/recipes/stcmds.py"}]}
2,241
140
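The stcmds fix above hinges on one extra character in the slice: stems such as `20170001P00001A0001` carry a per-speaker letter at index 14, so `[8:14]` collapses distinct speakers into one ID while `[8:15]` keeps them apart. A minimal sketch of that behaviour, using the two filename stems quoted in the issue (plain string slicing stands in for the recipe's `"".join(list(idx)[8:14])`, which is equivalent):

```python
# Filename stems quoted in the issue; index 14 holds the letter that separates the two speakers.
female = "20170001P00001I0001"
male = "20170001P00001A0001"

print(female[8:14], male[8:14])   # P00001 P00001   (old slice: both map to the same speaker ID)
print(female[8:15], male[8:15])   # P00001I P00001A (patched slice: the two speakers stay distinct)
```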
gh_patches_debug_38811
rasdani/github-patches
git_diff
xonsh__xonsh-4673
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> make promp.env.env_name venv --prompt aware (set in pyvenv.cfg) ## xonfig <details> ``` > xonfig <xonsh-code>:1:0 - xonfig <xonsh-code>:1:0 + ![xonfig] +------------------+----------------------+ | xonsh | 0.11.0 | | Git SHA | adfa60ea | | Commit Date | Feb 11 14:53:00 2022 | | Python | 3.9.7 | | PLY | 3.11 | | have readline | True | | prompt toolkit | None | | shell type | readline | | history backend | json | | pygments | 2.11.2 | | on posix | True | | on linux | True | | distro | unknown | | on wsl | False | | on darwin | False | | on windows | False | | on cygwin | False | | on msys2 | False | | is superuser | False | | default encoding | utf-8 | | xonsh encoding | utf-8 | | encoding errors | surrogateescape | | on jupyter | False | | jupyter kernel | None | | xontrib 1 | coreutils | | xontrib 2 | vox | | xontrib 3 | voxapi | | RC file 1 | /home/ob/.xonshrc | +------------------+----------------------+ ``` </details> ## Expected Behavior When activating a venv via `vox activate` the name should be set to the value of `prompt` key in `pyvenv.cfg` if present (file and key) - see https://docs.python.org/3/library/venv.html. ## Current Behavior The prompt is always set to name of venv directory independent of prompt settings. ## Steps to Reproduce ```shell $ python -m venv --prompt "MY SPECIAL PROMPT" .venv $ cat .venv/pyvenv.cfg | grep prompt prompt = 'MY SPECIAL PROMPT' vox activate ./.venv ``` new prompt looks like this: `[17:58:10] (.venv) ob@ob1 ~/oss/xonsh dropthedot|βœ“` but should look like this: `[17:58:10] (MY SPECIAL PROMPT) ob@ob1 ~/oss/xonsh dropthedot|βœ“` ## Also add `--prompt` to `vox new`? If this is done, maybe a good idea would be to also add `--prompt` to `vox new` to make this possible on creation (also in the interest of symmetry), but it seems like the common pattern for many xonsh users is to have virtualenvs in `~/.virtualenv` with the venv folder name being the same as the project, therefore automatically having a meaningful prompt, so this does not seem to be an urgently wanted feature for most. I want to prepare a PR for the "passive" part of the functionality, but could also have a stab at adding `--prompt` to `vox new` at a later date. My main itch would be to respect `prompt` from already existing venvs first. ## For community ⬇️ **Please click the πŸ‘ reaction instead of leaving a `+1` or πŸ‘ comment** </issue> <code> [start of xonsh/prompt/env.py] 1 """Prompt formatter for virtualenv and others""" 2 3 import os 4 5 from xonsh.built_ins import XSH 6 7 8 def find_env_name(): 9 """Finds the current environment name from $VIRTUAL_ENV or 10 $CONDA_DEFAULT_ENV if that is set. 11 """ 12 env_path = XSH.env.get("VIRTUAL_ENV", "") 13 if env_path: 14 env_name = os.path.basename(env_path) 15 else: 16 env_name = XSH.env.get("CONDA_DEFAULT_ENV", "") 17 return env_name 18 19 20 def env_name(): 21 """Returns the current env_name if it non-empty, surrounded by the 22 ``{env_prefix}`` and ``{env_postfix}`` fields. 
23 """ 24 env_name = find_env_name() 25 if XSH.env.get("VIRTUAL_ENV_DISABLE_PROMPT") or not env_name: 26 # env name prompt printing disabled, or no environment; just return 27 return 28 29 venv_prompt = XSH.env.get("VIRTUAL_ENV_PROMPT") 30 if venv_prompt is not None: 31 return venv_prompt 32 else: 33 pf = XSH.shell.prompt_formatter 34 pre = pf._get_field_value("env_prefix") 35 post = pf._get_field_value("env_postfix") 36 return pre + env_name + post 37 38 39 def vte_new_tab_cwd(): 40 """This prints an escape sequence that tells VTE terminals the hostname 41 and pwd. This should not be needed in most cases, but sometimes is for 42 certain Linux terminals that do not read the PWD from the environment 43 on startup. Note that this does not return a string, it simply prints 44 and flushes the escape sequence to stdout directly. 45 """ 46 env = XSH.env 47 t = "\033]7;file://{}{}\007" 48 s = t.format(env.get("HOSTNAME"), env.get("PWD")) 49 print(s, end="", flush=True) 50 [end of xonsh/prompt/env.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/xonsh/prompt/env.py b/xonsh/prompt/env.py --- a/xonsh/prompt/env.py +++ b/xonsh/prompt/env.py @@ -1,42 +1,70 @@ """Prompt formatter for virtualenv and others""" - -import os +import functools +import re +from pathlib import Path +from typing import Optional from xonsh.built_ins import XSH -def find_env_name(): - """Finds the current environment name from $VIRTUAL_ENV or - $CONDA_DEFAULT_ENV if that is set. +def find_env_name() -> Optional[str]: + """Find current environment name from available sources. + + If ``$VIRTUAL_ENV`` is set, it is determined from the prompt setting in + ``<venv>/pyvenv.cfg`` or from the folder name of the environment. + + Otherwise - if it is set - from ``$CONDA_DEFAULT_ENV``. + """ + virtual_env = XSH.env.get("VIRTUAL_ENV") + if virtual_env: + name = _determine_env_name(virtual_env) + if name: + return name + conda_default_env = XSH.env.get("CONDA_DEFAULT_ENV") + if conda_default_env: + return conda_default_env + + +def env_name() -> str: + """Build env_name based on different sources. Respect order of precedence. + + Name from VIRTUAL_ENV_PROMPT will be used as-is. + Names from other sources are surrounded with ``{env_prefix}`` and + ``{env_postfix}`` fields. """ - env_path = XSH.env.get("VIRTUAL_ENV", "") - if env_path: - env_name = os.path.basename(env_path) - else: - env_name = XSH.env.get("CONDA_DEFAULT_ENV", "") - return env_name + if XSH.env.get("VIRTUAL_ENV_DISABLE_PROMPT"): + return "" + virtual_env_prompt = XSH.env.get("VIRTUAL_ENV_PROMPT") + if virtual_env_prompt: + return virtual_env_prompt + found_envname = find_env_name() + return _surround_env_name(found_envname) if found_envname else "" + [email protected]_cache(maxsize=5) +def _determine_env_name(virtual_env: str) -> str: + """Use prompt setting from pyvenv.cfg or basename of virtual_env. -def env_name(): - """Returns the current env_name if it non-empty, surrounded by the - ``{env_prefix}`` and ``{env_postfix}`` fields. + Tries to be resilient to subtle changes in whitespace and quoting in the + configuration file format as it adheres to no clear standard. """ - env_name = find_env_name() - if XSH.env.get("VIRTUAL_ENV_DISABLE_PROMPT") or not env_name: - # env name prompt printing disabled, or no environment; just return - return - - venv_prompt = XSH.env.get("VIRTUAL_ENV_PROMPT") - if venv_prompt is not None: - return venv_prompt - else: - pf = XSH.shell.prompt_formatter - pre = pf._get_field_value("env_prefix") - post = pf._get_field_value("env_postfix") - return pre + env_name + post - - -def vte_new_tab_cwd(): + venv_path = Path(virtual_env) + pyvenv_cfg = venv_path / "pyvenv.cfg" + if pyvenv_cfg.is_file(): + match = re.search(r"prompt\s*=\s*(.*)", pyvenv_cfg.read_text()) + if match: + return match.group(1).strip().lstrip("'\"").rstrip("'\"") + return venv_path.name + + +def _surround_env_name(name: str) -> str: + pf = XSH.shell.prompt_formatter + pre = pf._get_field_value("env_prefix") + post = pf._get_field_value("env_postfix") + return f"{pre}{name}{post}" + + +def vte_new_tab_cwd() -> None: """This prints an escape sequence that tells VTE terminals the hostname and pwd. This should not be needed in most cases, but sometimes is for certain Linux terminals that do not read the PWD from the environment
{"golden_diff": "diff --git a/xonsh/prompt/env.py b/xonsh/prompt/env.py\n--- a/xonsh/prompt/env.py\n+++ b/xonsh/prompt/env.py\n@@ -1,42 +1,70 @@\n \"\"\"Prompt formatter for virtualenv and others\"\"\"\n-\n-import os\n+import functools\n+import re\n+from pathlib import Path\n+from typing import Optional\n \n from xonsh.built_ins import XSH\n \n \n-def find_env_name():\n- \"\"\"Finds the current environment name from $VIRTUAL_ENV or\n- $CONDA_DEFAULT_ENV if that is set.\n+def find_env_name() -> Optional[str]:\n+ \"\"\"Find current environment name from available sources.\n+\n+ If ``$VIRTUAL_ENV`` is set, it is determined from the prompt setting in\n+ ``<venv>/pyvenv.cfg`` or from the folder name of the environment.\n+\n+ Otherwise - if it is set - from ``$CONDA_DEFAULT_ENV``.\n+ \"\"\"\n+ virtual_env = XSH.env.get(\"VIRTUAL_ENV\")\n+ if virtual_env:\n+ name = _determine_env_name(virtual_env)\n+ if name:\n+ return name\n+ conda_default_env = XSH.env.get(\"CONDA_DEFAULT_ENV\")\n+ if conda_default_env:\n+ return conda_default_env\n+\n+\n+def env_name() -> str:\n+ \"\"\"Build env_name based on different sources. Respect order of precedence.\n+\n+ Name from VIRTUAL_ENV_PROMPT will be used as-is.\n+ Names from other sources are surrounded with ``{env_prefix}`` and\n+ ``{env_postfix}`` fields.\n \"\"\"\n- env_path = XSH.env.get(\"VIRTUAL_ENV\", \"\")\n- if env_path:\n- env_name = os.path.basename(env_path)\n- else:\n- env_name = XSH.env.get(\"CONDA_DEFAULT_ENV\", \"\")\n- return env_name\n+ if XSH.env.get(\"VIRTUAL_ENV_DISABLE_PROMPT\"):\n+ return \"\"\n+ virtual_env_prompt = XSH.env.get(\"VIRTUAL_ENV_PROMPT\")\n+ if virtual_env_prompt:\n+ return virtual_env_prompt\n+ found_envname = find_env_name()\n+ return _surround_env_name(found_envname) if found_envname else \"\"\n+\n \[email protected]_cache(maxsize=5)\n+def _determine_env_name(virtual_env: str) -> str:\n+ \"\"\"Use prompt setting from pyvenv.cfg or basename of virtual_env.\n \n-def env_name():\n- \"\"\"Returns the current env_name if it non-empty, surrounded by the\n- ``{env_prefix}`` and ``{env_postfix}`` fields.\n+ Tries to be resilient to subtle changes in whitespace and quoting in the\n+ configuration file format as it adheres to no clear standard.\n \"\"\"\n- env_name = find_env_name()\n- if XSH.env.get(\"VIRTUAL_ENV_DISABLE_PROMPT\") or not env_name:\n- # env name prompt printing disabled, or no environment; just return\n- return\n-\n- venv_prompt = XSH.env.get(\"VIRTUAL_ENV_PROMPT\")\n- if venv_prompt is not None:\n- return venv_prompt\n- else:\n- pf = XSH.shell.prompt_formatter\n- pre = pf._get_field_value(\"env_prefix\")\n- post = pf._get_field_value(\"env_postfix\")\n- return pre + env_name + post\n-\n-\n-def vte_new_tab_cwd():\n+ venv_path = Path(virtual_env)\n+ pyvenv_cfg = venv_path / \"pyvenv.cfg\"\n+ if pyvenv_cfg.is_file():\n+ match = re.search(r\"prompt\\s*=\\s*(.*)\", pyvenv_cfg.read_text())\n+ if match:\n+ return match.group(1).strip().lstrip(\"'\\\"\").rstrip(\"'\\\"\")\n+ return venv_path.name\n+\n+\n+def _surround_env_name(name: str) -> str:\n+ pf = XSH.shell.prompt_formatter\n+ pre = pf._get_field_value(\"env_prefix\")\n+ post = pf._get_field_value(\"env_postfix\")\n+ return f\"{pre}{name}{post}\"\n+\n+\n+def vte_new_tab_cwd() -> None:\n \"\"\"This prints an escape sequence that tells VTE terminals the hostname\n and pwd. 
This should not be needed in most cases, but sometimes is for\n certain Linux terminals that do not read the PWD from the environment\n", "issue": "make promp.env.env_name venv --prompt aware (set in pyvenv.cfg)\n## xonfig\r\n\r\n<details>\r\n\r\n```\r\n> xonfig\r\n<xonsh-code>:1:0 - xonfig\r\n<xonsh-code>:1:0 + ![xonfig]\r\n+------------------+----------------------+\r\n| xonsh | 0.11.0 |\r\n| Git SHA | adfa60ea |\r\n| Commit Date | Feb 11 14:53:00 2022 |\r\n| Python | 3.9.7 |\r\n| PLY | 3.11 |\r\n| have readline | True |\r\n| prompt toolkit | None |\r\n| shell type | readline |\r\n| history backend | json |\r\n| pygments | 2.11.2 |\r\n| on posix | True |\r\n| on linux | True |\r\n| distro | unknown |\r\n| on wsl | False |\r\n| on darwin | False |\r\n| on windows | False |\r\n| on cygwin | False |\r\n| on msys2 | False |\r\n| is superuser | False |\r\n| default encoding | utf-8 |\r\n| xonsh encoding | utf-8 |\r\n| encoding errors | surrogateescape |\r\n| on jupyter | False |\r\n| jupyter kernel | None |\r\n| xontrib 1 | coreutils |\r\n| xontrib 2 | vox |\r\n| xontrib 3 | voxapi |\r\n| RC file 1 | /home/ob/.xonshrc |\r\n+------------------+----------------------+\r\n```\r\n\r\n</details>\r\n\r\n## Expected Behavior\r\n\r\nWhen activating a venv via `vox activate` the name should be set to the value of `prompt` key in `pyvenv.cfg` if present (file and key) - see https://docs.python.org/3/library/venv.html.\r\n\r\n## Current Behavior\r\n\r\nThe prompt is always set to name of venv directory independent of prompt settings.\r\n\r\n## Steps to Reproduce\r\n\r\n```shell\r\n$ python -m venv --prompt \"MY SPECIAL PROMPT\" .venv\r\n$ cat .venv/pyvenv.cfg | grep prompt\r\nprompt = 'MY SPECIAL PROMPT'\r\nvox activate ./.venv\r\n```\r\n\r\nnew prompt looks like this:\r\n`[17:58:10] (.venv) ob@ob1 ~/oss/xonsh dropthedot|\u2713`\r\n\r\nbut should look like this:\r\n`[17:58:10] (MY SPECIAL PROMPT) ob@ob1 ~/oss/xonsh dropthedot|\u2713`\r\n\r\n## Also add `--prompt` to `vox new`?\r\nIf this is done, maybe a good idea would be to also add `--prompt` to `vox new` to make this possible on creation (also in the interest of symmetry), but it seems like the common pattern for many xonsh users is to have virtualenvs in `~/.virtualenv` with the venv folder name being the same as the project, therefore automatically having a meaningful prompt, so this does not seem to be an urgently wanted feature for most.\r\n\r\nI want to prepare a PR for the \"passive\" part of the functionality, but could also have a stab at adding `--prompt` to `vox new` at a later date. 
My main itch would be to respect `prompt` from already existing venvs first.\r\n\r\n## For community\r\n\u2b07\ufe0f **Please click the \ud83d\udc4d reaction instead of leaving a `+1` or \ud83d\udc4d comment**\r\n\n", "before_files": [{"content": "\"\"\"Prompt formatter for virtualenv and others\"\"\"\n\nimport os\n\nfrom xonsh.built_ins import XSH\n\n\ndef find_env_name():\n \"\"\"Finds the current environment name from $VIRTUAL_ENV or\n $CONDA_DEFAULT_ENV if that is set.\n \"\"\"\n env_path = XSH.env.get(\"VIRTUAL_ENV\", \"\")\n if env_path:\n env_name = os.path.basename(env_path)\n else:\n env_name = XSH.env.get(\"CONDA_DEFAULT_ENV\", \"\")\n return env_name\n\n\ndef env_name():\n \"\"\"Returns the current env_name if it non-empty, surrounded by the\n ``{env_prefix}`` and ``{env_postfix}`` fields.\n \"\"\"\n env_name = find_env_name()\n if XSH.env.get(\"VIRTUAL_ENV_DISABLE_PROMPT\") or not env_name:\n # env name prompt printing disabled, or no environment; just return\n return\n\n venv_prompt = XSH.env.get(\"VIRTUAL_ENV_PROMPT\")\n if venv_prompt is not None:\n return venv_prompt\n else:\n pf = XSH.shell.prompt_formatter\n pre = pf._get_field_value(\"env_prefix\")\n post = pf._get_field_value(\"env_postfix\")\n return pre + env_name + post\n\n\ndef vte_new_tab_cwd():\n \"\"\"This prints an escape sequence that tells VTE terminals the hostname\n and pwd. This should not be needed in most cases, but sometimes is for\n certain Linux terminals that do not read the PWD from the environment\n on startup. Note that this does not return a string, it simply prints\n and flushes the escape sequence to stdout directly.\n \"\"\"\n env = XSH.env\n t = \"\\033]7;file://{}{}\\007\"\n s = t.format(env.get(\"HOSTNAME\"), env.get(\"PWD\"))\n print(s, end=\"\", flush=True)\n", "path": "xonsh/prompt/env.py"}]}
1,826
977
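The xonsh patch above derives the environment name by scanning `pyvenv.cfg` for a `prompt` line and stripping the surrounding quotes. A minimal sketch of that parsing step in isolation, assuming config text modelled on the issue's reproducer (the `home` line is an illustrative extra, not taken from the source):

```python
import re

# Config text modelled on the issue's reproducer; the "home" line is illustrative only.
cfg_text = "home = /usr/bin\nprompt = 'MY SPECIAL PROMPT'\n"

match = re.search(r"prompt\s*=\s*(.*)", cfg_text)
if match:
    # Same post-processing as the patch: trim whitespace, then surrounding quote characters.
    name = match.group(1).strip().lstrip("'\"").rstrip("'\"")
    print(name)  # -> MY SPECIAL PROMPT
```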
gh_patches_debug_50236
rasdani/github-patches
git_diff
pex-tool__pex-577
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Release 1.4.8 User facing issues on the docket: + non-zip_safe pex extraction has a race on renaming #557 + Execute pex archive at runtime with -m parameter #547 + OSX's python 2.7.10 interpreter reports a bad local platform, bdists fail to resolve #523 + `pex.resolver.resolve` does the wrong thing when given an `interpreter` and no `platform` #511 + [Errno 13] Permission denied when executing with bdist_pex #570 </issue> <code> [start of pex/version.py] 1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). 2 # Licensed under the Apache License, Version 2.0 (see LICENSE). 3 4 __version__ = '1.4.7' 5 6 # Versions 34.0.0 through 35.0.2 (last pre-36.0.0) de-vendored dependencies which causes problems 7 # for pex code so we exclude that range. 8 SETUPTOOLS_REQUIREMENT = 'setuptools>=20.3,<41,!=34.*,!=35.*' 9 10 WHEEL_REQUIREMENT = 'wheel>=0.26.0,<0.32' 11 [end of pex/version.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pex/version.py b/pex/version.py --- a/pex/version.py +++ b/pex/version.py @@ -1,7 +1,7 @@ # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). -__version__ = '1.4.7' +__version__ = '1.4.8' # Versions 34.0.0 through 35.0.2 (last pre-36.0.0) de-vendored dependencies which causes problems # for pex code so we exclude that range.
{"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,7 +1,7 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = '1.4.7'\n+__version__ = '1.4.8'\n \n # Versions 34.0.0 through 35.0.2 (last pre-36.0.0) de-vendored dependencies which causes problems\n # for pex code so we exclude that range.\n", "issue": "Release 1.4.8\nUser facing issues on the docket:\r\n+ non-zip_safe pex extraction has a race on renaming #557\r\n+ Execute pex archive at runtime with -m parameter #547\r\n+ OSX's python 2.7.10 interpreter reports a bad local platform, bdists fail to resolve #523 \r\n+ `pex.resolver.resolve` does the wrong thing when given an `interpreter` and no `platform` #511\r\n+ [Errno 13] Permission denied when executing with bdist_pex #570\r\n\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = '1.4.7'\n\n# Versions 34.0.0 through 35.0.2 (last pre-36.0.0) de-vendored dependencies which causes problems\n# for pex code so we exclude that range.\nSETUPTOOLS_REQUIREMENT = 'setuptools>=20.3,<41,!=34.*,!=35.*'\n\nWHEEL_REQUIREMENT = 'wheel>=0.26.0,<0.32'\n", "path": "pex/version.py"}]}
817
143
gh_patches_debug_35556
rasdani/github-patches
git_diff
streamlink__streamlink-5279
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> plugins.nbcnews: not live ### Checklist - [X] This is a plugin issue and not a different kind of issue - [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink) - [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22) - [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master) ### Streamlink version Latest build from the master branch ### Description Every time it start the same video but on the site it is live. Something was changed on the site few days ago. ### Debug log ```text [cli][debug] OS: Linux-6.1.0-7-amd64-x86_64-with-glibc2.36 [cli][debug] Python: 3.11.2 [cli][debug] Streamlink: 5.3.1+84.g7978e4ab [cli][debug] Dependencies: [cli][debug] certifi: 2022.9.24 [cli][debug] isodate: 0.6.1 [cli][debug] lxml: 4.9.2 [cli][debug] pycountry: 22.3.5 [cli][debug] pycryptodome: 3.16.0 [cli][debug] PySocks: 1.7.1 [cli][debug] requests: 2.28.1 [cli][debug] urllib3: 1.26.12 [cli][debug] websocket-client: 1.2.3 [cli][debug] esprima: 4.0.1 [cli][debug] importlib-metadata: 4.12.0 [cli][debug] Arguments: [cli][debug] url=https://www.nbcnews.com/now/ [cli][debug] --loglevel=debug [cli][debug] --player=mpv [cli][debug] --default-stream=['best'] [cli][info] Found matching plugin nbcnews for URL https://www.nbcnews.com/now/ [plugins.nbcnews][debug] API ID: 2007524 [utils.l10n][debug] Language code: ru_RU [cli][info] Available streams: 144p_alt (worst), 144p, 216p_alt, 216p, 288p_alt, 288p, 360p_alt, 360p, 504p_alt, 504p, 576p_alt, 576p, 720p_alt, 720p, 1080p_alt, 1080p (best) [cli][info] Opening stream: 1080p (hls) [cli][info] Starting player: mpv [stream.hls][debug] Reloading playlist [cli][debug] Pre-buffering 8192 bytes [stream.hls][debug] First Sequence: 1786423; Last Sequence: 1786722 [stream.hls][debug] Start offset: 0; Duration: None; Start Sequence: 1786423; End Sequence: 1786722 [stream.hls][debug] Adding segment 1786423 to queue ``` </issue> <code> [start of src/streamlink/plugins/nbcnews.py] 1 """ 2 $description 24-hour world, US and local news channel, based in the United States of America. 
3 $url nbcnews.com 4 $type live 5 """ 6 7 import logging 8 import re 9 10 from streamlink.plugin import Plugin, pluginmatcher 11 from streamlink.plugin.api import validate 12 from streamlink.stream.hls import HLSStream 13 14 15 log = logging.getLogger(__name__) 16 17 18 @pluginmatcher(re.compile( 19 r"https?://(?:www\.)?nbcnews\.com/now", 20 )) 21 class NBCNews(Plugin): 22 URL_API = "https://api-leap.nbcsports.com/feeds/assets/{}?application=NBCNews&format=nbc-player&platform=desktop" 23 URL_TOKEN = "https://tokens.playmakerservices.com/" 24 25 title = "NBC News Now" 26 27 def _get_streams(self): 28 self.id = self.session.http.get( 29 self.url, 30 schema=validate.Schema( 31 validate.parse_html(), 32 validate.xml_xpath_string(".//script[@type='application/ld+json'][1]/text()"), 33 validate.none_or_all( 34 validate.parse_json(), 35 {"embedUrl": validate.url()}, 36 validate.get("embedUrl"), 37 validate.transform(lambda embed_url: embed_url.split("/")[-1]), 38 ), 39 ), 40 ) 41 if self.id is None: 42 return 43 log.debug(f"API ID: {self.id}") 44 45 stream = self.session.http.get( 46 self.URL_API.format(self.id), 47 schema=validate.Schema( 48 validate.parse_json(), 49 { 50 "videoSources": [{ 51 "cdnSources": { 52 "primary": [{ 53 "sourceUrl": validate.url(path=validate.endswith(".m3u8")), 54 }], 55 }, 56 }], 57 }, 58 validate.get(("videoSources", 0, "cdnSources", "primary", 0, "sourceUrl")), 59 ), 60 ) 61 62 url = self.session.http.post( 63 self.URL_TOKEN, 64 json={ 65 "requestorId": "nbcnews", 66 "pid": self.id, 67 "application": "NBCSports", 68 "version": "v1", 69 "platform": "desktop", 70 "token": "", 71 "resourceId": "", 72 "inPath": "false", 73 "authenticationType": "unauth", 74 "cdn": "akamai", 75 "url": stream, 76 }, 77 schema=validate.Schema( 78 validate.parse_json(), 79 { 80 "akamai": [{ 81 "tokenizedUrl": validate.url(), 82 }], 83 }, 84 validate.get(("akamai", 0, "tokenizedUrl")), 85 ), 86 ) 87 return HLSStream.parse_variant_playlist(self.session, url) 88 89 90 __plugin__ = NBCNews 91 [end of src/streamlink/plugins/nbcnews.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/src/streamlink/plugins/nbcnews.py b/src/streamlink/plugins/nbcnews.py deleted file mode 100644 --- a/src/streamlink/plugins/nbcnews.py +++ /dev/null @@ -1,90 +0,0 @@ -""" -$description 24-hour world, US and local news channel, based in the United States of America. -$url nbcnews.com -$type live -""" - -import logging -import re - -from streamlink.plugin import Plugin, pluginmatcher -from streamlink.plugin.api import validate -from streamlink.stream.hls import HLSStream - - -log = logging.getLogger(__name__) - - -@pluginmatcher(re.compile( - r"https?://(?:www\.)?nbcnews\.com/now", -)) -class NBCNews(Plugin): - URL_API = "https://api-leap.nbcsports.com/feeds/assets/{}?application=NBCNews&format=nbc-player&platform=desktop" - URL_TOKEN = "https://tokens.playmakerservices.com/" - - title = "NBC News Now" - - def _get_streams(self): - self.id = self.session.http.get( - self.url, - schema=validate.Schema( - validate.parse_html(), - validate.xml_xpath_string(".//script[@type='application/ld+json'][1]/text()"), - validate.none_or_all( - validate.parse_json(), - {"embedUrl": validate.url()}, - validate.get("embedUrl"), - validate.transform(lambda embed_url: embed_url.split("/")[-1]), - ), - ), - ) - if self.id is None: - return - log.debug(f"API ID: {self.id}") - - stream = self.session.http.get( - self.URL_API.format(self.id), - schema=validate.Schema( - validate.parse_json(), - { - "videoSources": [{ - "cdnSources": { - "primary": [{ - "sourceUrl": validate.url(path=validate.endswith(".m3u8")), - }], - }, - }], - }, - validate.get(("videoSources", 0, "cdnSources", "primary", 0, "sourceUrl")), - ), - ) - - url = self.session.http.post( - self.URL_TOKEN, - json={ - "requestorId": "nbcnews", - "pid": self.id, - "application": "NBCSports", - "version": "v1", - "platform": "desktop", - "token": "", - "resourceId": "", - "inPath": "false", - "authenticationType": "unauth", - "cdn": "akamai", - "url": stream, - }, - schema=validate.Schema( - validate.parse_json(), - { - "akamai": [{ - "tokenizedUrl": validate.url(), - }], - }, - validate.get(("akamai", 0, "tokenizedUrl")), - ), - ) - return HLSStream.parse_variant_playlist(self.session, url) - - -__plugin__ = NBCNews
{"golden_diff": "diff --git a/src/streamlink/plugins/nbcnews.py b/src/streamlink/plugins/nbcnews.py\ndeleted file mode 100644\n--- a/src/streamlink/plugins/nbcnews.py\n+++ /dev/null\n@@ -1,90 +0,0 @@\n-\"\"\"\n-$description 24-hour world, US and local news channel, based in the United States of America.\n-$url nbcnews.com\n-$type live\n-\"\"\"\n-\n-import logging\n-import re\n-\n-from streamlink.plugin import Plugin, pluginmatcher\n-from streamlink.plugin.api import validate\n-from streamlink.stream.hls import HLSStream\n-\n-\n-log = logging.getLogger(__name__)\n-\n-\n-@pluginmatcher(re.compile(\n- r\"https?://(?:www\\.)?nbcnews\\.com/now\",\n-))\n-class NBCNews(Plugin):\n- URL_API = \"https://api-leap.nbcsports.com/feeds/assets/{}?application=NBCNews&format=nbc-player&platform=desktop\"\n- URL_TOKEN = \"https://tokens.playmakerservices.com/\"\n-\n- title = \"NBC News Now\"\n-\n- def _get_streams(self):\n- self.id = self.session.http.get(\n- self.url,\n- schema=validate.Schema(\n- validate.parse_html(),\n- validate.xml_xpath_string(\".//script[@type='application/ld+json'][1]/text()\"),\n- validate.none_or_all(\n- validate.parse_json(),\n- {\"embedUrl\": validate.url()},\n- validate.get(\"embedUrl\"),\n- validate.transform(lambda embed_url: embed_url.split(\"/\")[-1]),\n- ),\n- ),\n- )\n- if self.id is None:\n- return\n- log.debug(f\"API ID: {self.id}\")\n-\n- stream = self.session.http.get(\n- self.URL_API.format(self.id),\n- schema=validate.Schema(\n- validate.parse_json(),\n- {\n- \"videoSources\": [{\n- \"cdnSources\": {\n- \"primary\": [{\n- \"sourceUrl\": validate.url(path=validate.endswith(\".m3u8\")),\n- }],\n- },\n- }],\n- },\n- validate.get((\"videoSources\", 0, \"cdnSources\", \"primary\", 0, \"sourceUrl\")),\n- ),\n- )\n-\n- url = self.session.http.post(\n- self.URL_TOKEN,\n- json={\n- \"requestorId\": \"nbcnews\",\n- \"pid\": self.id,\n- \"application\": \"NBCSports\",\n- \"version\": \"v1\",\n- \"platform\": \"desktop\",\n- \"token\": \"\",\n- \"resourceId\": \"\",\n- \"inPath\": \"false\",\n- \"authenticationType\": \"unauth\",\n- \"cdn\": \"akamai\",\n- \"url\": stream,\n- },\n- schema=validate.Schema(\n- validate.parse_json(),\n- {\n- \"akamai\": [{\n- \"tokenizedUrl\": validate.url(),\n- }],\n- },\n- validate.get((\"akamai\", 0, \"tokenizedUrl\")),\n- ),\n- )\n- return HLSStream.parse_variant_playlist(self.session, url)\n-\n-\n-__plugin__ = NBCNews\n", "issue": "plugins.nbcnews: not live\n### Checklist\n\n- [X] This is a plugin issue and not a different kind of issue\n- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)\n- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)\n- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)\n\n### Streamlink version\n\nLatest build from the master branch\n\n### Description\n\nEvery time it start the same video but on the site it is live. 
Something was changed on the site few days ago.\n\n### Debug log\n\n```text\n[cli][debug] OS: Linux-6.1.0-7-amd64-x86_64-with-glibc2.36\r\n[cli][debug] Python: 3.11.2\r\n[cli][debug] Streamlink: 5.3.1+84.g7978e4ab\r\n[cli][debug] Dependencies:\r\n[cli][debug] certifi: 2022.9.24\r\n[cli][debug] isodate: 0.6.1\r\n[cli][debug] lxml: 4.9.2\r\n[cli][debug] pycountry: 22.3.5\r\n[cli][debug] pycryptodome: 3.16.0\r\n[cli][debug] PySocks: 1.7.1\r\n[cli][debug] requests: 2.28.1\r\n[cli][debug] urllib3: 1.26.12\r\n[cli][debug] websocket-client: 1.2.3\r\n[cli][debug] esprima: 4.0.1\r\n[cli][debug] importlib-metadata: 4.12.0\r\n[cli][debug] Arguments:\r\n[cli][debug] url=https://www.nbcnews.com/now/\r\n[cli][debug] --loglevel=debug\r\n[cli][debug] --player=mpv\r\n[cli][debug] --default-stream=['best']\r\n[cli][info] Found matching plugin nbcnews for URL https://www.nbcnews.com/now/\r\n[plugins.nbcnews][debug] API ID: 2007524\r\n[utils.l10n][debug] Language code: ru_RU\r\n[cli][info] Available streams: 144p_alt (worst), 144p, 216p_alt, 216p, 288p_alt, 288p, 360p_alt, 360p, 504p_alt, 504p, 576p_alt, 576p, 720p_alt, 720p, 1080p_alt, 1080p (best)\r\n[cli][info] Opening stream: 1080p (hls)\r\n[cli][info] Starting player: mpv\r\n[stream.hls][debug] Reloading playlist\r\n[cli][debug] Pre-buffering 8192 bytes\r\n[stream.hls][debug] First Sequence: 1786423; Last Sequence: 1786722\r\n[stream.hls][debug] Start offset: 0; Duration: None; Start Sequence: 1786423; End Sequence: 1786722\r\n[stream.hls][debug] Adding segment 1786423 to queue\n```\n\n", "before_files": [{"content": "\"\"\"\n$description 24-hour world, US and local news channel, based in the United States of America.\n$url nbcnews.com\n$type live\n\"\"\"\n\nimport logging\nimport re\n\nfrom streamlink.plugin import Plugin, pluginmatcher\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream.hls import HLSStream\n\n\nlog = logging.getLogger(__name__)\n\n\n@pluginmatcher(re.compile(\n r\"https?://(?:www\\.)?nbcnews\\.com/now\",\n))\nclass NBCNews(Plugin):\n URL_API = \"https://api-leap.nbcsports.com/feeds/assets/{}?application=NBCNews&format=nbc-player&platform=desktop\"\n URL_TOKEN = \"https://tokens.playmakerservices.com/\"\n\n title = \"NBC News Now\"\n\n def _get_streams(self):\n self.id = self.session.http.get(\n self.url,\n schema=validate.Schema(\n validate.parse_html(),\n validate.xml_xpath_string(\".//script[@type='application/ld+json'][1]/text()\"),\n validate.none_or_all(\n validate.parse_json(),\n {\"embedUrl\": validate.url()},\n validate.get(\"embedUrl\"),\n validate.transform(lambda embed_url: embed_url.split(\"/\")[-1]),\n ),\n ),\n )\n if self.id is None:\n return\n log.debug(f\"API ID: {self.id}\")\n\n stream = self.session.http.get(\n self.URL_API.format(self.id),\n schema=validate.Schema(\n validate.parse_json(),\n {\n \"videoSources\": [{\n \"cdnSources\": {\n \"primary\": [{\n \"sourceUrl\": validate.url(path=validate.endswith(\".m3u8\")),\n }],\n },\n }],\n },\n validate.get((\"videoSources\", 0, \"cdnSources\", \"primary\", 0, \"sourceUrl\")),\n ),\n )\n\n url = self.session.http.post(\n self.URL_TOKEN,\n json={\n \"requestorId\": \"nbcnews\",\n \"pid\": self.id,\n \"application\": \"NBCSports\",\n \"version\": \"v1\",\n \"platform\": \"desktop\",\n \"token\": \"\",\n \"resourceId\": \"\",\n \"inPath\": \"false\",\n \"authenticationType\": \"unauth\",\n \"cdn\": \"akamai\",\n \"url\": stream,\n },\n schema=validate.Schema(\n validate.parse_json(),\n {\n \"akamai\": [{\n \"tokenizedUrl\": validate.url(),\n }],\n },\n 
validate.get((\"akamai\", 0, \"tokenizedUrl\")),\n ),\n )\n return HLSStream.parse_variant_playlist(self.session, url)\n\n\n__plugin__ = NBCNews\n", "path": "src/streamlink/plugins/nbcnews.py"}]}
2,132
696
gh_patches_debug_21241
rasdani/github-patches
git_diff
liqd__a4-meinberlin-2280
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> events not editable for groupmembers group member CAN edit a project but CANNOT an event related to the project. 403 error. </issue> <code> [start of meinberlin/apps/offlineevents/rules.py] 1 import rules 2 from rules.predicates import is_superuser 3 4 from adhocracy4.modules import predicates as module_predicates 5 from adhocracy4.organisations.predicates import is_initiator 6 from adhocracy4.projects.predicates import is_moderator 7 8 rules.add_perm( 9 'meinberlin_offlineevents.list_offlineevent', 10 is_superuser | is_initiator | is_moderator 11 ) 12 13 rules.add_perm( 14 'meinberlin_offlineevents.view_offlineevent', 15 module_predicates.is_allowed_view_item 16 ) 17 18 rules.add_perm( 19 'meinberlin_offlineevents.add_offlineevent', 20 is_superuser | is_initiator | is_moderator 21 ) 22 23 rules.add_perm( 24 'meinberlin_offlineevents.change_offlineevent', 25 is_superuser | is_initiator | is_moderator 26 ) 27 [end of meinberlin/apps/offlineevents/rules.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/meinberlin/apps/offlineevents/rules.py b/meinberlin/apps/offlineevents/rules.py --- a/meinberlin/apps/offlineevents/rules.py +++ b/meinberlin/apps/offlineevents/rules.py @@ -4,10 +4,11 @@ from adhocracy4.modules import predicates as module_predicates from adhocracy4.organisations.predicates import is_initiator from adhocracy4.projects.predicates import is_moderator +from adhocracy4.projects.predicates import is_prj_group_member rules.add_perm( 'meinberlin_offlineevents.list_offlineevent', - is_superuser | is_initiator | is_moderator + is_superuser | is_initiator | is_moderator | is_prj_group_member ) rules.add_perm( @@ -17,10 +18,10 @@ rules.add_perm( 'meinberlin_offlineevents.add_offlineevent', - is_superuser | is_initiator | is_moderator + is_superuser | is_initiator | is_moderator | is_prj_group_member ) rules.add_perm( 'meinberlin_offlineevents.change_offlineevent', - is_superuser | is_initiator | is_moderator + is_superuser | is_initiator | is_moderator | is_prj_group_member )
{"golden_diff": "diff --git a/meinberlin/apps/offlineevents/rules.py b/meinberlin/apps/offlineevents/rules.py\n--- a/meinberlin/apps/offlineevents/rules.py\n+++ b/meinberlin/apps/offlineevents/rules.py\n@@ -4,10 +4,11 @@\n from adhocracy4.modules import predicates as module_predicates\n from adhocracy4.organisations.predicates import is_initiator\n from adhocracy4.projects.predicates import is_moderator\n+from adhocracy4.projects.predicates import is_prj_group_member\n \n rules.add_perm(\n 'meinberlin_offlineevents.list_offlineevent',\n- is_superuser | is_initiator | is_moderator\n+ is_superuser | is_initiator | is_moderator | is_prj_group_member\n )\n \n rules.add_perm(\n@@ -17,10 +18,10 @@\n \n rules.add_perm(\n 'meinberlin_offlineevents.add_offlineevent',\n- is_superuser | is_initiator | is_moderator\n+ is_superuser | is_initiator | is_moderator | is_prj_group_member\n )\n \n rules.add_perm(\n 'meinberlin_offlineevents.change_offlineevent',\n- is_superuser | is_initiator | is_moderator\n+ is_superuser | is_initiator | is_moderator | is_prj_group_member\n )\n", "issue": "events not editable for groupmembers\ngroup member CAN edit a project but CANNOT an event related to the project. 403 error.\n", "before_files": [{"content": "import rules\nfrom rules.predicates import is_superuser\n\nfrom adhocracy4.modules import predicates as module_predicates\nfrom adhocracy4.organisations.predicates import is_initiator\nfrom adhocracy4.projects.predicates import is_moderator\n\nrules.add_perm(\n 'meinberlin_offlineevents.list_offlineevent',\n is_superuser | is_initiator | is_moderator\n)\n\nrules.add_perm(\n 'meinberlin_offlineevents.view_offlineevent',\n module_predicates.is_allowed_view_item\n)\n\nrules.add_perm(\n 'meinberlin_offlineevents.add_offlineevent',\n is_superuser | is_initiator | is_moderator\n)\n\nrules.add_perm(\n 'meinberlin_offlineevents.change_offlineevent',\n is_superuser | is_initiator | is_moderator\n)\n", "path": "meinberlin/apps/offlineevents/rules.py"}]}
798
301
gh_patches_debug_1221
rasdani/github-patches
git_diff
google__mobly-524
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Fix pytest warnings in Python 3 `pytests` currently produces the following warnings: mobly/mobly/test_runner.py:181: PytestWarning: cannot collect test class 'TestRunner' because it has a __init__ constructor class TestRunner(object): mobly/tests/mobly/base_instrumentation_test_test.py:179: DeprecationWarning: Please use assertEqual instead. expected_completed_and_passed) mobly/tests/mobly/base_instrumentation_test_test.py:179: DeprecationWarning: Please use assertEqual instead. expected_completed_and_passed) mobly/tests/mobly/base_instrumentation_test_test.py:192: DeprecationWarning: Please use assertEqual instead. self.assertEquals(actual_test.begin_time, expected_begin_time) mobly/tests/mobly/base_instrumentation_test_test.py:193: DeprecationWarning: Please use assertEqual instead. self.assertEquals(actual_test.end_time, expected_end_time) mobly/tests/mobly/base_instrumentation_test_test.py:179: DeprecationWarning: Please use assertEqual instead. expected_completed_and_passed) mobly/tests/mobly/base_instrumentation_test_test.py:192: DeprecationWarning: Please use assertEqual instead. self.assertEquals(actual_test.begin_time, expected_begin_time) mobly/tests/mobly/base_instrumentation_test_test.py:193: DeprecationWarning: Please use assertEqual instead. self.assertEquals(actual_test.end_time, expected_end_time) mobly/tests/mobly/base_instrumentation_test_test.py:192: DeprecationWarning: Please use assertEqual instead. self.assertEquals(actual_test.begin_time, expected_begin_time) mobly/tests/mobly/base_instrumentation_test_test.py:193: DeprecationWarning: Please use assertEqual instead. self.assertEquals(actual_test.end_time, expected_end_time) mobly/tests/mobly/base_instrumentation_test_test.py:179: DeprecationWarning: Please use assertEqual instead. expected_completed_and_passed) mobly/tests/mobly/base_instrumentation_test_test.py:179: DeprecationWarning: Please use assertEqual instead. expected_completed_and_passed) mobly/tests/mobly/base_instrumentation_test_test.py:179: DeprecationWarning: Please use assertEqual instead. expected_completed_and_passed) mobly/tests/mobly/base_instrumentation_test_test.py:192: DeprecationWarning: Please use assertEqual instead. self.assertEquals(actual_test.begin_time, expected_begin_time) mobly/tests/mobly/base_instrumentation_test_test.py:193: DeprecationWarning: Please use assertEqual instead. self.assertEquals(actual_test.end_time, expected_end_time) mobly/tests/mobly/base_instrumentation_test_test.py:192: DeprecationWarning: Please use assertEqual instead. self.assertEquals(actual_test.begin_time, expected_begin_time) mobly/tests/mobly/base_instrumentation_test_test.py:193: DeprecationWarning: Please use assertEqual instead. self.assertEquals(actual_test.end_time, expected_end_time) mobly/tests/mobly/base_instrumentation_test_test.py:179: DeprecationWarning: Please use assertEqual instead. expected_completed_and_passed) mobly/tests/mobly/base_instrumentation_test_test.py:179: DeprecationWarning: Please use assertEqual instead. expected_completed_and_passed) mobly/tests/mobly/base_instrumentation_test_test.py:192: DeprecationWarning: Please use assertEqual instead. self.assertEquals(actual_test.begin_time, expected_begin_time) mobly/tests/mobly/base_instrumentation_test_test.py:193: DeprecationWarning: Please use assertEqual instead. 
self.assertEquals(actual_test.end_time, expected_end_time) mobly/tests/mobly/base_instrumentation_test_test.py:179: DeprecationWarning: Please use assertEqual instead. expected_completed_and_passed) mobly/tests/mobly/base_instrumentation_test_test.py:179: DeprecationWarning: Please use assertEqual instead. expected_completed_and_passed) mobly/tests/mobly/output_test.py:171: DeprecationWarning: Please use assertNotEqual instead. self.assertNotEquals(output_dir1, output_dir2) mobly/tests/mobly/output_test.py:205: DeprecationWarning: Please use assertNotEqual instead. self.assertNotEquals(output_dir1, output_dir2) -- Docs: https://docs.pytest.org/en/latest/warnings.html </issue> <code> [start of setup.py] 1 # Copyright 2016 Google Inc. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 import platform 16 import setuptools 17 from setuptools.command import test 18 import sys 19 20 install_requires = [ 21 'future', 'portpicker', 'psutil>=5.4.4', 'pyserial', 'pyyaml', 22 'timeout_decorator' 23 ] 24 25 if sys.version_info < (3, ): 26 install_requires.extend([ 27 'enum34', 28 # "futures" is needed for py2 compatibility and it only works in 2.7 29 'futures', 30 ]) 31 32 if platform.system() == 'Windows': 33 install_requires.append('pywin32') 34 35 36 class PyTest(test.test): 37 """Class used to execute unit tests using PyTest. This allows us to execute 38 unit tests without having to install the package. 39 """ 40 41 def finalize_options(self): 42 test.test.finalize_options(self) 43 self.test_args = ['-x', "tests"] 44 self.test_suite = True 45 46 def run_tests(self): 47 import pytest 48 errno = pytest.main(self.test_args) 49 sys.exit(errno) 50 51 52 def main(): 53 setuptools.setup( 54 name='mobly', 55 version='1.7.5', 56 maintainer='Ang Li', 57 maintainer_email='[email protected]', 58 description='Automation framework for special end-to-end test cases', 59 license='Apache2.0', 60 url='https://github.com/google/mobly', 61 download_url='https://github.com/google/mobly/tarball/1.7.5', 62 packages=setuptools.find_packages(), 63 include_package_data=False, 64 scripts=['tools/sl4a_shell.py', 'tools/snippet_shell.py'], 65 tests_require=[ 66 'mock', 67 'pytest', 68 'pytz', 69 ], 70 install_requires=install_requires, 71 cmdclass={'test': PyTest}, 72 ) 73 74 75 if __name__ == '__main__': 76 main() 77 [end of setup.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. 
<patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -40,7 +40,7 @@ def finalize_options(self): test.test.finalize_options(self) - self.test_args = ['-x', "tests"] + self.test_args = ['-x', "tests/mobly"] self.test_suite = True def run_tests(self):
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -40,7 +40,7 @@\n \n def finalize_options(self):\n test.test.finalize_options(self)\n- self.test_args = ['-x', \"tests\"]\n+ self.test_args = ['-x', \"tests/mobly\"]\n self.test_suite = True\n \n def run_tests(self):\n", "issue": "Fix pytest warnings in Python 3\n`pytests` currently produces the following warnings:\r\n\r\n mobly/mobly/test_runner.py:181: PytestWarning: cannot collect test class 'TestRunner' because it has a __init__ constructor\r\n class TestRunner(object):\r\n\r\n mobly/tests/mobly/base_instrumentation_test_test.py:179: DeprecationWarning: Please use assertEqual instead.\r\n expected_completed_and_passed)\r\n\r\n mobly/tests/mobly/base_instrumentation_test_test.py:179: DeprecationWarning: Please use assertEqual instead.\r\n expected_completed_and_passed)\r\n\r\n mobly/tests/mobly/base_instrumentation_test_test.py:192: DeprecationWarning: Please use assertEqual instead.\r\n self.assertEquals(actual_test.begin_time, expected_begin_time)\r\n mobly/tests/mobly/base_instrumentation_test_test.py:193: DeprecationWarning: Please use assertEqual instead.\r\n self.assertEquals(actual_test.end_time, expected_end_time)\r\n\r\n mobly/tests/mobly/base_instrumentation_test_test.py:179: DeprecationWarning: Please use assertEqual instead.\r\n expected_completed_and_passed)\r\n mobly/tests/mobly/base_instrumentation_test_test.py:192: DeprecationWarning: Please use assertEqual instead.\r\n self.assertEquals(actual_test.begin_time, expected_begin_time)\r\n mobly/tests/mobly/base_instrumentation_test_test.py:193: DeprecationWarning: Please use assertEqual instead.\r\n self.assertEquals(actual_test.end_time, expected_end_time)\r\n mobly/tests/mobly/base_instrumentation_test_test.py:192: DeprecationWarning: Please use assertEqual instead.\r\n self.assertEquals(actual_test.begin_time, expected_begin_time)\r\n mobly/tests/mobly/base_instrumentation_test_test.py:193: DeprecationWarning: Please use assertEqual instead.\r\n self.assertEquals(actual_test.end_time, expected_end_time)\r\n\r\n mobly/tests/mobly/base_instrumentation_test_test.py:179: DeprecationWarning: Please use assertEqual instead.\r\n expected_completed_and_passed)\r\n\r\n mobly/tests/mobly/base_instrumentation_test_test.py:179: DeprecationWarning: Please use assertEqual instead.\r\n expected_completed_and_passed)\r\n\r\n mobly/tests/mobly/base_instrumentation_test_test.py:179: DeprecationWarning: Please use assertEqual instead.\r\n expected_completed_and_passed)\r\n mobly/tests/mobly/base_instrumentation_test_test.py:192: DeprecationWarning: Please use assertEqual instead.\r\n self.assertEquals(actual_test.begin_time, expected_begin_time)\r\n mobly/tests/mobly/base_instrumentation_test_test.py:193: DeprecationWarning: Please use assertEqual instead.\r\n self.assertEquals(actual_test.end_time, expected_end_time)\r\n mobly/tests/mobly/base_instrumentation_test_test.py:192: DeprecationWarning: Please use assertEqual instead.\r\n self.assertEquals(actual_test.begin_time, expected_begin_time)\r\n mobly/tests/mobly/base_instrumentation_test_test.py:193: DeprecationWarning: Please use assertEqual instead.\r\n self.assertEquals(actual_test.end_time, expected_end_time)\r\n\r\n mobly/tests/mobly/base_instrumentation_test_test.py:179: DeprecationWarning: Please use assertEqual instead.\r\n expected_completed_and_passed)\r\n\r\n mobly/tests/mobly/base_instrumentation_test_test.py:179: DeprecationWarning: Please use assertEqual instead.\r\n 
expected_completed_and_passed)\r\n mobly/tests/mobly/base_instrumentation_test_test.py:192: DeprecationWarning: Please use assertEqual instead.\r\n self.assertEquals(actual_test.begin_time, expected_begin_time)\r\n mobly/tests/mobly/base_instrumentation_test_test.py:193: DeprecationWarning: Please use assertEqual instead.\r\n self.assertEquals(actual_test.end_time, expected_end_time)\r\n\r\n mobly/tests/mobly/base_instrumentation_test_test.py:179: DeprecationWarning: Please use assertEqual instead.\r\n expected_completed_and_passed)\r\n\r\n mobly/tests/mobly/base_instrumentation_test_test.py:179: DeprecationWarning: Please use assertEqual instead.\r\n expected_completed_and_passed)\r\n\r\n mobly/tests/mobly/output_test.py:171: DeprecationWarning: Please use assertNotEqual instead.\r\n self.assertNotEquals(output_dir1, output_dir2)\r\n\r\n mobly/tests/mobly/output_test.py:205: DeprecationWarning: Please use assertNotEqual instead.\r\n self.assertNotEquals(output_dir1, output_dir2)\r\n\r\n -- Docs: https://docs.pytest.org/en/latest/warnings.html\n", "before_files": [{"content": "# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport platform\nimport setuptools\nfrom setuptools.command import test\nimport sys\n\ninstall_requires = [\n 'future', 'portpicker', 'psutil>=5.4.4', 'pyserial', 'pyyaml',\n 'timeout_decorator'\n]\n\nif sys.version_info < (3, ):\n install_requires.extend([\n 'enum34',\n # \"futures\" is needed for py2 compatibility and it only works in 2.7\n 'futures',\n ])\n\nif platform.system() == 'Windows':\n install_requires.append('pywin32')\n\n\nclass PyTest(test.test):\n \"\"\"Class used to execute unit tests using PyTest. This allows us to execute\n unit tests without having to install the package.\n \"\"\"\n\n def finalize_options(self):\n test.test.finalize_options(self)\n self.test_args = ['-x', \"tests\"]\n self.test_suite = True\n\n def run_tests(self):\n import pytest\n errno = pytest.main(self.test_args)\n sys.exit(errno)\n\n\ndef main():\n setuptools.setup(\n name='mobly',\n version='1.7.5',\n maintainer='Ang Li',\n maintainer_email='[email protected]',\n description='Automation framework for special end-to-end test cases',\n license='Apache2.0',\n url='https://github.com/google/mobly',\n download_url='https://github.com/google/mobly/tarball/1.7.5',\n packages=setuptools.find_packages(),\n include_package_data=False,\n scripts=['tools/sl4a_shell.py', 'tools/snippet_shell.py'],\n tests_require=[\n 'mock',\n 'pytest',\n 'pytz',\n ],\n install_requires=install_requires,\n cmdclass={'test': PyTest},\n )\n\n\nif __name__ == '__main__':\n main()\n", "path": "setup.py"}]}
2,220
87
gh_patches_debug_26726
rasdani/github-patches
git_diff
ivy-llc__ivy-15478
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> loggamma </issue> <code> [start of ivy/functional/frontends/jax/random.py] 1 # local 2 import ivy 3 from ivy.func_wrapper import with_unsupported_dtypes 4 from ivy.functional.frontends.jax.func_wrapper import ( 5 to_ivy_arrays_and_back, 6 handle_jax_dtype, 7 ) 8 9 10 @to_ivy_arrays_and_back 11 def PRNGKey(seed): 12 return ivy.array([0, seed % 4294967295 - (seed // 4294967295)], dtype=ivy.int64) 13 14 15 @handle_jax_dtype 16 @to_ivy_arrays_and_back 17 def uniform(key, shape=(), dtype=None, minval=0.0, maxval=1.0): 18 return ivy.random_uniform( 19 low=minval, high=maxval, shape=shape, dtype=dtype, seed=ivy.to_scalar(key[1]) 20 ) 21 22 23 @handle_jax_dtype 24 @to_ivy_arrays_and_back 25 def normal(key, shape=(), dtype=None): 26 return ivy.random_normal(shape=shape, dtype=dtype, seed=ivy.to_scalar(key[1])) 27 28 29 def _get_seed(key): 30 key1, key2 = int(key[0]), int(key[1]) 31 return ivy.to_scalar(int("".join(map(str, [key1, key2])))) 32 33 34 @handle_jax_dtype 35 @to_ivy_arrays_and_back 36 @with_unsupported_dtypes( 37 { 38 "0.3.14 and below": ( 39 "float16", 40 "bfloat16", 41 ) 42 }, 43 "jax", 44 ) 45 def beta(key, a, b, shape=None, dtype=None): 46 seed = _get_seed(key) 47 return ivy.beta(a, b, shape=shape, dtype=dtype, seed=seed) 48 49 50 @handle_jax_dtype 51 @to_ivy_arrays_and_back 52 @with_unsupported_dtypes( 53 { 54 "0.3.14 and below": ( 55 "float16", 56 "bfloat16", 57 ) 58 }, 59 "jax", 60 ) 61 def dirichlet(key, alpha, shape=None, dtype="float32"): 62 seed = _get_seed(key) 63 alpha = ivy.astype(alpha, dtype) 64 return ivy.dirichlet(alpha, size=shape, dtype=dtype, seed=seed) 65 66 67 @handle_jax_dtype 68 @to_ivy_arrays_and_back 69 def cauchy(key, shape=(), dtype="float64"): 70 seed = _get_seed(key) 71 u = ivy.random_uniform(low=0.0, high=1.0, shape=shape, dtype=dtype, seed=seed) 72 return ivy.tan(ivy.pi * (u - 0.5)) 73 74 75 @handle_jax_dtype 76 @to_ivy_arrays_and_back 77 @with_unsupported_dtypes( 78 {"0.3.14 and below": ("unsigned", "int8", "int16")}, 79 "jax", 80 ) 81 def poisson(key, lam, shape=None, dtype=None): 82 seed = _get_seed(key) 83 return ivy.poisson(lam, shape=shape, dtype=dtype, seed=seed) 84 85 86 @handle_jax_dtype 87 @to_ivy_arrays_and_back 88 @with_unsupported_dtypes( 89 { 90 "0.3.14 and below": ( 91 "float16", 92 "bfloat16", 93 ) 94 }, 95 "jax", 96 ) 97 def gamma(key, a, shape=None, dtype="float64"): 98 seed = _get_seed(key) 99 return ivy.gamma(a, 1.0, shape=shape, dtype=dtype, seed=seed) 100 101 102 @handle_jax_dtype 103 @to_ivy_arrays_and_back 104 @with_unsupported_dtypes( 105 { 106 "0.3.14 and below": ( 107 "float16", 108 "bfloat16", 109 ) 110 }, 111 "jax", 112 ) 113 def gumbel(key, shape=(), dtype="float64"): 114 seed = _get_seed(key) 115 uniform_x = ivy.random_uniform( 116 low=0.0, 117 high=1.0, 118 shape=shape, 119 dtype=dtype, 120 seed=seed, 121 ) 122 return -ivy.log(-ivy.log(uniform_x)) 123 124 125 @handle_jax_dtype 126 @to_ivy_arrays_and_back 127 @with_unsupported_dtypes( 128 {"0.3.14 and below": ("unsigned", "int8", "int16")}, 129 "jax", 130 ) 131 def rademacher(key, shape, dtype="int64"): 132 seed = _get_seed(key) 133 b = ivy.bernoulli(ivy.array([0.5]), shape=shape, dtype="float32", seed=seed) 134 b = ivy.astype(b, dtype) 135 return 2 * b - 1 136 137 138 @handle_jax_dtype 139 @to_ivy_arrays_and_back 140 @with_unsupported_dtypes( 141 { 142 "0.3.14 and below": ( 143 "float16", 144 "bfloat16", 145 ) 146 }, 147 "jax", 148 ) 149 def generalized_normal(key, 
p, shape=(), dtype="float64"): 150 seed = _get_seed(key) 151 g = ivy.gamma(1 / p, 1.0, shape=shape, dtype=dtype, seed=seed) 152 b = ivy.bernoulli(ivy.array([0.5]), shape=shape, dtype=dtype, seed=seed) 153 r = 2 * b - 1 154 return r * g ** (1 / p) 155 156 157 def t(key, df, shape=(), dtype="float64"): 158 seed = _get_seed(key) 159 n = ivy.random_normal(shape=shape, dtype=dtype, seed=seed) 160 half_df = df / 2.0 161 g = ivy.gamma(half_df, 1.0, shape=shape, dtype=dtype, seed=seed) 162 return n * ivy.sqrt(ivy.divide(half_df, g)) 163 164 165 @handle_jax_dtype 166 @to_ivy_arrays_and_back 167 @with_unsupported_dtypes( 168 {"0.3.14 and below": ("unsigned", "int8", "int16")}, 169 "jax", 170 ) 171 def randint(key, shape, minval, maxval, dtype="int64"): 172 seed = _get_seed(key) 173 return ivy.randint(minval, maxval, shape=shape, dtype=dtype, seed=seed) 174 175 @to_ivy_arrays_and_back 176 def bernoulli(key, p=0.5, shape=None): 177 seed = _get_seed(key) 178 return ivy.bernoulli(p, shape=shape, seed=seed) 179 180 @to_ivy_arrays_and_back 181 def fold_in(key, data): 182 s = ivy.bitwise_left_shift( 183 ivy.asarray(data, dtype=ivy.uint32), ivy.array(32, dtype=ivy.uint32) 184 ) 185 return ivy.bitwise_xor(key, s) 186 187 @to_ivy_arrays_and_back 188 def permutation(key, x, axis=0, independent=False): 189 x = ivy.array(x) 190 seed = _get_seed(key) 191 if not ivy.get_num_dims(x): 192 r = int(x) 193 return ivy.shuffle(ivy.arange(r), axis, seed=seed) 194 if independent: 195 return ivy.shuffle(x, axis, seed=seed) 196 rand = ivy.arange(x.shape[axis]) 197 ind = ivy.shuffle(rand, 0, seed=seed) 198 return ivy.gather(x, ind, axis=axis) 199 [end of ivy/functional/frontends/jax/random.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ivy/functional/frontends/jax/random.py b/ivy/functional/frontends/jax/random.py
--- a/ivy/functional/frontends/jax/random.py
+++ b/ivy/functional/frontends/jax/random.py
@@ -172,11 +172,13 @@
     seed = _get_seed(key)
     return ivy.randint(minval, maxval, shape=shape, dtype=dtype, seed=seed)
 
+
 @to_ivy_arrays_and_back
 def bernoulli(key, p=0.5, shape=None):
     seed = _get_seed(key)
     return ivy.bernoulli(p, shape=shape, seed=seed)
 
+
 @to_ivy_arrays_and_back
 def fold_in(key, data):
     s = ivy.bitwise_left_shift(
@@ -184,6 +186,7 @@
     )
     return ivy.bitwise_xor(key, s)
 
+
 @to_ivy_arrays_and_back
 def permutation(key, x, axis=0, independent=False):
     x = ivy.array(x)
@@ -196,3 +199,21 @@
     rand = ivy.arange(x.shape[axis])
     ind = ivy.shuffle(rand, 0, seed=seed)
     return ivy.gather(x, ind, axis=axis)
+
+
+# loggamma
+@to_ivy_arrays_and_back
+@handle_jax_dtype
+@to_ivy_arrays_and_back
+@with_unsupported_dtypes(
+    {
+        "0.3.14 and below": (
+            "float16",
+            "bfloat16",
+        )
+    },
+    "jax",
+)
+def loggamma(key, a, shape=None, dtype="float64"):
+    seed = _get_seed(key)
+    return ivy.log(ivy.gamma(a, 1.0, shape=shape, dtype=dtype, seed=seed))
{"golden_diff": "diff --git a/ivy/functional/frontends/jax/random.py b/ivy/functional/frontends/jax/random.py\n--- a/ivy/functional/frontends/jax/random.py\n+++ b/ivy/functional/frontends/jax/random.py\n@@ -172,11 +172,13 @@\n seed = _get_seed(key)\n return ivy.randint(minval, maxval, shape=shape, dtype=dtype, seed=seed)\n \n+\n @to_ivy_arrays_and_back\n def bernoulli(key, p=0.5, shape=None):\n seed = _get_seed(key)\n return ivy.bernoulli(p, shape=shape, seed=seed)\n \n+\n @to_ivy_arrays_and_back\n def fold_in(key, data):\n s = ivy.bitwise_left_shift(\n@@ -184,6 +186,7 @@\n )\n return ivy.bitwise_xor(key, s)\n \n+\n @to_ivy_arrays_and_back\n def permutation(key, x, axis=0, independent=False):\n x = ivy.array(x)\n@@ -196,3 +199,21 @@\n rand = ivy.arange(x.shape[axis])\n ind = ivy.shuffle(rand, 0, seed=seed)\n return ivy.gather(x, ind, axis=axis)\n+\n+\n+# loggamma\n+@to_ivy_arrays_and_back\n+@handle_jax_dtype\n+@to_ivy_arrays_and_back\n+@with_unsupported_dtypes(\n+ {\n+ \"0.3.14 and below\": (\n+ \"float16\",\n+ \"bfloat16\",\n+ )\n+ },\n+ \"jax\",\n+)\n+def loggamma(key, a, shape=None, dtype=\"float64\"):\n+ seed = _get_seed(key)\n+ return ivy.log(ivy.gamma(a, 1.0, shape=shape, dtype=dtype, seed=seed))\n", "issue": "loggamma\n\n", "before_files": [{"content": "# local\nimport ivy\nfrom ivy.func_wrapper import with_unsupported_dtypes\nfrom ivy.functional.frontends.jax.func_wrapper import (\n to_ivy_arrays_and_back,\n handle_jax_dtype,\n)\n\n\n@to_ivy_arrays_and_back\ndef PRNGKey(seed):\n return ivy.array([0, seed % 4294967295 - (seed // 4294967295)], dtype=ivy.int64)\n\n\n@handle_jax_dtype\n@to_ivy_arrays_and_back\ndef uniform(key, shape=(), dtype=None, minval=0.0, maxval=1.0):\n return ivy.random_uniform(\n low=minval, high=maxval, shape=shape, dtype=dtype, seed=ivy.to_scalar(key[1])\n )\n\n\n@handle_jax_dtype\n@to_ivy_arrays_and_back\ndef normal(key, shape=(), dtype=None):\n return ivy.random_normal(shape=shape, dtype=dtype, seed=ivy.to_scalar(key[1]))\n\n\ndef _get_seed(key):\n key1, key2 = int(key[0]), int(key[1])\n return ivy.to_scalar(int(\"\".join(map(str, [key1, key2]))))\n\n\n@handle_jax_dtype\n@to_ivy_arrays_and_back\n@with_unsupported_dtypes(\n {\n \"0.3.14 and below\": (\n \"float16\",\n \"bfloat16\",\n )\n },\n \"jax\",\n)\ndef beta(key, a, b, shape=None, dtype=None):\n seed = _get_seed(key)\n return ivy.beta(a, b, shape=shape, dtype=dtype, seed=seed)\n\n\n@handle_jax_dtype\n@to_ivy_arrays_and_back\n@with_unsupported_dtypes(\n {\n \"0.3.14 and below\": (\n \"float16\",\n \"bfloat16\",\n )\n },\n \"jax\",\n)\ndef dirichlet(key, alpha, shape=None, dtype=\"float32\"):\n seed = _get_seed(key)\n alpha = ivy.astype(alpha, dtype)\n return ivy.dirichlet(alpha, size=shape, dtype=dtype, seed=seed)\n\n\n@handle_jax_dtype\n@to_ivy_arrays_and_back\ndef cauchy(key, shape=(), dtype=\"float64\"):\n seed = _get_seed(key)\n u = ivy.random_uniform(low=0.0, high=1.0, shape=shape, dtype=dtype, seed=seed)\n return ivy.tan(ivy.pi * (u - 0.5))\n\n\n@handle_jax_dtype\n@to_ivy_arrays_and_back\n@with_unsupported_dtypes(\n {\"0.3.14 and below\": (\"unsigned\", \"int8\", \"int16\")},\n \"jax\",\n)\ndef poisson(key, lam, shape=None, dtype=None):\n seed = _get_seed(key)\n return ivy.poisson(lam, shape=shape, dtype=dtype, seed=seed)\n\n\n@handle_jax_dtype\n@to_ivy_arrays_and_back\n@with_unsupported_dtypes(\n {\n \"0.3.14 and below\": (\n \"float16\",\n \"bfloat16\",\n )\n },\n \"jax\",\n)\ndef gamma(key, a, shape=None, dtype=\"float64\"):\n seed = _get_seed(key)\n return ivy.gamma(a, 1.0, shape=shape, 
dtype=dtype, seed=seed)\n\n\n@handle_jax_dtype\n@to_ivy_arrays_and_back\n@with_unsupported_dtypes(\n {\n \"0.3.14 and below\": (\n \"float16\",\n \"bfloat16\",\n )\n },\n \"jax\",\n)\ndef gumbel(key, shape=(), dtype=\"float64\"):\n seed = _get_seed(key)\n uniform_x = ivy.random_uniform(\n low=0.0,\n high=1.0,\n shape=shape,\n dtype=dtype,\n seed=seed,\n )\n return -ivy.log(-ivy.log(uniform_x))\n\n\n@handle_jax_dtype\n@to_ivy_arrays_and_back\n@with_unsupported_dtypes(\n {\"0.3.14 and below\": (\"unsigned\", \"int8\", \"int16\")},\n \"jax\",\n)\ndef rademacher(key, shape, dtype=\"int64\"):\n seed = _get_seed(key)\n b = ivy.bernoulli(ivy.array([0.5]), shape=shape, dtype=\"float32\", seed=seed)\n b = ivy.astype(b, dtype)\n return 2 * b - 1\n\n\n@handle_jax_dtype\n@to_ivy_arrays_and_back\n@with_unsupported_dtypes(\n {\n \"0.3.14 and below\": (\n \"float16\",\n \"bfloat16\",\n )\n },\n \"jax\",\n)\ndef generalized_normal(key, p, shape=(), dtype=\"float64\"):\n seed = _get_seed(key)\n g = ivy.gamma(1 / p, 1.0, shape=shape, dtype=dtype, seed=seed)\n b = ivy.bernoulli(ivy.array([0.5]), shape=shape, dtype=dtype, seed=seed)\n r = 2 * b - 1\n return r * g ** (1 / p)\n\n\ndef t(key, df, shape=(), dtype=\"float64\"):\n seed = _get_seed(key)\n n = ivy.random_normal(shape=shape, dtype=dtype, seed=seed)\n half_df = df / 2.0\n g = ivy.gamma(half_df, 1.0, shape=shape, dtype=dtype, seed=seed)\n return n * ivy.sqrt(ivy.divide(half_df, g))\n\n\n@handle_jax_dtype\n@to_ivy_arrays_and_back\n@with_unsupported_dtypes(\n {\"0.3.14 and below\": (\"unsigned\", \"int8\", \"int16\")},\n \"jax\",\n)\ndef randint(key, shape, minval, maxval, dtype=\"int64\"):\n seed = _get_seed(key)\n return ivy.randint(minval, maxval, shape=shape, dtype=dtype, seed=seed)\n\n@to_ivy_arrays_and_back\ndef bernoulli(key, p=0.5, shape=None):\n seed = _get_seed(key)\n return ivy.bernoulli(p, shape=shape, seed=seed)\n\n@to_ivy_arrays_and_back\ndef fold_in(key, data):\n s = ivy.bitwise_left_shift(\n ivy.asarray(data, dtype=ivy.uint32), ivy.array(32, dtype=ivy.uint32)\n )\n return ivy.bitwise_xor(key, s)\n\n@to_ivy_arrays_and_back\ndef permutation(key, x, axis=0, independent=False):\n x = ivy.array(x)\n seed = _get_seed(key)\n if not ivy.get_num_dims(x):\n r = int(x)\n return ivy.shuffle(ivy.arange(r), axis, seed=seed)\n if independent:\n return ivy.shuffle(x, axis, seed=seed)\n rand = ivy.arange(x.shape[axis])\n ind = ivy.shuffle(rand, 0, seed=seed)\n return ivy.gather(x, ind, axis=axis)\n", "path": "ivy/functional/frontends/jax/random.py"}]}
2,678
426
gh_patches_debug_8409
rasdani/github-patches
git_diff
pre-commit__pre-commit-478
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> `docker build` argument order is invalid on old versions of Docker We do: `docker build . --tag thing --label thing` But this produces an error on Docker 1.11.2: ``` ckuehl@dev4-uswest1cdevc:~/proj/pre-commit$ docker build . --tag thing --label thing docker: "build" requires 1 argument. See 'docker build --help'. Usage: docker build [OPTIONS] PATH | URL | - Build an image from a Dockerfile ``` The path needs to go at the end on 1.11.2, but it works on 1.13.0 as-is. We should probably just change the order of the arguments to make every version happy. </issue> <code> [start of pre_commit/languages/docker.py] 1 from __future__ import absolute_import 2 from __future__ import unicode_literals 3 4 import hashlib 5 import os 6 7 from pre_commit import five 8 from pre_commit.languages import helpers 9 from pre_commit.util import CalledProcessError 10 from pre_commit.util import clean_path_on_failure 11 from pre_commit.util import cmd_output 12 from pre_commit.xargs import xargs 13 14 15 ENVIRONMENT_DIR = 'docker' 16 PRE_COMMIT_LABEL = 'PRE_COMMIT' 17 18 19 def md5(s): # pragma: windows no cover 20 return hashlib.md5(five.to_bytes(s)).hexdigest() 21 22 23 def docker_tag(repo_cmd_runner): # pragma: windows no cover 24 return 'pre-commit-{}'.format( 25 md5(os.path.basename(repo_cmd_runner.path())) 26 ).lower() 27 28 29 def docker_is_running(): # pragma: windows no cover 30 try: 31 return cmd_output('docker', 'ps')[0] == 0 32 except CalledProcessError: 33 return False 34 35 36 def assert_docker_available(): # pragma: windows no cover 37 assert docker_is_running(), ( 38 'Docker is either not running or not configured in this environment' 39 ) 40 41 42 def build_docker_image(repo_cmd_runner, **kwargs): # pragma: windows no cover 43 pull = kwargs.pop('pull') 44 assert not kwargs, kwargs 45 cmd = ( 46 'docker', 'build', '.', 47 '--tag', docker_tag(repo_cmd_runner), 48 '--label', PRE_COMMIT_LABEL, 49 ) 50 if pull: 51 cmd += ('--pull',) 52 helpers.run_setup_cmd(repo_cmd_runner, cmd) 53 54 55 def install_environment( 56 repo_cmd_runner, 57 version='default', 58 additional_dependencies=(), 59 ): # pragma: windows no cover 60 assert repo_cmd_runner.exists('Dockerfile'), ( 61 'No Dockerfile was found in the hook repository' 62 ) 63 helpers.assert_version_default('docker', version) 64 helpers.assert_no_additional_deps('docker', additional_dependencies) 65 assert_docker_available() 66 67 directory = repo_cmd_runner.path( 68 helpers.environment_dir(ENVIRONMENT_DIR, 'default'), 69 ) 70 71 # Docker doesn't really have relevant disk environment, but pre-commit 72 # still needs to cleanup it's state files on failure 73 with clean_path_on_failure(directory): 74 build_docker_image(repo_cmd_runner, pull=True) 75 os.mkdir(directory) 76 77 78 def run_hook(repo_cmd_runner, hook, file_args): # pragma: windows no cover 79 assert_docker_available() 80 # Rebuild the docker image in case it has gone missing, as many people do 81 # automated cleanup of docker images. 
82 build_docker_image(repo_cmd_runner, pull=False) 83 84 hook_cmd = helpers.to_cmd(hook) 85 entry_executable, cmd_rest = hook_cmd[0], hook_cmd[1:] 86 87 cmd = ( 88 'docker', 'run', 89 '--rm', 90 '-u', '{}:{}'.format(os.getuid(), os.getgid()), 91 '-v', '{}:/src:rw'.format(os.getcwd()), 92 '--workdir', '/src', 93 '--entrypoint', entry_executable, 94 docker_tag(repo_cmd_runner) 95 ) + cmd_rest 96 97 return xargs(cmd, file_args) 98 [end of pre_commit/languages/docker.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/pre_commit/languages/docker.py b/pre_commit/languages/docker.py
--- a/pre_commit/languages/docker.py
+++ b/pre_commit/languages/docker.py
@@ -43,12 +43,14 @@
     pull = kwargs.pop('pull')
    assert not kwargs, kwargs
     cmd = (
-        'docker', 'build', '.',
+        'docker', 'build',
         '--tag', docker_tag(repo_cmd_runner),
         '--label', PRE_COMMIT_LABEL,
     )
     if pull:
         cmd += ('--pull',)
+    # This must come last for old versions of docker. See #477
+    cmd += ('.',)
     helpers.run_setup_cmd(repo_cmd_runner, cmd)
{"golden_diff": "diff --git a/pre_commit/languages/docker.py b/pre_commit/languages/docker.py\n--- a/pre_commit/languages/docker.py\n+++ b/pre_commit/languages/docker.py\n@@ -43,12 +43,14 @@\n pull = kwargs.pop('pull')\n assert not kwargs, kwargs\n cmd = (\n- 'docker', 'build', '.',\n+ 'docker', 'build',\n '--tag', docker_tag(repo_cmd_runner),\n '--label', PRE_COMMIT_LABEL,\n )\n if pull:\n cmd += ('--pull',)\n+ # This must come last for old versions of docker. See #477\n+ cmd += ('.',)\n helpers.run_setup_cmd(repo_cmd_runner, cmd)\n", "issue": "`docker build` argument order is invalid on old versions of Docker\nWe do: `docker build . --tag thing --label thing`\r\n\r\nBut this produces an error on Docker 1.11.2:\r\n```\r\nckuehl@dev4-uswest1cdevc:~/proj/pre-commit$ docker build . --tag thing --label thing\r\ndocker: \"build\" requires 1 argument.\r\nSee 'docker build --help'.\r\n\r\nUsage: docker build [OPTIONS] PATH | URL | -\r\n\r\nBuild an image from a Dockerfile\r\n```\r\n\r\nThe path needs to go at the end on 1.11.2, but it works on 1.13.0 as-is. We should probably just change the order of the arguments to make every version happy.\n", "before_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import unicode_literals\n\nimport hashlib\nimport os\n\nfrom pre_commit import five\nfrom pre_commit.languages import helpers\nfrom pre_commit.util import CalledProcessError\nfrom pre_commit.util import clean_path_on_failure\nfrom pre_commit.util import cmd_output\nfrom pre_commit.xargs import xargs\n\n\nENVIRONMENT_DIR = 'docker'\nPRE_COMMIT_LABEL = 'PRE_COMMIT'\n\n\ndef md5(s): # pragma: windows no cover\n return hashlib.md5(five.to_bytes(s)).hexdigest()\n\n\ndef docker_tag(repo_cmd_runner): # pragma: windows no cover\n return 'pre-commit-{}'.format(\n md5(os.path.basename(repo_cmd_runner.path()))\n ).lower()\n\n\ndef docker_is_running(): # pragma: windows no cover\n try:\n return cmd_output('docker', 'ps')[0] == 0\n except CalledProcessError:\n return False\n\n\ndef assert_docker_available(): # pragma: windows no cover\n assert docker_is_running(), (\n 'Docker is either not running or not configured in this environment'\n )\n\n\ndef build_docker_image(repo_cmd_runner, **kwargs): # pragma: windows no cover\n pull = kwargs.pop('pull')\n assert not kwargs, kwargs\n cmd = (\n 'docker', 'build', '.',\n '--tag', docker_tag(repo_cmd_runner),\n '--label', PRE_COMMIT_LABEL,\n )\n if pull:\n cmd += ('--pull',)\n helpers.run_setup_cmd(repo_cmd_runner, cmd)\n\n\ndef install_environment(\n repo_cmd_runner,\n version='default',\n additional_dependencies=(),\n): # pragma: windows no cover\n assert repo_cmd_runner.exists('Dockerfile'), (\n 'No Dockerfile was found in the hook repository'\n )\n helpers.assert_version_default('docker', version)\n helpers.assert_no_additional_deps('docker', additional_dependencies)\n assert_docker_available()\n\n directory = repo_cmd_runner.path(\n helpers.environment_dir(ENVIRONMENT_DIR, 'default'),\n )\n\n # Docker doesn't really have relevant disk environment, but pre-commit\n # still needs to cleanup it's state files on failure\n with clean_path_on_failure(directory):\n build_docker_image(repo_cmd_runner, pull=True)\n os.mkdir(directory)\n\n\ndef run_hook(repo_cmd_runner, hook, file_args): # pragma: windows no cover\n assert_docker_available()\n # Rebuild the docker image in case it has gone missing, as many people do\n # automated cleanup of docker images.\n build_docker_image(repo_cmd_runner, pull=False)\n\n hook_cmd = helpers.to_cmd(hook)\n 
entry_executable, cmd_rest = hook_cmd[0], hook_cmd[1:]\n\n cmd = (\n 'docker', 'run',\n '--rm',\n '-u', '{}:{}'.format(os.getuid(), os.getgid()),\n '-v', '{}:/src:rw'.format(os.getcwd()),\n '--workdir', '/src',\n '--entrypoint', entry_executable,\n docker_tag(repo_cmd_runner)\n ) + cmd_rest\n\n return xargs(cmd, file_args)\n", "path": "pre_commit/languages/docker.py"}]}
1,565
156
gh_patches_debug_60679
rasdani/github-patches
git_diff
OCHA-DAP__hdx-ckan-1655
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Main Nav changes to accomodate "Feedback" button Updating the action based on discussions: 1. Remove `Contact` from main nav. 2. Between `About` and `Submit Data`, add a button `Feedback`. 3. While you are there, change `Submit Data` to `Share Data` (there's a later issue for that which this will close) Button style should be the same as the "Follow Us" button here, except gray, not blue: http://docs.hdx.rwlabs.org/get-involved/ Note that the megaphone icon shown below will not be used. No icon on the button. </issue> <code> [start of ckanext-hdx_theme/ckanext/hdx_theme/version.py] 1 hdx_version = 'v0.4.5' 2 [end of ckanext-hdx_theme/ckanext/hdx_theme/version.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/version.py b/ckanext-hdx_theme/ckanext/hdx_theme/version.py
--- a/ckanext-hdx_theme/ckanext/hdx_theme/version.py
+++ b/ckanext-hdx_theme/ckanext/hdx_theme/version.py
@@ -1 +1 @@
-hdx_version = 'v0.4.5'
+hdx_version = 'v0.4.6'
{"golden_diff": "diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/version.py b/ckanext-hdx_theme/ckanext/hdx_theme/version.py\n--- a/ckanext-hdx_theme/ckanext/hdx_theme/version.py\n+++ b/ckanext-hdx_theme/ckanext/hdx_theme/version.py\n@@ -1 +1 @@\n-hdx_version = 'v0.4.5'\n+hdx_version = 'v0.4.6'\n", "issue": "Main Nav changes to accomodate \"Feedback\" button\nUpdating the action based on discussions:\n1. Remove `Contact` from main nav. \n2. Between `About` and `Submit Data`, add a button `Feedback`. \n3. While you are there, change `Submit Data` to `Share Data` (there's a later issue for that which this will close)\n\nButton style should be the same as the \"Follow Us\" button here, except gray, not blue: http://docs.hdx.rwlabs.org/get-involved/\n\nNote that the megaphone icon shown below will not be used. No icon on the button.\n\n", "before_files": [{"content": "hdx_version = 'v0.4.5'\n", "path": "ckanext-hdx_theme/ckanext/hdx_theme/version.py"}]}
701
106
gh_patches_debug_17445
rasdani/github-patches
git_diff
facebookresearch__fairscale-78
You will be provided with a partial code base and an issue statement explaining a problem to resolve. <issue> Async broadcast request in OSS ## πŸš€ Feature Make sure that the broadcast request to sync shards are async, and not blocking the main thread ## Motivation OSS is currently too slow ## Pitch Easy fix, no compromise ## Alternatives Stay slow for no reason ## Additional context Clear bottleneck when tracing execution </issue> <code> [start of fairscale/optim/oss.py] 1 # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. 2 # 3 # This source code is licensed under the BSD license found in the 4 # LICENSE file in the root directory of this source tree. 5 6 import copy 7 from itertools import chain 8 import logging 9 from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Type 10 11 import torch 12 import torch.distributed as dist 13 from torch.optim import SGD, Optimizer 14 15 from .utils import broadcast_object, recursive_copy_to_device 16 17 if TYPE_CHECKING: # pragma: no cover 18 from torch.optim.optimizer import _params_t 19 else: 20 _params_t = Any 21 22 23 class OSS(Optimizer): 24 """Wraps an arbitrary :class:`optim.Optimizer <torch.optim.Optimizer>` 25 optimizer and shards its state as described by ZeRO_. 26 :: 27 opt = OSS(params, optim=torch.optim.Adam, lr=0.01) 28 29 .. _ZeRO: https://arxiv.org/abs/1910.02054 30 31 We use a greedy algorithm to pack a number of parameters 32 at each rank. Each parameter belongs to a single rank and 33 is not divided among rank. 34 35 After each rank completed their parameter update, they broadcast 36 the new version of the parameters to all other ranks to synchronize 37 the parameters for next round forward/backward computation. 38 39 Args: 40 params (list of tensors): 41 parameters to be optimized 42 Keyword Args: 43 optim (torch.nn.Optimizer): 44 optimizer to shard (default: SGD) 45 group (group): 46 torch.distributed group (default: group.WORLD) 47 """ 48 49 optim: Optimizer 50 in_super_constructor: bool 51 52 def __init__(self, params: _params_t, optim: Type[Optimizer] = SGD, group: Any = dist.group.WORLD, **defaults: Any): 53 # Hold all the model params in the root .param_groups 54 self.in_super_constructor = True 55 super().__init__(params, defaults) 56 self.in_super_constructor = False 57 58 # Build the wrapped optimizer, responsible for a shard of the params 59 self.group = group 60 self.rank = dist.get_rank(group) 61 split_param_groups = self.partition_parameters() 62 self.optim = optim(split_param_groups[self.rank], **defaults) 63 64 # Optional consolidated optimizer state 65 self._all_states: List[Dict[str, Any]] = [] 66 67 # Current device is set by the parameters allocated to this rank 68 self._device = split_param_groups[self.rank][0]["params"][0].device 69 70 # Sync local and global param_groups keys 71 for global_group, local_group in zip(self.param_groups, self.optim.param_groups): 72 for k, v in local_group.items(): 73 if k != "params": 74 global_group[k] = v 75 76 def partition_parameters(self) -> List[List[dict]]: 77 """Partitions parameters across distributed ranks. 78 79 Returns a list of param_groups (which is a list of dict) where each 80 element of the list contains the param_groups for a rank. Element 0 81 corresponds to rank 0, etc. We need all the ranks for the broadcast 82 inside step(). 
83 """ 84 world_size = dist.get_world_size(self.group) 85 param_groups: List[List] = [list() for _ in range(world_size)] 86 sizes = [0] * world_size 87 for param_group in self.param_groups: 88 param_lists: List[List] = [list() for _ in range(world_size)] 89 for param in param_group["params"]: 90 # Add this param to rank with smallest size. 91 rank = sizes.index(min(sizes)) 92 param_lists[rank].append(param) 93 sizes[rank] += param.numel() 94 for rank, params in enumerate(param_lists): 95 param_group_rank = copy.copy(param_group) 96 param_group_rank["params"] = params 97 param_groups[rank].append(param_group_rank) 98 return param_groups 99 100 # NOTE(msb) We add a kwargs in order to support Optimizer sub-classes that support extra kwargs. 101 # For example, the apex library contains fused optimizers with a step that supports extra kwargs. 102 def step(self, closure: Optional[Callable[[], float]] = None, **kwargs: Any) -> Optional[float]: 103 # Sync oss param_groups attributes in case they've been updated by a scheduler. 104 self._sync_param_groups() 105 106 # Run the optimizer step on this shard only 107 loss = self.optim.step(closure=closure, **kwargs) # type: ignore 108 109 # Sync all the states 110 for rank, param_groups in enumerate(self.partition_parameters()): 111 for param_group in param_groups: 112 for param in param_group["params"]: 113 dist.broadcast(tensor=param, src=rank, group=self.group) 114 return loss 115 116 def local_state_dict(self) -> dict: 117 """ Gets this rank's state_dict. """ 118 return self.optim.state_dict() 119 120 def consolidate_state_dict(self, recipient_rank: int = 0) -> None: 121 """ Update the consolidated state_dict list, one per rank. 122 123 This needs to be called on all replicas """ 124 125 # Sync lr and other attributes in case its been updated 126 self._sync_param_groups() 127 128 if self.rank == recipient_rank: 129 # Pull the sharded state from all the other replicas 130 # Store all the states in order, rank by rank 131 logging.debug("Pulling the sharded optimizer state from all replicas") 132 self._all_states = self._collect_sharded_states() 133 else: 134 # Acknowledge broadcasts, and send this rank's shard when needed 135 self._broadcast_state_dict() 136 137 def state_dict(self) -> Dict[str, Any]: 138 """ 139 Return the last known global optimizer state, which consist of a list of the shards. 140 141 NOTE: This is limited to the replica which was responsible for the consolidation. 142 The state may also not be up to date, depending on when `consolidate_state_dict` was last called. 143 """ 144 145 assert ( 146 len(self._all_states) > 0 147 ), "The optimizer state is not materialized, please call consolidate_state_dict on every replica beforehand" 148 149 # Flatten the param_groups, save the partition which logs the rank <> shard correspondence 150 partition: List[Tuple[int, int]] = [] 151 param_groups: List[Dict[Any, Any]] = [] 152 153 start = 0 154 for i, s in enumerate(self._all_states): 155 param_groups.extend(s["param_groups"]) 156 end = start + len(s["param_groups"]) 157 partition.append((start, end)) 158 start = end 159 160 return { 161 "state": [s["state"] for s in self._all_states], 162 "param_groups": param_groups, 163 "partition": partition, 164 } 165 166 def load_local_state_dict(self, state_dict: dict) -> None: 167 """ Loads this rank's state_dict. 
""" 168 169 self.optim.load_state_dict(state_dict) 170 171 # Workaround PyTorch bug that casts state (https://github.com/pytorch/pytorch/issues/43706) 172 # Copied from https://github.com/pytorch/fairseq/blob/v0.9.0/fairseq/optim/fp16_optimizer.py#L251-L268 173 groups = self.optim.param_groups 174 saved_groups = state_dict["param_groups"] 175 id_map = { 176 old_id: p 177 for old_id, p in zip(chain(*(g["params"] for g in saved_groups)), chain(*(g["params"] for g in groups))) 178 } 179 for k, v in state_dict["state"].items(): 180 if k in id_map: 181 param = id_map[k] 182 self.optim.state[param] = recursive_copy_to_device(v, non_blocking=True, device=param.device) 183 184 # Restore the global param_groups (the params themselves are already correct) 185 for global_group, local_group in zip(self.param_groups, groups): 186 for k, v in local_group.items(): 187 if k != "params": 188 global_group[k] = v 189 190 def load_state_dict(self, state_dict: Dict[str, Any]) -> None: 191 """ Restore the global parameter groups as well as the shard """ 192 193 # Get this optimizer's param_groups shard 194 param_groups = state_dict["param_groups"][ 195 state_dict["partition"][self.rank][0] : state_dict["partition"][self.rank][1] 196 ] 197 # Dispatch this rank's state dictionary to the wrapped shard optimizer 198 self.load_local_state_dict({"state": state_dict["state"][self.rank], "param_groups": param_groups}) 199 200 def add_param_group(self, param_group: dict) -> None: 201 super().add_param_group(param_group) 202 if not self.in_super_constructor: 203 param_groups = self.partition_parameters()[self.rank] 204 if len(param_groups) == len(self.optim.param_groups) + 1: 205 self.optim.add_param_group(param_groups[-1]) 206 207 def _sync_param_groups(self) -> None: 208 """Sync learning rate and other optimizer attributes (needed to support schedulers).""" 209 for global_group, local_group in zip(self.param_groups, self.optim.param_groups): 210 for k in local_group.keys(): 211 if k != "params": 212 # Params have been sharded and should not be synced here 213 local_group[k] = global_group[k] 214 215 def _collect_sharded_states(self) -> List[Dict[str, Any]]: 216 """ 217 Collect all the state shards, in CPU memory. 
218 """ 219 empty_buffer = torch.tensor([0], dtype=torch.uint8, device=self._device) 220 all_states: List[Dict[str, Any]] = [] 221 222 for rank in range(dist.get_world_size(group=self.group)): 223 if rank == self.rank: 224 logging.debug("Saving self state") 225 all_states.append( 226 recursive_copy_to_device(self.local_state_dict(), non_blocking=True, device=torch.device("cpu")) 227 ) 228 229 # Sync with other replicas 230 broadcast_object(empty_buffer, src_rank=rank, group=self.group, dist_device=self._device) 231 else: 232 # Fetch the optim state from the other replicas 233 logging.debug("Receiving state from rank %s ", rank) 234 replica_state = broadcast_object( 235 empty_buffer, src_rank=rank, group=self.group, dist_device=self._device 236 ) 237 238 all_states.append( 239 recursive_copy_to_device(replica_state, non_blocking=True, device=torch.device("cpu")) 240 ) 241 242 logging.debug("State from rank %s received", rank) 243 244 return all_states 245 246 def _broadcast_state_dict(self) -> None: 247 """ 248 Broadcast this rank's state shard, discard others 249 """ 250 empty_buffer = torch.tensor([0], dtype=torch.uint8, device=self._device) 251 252 for rank in range(dist.get_world_size(group=self.group)): 253 if rank == self.rank: 254 # Send the state to the reference replica 255 logging.debug( 256 "Sending the sharded optimizer state to the reference replica from rank %s", rank, 257 ) 258 broadcast_object(self.local_state_dict(), src_rank=rank, group=self.group, dist_device=self._device) 259 else: 260 # Discard this tensor/rank, broadcast necessary for syncing 261 logging.debug("Discarding broadcast from rank %s", rank) 262 broadcast_object(empty_buffer, src_rank=rank, group=self.group, dist_device=self._device) 263 [end of fairscale/optim/oss.py] </code> I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format. <patch> diff --git a/file.py b/file.py --- a/file.py +++ b/file.py @@ -1,27 +1,35 @@ def euclidean(a, b): - while b: - a, b = b, a % b - return a + if b == 0: + return a + return euclidean(b, a % b) def bresenham(x0, y0, x1, y1): points = [] dx = abs(x1 - x0) dy = abs(y1 - y0) - sx = 1 if x0 < x1 else -1 - sy = 1 if y0 < y1 else -1 - err = dx - dy + x, y = x0, y0 + sx = -1 if x0 > x1 else 1 + sy = -1 if y0 > y1 else 1 - while True: - points.append((x0, y0)) - if x0 == x1 and y0 == y1: - break - e2 = 2 * err - if e2 > -dy: - err -= dy - x0 += sx - if e2 < dx: - err += dx - y0 += sy + if dx > dy: + err = dx / 2.0 + while x != x1: + points.append((x, y)) + err -= dy + if err < 0: + y += sy + err += dx + x += sx + else: + err = dy / 2.0 + while y != y1: + points.append((x, y)) + err -= dx + if err < 0: + x += sx + err += dy + y += sy + + points.append((x, y)) return points </patch>
diff --git a/fairscale/optim/oss.py b/fairscale/optim/oss.py
--- a/fairscale/optim/oss.py
+++ b/fairscale/optim/oss.py
@@ -106,11 +106,14 @@
         # Run the optimizer step on this shard only
         loss = self.optim.step(closure=closure, **kwargs)  # type: ignore
 
-        # Sync all the states
+        # Sync all the states. Broadcast requests are issued async, we check completeness before moving on
+        requests = []
         for rank, param_groups in enumerate(self.partition_parameters()):
             for param_group in param_groups:
                 for param in param_group["params"]:
-                    dist.broadcast(tensor=param, src=rank, group=self.group)
+                    requests.append(dist.broadcast(tensor=param, src=rank, group=self.group, async_op=True))
+
+        _ = list(map(lambda x: x.wait(), requests))
         return loss
 
     def local_state_dict(self) -> dict:
{"golden_diff": "diff --git a/fairscale/optim/oss.py b/fairscale/optim/oss.py\n--- a/fairscale/optim/oss.py\n+++ b/fairscale/optim/oss.py\n@@ -106,11 +106,14 @@\n # Run the optimizer step on this shard only\n loss = self.optim.step(closure=closure, **kwargs) # type: ignore\n \n- # Sync all the states\n+ # Sync all the states. Broadcast requests are issued async, we check completeness before moving on\n+ requests = []\n for rank, param_groups in enumerate(self.partition_parameters()):\n for param_group in param_groups:\n for param in param_group[\"params\"]:\n- dist.broadcast(tensor=param, src=rank, group=self.group)\n+ requests.append(dist.broadcast(tensor=param, src=rank, group=self.group, async_op=True))\n+\n+ _ = list(map(lambda x: x.wait(), requests))\n return loss\n \n def local_state_dict(self) -> dict:\n", "issue": "Async broadcast request in OSS\n## \ud83d\ude80 Feature\r\nMake sure that the broadcast request to sync shards are async, and not blocking the main thread\r\n\r\n## Motivation\r\nOSS is currently too slow\r\n\r\n## Pitch\r\nEasy fix, no compromise\r\n\r\n## Alternatives\r\nStay slow for no reason\r\n\r\n## Additional context\r\nClear bottleneck when tracing execution\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.\n#\n# This source code is licensed under the BSD license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport copy\nfrom itertools import chain\nimport logging\nfrom typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Type\n\nimport torch\nimport torch.distributed as dist\nfrom torch.optim import SGD, Optimizer\n\nfrom .utils import broadcast_object, recursive_copy_to_device\n\nif TYPE_CHECKING: # pragma: no cover\n from torch.optim.optimizer import _params_t\nelse:\n _params_t = Any\n\n\nclass OSS(Optimizer):\n \"\"\"Wraps an arbitrary :class:`optim.Optimizer <torch.optim.Optimizer>`\n optimizer and shards its state as described by ZeRO_.\n ::\n opt = OSS(params, optim=torch.optim.Adam, lr=0.01)\n\n .. _ZeRO: https://arxiv.org/abs/1910.02054\n\n We use a greedy algorithm to pack a number of parameters\n at each rank. 
Each parameter belongs to a single rank and\n is not divided among rank.\n\n After each rank completed their parameter update, they broadcast\n the new version of the parameters to all other ranks to synchronize\n the parameters for next round forward/backward computation.\n\n Args:\n params (list of tensors):\n parameters to be optimized\n Keyword Args:\n optim (torch.nn.Optimizer):\n optimizer to shard (default: SGD)\n group (group):\n torch.distributed group (default: group.WORLD)\n \"\"\"\n\n optim: Optimizer\n in_super_constructor: bool\n\n def __init__(self, params: _params_t, optim: Type[Optimizer] = SGD, group: Any = dist.group.WORLD, **defaults: Any):\n # Hold all the model params in the root .param_groups\n self.in_super_constructor = True\n super().__init__(params, defaults)\n self.in_super_constructor = False\n\n # Build the wrapped optimizer, responsible for a shard of the params\n self.group = group\n self.rank = dist.get_rank(group)\n split_param_groups = self.partition_parameters()\n self.optim = optim(split_param_groups[self.rank], **defaults)\n\n # Optional consolidated optimizer state\n self._all_states: List[Dict[str, Any]] = []\n\n # Current device is set by the parameters allocated to this rank\n self._device = split_param_groups[self.rank][0][\"params\"][0].device\n\n # Sync local and global param_groups keys\n for global_group, local_group in zip(self.param_groups, self.optim.param_groups):\n for k, v in local_group.items():\n if k != \"params\":\n global_group[k] = v\n\n def partition_parameters(self) -> List[List[dict]]:\n \"\"\"Partitions parameters across distributed ranks.\n\n Returns a list of param_groups (which is a list of dict) where each\n element of the list contains the param_groups for a rank. Element 0\n corresponds to rank 0, etc. We need all the ranks for the broadcast\n inside step().\n \"\"\"\n world_size = dist.get_world_size(self.group)\n param_groups: List[List] = [list() for _ in range(world_size)]\n sizes = [0] * world_size\n for param_group in self.param_groups:\n param_lists: List[List] = [list() for _ in range(world_size)]\n for param in param_group[\"params\"]:\n # Add this param to rank with smallest size.\n rank = sizes.index(min(sizes))\n param_lists[rank].append(param)\n sizes[rank] += param.numel()\n for rank, params in enumerate(param_lists):\n param_group_rank = copy.copy(param_group)\n param_group_rank[\"params\"] = params\n param_groups[rank].append(param_group_rank)\n return param_groups\n\n # NOTE(msb) We add a kwargs in order to support Optimizer sub-classes that support extra kwargs.\n # For example, the apex library contains fused optimizers with a step that supports extra kwargs.\n def step(self, closure: Optional[Callable[[], float]] = None, **kwargs: Any) -> Optional[float]:\n # Sync oss param_groups attributes in case they've been updated by a scheduler.\n self._sync_param_groups()\n\n # Run the optimizer step on this shard only\n loss = self.optim.step(closure=closure, **kwargs) # type: ignore\n\n # Sync all the states\n for rank, param_groups in enumerate(self.partition_parameters()):\n for param_group in param_groups:\n for param in param_group[\"params\"]:\n dist.broadcast(tensor=param, src=rank, group=self.group)\n return loss\n\n def local_state_dict(self) -> dict:\n \"\"\" Gets this rank's state_dict. 
\"\"\"\n return self.optim.state_dict()\n\n def consolidate_state_dict(self, recipient_rank: int = 0) -> None:\n \"\"\" Update the consolidated state_dict list, one per rank.\n\n This needs to be called on all replicas \"\"\"\n\n # Sync lr and other attributes in case its been updated\n self._sync_param_groups()\n\n if self.rank == recipient_rank:\n # Pull the sharded state from all the other replicas\n # Store all the states in order, rank by rank\n logging.debug(\"Pulling the sharded optimizer state from all replicas\")\n self._all_states = self._collect_sharded_states()\n else:\n # Acknowledge broadcasts, and send this rank's shard when needed\n self._broadcast_state_dict()\n\n def state_dict(self) -> Dict[str, Any]:\n \"\"\"\n Return the last known global optimizer state, which consist of a list of the shards.\n\n NOTE: This is limited to the replica which was responsible for the consolidation.\n The state may also not be up to date, depending on when `consolidate_state_dict` was last called.\n \"\"\"\n\n assert (\n len(self._all_states) > 0\n ), \"The optimizer state is not materialized, please call consolidate_state_dict on every replica beforehand\"\n\n # Flatten the param_groups, save the partition which logs the rank <> shard correspondence\n partition: List[Tuple[int, int]] = []\n param_groups: List[Dict[Any, Any]] = []\n\n start = 0\n for i, s in enumerate(self._all_states):\n param_groups.extend(s[\"param_groups\"])\n end = start + len(s[\"param_groups\"])\n partition.append((start, end))\n start = end\n\n return {\n \"state\": [s[\"state\"] for s in self._all_states],\n \"param_groups\": param_groups,\n \"partition\": partition,\n }\n\n def load_local_state_dict(self, state_dict: dict) -> None:\n \"\"\" Loads this rank's state_dict. \"\"\"\n\n self.optim.load_state_dict(state_dict)\n\n # Workaround PyTorch bug that casts state (https://github.com/pytorch/pytorch/issues/43706)\n # Copied from https://github.com/pytorch/fairseq/blob/v0.9.0/fairseq/optim/fp16_optimizer.py#L251-L268\n groups = self.optim.param_groups\n saved_groups = state_dict[\"param_groups\"]\n id_map = {\n old_id: p\n for old_id, p in zip(chain(*(g[\"params\"] for g in saved_groups)), chain(*(g[\"params\"] for g in groups)))\n }\n for k, v in state_dict[\"state\"].items():\n if k in id_map:\n param = id_map[k]\n self.optim.state[param] = recursive_copy_to_device(v, non_blocking=True, device=param.device)\n\n # Restore the global param_groups (the params themselves are already correct)\n for global_group, local_group in zip(self.param_groups, groups):\n for k, v in local_group.items():\n if k != \"params\":\n global_group[k] = v\n\n def load_state_dict(self, state_dict: Dict[str, Any]) -> None:\n \"\"\" Restore the global parameter groups as well as the shard \"\"\"\n\n # Get this optimizer's param_groups shard\n param_groups = state_dict[\"param_groups\"][\n state_dict[\"partition\"][self.rank][0] : state_dict[\"partition\"][self.rank][1]\n ]\n # Dispatch this rank's state dictionary to the wrapped shard optimizer\n self.load_local_state_dict({\"state\": state_dict[\"state\"][self.rank], \"param_groups\": param_groups})\n\n def add_param_group(self, param_group: dict) -> None:\n super().add_param_group(param_group)\n if not self.in_super_constructor:\n param_groups = self.partition_parameters()[self.rank]\n if len(param_groups) == len(self.optim.param_groups) + 1:\n self.optim.add_param_group(param_groups[-1])\n\n def _sync_param_groups(self) -> None:\n \"\"\"Sync learning rate and other optimizer attributes 
(needed to support schedulers).\"\"\"\n for global_group, local_group in zip(self.param_groups, self.optim.param_groups):\n for k in local_group.keys():\n if k != \"params\":\n # Params have been sharded and should not be synced here\n local_group[k] = global_group[k]\n\n def _collect_sharded_states(self) -> List[Dict[str, Any]]:\n \"\"\"\n Collect all the state shards, in CPU memory.\n \"\"\"\n empty_buffer = torch.tensor([0], dtype=torch.uint8, device=self._device)\n all_states: List[Dict[str, Any]] = []\n\n for rank in range(dist.get_world_size(group=self.group)):\n if rank == self.rank:\n logging.debug(\"Saving self state\")\n all_states.append(\n recursive_copy_to_device(self.local_state_dict(), non_blocking=True, device=torch.device(\"cpu\"))\n )\n\n # Sync with other replicas\n broadcast_object(empty_buffer, src_rank=rank, group=self.group, dist_device=self._device)\n else:\n # Fetch the optim state from the other replicas\n logging.debug(\"Receiving state from rank %s \", rank)\n replica_state = broadcast_object(\n empty_buffer, src_rank=rank, group=self.group, dist_device=self._device\n )\n\n all_states.append(\n recursive_copy_to_device(replica_state, non_blocking=True, device=torch.device(\"cpu\"))\n )\n\n logging.debug(\"State from rank %s received\", rank)\n\n return all_states\n\n def _broadcast_state_dict(self) -> None:\n \"\"\"\n Broadcast this rank's state shard, discard others\n \"\"\"\n empty_buffer = torch.tensor([0], dtype=torch.uint8, device=self._device)\n\n for rank in range(dist.get_world_size(group=self.group)):\n if rank == self.rank:\n # Send the state to the reference replica\n logging.debug(\n \"Sending the sharded optimizer state to the reference replica from rank %s\", rank,\n )\n broadcast_object(self.local_state_dict(), src_rank=rank, group=self.group, dist_device=self._device)\n else:\n # Discard this tensor/rank, broadcast necessary for syncing\n logging.debug(\"Discarding broadcast from rank %s\", rank)\n broadcast_object(empty_buffer, src_rank=rank, group=self.group, dist_device=self._device)\n", "path": "fairscale/optim/oss.py"}]}
3,738
221