Dataset columns:

| column | type | value range |
| --- | --- | --- |
| problem_id | string | lengths 18–22 |
| source | string | 1 distinct value |
| task_type | string | 1 distinct value |
| in_source_id | string | lengths 13–58 |
| prompt | string | lengths 1.1k–25.4k |
| golden_diff | string | lengths 145–5.13k |
| verification_info | string | lengths 582–39.1k |
| num_tokens | int64 | 271–4.1k |
| num_tokens_diff | int64 | 47–1.02k |
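For orientation, the snippet below is a minimal sketch of how a dataset with this schema might be loaded and inspected with the Hugging Face `datasets` library. The Hub path `"rasdani/github-patches"` is only an assumption taken from the `source` column; substitute the actual dataset identifier and split if they differ.

```python
# Minimal sketch: load and inspect a dataset with the schema above.
# The Hub path and split are assumptions; adjust them to the real dataset location.
from datasets import load_dataset

ds = load_dataset("rasdani/github-patches", split="train")  # assumed path/split

row = ds[0]
print(row["problem_id"], row["in_source_id"], row["num_tokens"])
print(row["prompt"][:500])       # issue text plus relevant file contents
print(row["golden_diff"][:500])  # reference patch in git diff format
```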
Example record:

problem_id: gh_patches_debug_17970
source: rasdani/github-patches
task_type: git_diff
in_source_id: bookwyrm-social__bookwyrm-604
prompt:
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- link formatting not working properly I made a markdown link, but the "<a href" part was trimmed and garbled html remained --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `bookwyrm/views/status.py` Content: ``` 1 ''' what are we here for if not for posting ''' 2 import re 3 from django.contrib.auth.decorators import login_required 4 from django.http import HttpResponseBadRequest 5 from django.shortcuts import get_object_or_404, redirect 6 from django.utils.decorators import method_decorator 7 from django.views import View 8 from markdown import markdown 9 10 from bookwyrm import forms, models 11 from bookwyrm.sanitize_html import InputHtmlParser 12 from bookwyrm.settings import DOMAIN 13 from bookwyrm.status import create_notification, delete_status 14 from bookwyrm.utils import regex 15 from .helpers import handle_remote_webfinger 16 17 18 # pylint: disable= no-self-use 19 @method_decorator(login_required, name='dispatch') 20 class CreateStatus(View): 21 ''' the view for *posting* ''' 22 def post(self, request, status_type): 23 ''' create status of whatever type ''' 24 status_type = status_type[0].upper() + status_type[1:] 25 26 try: 27 form = getattr(forms, '%sForm' % status_type)(request.POST) 28 except AttributeError: 29 return HttpResponseBadRequest() 30 if not form.is_valid(): 31 return redirect(request.headers.get('Referer', '/')) 32 33 status = form.save(commit=False) 34 if not status.sensitive and status.content_warning: 35 # the cw text field remains populated when you click "remove" 36 status.content_warning = None 37 status.save(broadcast=False) 38 39 # inspect the text for user tags 40 content = status.content 41 for (mention_text, mention_user) in find_mentions(content): 42 # add them to status mentions fk 43 status.mention_users.add(mention_user) 44 45 # turn the mention into a link 46 content = re.sub( 47 r'%s([^@]|$)' % mention_text, 48 r'<a href="%s">%s</a>\g<1>' % \ 49 (mention_user.remote_id, mention_text), 50 content) 51 52 # add reply parent to mentions and notify 53 if status.reply_parent: 54 status.mention_users.add(status.reply_parent.user) 55 56 if status.reply_parent.user.local: 57 create_notification( 58 status.reply_parent.user, 59 'REPLY', 60 related_user=request.user, 61 related_status=status 62 ) 63 64 # deduplicate mentions 65 status.mention_users.set(set(status.mention_users.all())) 66 # create mention notifications 67 for mention_user in status.mention_users.all(): 68 if status.reply_parent and mention_user == status.reply_parent.user: 69 continue 70 if mention_user.local: 71 create_notification( 72 mention_user, 73 'MENTION', 74 related_user=request.user, 75 related_status=status 76 ) 77 78 # don't apply formatting to generated notes 79 if not isinstance(status, models.GeneratedNote): 80 status.content = to_markdown(content) 81 # do apply formatting to quotes 82 if hasattr(status, 'quote'): 83 status.quote = to_markdown(status.quote) 84 85 status.save(created=True) 86 return redirect(request.headers.get('Referer', '/')) 87 88 89 class DeleteStatus(View): 90 ''' tombstone that bad boy ''' 91 def post(self, request, status_id): 92 ''' delete and tombstone a status ''' 93 status = get_object_or_404(models.Status, id=status_id) 94 95 # don't let people delete other people's statuses 96 if status.user != request.user: 97 return HttpResponseBadRequest() 
98 99 # perform deletion 100 delete_status(status) 101 return redirect(request.headers.get('Referer', '/')) 102 103 def find_mentions(content): 104 ''' detect @mentions in raw status content ''' 105 for match in re.finditer(regex.strict_username, content): 106 username = match.group().strip().split('@')[1:] 107 if len(username) == 1: 108 # this looks like a local user (@user), fill in the domain 109 username.append(DOMAIN) 110 username = '@'.join(username) 111 112 mention_user = handle_remote_webfinger(username) 113 if not mention_user: 114 # we can ignore users we don't know about 115 continue 116 yield (match.group(), mention_user) 117 118 119 def format_links(content): 120 ''' detect and format links ''' 121 return re.sub( 122 r'([^(href=")]|^|\()(https?:\/\/(%s([\w\.\-_\/+&\?=:;,])*))' % \ 123 regex.domain, 124 r'\g<1><a href="\g<2>">\g<3></a>', 125 content) 126 127 def to_markdown(content): 128 ''' catch links and convert to markdown ''' 129 content = format_links(content) 130 content = markdown(content) 131 # sanitize resulting html 132 sanitizer = InputHtmlParser() 133 sanitizer.feed(content) 134 return sanitizer.get_output() 135 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/bookwyrm/views/status.py b/bookwyrm/views/status.py --- a/bookwyrm/views/status.py +++ b/bookwyrm/views/status.py @@ -48,7 +48,6 @@ r'<a href="%s">%s</a>\g<1>' % \ (mention_user.remote_id, mention_text), content) - # add reply parent to mentions and notify if status.reply_parent: status.mention_users.add(status.reply_parent.user) @@ -126,8 +125,8 @@ def to_markdown(content): ''' catch links and convert to markdown ''' - content = format_links(content) content = markdown(content) + content = format_links(content) # sanitize resulting html sanitizer = InputHtmlParser() sanitizer.feed(content)
{"golden_diff": "diff --git a/bookwyrm/views/status.py b/bookwyrm/views/status.py\n--- a/bookwyrm/views/status.py\n+++ b/bookwyrm/views/status.py\n@@ -48,7 +48,6 @@\n r'<a href=\"%s\">%s</a>\\g<1>' % \\\n (mention_user.remote_id, mention_text),\n content)\n-\n # add reply parent to mentions and notify\n if status.reply_parent:\n status.mention_users.add(status.reply_parent.user)\n@@ -126,8 +125,8 @@\n \n def to_markdown(content):\n ''' catch links and convert to markdown '''\n- content = format_links(content)\n content = markdown(content)\n+ content = format_links(content)\n # sanitize resulting html\n sanitizer = InputHtmlParser()\n sanitizer.feed(content)\n", "issue": "link formatting not working properly\nI made a markdown link, but the \"<a href\" part was trimmed and garbled html remained\n", "before_files": [{"content": "''' what are we here for if not for posting '''\nimport re\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponseBadRequest\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.utils.decorators import method_decorator\nfrom django.views import View\nfrom markdown import markdown\n\nfrom bookwyrm import forms, models\nfrom bookwyrm.sanitize_html import InputHtmlParser\nfrom bookwyrm.settings import DOMAIN\nfrom bookwyrm.status import create_notification, delete_status\nfrom bookwyrm.utils import regex\nfrom .helpers import handle_remote_webfinger\n\n\n# pylint: disable= no-self-use\n@method_decorator(login_required, name='dispatch')\nclass CreateStatus(View):\n ''' the view for *posting* '''\n def post(self, request, status_type):\n ''' create status of whatever type '''\n status_type = status_type[0].upper() + status_type[1:]\n\n try:\n form = getattr(forms, '%sForm' % status_type)(request.POST)\n except AttributeError:\n return HttpResponseBadRequest()\n if not form.is_valid():\n return redirect(request.headers.get('Referer', '/'))\n\n status = form.save(commit=False)\n if not status.sensitive and status.content_warning:\n # the cw text field remains populated when you click \"remove\"\n status.content_warning = None\n status.save(broadcast=False)\n\n # inspect the text for user tags\n content = status.content\n for (mention_text, mention_user) in find_mentions(content):\n # add them to status mentions fk\n status.mention_users.add(mention_user)\n\n # turn the mention into a link\n content = re.sub(\n r'%s([^@]|$)' % mention_text,\n r'<a href=\"%s\">%s</a>\\g<1>' % \\\n (mention_user.remote_id, mention_text),\n content)\n\n # add reply parent to mentions and notify\n if status.reply_parent:\n status.mention_users.add(status.reply_parent.user)\n\n if status.reply_parent.user.local:\n create_notification(\n status.reply_parent.user,\n 'REPLY',\n related_user=request.user,\n related_status=status\n )\n\n # deduplicate mentions\n status.mention_users.set(set(status.mention_users.all()))\n # create mention notifications\n for mention_user in status.mention_users.all():\n if status.reply_parent and mention_user == status.reply_parent.user:\n continue\n if mention_user.local:\n create_notification(\n mention_user,\n 'MENTION',\n related_user=request.user,\n related_status=status\n )\n\n # don't apply formatting to generated notes\n if not isinstance(status, models.GeneratedNote):\n status.content = to_markdown(content)\n # do apply formatting to quotes\n if hasattr(status, 'quote'):\n status.quote = to_markdown(status.quote)\n\n status.save(created=True)\n return redirect(request.headers.get('Referer', 
'/'))\n\n\nclass DeleteStatus(View):\n ''' tombstone that bad boy '''\n def post(self, request, status_id):\n ''' delete and tombstone a status '''\n status = get_object_or_404(models.Status, id=status_id)\n\n # don't let people delete other people's statuses\n if status.user != request.user:\n return HttpResponseBadRequest()\n\n # perform deletion\n delete_status(status)\n return redirect(request.headers.get('Referer', '/'))\n\ndef find_mentions(content):\n ''' detect @mentions in raw status content '''\n for match in re.finditer(regex.strict_username, content):\n username = match.group().strip().split('@')[1:]\n if len(username) == 1:\n # this looks like a local user (@user), fill in the domain\n username.append(DOMAIN)\n username = '@'.join(username)\n\n mention_user = handle_remote_webfinger(username)\n if not mention_user:\n # we can ignore users we don't know about\n continue\n yield (match.group(), mention_user)\n\n\ndef format_links(content):\n ''' detect and format links '''\n return re.sub(\n r'([^(href=\")]|^|\\()(https?:\\/\\/(%s([\\w\\.\\-_\\/+&\\?=:;,])*))' % \\\n regex.domain,\n r'\\g<1><a href=\"\\g<2>\">\\g<3></a>',\n content)\n\ndef to_markdown(content):\n ''' catch links and convert to markdown '''\n content = format_links(content)\n content = markdown(content)\n # sanitize resulting html\n sanitizer = InputHtmlParser()\n sanitizer.feed(content)\n return sanitizer.get_output()\n", "path": "bookwyrm/views/status.py"}], "after_files": [{"content": "''' what are we here for if not for posting '''\nimport re\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponseBadRequest\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.utils.decorators import method_decorator\nfrom django.views import View\nfrom markdown import markdown\n\nfrom bookwyrm import forms, models\nfrom bookwyrm.sanitize_html import InputHtmlParser\nfrom bookwyrm.settings import DOMAIN\nfrom bookwyrm.status import create_notification, delete_status\nfrom bookwyrm.utils import regex\nfrom .helpers import handle_remote_webfinger\n\n\n# pylint: disable= no-self-use\n@method_decorator(login_required, name='dispatch')\nclass CreateStatus(View):\n ''' the view for *posting* '''\n def post(self, request, status_type):\n ''' create status of whatever type '''\n status_type = status_type[0].upper() + status_type[1:]\n\n try:\n form = getattr(forms, '%sForm' % status_type)(request.POST)\n except AttributeError:\n return HttpResponseBadRequest()\n if not form.is_valid():\n return redirect(request.headers.get('Referer', '/'))\n\n status = form.save(commit=False)\n if not status.sensitive and status.content_warning:\n # the cw text field remains populated when you click \"remove\"\n status.content_warning = None\n status.save(broadcast=False)\n\n # inspect the text for user tags\n content = status.content\n for (mention_text, mention_user) in find_mentions(content):\n # add them to status mentions fk\n status.mention_users.add(mention_user)\n\n # turn the mention into a link\n content = re.sub(\n r'%s([^@]|$)' % mention_text,\n r'<a href=\"%s\">%s</a>\\g<1>' % \\\n (mention_user.remote_id, mention_text),\n content)\n # add reply parent to mentions and notify\n if status.reply_parent:\n status.mention_users.add(status.reply_parent.user)\n\n if status.reply_parent.user.local:\n create_notification(\n status.reply_parent.user,\n 'REPLY',\n related_user=request.user,\n related_status=status\n )\n\n # deduplicate mentions\n 
status.mention_users.set(set(status.mention_users.all()))\n # create mention notifications\n for mention_user in status.mention_users.all():\n if status.reply_parent and mention_user == status.reply_parent.user:\n continue\n if mention_user.local:\n create_notification(\n mention_user,\n 'MENTION',\n related_user=request.user,\n related_status=status\n )\n\n # don't apply formatting to generated notes\n if not isinstance(status, models.GeneratedNote):\n status.content = to_markdown(content)\n # do apply formatting to quotes\n if hasattr(status, 'quote'):\n status.quote = to_markdown(status.quote)\n\n status.save(created=True)\n return redirect(request.headers.get('Referer', '/'))\n\n\nclass DeleteStatus(View):\n ''' tombstone that bad boy '''\n def post(self, request, status_id):\n ''' delete and tombstone a status '''\n status = get_object_or_404(models.Status, id=status_id)\n\n # don't let people delete other people's statuses\n if status.user != request.user:\n return HttpResponseBadRequest()\n\n # perform deletion\n delete_status(status)\n return redirect(request.headers.get('Referer', '/'))\n\ndef find_mentions(content):\n ''' detect @mentions in raw status content '''\n for match in re.finditer(regex.strict_username, content):\n username = match.group().strip().split('@')[1:]\n if len(username) == 1:\n # this looks like a local user (@user), fill in the domain\n username.append(DOMAIN)\n username = '@'.join(username)\n\n mention_user = handle_remote_webfinger(username)\n if not mention_user:\n # we can ignore users we don't know about\n continue\n yield (match.group(), mention_user)\n\n\ndef format_links(content):\n ''' detect and format links '''\n return re.sub(\n r'([^(href=\")]|^|\\()(https?:\\/\\/(%s([\\w\\.\\-_\\/+&\\?=:;,])*))' % \\\n regex.domain,\n r'\\g<1><a href=\"\\g<2>\">\\g<3></a>',\n content)\n\ndef to_markdown(content):\n ''' catch links and convert to markdown '''\n content = markdown(content)\n content = format_links(content)\n # sanitize resulting html\n sanitizer = InputHtmlParser()\n sanitizer.feed(content)\n return sanitizer.get_output()\n", "path": "bookwyrm/views/status.py"}]}
num_tokens: 1,565
num_tokens_diff: 177
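The `verification_info` field of each record packs the issue text, the golden diff, and the pre- and post-patch file contents into a single JSON-encoded string with the keys `golden_diff`, `issue`, `before_files`, and `after_files`, where each file entry carries `path` and `content`. A minimal sketch of unpacking it, assuming `row` is one record such as `ds[0]` from the loading sketch above:

```python
import json

# `row` is one dataset record; `verification_info` is a JSON-encoded string.
info = json.loads(row["verification_info"])

before = {f["path"]: f["content"] for f in info["before_files"]}
after = {f["path"]: f["content"] for f in info["after_files"]}

# Report which files the reference patch actually changes.
for path in before:
    status = "patched" if before[path] != after.get(path, "") else "unchanged"
    print(path, status)

# The embedded diff mirrors the top-level golden_diff column.
print(info["golden_diff"][:300])
```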
Example record:

problem_id: gh_patches_debug_20485
source: rasdani/github-patches
task_type: git_diff
in_source_id: celery__celery-6457
prompt:
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- workdir option no longer works after upgrade to 5.0.1 <!-- Please fill this template entirely and do not erase parts of it. We reserve the right to close without a response bug reports which are incomplete. --> # Checklist <!-- To check an item on the list replace [ ] with [x]. --> - [x] I have verified that the issue exists against the `master` branch of Celery. - [x] This has already been asked to the [discussion group](https://groups.google.com/forum/#!forum/celery-users) first. - [x] I have read the relevant section in the [contribution guide](http://docs.celeryproject.org/en/latest/contributing.html#other-bugs) on reporting bugs. - [x] I have checked the [issues list](https://github.com/celery/celery/issues?q=is%3Aissue+label%3A%22Issue+Type%3A+Bug+Report%22+-label%3A%22Category%3A+Documentation%22) for similar or identical bug reports. - [x] I have checked the [pull requests list](https://github.com/celery/celery/pulls?q=is%3Apr+label%3A%22PR+Type%3A+Bugfix%22+-label%3A%22Category%3A+Documentation%22) for existing proposed fixes. - [x] I have checked the [commit log](https://github.com/celery/celery/commits/master) to find out if the bug was already fixed in the master branch. - [x] I have included all related issues and possible duplicate issues in this issue (If there are none, check this box anyway). ## Mandatory Debugging Information - [] I have included the output of ``celery -A proj report`` in the issue. (if you are not able to do this, then at least specify the Celery version affected). - [x] I have verified that the issue exists against the `master` branch of Celery. - [x] I have included the contents of ``pip freeze`` in the issue. - [x] I have included all the versions of all the external dependencies required to reproduce this bug. ## Optional Debugging Information <!-- Try some of the below if you think they are relevant. It will help us figure out the scope of the bug and how many users it affects. --> - [ ] I have tried reproducing the issue on more than one Python version and/or implementation. - [ ] I have tried reproducing the issue on more than one message broker and/or result backend. - [ ] I have tried reproducing the issue on more than one version of the message broker and/or result backend. - [ ] I have tried reproducing the issue on more than one operating system. - [ ] I have tried reproducing the issue on more than one workers pool. - [ ] I have tried reproducing the issue with autoscaling, retries, ETA/Countdown & rate limits disabled. - [x] I have tried reproducing the issue after downgrading and/or upgrading Celery and its dependencies. ## Related Issues and Possible Duplicates <!-- Please make sure to search and mention any related issues or possible duplicates to this issue as requested by the checklist above. This may or may not include issues in other repositories that the Celery project maintains or other repositories that are dependencies of Celery. 
If you don't know how to mention issues, please refer to Github's documentation on the subject: https://help.github.com/en/articles/autolinked-references-and-urls#issues-and-pull-requests --> #### Related Issues - None #### Possible Duplicates - None ## Environment & Settings <!-- Include the contents of celery --version below --> **Celery version**: <!-- Include the output of celery -A proj report below --> <details> <summary><b><code>celery report</code> Output:</b></summary> <p> software -> celery:5.0.1 (singularity) kombu:5.0.2 py:3.8.6 billiard:3.6.3.0 redis:3.5.3 platform -> system:Linux arch:64bit, ELF kernel version:4.14.200-155.322.amzn2.x86_64 imp:CPython loader -> celery.loaders.app.AppLoader settings -> transport:redis results:**************** </p> </details> # Steps to Reproduce ## Required Dependencies <!-- Please fill the required dependencies to reproduce this issue --> * **Minimal Python Version**: N/A or Unknown * **Minimal Celery Version**: 5.0.1 * **Minimal Kombu Version**: N/A or Unknown * **Minimal Broker Version**: N/A or Unknown * **Minimal Result Backend Version**: N/A or Unknown * **Minimal OS and/or Kernel Version**: N/A or Unknown * **Minimal Broker Client Version**: N/A or Unknown * **Minimal Result Backend Client Version**: N/A or Unknown ### Python Packages <!-- Please fill the contents of pip freeze below --> <details> <summary><b><code>pip freeze</code> Output:</b></summary> <p> ``` ``` </p> </details> ### Other Dependencies <!-- Please provide system dependencies, configuration files and other dependency information if applicable --> <details> <p> N/A </p> </details> ## Minimally Reproducible Test Case <!-- Please provide a reproducible test case. Refer to the Reporting Bugs section in our contribution guide. We prefer submitting test cases in the form of a PR to our integration test suite. If you can provide one, please mention the PR number below. If not, please attach the most minimal code example required to reproduce the issue below. If the test case is too large, please include a link to a gist or a repository below. --> <details> <p> ```python ``` </p> </details> # Expected Behavior <!-- Describe in detail what you expect to happen --> I think we are supposed to be able to run celery multi start in other directories with --workdir option passed. # Actual Behavior <!-- Describe in detail what actually happened. Please include a backtrace and surround it with triple backticks (```). In addition, include the Celery daemon logs, the broker logs, the result backend logs and system logs below if they will help us debug the issue. --> After upgrading to 5.0.1 from 4.x, the celery multi start command stopped working when not running from the workdir, even with workdir option passed in the command. The daemon service could not start as a result. /opt/Cranberry/venv/bin/celery multi start worker1 --workdir=/opt/Cranberry/ --app=Cranberry Error from the command above: Error: Invalid value for '-A' / '--app': Unable to load celery application. Module 'cranberry' has no attribute 'celery' > [email protected]: * Child terminated with exit code 2 FAILED I inserted print('Workdir: ', workdir) at: https://github.com/celery/celery/blob/7c3da03a07882ca86b801ad78dd509a67cba60af/celery/bin/celery.py#L123 And it prints: Workdir: None Any help on this is much appreciated! Updates 14:50: Tried to debug it a little more and it seems multi ends up calling subprocess with "celery --app=Cranberry worker --workdir=/opt/Cranberry/ ..." 
The command goes to celery.bin.celery.APP.convert. I tried to print ctx.params there, and notice that the --workdir param only shows up when it's before --app in command: > (venv) 2 ec2-user@ip:/opt$ celery --app=Cranberry --workdir=/opt/Cranberry/ worker > app convert: Cranberry {} > Usage: celery [OPTIONS] COMMAND [ARGS]... > > Error: Invalid value for '-A' / '--app': > Unable to load celery application. > The module Cranberry was not found. > > (venv) 2 ec2-user@:/opt$ celery --workdir=/opt/Cranberry/ --app=Cranberry worker > app convert: Cranberry {'workdir': '/opt/Cranberry/'} > Usage: celery [OPTIONS] COMMAND [ARGS]... > > Error: Invalid value for '-A' / '--app': And this params does not seem to be used in os.chdir or get passed to find_app. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `celery/bin/celery.py` Content: ``` 1 """Celery Command Line Interface.""" 2 import os 3 import traceback 4 5 import click 6 import click.exceptions 7 from click.types import ParamType 8 from click_didyoumean import DYMGroup 9 10 from celery import VERSION_BANNER 11 from celery.app.utils import find_app 12 from celery.bin.amqp import amqp 13 from celery.bin.base import CeleryCommand, CeleryOption, CLIContext 14 from celery.bin.beat import beat 15 from celery.bin.call import call 16 from celery.bin.control import control, inspect, status 17 from celery.bin.events import events 18 from celery.bin.graph import graph 19 from celery.bin.list import list_ 20 from celery.bin.logtool import logtool 21 from celery.bin.migrate import migrate 22 from celery.bin.multi import multi 23 from celery.bin.purge import purge 24 from celery.bin.result import result 25 from celery.bin.shell import shell 26 from celery.bin.upgrade import upgrade 27 from celery.bin.worker import worker 28 29 UNABLE_TO_LOAD_APP_MODULE_NOT_FOUND = click.style(""" 30 Unable to load celery application. 31 The module {0} was not found.""", fg='red') 32 33 UNABLE_TO_LOAD_APP_ERROR_OCCURRED = click.style(""" 34 Unable to load celery application. 35 While trying to load the module {0} the following error occurred: 36 {1}""", fg='red') 37 38 UNABLE_TO_LOAD_APP_APP_MISSING = click.style(""" 39 Unable to load celery application. 
40 {0}""") 41 42 43 class App(ParamType): 44 """Application option.""" 45 46 name = "application" 47 48 def convert(self, value, param, ctx): 49 try: 50 return find_app(value) 51 except ModuleNotFoundError as e: 52 if e.name != value: 53 exc = traceback.format_exc() 54 self.fail( 55 UNABLE_TO_LOAD_APP_ERROR_OCCURRED.format(value, exc) 56 ) 57 self.fail(UNABLE_TO_LOAD_APP_MODULE_NOT_FOUND.format(e.name)) 58 except AttributeError as e: 59 attribute_name = e.args[0].capitalize() 60 self.fail(UNABLE_TO_LOAD_APP_APP_MISSING.format(attribute_name)) 61 except Exception: 62 exc = traceback.format_exc() 63 self.fail( 64 UNABLE_TO_LOAD_APP_ERROR_OCCURRED.format(value, exc) 65 ) 66 67 68 APP = App() 69 70 71 @click.group(cls=DYMGroup, invoke_without_command=True) 72 @click.option('-A', 73 '--app', 74 envvar='APP', 75 cls=CeleryOption, 76 type=APP, 77 help_group="Global Options") 78 @click.option('-b', 79 '--broker', 80 envvar='BROKER_URL', 81 cls=CeleryOption, 82 help_group="Global Options") 83 @click.option('--result-backend', 84 envvar='RESULT_BACKEND', 85 cls=CeleryOption, 86 help_group="Global Options") 87 @click.option('--loader', 88 envvar='LOADER', 89 cls=CeleryOption, 90 help_group="Global Options") 91 @click.option('--config', 92 envvar='CONFIG_MODULE', 93 cls=CeleryOption, 94 help_group="Global Options") 95 @click.option('--workdir', 96 cls=CeleryOption, 97 help_group="Global Options") 98 @click.option('-C', 99 '--no-color', 100 envvar='NO_COLOR', 101 is_flag=True, 102 cls=CeleryOption, 103 help_group="Global Options") 104 @click.option('-q', 105 '--quiet', 106 is_flag=True, 107 cls=CeleryOption, 108 help_group="Global Options") 109 @click.option('--version', 110 cls=CeleryOption, 111 is_flag=True, 112 help_group="Global Options") 113 @click.pass_context 114 def celery(ctx, app, broker, result_backend, loader, config, workdir, 115 no_color, quiet, version): 116 """Celery command entrypoint.""" 117 if version: 118 click.echo(VERSION_BANNER) 119 ctx.exit() 120 elif ctx.invoked_subcommand is None: 121 click.echo(ctx.get_help()) 122 ctx.exit() 123 124 if workdir: 125 os.chdir(workdir) 126 if loader: 127 # Default app takes loader from this env (Issue #1066). 
128 os.environ['CELERY_LOADER'] = loader 129 if broker: 130 os.environ['CELERY_BROKER_URL'] = broker 131 if result_backend: 132 os.environ['CELERY_RESULT_BACKEND'] = result_backend 133 if config: 134 os.environ['CELERY_CONFIG_MODULE'] = config 135 ctx.obj = CLIContext(app=app, no_color=no_color, workdir=workdir, 136 quiet=quiet) 137 138 # User options 139 worker.params.extend(ctx.obj.app.user_options.get('worker', [])) 140 beat.params.extend(ctx.obj.app.user_options.get('beat', [])) 141 events.params.extend(ctx.obj.app.user_options.get('events', [])) 142 143 144 @celery.command(cls=CeleryCommand) 145 @click.pass_context 146 def report(ctx): 147 """Shows information useful to include in bug-reports.""" 148 app = ctx.obj.app 149 app.loader.import_default_modules() 150 ctx.obj.echo(app.bugreport()) 151 152 153 celery.add_command(purge) 154 celery.add_command(call) 155 celery.add_command(beat) 156 celery.add_command(list_) 157 celery.add_command(result) 158 celery.add_command(migrate) 159 celery.add_command(status) 160 celery.add_command(worker) 161 celery.add_command(events) 162 celery.add_command(inspect) 163 celery.add_command(control) 164 celery.add_command(graph) 165 celery.add_command(upgrade) 166 celery.add_command(logtool) 167 celery.add_command(amqp) 168 celery.add_command(shell) 169 celery.add_command(multi) 170 171 # Monkey-patch click to display a custom error 172 # when -A or --app are used as sub-command options instead of as options 173 # of the global command. 174 175 previous_show_implementation = click.exceptions.NoSuchOption.show 176 177 WRONG_APP_OPTION_USAGE_MESSAGE = """You are using `{option_name}` as an option of the {info_name} sub-command: 178 celery {info_name} {option_name} celeryapp <...> 179 180 The support for this usage was removed in Celery 5.0. Instead you should use `{option_name}` as a global option: 181 celery {option_name} celeryapp {info_name} <...>""" 182 183 184 def _show(self, file=None): 185 if self.option_name in ('-A', '--app'): 186 self.ctx.obj.error( 187 WRONG_APP_OPTION_USAGE_MESSAGE.format( 188 option_name=self.option_name, 189 info_name=self.ctx.info_name), 190 fg='red' 191 ) 192 previous_show_implementation(self, file=file) 193 194 195 click.exceptions.NoSuchOption.show = _show 196 197 198 def main() -> int: 199 """Start celery umbrella command. 200 201 This function is the main entrypoint for the CLI. 202 203 :return: The exit code of the CLI. 204 """ 205 return celery(auto_envvar_prefix="CELERY") 206 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/celery/bin/celery.py b/celery/bin/celery.py --- a/celery/bin/celery.py +++ b/celery/bin/celery.py @@ -1,5 +1,6 @@ """Celery Command Line Interface.""" import os +import pathlib import traceback import click @@ -94,6 +95,9 @@ help_group="Global Options") @click.option('--workdir', cls=CeleryOption, + type=pathlib.Path, + callback=lambda _, __, wd: os.chdir(wd) if wd else None, + is_eager=True, help_group="Global Options") @click.option('-C', '--no-color', @@ -121,8 +125,6 @@ click.echo(ctx.get_help()) ctx.exit() - if workdir: - os.chdir(workdir) if loader: # Default app takes loader from this env (Issue #1066). os.environ['CELERY_LOADER'] = loader
{"golden_diff": "diff --git a/celery/bin/celery.py b/celery/bin/celery.py\n--- a/celery/bin/celery.py\n+++ b/celery/bin/celery.py\n@@ -1,5 +1,6 @@\n \"\"\"Celery Command Line Interface.\"\"\"\n import os\n+import pathlib\n import traceback\n \n import click\n@@ -94,6 +95,9 @@\n help_group=\"Global Options\")\n @click.option('--workdir',\n cls=CeleryOption,\n+ type=pathlib.Path,\n+ callback=lambda _, __, wd: os.chdir(wd) if wd else None,\n+ is_eager=True,\n help_group=\"Global Options\")\n @click.option('-C',\n '--no-color',\n@@ -121,8 +125,6 @@\n click.echo(ctx.get_help())\n ctx.exit()\n \n- if workdir:\n- os.chdir(workdir)\n if loader:\n # Default app takes loader from this env (Issue #1066).\n os.environ['CELERY_LOADER'] = loader\n", "issue": "workdir option no longer works after upgrade to 5.0.1\n<!--\r\nPlease fill this template entirely and do not erase parts of it.\r\nWe reserve the right to close without a response\r\nbug reports which are incomplete.\r\n-->\r\n# Checklist\r\n<!--\r\nTo check an item on the list replace [ ] with [x].\r\n-->\r\n- [x] I have verified that the issue exists against the `master` branch of Celery.\r\n- [x] This has already been asked to the [discussion group](https://groups.google.com/forum/#!forum/celery-users) first.\r\n- [x] I have read the relevant section in the\r\n [contribution guide](http://docs.celeryproject.org/en/latest/contributing.html#other-bugs)\r\n on reporting bugs.\r\n- [x] I have checked the [issues list](https://github.com/celery/celery/issues?q=is%3Aissue+label%3A%22Issue+Type%3A+Bug+Report%22+-label%3A%22Category%3A+Documentation%22)\r\n for similar or identical bug reports.\r\n- [x] I have checked the [pull requests list](https://github.com/celery/celery/pulls?q=is%3Apr+label%3A%22PR+Type%3A+Bugfix%22+-label%3A%22Category%3A+Documentation%22)\r\n for existing proposed fixes.\r\n- [x] I have checked the [commit log](https://github.com/celery/celery/commits/master)\r\n to find out if the bug was already fixed in the master branch.\r\n- [x] I have included all related issues and possible duplicate issues\r\n in this issue (If there are none, check this box anyway).\r\n\r\n## Mandatory Debugging Information\r\n\r\n- [] I have included the output of ``celery -A proj report`` in the issue.\r\n (if you are not able to do this, then at least specify the Celery\r\n version affected).\r\n- [x] I have verified that the issue exists against the `master` branch of Celery.\r\n- [x] I have included the contents of ``pip freeze`` in the issue.\r\n- [x] I have included all the versions of all the external dependencies required\r\n to reproduce this bug.\r\n\r\n## Optional Debugging Information\r\n<!--\r\nTry some of the below if you think they are relevant.\r\nIt will help us figure out the scope of the bug and how many users it affects.\r\n-->\r\n- [ ] I have tried reproducing the issue on more than one Python version\r\n and/or implementation.\r\n- [ ] I have tried reproducing the issue on more than one message broker and/or\r\n result backend.\r\n- [ ] I have tried reproducing the issue on more than one version of the message\r\n broker and/or result backend.\r\n- [ ] I have tried reproducing the issue on more than one operating system.\r\n- [ ] I have tried reproducing the issue on more than one workers pool.\r\n- [ ] I have tried reproducing the issue with autoscaling, retries,\r\n ETA/Countdown & rate limits disabled.\r\n- [x] I have tried reproducing the issue after downgrading\r\n and/or upgrading Celery and its dependencies.\r\n\r\n## 
Related Issues and Possible Duplicates\r\n<!--\r\nPlease make sure to search and mention any related issues\r\nor possible duplicates to this issue as requested by the checklist above.\r\n\r\nThis may or may not include issues in other repositories that the Celery project\r\nmaintains or other repositories that are dependencies of Celery.\r\n\r\nIf you don't know how to mention issues, please refer to Github's documentation\r\non the subject: https://help.github.com/en/articles/autolinked-references-and-urls#issues-and-pull-requests\r\n-->\r\n\r\n#### Related Issues\r\n\r\n- None\r\n\r\n#### Possible Duplicates\r\n\r\n- None\r\n\r\n## Environment & Settings\r\n<!-- Include the contents of celery --version below -->\r\n**Celery version**:\r\n<!-- Include the output of celery -A proj report below -->\r\n<details>\r\n<summary><b><code>celery report</code> Output:</b></summary>\r\n<p>\r\n\r\nsoftware -> celery:5.0.1 (singularity) kombu:5.0.2 py:3.8.6\r\n billiard:3.6.3.0 redis:3.5.3\r\nplatform -> system:Linux arch:64bit, ELF\r\n kernel version:4.14.200-155.322.amzn2.x86_64 imp:CPython\r\nloader -> celery.loaders.app.AppLoader\r\nsettings -> transport:redis results:****************\r\n\r\n</p>\r\n</details>\r\n\r\n# Steps to Reproduce\r\n\r\n## Required Dependencies\r\n<!-- Please fill the required dependencies to reproduce this issue -->\r\n* **Minimal Python Version**: N/A or Unknown\r\n* **Minimal Celery Version**: 5.0.1\r\n* **Minimal Kombu Version**: N/A or Unknown\r\n* **Minimal Broker Version**: N/A or Unknown\r\n* **Minimal Result Backend Version**: N/A or Unknown\r\n* **Minimal OS and/or Kernel Version**: N/A or Unknown\r\n* **Minimal Broker Client Version**: N/A or Unknown\r\n* **Minimal Result Backend Client Version**: N/A or Unknown\r\n\r\n### Python Packages\r\n<!-- Please fill the contents of pip freeze below -->\r\n<details>\r\n<summary><b><code>pip freeze</code> Output:</b></summary>\r\n<p>\r\n\r\n```\r\n```\r\n\r\n</p>\r\n</details>\r\n\r\n### Other Dependencies\r\n<!--\r\nPlease provide system dependencies, configuration files\r\nand other dependency information if applicable\r\n-->\r\n<details>\r\n<p>\r\nN/A\r\n</p>\r\n</details>\r\n\r\n## Minimally Reproducible Test Case\r\n<!--\r\nPlease provide a reproducible test case.\r\nRefer to the Reporting Bugs section in our contribution guide.\r\n\r\nWe prefer submitting test cases in the form of a PR to our integration test suite.\r\nIf you can provide one, please mention the PR number below.\r\nIf not, please attach the most minimal code example required to reproduce the issue below.\r\nIf the test case is too large, please include a link to a gist or a repository below.\r\n-->\r\n\r\n<details>\r\n<p>\r\n\r\n```python\r\n```\r\n\r\n</p>\r\n</details>\r\n\r\n# Expected Behavior\r\n<!-- Describe in detail what you expect to happen -->\r\nI think we are supposed to be able to run celery multi start in other directories with --workdir option passed.\r\n\r\n# Actual Behavior\r\n<!--\r\nDescribe in detail what actually happened.\r\nPlease include a backtrace and surround it with triple backticks (```).\r\nIn addition, include the Celery daemon logs, the broker logs,\r\nthe result backend logs and system logs below if they will help us debug\r\nthe issue.\r\n-->\r\nAfter upgrading to 5.0.1 from 4.x, the celery multi start command stopped working when not running from the workdir, even with workdir option passed in the command. 
The daemon service could not start as a result.\r\n\r\n/opt/Cranberry/venv/bin/celery multi start worker1 --workdir=/opt/Cranberry/ --app=Cranberry\r\n\r\nError from the command above:\r\nError: Invalid value for '-A' / '--app':\r\nUnable to load celery application.\r\nModule 'cranberry' has no attribute 'celery'\r\n > [email protected]: * Child terminated with exit code 2\r\nFAILED\r\n\r\nI inserted print('Workdir: ', workdir) at: https://github.com/celery/celery/blob/7c3da03a07882ca86b801ad78dd509a67cba60af/celery/bin/celery.py#L123 \r\nAnd it prints:\r\nWorkdir: None\r\n\r\nAny help on this is much appreciated!\r\n\r\nUpdates 14:50:\r\nTried to debug it a little more and it seems multi ends up calling subprocess with \"celery --app=Cranberry worker --workdir=/opt/Cranberry/ ...\"\r\nThe command goes to celery.bin.celery.APP.convert.\r\nI tried to print ctx.params there, and notice that the --workdir param only shows up when it's before --app in command:\r\n\r\n> (venv) 2 ec2-user@ip:/opt$ celery --app=Cranberry --workdir=/opt/Cranberry/ worker\r\n> app convert: Cranberry {}\r\n> Usage: celery [OPTIONS] COMMAND [ARGS]...\r\n> \r\n> Error: Invalid value for '-A' / '--app':\r\n> Unable to load celery application.\r\n> The module Cranberry was not found.\r\n> \r\n> (venv) 2 ec2-user@:/opt$ celery --workdir=/opt/Cranberry/ --app=Cranberry worker\r\n> app convert: Cranberry {'workdir': '/opt/Cranberry/'}\r\n> Usage: celery [OPTIONS] COMMAND [ARGS]...\r\n> \r\n> Error: Invalid value for '-A' / '--app':\r\n\r\nAnd this params does not seem to be used in os.chdir or get passed to find_app.\n", "before_files": [{"content": "\"\"\"Celery Command Line Interface.\"\"\"\nimport os\nimport traceback\n\nimport click\nimport click.exceptions\nfrom click.types import ParamType\nfrom click_didyoumean import DYMGroup\n\nfrom celery import VERSION_BANNER\nfrom celery.app.utils import find_app\nfrom celery.bin.amqp import amqp\nfrom celery.bin.base import CeleryCommand, CeleryOption, CLIContext\nfrom celery.bin.beat import beat\nfrom celery.bin.call import call\nfrom celery.bin.control import control, inspect, status\nfrom celery.bin.events import events\nfrom celery.bin.graph import graph\nfrom celery.bin.list import list_\nfrom celery.bin.logtool import logtool\nfrom celery.bin.migrate import migrate\nfrom celery.bin.multi import multi\nfrom celery.bin.purge import purge\nfrom celery.bin.result import result\nfrom celery.bin.shell import shell\nfrom celery.bin.upgrade import upgrade\nfrom celery.bin.worker import worker\n\nUNABLE_TO_LOAD_APP_MODULE_NOT_FOUND = click.style(\"\"\"\nUnable to load celery application.\nThe module {0} was not found.\"\"\", fg='red')\n\nUNABLE_TO_LOAD_APP_ERROR_OCCURRED = click.style(\"\"\"\nUnable to load celery application.\nWhile trying to load the module {0} the following error occurred:\n{1}\"\"\", fg='red')\n\nUNABLE_TO_LOAD_APP_APP_MISSING = click.style(\"\"\"\nUnable to load celery application.\n{0}\"\"\")\n\n\nclass App(ParamType):\n \"\"\"Application option.\"\"\"\n\n name = \"application\"\n\n def convert(self, value, param, ctx):\n try:\n return find_app(value)\n except ModuleNotFoundError as e:\n if e.name != value:\n exc = traceback.format_exc()\n self.fail(\n UNABLE_TO_LOAD_APP_ERROR_OCCURRED.format(value, exc)\n )\n self.fail(UNABLE_TO_LOAD_APP_MODULE_NOT_FOUND.format(e.name))\n except AttributeError as e:\n attribute_name = e.args[0].capitalize()\n self.fail(UNABLE_TO_LOAD_APP_APP_MISSING.format(attribute_name))\n except Exception:\n exc = 
traceback.format_exc()\n self.fail(\n UNABLE_TO_LOAD_APP_ERROR_OCCURRED.format(value, exc)\n )\n\n\nAPP = App()\n\n\[email protected](cls=DYMGroup, invoke_without_command=True)\[email protected]('-A',\n '--app',\n envvar='APP',\n cls=CeleryOption,\n type=APP,\n help_group=\"Global Options\")\[email protected]('-b',\n '--broker',\n envvar='BROKER_URL',\n cls=CeleryOption,\n help_group=\"Global Options\")\[email protected]('--result-backend',\n envvar='RESULT_BACKEND',\n cls=CeleryOption,\n help_group=\"Global Options\")\[email protected]('--loader',\n envvar='LOADER',\n cls=CeleryOption,\n help_group=\"Global Options\")\[email protected]('--config',\n envvar='CONFIG_MODULE',\n cls=CeleryOption,\n help_group=\"Global Options\")\[email protected]('--workdir',\n cls=CeleryOption,\n help_group=\"Global Options\")\[email protected]('-C',\n '--no-color',\n envvar='NO_COLOR',\n is_flag=True,\n cls=CeleryOption,\n help_group=\"Global Options\")\[email protected]('-q',\n '--quiet',\n is_flag=True,\n cls=CeleryOption,\n help_group=\"Global Options\")\[email protected]('--version',\n cls=CeleryOption,\n is_flag=True,\n help_group=\"Global Options\")\[email protected]_context\ndef celery(ctx, app, broker, result_backend, loader, config, workdir,\n no_color, quiet, version):\n \"\"\"Celery command entrypoint.\"\"\"\n if version:\n click.echo(VERSION_BANNER)\n ctx.exit()\n elif ctx.invoked_subcommand is None:\n click.echo(ctx.get_help())\n ctx.exit()\n\n if workdir:\n os.chdir(workdir)\n if loader:\n # Default app takes loader from this env (Issue #1066).\n os.environ['CELERY_LOADER'] = loader\n if broker:\n os.environ['CELERY_BROKER_URL'] = broker\n if result_backend:\n os.environ['CELERY_RESULT_BACKEND'] = result_backend\n if config:\n os.environ['CELERY_CONFIG_MODULE'] = config\n ctx.obj = CLIContext(app=app, no_color=no_color, workdir=workdir,\n quiet=quiet)\n\n # User options\n worker.params.extend(ctx.obj.app.user_options.get('worker', []))\n beat.params.extend(ctx.obj.app.user_options.get('beat', []))\n events.params.extend(ctx.obj.app.user_options.get('events', []))\n\n\[email protected](cls=CeleryCommand)\[email protected]_context\ndef report(ctx):\n \"\"\"Shows information useful to include in bug-reports.\"\"\"\n app = ctx.obj.app\n app.loader.import_default_modules()\n ctx.obj.echo(app.bugreport())\n\n\ncelery.add_command(purge)\ncelery.add_command(call)\ncelery.add_command(beat)\ncelery.add_command(list_)\ncelery.add_command(result)\ncelery.add_command(migrate)\ncelery.add_command(status)\ncelery.add_command(worker)\ncelery.add_command(events)\ncelery.add_command(inspect)\ncelery.add_command(control)\ncelery.add_command(graph)\ncelery.add_command(upgrade)\ncelery.add_command(logtool)\ncelery.add_command(amqp)\ncelery.add_command(shell)\ncelery.add_command(multi)\n\n# Monkey-patch click to display a custom error\n# when -A or --app are used as sub-command options instead of as options\n# of the global command.\n\nprevious_show_implementation = click.exceptions.NoSuchOption.show\n\nWRONG_APP_OPTION_USAGE_MESSAGE = \"\"\"You are using `{option_name}` as an option of the {info_name} sub-command:\ncelery {info_name} {option_name} celeryapp <...>\n\nThe support for this usage was removed in Celery 5.0. 
Instead you should use `{option_name}` as a global option:\ncelery {option_name} celeryapp {info_name} <...>\"\"\"\n\n\ndef _show(self, file=None):\n if self.option_name in ('-A', '--app'):\n self.ctx.obj.error(\n WRONG_APP_OPTION_USAGE_MESSAGE.format(\n option_name=self.option_name,\n info_name=self.ctx.info_name),\n fg='red'\n )\n previous_show_implementation(self, file=file)\n\n\nclick.exceptions.NoSuchOption.show = _show\n\n\ndef main() -> int:\n \"\"\"Start celery umbrella command.\n\n This function is the main entrypoint for the CLI.\n\n :return: The exit code of the CLI.\n \"\"\"\n return celery(auto_envvar_prefix=\"CELERY\")\n", "path": "celery/bin/celery.py"}], "after_files": [{"content": "\"\"\"Celery Command Line Interface.\"\"\"\nimport os\nimport pathlib\nimport traceback\n\nimport click\nimport click.exceptions\nfrom click.types import ParamType\nfrom click_didyoumean import DYMGroup\n\nfrom celery import VERSION_BANNER\nfrom celery.app.utils import find_app\nfrom celery.bin.amqp import amqp\nfrom celery.bin.base import CeleryCommand, CeleryOption, CLIContext\nfrom celery.bin.beat import beat\nfrom celery.bin.call import call\nfrom celery.bin.control import control, inspect, status\nfrom celery.bin.events import events\nfrom celery.bin.graph import graph\nfrom celery.bin.list import list_\nfrom celery.bin.logtool import logtool\nfrom celery.bin.migrate import migrate\nfrom celery.bin.multi import multi\nfrom celery.bin.purge import purge\nfrom celery.bin.result import result\nfrom celery.bin.shell import shell\nfrom celery.bin.upgrade import upgrade\nfrom celery.bin.worker import worker\n\nUNABLE_TO_LOAD_APP_MODULE_NOT_FOUND = click.style(\"\"\"\nUnable to load celery application.\nThe module {0} was not found.\"\"\", fg='red')\n\nUNABLE_TO_LOAD_APP_ERROR_OCCURRED = click.style(\"\"\"\nUnable to load celery application.\nWhile trying to load the module {0} the following error occurred:\n{1}\"\"\", fg='red')\n\nUNABLE_TO_LOAD_APP_APP_MISSING = click.style(\"\"\"\nUnable to load celery application.\n{0}\"\"\")\n\n\nclass App(ParamType):\n \"\"\"Application option.\"\"\"\n\n name = \"application\"\n\n def convert(self, value, param, ctx):\n try:\n return find_app(value)\n except ModuleNotFoundError as e:\n if e.name != value:\n exc = traceback.format_exc()\n self.fail(\n UNABLE_TO_LOAD_APP_ERROR_OCCURRED.format(value, exc)\n )\n self.fail(UNABLE_TO_LOAD_APP_MODULE_NOT_FOUND.format(e.name))\n except AttributeError as e:\n attribute_name = e.args[0].capitalize()\n self.fail(UNABLE_TO_LOAD_APP_APP_MISSING.format(attribute_name))\n except Exception:\n exc = traceback.format_exc()\n self.fail(\n UNABLE_TO_LOAD_APP_ERROR_OCCURRED.format(value, exc)\n )\n\n\nAPP = App()\n\n\[email protected](cls=DYMGroup, invoke_without_command=True)\[email protected]('-A',\n '--app',\n envvar='APP',\n cls=CeleryOption,\n type=APP,\n help_group=\"Global Options\")\[email protected]('-b',\n '--broker',\n envvar='BROKER_URL',\n cls=CeleryOption,\n help_group=\"Global Options\")\[email protected]('--result-backend',\n envvar='RESULT_BACKEND',\n cls=CeleryOption,\n help_group=\"Global Options\")\[email protected]('--loader',\n envvar='LOADER',\n cls=CeleryOption,\n help_group=\"Global Options\")\[email protected]('--config',\n envvar='CONFIG_MODULE',\n cls=CeleryOption,\n help_group=\"Global Options\")\[email protected]('--workdir',\n cls=CeleryOption,\n type=pathlib.Path,\n callback=lambda _, __, wd: os.chdir(wd) if wd else None,\n is_eager=True,\n help_group=\"Global Options\")\[email protected]('-C',\n 
'--no-color',\n envvar='NO_COLOR',\n is_flag=True,\n cls=CeleryOption,\n help_group=\"Global Options\")\[email protected]('-q',\n '--quiet',\n is_flag=True,\n cls=CeleryOption,\n help_group=\"Global Options\")\[email protected]('--version',\n cls=CeleryOption,\n is_flag=True,\n help_group=\"Global Options\")\[email protected]_context\ndef celery(ctx, app, broker, result_backend, loader, config, workdir,\n no_color, quiet, version):\n \"\"\"Celery command entrypoint.\"\"\"\n if version:\n click.echo(VERSION_BANNER)\n ctx.exit()\n elif ctx.invoked_subcommand is None:\n click.echo(ctx.get_help())\n ctx.exit()\n\n if loader:\n # Default app takes loader from this env (Issue #1066).\n os.environ['CELERY_LOADER'] = loader\n if broker:\n os.environ['CELERY_BROKER_URL'] = broker\n if result_backend:\n os.environ['CELERY_RESULT_BACKEND'] = result_backend\n if config:\n os.environ['CELERY_CONFIG_MODULE'] = config\n ctx.obj = CLIContext(app=app, no_color=no_color, workdir=workdir,\n quiet=quiet)\n\n # User options\n worker.params.extend(ctx.obj.app.user_options.get('worker', []))\n beat.params.extend(ctx.obj.app.user_options.get('beat', []))\n events.params.extend(ctx.obj.app.user_options.get('events', []))\n\n\[email protected](cls=CeleryCommand)\[email protected]_context\ndef report(ctx):\n \"\"\"Shows information useful to include in bug-reports.\"\"\"\n app = ctx.obj.app\n app.loader.import_default_modules()\n ctx.obj.echo(app.bugreport())\n\n\ncelery.add_command(purge)\ncelery.add_command(call)\ncelery.add_command(beat)\ncelery.add_command(list_)\ncelery.add_command(result)\ncelery.add_command(migrate)\ncelery.add_command(status)\ncelery.add_command(worker)\ncelery.add_command(events)\ncelery.add_command(inspect)\ncelery.add_command(control)\ncelery.add_command(graph)\ncelery.add_command(upgrade)\ncelery.add_command(logtool)\ncelery.add_command(amqp)\ncelery.add_command(shell)\ncelery.add_command(multi)\n\n# Monkey-patch click to display a custom error\n# when -A or --app are used as sub-command options instead of as options\n# of the global command.\n\nprevious_show_implementation = click.exceptions.NoSuchOption.show\n\nWRONG_APP_OPTION_USAGE_MESSAGE = \"\"\"You are using `{option_name}` as an option of the {info_name} sub-command:\ncelery {info_name} {option_name} celeryapp <...>\n\nThe support for this usage was removed in Celery 5.0. Instead you should use `{option_name}` as a global option:\ncelery {option_name} celeryapp {info_name} <...>\"\"\"\n\n\ndef _show(self, file=None):\n if self.option_name in ('-A', '--app'):\n self.ctx.obj.error(\n WRONG_APP_OPTION_USAGE_MESSAGE.format(\n option_name=self.option_name,\n info_name=self.ctx.info_name),\n fg='red'\n )\n previous_show_implementation(self, file=file)\n\n\nclick.exceptions.NoSuchOption.show = _show\n\n\ndef main() -> int:\n \"\"\"Start celery umbrella command.\n\n This function is the main entrypoint for the CLI.\n\n :return: The exit code of the CLI.\n \"\"\"\n return celery(auto_envvar_prefix=\"CELERY\")\n", "path": "celery/bin/celery.py"}]}
num_tokens: 4,083
num_tokens_diff: 230
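The `num_tokens` and `num_tokens_diff` columns give precomputed size measures for the prompt and the reference patch; the tokenizer behind those counts is not specified here, so they should be treated as approximate budgets rather than exact lengths for any particular model. A small sketch of using them to keep only records that fit a context window, continuing from the loading sketch above:

```python
# Keep only records whose prompt fits a rough context budget,
# using the dataset's own precomputed num_tokens column.
MAX_PROMPT_TOKENS = 2048  # assumed budget; tune to the target model

small_enough = ds.filter(lambda r: r["num_tokens"] <= MAX_PROMPT_TOKENS)
print(len(small_enough), "of", len(ds), "records fit the budget")
```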
Example record:

problem_id: gh_patches_debug_41702
source: rasdani/github-patches
task_type: git_diff
in_source_id: mars-project__mars-482
prompt:
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [ENH] --ui-port option for web service can be removed As no actor pools are created in Mars Worker, the option -p can be adopted as http port, and --ui-port can be merged. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `mars/web/__main__.py` Content: ``` 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 # Copyright 1999-2018 Alibaba Group Holding Ltd. 4 # 5 # Licensed under the Apache License, Version 2.0 (the "License"); 6 # you may not use this file except in compliance with the License. 7 # You may obtain a copy of the License at 8 # 9 # http://www.apache.org/licenses/LICENSE-2.0 10 # 11 # Unless required by applicable law or agreed to in writing, software 12 # distributed under the License is distributed on an "AS IS" BASIS, 13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 # See the License for the specific language governing permissions and 15 # limitations under the License. 16 17 import gevent.monkey 18 gevent.monkey.patch_all(thread=False) 19 20 import logging # noqa: E402 21 import random # noqa: E402 22 import time # noqa: E402 23 24 from ..base_app import BaseApplication # noqa: E402 25 from ..compat import six # noqa: E402 26 from ..errors import StartArgumentError # noqa: E402 27 28 logger = logging.getLogger(__name__) 29 30 31 class WebApplication(BaseApplication): 32 def __init__(self): 33 super(WebApplication, self).__init__() 34 self.mars_web = None 35 self.require_pool = False 36 37 def config_args(self, parser): 38 parser.add_argument('--ui-port', help='port of Mars UI') 39 40 def validate_arguments(self): 41 if not self.args.schedulers and not self.args.kv_store: 42 raise StartArgumentError('Either schedulers or url of kv store is required.') 43 44 def main_loop(self): 45 try: 46 self.start() 47 while True: 48 time.sleep(0.1) 49 finally: 50 self.stop() 51 52 def start(self): 53 from .server import MarsWeb 54 if MarsWeb is None: 55 self.mars_web = None 56 logger.warning('Mars UI cannot be loaded. Please check if necessary components are installed.') 57 else: 58 ui_port = int(self.args.ui_port) if self.args.ui_port else None 59 scheduler_ip = self.args.schedulers or None 60 if isinstance(scheduler_ip, six.string_types): 61 schedulers = scheduler_ip.split(',') 62 scheduler_ip = random.choice(schedulers) 63 self.mars_web = MarsWeb(port=ui_port, scheduler_ip=scheduler_ip) 64 self.mars_web.start() 65 66 def stop(self): 67 if self.mars_web: 68 self.mars_web.stop() 69 70 71 main = WebApplication() 72 73 if __name__ == '__main__': 74 main() 75 ``` Path: `mars/base_app.py` Content: ``` 1 # Copyright 1999-2018 Alibaba Group Holding Ltd. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 
14 15 import argparse 16 import logging 17 import os 18 import sys 19 20 from .actors import create_actor_pool 21 from .config import options 22 from .errors import StartArgumentError 23 from .lib.tblib import pickling_support 24 from .utils import get_next_port 25 26 pickling_support.install() 27 logger = logging.getLogger(__name__) 28 29 try: 30 from pytest_cov.embed import cleanup_on_sigterm 31 cleanup_on_sigterm() 32 except ImportError: # pragma: no cover 33 pass 34 35 36 class BaseApplication(object): 37 """ 38 :type pool mars.actors.pool.gevent_pool.ActorContext 39 """ 40 service_description = '' 41 service_logger = logger 42 43 def __init__(self): 44 self.args = None 45 self.endpoint = None 46 self.pool = None 47 self.n_process = None 48 49 self._running = False 50 51 def __call__(self, argv=None): 52 import json 53 54 if argv is None: 55 argv = sys.argv[1:] 56 new_argv = [] 57 for a in argv: 58 if not a.startswith('-D'): 59 new_argv.append(a) 60 continue 61 conf, val = a[2:].split('=', 1) 62 conf_parts = conf.split('.') 63 conf_obj = options 64 for g in conf_parts[:-1]: 65 conf_obj = getattr(conf_obj, g) 66 try: 67 setattr(conf_obj, conf_parts[-1], json.loads(val)) 68 except: 69 setattr(conf_obj, conf_parts[-1], val) 70 71 return self._main(new_argv) 72 73 def _main(self, argv=None): 74 parser = argparse.ArgumentParser(description=self.service_description) 75 parser.add_argument('-a', '--advertise', help='advertise ip') 76 parser.add_argument('-k', '--kv-store', help='address of kv store service, ' 77 'for instance, etcd://localhost:4001') 78 parser.add_argument('-e', '--endpoint', help='endpoint of the service') 79 parser.add_argument('-s', '--schedulers', help='endpoint of scheduler, when single scheduler ' 80 'and etcd is not available') 81 parser.add_argument('-H', '--host', help='host of the scheduler service, only available ' 82 'when `endpoint` is absent') 83 parser.add_argument('-p', '--port', help='port of the scheduler service, only available ' 84 'when `endpoint` is absent') 85 parser.add_argument('--level', help='log level') 86 parser.add_argument('--format', help='log format') 87 parser.add_argument('--log_conf', help='log config file') 88 parser.add_argument('--inspect', help='inspection endpoint') 89 parser.add_argument('--load-modules', nargs='*', help='modules to import') 90 self.config_args(parser) 91 args = parser.parse_args(argv) 92 self.args = args 93 94 endpoint = args.endpoint 95 host = args.host 96 port = args.port 97 options.kv_store = args.kv_store if args.kv_store else options.kv_store 98 99 load_modules = [] 100 for mod in args.load_modules or (): 101 load_modules.extend(mod.split(',')) 102 if not args.load_modules: 103 load_module_str = os.environ.get('MARS_LOAD_MODULES') 104 if load_module_str: 105 load_modules = load_module_str.split(',') 106 load_modules.append('mars.executor') 107 for m in load_modules: 108 __import__(m, globals(), locals(), []) 109 self.service_logger.info('Modules %s loaded', ','.join(load_modules)) 110 111 self.n_process = 1 112 113 self.config_service() 114 self.config_logging() 115 116 if not host: 117 host = args.advertise or '0.0.0.0' 118 if not endpoint and port: 119 endpoint = host + ':' + port 120 121 try: 122 self.validate_arguments() 123 except StartArgumentError as ex: 124 parser.error('Failed to start application: %s' % ex) 125 126 if getattr(self, 'require_pool', True): 127 self.endpoint, self.pool = self._try_create_pool(endpoint=endpoint, host=host, port=port) 128 self.service_logger.info('%s started at %s.', 
self.service_description, self.endpoint) 129 self.main_loop() 130 131 def config_logging(self): 132 import logging.config 133 log_conf = self.args.log_conf or 'logging.conf' 134 135 conf_file_paths = [ 136 '', os.path.abspath('.'), 137 os.path.dirname(os.path.dirname(os.path.abspath(__file__))) 138 ] 139 log_configured = False 140 for path in conf_file_paths: 141 conf_path = log_conf 142 if path: 143 conf_path = os.path.join(conf_path) 144 if os.path.exists(conf_path): 145 logging.config.fileConfig(conf_path, disable_existing_loggers=False) 146 log_configured = True 147 148 if not log_configured: 149 if not self.args.level: 150 level = logging.INFO 151 else: 152 level = getattr(logging, self.args.level.upper()) 153 logging.getLogger('mars').setLevel(level) 154 logging.basicConfig(format=self.args.format) 155 156 def validate_arguments(self): 157 pass 158 159 def _try_create_pool(self, endpoint=None, host=None, port=None): 160 pool = None 161 if endpoint: 162 pool = self.create_pool(address=endpoint) 163 else: 164 use_port = None 165 retrial = 5 166 while use_port is None: 167 use_port = port or get_next_port() 168 try: 169 endpoint = '{0}:{1}'.format(host, use_port) 170 pool = self.create_pool(address=endpoint) 171 break 172 except: 173 retrial -= 1 174 if retrial == 0: 175 raise 176 177 if port is None: 178 use_port = None 179 else: 180 raise 181 return endpoint, pool 182 183 def create_pool(self, *args, **kwargs): 184 kwargs.update(dict(n_process=self.n_process, backend='gevent')) 185 return create_actor_pool(*args, **kwargs) 186 187 def main_loop(self): 188 try: 189 with self.pool: 190 try: 191 self.start() 192 self._running = True 193 while True: 194 self.pool.join(1) 195 stopped = [] 196 for idx, proc in enumerate(self.pool.processes): 197 if not proc.is_alive(): 198 stopped.append(idx) 199 if stopped: 200 self.handle_process_down(stopped) 201 finally: 202 self.stop() 203 finally: 204 self._running = False 205 206 def handle_process_down(self, proc_indices): 207 """ 208 Handle process down event, the default action is to quit 209 the whole application. Applications can inherit this method 210 to do customized process-level failover. 211 212 :param proc_indices: indices of processes (not pids) 213 """ 214 for idx in proc_indices: 215 proc = self.pool.processes[idx] 216 self.service_logger.fatal( 217 'Process %d exited unpredictably. exitcode=%d', proc.pid, proc.exitcode) 218 raise KeyboardInterrupt 219 220 def config_service(self): 221 pass 222 223 def config_args(self, parser): 224 raise NotImplementedError 225 226 def start(self): 227 raise NotImplementedError 228 229 def stop(self): 230 raise NotImplementedError 231 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/mars/base_app.py b/mars/base_app.py --- a/mars/base_app.py +++ b/mars/base_app.py @@ -76,11 +76,11 @@ parser.add_argument('-k', '--kv-store', help='address of kv store service, ' 'for instance, etcd://localhost:4001') parser.add_argument('-e', '--endpoint', help='endpoint of the service') - parser.add_argument('-s', '--schedulers', help='endpoint of scheduler, when single scheduler ' + parser.add_argument('-s', '--schedulers', help='endpoint of schedulers, when single scheduler ' 'and etcd is not available') - parser.add_argument('-H', '--host', help='host of the scheduler service, only available ' + parser.add_argument('-H', '--host', help='host of the service, only available ' 'when `endpoint` is absent') - parser.add_argument('-p', '--port', help='port of the scheduler service, only available ' + parser.add_argument('-p', '--port', help='port of the service, only available ' 'when `endpoint` is absent') parser.add_argument('--level', help='log level') parser.add_argument('--format', help='log format') @@ -125,7 +125,7 @@ if getattr(self, 'require_pool', True): self.endpoint, self.pool = self._try_create_pool(endpoint=endpoint, host=host, port=port) - self.service_logger.info('%s started at %s.', self.service_description, self.endpoint) + self.service_logger.info('%s started at %s.', self.service_description, self.endpoint) self.main_loop() def config_logging(self): diff --git a/mars/web/__main__.py b/mars/web/__main__.py --- a/mars/web/__main__.py +++ b/mars/web/__main__.py @@ -17,9 +17,10 @@ import gevent.monkey gevent.monkey.patch_all(thread=False) -import logging # noqa: E402 -import random # noqa: E402 -import time # noqa: E402 +import argparse # noqa: E402 +import logging # noqa: E402 +import random # noqa: E402 +import time # noqa: E402 from ..base_app import BaseApplication # noqa: E402 from ..compat import six # noqa: E402 @@ -35,7 +36,7 @@ self.require_pool = False def config_args(self, parser): - parser.add_argument('--ui-port', help='port of Mars UI') + parser.add_argument('--ui-port', help=argparse.SUPPRESS) def validate_arguments(self): if not self.args.schedulers and not self.args.kv_store: @@ -55,7 +56,8 @@ self.mars_web = None logger.warning('Mars UI cannot be loaded. Please check if necessary components are installed.') else: - ui_port = int(self.args.ui_port) if self.args.ui_port else None + port_arg = self.args.ui_port or self.args.port + ui_port = int(port_arg) if port_arg else None scheduler_ip = self.args.schedulers or None if isinstance(scheduler_ip, six.string_types): schedulers = scheduler_ip.split(',')
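For reference, a minimal standalone sketch of how the merged port option resolves once `--ui-port` is hidden and `-p` doubles as the HTTP port. This assumes the patched argument handling above; the simplified parser and the port value are illustrative only.

```python
# Sketch of the post-patch behaviour: -p supplies the web port, --ui-port stays as a hidden alias.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('-p', '--port', help='port of the service')
parser.add_argument('--ui-port', help=argparse.SUPPRESS)  # still accepted, but no longer shown in --help

args = parser.parse_args(['-p', '8088'])
port_arg = args.ui_port or args.port   # same precedence as the patch: --ui-port wins if both are given
ui_port = int(port_arg) if port_arg else None
print(ui_port)  # 8088
```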
{"golden_diff": "diff --git a/mars/base_app.py b/mars/base_app.py\n--- a/mars/base_app.py\n+++ b/mars/base_app.py\n@@ -76,11 +76,11 @@\n parser.add_argument('-k', '--kv-store', help='address of kv store service, '\n 'for instance, etcd://localhost:4001')\n parser.add_argument('-e', '--endpoint', help='endpoint of the service')\n- parser.add_argument('-s', '--schedulers', help='endpoint of scheduler, when single scheduler '\n+ parser.add_argument('-s', '--schedulers', help='endpoint of schedulers, when single scheduler '\n 'and etcd is not available')\n- parser.add_argument('-H', '--host', help='host of the scheduler service, only available '\n+ parser.add_argument('-H', '--host', help='host of the service, only available '\n 'when `endpoint` is absent')\n- parser.add_argument('-p', '--port', help='port of the scheduler service, only available '\n+ parser.add_argument('-p', '--port', help='port of the service, only available '\n 'when `endpoint` is absent')\n parser.add_argument('--level', help='log level')\n parser.add_argument('--format', help='log format')\n@@ -125,7 +125,7 @@\n \n if getattr(self, 'require_pool', True):\n self.endpoint, self.pool = self._try_create_pool(endpoint=endpoint, host=host, port=port)\n- self.service_logger.info('%s started at %s.', self.service_description, self.endpoint)\n+ self.service_logger.info('%s started at %s.', self.service_description, self.endpoint)\n self.main_loop()\n \n def config_logging(self):\ndiff --git a/mars/web/__main__.py b/mars/web/__main__.py\n--- a/mars/web/__main__.py\n+++ b/mars/web/__main__.py\n@@ -17,9 +17,10 @@\n import gevent.monkey\n gevent.monkey.patch_all(thread=False)\n \n-import logging # noqa: E402\n-import random # noqa: E402\n-import time # noqa: E402\n+import argparse # noqa: E402\n+import logging # noqa: E402\n+import random # noqa: E402\n+import time # noqa: E402\n \n from ..base_app import BaseApplication # noqa: E402\n from ..compat import six # noqa: E402\n@@ -35,7 +36,7 @@\n self.require_pool = False\n \n def config_args(self, parser):\n- parser.add_argument('--ui-port', help='port of Mars UI')\n+ parser.add_argument('--ui-port', help=argparse.SUPPRESS)\n \n def validate_arguments(self):\n if not self.args.schedulers and not self.args.kv_store:\n@@ -55,7 +56,8 @@\n self.mars_web = None\n logger.warning('Mars UI cannot be loaded. 
Please check if necessary components are installed.')\n else:\n- ui_port = int(self.args.ui_port) if self.args.ui_port else None\n+ port_arg = self.args.ui_port or self.args.port\n+ ui_port = int(port_arg) if port_arg else None\n scheduler_ip = self.args.schedulers or None\n if isinstance(scheduler_ip, six.string_types):\n schedulers = scheduler_ip.split(',')\n", "issue": "[ENH] --ui-port option for web service can be removed\nAs no actor pools are created in Mars Worker, the option -p can be adopted as http port, and --ui-port can be merged.\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright 1999-2018 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport gevent.monkey\ngevent.monkey.patch_all(thread=False)\n\nimport logging # noqa: E402\nimport random # noqa: E402\nimport time # noqa: E402\n\nfrom ..base_app import BaseApplication # noqa: E402\nfrom ..compat import six # noqa: E402\nfrom ..errors import StartArgumentError # noqa: E402\n\nlogger = logging.getLogger(__name__)\n\n\nclass WebApplication(BaseApplication):\n def __init__(self):\n super(WebApplication, self).__init__()\n self.mars_web = None\n self.require_pool = False\n\n def config_args(self, parser):\n parser.add_argument('--ui-port', help='port of Mars UI')\n\n def validate_arguments(self):\n if not self.args.schedulers and not self.args.kv_store:\n raise StartArgumentError('Either schedulers or url of kv store is required.')\n\n def main_loop(self):\n try:\n self.start()\n while True:\n time.sleep(0.1)\n finally:\n self.stop()\n\n def start(self):\n from .server import MarsWeb\n if MarsWeb is None:\n self.mars_web = None\n logger.warning('Mars UI cannot be loaded. 
Please check if necessary components are installed.')\n else:\n ui_port = int(self.args.ui_port) if self.args.ui_port else None\n scheduler_ip = self.args.schedulers or None\n if isinstance(scheduler_ip, six.string_types):\n schedulers = scheduler_ip.split(',')\n scheduler_ip = random.choice(schedulers)\n self.mars_web = MarsWeb(port=ui_port, scheduler_ip=scheduler_ip)\n self.mars_web.start()\n\n def stop(self):\n if self.mars_web:\n self.mars_web.stop()\n\n\nmain = WebApplication()\n\nif __name__ == '__main__':\n main()\n", "path": "mars/web/__main__.py"}, {"content": "# Copyright 1999-2018 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport argparse\nimport logging\nimport os\nimport sys\n\nfrom .actors import create_actor_pool\nfrom .config import options\nfrom .errors import StartArgumentError\nfrom .lib.tblib import pickling_support\nfrom .utils import get_next_port\n\npickling_support.install()\nlogger = logging.getLogger(__name__)\n\ntry:\n from pytest_cov.embed import cleanup_on_sigterm\n cleanup_on_sigterm()\nexcept ImportError: # pragma: no cover\n pass\n\n\nclass BaseApplication(object):\n \"\"\"\n :type pool mars.actors.pool.gevent_pool.ActorContext\n \"\"\"\n service_description = ''\n service_logger = logger\n\n def __init__(self):\n self.args = None\n self.endpoint = None\n self.pool = None\n self.n_process = None\n\n self._running = False\n\n def __call__(self, argv=None):\n import json\n\n if argv is None:\n argv = sys.argv[1:]\n new_argv = []\n for a in argv:\n if not a.startswith('-D'):\n new_argv.append(a)\n continue\n conf, val = a[2:].split('=', 1)\n conf_parts = conf.split('.')\n conf_obj = options\n for g in conf_parts[:-1]:\n conf_obj = getattr(conf_obj, g)\n try:\n setattr(conf_obj, conf_parts[-1], json.loads(val))\n except:\n setattr(conf_obj, conf_parts[-1], val)\n\n return self._main(new_argv)\n\n def _main(self, argv=None):\n parser = argparse.ArgumentParser(description=self.service_description)\n parser.add_argument('-a', '--advertise', help='advertise ip')\n parser.add_argument('-k', '--kv-store', help='address of kv store service, '\n 'for instance, etcd://localhost:4001')\n parser.add_argument('-e', '--endpoint', help='endpoint of the service')\n parser.add_argument('-s', '--schedulers', help='endpoint of scheduler, when single scheduler '\n 'and etcd is not available')\n parser.add_argument('-H', '--host', help='host of the scheduler service, only available '\n 'when `endpoint` is absent')\n parser.add_argument('-p', '--port', help='port of the scheduler service, only available '\n 'when `endpoint` is absent')\n parser.add_argument('--level', help='log level')\n parser.add_argument('--format', help='log format')\n parser.add_argument('--log_conf', help='log config file')\n parser.add_argument('--inspect', help='inspection endpoint')\n parser.add_argument('--load-modules', nargs='*', help='modules to import')\n self.config_args(parser)\n args = parser.parse_args(argv)\n self.args = args\n\n endpoint = args.endpoint\n host = 
args.host\n port = args.port\n options.kv_store = args.kv_store if args.kv_store else options.kv_store\n\n load_modules = []\n for mod in args.load_modules or ():\n load_modules.extend(mod.split(','))\n if not args.load_modules:\n load_module_str = os.environ.get('MARS_LOAD_MODULES')\n if load_module_str:\n load_modules = load_module_str.split(',')\n load_modules.append('mars.executor')\n for m in load_modules:\n __import__(m, globals(), locals(), [])\n self.service_logger.info('Modules %s loaded', ','.join(load_modules))\n\n self.n_process = 1\n\n self.config_service()\n self.config_logging()\n\n if not host:\n host = args.advertise or '0.0.0.0'\n if not endpoint and port:\n endpoint = host + ':' + port\n\n try:\n self.validate_arguments()\n except StartArgumentError as ex:\n parser.error('Failed to start application: %s' % ex)\n\n if getattr(self, 'require_pool', True):\n self.endpoint, self.pool = self._try_create_pool(endpoint=endpoint, host=host, port=port)\n self.service_logger.info('%s started at %s.', self.service_description, self.endpoint)\n self.main_loop()\n\n def config_logging(self):\n import logging.config\n log_conf = self.args.log_conf or 'logging.conf'\n\n conf_file_paths = [\n '', os.path.abspath('.'),\n os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n ]\n log_configured = False\n for path in conf_file_paths:\n conf_path = log_conf\n if path:\n conf_path = os.path.join(conf_path)\n if os.path.exists(conf_path):\n logging.config.fileConfig(conf_path, disable_existing_loggers=False)\n log_configured = True\n\n if not log_configured:\n if not self.args.level:\n level = logging.INFO\n else:\n level = getattr(logging, self.args.level.upper())\n logging.getLogger('mars').setLevel(level)\n logging.basicConfig(format=self.args.format)\n\n def validate_arguments(self):\n pass\n\n def _try_create_pool(self, endpoint=None, host=None, port=None):\n pool = None\n if endpoint:\n pool = self.create_pool(address=endpoint)\n else:\n use_port = None\n retrial = 5\n while use_port is None:\n use_port = port or get_next_port()\n try:\n endpoint = '{0}:{1}'.format(host, use_port)\n pool = self.create_pool(address=endpoint)\n break\n except:\n retrial -= 1\n if retrial == 0:\n raise\n\n if port is None:\n use_port = None\n else:\n raise\n return endpoint, pool\n\n def create_pool(self, *args, **kwargs):\n kwargs.update(dict(n_process=self.n_process, backend='gevent'))\n return create_actor_pool(*args, **kwargs)\n\n def main_loop(self):\n try:\n with self.pool:\n try:\n self.start()\n self._running = True\n while True:\n self.pool.join(1)\n stopped = []\n for idx, proc in enumerate(self.pool.processes):\n if not proc.is_alive():\n stopped.append(idx)\n if stopped:\n self.handle_process_down(stopped)\n finally:\n self.stop()\n finally:\n self._running = False\n\n def handle_process_down(self, proc_indices):\n \"\"\"\n Handle process down event, the default action is to quit\n the whole application. Applications can inherit this method\n to do customized process-level failover.\n\n :param proc_indices: indices of processes (not pids)\n \"\"\"\n for idx in proc_indices:\n proc = self.pool.processes[idx]\n self.service_logger.fatal(\n 'Process %d exited unpredictably. 
exitcode=%d', proc.pid, proc.exitcode)\n raise KeyboardInterrupt\n\n def config_service(self):\n pass\n\n def config_args(self, parser):\n raise NotImplementedError\n\n def start(self):\n raise NotImplementedError\n\n def stop(self):\n raise NotImplementedError\n", "path": "mars/base_app.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright 1999-2018 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport gevent.monkey\ngevent.monkey.patch_all(thread=False)\n\nimport argparse # noqa: E402\nimport logging # noqa: E402\nimport random # noqa: E402\nimport time # noqa: E402\n\nfrom ..base_app import BaseApplication # noqa: E402\nfrom ..compat import six # noqa: E402\nfrom ..errors import StartArgumentError # noqa: E402\n\nlogger = logging.getLogger(__name__)\n\n\nclass WebApplication(BaseApplication):\n def __init__(self):\n super(WebApplication, self).__init__()\n self.mars_web = None\n self.require_pool = False\n\n def config_args(self, parser):\n parser.add_argument('--ui-port', help=argparse.SUPPRESS)\n\n def validate_arguments(self):\n if not self.args.schedulers and not self.args.kv_store:\n raise StartArgumentError('Either schedulers or url of kv store is required.')\n\n def main_loop(self):\n try:\n self.start()\n while True:\n time.sleep(0.1)\n finally:\n self.stop()\n\n def start(self):\n from .server import MarsWeb\n if MarsWeb is None:\n self.mars_web = None\n logger.warning('Mars UI cannot be loaded. 
Please check if necessary components are installed.')\n else:\n port_arg = self.args.ui_port or self.args.port\n ui_port = int(port_arg) if port_arg else None\n scheduler_ip = self.args.schedulers or None\n if isinstance(scheduler_ip, six.string_types):\n schedulers = scheduler_ip.split(',')\n scheduler_ip = random.choice(schedulers)\n self.mars_web = MarsWeb(port=ui_port, scheduler_ip=scheduler_ip)\n self.mars_web.start()\n\n def stop(self):\n if self.mars_web:\n self.mars_web.stop()\n\n\nmain = WebApplication()\n\nif __name__ == '__main__':\n main()\n", "path": "mars/web/__main__.py"}, {"content": "# Copyright 1999-2018 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport argparse\nimport logging\nimport os\nimport sys\n\nfrom .actors import create_actor_pool\nfrom .config import options\nfrom .errors import StartArgumentError\nfrom .lib.tblib import pickling_support\nfrom .utils import get_next_port\n\npickling_support.install()\nlogger = logging.getLogger(__name__)\n\ntry:\n from pytest_cov.embed import cleanup_on_sigterm\n cleanup_on_sigterm()\nexcept ImportError: # pragma: no cover\n pass\n\n\nclass BaseApplication(object):\n \"\"\"\n :type pool mars.actors.pool.gevent_pool.ActorContext\n \"\"\"\n service_description = ''\n service_logger = logger\n\n def __init__(self):\n self.args = None\n self.endpoint = None\n self.pool = None\n self.n_process = None\n\n self._running = False\n\n def __call__(self, argv=None):\n import json\n\n if argv is None:\n argv = sys.argv[1:]\n new_argv = []\n for a in argv:\n if not a.startswith('-D'):\n new_argv.append(a)\n continue\n conf, val = a[2:].split('=', 1)\n conf_parts = conf.split('.')\n conf_obj = options\n for g in conf_parts[:-1]:\n conf_obj = getattr(conf_obj, g)\n try:\n setattr(conf_obj, conf_parts[-1], json.loads(val))\n except:\n setattr(conf_obj, conf_parts[-1], val)\n\n return self._main(new_argv)\n\n def _main(self, argv=None):\n parser = argparse.ArgumentParser(description=self.service_description)\n parser.add_argument('-a', '--advertise', help='advertise ip')\n parser.add_argument('-k', '--kv-store', help='address of kv store service, '\n 'for instance, etcd://localhost:4001')\n parser.add_argument('-e', '--endpoint', help='endpoint of the service')\n parser.add_argument('-s', '--schedulers', help='endpoint of schedulers, when single scheduler '\n 'and etcd is not available')\n parser.add_argument('-H', '--host', help='host of the service, only available '\n 'when `endpoint` is absent')\n parser.add_argument('-p', '--port', help='port of the service, only available '\n 'when `endpoint` is absent')\n parser.add_argument('--level', help='log level')\n parser.add_argument('--format', help='log format')\n parser.add_argument('--log_conf', help='log config file')\n parser.add_argument('--inspect', help='inspection endpoint')\n parser.add_argument('--load-modules', nargs='*', help='modules to import')\n self.config_args(parser)\n args = parser.parse_args(argv)\n self.args = args\n\n endpoint = 
args.endpoint\n host = args.host\n port = args.port\n options.kv_store = args.kv_store if args.kv_store else options.kv_store\n\n load_modules = []\n for mod in args.load_modules or ():\n load_modules.extend(mod.split(','))\n if not args.load_modules:\n load_module_str = os.environ.get('MARS_LOAD_MODULES')\n if load_module_str:\n load_modules = load_module_str.split(',')\n load_modules.append('mars.executor')\n for m in load_modules:\n __import__(m, globals(), locals(), [])\n self.service_logger.info('Modules %s loaded', ','.join(load_modules))\n\n self.n_process = 1\n\n self.config_service()\n self.config_logging()\n\n if not host:\n host = args.advertise or '0.0.0.0'\n if not endpoint and port:\n endpoint = host + ':' + port\n\n try:\n self.validate_arguments()\n except StartArgumentError as ex:\n parser.error('Failed to start application: %s' % ex)\n\n if getattr(self, 'require_pool', True):\n self.endpoint, self.pool = self._try_create_pool(endpoint=endpoint, host=host, port=port)\n self.service_logger.info('%s started at %s.', self.service_description, self.endpoint)\n self.main_loop()\n\n def config_logging(self):\n import logging.config\n log_conf = self.args.log_conf or 'logging.conf'\n\n conf_file_paths = [\n '', os.path.abspath('.'),\n os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n ]\n log_configured = False\n for path in conf_file_paths:\n conf_path = log_conf\n if path:\n conf_path = os.path.join(conf_path)\n if os.path.exists(conf_path):\n logging.config.fileConfig(conf_path, disable_existing_loggers=False)\n log_configured = True\n\n if not log_configured:\n if not self.args.level:\n level = logging.INFO\n else:\n level = getattr(logging, self.args.level.upper())\n logging.getLogger('mars').setLevel(level)\n logging.basicConfig(format=self.args.format)\n\n def validate_arguments(self):\n pass\n\n def _try_create_pool(self, endpoint=None, host=None, port=None):\n pool = None\n if endpoint:\n pool = self.create_pool(address=endpoint)\n else:\n use_port = None\n retrial = 5\n while use_port is None:\n use_port = port or get_next_port()\n try:\n endpoint = '{0}:{1}'.format(host, use_port)\n pool = self.create_pool(address=endpoint)\n break\n except:\n retrial -= 1\n if retrial == 0:\n raise\n\n if port is None:\n use_port = None\n else:\n raise\n return endpoint, pool\n\n def create_pool(self, *args, **kwargs):\n kwargs.update(dict(n_process=self.n_process, backend='gevent'))\n return create_actor_pool(*args, **kwargs)\n\n def main_loop(self):\n try:\n with self.pool:\n try:\n self.start()\n self._running = True\n while True:\n self.pool.join(1)\n stopped = []\n for idx, proc in enumerate(self.pool.processes):\n if not proc.is_alive():\n stopped.append(idx)\n if stopped:\n self.handle_process_down(stopped)\n finally:\n self.stop()\n finally:\n self._running = False\n\n def handle_process_down(self, proc_indices):\n \"\"\"\n Handle process down event, the default action is to quit\n the whole application. Applications can inherit this method\n to do customized process-level failover.\n\n :param proc_indices: indices of processes (not pids)\n \"\"\"\n for idx in proc_indices:\n proc = self.pool.processes[idx]\n self.service_logger.fatal(\n 'Process %d exited unpredictably. exitcode=%d', proc.pid, proc.exitcode)\n raise KeyboardInterrupt\n\n def config_service(self):\n pass\n\n def config_args(self, parser):\n raise NotImplementedError\n\n def start(self):\n raise NotImplementedError\n\n def stop(self):\n raise NotImplementedError\n", "path": "mars/base_app.py"}]}
3,245
762
gh_patches_debug_15384
rasdani/github-patches
git_diff
bridgecrewio__checkov-2303
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- CKV_GIT_3 should not be triggered on archived repositories **Describe the issue** _CKV_GIT_3_ currently gets triggered also on archived GitHub repositories. When archiving a repository the configuration `vulnerability_alerts` will get changed to `false` automatically. It's also not possible to turn it on again on an archived repository. _CKV_GIT_3_ should be changed to ignore archived repositories. **Examples** ```terraform resource "github_repository" "test" { name = "test" visibility = "private" archived = true vulnerability_alerts = false } ``` **Version (please complete the following information):** - Starting with Checkov Version 2.0.764 **Additional context** See the [GitHub documentation](https://docs.github.com/en/code-security/supply-chain-security/managing-vulnerabilities-in-your-projects-dependencies/configuring-dependabot-security-updates#supported-repositories) that Dependabot is only supported on non-archived repositories. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `checkov/terraform/checks/resource/github/RepositoryEnableVulnerabilityAlerts.py` Content: ``` 1 from typing import Any 2 3 from checkov.common.models.enums import CheckCategories, CheckResult 4 from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceCheck 5 6 7 class GithubRepositoryVulnerabilityAlerts(BaseResourceCheck): 8 def __init__(self) -> None: 9 name = "Ensure GitHub repository has vulnerability alerts enabled" 10 id = "CKV_GIT_3" 11 supported_resources = ["github_repository"] 12 categories = [CheckCategories.GENERAL_SECURITY] 13 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources) 14 15 def scan_resource_conf(self, conf) -> CheckResult: 16 # GitHub enables the alerts on public repos but disables them on private repos by default. 17 # is private repo 18 if conf.get("private") == [True] or conf.get("visibility") in [["private"], ["internal"]]: 19 if conf.get("vulnerability_alerts"): 20 return CheckResult.PASSED 21 return CheckResult.FAILED 22 # is public repo 23 if conf.get("vulnerability_alerts") == [False]: 24 return CheckResult.FAILED 25 return CheckResult.PASSED 26 27 28 check = GithubRepositoryVulnerabilityAlerts() 29 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/checkov/terraform/checks/resource/github/RepositoryEnableVulnerabilityAlerts.py b/checkov/terraform/checks/resource/github/RepositoryEnableVulnerabilityAlerts.py --- a/checkov/terraform/checks/resource/github/RepositoryEnableVulnerabilityAlerts.py +++ b/checkov/terraform/checks/resource/github/RepositoryEnableVulnerabilityAlerts.py @@ -13,6 +13,9 @@ super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources) def scan_resource_conf(self, conf) -> CheckResult: + # GitHub disables the alerts when archiving the repository without an option to turn them on again. + if conf.get("archived") == [True]: + return CheckResult.PASSED # GitHub enables the alerts on public repos but disables them on private repos by default. # is private repo if conf.get("private") == [True] or conf.get("visibility") in [["private"], ["internal"]]:
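For reference, a rough sketch of exercising the patched check with hand-built conf dicts. Checkov normally builds these from parsed HCL; the `[value]` list shape mirrors what `scan_resource_conf` expects above, and the import assumes the patched module is importable from the path shown.

```python
# Illustrative assertions against the patched CKV_GIT_3 logic (conf dicts are hand-written here).
from checkov.common.models.enums import CheckResult
from checkov.terraform.checks.resource.github.RepositoryEnableVulnerabilityAlerts import check

archived = {"visibility": ["private"], "archived": [True], "vulnerability_alerts": [False]}
active = {"visibility": ["private"], "archived": [False]}  # alerts never configured

assert check.scan_resource_conf(archived) == CheckResult.PASSED  # archived repos are skipped now
assert check.scan_resource_conf(active) == CheckResult.FAILED    # private repo without alerts still fails
```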
{"golden_diff": "diff --git a/checkov/terraform/checks/resource/github/RepositoryEnableVulnerabilityAlerts.py b/checkov/terraform/checks/resource/github/RepositoryEnableVulnerabilityAlerts.py\n--- a/checkov/terraform/checks/resource/github/RepositoryEnableVulnerabilityAlerts.py\n+++ b/checkov/terraform/checks/resource/github/RepositoryEnableVulnerabilityAlerts.py\n@@ -13,6 +13,9 @@\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n \n def scan_resource_conf(self, conf) -> CheckResult:\n+ # GitHub disables the alerts when archiving the repository without an option to turn them on again.\n+ if conf.get(\"archived\") == [True]:\n+ return CheckResult.PASSED\n # GitHub enables the alerts on public repos but disables them on private repos by default.\n # is private repo\n if conf.get(\"private\") == [True] or conf.get(\"visibility\") in [[\"private\"], [\"internal\"]]:\n", "issue": "CKV_GIT_3 should not be triggered on archived repositories\n**Describe the issue**\r\n_CKV_GIT_3_ currently gets triggered also on archived GitHub repositories. When archiving a repository the configuration `vulnerability_alerts` will get changed to `false` automatically. It's also not possible to turn it on again on an archived repository. _CKV_GIT_3_ should be changed to ignore archived repositories.\r\n\r\n**Examples**\r\n\r\n```terraform\r\nresource \"github_repository\" \"test\" {\r\n name = \"test\"\r\n visibility = \"private\"\r\n archived = true\r\n vulnerability_alerts = false\r\n}\r\n```\r\n\r\n**Version (please complete the following information):**\r\n - Starting with Checkov Version 2.0.764\r\n\r\n**Additional context**\r\nSee the [GitHub documentation](https://docs.github.com/en/code-security/supply-chain-security/managing-vulnerabilities-in-your-projects-dependencies/configuring-dependabot-security-updates#supported-repositories) that Dependabot is only supported on non-archived repositories.\r\n\n", "before_files": [{"content": "from typing import Any\n\nfrom checkov.common.models.enums import CheckCategories, CheckResult\nfrom checkov.terraform.checks.resource.base_resource_value_check import BaseResourceCheck\n\n\nclass GithubRepositoryVulnerabilityAlerts(BaseResourceCheck):\n def __init__(self) -> None:\n name = \"Ensure GitHub repository has vulnerability alerts enabled\"\n id = \"CKV_GIT_3\"\n supported_resources = [\"github_repository\"]\n categories = [CheckCategories.GENERAL_SECURITY]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf) -> CheckResult:\n # GitHub enables the alerts on public repos but disables them on private repos by default.\n # is private repo\n if conf.get(\"private\") == [True] or conf.get(\"visibility\") in [[\"private\"], [\"internal\"]]:\n if conf.get(\"vulnerability_alerts\"):\n return CheckResult.PASSED\n return CheckResult.FAILED\n # is public repo\n if conf.get(\"vulnerability_alerts\") == [False]:\n return CheckResult.FAILED\n return CheckResult.PASSED\n\n\ncheck = GithubRepositoryVulnerabilityAlerts()\n", "path": "checkov/terraform/checks/resource/github/RepositoryEnableVulnerabilityAlerts.py"}], "after_files": [{"content": "from typing import Any\n\nfrom checkov.common.models.enums import CheckCategories, CheckResult\nfrom checkov.terraform.checks.resource.base_resource_value_check import BaseResourceCheck\n\n\nclass GithubRepositoryVulnerabilityAlerts(BaseResourceCheck):\n def __init__(self) -> None:\n name = \"Ensure GitHub 
repository has vulnerability alerts enabled\"\n id = \"CKV_GIT_3\"\n supported_resources = [\"github_repository\"]\n categories = [CheckCategories.GENERAL_SECURITY]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf) -> CheckResult:\n # GitHub disables the alerts when archiving the repository without an option to turn them on again.\n if conf.get(\"archived\") == [True]:\n return CheckResult.PASSED\n # GitHub enables the alerts on public repos but disables them on private repos by default.\n # is private repo\n if conf.get(\"private\") == [True] or conf.get(\"visibility\") in [[\"private\"], [\"internal\"]]:\n if conf.get(\"vulnerability_alerts\"):\n return CheckResult.PASSED\n return CheckResult.FAILED\n # is public repo\n if conf.get(\"vulnerability_alerts\") == [False]:\n return CheckResult.FAILED\n return CheckResult.PASSED\n\n\ncheck = GithubRepositoryVulnerabilityAlerts()\n", "path": "checkov/terraform/checks/resource/github/RepositoryEnableVulnerabilityAlerts.py"}]}
804
219
gh_patches_debug_39922
rasdani/github-patches
git_diff
Textualize__textual-3872
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- RadioButton still selectable by keyboard when disabled It appears that when a `RadioButton` is disabled, click events no longer respond as expected, but you can still navigate to it with the keyboard and select it with `space` or `enter`. ![radio-select-disabled](https://github.com/Textualize/textual/assets/11033703/d3e8667f-a704-4d22-8692-c30873dd106a) ```python from textual.app import App, ComposeResult from textual.widgets import RadioButton, RadioSet, Static class Test(App): DEFAULT_CSS = """ RadioButton:disabled { color: red; } """ def compose(self) -> ComposeResult: with RadioSet(): yield RadioButton("Enabled 1") yield RadioButton("Disabled 2", disabled=True) yield RadioButton("Disabled 3", disabled=True) yield Static("Selected:", id="selected") def on_radio_set_changed(self, radio_set: RadioSet.Changed) -> None: self.query_one("#selected").update(f"Selected: {radio_set.pressed.label}") if __name__ == "__main__": app = Test() app.run() ``` # Textual Diagnostics ## Versions | Name | Value | |---------|--------| | Textual | 0.44.1 | | Rich | 13.7.0 | ## Python | Name | Value | |----------------|-----------------------------------------------------------| | Version | 3.8.10 | | Implementation | CPython | | Compiler | Clang 12.0.5 (clang-1205.0.22.9) | | Executable | /Users/geoff/Documents/projects/test/venv/bin/python | ## Operating System | Name | Value | |---------|-------------------------------------------------------------------------------------------------------| | System | Darwin | | Release | 22.5.0 | | Version | Darwin Kernel Version 22.5.0: Thu Jun 8 22:22:19 PDT 2023; root:xnu-8796.121.3~7/RELEASE_ARM64_T8103 | ## Terminal | Name | Value | |----------------------|-----------------| | Terminal Application | vscode (1.85.0) | | TERM | xterm-256color | | COLORTERM | truecolor | | FORCE_COLOR | *Not set* | | NO_COLOR | *Not set* | ## Rich Console options | Name | Value | |----------------|----------------------| | size | width=158, height=40 | | legacy_windows | False | | min_width | 1 | | max_width | 158 | | is_terminal | True | | encoding | utf-8 | | max_height | 40 | | justify | None | | overflow | None | | no_wrap | False | | highlight | None | | markup | None | | height | None | --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/textual/widgets/_radio_set.py` Content: ``` 1 """Provides a RadioSet widget, which groups radio buttons.""" 2 3 from __future__ import annotations 4 5 from typing import ClassVar, Optional 6 7 import rich.repr 8 9 from ..binding import Binding, BindingType 10 from ..containers import Container 11 from ..events import Click, Mount 12 from ..message import Message 13 from ..reactive import var 14 from ._radio_button import RadioButton 15 16 17 class RadioSet(Container, can_focus=True, can_focus_children=False): 18 """Widget for grouping a collection of radio buttons into a set. 19 20 When a collection of [`RadioButton`][textual.widgets.RadioButton]s are 21 grouped with this widget, they will be treated as a mutually-exclusive 22 grouping. If one button is turned on, the previously-on button will be 23 turned off. 
24 """ 25 26 DEFAULT_CSS = """ 27 RadioSet { 28 border: tall transparent; 29 background: $boost; 30 padding: 0 1 0 0; 31 height: auto; 32 width: auto; 33 } 34 35 RadioSet:focus { 36 border: tall $accent; 37 } 38 39 /* The following rules/styles mimic similar ToggleButton:focus rules in 40 * ToggleButton. If those styles ever get updated, these should be too. 41 */ 42 43 RadioSet > * { 44 background: transparent; 45 border: none; 46 padding: 0 1; 47 } 48 49 RadioSet:focus > RadioButton.-selected > .toggle--label { 50 text-style: underline; 51 } 52 53 RadioSet:focus ToggleButton.-selected > .toggle--button { 54 background: $foreground 25%; 55 } 56 57 RadioSet:focus > RadioButton.-on.-selected > .toggle--button { 58 background: $foreground 25%; 59 } 60 """ 61 62 BINDINGS: ClassVar[list[BindingType]] = [ 63 Binding("down,right", "next_button", "", show=False), 64 Binding("enter,space", "toggle", "Toggle", show=False), 65 Binding("up,left", "previous_button", "", show=False), 66 ] 67 """ 68 | Key(s) | Description | 69 | :- | :- | 70 | enter, space | Toggle the currently-selected button. | 71 | left, up | Select the previous radio button in the set. | 72 | right, down | Select the next radio button in the set. | 73 """ 74 75 _selected: var[int | None] = var[Optional[int]](None) 76 """The index of the currently-selected radio button.""" 77 78 @rich.repr.auto 79 class Changed(Message): 80 """Posted when the pressed button in the set changes. 81 82 This message can be handled using an `on_radio_set_changed` method. 83 """ 84 85 ALLOW_SELECTOR_MATCH = {"pressed"} 86 """Additional message attributes that can be used with the [`on` decorator][textual.on].""" 87 88 def __init__(self, radio_set: RadioSet, pressed: RadioButton) -> None: 89 """Initialise the message. 90 91 Args: 92 pressed: The radio button that was pressed. 93 """ 94 super().__init__() 95 self.radio_set = radio_set 96 """A reference to the [`RadioSet`][textual.widgets.RadioSet] that was changed.""" 97 self.pressed = pressed 98 """The [`RadioButton`][textual.widgets.RadioButton] that was pressed to make the change.""" 99 self.index = radio_set.pressed_index 100 """The index of the [`RadioButton`][textual.widgets.RadioButton] that was pressed to make the change.""" 101 102 @property 103 def control(self) -> RadioSet: 104 """A reference to the [`RadioSet`][textual.widgets.RadioSet] that was changed. 105 106 This is an alias for [`Changed.radio_set`][textual.widgets.RadioSet.Changed.radio_set] 107 and is used by the [`on`][textual.on] decorator. 108 """ 109 return self.radio_set 110 111 def __rich_repr__(self) -> rich.repr.Result: 112 yield "radio_set", self.radio_set 113 yield "pressed", self.pressed 114 yield "index", self.index 115 116 def __init__( 117 self, 118 *buttons: str | RadioButton, 119 name: str | None = None, 120 id: str | None = None, 121 classes: str | None = None, 122 disabled: bool = False, 123 ) -> None: 124 """Initialise the radio set. 125 126 Args: 127 buttons: The labels or [`RadioButton`][textual.widgets.RadioButton]s to group together. 128 name: The name of the radio set. 129 id: The ID of the radio set in the DOM. 130 classes: The CSS classes of the radio set. 131 disabled: Whether the radio set is disabled or not. 132 133 Note: 134 When a `str` label is provided, a 135 [RadioButton][textual.widgets.RadioButton] will be created from 136 it. 
137 """ 138 self._pressed_button: RadioButton | None = None 139 """Holds the radio buttons we're responsible for.""" 140 super().__init__( 141 *[ 142 (button if isinstance(button, RadioButton) else RadioButton(button)) 143 for button in buttons 144 ], 145 name=name, 146 id=id, 147 classes=classes, 148 disabled=disabled, 149 ) 150 151 def _on_mount(self, _: Mount) -> None: 152 """Perform some processing once mounted in the DOM.""" 153 154 # If there are radio buttons, select the first one. 155 if self._nodes: 156 self._selected = 0 157 158 # Get all the buttons within us; we'll be doing a couple of things 159 # with that list. 160 buttons = list(self.query(RadioButton)) 161 162 # RadioButtons can have focus, by default. But we're going to take 163 # that over and handle movement between them. So here we tell them 164 # all they can't focus. 165 for button in buttons: 166 button.can_focus = False 167 168 # It's possible for the user to pass in a collection of radio 169 # buttons, with more than one set to on; they shouldn't, but we 170 # can't stop them. So here we check for that and, for want of a 171 # better approach, we keep the first one on and turn all the others 172 # off. 173 switched_on = [button for button in buttons if button.value] 174 with self.prevent(RadioButton.Changed): 175 for button in switched_on[1:]: 176 button.value = False 177 178 # Keep track of which button is initially pressed. 179 if switched_on: 180 self._pressed_button = switched_on[0] 181 182 def watch__selected(self) -> None: 183 self.query(RadioButton).remove_class("-selected") 184 if self._selected is not None: 185 self._nodes[self._selected].add_class("-selected") 186 187 def _on_radio_button_changed(self, event: RadioButton.Changed) -> None: 188 """Respond to the value of a button in the set being changed. 189 190 Args: 191 event: The event. 192 """ 193 # We're going to consume the underlying radio button events, making 194 # it appear as if they don't emit their own, as far as the caller is 195 # concerned. As such, stop the event bubbling and also prohibit the 196 # same event being sent out if/when we make a value change in here. 197 event.stop() 198 with self.prevent(RadioButton.Changed): 199 # If the message pertains to a button being clicked to on... 200 if event.radio_button.value: 201 # If there's a button pressed right now and it's not really a 202 # case of the user mashing on the same button... 203 if ( 204 self._pressed_button is not None 205 and self._pressed_button != event.radio_button 206 ): 207 self._pressed_button.value = False 208 # Make the pressed button this new button. 209 self._pressed_button = event.radio_button 210 # Emit a message to say our state has changed. 211 self.post_message(self.Changed(self, event.radio_button)) 212 else: 213 # We're being clicked off, we don't want that. 214 event.radio_button.value = True 215 216 def _on_radio_set_changed(self, event: RadioSet.Changed) -> None: 217 """Handle a change to which button in the set is pressed. 218 219 This handler ensures that, when a button is pressed, it's also the 220 selected button. 221 """ 222 self._selected = event.index 223 224 async def _on_click(self, _: Click) -> None: 225 """Handle a click on or within the radio set. 226 227 This handler ensures that focus moves to the clicked radio set, even 228 if there's a click on one of the radio buttons it contains. 
229 """ 230 self.focus() 231 232 @property 233 def pressed_button(self) -> RadioButton | None: 234 """The currently-pressed [`RadioButton`][textual.widgets.RadioButton], or `None` if none are pressed.""" 235 return self._pressed_button 236 237 @property 238 def pressed_index(self) -> int: 239 """The index of the currently-pressed [`RadioButton`][textual.widgets.RadioButton], or -1 if none are pressed.""" 240 return ( 241 self._nodes.index(self._pressed_button) 242 if self._pressed_button is not None 243 else -1 244 ) 245 246 def action_previous_button(self) -> None: 247 """Navigate to the previous button in the set. 248 249 Note that this will wrap around to the end if at the start. 250 """ 251 if self._nodes: 252 if self._selected == 0: 253 self._selected = len(self.children) - 1 254 elif self._selected is None: 255 self._selected = 0 256 else: 257 self._selected -= 1 258 259 def action_next_button(self) -> None: 260 """Navigate to the next button in the set. 261 262 Note that this will wrap around to the start if at the end. 263 """ 264 if self._nodes: 265 if self._selected is None or self._selected == len(self._nodes) - 1: 266 self._selected = 0 267 else: 268 self._selected += 1 269 270 def action_toggle(self) -> None: 271 """Toggle the state of the currently-selected button.""" 272 if self._selected is not None: 273 button = self._nodes[self._selected] 274 assert isinstance(button, RadioButton) 275 button.toggle() 276 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/textual/widgets/_radio_set.py b/src/textual/widgets/_radio_set.py --- a/src/textual/widgets/_radio_set.py +++ b/src/textual/widgets/_radio_set.py @@ -2,7 +2,8 @@ from __future__ import annotations -from typing import ClassVar, Optional +from contextlib import suppress +from typing import ClassVar, Literal, Optional import rich.repr @@ -151,9 +152,8 @@ def _on_mount(self, _: Mount) -> None: """Perform some processing once mounted in the DOM.""" - # If there are radio buttons, select the first one. - if self._nodes: - self._selected = 0 + # If there are radio buttons, select the first available one. + self.action_next_button() # Get all the buttons within us; we'll be doing a couple of things # with that list. @@ -248,24 +248,58 @@ Note that this will wrap around to the end if at the start. """ - if self._nodes: - if self._selected == 0: - self._selected = len(self.children) - 1 - elif self._selected is None: - self._selected = 0 - else: - self._selected -= 1 + self._move_selected_button(-1) def action_next_button(self) -> None: """Navigate to the next button in the set. Note that this will wrap around to the start if at the end. """ - if self._nodes: - if self._selected is None or self._selected == len(self._nodes) - 1: - self._selected = 0 - else: - self._selected += 1 + self._move_selected_button(1) + + def _move_selected_button(self, direction: Literal[-1, 1]) -> None: + """Move the selected button to the next or previous one. + + Note that this will wrap around the start/end of the button list. + + We compute the available buttons by ignoring the disabled ones and then + we induce an ordering by computing the distance to the currently selected one if + we start at the selected button and then start moving in the direction indicated. + + For example, if the direction is `1` and self._selected is 2, we have this: + selected: v + buttons: X X X X X X X + indices: 0 1 2 3 4 5 6 + distance: 5 6 0 1 2 3 4 + + Args: + direction: `1` to move to the next button and `-1` for the previous. + """ + + candidate_indices = ( + index + for index, button in enumerate(self.children) + if not button.disabled and index != self._selected + ) + + if self._selected is None: + with suppress(StopIteration): + self._selected = next(candidate_indices) + else: + selected = self._selected + + def distance(index: int) -> int: + """Induce a distance between the given index and the selected button. + + Args: + index: The index of the button to consider. + + Returns: + The distance between the two buttons. + """ + return direction * (index - selected) % len(self.children) + + self._selected = min(candidate_indices, key=distance, default=None) def action_toggle(self) -> None: """Toggle the state of the currently-selected button."""
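For reference, the heart of the fix is the modular "distance" ordering that picks the next enabled button while wrapping around the set. Below is a standalone sketch of that logic; the helper name and the driver values are illustrative rather than part of Textual's API, assuming four buttons with indices 1 and 2 disabled (the issue's example app plus one extra enabled button).

```python
# Standalone sketch of the wrap-around skip logic added to RadioSet by the patch.
def next_enabled(selected, disabled, count, direction=1):
    candidates = (i for i in range(count) if i not in disabled and i != selected)
    if selected is None:
        return next(candidates, None)
    # same modular distance ordering as _move_selected_button in the diff above
    return min(candidates, key=lambda i: direction * (i - selected) % count, default=None)

print(next_enabled(0, {1, 2}, 4))                # 3 -- both disabled buttons are skipped
print(next_enabled(3, {1, 2}, 4))                # 0 -- wraps around past the end of the set
print(next_enabled(0, {1, 2}, 4, direction=-1))  # 3 -- moving "up" from the first button also skips them
```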
{"golden_diff": "diff --git a/src/textual/widgets/_radio_set.py b/src/textual/widgets/_radio_set.py\n--- a/src/textual/widgets/_radio_set.py\n+++ b/src/textual/widgets/_radio_set.py\n@@ -2,7 +2,8 @@\n \n from __future__ import annotations\n \n-from typing import ClassVar, Optional\n+from contextlib import suppress\n+from typing import ClassVar, Literal, Optional\n \n import rich.repr\n \n@@ -151,9 +152,8 @@\n def _on_mount(self, _: Mount) -> None:\n \"\"\"Perform some processing once mounted in the DOM.\"\"\"\n \n- # If there are radio buttons, select the first one.\n- if self._nodes:\n- self._selected = 0\n+ # If there are radio buttons, select the first available one.\n+ self.action_next_button()\n \n # Get all the buttons within us; we'll be doing a couple of things\n # with that list.\n@@ -248,24 +248,58 @@\n \n Note that this will wrap around to the end if at the start.\n \"\"\"\n- if self._nodes:\n- if self._selected == 0:\n- self._selected = len(self.children) - 1\n- elif self._selected is None:\n- self._selected = 0\n- else:\n- self._selected -= 1\n+ self._move_selected_button(-1)\n \n def action_next_button(self) -> None:\n \"\"\"Navigate to the next button in the set.\n \n Note that this will wrap around to the start if at the end.\n \"\"\"\n- if self._nodes:\n- if self._selected is None or self._selected == len(self._nodes) - 1:\n- self._selected = 0\n- else:\n- self._selected += 1\n+ self._move_selected_button(1)\n+\n+ def _move_selected_button(self, direction: Literal[-1, 1]) -> None:\n+ \"\"\"Move the selected button to the next or previous one.\n+\n+ Note that this will wrap around the start/end of the button list.\n+\n+ We compute the available buttons by ignoring the disabled ones and then\n+ we induce an ordering by computing the distance to the currently selected one if\n+ we start at the selected button and then start moving in the direction indicated.\n+\n+ For example, if the direction is `1` and self._selected is 2, we have this:\n+ selected: v\n+ buttons: X X X X X X X\n+ indices: 0 1 2 3 4 5 6\n+ distance: 5 6 0 1 2 3 4\n+\n+ Args:\n+ direction: `1` to move to the next button and `-1` for the previous.\n+ \"\"\"\n+\n+ candidate_indices = (\n+ index\n+ for index, button in enumerate(self.children)\n+ if not button.disabled and index != self._selected\n+ )\n+\n+ if self._selected is None:\n+ with suppress(StopIteration):\n+ self._selected = next(candidate_indices)\n+ else:\n+ selected = self._selected\n+\n+ def distance(index: int) -> int:\n+ \"\"\"Induce a distance between the given index and the selected button.\n+\n+ Args:\n+ index: The index of the button to consider.\n+\n+ Returns:\n+ The distance between the two buttons.\n+ \"\"\"\n+ return direction * (index - selected) % len(self.children)\n+\n+ self._selected = min(candidate_indices, key=distance, default=None)\n \n def action_toggle(self) -> None:\n \"\"\"Toggle the state of the currently-selected button.\"\"\"\n", "issue": "RadioButton still selectable by keyboard when disabled\nIt appears that when a `RadioButton` is disabled, click events no longer respond as expected, but you can still navigate to it with the keyboard and select it with `space` or `enter`.\r\n\r\n![radio-select-disabled](https://github.com/Textualize/textual/assets/11033703/d3e8667f-a704-4d22-8692-c30873dd106a)\r\n\r\n```python\r\nfrom textual.app import App, ComposeResult\r\nfrom textual.widgets import RadioButton, RadioSet, Static\r\n\r\n\r\nclass Test(App):\r\n DEFAULT_CSS = \"\"\"\r\n RadioButton:disabled {\r\n color: red;\r\n }\r\n 
\"\"\"\r\n\r\n def compose(self) -> ComposeResult:\r\n with RadioSet():\r\n yield RadioButton(\"Enabled 1\")\r\n yield RadioButton(\"Disabled 2\", disabled=True)\r\n yield RadioButton(\"Disabled 3\", disabled=True)\r\n yield Static(\"Selected:\", id=\"selected\")\r\n\r\n def on_radio_set_changed(self, radio_set: RadioSet.Changed) -> None:\r\n self.query_one(\"#selected\").update(f\"Selected: {radio_set.pressed.label}\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n app = Test()\r\n app.run()\r\n```\r\n\r\n# Textual Diagnostics\r\n\r\n## Versions\r\n\r\n| Name | Value |\r\n|---------|--------|\r\n| Textual | 0.44.1 |\r\n| Rich | 13.7.0 |\r\n\r\n## Python\r\n\r\n| Name | Value |\r\n|----------------|-----------------------------------------------------------|\r\n| Version | 3.8.10 |\r\n| Implementation | CPython |\r\n| Compiler | Clang 12.0.5 (clang-1205.0.22.9) |\r\n| Executable | /Users/geoff/Documents/projects/test/venv/bin/python |\r\n\r\n## Operating System\r\n\r\n| Name | Value |\r\n|---------|-------------------------------------------------------------------------------------------------------|\r\n| System | Darwin |\r\n| Release | 22.5.0 |\r\n| Version | Darwin Kernel Version 22.5.0: Thu Jun 8 22:22:19 PDT 2023; root:xnu-8796.121.3~7/RELEASE_ARM64_T8103 |\r\n\r\n## Terminal\r\n\r\n| Name | Value |\r\n|----------------------|-----------------|\r\n| Terminal Application | vscode (1.85.0) |\r\n| TERM | xterm-256color |\r\n| COLORTERM | truecolor |\r\n| FORCE_COLOR | *Not set* |\r\n| NO_COLOR | *Not set* |\r\n\r\n## Rich Console options\r\n\r\n| Name | Value |\r\n|----------------|----------------------|\r\n| size | width=158, height=40 |\r\n| legacy_windows | False |\r\n| min_width | 1 |\r\n| max_width | 158 |\r\n| is_terminal | True |\r\n| encoding | utf-8 |\r\n| max_height | 40 |\r\n| justify | None |\r\n| overflow | None |\r\n| no_wrap | False |\r\n| highlight | None |\r\n| markup | None |\r\n| height | None |\r\n\r\n\r\n\n", "before_files": [{"content": "\"\"\"Provides a RadioSet widget, which groups radio buttons.\"\"\"\n\nfrom __future__ import annotations\n\nfrom typing import ClassVar, Optional\n\nimport rich.repr\n\nfrom ..binding import Binding, BindingType\nfrom ..containers import Container\nfrom ..events import Click, Mount\nfrom ..message import Message\nfrom ..reactive import var\nfrom ._radio_button import RadioButton\n\n\nclass RadioSet(Container, can_focus=True, can_focus_children=False):\n \"\"\"Widget for grouping a collection of radio buttons into a set.\n\n When a collection of [`RadioButton`][textual.widgets.RadioButton]s are\n grouped with this widget, they will be treated as a mutually-exclusive\n grouping. If one button is turned on, the previously-on button will be\n turned off.\n \"\"\"\n\n DEFAULT_CSS = \"\"\"\n RadioSet {\n border: tall transparent;\n background: $boost;\n padding: 0 1 0 0;\n height: auto;\n width: auto;\n }\n\n RadioSet:focus {\n border: tall $accent;\n }\n\n /* The following rules/styles mimic similar ToggleButton:focus rules in\n * ToggleButton. 
If those styles ever get updated, these should be too.\n */\n\n RadioSet > * {\n background: transparent;\n border: none;\n padding: 0 1;\n }\n\n RadioSet:focus > RadioButton.-selected > .toggle--label {\n text-style: underline;\n }\n\n RadioSet:focus ToggleButton.-selected > .toggle--button {\n background: $foreground 25%;\n }\n\n RadioSet:focus > RadioButton.-on.-selected > .toggle--button {\n background: $foreground 25%;\n }\n \"\"\"\n\n BINDINGS: ClassVar[list[BindingType]] = [\n Binding(\"down,right\", \"next_button\", \"\", show=False),\n Binding(\"enter,space\", \"toggle\", \"Toggle\", show=False),\n Binding(\"up,left\", \"previous_button\", \"\", show=False),\n ]\n \"\"\"\n | Key(s) | Description |\n | :- | :- |\n | enter, space | Toggle the currently-selected button. |\n | left, up | Select the previous radio button in the set. |\n | right, down | Select the next radio button in the set. |\n \"\"\"\n\n _selected: var[int | None] = var[Optional[int]](None)\n \"\"\"The index of the currently-selected radio button.\"\"\"\n\n @rich.repr.auto\n class Changed(Message):\n \"\"\"Posted when the pressed button in the set changes.\n\n This message can be handled using an `on_radio_set_changed` method.\n \"\"\"\n\n ALLOW_SELECTOR_MATCH = {\"pressed\"}\n \"\"\"Additional message attributes that can be used with the [`on` decorator][textual.on].\"\"\"\n\n def __init__(self, radio_set: RadioSet, pressed: RadioButton) -> None:\n \"\"\"Initialise the message.\n\n Args:\n pressed: The radio button that was pressed.\n \"\"\"\n super().__init__()\n self.radio_set = radio_set\n \"\"\"A reference to the [`RadioSet`][textual.widgets.RadioSet] that was changed.\"\"\"\n self.pressed = pressed\n \"\"\"The [`RadioButton`][textual.widgets.RadioButton] that was pressed to make the change.\"\"\"\n self.index = radio_set.pressed_index\n \"\"\"The index of the [`RadioButton`][textual.widgets.RadioButton] that was pressed to make the change.\"\"\"\n\n @property\n def control(self) -> RadioSet:\n \"\"\"A reference to the [`RadioSet`][textual.widgets.RadioSet] that was changed.\n\n This is an alias for [`Changed.radio_set`][textual.widgets.RadioSet.Changed.radio_set]\n and is used by the [`on`][textual.on] decorator.\n \"\"\"\n return self.radio_set\n\n def __rich_repr__(self) -> rich.repr.Result:\n yield \"radio_set\", self.radio_set\n yield \"pressed\", self.pressed\n yield \"index\", self.index\n\n def __init__(\n self,\n *buttons: str | RadioButton,\n name: str | None = None,\n id: str | None = None,\n classes: str | None = None,\n disabled: bool = False,\n ) -> None:\n \"\"\"Initialise the radio set.\n\n Args:\n buttons: The labels or [`RadioButton`][textual.widgets.RadioButton]s to group together.\n name: The name of the radio set.\n id: The ID of the radio set in the DOM.\n classes: The CSS classes of the radio set.\n disabled: Whether the radio set is disabled or not.\n\n Note:\n When a `str` label is provided, a\n [RadioButton][textual.widgets.RadioButton] will be created from\n it.\n \"\"\"\n self._pressed_button: RadioButton | None = None\n \"\"\"Holds the radio buttons we're responsible for.\"\"\"\n super().__init__(\n *[\n (button if isinstance(button, RadioButton) else RadioButton(button))\n for button in buttons\n ],\n name=name,\n id=id,\n classes=classes,\n disabled=disabled,\n )\n\n def _on_mount(self, _: Mount) -> None:\n \"\"\"Perform some processing once mounted in the DOM.\"\"\"\n\n # If there are radio buttons, select the first one.\n if self._nodes:\n self._selected = 0\n\n # Get all the 
buttons within us; we'll be doing a couple of things\n # with that list.\n buttons = list(self.query(RadioButton))\n\n # RadioButtons can have focus, by default. But we're going to take\n # that over and handle movement between them. So here we tell them\n # all they can't focus.\n for button in buttons:\n button.can_focus = False\n\n # It's possible for the user to pass in a collection of radio\n # buttons, with more than one set to on; they shouldn't, but we\n # can't stop them. So here we check for that and, for want of a\n # better approach, we keep the first one on and turn all the others\n # off.\n switched_on = [button for button in buttons if button.value]\n with self.prevent(RadioButton.Changed):\n for button in switched_on[1:]:\n button.value = False\n\n # Keep track of which button is initially pressed.\n if switched_on:\n self._pressed_button = switched_on[0]\n\n def watch__selected(self) -> None:\n self.query(RadioButton).remove_class(\"-selected\")\n if self._selected is not None:\n self._nodes[self._selected].add_class(\"-selected\")\n\n def _on_radio_button_changed(self, event: RadioButton.Changed) -> None:\n \"\"\"Respond to the value of a button in the set being changed.\n\n Args:\n event: The event.\n \"\"\"\n # We're going to consume the underlying radio button events, making\n # it appear as if they don't emit their own, as far as the caller is\n # concerned. As such, stop the event bubbling and also prohibit the\n # same event being sent out if/when we make a value change in here.\n event.stop()\n with self.prevent(RadioButton.Changed):\n # If the message pertains to a button being clicked to on...\n if event.radio_button.value:\n # If there's a button pressed right now and it's not really a\n # case of the user mashing on the same button...\n if (\n self._pressed_button is not None\n and self._pressed_button != event.radio_button\n ):\n self._pressed_button.value = False\n # Make the pressed button this new button.\n self._pressed_button = event.radio_button\n # Emit a message to say our state has changed.\n self.post_message(self.Changed(self, event.radio_button))\n else:\n # We're being clicked off, we don't want that.\n event.radio_button.value = True\n\n def _on_radio_set_changed(self, event: RadioSet.Changed) -> None:\n \"\"\"Handle a change to which button in the set is pressed.\n\n This handler ensures that, when a button is pressed, it's also the\n selected button.\n \"\"\"\n self._selected = event.index\n\n async def _on_click(self, _: Click) -> None:\n \"\"\"Handle a click on or within the radio set.\n\n This handler ensures that focus moves to the clicked radio set, even\n if there's a click on one of the radio buttons it contains.\n \"\"\"\n self.focus()\n\n @property\n def pressed_button(self) -> RadioButton | None:\n \"\"\"The currently-pressed [`RadioButton`][textual.widgets.RadioButton], or `None` if none are pressed.\"\"\"\n return self._pressed_button\n\n @property\n def pressed_index(self) -> int:\n \"\"\"The index of the currently-pressed [`RadioButton`][textual.widgets.RadioButton], or -1 if none are pressed.\"\"\"\n return (\n self._nodes.index(self._pressed_button)\n if self._pressed_button is not None\n else -1\n )\n\n def action_previous_button(self) -> None:\n \"\"\"Navigate to the previous button in the set.\n\n Note that this will wrap around to the end if at the start.\n \"\"\"\n if self._nodes:\n if self._selected == 0:\n self._selected = len(self.children) - 1\n elif self._selected is None:\n self._selected = 0\n else:\n self._selected 
-= 1\n\n def action_next_button(self) -> None:\n \"\"\"Navigate to the next button in the set.\n\n Note that this will wrap around to the start if at the end.\n \"\"\"\n if self._nodes:\n if self._selected is None or self._selected == len(self._nodes) - 1:\n self._selected = 0\n else:\n self._selected += 1\n\n def action_toggle(self) -> None:\n \"\"\"Toggle the state of the currently-selected button.\"\"\"\n if self._selected is not None:\n button = self._nodes[self._selected]\n assert isinstance(button, RadioButton)\n button.toggle()\n", "path": "src/textual/widgets/_radio_set.py"}], "after_files": [{"content": "\"\"\"Provides a RadioSet widget, which groups radio buttons.\"\"\"\n\nfrom __future__ import annotations\n\nfrom contextlib import suppress\nfrom typing import ClassVar, Literal, Optional\n\nimport rich.repr\n\nfrom ..binding import Binding, BindingType\nfrom ..containers import Container\nfrom ..events import Click, Mount\nfrom ..message import Message\nfrom ..reactive import var\nfrom ._radio_button import RadioButton\n\n\nclass RadioSet(Container, can_focus=True, can_focus_children=False):\n \"\"\"Widget for grouping a collection of radio buttons into a set.\n\n When a collection of [`RadioButton`][textual.widgets.RadioButton]s are\n grouped with this widget, they will be treated as a mutually-exclusive\n grouping. If one button is turned on, the previously-on button will be\n turned off.\n \"\"\"\n\n DEFAULT_CSS = \"\"\"\n RadioSet {\n border: tall transparent;\n background: $boost;\n padding: 0 1 0 0;\n height: auto;\n width: auto;\n }\n\n RadioSet:focus {\n border: tall $accent;\n }\n\n /* The following rules/styles mimic similar ToggleButton:focus rules in\n * ToggleButton. If those styles ever get updated, these should be too.\n */\n\n RadioSet > * {\n background: transparent;\n border: none;\n padding: 0 1;\n }\n\n RadioSet:focus > RadioButton.-selected > .toggle--label {\n text-style: underline;\n }\n\n RadioSet:focus ToggleButton.-selected > .toggle--button {\n background: $foreground 25%;\n }\n\n RadioSet:focus > RadioButton.-on.-selected > .toggle--button {\n background: $foreground 25%;\n }\n \"\"\"\n\n BINDINGS: ClassVar[list[BindingType]] = [\n Binding(\"down,right\", \"next_button\", \"\", show=False),\n Binding(\"enter,space\", \"toggle\", \"Toggle\", show=False),\n Binding(\"up,left\", \"previous_button\", \"\", show=False),\n ]\n \"\"\"\n | Key(s) | Description |\n | :- | :- |\n | enter, space | Toggle the currently-selected button. |\n | left, up | Select the previous radio button in the set. |\n | right, down | Select the next radio button in the set. 
|\n \"\"\"\n\n _selected: var[int | None] = var[Optional[int]](None)\n \"\"\"The index of the currently-selected radio button.\"\"\"\n\n @rich.repr.auto\n class Changed(Message):\n \"\"\"Posted when the pressed button in the set changes.\n\n This message can be handled using an `on_radio_set_changed` method.\n \"\"\"\n\n ALLOW_SELECTOR_MATCH = {\"pressed\"}\n \"\"\"Additional message attributes that can be used with the [`on` decorator][textual.on].\"\"\"\n\n def __init__(self, radio_set: RadioSet, pressed: RadioButton) -> None:\n \"\"\"Initialise the message.\n\n Args:\n pressed: The radio button that was pressed.\n \"\"\"\n super().__init__()\n self.radio_set = radio_set\n \"\"\"A reference to the [`RadioSet`][textual.widgets.RadioSet] that was changed.\"\"\"\n self.pressed = pressed\n \"\"\"The [`RadioButton`][textual.widgets.RadioButton] that was pressed to make the change.\"\"\"\n self.index = radio_set.pressed_index\n \"\"\"The index of the [`RadioButton`][textual.widgets.RadioButton] that was pressed to make the change.\"\"\"\n\n @property\n def control(self) -> RadioSet:\n \"\"\"A reference to the [`RadioSet`][textual.widgets.RadioSet] that was changed.\n\n This is an alias for [`Changed.radio_set`][textual.widgets.RadioSet.Changed.radio_set]\n and is used by the [`on`][textual.on] decorator.\n \"\"\"\n return self.radio_set\n\n def __rich_repr__(self) -> rich.repr.Result:\n yield \"radio_set\", self.radio_set\n yield \"pressed\", self.pressed\n yield \"index\", self.index\n\n def __init__(\n self,\n *buttons: str | RadioButton,\n name: str | None = None,\n id: str | None = None,\n classes: str | None = None,\n disabled: bool = False,\n ) -> None:\n \"\"\"Initialise the radio set.\n\n Args:\n buttons: The labels or [`RadioButton`][textual.widgets.RadioButton]s to group together.\n name: The name of the radio set.\n id: The ID of the radio set in the DOM.\n classes: The CSS classes of the radio set.\n disabled: Whether the radio set is disabled or not.\n\n Note:\n When a `str` label is provided, a\n [RadioButton][textual.widgets.RadioButton] will be created from\n it.\n \"\"\"\n self._pressed_button: RadioButton | None = None\n \"\"\"Holds the radio buttons we're responsible for.\"\"\"\n super().__init__(\n *[\n (button if isinstance(button, RadioButton) else RadioButton(button))\n for button in buttons\n ],\n name=name,\n id=id,\n classes=classes,\n disabled=disabled,\n )\n\n def _on_mount(self, _: Mount) -> None:\n \"\"\"Perform some processing once mounted in the DOM.\"\"\"\n\n # If there are radio buttons, select the first available one.\n self.action_next_button()\n\n # Get all the buttons within us; we'll be doing a couple of things\n # with that list.\n buttons = list(self.query(RadioButton))\n\n # RadioButtons can have focus, by default. But we're going to take\n # that over and handle movement between them. So here we tell them\n # all they can't focus.\n for button in buttons:\n button.can_focus = False\n\n # It's possible for the user to pass in a collection of radio\n # buttons, with more than one set to on; they shouldn't, but we\n # can't stop them. 
So here we check for that and, for want of a\n # better approach, we keep the first one on and turn all the others\n # off.\n switched_on = [button for button in buttons if button.value]\n with self.prevent(RadioButton.Changed):\n for button in switched_on[1:]:\n button.value = False\n\n # Keep track of which button is initially pressed.\n if switched_on:\n self._pressed_button = switched_on[0]\n\n def watch__selected(self) -> None:\n self.query(RadioButton).remove_class(\"-selected\")\n if self._selected is not None:\n self._nodes[self._selected].add_class(\"-selected\")\n\n def _on_radio_button_changed(self, event: RadioButton.Changed) -> None:\n \"\"\"Respond to the value of a button in the set being changed.\n\n Args:\n event: The event.\n \"\"\"\n # We're going to consume the underlying radio button events, making\n # it appear as if they don't emit their own, as far as the caller is\n # concerned. As such, stop the event bubbling and also prohibit the\n # same event being sent out if/when we make a value change in here.\n event.stop()\n with self.prevent(RadioButton.Changed):\n # If the message pertains to a button being clicked to on...\n if event.radio_button.value:\n # If there's a button pressed right now and it's not really a\n # case of the user mashing on the same button...\n if (\n self._pressed_button is not None\n and self._pressed_button != event.radio_button\n ):\n self._pressed_button.value = False\n # Make the pressed button this new button.\n self._pressed_button = event.radio_button\n # Emit a message to say our state has changed.\n self.post_message(self.Changed(self, event.radio_button))\n else:\n # We're being clicked off, we don't want that.\n event.radio_button.value = True\n\n def _on_radio_set_changed(self, event: RadioSet.Changed) -> None:\n \"\"\"Handle a change to which button in the set is pressed.\n\n This handler ensures that, when a button is pressed, it's also the\n selected button.\n \"\"\"\n self._selected = event.index\n\n async def _on_click(self, _: Click) -> None:\n \"\"\"Handle a click on or within the radio set.\n\n This handler ensures that focus moves to the clicked radio set, even\n if there's a click on one of the radio buttons it contains.\n \"\"\"\n self.focus()\n\n @property\n def pressed_button(self) -> RadioButton | None:\n \"\"\"The currently-pressed [`RadioButton`][textual.widgets.RadioButton], or `None` if none are pressed.\"\"\"\n return self._pressed_button\n\n @property\n def pressed_index(self) -> int:\n \"\"\"The index of the currently-pressed [`RadioButton`][textual.widgets.RadioButton], or -1 if none are pressed.\"\"\"\n return (\n self._nodes.index(self._pressed_button)\n if self._pressed_button is not None\n else -1\n )\n\n def action_previous_button(self) -> None:\n \"\"\"Navigate to the previous button in the set.\n\n Note that this will wrap around to the end if at the start.\n \"\"\"\n self._move_selected_button(-1)\n\n def action_next_button(self) -> None:\n \"\"\"Navigate to the next button in the set.\n\n Note that this will wrap around to the start if at the end.\n \"\"\"\n self._move_selected_button(1)\n\n def _move_selected_button(self, direction: Literal[-1, 1]) -> None:\n \"\"\"Move the selected button to the next or previous one.\n\n Note that this will wrap around the start/end of the button list.\n\n We compute the available buttons by ignoring the disabled ones and then\n we induce an ordering by computing the distance to the currently selected one if\n we start at the selected button and then start moving in 
the direction indicated.\n\n For example, if the direction is `1` and self._selected is 2, we have this:\n selected: v\n buttons: X X X X X X X\n indices: 0 1 2 3 4 5 6\n distance: 5 6 0 1 2 3 4\n\n Args:\n direction: `1` to move to the next button and `-1` for the previous.\n \"\"\"\n\n candidate_indices = (\n index\n for index, button in enumerate(self.children)\n if not button.disabled and index != self._selected\n )\n\n if self._selected is None:\n with suppress(StopIteration):\n self._selected = next(candidate_indices)\n else:\n selected = self._selected\n\n def distance(index: int) -> int:\n \"\"\"Induce a distance between the given index and the selected button.\n\n Args:\n index: The index of the button to consider.\n\n Returns:\n The distance between the two buttons.\n \"\"\"\n return direction * (index - selected) % len(self.children)\n\n self._selected = min(candidate_indices, key=distance, default=None)\n\n def action_toggle(self) -> None:\n \"\"\"Toggle the state of the currently-selected button.\"\"\"\n if self._selected is not None:\n button = self._nodes[self._selected]\n assert isinstance(button, RadioButton)\n button.toggle()\n", "path": "src/textual/widgets/_radio_set.py"}]}
3,886
826
gh_patches_debug_63
rasdani/github-patches
git_diff
strawberry-graphql__strawberry-378
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Cannot create type with multiple Unions ```python from typing import Union import strawberry @strawberry.type class CoolType: @strawberry.type class UnionA1: value: int @strawberry.type class UnionA2: value: int @strawberry.type class UnionB1: value: int @strawberry.type class UnionB2: value: int field1: Union[UnionA1, UnionA2] field2: Union[UnionB1, UnionB2] schema = strawberry.Schema(query=CoolType) ``` ```.pytb Traceback (most recent call last): File "/home/ignormies/.config/JetBrains/PyCharm2020.1/scratches/scratch.py", line 28, in <module> schema = strawberry.Schema(query=CoolType) File "/home/ignormies/.local/share/virtualenvs/gql-bf-XGX4szKA-py3.8/lib/python3.8/site-packages/strawberry/schema.py", line 25, in __init__ super().__init__( File "/home/ignormies/.local/share/virtualenvs/gql-bf-XGX4szKA-py3.8/lib/python3.8/site-packages/graphql/type/schema.py", line 239, in __init__ raise TypeError( TypeError: Schema must contain uniquely named types but contains multiple types named '_resolver'. ``` Removing either `field1` or `field2` allows the schema to be created --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `strawberry/type.py` Content: ``` 1 import copy 2 import dataclasses 3 from functools import partial 4 from typing import Optional 5 6 from graphql import GraphQLInputObjectType, GraphQLInterfaceType, GraphQLObjectType 7 8 from .constants import IS_STRAWBERRY_FIELD, IS_STRAWBERRY_INPUT, IS_STRAWBERRY_INTERFACE 9 from .field import field, strawberry_field 10 from .type_registry import register_type 11 from .utils.str_converters import to_camel_case 12 from .utils.typing import get_actual_type, has_type_var, is_type_var 13 14 15 def _interface_resolve_type(result, info, return_type): 16 """Resolves the correct type for an interface""" 17 return result.__class__.graphql_type 18 19 20 def _get_resolver(cls, field_name): 21 class_field = getattr(cls, field_name, None) 22 23 if class_field and getattr(class_field, "resolver", None): 24 return class_field.resolver 25 26 def _resolver(root, info): 27 if not root: 28 return None 29 30 field_resolver = getattr(root, field_name, None) 31 32 if getattr(field_resolver, IS_STRAWBERRY_FIELD, False): 33 return field_resolver(root, info) 34 35 elif field_resolver.__class__ is strawberry_field: 36 # TODO: support default values 37 return None 38 39 return field_resolver 40 41 return _resolver 42 43 44 def _process_type( 45 cls, *, name=None, is_input=False, is_interface=False, description=None 46 ): 47 name = name or cls.__name__ 48 49 def _get_fields(wrapped, types_replacement_map=None): 50 class_fields = dataclasses.fields(wrapped) 51 52 fields = {} 53 54 for class_field in class_fields: 55 # we want to make a copy of the original field when dealing 56 # with generic types and also get the actual type for the type var 57 if is_type_var(class_field.type) or has_type_var(class_field.type): 58 class_field = copy.copy(class_field) 59 class_field.type = get_actual_type( 60 class_field.type, types_replacement_map 61 ) 62 # like args, a None default implies Optional 63 if class_field.default is None: 64 class_field.type = Optional[class_field.type] 65 66 field_name = getattr(class_field, "field_name", None) or to_camel_case( 67 class_field.name 68 ) 69 description = getattr(class_field, "field_description", None) 70 permission_classes = 
getattr(class_field, "field_permission_classes", None) 71 resolver = getattr(class_field, "field_resolver", None) or _get_resolver( 72 cls, class_field.name 73 ) 74 resolver.__annotations__["return"] = class_field.type 75 76 fields[field_name] = field( 77 resolver, 78 is_input=is_input, 79 description=description, 80 permission_classes=permission_classes, 81 ).graphql_type 82 # supply a graphql default_value if the type annotation has a default 83 if class_field.default not in (dataclasses.MISSING, None): 84 fields[field_name].default_value = class_field.default 85 86 strawberry_fields = {} 87 88 for base in [cls, *cls.__bases__]: 89 strawberry_fields.update( 90 { 91 key: value 92 for key, value in base.__dict__.items() 93 if getattr(value, IS_STRAWBERRY_FIELD, False) 94 } 95 ) 96 97 for key, value in strawberry_fields.items(): 98 name = getattr(value, "field_name", None) or to_camel_case(key) 99 100 fields[name] = value.graphql_type 101 102 return fields 103 104 if is_input: 105 setattr(cls, IS_STRAWBERRY_INPUT, True) 106 elif is_interface: 107 setattr(cls, IS_STRAWBERRY_INTERFACE, True) 108 109 extra_kwargs = {"description": description or cls.__doc__} 110 111 wrapped = dataclasses.dataclass(cls) 112 113 if is_input: 114 TypeClass = GraphQLInputObjectType 115 elif is_interface: 116 TypeClass = GraphQLInterfaceType 117 118 # TODO: in future we might want to be able to override this 119 # for example to map a class (like a django model) to one 120 # type of the interface 121 extra_kwargs["resolve_type"] = _interface_resolve_type 122 else: 123 TypeClass = GraphQLObjectType 124 125 extra_kwargs["interfaces"] = [ 126 klass.graphql_type 127 for klass in cls.__bases__ 128 if hasattr(klass, IS_STRAWBERRY_INTERFACE) 129 ] 130 131 graphql_type = TypeClass( 132 name, 133 lambda types_replacement_map=None: _get_fields(wrapped, types_replacement_map), 134 **extra_kwargs 135 ) 136 register_type(cls, graphql_type) 137 138 return wrapped 139 140 141 def type(cls=None, *, name=None, is_input=False, is_interface=False, description=None): 142 """Annotates a class as a GraphQL type. 143 144 Example usage: 145 146 >>> @strawberry.type: 147 >>> class X: 148 >>> field_abc: str = "ABC" 149 """ 150 151 def wrap(cls): 152 return _process_type( 153 cls, 154 name=name, 155 is_input=is_input, 156 is_interface=is_interface, 157 description=description, 158 ) 159 160 if cls is None: 161 return wrap 162 163 return wrap(cls) 164 165 166 input = partial(type, is_input=True) 167 interface = partial(type, is_interface=True) 168 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/strawberry/type.py b/strawberry/type.py --- a/strawberry/type.py +++ b/strawberry/type.py @@ -38,6 +38,8 @@ return field_resolver + _resolver.__name__ = field_name + return _resolver
{"golden_diff": "diff --git a/strawberry/type.py b/strawberry/type.py\n--- a/strawberry/type.py\n+++ b/strawberry/type.py\n@@ -38,6 +38,8 @@\n \n return field_resolver\n \n+ _resolver.__name__ = field_name\n+\n return _resolver\n", "issue": "Cannot create type with multiple Unions\n```python\r\nfrom typing import Union\r\n\r\nimport strawberry\r\n\r\n\r\[email protected]\r\nclass CoolType:\r\n @strawberry.type\r\n class UnionA1:\r\n value: int\r\n\r\n @strawberry.type\r\n class UnionA2:\r\n value: int\r\n\r\n @strawberry.type\r\n class UnionB1:\r\n value: int\r\n\r\n @strawberry.type\r\n class UnionB2:\r\n value: int\r\n\r\n field1: Union[UnionA1, UnionA2]\r\n field2: Union[UnionB1, UnionB2]\r\n\r\n\r\nschema = strawberry.Schema(query=CoolType)\r\n```\r\n\r\n```.pytb\r\nTraceback (most recent call last):\r\n File \"/home/ignormies/.config/JetBrains/PyCharm2020.1/scratches/scratch.py\", line 28, in <module>\r\n schema = strawberry.Schema(query=CoolType)\r\n File \"/home/ignormies/.local/share/virtualenvs/gql-bf-XGX4szKA-py3.8/lib/python3.8/site-packages/strawberry/schema.py\", line 25, in __init__\r\n super().__init__(\r\n File \"/home/ignormies/.local/share/virtualenvs/gql-bf-XGX4szKA-py3.8/lib/python3.8/site-packages/graphql/type/schema.py\", line 239, in __init__\r\n raise TypeError(\r\nTypeError: Schema must contain uniquely named types but contains multiple types named '_resolver'.\r\n```\r\n\r\nRemoving either `field1` or `field2` allows the schema to be created\n", "before_files": [{"content": "import copy\nimport dataclasses\nfrom functools import partial\nfrom typing import Optional\n\nfrom graphql import GraphQLInputObjectType, GraphQLInterfaceType, GraphQLObjectType\n\nfrom .constants import IS_STRAWBERRY_FIELD, IS_STRAWBERRY_INPUT, IS_STRAWBERRY_INTERFACE\nfrom .field import field, strawberry_field\nfrom .type_registry import register_type\nfrom .utils.str_converters import to_camel_case\nfrom .utils.typing import get_actual_type, has_type_var, is_type_var\n\n\ndef _interface_resolve_type(result, info, return_type):\n \"\"\"Resolves the correct type for an interface\"\"\"\n return result.__class__.graphql_type\n\n\ndef _get_resolver(cls, field_name):\n class_field = getattr(cls, field_name, None)\n\n if class_field and getattr(class_field, \"resolver\", None):\n return class_field.resolver\n\n def _resolver(root, info):\n if not root:\n return None\n\n field_resolver = getattr(root, field_name, None)\n\n if getattr(field_resolver, IS_STRAWBERRY_FIELD, False):\n return field_resolver(root, info)\n\n elif field_resolver.__class__ is strawberry_field:\n # TODO: support default values\n return None\n\n return field_resolver\n\n return _resolver\n\n\ndef _process_type(\n cls, *, name=None, is_input=False, is_interface=False, description=None\n):\n name = name or cls.__name__\n\n def _get_fields(wrapped, types_replacement_map=None):\n class_fields = dataclasses.fields(wrapped)\n\n fields = {}\n\n for class_field in class_fields:\n # we want to make a copy of the original field when dealing\n # with generic types and also get the actual type for the type var\n if is_type_var(class_field.type) or has_type_var(class_field.type):\n class_field = copy.copy(class_field)\n class_field.type = get_actual_type(\n class_field.type, types_replacement_map\n )\n # like args, a None default implies Optional\n if class_field.default is None:\n class_field.type = Optional[class_field.type]\n\n field_name = getattr(class_field, \"field_name\", None) or to_camel_case(\n class_field.name\n )\n 
description = getattr(class_field, \"field_description\", None)\n permission_classes = getattr(class_field, \"field_permission_classes\", None)\n resolver = getattr(class_field, \"field_resolver\", None) or _get_resolver(\n cls, class_field.name\n )\n resolver.__annotations__[\"return\"] = class_field.type\n\n fields[field_name] = field(\n resolver,\n is_input=is_input,\n description=description,\n permission_classes=permission_classes,\n ).graphql_type\n # supply a graphql default_value if the type annotation has a default\n if class_field.default not in (dataclasses.MISSING, None):\n fields[field_name].default_value = class_field.default\n\n strawberry_fields = {}\n\n for base in [cls, *cls.__bases__]:\n strawberry_fields.update(\n {\n key: value\n for key, value in base.__dict__.items()\n if getattr(value, IS_STRAWBERRY_FIELD, False)\n }\n )\n\n for key, value in strawberry_fields.items():\n name = getattr(value, \"field_name\", None) or to_camel_case(key)\n\n fields[name] = value.graphql_type\n\n return fields\n\n if is_input:\n setattr(cls, IS_STRAWBERRY_INPUT, True)\n elif is_interface:\n setattr(cls, IS_STRAWBERRY_INTERFACE, True)\n\n extra_kwargs = {\"description\": description or cls.__doc__}\n\n wrapped = dataclasses.dataclass(cls)\n\n if is_input:\n TypeClass = GraphQLInputObjectType\n elif is_interface:\n TypeClass = GraphQLInterfaceType\n\n # TODO: in future we might want to be able to override this\n # for example to map a class (like a django model) to one\n # type of the interface\n extra_kwargs[\"resolve_type\"] = _interface_resolve_type\n else:\n TypeClass = GraphQLObjectType\n\n extra_kwargs[\"interfaces\"] = [\n klass.graphql_type\n for klass in cls.__bases__\n if hasattr(klass, IS_STRAWBERRY_INTERFACE)\n ]\n\n graphql_type = TypeClass(\n name,\n lambda types_replacement_map=None: _get_fields(wrapped, types_replacement_map),\n **extra_kwargs\n )\n register_type(cls, graphql_type)\n\n return wrapped\n\n\ndef type(cls=None, *, name=None, is_input=False, is_interface=False, description=None):\n \"\"\"Annotates a class as a GraphQL type.\n\n Example usage:\n\n >>> @strawberry.type:\n >>> class X:\n >>> field_abc: str = \"ABC\"\n \"\"\"\n\n def wrap(cls):\n return _process_type(\n cls,\n name=name,\n is_input=is_input,\n is_interface=is_interface,\n description=description,\n )\n\n if cls is None:\n return wrap\n\n return wrap(cls)\n\n\ninput = partial(type, is_input=True)\ninterface = partial(type, is_interface=True)\n", "path": "strawberry/type.py"}], "after_files": [{"content": "import copy\nimport dataclasses\nfrom functools import partial\nfrom typing import Optional\n\nfrom graphql import GraphQLInputObjectType, GraphQLInterfaceType, GraphQLObjectType\n\nfrom .constants import IS_STRAWBERRY_FIELD, IS_STRAWBERRY_INPUT, IS_STRAWBERRY_INTERFACE\nfrom .field import field, strawberry_field\nfrom .type_registry import register_type\nfrom .utils.str_converters import to_camel_case\nfrom .utils.typing import get_actual_type, has_type_var, is_type_var\n\n\ndef _interface_resolve_type(result, info, return_type):\n \"\"\"Resolves the correct type for an interface\"\"\"\n return result.__class__.graphql_type\n\n\ndef _get_resolver(cls, field_name):\n class_field = getattr(cls, field_name, None)\n\n if class_field and getattr(class_field, \"resolver\", None):\n return class_field.resolver\n\n def _resolver(root, info):\n if not root:\n return None\n\n field_resolver = getattr(root, field_name, None)\n\n if getattr(field_resolver, IS_STRAWBERRY_FIELD, False):\n return 
field_resolver(root, info)\n\n elif field_resolver.__class__ is strawberry_field:\n # TODO: support default values\n return None\n\n return field_resolver\n\n _resolver.__name__ = field_name\n\n return _resolver\n\n\ndef _process_type(\n cls, *, name=None, is_input=False, is_interface=False, description=None\n):\n name = name or cls.__name__\n\n def _get_fields(wrapped, types_replacement_map=None):\n class_fields = dataclasses.fields(wrapped)\n\n fields = {}\n\n for class_field in class_fields:\n # we want to make a copy of the original field when dealing\n # with generic types and also get the actual type for the type var\n if is_type_var(class_field.type) or has_type_var(class_field.type):\n class_field = copy.copy(class_field)\n class_field.type = get_actual_type(\n class_field.type, types_replacement_map\n )\n # like args, a None default implies Optional\n if class_field.default is None:\n class_field.type = Optional[class_field.type]\n\n field_name = getattr(class_field, \"field_name\", None) or to_camel_case(\n class_field.name\n )\n description = getattr(class_field, \"field_description\", None)\n permission_classes = getattr(class_field, \"field_permission_classes\", None)\n resolver = getattr(class_field, \"field_resolver\", None) or _get_resolver(\n cls, class_field.name\n )\n resolver.__annotations__[\"return\"] = class_field.type\n\n fields[field_name] = field(\n resolver,\n is_input=is_input,\n description=description,\n permission_classes=permission_classes,\n ).graphql_type\n # supply a graphql default_value if the type annotation has a default\n if class_field.default not in (dataclasses.MISSING, None):\n fields[field_name].default_value = class_field.default\n\n strawberry_fields = {}\n\n for base in [cls, *cls.__bases__]:\n strawberry_fields.update(\n {\n key: value\n for key, value in base.__dict__.items()\n if getattr(value, IS_STRAWBERRY_FIELD, False)\n }\n )\n\n for key, value in strawberry_fields.items():\n name = getattr(value, \"field_name\", None) or to_camel_case(key)\n\n fields[name] = value.graphql_type\n\n return fields\n\n if is_input:\n setattr(cls, IS_STRAWBERRY_INPUT, True)\n elif is_interface:\n setattr(cls, IS_STRAWBERRY_INTERFACE, True)\n\n extra_kwargs = {\"description\": description or cls.__doc__}\n\n wrapped = dataclasses.dataclass(cls)\n\n if is_input:\n TypeClass = GraphQLInputObjectType\n elif is_interface:\n TypeClass = GraphQLInterfaceType\n\n # TODO: in future we might want to be able to override this\n # for example to map a class (like a django model) to one\n # type of the interface\n extra_kwargs[\"resolve_type\"] = _interface_resolve_type\n else:\n TypeClass = GraphQLObjectType\n\n extra_kwargs[\"interfaces\"] = [\n klass.graphql_type\n for klass in cls.__bases__\n if hasattr(klass, IS_STRAWBERRY_INTERFACE)\n ]\n\n graphql_type = TypeClass(\n name,\n lambda types_replacement_map=None: _get_fields(wrapped, types_replacement_map),\n **extra_kwargs\n )\n register_type(cls, graphql_type)\n\n return wrapped\n\n\ndef type(cls=None, *, name=None, is_input=False, is_interface=False, description=None):\n \"\"\"Annotates a class as a GraphQL type.\n\n Example usage:\n\n >>> @strawberry.type:\n >>> class X:\n >>> field_abc: str = \"ABC\"\n \"\"\"\n\n def wrap(cls):\n return _process_type(\n cls,\n name=name,\n is_input=is_input,\n is_interface=is_interface,\n description=description,\n )\n\n if cls is None:\n return wrap\n\n return wrap(cls)\n\n\ninput = partial(type, is_input=True)\ninterface = partial(type, is_interface=True)\n", "path": 
"strawberry/type.py"}]}
2,127
71
gh_patches_debug_12131
rasdani/github-patches
git_diff
Lightning-AI__torchmetrics-1460
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- MultioutputWrapper does not reset cleanly ## 🐛 Bug Calling `MultioutputWrapper.compute()` after `MultioutputWrapper.reset()` returns old metrics that should have been cleared by the reset. ### To Reproduce <!-- If you have a code sample, error messages, stack traces, please provide it here as well --> #### Code sample ```py import torch import torchmetrics base_metric = torchmetrics.ConfusionMatrix(task="multiclass", num_classes=2) cf = torchmetrics.MultioutputWrapper(base_metric, num_outputs=2) cf(torch.tensor([[0,0]]), torch.tensor([[0,0]])) print("First result: ", cf.compute()) cf.reset() cf(torch.tensor([[1,1]]), torch.tensor([[0,0]])) print("Second result: ", cf.compute()) ``` Output: ``` First result: [tensor([[1, 0], [0, 0]]), tensor([[1, 0], [0, 0]])] Second result: [tensor([[1, 0], [0, 0]]), tensor([[1, 0], [0, 0]])] ``` The old output is returned even after resetting and entering new data. If the fist metric computation is omitted, the second metric is as expected. Importantly, this bug only occurs when using `forward()` to enter data, while `update()` works as expected. ### Expected behavior The result of the second computation should be independent of the first. Furthermore, forward and update should produce the same state as specified in the docs. ### Environment - torchmetrics 0.10.3, installed from pypi - Python 3.8.9 ### Attempts to fix Adding `super().reset()` (as done in e.g. the minmax wrapper) at the top of the reset method seems to fix the bug. https://github.com/Lightning-AI/metrics/blob/7b505ff1a3b88181bef2b0cdfa21ec593dcda3ff/src/torchmetrics/wrappers/multioutput.py#L133 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/torchmetrics/wrappers/multioutput.py` Content: ``` 1 from copy import deepcopy 2 from typing import Any, List, Tuple 3 4 import torch 5 from torch import Tensor 6 from torch.nn import ModuleList 7 8 from torchmetrics import Metric 9 from torchmetrics.utilities import apply_to_collection 10 11 12 def _get_nan_indices(*tensors: Tensor) -> Tensor: 13 """Get indices of rows along dim 0 which have NaN values.""" 14 if len(tensors) == 0: 15 raise ValueError("Must pass at least one tensor as argument") 16 sentinel = tensors[0] 17 nan_idxs = torch.zeros(len(sentinel), dtype=torch.bool, device=sentinel.device) 18 for tensor in tensors: 19 permuted_tensor = tensor.flatten(start_dim=1) 20 nan_idxs |= torch.any(torch.isnan(permuted_tensor), dim=1) 21 return nan_idxs 22 23 24 class MultioutputWrapper(Metric): 25 """Wrap a base metric to enable it to support multiple outputs. 26 27 Several torchmetrics metrics, such as :class:`torchmetrics.regression.spearman.SpearmanCorrcoef` lack support for 28 multioutput mode. This class wraps such metrics to support computing one metric per output. 29 Unlike specific torchmetric metrics, it doesn't support any aggregation across outputs. 30 This means if you set ``num_outputs`` to 2, ``.compute()`` will return a Tensor of dimension 31 ``(2, ...)`` where ``...`` represents the dimensions the metric returns when not wrapped. 32 33 In addition to enabling multioutput support for metrics that lack it, this class also supports, albeit in a crude 34 fashion, dealing with missing labels (or other data). 
When ``remove_nans`` is passed, the class will remove the 35 intersection of NaN containing "rows" upon each update for each output. For example, suppose a user uses 36 `MultioutputWrapper` to wrap :class:`torchmetrics.regression.r2.R2Score` with 2 outputs, one of which occasionally 37 has missing labels for classes like ``R2Score`` is that this class supports removing ``NaN`` values 38 (parameter ``remove_nans``) on a per-output basis. When ``remove_nans`` is passed the wrapper will remove all rows 39 40 Args: 41 base_metric: Metric being wrapped. 42 num_outputs: Expected dimensionality of the output dimension. 43 This parameter is used to determine the number of distinct metrics we need to track. 44 output_dim: 45 Dimension on which output is expected. Note that while this provides some flexibility, the output dimension 46 must be the same for all inputs to update. This applies even for metrics such as `Accuracy` where the labels 47 can have a different number of dimensions than the predictions. This can be worked around if the output 48 dimension can be set to -1 for both, even if -1 corresponds to different dimensions in different inputs. 49 remove_nans: 50 Whether to remove the intersection of rows containing NaNs from the values passed through to each underlying 51 metric. Proper operation requires all tensors passed to update to have dimension ``(N, ...)`` where N 52 represents the length of the batch or dataset being passed in. 53 squeeze_outputs: 54 If ``True``, will squeeze the 1-item dimensions left after ``index_select`` is applied. 55 This is sometimes unnecessary but harmless for metrics such as `R2Score` but useful 56 for certain classification metrics that can't handle additional 1-item dimensions. 57 58 Example: 59 60 >>> # Mimic R2Score in `multioutput`, `raw_values` mode: 61 >>> import torch 62 >>> from torchmetrics import MultioutputWrapper, R2Score 63 >>> target = torch.tensor([[0.5, 1], [-1, 1], [7, -6]]) 64 >>> preds = torch.tensor([[0, 2], [-1, 2], [8, -5]]) 65 >>> r2score = MultioutputWrapper(R2Score(), 2) 66 >>> r2score(preds, target) 67 [tensor(0.9654), tensor(0.9082)] 68 """ 69 70 is_differentiable = False 71 72 def __init__( 73 self, 74 base_metric: Metric, 75 num_outputs: int, 76 output_dim: int = -1, 77 remove_nans: bool = True, 78 squeeze_outputs: bool = True, 79 ): 80 super().__init__() 81 self.metrics = ModuleList([deepcopy(base_metric) for _ in range(num_outputs)]) 82 self.output_dim = output_dim 83 self.remove_nans = remove_nans 84 self.squeeze_outputs = squeeze_outputs 85 86 def _get_args_kwargs_by_output(self, *args: Tensor, **kwargs: Tensor) -> List[Tuple[Tensor, Tensor]]: 87 """Get args and kwargs reshaped to be output-specific and (maybe) having NaNs stripped out.""" 88 args_kwargs_by_output = [] 89 for i in range(len(self.metrics)): 90 selected_args = apply_to_collection( 91 args, Tensor, torch.index_select, dim=self.output_dim, index=torch.tensor(i, device=self.device) 92 ) 93 selected_kwargs = apply_to_collection( 94 kwargs, Tensor, torch.index_select, dim=self.output_dim, index=torch.tensor(i, device=self.device) 95 ) 96 if self.remove_nans: 97 args_kwargs = selected_args + tuple(selected_kwargs.values()) 98 nan_idxs = _get_nan_indices(*args_kwargs) 99 selected_args = [arg[~nan_idxs] for arg in selected_args] 100 selected_kwargs = {k: v[~nan_idxs] for k, v in selected_kwargs.items()} 101 102 if self.squeeze_outputs: 103 selected_args = [arg.squeeze(self.output_dim) for arg in selected_args] 104 args_kwargs_by_output.append((selected_args, 
selected_kwargs)) 105 return args_kwargs_by_output 106 107 def update(self, *args: Any, **kwargs: Any) -> None: 108 """Update each underlying metric with the corresponding output.""" 109 reshaped_args_kwargs = self._get_args_kwargs_by_output(*args, **kwargs) 110 for metric, (selected_args, selected_kwargs) in zip(self.metrics, reshaped_args_kwargs): 111 metric.update(*selected_args, **selected_kwargs) 112 113 def compute(self) -> List[Tensor]: 114 """Compute metrics.""" 115 return [m.compute() for m in self.metrics] 116 117 @torch.jit.unused 118 def forward(self, *args: Any, **kwargs: Any) -> Any: 119 """Call underlying forward methods and aggregate the results if they're non-null. 120 121 We override this method to ensure that state variables get copied over on the underlying metrics. 122 """ 123 results = [] 124 reshaped_args_kwargs = self._get_args_kwargs_by_output(*args, **kwargs) 125 for metric, (selected_args, selected_kwargs) in zip(self.metrics, reshaped_args_kwargs): 126 results.append(metric(*selected_args, **selected_kwargs)) 127 if results[0] is None: 128 return None 129 return results 130 131 def reset(self) -> None: 132 """Reset all underlying metrics.""" 133 for metric in self.metrics: 134 metric.reset() 135 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/torchmetrics/wrappers/multioutput.py b/src/torchmetrics/wrappers/multioutput.py --- a/src/torchmetrics/wrappers/multioutput.py +++ b/src/torchmetrics/wrappers/multioutput.py @@ -1,5 +1,5 @@ from copy import deepcopy -from typing import Any, List, Tuple +from typing import Any, Callable, List, Tuple import torch from torch import Tensor @@ -132,3 +132,12 @@ """Reset all underlying metrics.""" for metric in self.metrics: metric.reset() + super().reset() + + def _wrap_update(self, update: Callable) -> Callable: + """Overwrite to do nothing.""" + return update + + def _wrap_compute(self, compute: Callable) -> Callable: + """Overwrite to do nothing.""" + return compute
{"golden_diff": "diff --git a/src/torchmetrics/wrappers/multioutput.py b/src/torchmetrics/wrappers/multioutput.py\n--- a/src/torchmetrics/wrappers/multioutput.py\n+++ b/src/torchmetrics/wrappers/multioutput.py\n@@ -1,5 +1,5 @@\n from copy import deepcopy\n-from typing import Any, List, Tuple\n+from typing import Any, Callable, List, Tuple\n \n import torch\n from torch import Tensor\n@@ -132,3 +132,12 @@\n \"\"\"Reset all underlying metrics.\"\"\"\n for metric in self.metrics:\n metric.reset()\n+ super().reset()\n+\n+ def _wrap_update(self, update: Callable) -> Callable:\n+ \"\"\"Overwrite to do nothing.\"\"\"\n+ return update\n+\n+ def _wrap_compute(self, compute: Callable) -> Callable:\n+ \"\"\"Overwrite to do nothing.\"\"\"\n+ return compute\n", "issue": "MultioutputWrapper does not reset cleanly\n## \ud83d\udc1b Bug\r\n\r\nCalling `MultioutputWrapper.compute()` after `MultioutputWrapper.reset()` returns old metrics that should have been cleared by the reset. \r\n\r\n### To Reproduce\r\n\r\n<!-- If you have a code sample, error messages, stack traces, please provide it here as well -->\r\n\r\n#### Code sample\r\n\r\n```py\r\nimport torch\r\nimport torchmetrics\r\n\r\nbase_metric = torchmetrics.ConfusionMatrix(task=\"multiclass\", num_classes=2)\r\ncf = torchmetrics.MultioutputWrapper(base_metric, num_outputs=2)\r\n\r\ncf(torch.tensor([[0,0]]), torch.tensor([[0,0]]))\r\nprint(\"First result: \", cf.compute())\r\n\r\ncf.reset()\r\n\r\ncf(torch.tensor([[1,1]]), torch.tensor([[0,0]]))\r\nprint(\"Second result: \", cf.compute())\r\n```\r\n\r\nOutput:\r\n```\r\nFirst result: [tensor([[1, 0], [0, 0]]), tensor([[1, 0], [0, 0]])]\r\nSecond result: [tensor([[1, 0], [0, 0]]), tensor([[1, 0], [0, 0]])]\r\n```\r\nThe old output is returned even after resetting and entering new data. If the fist metric computation is omitted, the second metric is as expected.\r\n\r\nImportantly, this bug only occurs when using `forward()` to enter data, while `update()` works as expected.\r\n\r\n### Expected behavior\r\n\r\nThe result of the second computation should be independent of the first. Furthermore, forward and update should produce the same state as specified in the docs.\r\n\r\n### Environment\r\n\r\n- torchmetrics 0.10.3, installed from pypi\r\n- Python 3.8.9\r\n\r\n### Attempts to fix\r\n\r\nAdding `super().reset()` (as done in e.g. the minmax wrapper) at the top of the reset method seems to fix the bug. \r\nhttps://github.com/Lightning-AI/metrics/blob/7b505ff1a3b88181bef2b0cdfa21ec593dcda3ff/src/torchmetrics/wrappers/multioutput.py#L133\n", "before_files": [{"content": "from copy import deepcopy\nfrom typing import Any, List, Tuple\n\nimport torch\nfrom torch import Tensor\nfrom torch.nn import ModuleList\n\nfrom torchmetrics import Metric\nfrom torchmetrics.utilities import apply_to_collection\n\n\ndef _get_nan_indices(*tensors: Tensor) -> Tensor:\n \"\"\"Get indices of rows along dim 0 which have NaN values.\"\"\"\n if len(tensors) == 0:\n raise ValueError(\"Must pass at least one tensor as argument\")\n sentinel = tensors[0]\n nan_idxs = torch.zeros(len(sentinel), dtype=torch.bool, device=sentinel.device)\n for tensor in tensors:\n permuted_tensor = tensor.flatten(start_dim=1)\n nan_idxs |= torch.any(torch.isnan(permuted_tensor), dim=1)\n return nan_idxs\n\n\nclass MultioutputWrapper(Metric):\n \"\"\"Wrap a base metric to enable it to support multiple outputs.\n\n Several torchmetrics metrics, such as :class:`torchmetrics.regression.spearman.SpearmanCorrcoef` lack support for\n multioutput mode. 
This class wraps such metrics to support computing one metric per output.\n Unlike specific torchmetric metrics, it doesn't support any aggregation across outputs.\n This means if you set ``num_outputs`` to 2, ``.compute()`` will return a Tensor of dimension\n ``(2, ...)`` where ``...`` represents the dimensions the metric returns when not wrapped.\n\n In addition to enabling multioutput support for metrics that lack it, this class also supports, albeit in a crude\n fashion, dealing with missing labels (or other data). When ``remove_nans`` is passed, the class will remove the\n intersection of NaN containing \"rows\" upon each update for each output. For example, suppose a user uses\n `MultioutputWrapper` to wrap :class:`torchmetrics.regression.r2.R2Score` with 2 outputs, one of which occasionally\n has missing labels for classes like ``R2Score`` is that this class supports removing ``NaN`` values\n (parameter ``remove_nans``) on a per-output basis. When ``remove_nans`` is passed the wrapper will remove all rows\n\n Args:\n base_metric: Metric being wrapped.\n num_outputs: Expected dimensionality of the output dimension.\n This parameter is used to determine the number of distinct metrics we need to track.\n output_dim:\n Dimension on which output is expected. Note that while this provides some flexibility, the output dimension\n must be the same for all inputs to update. This applies even for metrics such as `Accuracy` where the labels\n can have a different number of dimensions than the predictions. This can be worked around if the output\n dimension can be set to -1 for both, even if -1 corresponds to different dimensions in different inputs.\n remove_nans:\n Whether to remove the intersection of rows containing NaNs from the values passed through to each underlying\n metric. 
Proper operation requires all tensors passed to update to have dimension ``(N, ...)`` where N\n represents the length of the batch or dataset being passed in.\n squeeze_outputs:\n If ``True``, will squeeze the 1-item dimensions left after ``index_select`` is applied.\n This is sometimes unnecessary but harmless for metrics such as `R2Score` but useful\n for certain classification metrics that can't handle additional 1-item dimensions.\n\n Example:\n\n >>> # Mimic R2Score in `multioutput`, `raw_values` mode:\n >>> import torch\n >>> from torchmetrics import MultioutputWrapper, R2Score\n >>> target = torch.tensor([[0.5, 1], [-1, 1], [7, -6]])\n >>> preds = torch.tensor([[0, 2], [-1, 2], [8, -5]])\n >>> r2score = MultioutputWrapper(R2Score(), 2)\n >>> r2score(preds, target)\n [tensor(0.9654), tensor(0.9082)]\n \"\"\"\n\n is_differentiable = False\n\n def __init__(\n self,\n base_metric: Metric,\n num_outputs: int,\n output_dim: int = -1,\n remove_nans: bool = True,\n squeeze_outputs: bool = True,\n ):\n super().__init__()\n self.metrics = ModuleList([deepcopy(base_metric) for _ in range(num_outputs)])\n self.output_dim = output_dim\n self.remove_nans = remove_nans\n self.squeeze_outputs = squeeze_outputs\n\n def _get_args_kwargs_by_output(self, *args: Tensor, **kwargs: Tensor) -> List[Tuple[Tensor, Tensor]]:\n \"\"\"Get args and kwargs reshaped to be output-specific and (maybe) having NaNs stripped out.\"\"\"\n args_kwargs_by_output = []\n for i in range(len(self.metrics)):\n selected_args = apply_to_collection(\n args, Tensor, torch.index_select, dim=self.output_dim, index=torch.tensor(i, device=self.device)\n )\n selected_kwargs = apply_to_collection(\n kwargs, Tensor, torch.index_select, dim=self.output_dim, index=torch.tensor(i, device=self.device)\n )\n if self.remove_nans:\n args_kwargs = selected_args + tuple(selected_kwargs.values())\n nan_idxs = _get_nan_indices(*args_kwargs)\n selected_args = [arg[~nan_idxs] for arg in selected_args]\n selected_kwargs = {k: v[~nan_idxs] for k, v in selected_kwargs.items()}\n\n if self.squeeze_outputs:\n selected_args = [arg.squeeze(self.output_dim) for arg in selected_args]\n args_kwargs_by_output.append((selected_args, selected_kwargs))\n return args_kwargs_by_output\n\n def update(self, *args: Any, **kwargs: Any) -> None:\n \"\"\"Update each underlying metric with the corresponding output.\"\"\"\n reshaped_args_kwargs = self._get_args_kwargs_by_output(*args, **kwargs)\n for metric, (selected_args, selected_kwargs) in zip(self.metrics, reshaped_args_kwargs):\n metric.update(*selected_args, **selected_kwargs)\n\n def compute(self) -> List[Tensor]:\n \"\"\"Compute metrics.\"\"\"\n return [m.compute() for m in self.metrics]\n\n @torch.jit.unused\n def forward(self, *args: Any, **kwargs: Any) -> Any:\n \"\"\"Call underlying forward methods and aggregate the results if they're non-null.\n\n We override this method to ensure that state variables get copied over on the underlying metrics.\n \"\"\"\n results = []\n reshaped_args_kwargs = self._get_args_kwargs_by_output(*args, **kwargs)\n for metric, (selected_args, selected_kwargs) in zip(self.metrics, reshaped_args_kwargs):\n results.append(metric(*selected_args, **selected_kwargs))\n if results[0] is None:\n return None\n return results\n\n def reset(self) -> None:\n \"\"\"Reset all underlying metrics.\"\"\"\n for metric in self.metrics:\n metric.reset()\n", "path": "src/torchmetrics/wrappers/multioutput.py"}], "after_files": [{"content": "from copy import deepcopy\nfrom typing import Any, Callable, 
List, Tuple\n\nimport torch\nfrom torch import Tensor\nfrom torch.nn import ModuleList\n\nfrom torchmetrics import Metric\nfrom torchmetrics.utilities import apply_to_collection\n\n\ndef _get_nan_indices(*tensors: Tensor) -> Tensor:\n \"\"\"Get indices of rows along dim 0 which have NaN values.\"\"\"\n if len(tensors) == 0:\n raise ValueError(\"Must pass at least one tensor as argument\")\n sentinel = tensors[0]\n nan_idxs = torch.zeros(len(sentinel), dtype=torch.bool, device=sentinel.device)\n for tensor in tensors:\n permuted_tensor = tensor.flatten(start_dim=1)\n nan_idxs |= torch.any(torch.isnan(permuted_tensor), dim=1)\n return nan_idxs\n\n\nclass MultioutputWrapper(Metric):\n \"\"\"Wrap a base metric to enable it to support multiple outputs.\n\n Several torchmetrics metrics, such as :class:`torchmetrics.regression.spearman.SpearmanCorrcoef` lack support for\n multioutput mode. This class wraps such metrics to support computing one metric per output.\n Unlike specific torchmetric metrics, it doesn't support any aggregation across outputs.\n This means if you set ``num_outputs`` to 2, ``.compute()`` will return a Tensor of dimension\n ``(2, ...)`` where ``...`` represents the dimensions the metric returns when not wrapped.\n\n In addition to enabling multioutput support for metrics that lack it, this class also supports, albeit in a crude\n fashion, dealing with missing labels (or other data). When ``remove_nans`` is passed, the class will remove the\n intersection of NaN containing \"rows\" upon each update for each output. For example, suppose a user uses\n `MultioutputWrapper` to wrap :class:`torchmetrics.regression.r2.R2Score` with 2 outputs, one of which occasionally\n has missing labels for classes like ``R2Score`` is that this class supports removing ``NaN`` values\n (parameter ``remove_nans``) on a per-output basis. When ``remove_nans`` is passed the wrapper will remove all rows\n\n Args:\n base_metric: Metric being wrapped.\n num_outputs: Expected dimensionality of the output dimension.\n This parameter is used to determine the number of distinct metrics we need to track.\n output_dim:\n Dimension on which output is expected. Note that while this provides some flexibility, the output dimension\n must be the same for all inputs to update. This applies even for metrics such as `Accuracy` where the labels\n can have a different number of dimensions than the predictions. This can be worked around if the output\n dimension can be set to -1 for both, even if -1 corresponds to different dimensions in different inputs.\n remove_nans:\n Whether to remove the intersection of rows containing NaNs from the values passed through to each underlying\n metric. 
Proper operation requires all tensors passed to update to have dimension ``(N, ...)`` where N\n represents the length of the batch or dataset being passed in.\n squeeze_outputs:\n If ``True``, will squeeze the 1-item dimensions left after ``index_select`` is applied.\n This is sometimes unnecessary but harmless for metrics such as `R2Score` but useful\n for certain classification metrics that can't handle additional 1-item dimensions.\n\n Example:\n\n >>> # Mimic R2Score in `multioutput`, `raw_values` mode:\n >>> import torch\n >>> from torchmetrics import MultioutputWrapper, R2Score\n >>> target = torch.tensor([[0.5, 1], [-1, 1], [7, -6]])\n >>> preds = torch.tensor([[0, 2], [-1, 2], [8, -5]])\n >>> r2score = MultioutputWrapper(R2Score(), 2)\n >>> r2score(preds, target)\n [tensor(0.9654), tensor(0.9082)]\n \"\"\"\n\n is_differentiable = False\n\n def __init__(\n self,\n base_metric: Metric,\n num_outputs: int,\n output_dim: int = -1,\n remove_nans: bool = True,\n squeeze_outputs: bool = True,\n ):\n super().__init__()\n self.metrics = ModuleList([deepcopy(base_metric) for _ in range(num_outputs)])\n self.output_dim = output_dim\n self.remove_nans = remove_nans\n self.squeeze_outputs = squeeze_outputs\n\n def _get_args_kwargs_by_output(self, *args: Tensor, **kwargs: Tensor) -> List[Tuple[Tensor, Tensor]]:\n \"\"\"Get args and kwargs reshaped to be output-specific and (maybe) having NaNs stripped out.\"\"\"\n args_kwargs_by_output = []\n for i in range(len(self.metrics)):\n selected_args = apply_to_collection(\n args, Tensor, torch.index_select, dim=self.output_dim, index=torch.tensor(i, device=self.device)\n )\n selected_kwargs = apply_to_collection(\n kwargs, Tensor, torch.index_select, dim=self.output_dim, index=torch.tensor(i, device=self.device)\n )\n if self.remove_nans:\n args_kwargs = selected_args + tuple(selected_kwargs.values())\n nan_idxs = _get_nan_indices(*args_kwargs)\n selected_args = [arg[~nan_idxs] for arg in selected_args]\n selected_kwargs = {k: v[~nan_idxs] for k, v in selected_kwargs.items()}\n\n if self.squeeze_outputs:\n selected_args = [arg.squeeze(self.output_dim) for arg in selected_args]\n args_kwargs_by_output.append((selected_args, selected_kwargs))\n return args_kwargs_by_output\n\n def update(self, *args: Any, **kwargs: Any) -> None:\n \"\"\"Update each underlying metric with the corresponding output.\"\"\"\n reshaped_args_kwargs = self._get_args_kwargs_by_output(*args, **kwargs)\n for metric, (selected_args, selected_kwargs) in zip(self.metrics, reshaped_args_kwargs):\n metric.update(*selected_args, **selected_kwargs)\n\n def compute(self) -> List[Tensor]:\n \"\"\"Compute metrics.\"\"\"\n return [m.compute() for m in self.metrics]\n\n @torch.jit.unused\n def forward(self, *args: Any, **kwargs: Any) -> Any:\n \"\"\"Call underlying forward methods and aggregate the results if they're non-null.\n\n We override this method to ensure that state variables get copied over on the underlying metrics.\n \"\"\"\n results = []\n reshaped_args_kwargs = self._get_args_kwargs_by_output(*args, **kwargs)\n for metric, (selected_args, selected_kwargs) in zip(self.metrics, reshaped_args_kwargs):\n results.append(metric(*selected_args, **selected_kwargs))\n if results[0] is None:\n return None\n return results\n\n def reset(self) -> None:\n \"\"\"Reset all underlying metrics.\"\"\"\n for metric in self.metrics:\n metric.reset()\n super().reset()\n\n def _wrap_update(self, update: Callable) -> Callable:\n \"\"\"Overwrite to do nothing.\"\"\"\n return update\n\n def 
_wrap_compute(self, compute: Callable) -> Callable:\n \"\"\"Overwrite to do nothing.\"\"\"\n return compute\n", "path": "src/torchmetrics/wrappers/multioutput.py"}]}
2,497
199
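The wrapper record above relies on dropping any row that contains a NaN in either the predictions or the targets before updating each per-output metric. A minimal standalone sketch of that row filtering for the 1-D case — the function and variable names here are illustrative only, not the wrapper's internal code:

```python
import torch

def drop_nan_rows(preds: torch.Tensor, target: torch.Tensor):
    """Drop every row in which either tensor contains a NaN (1-D case)."""
    keep = ~(torch.isnan(preds) | torch.isnan(target))
    return preds[keep], target[keep]

preds = torch.tensor([0.0, 2.0, float("nan"), -5.0])
target = torch.tensor([0.5, float("nan"), 2.0, -6.0])
print(drop_nan_rows(preds, target))
# (tensor([ 0., -5.]), tensor([ 0.5000, -6.0000]))
```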
gh_patches_debug_20526
rasdani/github-patches
git_diff
Lightning-AI__torchmetrics-2019
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Pearson Correlation Coefficient fails when updating one batch at a time ## 🐛 Bug The `PearsonCorrCoef` fails when using a single batch per update. ### To Reproduce ```python import torch from torchmetrics import PearsonCorrCoef metric = PearsonCorrCoef() # Works metric(torch.tensor([3.0, -0.5, 2.0, 7.0]), torch.tensor([2.5, 0.0, 2.0, 8.0])) print(metric.compute()) # tensor(0.9849) metric.reset() # Doesn't work. metric(torch.tensor([3.0]), torch.tensor([2.5])) metric(torch.tensor([-0.5]), torch.tensor([0.0])) metric(torch.tensor([2.0]), torch.tensor([2.0])) metric(torch.tensor([7.0]), torch.tensor([8.0])) print(metric.compute()) # tensor(nan) ``` ### Expected behavior Both ways of updating the metric should work. ### Environment Python 3.10 torchmetrics==1.03 torch==2.01 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/torchmetrics/functional/regression/pearson.py` Content: ``` 1 # Copyright The Lightning team. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 import math 15 from typing import Tuple 16 17 import torch 18 from torch import Tensor 19 20 from torchmetrics.functional.regression.utils import _check_data_shape_to_num_outputs 21 from torchmetrics.utilities import rank_zero_warn 22 from torchmetrics.utilities.checks import _check_same_shape 23 24 25 def _pearson_corrcoef_update( 26 preds: Tensor, 27 target: Tensor, 28 mean_x: Tensor, 29 mean_y: Tensor, 30 var_x: Tensor, 31 var_y: Tensor, 32 corr_xy: Tensor, 33 n_prior: Tensor, 34 num_outputs: int, 35 ) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor, Tensor]: 36 """Update and returns variables required to compute Pearson Correlation Coefficient. 37 38 Check for same shape of input tensors. 
39 40 Args: 41 preds: estimated scores 42 target: ground truth scores 43 mean_x: current mean estimate of x tensor 44 mean_y: current mean estimate of y tensor 45 var_x: current variance estimate of x tensor 46 var_y: current variance estimate of y tensor 47 corr_xy: current covariance estimate between x and y tensor 48 n_prior: current number of observed observations 49 num_outputs: Number of outputs in multioutput setting 50 51 """ 52 # Data checking 53 _check_same_shape(preds, target) 54 _check_data_shape_to_num_outputs(preds, target, num_outputs) 55 cond = n_prior.mean() > 0 56 57 n_obs = preds.shape[0] 58 if cond: 59 mx_new = (n_prior * mean_x + preds.sum(0)) / (n_prior + n_obs) 60 my_new = (n_prior * mean_y + target.sum(0)) / (n_prior + n_obs) 61 else: 62 mx_new = preds.mean(0) 63 my_new = target.mean(0) 64 65 n_prior += n_obs 66 67 if cond: 68 var_x += ((preds - mx_new) * (preds - mean_x)).sum(0) 69 var_y += ((target - my_new) * (target - mean_y)).sum(0) 70 71 else: 72 var_x += preds.var(0) * (n_obs - 1) 73 var_y += target.var(0) * (n_obs - 1) 74 corr_xy += ((preds - mx_new) * (target - mean_y)).sum(0) 75 mean_x = mx_new 76 mean_y = my_new 77 78 return mean_x, mean_y, var_x, var_y, corr_xy, n_prior 79 80 81 def _pearson_corrcoef_compute( 82 var_x: Tensor, 83 var_y: Tensor, 84 corr_xy: Tensor, 85 nb: Tensor, 86 ) -> Tensor: 87 """Compute the final pearson correlation based on accumulated statistics. 88 89 Args: 90 var_x: variance estimate of x tensor 91 var_y: variance estimate of y tensor 92 corr_xy: covariance estimate between x and y tensor 93 nb: number of observations 94 95 """ 96 var_x /= nb - 1 97 var_y /= nb - 1 98 corr_xy /= nb - 1 99 # if var_x, var_y is float16 and on cpu, make it bfloat16 as sqrt is not supported for float16 100 # on cpu, remove this after https://github.com/pytorch/pytorch/issues/54774 is fixed 101 if var_x.dtype == torch.float16 and var_x.device == torch.device("cpu"): 102 var_x = var_x.bfloat16() 103 var_y = var_y.bfloat16() 104 105 bound = math.sqrt(torch.finfo(var_x.dtype).eps) 106 if (var_x < bound).any() or (var_y < bound).any(): 107 rank_zero_warn( 108 "The variance of predictions or target is close to zero. This can cause instability in Pearson correlation" 109 "coefficient, leading to wrong results. Consider re-scaling the input if possible or computing using a" 110 f"larger dtype (currently using {var_x.dtype}).", 111 UserWarning, 112 ) 113 114 corrcoef = (corr_xy / (var_x * var_y).sqrt()).squeeze() 115 return torch.clamp(corrcoef, -1.0, 1.0) 116 117 118 def pearson_corrcoef(preds: Tensor, target: Tensor) -> Tensor: 119 """Compute pearson correlation coefficient. 
120 121 Args: 122 preds: estimated scores 123 target: ground truth scores 124 125 Example (single output regression): 126 >>> from torchmetrics.functional.regression import pearson_corrcoef 127 >>> target = torch.tensor([3, -0.5, 2, 7]) 128 >>> preds = torch.tensor([2.5, 0.0, 2, 8]) 129 >>> pearson_corrcoef(preds, target) 130 tensor(0.9849) 131 132 Example (multi output regression): 133 >>> from torchmetrics.functional.regression import pearson_corrcoef 134 >>> target = torch.tensor([[3, -0.5], [2, 7]]) 135 >>> preds = torch.tensor([[2.5, 0.0], [2, 8]]) 136 >>> pearson_corrcoef(preds, target) 137 tensor([1., 1.]) 138 139 """ 140 d = preds.shape[1] if preds.ndim == 2 else 1 141 _temp = torch.zeros(d, dtype=preds.dtype, device=preds.device) 142 mean_x, mean_y, var_x = _temp.clone(), _temp.clone(), _temp.clone() 143 var_y, corr_xy, nb = _temp.clone(), _temp.clone(), _temp.clone() 144 _, _, var_x, var_y, corr_xy, nb = _pearson_corrcoef_update( 145 preds, target, mean_x, mean_y, var_x, var_y, corr_xy, nb, num_outputs=1 if preds.ndim == 1 else preds.shape[-1] 146 ) 147 return _pearson_corrcoef_compute(var_x, var_y, corr_xy, nb) 148 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/torchmetrics/functional/regression/pearson.py b/src/torchmetrics/functional/regression/pearson.py --- a/src/torchmetrics/functional/regression/pearson.py +++ b/src/torchmetrics/functional/regression/pearson.py @@ -52,9 +52,9 @@ # Data checking _check_same_shape(preds, target) _check_data_shape_to_num_outputs(preds, target, num_outputs) - cond = n_prior.mean() > 0 - n_obs = preds.shape[0] + cond = n_prior.mean() > 0 or n_obs == 1 + if cond: mx_new = (n_prior * mean_x + preds.sum(0)) / (n_prior + n_obs) my_new = (n_prior * mean_y + target.sum(0)) / (n_prior + n_obs) @@ -67,7 +67,6 @@ if cond: var_x += ((preds - mx_new) * (preds - mean_x)).sum(0) var_y += ((target - my_new) * (target - mean_y)).sum(0) - else: var_x += preds.var(0) * (n_obs - 1) var_y += target.var(0) * (n_obs - 1)
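The patch moves the `n_obs` computation up and treats a single-observation batch like a continuation of the running statistics, since `preds.var(0)` on one sample is NaN (unbiased variance divides by n − 1 = 0) and NaN · 0 then poisons the accumulated state. A standalone single-output sketch of the patched update rule — variable and function names are illustrative, not the torchmetrics API — that reproduces the value expected in the issue:

```python
import torch

def pearson_update(state, preds, target):
    mean_x, mean_y, var_x, var_y, corr_xy, n = state
    n_obs = preds.shape[0]
    cond = n > 0 or n_obs == 1          # the patched condition
    if cond:                            # fold the batch into the running means
        mx_new = (n * mean_x + preds.sum()) / (n + n_obs)
        my_new = (n * mean_y + target.sum()) / (n + n_obs)
    else:                               # first multi-sample batch
        mx_new, my_new = preds.mean(), target.mean()
    n += n_obs
    if cond:                            # Welford-style co-moment accumulation
        var_x = var_x + ((preds - mx_new) * (preds - mean_x)).sum()
        var_y = var_y + ((target - my_new) * (target - mean_y)).sum()
    else:                               # preds.var() is NaN when n_obs == 1 -> the old bug
        var_x = var_x + preds.var() * (n_obs - 1)
        var_y = var_y + target.var() * (n_obs - 1)
    corr_xy = corr_xy + ((preds - mx_new) * (target - mean_y)).sum()
    return mx_new, my_new, var_x, var_y, corr_xy, n

state = (torch.tensor(0.0),) * 5 + (0,)
for p, t in zip([3.0, -0.5, 2.0, 7.0], [2.5, 0.0, 2.0, 8.0]):
    state = pearson_update(state, torch.tensor([p]), torch.tensor([t]))
_, _, vx, vy, cxy, _ = state
# The library divides vx, vy and cxy by n - 1 before taking this ratio; it cancels out.
print(cxy / (vx * vy).sqrt())           # tensor(0.9849) instead of tensor(nan)
```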
{"golden_diff": "diff --git a/src/torchmetrics/functional/regression/pearson.py b/src/torchmetrics/functional/regression/pearson.py\n--- a/src/torchmetrics/functional/regression/pearson.py\n+++ b/src/torchmetrics/functional/regression/pearson.py\n@@ -52,9 +52,9 @@\n # Data checking\n _check_same_shape(preds, target)\n _check_data_shape_to_num_outputs(preds, target, num_outputs)\n- cond = n_prior.mean() > 0\n-\n n_obs = preds.shape[0]\n+ cond = n_prior.mean() > 0 or n_obs == 1\n+\n if cond:\n mx_new = (n_prior * mean_x + preds.sum(0)) / (n_prior + n_obs)\n my_new = (n_prior * mean_y + target.sum(0)) / (n_prior + n_obs)\n@@ -67,7 +67,6 @@\n if cond:\n var_x += ((preds - mx_new) * (preds - mean_x)).sum(0)\n var_y += ((target - my_new) * (target - mean_y)).sum(0)\n-\n else:\n var_x += preds.var(0) * (n_obs - 1)\n var_y += target.var(0) * (n_obs - 1)\n", "issue": "Pearson Correlation Coefficient fails when updating one batch at a time\n## \ud83d\udc1b Bug\r\n\r\nThe `PearsonCorrCoef` fails when using a single batch per update.\r\n\r\n### To Reproduce\r\n```python\r\nimport torch\r\nfrom torchmetrics import PearsonCorrCoef\r\n\r\nmetric = PearsonCorrCoef()\r\n\r\n# Works\r\nmetric(torch.tensor([3.0, -0.5, 2.0, 7.0]), torch.tensor([2.5, 0.0, 2.0, 8.0]))\r\nprint(metric.compute()) # tensor(0.9849)\r\n\r\nmetric.reset()\r\n\r\n# Doesn't work.\r\nmetric(torch.tensor([3.0]), torch.tensor([2.5]))\r\nmetric(torch.tensor([-0.5]), torch.tensor([0.0]))\r\nmetric(torch.tensor([2.0]), torch.tensor([2.0]))\r\nmetric(torch.tensor([7.0]), torch.tensor([8.0]))\r\nprint(metric.compute()) # tensor(nan)\r\n```\r\n\r\n### Expected behavior\r\n\r\nBoth ways of updating the metric should work.\r\n\r\n### Environment\r\nPython 3.10\r\ntorchmetrics==1.03\r\ntorch==2.01\r\n\n", "before_files": [{"content": "# Copyright The Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport math\nfrom typing import Tuple\n\nimport torch\nfrom torch import Tensor\n\nfrom torchmetrics.functional.regression.utils import _check_data_shape_to_num_outputs\nfrom torchmetrics.utilities import rank_zero_warn\nfrom torchmetrics.utilities.checks import _check_same_shape\n\n\ndef _pearson_corrcoef_update(\n preds: Tensor,\n target: Tensor,\n mean_x: Tensor,\n mean_y: Tensor,\n var_x: Tensor,\n var_y: Tensor,\n corr_xy: Tensor,\n n_prior: Tensor,\n num_outputs: int,\n) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor, Tensor]:\n \"\"\"Update and returns variables required to compute Pearson Correlation Coefficient.\n\n Check for same shape of input tensors.\n\n Args:\n preds: estimated scores\n target: ground truth scores\n mean_x: current mean estimate of x tensor\n mean_y: current mean estimate of y tensor\n var_x: current variance estimate of x tensor\n var_y: current variance estimate of y tensor\n corr_xy: current covariance estimate between x and y tensor\n n_prior: current number of observed observations\n num_outputs: Number of outputs in multioutput setting\n\n \"\"\"\n # Data checking\n _check_same_shape(preds, target)\n 
_check_data_shape_to_num_outputs(preds, target, num_outputs)\n cond = n_prior.mean() > 0\n\n n_obs = preds.shape[0]\n if cond:\n mx_new = (n_prior * mean_x + preds.sum(0)) / (n_prior + n_obs)\n my_new = (n_prior * mean_y + target.sum(0)) / (n_prior + n_obs)\n else:\n mx_new = preds.mean(0)\n my_new = target.mean(0)\n\n n_prior += n_obs\n\n if cond:\n var_x += ((preds - mx_new) * (preds - mean_x)).sum(0)\n var_y += ((target - my_new) * (target - mean_y)).sum(0)\n\n else:\n var_x += preds.var(0) * (n_obs - 1)\n var_y += target.var(0) * (n_obs - 1)\n corr_xy += ((preds - mx_new) * (target - mean_y)).sum(0)\n mean_x = mx_new\n mean_y = my_new\n\n return mean_x, mean_y, var_x, var_y, corr_xy, n_prior\n\n\ndef _pearson_corrcoef_compute(\n var_x: Tensor,\n var_y: Tensor,\n corr_xy: Tensor,\n nb: Tensor,\n) -> Tensor:\n \"\"\"Compute the final pearson correlation based on accumulated statistics.\n\n Args:\n var_x: variance estimate of x tensor\n var_y: variance estimate of y tensor\n corr_xy: covariance estimate between x and y tensor\n nb: number of observations\n\n \"\"\"\n var_x /= nb - 1\n var_y /= nb - 1\n corr_xy /= nb - 1\n # if var_x, var_y is float16 and on cpu, make it bfloat16 as sqrt is not supported for float16\n # on cpu, remove this after https://github.com/pytorch/pytorch/issues/54774 is fixed\n if var_x.dtype == torch.float16 and var_x.device == torch.device(\"cpu\"):\n var_x = var_x.bfloat16()\n var_y = var_y.bfloat16()\n\n bound = math.sqrt(torch.finfo(var_x.dtype).eps)\n if (var_x < bound).any() or (var_y < bound).any():\n rank_zero_warn(\n \"The variance of predictions or target is close to zero. This can cause instability in Pearson correlation\"\n \"coefficient, leading to wrong results. Consider re-scaling the input if possible or computing using a\"\n f\"larger dtype (currently using {var_x.dtype}).\",\n UserWarning,\n )\n\n corrcoef = (corr_xy / (var_x * var_y).sqrt()).squeeze()\n return torch.clamp(corrcoef, -1.0, 1.0)\n\n\ndef pearson_corrcoef(preds: Tensor, target: Tensor) -> Tensor:\n \"\"\"Compute pearson correlation coefficient.\n\n Args:\n preds: estimated scores\n target: ground truth scores\n\n Example (single output regression):\n >>> from torchmetrics.functional.regression import pearson_corrcoef\n >>> target = torch.tensor([3, -0.5, 2, 7])\n >>> preds = torch.tensor([2.5, 0.0, 2, 8])\n >>> pearson_corrcoef(preds, target)\n tensor(0.9849)\n\n Example (multi output regression):\n >>> from torchmetrics.functional.regression import pearson_corrcoef\n >>> target = torch.tensor([[3, -0.5], [2, 7]])\n >>> preds = torch.tensor([[2.5, 0.0], [2, 8]])\n >>> pearson_corrcoef(preds, target)\n tensor([1., 1.])\n\n \"\"\"\n d = preds.shape[1] if preds.ndim == 2 else 1\n _temp = torch.zeros(d, dtype=preds.dtype, device=preds.device)\n mean_x, mean_y, var_x = _temp.clone(), _temp.clone(), _temp.clone()\n var_y, corr_xy, nb = _temp.clone(), _temp.clone(), _temp.clone()\n _, _, var_x, var_y, corr_xy, nb = _pearson_corrcoef_update(\n preds, target, mean_x, mean_y, var_x, var_y, corr_xy, nb, num_outputs=1 if preds.ndim == 1 else preds.shape[-1]\n )\n return _pearson_corrcoef_compute(var_x, var_y, corr_xy, nb)\n", "path": "src/torchmetrics/functional/regression/pearson.py"}], "after_files": [{"content": "# Copyright The Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless 
required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport math\nfrom typing import Tuple\n\nimport torch\nfrom torch import Tensor\n\nfrom torchmetrics.functional.regression.utils import _check_data_shape_to_num_outputs\nfrom torchmetrics.utilities import rank_zero_warn\nfrom torchmetrics.utilities.checks import _check_same_shape\n\n\ndef _pearson_corrcoef_update(\n preds: Tensor,\n target: Tensor,\n mean_x: Tensor,\n mean_y: Tensor,\n var_x: Tensor,\n var_y: Tensor,\n corr_xy: Tensor,\n n_prior: Tensor,\n num_outputs: int,\n) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor, Tensor]:\n \"\"\"Update and returns variables required to compute Pearson Correlation Coefficient.\n\n Check for same shape of input tensors.\n\n Args:\n preds: estimated scores\n target: ground truth scores\n mean_x: current mean estimate of x tensor\n mean_y: current mean estimate of y tensor\n var_x: current variance estimate of x tensor\n var_y: current variance estimate of y tensor\n corr_xy: current covariance estimate between x and y tensor\n n_prior: current number of observed observations\n num_outputs: Number of outputs in multioutput setting\n\n \"\"\"\n # Data checking\n _check_same_shape(preds, target)\n _check_data_shape_to_num_outputs(preds, target, num_outputs)\n n_obs = preds.shape[0]\n cond = n_prior.mean() > 0 or n_obs == 1\n\n if cond:\n mx_new = (n_prior * mean_x + preds.sum(0)) / (n_prior + n_obs)\n my_new = (n_prior * mean_y + target.sum(0)) / (n_prior + n_obs)\n else:\n mx_new = preds.mean(0)\n my_new = target.mean(0)\n\n n_prior += n_obs\n\n if cond:\n var_x += ((preds - mx_new) * (preds - mean_x)).sum(0)\n var_y += ((target - my_new) * (target - mean_y)).sum(0)\n else:\n var_x += preds.var(0) * (n_obs - 1)\n var_y += target.var(0) * (n_obs - 1)\n corr_xy += ((preds - mx_new) * (target - mean_y)).sum(0)\n mean_x = mx_new\n mean_y = my_new\n\n return mean_x, mean_y, var_x, var_y, corr_xy, n_prior\n\n\ndef _pearson_corrcoef_compute(\n var_x: Tensor,\n var_y: Tensor,\n corr_xy: Tensor,\n nb: Tensor,\n) -> Tensor:\n \"\"\"Compute the final pearson correlation based on accumulated statistics.\n\n Args:\n var_x: variance estimate of x tensor\n var_y: variance estimate of y tensor\n corr_xy: covariance estimate between x and y tensor\n nb: number of observations\n\n \"\"\"\n var_x /= nb - 1\n var_y /= nb - 1\n corr_xy /= nb - 1\n # if var_x, var_y is float16 and on cpu, make it bfloat16 as sqrt is not supported for float16\n # on cpu, remove this after https://github.com/pytorch/pytorch/issues/54774 is fixed\n if var_x.dtype == torch.float16 and var_x.device == torch.device(\"cpu\"):\n var_x = var_x.bfloat16()\n var_y = var_y.bfloat16()\n\n bound = math.sqrt(torch.finfo(var_x.dtype).eps)\n if (var_x < bound).any() or (var_y < bound).any():\n rank_zero_warn(\n \"The variance of predictions or target is close to zero. This can cause instability in Pearson correlation\"\n \"coefficient, leading to wrong results. 
Consider re-scaling the input if possible or computing using a\"\n f\"larger dtype (currently using {var_x.dtype}).\",\n UserWarning,\n )\n\n corrcoef = (corr_xy / (var_x * var_y).sqrt()).squeeze()\n return torch.clamp(corrcoef, -1.0, 1.0)\n\n\ndef pearson_corrcoef(preds: Tensor, target: Tensor) -> Tensor:\n \"\"\"Compute pearson correlation coefficient.\n\n Args:\n preds: estimated scores\n target: ground truth scores\n\n Example (single output regression):\n >>> from torchmetrics.functional.regression import pearson_corrcoef\n >>> target = torch.tensor([3, -0.5, 2, 7])\n >>> preds = torch.tensor([2.5, 0.0, 2, 8])\n >>> pearson_corrcoef(preds, target)\n tensor(0.9849)\n\n Example (multi output regression):\n >>> from torchmetrics.functional.regression import pearson_corrcoef\n >>> target = torch.tensor([[3, -0.5], [2, 7]])\n >>> preds = torch.tensor([[2.5, 0.0], [2, 8]])\n >>> pearson_corrcoef(preds, target)\n tensor([1., 1.])\n\n \"\"\"\n d = preds.shape[1] if preds.ndim == 2 else 1\n _temp = torch.zeros(d, dtype=preds.dtype, device=preds.device)\n mean_x, mean_y, var_x = _temp.clone(), _temp.clone(), _temp.clone()\n var_y, corr_xy, nb = _temp.clone(), _temp.clone(), _temp.clone()\n _, _, var_x, var_y, corr_xy, nb = _pearson_corrcoef_update(\n preds, target, mean_x, mean_y, var_x, var_y, corr_xy, nb, num_outputs=1 if preds.ndim == 1 else preds.shape[-1]\n )\n return _pearson_corrcoef_compute(var_x, var_y, corr_xy, nb)\n", "path": "src/torchmetrics/functional/regression/pearson.py"}]}
2,253
294
gh_patches_debug_23499
rasdani/github-patches
git_diff
getpelican__pelican-1140
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Atom feeds don't validate with SITEURL containing HTTPS scheme and/or a specific service port. When SITEURL = 'https://example.com' or 'http://example.com:8080', `writers.py` generates `unique_id` producing wrong 'TAG:' IDs. A possible fix could be to switch **line 45** from : ``` python unique_id='tag:%s,%s:%s' % (self.site_url.replace('http://', ''), item.date.date(), item.url), ``` to : ``` python unique_id='tag:%s,%s:%s' % (re.sub('^https?://(?P<host>.*?)(:\d+)?$','\g<host>',self.site_url), item.date.date(), item.url), ``` Atom feeds don't validate with SITEURL containing HTTPS scheme and/or a specific service port. When SITEURL = 'https://example.com' or 'http://example.com:8080', `writers.py` generates `unique_id` producing wrong 'TAG:' IDs. A possible fix could be to switch **line 45** from : ``` python unique_id='tag:%s,%s:%s' % (self.site_url.replace('http://', ''), item.date.date(), item.url), ``` to : ``` python unique_id='tag:%s,%s:%s' % (re.sub('^https?://(?P<host>.*?)(:\d+)?$','\g<host>',self.site_url), item.date.date(), item.url), ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pelican/writers.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 from __future__ import with_statement, unicode_literals, print_function 3 import six 4 5 import os 6 import locale 7 import logging 8 9 if not six.PY3: 10 from codecs import open 11 12 from feedgenerator import Atom1Feed, Rss201rev2Feed 13 from jinja2 import Markup 14 15 from pelican.paginator import Paginator 16 from pelican.utils import get_relative_path, path_to_url, set_date_tzinfo 17 from pelican import signals 18 19 logger = logging.getLogger(__name__) 20 21 22 class Writer(object): 23 24 def __init__(self, output_path, settings=None): 25 self.output_path = output_path 26 self.reminder = dict() 27 self.settings = settings or {} 28 self._written_files = set() 29 self._overridden_files = set() 30 31 def _create_new_feed(self, feed_type, context): 32 feed_class = Rss201rev2Feed if feed_type == 'rss' else Atom1Feed 33 sitename = Markup(context['SITENAME']).striptags() 34 feed = feed_class( 35 title=sitename, 36 link=(self.site_url + '/'), 37 feed_url=self.feed_url, 38 description=context.get('SITESUBTITLE', '')) 39 return feed 40 41 def _add_item_to_the_feed(self, feed, item): 42 43 title = Markup(item.title).striptags() 44 feed.add_item( 45 title=title, 46 link='%s/%s' % (self.site_url, item.url), 47 unique_id='tag:%s,%s:%s' % (self.site_url.replace('http://', ''), 48 item.date.date(), item.url), 49 description=item.get_content(self.site_url), 50 categories=item.tags if hasattr(item, 'tags') else None, 51 author_name=getattr(item, 'author', ''), 52 pubdate=set_date_tzinfo(item.date, 53 self.settings.get('TIMEZONE', None))) 54 55 def _open_w(self, filename, encoding, override=False): 56 """Open a file to write some content to it. 57 58 Exit if we have already written to that file, unless one (and no more 59 than one) of the writes has the override parameter set to True. 
60 """ 61 if filename in self._overridden_files: 62 if override: 63 raise RuntimeError('File %s is set to be overridden twice' 64 % filename) 65 else: 66 logger.info('skipping %s' % filename) 67 filename = os.devnull 68 elif filename in self._written_files: 69 if override: 70 logger.info('overwriting %s' % filename) 71 else: 72 raise RuntimeError('File %s is to be overwritten' % filename) 73 if override: 74 self._overridden_files.add(filename) 75 self._written_files.add(filename) 76 return open(filename, 'w', encoding=encoding) 77 78 def write_feed(self, elements, context, path=None, feed_type='atom'): 79 """Generate a feed with the list of articles provided 80 81 Return the feed. If no path or output_path is specified, just 82 return the feed object. 83 84 :param elements: the articles to put on the feed. 85 :param context: the context to get the feed metadata. 86 :param path: the path to output. 87 :param feed_type: the feed type to use (atom or rss) 88 """ 89 old_locale = locale.setlocale(locale.LC_ALL) 90 locale.setlocale(locale.LC_ALL, str('C')) 91 try: 92 self.site_url = context.get( 93 'SITEURL', path_to_url(get_relative_path(path))) 94 95 self.feed_domain = context.get('FEED_DOMAIN') 96 self.feed_url = '{}/{}'.format(self.feed_domain, path) 97 98 feed = self._create_new_feed(feed_type, context) 99 100 max_items = len(elements) 101 if self.settings['FEED_MAX_ITEMS']: 102 max_items = min(self.settings['FEED_MAX_ITEMS'], max_items) 103 for i in range(max_items): 104 self._add_item_to_the_feed(feed, elements[i]) 105 106 if path: 107 complete_path = os.path.join(self.output_path, path) 108 try: 109 os.makedirs(os.path.dirname(complete_path)) 110 except Exception: 111 pass 112 113 encoding = 'utf-8' if six.PY3 else None 114 with self._open_w(complete_path, encoding) as fp: 115 feed.write(fp, 'utf-8') 116 logger.info('writing %s' % complete_path) 117 return feed 118 finally: 119 locale.setlocale(locale.LC_ALL, old_locale) 120 121 def write_file(self, name, template, context, relative_urls=False, 122 paginated=None, override_output=False, **kwargs): 123 """Render the template and write the file. 124 125 :param name: name of the file to output 126 :param template: template to use to generate the content 127 :param context: dict to pass to the templates. 128 :param relative_urls: use relative urls or absolutes ones 129 :param paginated: dict of article list to paginate - must have the 130 same length (same list in different orders) 131 :param override_output: boolean telling if we can override previous 132 output with the same name (and if next files written with the same 133 name should be skipped to keep that one) 134 :param **kwargs: additional variables to pass to the templates 135 """ 136 137 if name is False: 138 return 139 elif not name: 140 # other stuff, just return for now 141 return 142 143 def _write_file(template, localcontext, output_path, name, override): 144 """Render the template write the file.""" 145 old_locale = locale.setlocale(locale.LC_ALL) 146 locale.setlocale(locale.LC_ALL, str('C')) 147 try: 148 output = template.render(localcontext) 149 finally: 150 locale.setlocale(locale.LC_ALL, old_locale) 151 path = os.path.join(output_path, name) 152 try: 153 os.makedirs(os.path.dirname(path)) 154 except Exception: 155 pass 156 157 with self._open_w(path, 'utf-8', override=override) as f: 158 f.write(output) 159 logger.info('writing {}'.format(path)) 160 161 # Send a signal to say we're writing a file with some specific 162 # local context. 
163 signals.content_written.send(path, context=localcontext) 164 165 localcontext = context.copy() 166 if relative_urls: 167 relative_url = path_to_url(get_relative_path(name)) 168 context['localsiteurl'] = relative_url 169 localcontext['SITEURL'] = relative_url 170 171 localcontext['output_file'] = name 172 localcontext.update(kwargs) 173 174 # check paginated 175 paginated = paginated or {} 176 if paginated: 177 name_root = os.path.splitext(name)[0] 178 179 # pagination needed, init paginators 180 paginators = {} 181 for key in paginated.keys(): 182 object_list = paginated[key] 183 184 paginators[key] = Paginator( 185 name_root, 186 object_list, 187 self.settings, 188 ) 189 190 # generated pages, and write 191 for page_num in range(list(paginators.values())[0].num_pages): 192 paginated_localcontext = localcontext.copy() 193 for key in paginators.keys(): 194 paginator = paginators[key] 195 previous_page = paginator.page(page_num) \ 196 if page_num > 0 else None 197 page = paginator.page(page_num + 1) 198 next_page = paginator.page(page_num + 2) \ 199 if page_num + 1 < paginator.num_pages else None 200 paginated_localcontext.update( 201 {'%s_paginator' % key: paginator, 202 '%s_page' % key: page, 203 '%s_previous_page' % key: previous_page, 204 '%s_next_page' % key: next_page}) 205 206 _write_file(template, paginated_localcontext, self.output_path, 207 page.save_as, override_output) 208 else: 209 # no pagination 210 _write_file(template, localcontext, self.output_path, name, 211 override_output) 212 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pelican/writers.py b/pelican/writers.py --- a/pelican/writers.py +++ b/pelican/writers.py @@ -8,6 +8,9 @@ if not six.PY3: from codecs import open + from urlparse import urlparse +else: + from urllib.parse import urlparse from feedgenerator import Atom1Feed, Rss201rev2Feed from jinja2 import Markup @@ -41,11 +44,13 @@ def _add_item_to_the_feed(self, feed, item): title = Markup(item.title).striptags() + link = '%s/%s' % (self.site_url, item.url) feed.add_item( title=title, - link='%s/%s' % (self.site_url, item.url), - unique_id='tag:%s,%s:%s' % (self.site_url.replace('http://', ''), - item.date.date(), item.url), + link=link, + unique_id='tag:%s,%s:%s' % (urlparse(link).netloc, + item.date.date(), + urlparse(link).path.lstrip('/')), description=item.get_content(self.site_url), categories=item.tags if hasattr(item, 'tags') else None, author_name=getattr(item, 'author', ''),
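The merged fix builds the tag id from `urlparse(link).netloc` and the link path rather than stripping a hard-coded `http://` prefix, so an `https` scheme can no longer leak into the id. A standalone sketch of that behaviour (Python 3 import shown; this is not the `Writer` class itself, and the date is passed as a plain string for brevity):

```python
from urllib.parse import urlparse  # `from urlparse import urlparse` on Python 2

def tag_uri(site_url, slug, date):
    link = "%s/%s" % (site_url, slug)
    parts = urlparse(link)
    return "tag:%s,%s:%s" % (parts.netloc, date, parts.path.lstrip("/"))

print(tag_uri("http://example.com", "post.html", "2013-11-04"))
# tag:example.com,2013-11-04:post.html
print(tag_uri("https://example.com", "post.html", "2013-11-04"))
# tag:example.com,2013-11-04:post.html  (scheme no longer leaks into the id)
print(tag_uri("http://example.com:8080", "post.html", "2013-11-04"))
# tag:example.com:8080,2013-11-04:post.html  (netloc keeps the port)
```

Note that `netloc` still carries an explicit port such as `:8080`, which differs from the port-stripping regex proposed in the issue text.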
{"golden_diff": "diff --git a/pelican/writers.py b/pelican/writers.py\n--- a/pelican/writers.py\n+++ b/pelican/writers.py\n@@ -8,6 +8,9 @@\n \n if not six.PY3:\n from codecs import open\n+ from urlparse import urlparse\n+else:\n+ from urllib.parse import urlparse\n \n from feedgenerator import Atom1Feed, Rss201rev2Feed\n from jinja2 import Markup\n@@ -41,11 +44,13 @@\n def _add_item_to_the_feed(self, feed, item):\n \n title = Markup(item.title).striptags()\n+ link = '%s/%s' % (self.site_url, item.url)\n feed.add_item(\n title=title,\n- link='%s/%s' % (self.site_url, item.url),\n- unique_id='tag:%s,%s:%s' % (self.site_url.replace('http://', ''),\n- item.date.date(), item.url),\n+ link=link,\n+ unique_id='tag:%s,%s:%s' % (urlparse(link).netloc,\n+ item.date.date(),\n+ urlparse(link).path.lstrip('/')),\n description=item.get_content(self.site_url),\n categories=item.tags if hasattr(item, 'tags') else None,\n author_name=getattr(item, 'author', ''),\n", "issue": "Atom feeds don't validate with SITEURL containing HTTPS scheme and/or a specific service port.\nWhen SITEURL = 'https://example.com' or 'http://example.com:8080', `writers.py` generates `unique_id` producing wrong 'TAG:' IDs.\n\nA possible fix could be to switch **line 45** from :\n\n``` python\nunique_id='tag:%s,%s:%s' % (self.site_url.replace('http://', ''),\n item.date.date(), item.url),\n```\n\nto :\n\n``` python\nunique_id='tag:%s,%s:%s' % (re.sub('^https?://(?P<host>.*?)(:\\d+)?$','\\g<host>',self.site_url),\n item.date.date(), item.url),\n```\n\nAtom feeds don't validate with SITEURL containing HTTPS scheme and/or a specific service port.\nWhen SITEURL = 'https://example.com' or 'http://example.com:8080', `writers.py` generates `unique_id` producing wrong 'TAG:' IDs.\n\nA possible fix could be to switch **line 45** from :\n\n``` python\nunique_id='tag:%s,%s:%s' % (self.site_url.replace('http://', ''),\n item.date.date(), item.url),\n```\n\nto :\n\n``` python\nunique_id='tag:%s,%s:%s' % (re.sub('^https?://(?P<host>.*?)(:\\d+)?$','\\g<host>',self.site_url),\n item.date.date(), item.url),\n```\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import with_statement, unicode_literals, print_function\nimport six\n\nimport os\nimport locale\nimport logging\n\nif not six.PY3:\n from codecs import open\n\nfrom feedgenerator import Atom1Feed, Rss201rev2Feed\nfrom jinja2 import Markup\n\nfrom pelican.paginator import Paginator\nfrom pelican.utils import get_relative_path, path_to_url, set_date_tzinfo\nfrom pelican import signals\n\nlogger = logging.getLogger(__name__)\n\n\nclass Writer(object):\n\n def __init__(self, output_path, settings=None):\n self.output_path = output_path\n self.reminder = dict()\n self.settings = settings or {}\n self._written_files = set()\n self._overridden_files = set()\n\n def _create_new_feed(self, feed_type, context):\n feed_class = Rss201rev2Feed if feed_type == 'rss' else Atom1Feed\n sitename = Markup(context['SITENAME']).striptags()\n feed = feed_class(\n title=sitename,\n link=(self.site_url + '/'),\n feed_url=self.feed_url,\n description=context.get('SITESUBTITLE', ''))\n return feed\n\n def _add_item_to_the_feed(self, feed, item):\n\n title = Markup(item.title).striptags()\n feed.add_item(\n title=title,\n link='%s/%s' % (self.site_url, item.url),\n unique_id='tag:%s,%s:%s' % (self.site_url.replace('http://', ''),\n item.date.date(), item.url),\n description=item.get_content(self.site_url),\n categories=item.tags if hasattr(item, 'tags') else None,\n author_name=getattr(item, 
'author', ''),\n pubdate=set_date_tzinfo(item.date,\n self.settings.get('TIMEZONE', None)))\n\n def _open_w(self, filename, encoding, override=False):\n \"\"\"Open a file to write some content to it.\n\n Exit if we have already written to that file, unless one (and no more\n than one) of the writes has the override parameter set to True.\n \"\"\"\n if filename in self._overridden_files:\n if override:\n raise RuntimeError('File %s is set to be overridden twice'\n % filename)\n else:\n logger.info('skipping %s' % filename)\n filename = os.devnull\n elif filename in self._written_files:\n if override:\n logger.info('overwriting %s' % filename)\n else:\n raise RuntimeError('File %s is to be overwritten' % filename)\n if override:\n self._overridden_files.add(filename)\n self._written_files.add(filename)\n return open(filename, 'w', encoding=encoding)\n\n def write_feed(self, elements, context, path=None, feed_type='atom'):\n \"\"\"Generate a feed with the list of articles provided\n\n Return the feed. If no path or output_path is specified, just\n return the feed object.\n\n :param elements: the articles to put on the feed.\n :param context: the context to get the feed metadata.\n :param path: the path to output.\n :param feed_type: the feed type to use (atom or rss)\n \"\"\"\n old_locale = locale.setlocale(locale.LC_ALL)\n locale.setlocale(locale.LC_ALL, str('C'))\n try:\n self.site_url = context.get(\n 'SITEURL', path_to_url(get_relative_path(path)))\n\n self.feed_domain = context.get('FEED_DOMAIN')\n self.feed_url = '{}/{}'.format(self.feed_domain, path)\n\n feed = self._create_new_feed(feed_type, context)\n\n max_items = len(elements)\n if self.settings['FEED_MAX_ITEMS']:\n max_items = min(self.settings['FEED_MAX_ITEMS'], max_items)\n for i in range(max_items):\n self._add_item_to_the_feed(feed, elements[i])\n\n if path:\n complete_path = os.path.join(self.output_path, path)\n try:\n os.makedirs(os.path.dirname(complete_path))\n except Exception:\n pass\n\n encoding = 'utf-8' if six.PY3 else None\n with self._open_w(complete_path, encoding) as fp:\n feed.write(fp, 'utf-8')\n logger.info('writing %s' % complete_path)\n return feed\n finally:\n locale.setlocale(locale.LC_ALL, old_locale)\n\n def write_file(self, name, template, context, relative_urls=False,\n paginated=None, override_output=False, **kwargs):\n \"\"\"Render the template and write the file.\n\n :param name: name of the file to output\n :param template: template to use to generate the content\n :param context: dict to pass to the templates.\n :param relative_urls: use relative urls or absolutes ones\n :param paginated: dict of article list to paginate - must have the\n same length (same list in different orders)\n :param override_output: boolean telling if we can override previous\n output with the same name (and if next files written with the same\n name should be skipped to keep that one)\n :param **kwargs: additional variables to pass to the templates\n \"\"\"\n\n if name is False:\n return\n elif not name:\n # other stuff, just return for now\n return\n\n def _write_file(template, localcontext, output_path, name, override):\n \"\"\"Render the template write the file.\"\"\"\n old_locale = locale.setlocale(locale.LC_ALL)\n locale.setlocale(locale.LC_ALL, str('C'))\n try:\n output = template.render(localcontext)\n finally:\n locale.setlocale(locale.LC_ALL, old_locale)\n path = os.path.join(output_path, name)\n try:\n os.makedirs(os.path.dirname(path))\n except Exception:\n pass\n\n with self._open_w(path, 'utf-8', 
override=override) as f:\n f.write(output)\n logger.info('writing {}'.format(path))\n\n # Send a signal to say we're writing a file with some specific\n # local context.\n signals.content_written.send(path, context=localcontext)\n\n localcontext = context.copy()\n if relative_urls:\n relative_url = path_to_url(get_relative_path(name))\n context['localsiteurl'] = relative_url\n localcontext['SITEURL'] = relative_url\n\n localcontext['output_file'] = name\n localcontext.update(kwargs)\n\n # check paginated\n paginated = paginated or {}\n if paginated:\n name_root = os.path.splitext(name)[0]\n\n # pagination needed, init paginators\n paginators = {}\n for key in paginated.keys():\n object_list = paginated[key]\n\n paginators[key] = Paginator(\n name_root,\n object_list,\n self.settings,\n )\n\n # generated pages, and write\n for page_num in range(list(paginators.values())[0].num_pages):\n paginated_localcontext = localcontext.copy()\n for key in paginators.keys():\n paginator = paginators[key]\n previous_page = paginator.page(page_num) \\\n if page_num > 0 else None\n page = paginator.page(page_num + 1)\n next_page = paginator.page(page_num + 2) \\\n if page_num + 1 < paginator.num_pages else None\n paginated_localcontext.update(\n {'%s_paginator' % key: paginator,\n '%s_page' % key: page,\n '%s_previous_page' % key: previous_page,\n '%s_next_page' % key: next_page})\n\n _write_file(template, paginated_localcontext, self.output_path,\n page.save_as, override_output)\n else:\n # no pagination\n _write_file(template, localcontext, self.output_path, name,\n override_output)\n", "path": "pelican/writers.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import with_statement, unicode_literals, print_function\nimport six\n\nimport os\nimport locale\nimport logging\n\nif not six.PY3:\n from codecs import open\n from urlparse import urlparse\nelse:\n from urllib.parse import urlparse\n\nfrom feedgenerator import Atom1Feed, Rss201rev2Feed\nfrom jinja2 import Markup\n\nfrom pelican.paginator import Paginator\nfrom pelican.utils import get_relative_path, path_to_url, set_date_tzinfo\nfrom pelican import signals\n\nlogger = logging.getLogger(__name__)\n\n\nclass Writer(object):\n\n def __init__(self, output_path, settings=None):\n self.output_path = output_path\n self.reminder = dict()\n self.settings = settings or {}\n self._written_files = set()\n self._overridden_files = set()\n\n def _create_new_feed(self, feed_type, context):\n feed_class = Rss201rev2Feed if feed_type == 'rss' else Atom1Feed\n sitename = Markup(context['SITENAME']).striptags()\n feed = feed_class(\n title=sitename,\n link=(self.site_url + '/'),\n feed_url=self.feed_url,\n description=context.get('SITESUBTITLE', ''))\n return feed\n\n def _add_item_to_the_feed(self, feed, item):\n\n title = Markup(item.title).striptags()\n link = '%s/%s' % (self.site_url, item.url)\n feed.add_item(\n title=title,\n link=link,\n unique_id='tag:%s,%s:%s' % (urlparse(link).netloc,\n item.date.date(),\n urlparse(link).path.lstrip('/')),\n description=item.get_content(self.site_url),\n categories=item.tags if hasattr(item, 'tags') else None,\n author_name=getattr(item, 'author', ''),\n pubdate=set_date_tzinfo(item.date,\n self.settings.get('TIMEZONE', None)))\n\n def _open_w(self, filename, encoding, override=False):\n \"\"\"Open a file to write some content to it.\n\n Exit if we have already written to that file, unless one (and no more\n than one) of the writes has the override parameter set to True.\n \"\"\"\n if filename in 
self._overridden_files:\n if override:\n raise RuntimeError('File %s is set to be overridden twice'\n % filename)\n else:\n logger.info('skipping %s' % filename)\n filename = os.devnull\n elif filename in self._written_files:\n if override:\n logger.info('overwriting %s' % filename)\n else:\n raise RuntimeError('File %s is to be overwritten' % filename)\n if override:\n self._overridden_files.add(filename)\n self._written_files.add(filename)\n return open(filename, 'w', encoding=encoding)\n\n def write_feed(self, elements, context, path=None, feed_type='atom'):\n \"\"\"Generate a feed with the list of articles provided\n\n Return the feed. If no path or output_path is specified, just\n return the feed object.\n\n :param elements: the articles to put on the feed.\n :param context: the context to get the feed metadata.\n :param path: the path to output.\n :param feed_type: the feed type to use (atom or rss)\n \"\"\"\n old_locale = locale.setlocale(locale.LC_ALL)\n locale.setlocale(locale.LC_ALL, str('C'))\n try:\n self.site_url = context.get(\n 'SITEURL', path_to_url(get_relative_path(path)))\n\n self.feed_domain = context.get('FEED_DOMAIN')\n self.feed_url = '{}/{}'.format(self.feed_domain, path)\n\n feed = self._create_new_feed(feed_type, context)\n\n max_items = len(elements)\n if self.settings['FEED_MAX_ITEMS']:\n max_items = min(self.settings['FEED_MAX_ITEMS'], max_items)\n for i in range(max_items):\n self._add_item_to_the_feed(feed, elements[i])\n\n if path:\n complete_path = os.path.join(self.output_path, path)\n try:\n os.makedirs(os.path.dirname(complete_path))\n except Exception:\n pass\n\n encoding = 'utf-8' if six.PY3 else None\n with self._open_w(complete_path, encoding) as fp:\n feed.write(fp, 'utf-8')\n logger.info('writing %s' % complete_path)\n return feed\n finally:\n locale.setlocale(locale.LC_ALL, old_locale)\n\n def write_file(self, name, template, context, relative_urls=False,\n paginated=None, override_output=False, **kwargs):\n \"\"\"Render the template and write the file.\n\n :param name: name of the file to output\n :param template: template to use to generate the content\n :param context: dict to pass to the templates.\n :param relative_urls: use relative urls or absolutes ones\n :param paginated: dict of article list to paginate - must have the\n same length (same list in different orders)\n :param override_output: boolean telling if we can override previous\n output with the same name (and if next files written with the same\n name should be skipped to keep that one)\n :param **kwargs: additional variables to pass to the templates\n \"\"\"\n\n if name is False:\n return\n elif not name:\n # other stuff, just return for now\n return\n\n def _write_file(template, localcontext, output_path, name, override):\n \"\"\"Render the template write the file.\"\"\"\n old_locale = locale.setlocale(locale.LC_ALL)\n locale.setlocale(locale.LC_ALL, str('C'))\n try:\n output = template.render(localcontext)\n finally:\n locale.setlocale(locale.LC_ALL, old_locale)\n path = os.path.join(output_path, name)\n try:\n os.makedirs(os.path.dirname(path))\n except Exception:\n pass\n\n with self._open_w(path, 'utf-8', override=override) as f:\n f.write(output)\n logger.info('writing {}'.format(path))\n\n # Send a signal to say we're writing a file with some specific\n # local context.\n signals.content_written.send(path, context=localcontext)\n\n localcontext = context.copy()\n if relative_urls:\n relative_url = path_to_url(get_relative_path(name))\n context['localsiteurl'] = 
relative_url\n localcontext['SITEURL'] = relative_url\n\n localcontext['output_file'] = name\n localcontext.update(kwargs)\n\n # check paginated\n paginated = paginated or {}\n if paginated:\n name_root = os.path.splitext(name)[0]\n\n # pagination needed, init paginators\n paginators = {}\n for key in paginated.keys():\n object_list = paginated[key]\n\n paginators[key] = Paginator(\n name_root,\n object_list,\n self.settings,\n )\n\n # generated pages, and write\n for page_num in range(list(paginators.values())[0].num_pages):\n paginated_localcontext = localcontext.copy()\n for key in paginators.keys():\n paginator = paginators[key]\n previous_page = paginator.page(page_num) \\\n if page_num > 0 else None\n page = paginator.page(page_num + 1)\n next_page = paginator.page(page_num + 2) \\\n if page_num + 1 < paginator.num_pages else None\n paginated_localcontext.update(\n {'%s_paginator' % key: paginator,\n '%s_page' % key: page,\n '%s_previous_page' % key: previous_page,\n '%s_next_page' % key: next_page})\n\n _write_file(template, paginated_localcontext, self.output_path,\n page.save_as, override_output)\n else:\n # no pagination\n _write_file(template, localcontext, self.output_path, name,\n override_output)\n", "path": "pelican/writers.py"}]}
2,825
297
gh_patches_debug_12005
rasdani/github-patches
git_diff
chainer__chainer-722
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- `/usr/local/cuda/lib` in Linux for Tegra In CUDA for L4T (Linux for Tegra), shared objects are located in `/usr/local/cuda/lib`, not in `lib64`. I failed to install Chainer. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `chainer_setup_build.py` Content: ``` 1 from __future__ import print_function 2 import copy 3 import distutils 4 import os 5 from os import path 6 import pkg_resources 7 import shutil 8 import subprocess 9 import sys 10 import tempfile 11 12 import setuptools 13 from setuptools.command import build_ext 14 15 16 dummy_extension = setuptools.Extension('chainer', ['chainer.c']) 17 18 MODULES = [ 19 { 20 'name': 'cuda', 21 'file': [ 22 'cupy.core.core', 23 'cupy.core.flags', 24 'cupy.cuda.cublas', 25 'cupy.cuda.curand', 26 'cupy.cuda.device', 27 'cupy.cuda.driver', 28 'cupy.cuda.memory', 29 'cupy.cuda.function', 30 'cupy.cuda.runtime', 31 'cupy.util', 32 ], 33 'include': [ 34 'cublas_v2.h', 35 'cuda.h', 36 'cuda_runtime.h', 37 'curand.h', 38 ], 39 'libraries': [ 40 'cublas', 41 'cuda', 42 'cudart', 43 'curand', 44 ], 45 }, 46 { 47 'name': 'cudnn', 48 'file': [ 49 'cupy.cuda.cudnn', 50 ], 51 'include': [ 52 'cudnn.h', 53 ], 54 'libraries': [ 55 'cudnn', 56 ], 57 } 58 ] 59 60 61 def get_compiler_setting(): 62 nvcc_path = search_on_path(('nvcc', 'nvcc.exe')) 63 cuda_path_default = None 64 if nvcc_path is None: 65 print('**************************************************************') 66 print('*** WARNING: nvcc not in path.') 67 print('*** WARNING: Please set path to nvcc.') 68 print('**************************************************************') 69 else: 70 cuda_path_default = path.normpath( 71 path.join(path.dirname(nvcc_path), '..')) 72 73 cuda_path = os.environ.get('CUDA_PATH', '') # Nvidia default on Windows 74 if len(cuda_path) > 0 and cuda_path != cuda_path_default: 75 print('**************************************************************') 76 print('*** WARNING: nvcc path != CUDA_PATH') 77 print('*** WARNING: nvcc path: %s', cuda_path_default) 78 print('*** WARNING: CUDA_PATH: %s', cuda_path) 79 print('**************************************************************') 80 81 if not path.exists(cuda_path): 82 cuda_path = cuda_path_default 83 84 if not cuda_path and path.exists('/usr/local/cuda'): 85 cuda_path = '/usr/local/cuda' 86 87 include_dirs = [] 88 library_dirs = [] 89 define_macros = [] 90 91 if cuda_path: 92 include_dirs.append(path.join(cuda_path, 'include')) 93 if sys.platform == 'win32': 94 library_dirs.append(path.join(cuda_path, 'bin')) 95 library_dirs.append(path.join(cuda_path, 'lib', 'x64')) 96 elif sys.platform == 'darwin': 97 library_dirs.append(path.join(cuda_path, 'lib')) 98 else: 99 library_dirs.append(path.join(cuda_path, 'lib64')) 100 if sys.platform == 'darwin': 101 library_dirs.append('/usr/local/cuda/lib') 102 103 return { 104 'include_dirs': include_dirs, 105 'library_dirs': library_dirs, 106 'define_macros': define_macros, 107 'language': 'c++', 108 } 109 110 111 def localpath(*args): 112 return path.abspath(path.join(path.dirname(__file__), *args)) 113 114 115 def get_path(key): 116 return os.environ.get(key, '').split(os.pathsep) 117 118 119 def search_on_path(filenames): 120 for p in get_path('PATH'): 121 for filename in filenames: 122 full = path.join(p, filename) 123 if path.exists(full): 124 return path.abspath(full) 125 126 127 def 
check_include(dirs, file_path): 128 return any(path.exists(path.join(dir, file_path)) for dir in dirs) 129 130 131 def check_readthedocs_environment(): 132 return os.environ.get('READTHEDOCS', None) == 'True' 133 134 135 def check_library(compiler, includes=[], libraries=[], 136 include_dirs=[], library_dirs=[]): 137 temp_dir = tempfile.mkdtemp() 138 139 try: 140 source = ''' 141 int main(int argc, char* argv[]) { 142 return 0; 143 } 144 ''' 145 fname = os.path.join(temp_dir, 'a.cpp') 146 with open(fname, 'w') as f: 147 for header in includes: 148 f.write('#include <%s>\n' % header) 149 f.write(source) 150 151 try: 152 objects = compiler.compile([fname], output_dir=temp_dir, 153 include_dirs=include_dirs) 154 except distutils.errors.CompileError: 155 return False 156 157 try: 158 compiler.link_shared_lib(objects, 159 os.path.join(temp_dir, 'a'), 160 libraries=libraries, 161 library_dirs=library_dirs) 162 except (distutils.errors.LinkError, TypeError): 163 return False 164 165 return True 166 167 finally: 168 shutil.rmtree(temp_dir, ignore_errors=True) 169 170 171 def make_extensions(options, compiler): 172 173 """Produce a list of Extension instances which passed to cythonize().""" 174 175 no_cuda = options['no_cuda'] 176 settings = get_compiler_setting() 177 178 try: 179 import numpy 180 numpy_include = numpy.get_include() 181 except AttributeError: 182 # if numpy is not installed get the headers from the .egg directory 183 import numpy.core 184 numpy_include = path.join( 185 path.dirname(numpy.core.__file__), 'include') 186 include_dirs = settings['include_dirs'] 187 include_dirs.append(numpy_include) 188 189 settings['include_dirs'] = [ 190 x for x in include_dirs if path.exists(x)] 191 settings['library_dirs'] = [ 192 x for x in settings['library_dirs'] if path.exists(x)] 193 if sys.platform != 'win32': 194 settings['runtime_library_dirs'] = settings['library_dirs'] 195 196 if options['linetrace']: 197 settings['define_macros'].append(('CYTHON_TRACE', '1')) 198 settings['define_macros'].append(('CYTHON_TRACE_NOGIL', '1')) 199 if no_cuda: 200 settings['define_macros'].append(('CUPY_NO_CUDA', '1')) 201 202 ret = [] 203 for module in MODULES: 204 print('Include directories:', settings['include_dirs']) 205 print('Library directories:', settings['library_dirs']) 206 207 if not no_cuda: 208 if not check_library(compiler, 209 includes=module['include'], 210 include_dirs=settings['include_dirs']): 211 print('**************************************************') 212 print('*** Include files not found: %s' % module['include']) 213 print('*** Skip installing %s support' % module['name']) 214 print('*** Check your CPATH environment variable') 215 print('**************************************************') 216 continue 217 218 if not check_library(compiler, 219 libraries=module['libraries'], 220 library_dirs=settings['library_dirs']): 221 print('**************************************************') 222 print('*** Cannot link libraries: %s' % module['libraries']) 223 print('*** Skip installing %s support' % module['name']) 224 print('*** Check your LIBRARY_PATH environment variable') 225 print('**************************************************') 226 continue 227 228 s = settings.copy() 229 if not no_cuda: 230 s['libraries'] = module['libraries'] 231 ret.extend([ 232 setuptools.Extension(f, [path.join(*f.split('.')) + '.pyx'], **s) 233 for f in module['file']]) 234 return ret 235 236 237 _arg_options = {} 238 239 240 def parse_args(): 241 global _arg_options 242 _arg_options['profile'] = 
'--cupy-profile' in sys.argv 243 if _arg_options['profile']: 244 sys.argv.remove('--cupy-profile') 245 246 cupy_coverage = '--cupy-coverage' in sys.argv 247 if cupy_coverage: 248 sys.argv.remove('--cupy-coverage') 249 _arg_options['linetrace'] = cupy_coverage 250 _arg_options['annotate'] = cupy_coverage 251 252 _arg_options['no_cuda'] = '--cupy-no-cuda' in sys.argv 253 if _arg_options['no_cuda']: 254 sys.argv.remove('--cupy-no-cuda') 255 if check_readthedocs_environment(): 256 _arg_options['no_cuda'] = True 257 258 259 def cythonize(extensions, force=False, annotate=False, compiler_directives={}): 260 cython_pkg = pkg_resources.get_distribution('cython') 261 cython_path = path.join(cython_pkg.location, 'cython.py') 262 print("cython path:%s" % cython_pkg.location) 263 cython_cmdbase = [sys.executable, cython_path] 264 subprocess.check_call(cython_cmdbase + ['--version']) 265 266 cython_cmdbase.extend(['--fast-fail', '--verbose', '--cplus']) 267 ret = [] 268 for ext in extensions: 269 cmd = list(cython_cmdbase) 270 for i in compiler_directives.items(): 271 cmd.append('--directive') 272 cmd.append('%s=%s' % i) 273 cpp_files = [path.splitext(f)[0] + ".cpp" for f in ext.sources] 274 cmd += ext.sources 275 subprocess.check_call(cmd) 276 ext = copy.copy(ext) 277 ext.sources = cpp_files 278 ret.append(ext) 279 return ret 280 281 282 class chainer_build_ext(build_ext.build_ext): 283 284 """`build_ext` command for cython files.""" 285 286 def finalize_options(self): 287 ext_modules = self.distribution.ext_modules 288 if dummy_extension in ext_modules: 289 print('Executing cythonize()') 290 print('Options:', _arg_options) 291 292 directive_keys = ('linetrace', 'profile') 293 directives = {key: _arg_options[key] for key in directive_keys} 294 295 cythonize_option_keys = ('annotate',) 296 cythonize_options = { 297 key: _arg_options[key] for key in cythonize_option_keys} 298 299 compiler = distutils.ccompiler.new_compiler(self.compiler) 300 distutils.sysconfig.customize_compiler(compiler) 301 302 extensions = make_extensions(_arg_options, compiler) 303 extensions = cythonize( 304 extensions, 305 force=True, 306 compiler_directives=directives, 307 **cythonize_options) 308 309 # Modify ext_modules for cython 310 ext_modules.remove(dummy_extension) 311 ext_modules.extend(extensions) 312 313 build_ext.build_ext.finalize_options(self) 314 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/chainer_setup_build.py b/chainer_setup_build.py --- a/chainer_setup_build.py +++ b/chainer_setup_build.py @@ -93,10 +93,9 @@ if sys.platform == 'win32': library_dirs.append(path.join(cuda_path, 'bin')) library_dirs.append(path.join(cuda_path, 'lib', 'x64')) - elif sys.platform == 'darwin': - library_dirs.append(path.join(cuda_path, 'lib')) else: library_dirs.append(path.join(cuda_path, 'lib64')) + library_dirs.append(path.join(cuda_path, 'lib')) if sys.platform == 'darwin': library_dirs.append('/usr/local/cuda/lib')
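The patch drops the separate darwin branch and appends both `lib64` and `lib` for every non-Windows platform; `make_extensions` later keeps only the directories that actually exist, so a missing `lib64` on Linux for Tegra is harmless. A toy sketch of that search order — not the actual `chainer_setup_build` code, and with the existence filter folded in for illustration:

```python
import sys
from os import path

def cuda_library_dirs(cuda_path):
    dirs = []
    if sys.platform == "win32":
        dirs.append(path.join(cuda_path, "bin"))
        dirs.append(path.join(cuda_path, "lib", "x64"))
    else:
        # Desktop Linux ships lib64, while Linux for Tegra and OS X ship lib;
        # appending both is safe because paths that do not exist are dropped.
        dirs.append(path.join(cuda_path, "lib64"))
        dirs.append(path.join(cuda_path, "lib"))
    return [d for d in dirs if path.exists(d)]

print(cuda_library_dirs("/usr/local/cuda"))
# e.g. ['/usr/local/cuda/lib'] on L4T, ['/usr/local/cuda/lib64'] on desktop Linux
```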
{"golden_diff": "diff --git a/chainer_setup_build.py b/chainer_setup_build.py\n--- a/chainer_setup_build.py\n+++ b/chainer_setup_build.py\n@@ -93,10 +93,9 @@\n if sys.platform == 'win32':\n library_dirs.append(path.join(cuda_path, 'bin'))\n library_dirs.append(path.join(cuda_path, 'lib', 'x64'))\n- elif sys.platform == 'darwin':\n- library_dirs.append(path.join(cuda_path, 'lib'))\n else:\n library_dirs.append(path.join(cuda_path, 'lib64'))\n+ library_dirs.append(path.join(cuda_path, 'lib'))\n if sys.platform == 'darwin':\n library_dirs.append('/usr/local/cuda/lib')\n", "issue": "`/usr/local/cuda/lib` in Linux for Tegra\nIn CUDA for L4T (Linux for Tegra), shared objects are located in `/usr/local/cuda/lib`, not in `lib64`. I failed to install Chainer.\n\n", "before_files": [{"content": "from __future__ import print_function\nimport copy\nimport distutils\nimport os\nfrom os import path\nimport pkg_resources\nimport shutil\nimport subprocess\nimport sys\nimport tempfile\n\nimport setuptools\nfrom setuptools.command import build_ext\n\n\ndummy_extension = setuptools.Extension('chainer', ['chainer.c'])\n\nMODULES = [\n {\n 'name': 'cuda',\n 'file': [\n 'cupy.core.core',\n 'cupy.core.flags',\n 'cupy.cuda.cublas',\n 'cupy.cuda.curand',\n 'cupy.cuda.device',\n 'cupy.cuda.driver',\n 'cupy.cuda.memory',\n 'cupy.cuda.function',\n 'cupy.cuda.runtime',\n 'cupy.util',\n ],\n 'include': [\n 'cublas_v2.h',\n 'cuda.h',\n 'cuda_runtime.h',\n 'curand.h',\n ],\n 'libraries': [\n 'cublas',\n 'cuda',\n 'cudart',\n 'curand',\n ],\n },\n {\n 'name': 'cudnn',\n 'file': [\n 'cupy.cuda.cudnn',\n ],\n 'include': [\n 'cudnn.h',\n ],\n 'libraries': [\n 'cudnn',\n ],\n }\n]\n\n\ndef get_compiler_setting():\n nvcc_path = search_on_path(('nvcc', 'nvcc.exe'))\n cuda_path_default = None\n if nvcc_path is None:\n print('**************************************************************')\n print('*** WARNING: nvcc not in path.')\n print('*** WARNING: Please set path to nvcc.')\n print('**************************************************************')\n else:\n cuda_path_default = path.normpath(\n path.join(path.dirname(nvcc_path), '..'))\n\n cuda_path = os.environ.get('CUDA_PATH', '') # Nvidia default on Windows\n if len(cuda_path) > 0 and cuda_path != cuda_path_default:\n print('**************************************************************')\n print('*** WARNING: nvcc path != CUDA_PATH')\n print('*** WARNING: nvcc path: %s', cuda_path_default)\n print('*** WARNING: CUDA_PATH: %s', cuda_path)\n print('**************************************************************')\n\n if not path.exists(cuda_path):\n cuda_path = cuda_path_default\n\n if not cuda_path and path.exists('/usr/local/cuda'):\n cuda_path = '/usr/local/cuda'\n\n include_dirs = []\n library_dirs = []\n define_macros = []\n\n if cuda_path:\n include_dirs.append(path.join(cuda_path, 'include'))\n if sys.platform == 'win32':\n library_dirs.append(path.join(cuda_path, 'bin'))\n library_dirs.append(path.join(cuda_path, 'lib', 'x64'))\n elif sys.platform == 'darwin':\n library_dirs.append(path.join(cuda_path, 'lib'))\n else:\n library_dirs.append(path.join(cuda_path, 'lib64'))\n if sys.platform == 'darwin':\n library_dirs.append('/usr/local/cuda/lib')\n\n return {\n 'include_dirs': include_dirs,\n 'library_dirs': library_dirs,\n 'define_macros': define_macros,\n 'language': 'c++',\n }\n\n\ndef localpath(*args):\n return path.abspath(path.join(path.dirname(__file__), *args))\n\n\ndef get_path(key):\n return os.environ.get(key, '').split(os.pathsep)\n\n\ndef 
search_on_path(filenames):\n for p in get_path('PATH'):\n for filename in filenames:\n full = path.join(p, filename)\n if path.exists(full):\n return path.abspath(full)\n\n\ndef check_include(dirs, file_path):\n return any(path.exists(path.join(dir, file_path)) for dir in dirs)\n\n\ndef check_readthedocs_environment():\n return os.environ.get('READTHEDOCS', None) == 'True'\n\n\ndef check_library(compiler, includes=[], libraries=[],\n include_dirs=[], library_dirs=[]):\n temp_dir = tempfile.mkdtemp()\n\n try:\n source = '''\n int main(int argc, char* argv[]) {\n return 0;\n }\n '''\n fname = os.path.join(temp_dir, 'a.cpp')\n with open(fname, 'w') as f:\n for header in includes:\n f.write('#include <%s>\\n' % header)\n f.write(source)\n\n try:\n objects = compiler.compile([fname], output_dir=temp_dir,\n include_dirs=include_dirs)\n except distutils.errors.CompileError:\n return False\n\n try:\n compiler.link_shared_lib(objects,\n os.path.join(temp_dir, 'a'),\n libraries=libraries,\n library_dirs=library_dirs)\n except (distutils.errors.LinkError, TypeError):\n return False\n\n return True\n\n finally:\n shutil.rmtree(temp_dir, ignore_errors=True)\n\n\ndef make_extensions(options, compiler):\n\n \"\"\"Produce a list of Extension instances which passed to cythonize().\"\"\"\n\n no_cuda = options['no_cuda']\n settings = get_compiler_setting()\n\n try:\n import numpy\n numpy_include = numpy.get_include()\n except AttributeError:\n # if numpy is not installed get the headers from the .egg directory\n import numpy.core\n numpy_include = path.join(\n path.dirname(numpy.core.__file__), 'include')\n include_dirs = settings['include_dirs']\n include_dirs.append(numpy_include)\n\n settings['include_dirs'] = [\n x for x in include_dirs if path.exists(x)]\n settings['library_dirs'] = [\n x for x in settings['library_dirs'] if path.exists(x)]\n if sys.platform != 'win32':\n settings['runtime_library_dirs'] = settings['library_dirs']\n\n if options['linetrace']:\n settings['define_macros'].append(('CYTHON_TRACE', '1'))\n settings['define_macros'].append(('CYTHON_TRACE_NOGIL', '1'))\n if no_cuda:\n settings['define_macros'].append(('CUPY_NO_CUDA', '1'))\n\n ret = []\n for module in MODULES:\n print('Include directories:', settings['include_dirs'])\n print('Library directories:', settings['library_dirs'])\n\n if not no_cuda:\n if not check_library(compiler,\n includes=module['include'],\n include_dirs=settings['include_dirs']):\n print('**************************************************')\n print('*** Include files not found: %s' % module['include'])\n print('*** Skip installing %s support' % module['name'])\n print('*** Check your CPATH environment variable')\n print('**************************************************')\n continue\n\n if not check_library(compiler,\n libraries=module['libraries'],\n library_dirs=settings['library_dirs']):\n print('**************************************************')\n print('*** Cannot link libraries: %s' % module['libraries'])\n print('*** Skip installing %s support' % module['name'])\n print('*** Check your LIBRARY_PATH environment variable')\n print('**************************************************')\n continue\n\n s = settings.copy()\n if not no_cuda:\n s['libraries'] = module['libraries']\n ret.extend([\n setuptools.Extension(f, [path.join(*f.split('.')) + '.pyx'], **s)\n for f in module['file']])\n return ret\n\n\n_arg_options = {}\n\n\ndef parse_args():\n global _arg_options\n _arg_options['profile'] = '--cupy-profile' in sys.argv\n if _arg_options['profile']:\n 
sys.argv.remove('--cupy-profile')\n\n cupy_coverage = '--cupy-coverage' in sys.argv\n if cupy_coverage:\n sys.argv.remove('--cupy-coverage')\n _arg_options['linetrace'] = cupy_coverage\n _arg_options['annotate'] = cupy_coverage\n\n _arg_options['no_cuda'] = '--cupy-no-cuda' in sys.argv\n if _arg_options['no_cuda']:\n sys.argv.remove('--cupy-no-cuda')\n if check_readthedocs_environment():\n _arg_options['no_cuda'] = True\n\n\ndef cythonize(extensions, force=False, annotate=False, compiler_directives={}):\n cython_pkg = pkg_resources.get_distribution('cython')\n cython_path = path.join(cython_pkg.location, 'cython.py')\n print(\"cython path:%s\" % cython_pkg.location)\n cython_cmdbase = [sys.executable, cython_path]\n subprocess.check_call(cython_cmdbase + ['--version'])\n\n cython_cmdbase.extend(['--fast-fail', '--verbose', '--cplus'])\n ret = []\n for ext in extensions:\n cmd = list(cython_cmdbase)\n for i in compiler_directives.items():\n cmd.append('--directive')\n cmd.append('%s=%s' % i)\n cpp_files = [path.splitext(f)[0] + \".cpp\" for f in ext.sources]\n cmd += ext.sources\n subprocess.check_call(cmd)\n ext = copy.copy(ext)\n ext.sources = cpp_files\n ret.append(ext)\n return ret\n\n\nclass chainer_build_ext(build_ext.build_ext):\n\n \"\"\"`build_ext` command for cython files.\"\"\"\n\n def finalize_options(self):\n ext_modules = self.distribution.ext_modules\n if dummy_extension in ext_modules:\n print('Executing cythonize()')\n print('Options:', _arg_options)\n\n directive_keys = ('linetrace', 'profile')\n directives = {key: _arg_options[key] for key in directive_keys}\n\n cythonize_option_keys = ('annotate',)\n cythonize_options = {\n key: _arg_options[key] for key in cythonize_option_keys}\n\n compiler = distutils.ccompiler.new_compiler(self.compiler)\n distutils.sysconfig.customize_compiler(compiler)\n\n extensions = make_extensions(_arg_options, compiler)\n extensions = cythonize(\n extensions,\n force=True,\n compiler_directives=directives,\n **cythonize_options)\n\n # Modify ext_modules for cython\n ext_modules.remove(dummy_extension)\n ext_modules.extend(extensions)\n\n build_ext.build_ext.finalize_options(self)\n", "path": "chainer_setup_build.py"}], "after_files": [{"content": "from __future__ import print_function\nimport copy\nimport distutils\nimport os\nfrom os import path\nimport pkg_resources\nimport shutil\nimport subprocess\nimport sys\nimport tempfile\n\nimport setuptools\nfrom setuptools.command import build_ext\n\n\ndummy_extension = setuptools.Extension('chainer', ['chainer.c'])\n\nMODULES = [\n {\n 'name': 'cuda',\n 'file': [\n 'cupy.core.core',\n 'cupy.core.flags',\n 'cupy.cuda.cublas',\n 'cupy.cuda.curand',\n 'cupy.cuda.device',\n 'cupy.cuda.driver',\n 'cupy.cuda.memory',\n 'cupy.cuda.function',\n 'cupy.cuda.runtime',\n 'cupy.util',\n ],\n 'include': [\n 'cublas_v2.h',\n 'cuda.h',\n 'cuda_runtime.h',\n 'curand.h',\n ],\n 'libraries': [\n 'cublas',\n 'cuda',\n 'cudart',\n 'curand',\n ],\n },\n {\n 'name': 'cudnn',\n 'file': [\n 'cupy.cuda.cudnn',\n ],\n 'include': [\n 'cudnn.h',\n ],\n 'libraries': [\n 'cudnn',\n ],\n }\n]\n\n\ndef get_compiler_setting():\n nvcc_path = search_on_path(('nvcc', 'nvcc.exe'))\n cuda_path_default = None\n if nvcc_path is None:\n print('**************************************************************')\n print('*** WARNING: nvcc not in path.')\n print('*** WARNING: Please set path to nvcc.')\n print('**************************************************************')\n else:\n cuda_path_default = path.normpath(\n 
path.join(path.dirname(nvcc_path), '..'))\n\n cuda_path = os.environ.get('CUDA_PATH', '') # Nvidia default on Windows\n if len(cuda_path) > 0 and cuda_path != cuda_path_default:\n print('**************************************************************')\n print('*** WARNING: nvcc path != CUDA_PATH')\n print('*** WARNING: nvcc path: %s', cuda_path_default)\n print('*** WARNING: CUDA_PATH: %s', cuda_path)\n print('**************************************************************')\n\n if not path.exists(cuda_path):\n cuda_path = cuda_path_default\n\n if not cuda_path and path.exists('/usr/local/cuda'):\n cuda_path = '/usr/local/cuda'\n\n include_dirs = []\n library_dirs = []\n define_macros = []\n\n if cuda_path:\n include_dirs.append(path.join(cuda_path, 'include'))\n if sys.platform == 'win32':\n library_dirs.append(path.join(cuda_path, 'bin'))\n library_dirs.append(path.join(cuda_path, 'lib', 'x64'))\n else:\n library_dirs.append(path.join(cuda_path, 'lib64'))\n library_dirs.append(path.join(cuda_path, 'lib'))\n if sys.platform == 'darwin':\n library_dirs.append('/usr/local/cuda/lib')\n\n return {\n 'include_dirs': include_dirs,\n 'library_dirs': library_dirs,\n 'define_macros': define_macros,\n 'language': 'c++',\n }\n\n\ndef localpath(*args):\n return path.abspath(path.join(path.dirname(__file__), *args))\n\n\ndef get_path(key):\n return os.environ.get(key, '').split(os.pathsep)\n\n\ndef search_on_path(filenames):\n for p in get_path('PATH'):\n for filename in filenames:\n full = path.join(p, filename)\n if path.exists(full):\n return path.abspath(full)\n\n\ndef check_include(dirs, file_path):\n return any(path.exists(path.join(dir, file_path)) for dir in dirs)\n\n\ndef check_readthedocs_environment():\n return os.environ.get('READTHEDOCS', None) == 'True'\n\n\ndef check_library(compiler, includes=[], libraries=[],\n include_dirs=[], library_dirs=[]):\n temp_dir = tempfile.mkdtemp()\n\n try:\n source = '''\n int main(int argc, char* argv[]) {\n return 0;\n }\n '''\n fname = os.path.join(temp_dir, 'a.cpp')\n with open(fname, 'w') as f:\n for header in includes:\n f.write('#include <%s>\\n' % header)\n f.write(source)\n\n try:\n objects = compiler.compile([fname], output_dir=temp_dir,\n include_dirs=include_dirs)\n except distutils.errors.CompileError:\n return False\n\n try:\n compiler.link_shared_lib(objects,\n os.path.join(temp_dir, 'a'),\n libraries=libraries,\n library_dirs=library_dirs)\n except (distutils.errors.LinkError, TypeError):\n return False\n\n return True\n\n finally:\n shutil.rmtree(temp_dir, ignore_errors=True)\n\n\ndef make_extensions(options, compiler):\n\n \"\"\"Produce a list of Extension instances which passed to cythonize().\"\"\"\n\n no_cuda = options['no_cuda']\n settings = get_compiler_setting()\n\n try:\n import numpy\n numpy_include = numpy.get_include()\n except AttributeError:\n # if numpy is not installed get the headers from the .egg directory\n import numpy.core\n numpy_include = path.join(\n path.dirname(numpy.core.__file__), 'include')\n include_dirs = settings['include_dirs']\n include_dirs.append(numpy_include)\n\n settings['include_dirs'] = [\n x for x in include_dirs if path.exists(x)]\n settings['library_dirs'] = [\n x for x in settings['library_dirs'] if path.exists(x)]\n if sys.platform != 'win32':\n settings['runtime_library_dirs'] = settings['library_dirs']\n\n if options['linetrace']:\n settings['define_macros'].append(('CYTHON_TRACE', '1'))\n settings['define_macros'].append(('CYTHON_TRACE_NOGIL', '1'))\n if no_cuda:\n 
settings['define_macros'].append(('CUPY_NO_CUDA', '1'))\n\n ret = []\n for module in MODULES:\n print('Include directories:', settings['include_dirs'])\n print('Library directories:', settings['library_dirs'])\n\n if not no_cuda:\n if not check_library(compiler,\n includes=module['include'],\n include_dirs=settings['include_dirs']):\n print('**************************************************')\n print('*** Include files not found: %s' % module['include'])\n print('*** Skip installing %s support' % module['name'])\n print('*** Check your CPATH environment variable')\n print('**************************************************')\n continue\n\n if not check_library(compiler,\n libraries=module['libraries'],\n library_dirs=settings['library_dirs']):\n print('**************************************************')\n print('*** Cannot link libraries: %s' % module['libraries'])\n print('*** Skip installing %s support' % module['name'])\n print('*** Check your LIBRARY_PATH environment variable')\n print('**************************************************')\n continue\n\n s = settings.copy()\n if not no_cuda:\n s['libraries'] = module['libraries']\n ret.extend([\n setuptools.Extension(f, [path.join(*f.split('.')) + '.pyx'], **s)\n for f in module['file']])\n return ret\n\n\n_arg_options = {}\n\n\ndef parse_args():\n global _arg_options\n _arg_options['profile'] = '--cupy-profile' in sys.argv\n if _arg_options['profile']:\n sys.argv.remove('--cupy-profile')\n\n cupy_coverage = '--cupy-coverage' in sys.argv\n if cupy_coverage:\n sys.argv.remove('--cupy-coverage')\n _arg_options['linetrace'] = cupy_coverage\n _arg_options['annotate'] = cupy_coverage\n\n _arg_options['no_cuda'] = '--cupy-no-cuda' in sys.argv\n if _arg_options['no_cuda']:\n sys.argv.remove('--cupy-no-cuda')\n if check_readthedocs_environment():\n _arg_options['no_cuda'] = True\n\n\ndef cythonize(extensions, force=False, annotate=False, compiler_directives={}):\n cython_pkg = pkg_resources.get_distribution('cython')\n cython_path = path.join(cython_pkg.location, 'cython.py')\n print(\"cython path:%s\" % cython_pkg.location)\n cython_cmdbase = [sys.executable, cython_path]\n subprocess.check_call(cython_cmdbase + ['--version'])\n\n cython_cmdbase.extend(['--fast-fail', '--verbose', '--cplus'])\n ret = []\n for ext in extensions:\n cmd = list(cython_cmdbase)\n for i in compiler_directives.items():\n cmd.append('--directive')\n cmd.append('%s=%s' % i)\n cpp_files = [path.splitext(f)[0] + \".cpp\" for f in ext.sources]\n cmd += ext.sources\n subprocess.check_call(cmd)\n ext = copy.copy(ext)\n ext.sources = cpp_files\n ret.append(ext)\n return ret\n\n\nclass chainer_build_ext(build_ext.build_ext):\n\n \"\"\"`build_ext` command for cython files.\"\"\"\n\n def finalize_options(self):\n ext_modules = self.distribution.ext_modules\n if dummy_extension in ext_modules:\n print('Executing cythonize()')\n print('Options:', _arg_options)\n\n directive_keys = ('linetrace', 'profile')\n directives = {key: _arg_options[key] for key in directive_keys}\n\n cythonize_option_keys = ('annotate',)\n cythonize_options = {\n key: _arg_options[key] for key in cythonize_option_keys}\n\n compiler = distutils.ccompiler.new_compiler(self.compiler)\n distutils.sysconfig.customize_compiler(compiler)\n\n extensions = make_extensions(_arg_options, compiler)\n extensions = cythonize(\n extensions,\n force=True,\n compiler_directives=directives,\n **cythonize_options)\n\n # Modify ext_modules for cython\n ext_modules.remove(dummy_extension)\n ext_modules.extend(extensions)\n\n 
build_ext.build_ext.finalize_options(self)\n", "path": "chainer_setup_build.py"}]}
3,313
155
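The chainer record above reduces to one change in `get_compiler_setting`: instead of special-casing macOS, every non-Windows platform gets both `<cuda_path>/lib64` (desktop Linux) and `<cuda_path>/lib` (Linux for Tegra and macOS layouts), and the existing `path.exists` filter in `make_extensions` discards whichever directory is missing. A minimal standalone sketch of that probing logic, assuming nothing beyond the standard library; the function name and the inline filtering are illustrative and not taken from `chainer_setup_build.py`:

```python
import sys
from os import path


def cuda_library_dirs(cuda_path):
    """Candidate CUDA library directories, mirroring the patched selection order."""
    dirs = []
    if sys.platform == 'win32':
        dirs.append(path.join(cuda_path, 'bin'))
        dirs.append(path.join(cuda_path, 'lib', 'x64'))
    else:
        # Desktop Linux ships CUDA shared objects under lib64, Linux for Tegra
        # (Jetson boards) under lib; appending both serves both layouts.
        dirs.append(path.join(cuda_path, 'lib64'))
        dirs.append(path.join(cuda_path, 'lib'))
    if sys.platform == 'darwin':
        dirs.append('/usr/local/cuda/lib')
    # The real setup script filters non-existent paths later; doing it here
    # keeps the sketch self-contained.
    return [d for d in dirs if path.exists(d)]


print(cuda_library_dirs('/usr/local/cuda'))
```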
gh_patches_debug_19462
rasdani/github-patches
git_diff
sublimelsp__LSP-1997
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- goto commands don't restore selection when location picking is canceled **Describe the bug** when there's more than one location available for a `goto*` command, a quick panel is shown to pick. highlighting entries modifies the selection, canceling the operation doesn't restore the initial selection. **Expected behavior** it should restore the selection, like ST's built-in **Screenshots** ![lsp_goto_cancel_bug](https://user-images.githubusercontent.com/33235928/174485692-f6eb2c91-e309-464d-b44e-9dad258a58e3.gif) **Environment (please complete the following information):** - OS: Windows 10 - Sublime Text version: 4126 - LSP version: 1.16.3 - Language servers used: LSP-rust-analyzer **Additional context** Add any other context about the problem here. For example, whether you're using a helper package or your manual server configuration in LSP.sublime-settings. When using a manual server configuration please include it here if you believe it's applicable. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `plugin/locationpicker.py` Content: ``` 1 from .core.logging import debug 2 from .core.protocol import DocumentUri, Location, Position 3 from .core.protocol import LocationLink 4 from .core.sessions import Session 5 from .core.typing import Union, List, Optional, Tuple 6 from .core.views import get_uri_and_position_from_location 7 from .core.views import location_to_human_readable 8 from .core.views import to_encoded_filename 9 import functools 10 import sublime 11 import weakref 12 13 14 def open_location_async( 15 session: Session, 16 location: Union[Location, LocationLink], 17 side_by_side: bool, 18 force_group: bool 19 ) -> None: 20 flags = sublime.ENCODED_POSITION 21 if force_group: 22 flags |= sublime.FORCE_GROUP 23 if side_by_side: 24 flags |= sublime.ADD_TO_SELECTION | sublime.SEMI_TRANSIENT 25 26 def check_success_async(view: Optional[sublime.View]) -> None: 27 if not view: 28 sublime.error_message("Unable to open URI") 29 30 session.open_location_async(location, flags).then(check_success_async) 31 32 33 def open_basic_file( 34 session: Session, 35 uri: str, 36 position: Position, 37 flags: int = 0, 38 group: Optional[int] = None 39 ) -> sublime.View: 40 filename = session.config.map_server_uri_to_client_path(uri) 41 if group is None: 42 group = session.window.active_group() 43 return session.window.open_file(to_encoded_filename(filename, position), flags=flags, group=group) 44 45 46 class LocationPicker: 47 48 def __init__( 49 self, 50 view: sublime.View, 51 session: Session, 52 locations: Union[List[Location], List[LocationLink]], 53 side_by_side: bool 54 ) -> None: 55 self._view = view 56 window = view.window() 57 if not window: 58 raise ValueError("missing window") 59 self._window = window 60 self._weaksession = weakref.ref(session) 61 self._side_by_side = side_by_side 62 self._items = locations 63 self._highlighted_view = None # type: Optional[sublime.View] 64 manager = session.manager() 65 base_dir = manager.get_project_path(view.file_name() or "") if manager else None 66 self._window.show_quick_panel( 67 items=[location_to_human_readable(session.config, base_dir, location) for location in locations], 68 on_select=self._select_entry, 69 on_highlight=self._highlight_entry, 70 flags=sublime.KEEP_OPEN_ON_FOCUS_LOST 71 ) 72 73 def _unpack(self, index: int) -> 
Tuple[Optional[Session], Union[Location, LocationLink], DocumentUri, Position]: 74 location = self._items[index] 75 uri, position = get_uri_and_position_from_location(location) 76 return self._weaksession(), location, uri, position 77 78 def _select_entry(self, index: int) -> None: 79 if index >= 0 and self._view.is_valid(): 80 session, location, uri, position = self._unpack(index) 81 if not session: 82 return 83 # Note: this has to run on the main thread (and not via open_location_async) 84 # otherwise the bevior feels weird. It's the only reason why open_basic_file exists. 85 if uri.startswith("file:"): 86 flags = sublime.ENCODED_POSITION 87 if not self._side_by_side: 88 open_basic_file(session, uri, position, flags) 89 else: 90 sublime.set_timeout_async( 91 functools.partial(open_location_async, session, location, self._side_by_side, True)) 92 else: 93 self._window.focus_view(self._view) 94 # When in side-by-side mode close the current highlighted 95 # sheet upon canceling if the sheet is semi-transient 96 if self._side_by_side and self._highlighted_view: 97 sheet = self._highlighted_view.sheet() 98 if sheet and sheet.is_semi_transient(): 99 self._highlighted_view.close() 100 101 def _highlight_entry(self, index: int) -> None: 102 session, _, uri, position = self._unpack(index) 103 if not session: 104 return 105 if uri.startswith("file:"): 106 flags = sublime.ENCODED_POSITION | sublime.FORCE_GROUP 107 if self._side_by_side: 108 if self._highlighted_view and self._highlighted_view.is_valid(): 109 # Replacing the MRU is done relative to the current highlighted sheet 110 self._window.focus_view(self._highlighted_view) 111 flags |= sublime.REPLACE_MRU | sublime.SEMI_TRANSIENT 112 else: 113 flags |= sublime.ADD_TO_SELECTION | sublime.SEMI_TRANSIENT 114 else: 115 flags |= sublime.TRANSIENT 116 self._highlighted_view = open_basic_file(session, uri, position, flags, self._window.active_group()) 117 else: 118 # TODO: Preview non-file uris? 119 debug("no preview for", uri) 120 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/plugin/locationpicker.py b/plugin/locationpicker.py --- a/plugin/locationpicker.py +++ b/plugin/locationpicker.py @@ -53,6 +53,7 @@ side_by_side: bool ) -> None: self._view = view + self._view_states = ([r.to_tuple() for r in view.sel()], view.viewport_position()) window = view.window() if not window: raise ValueError("missing window") @@ -76,6 +77,9 @@ return self._weaksession(), location, uri, position def _select_entry(self, index: int) -> None: + if self._view.is_valid() and not self._side_by_side: + self._view.set_viewport_position(self._view_states[1]) + self._view.run_command('lsp_selection_set', {'regions': self._view_states[0]}) if index >= 0 and self._view.is_valid(): session, location, uri, position = self._unpack(index) if not session:
{"golden_diff": "diff --git a/plugin/locationpicker.py b/plugin/locationpicker.py\n--- a/plugin/locationpicker.py\n+++ b/plugin/locationpicker.py\n@@ -53,6 +53,7 @@\n side_by_side: bool\n ) -> None:\n self._view = view\n+ self._view_states = ([r.to_tuple() for r in view.sel()], view.viewport_position())\n window = view.window()\n if not window:\n raise ValueError(\"missing window\")\n@@ -76,6 +77,9 @@\n return self._weaksession(), location, uri, position\n \n def _select_entry(self, index: int) -> None:\n+ if self._view.is_valid() and not self._side_by_side:\n+ self._view.set_viewport_position(self._view_states[1])\n+ self._view.run_command('lsp_selection_set', {'regions': self._view_states[0]})\n if index >= 0 and self._view.is_valid():\n session, location, uri, position = self._unpack(index)\n if not session:\n", "issue": "goto commands don't restore selection when location picking is canceled\n**Describe the bug**\r\nwhen there's more than one location available for a `goto*` command, a quick panel is shown to pick.\r\nhighlighting entries modifies the selection, canceling the operation doesn't restore the initial selection.\r\n\r\n**Expected behavior**\r\nit should restore the selection, like ST's built-in\r\n\r\n**Screenshots**\r\n![lsp_goto_cancel_bug](https://user-images.githubusercontent.com/33235928/174485692-f6eb2c91-e309-464d-b44e-9dad258a58e3.gif)\r\n\r\n**Environment (please complete the following information):**\r\n- OS: Windows 10\r\n- Sublime Text version: 4126\r\n- LSP version: 1.16.3\r\n- Language servers used: LSP-rust-analyzer\r\n\r\n**Additional context**\r\nAdd any other context about the problem here. For example, whether you're using a helper\r\npackage or your manual server configuration in LSP.sublime-settings. When using\r\na manual server configuration please include it here if you believe it's applicable.\r\n\n", "before_files": [{"content": "from .core.logging import debug\nfrom .core.protocol import DocumentUri, Location, Position\nfrom .core.protocol import LocationLink\nfrom .core.sessions import Session\nfrom .core.typing import Union, List, Optional, Tuple\nfrom .core.views import get_uri_and_position_from_location\nfrom .core.views import location_to_human_readable\nfrom .core.views import to_encoded_filename\nimport functools\nimport sublime\nimport weakref\n\n\ndef open_location_async(\n session: Session,\n location: Union[Location, LocationLink],\n side_by_side: bool,\n force_group: bool\n) -> None:\n flags = sublime.ENCODED_POSITION\n if force_group:\n flags |= sublime.FORCE_GROUP\n if side_by_side:\n flags |= sublime.ADD_TO_SELECTION | sublime.SEMI_TRANSIENT\n\n def check_success_async(view: Optional[sublime.View]) -> None:\n if not view:\n sublime.error_message(\"Unable to open URI\")\n\n session.open_location_async(location, flags).then(check_success_async)\n\n\ndef open_basic_file(\n session: Session,\n uri: str,\n position: Position,\n flags: int = 0,\n group: Optional[int] = None\n) -> sublime.View:\n filename = session.config.map_server_uri_to_client_path(uri)\n if group is None:\n group = session.window.active_group()\n return session.window.open_file(to_encoded_filename(filename, position), flags=flags, group=group)\n\n\nclass LocationPicker:\n\n def __init__(\n self,\n view: sublime.View,\n session: Session,\n locations: Union[List[Location], List[LocationLink]],\n side_by_side: bool\n ) -> None:\n self._view = view\n window = view.window()\n if not window:\n raise ValueError(\"missing window\")\n self._window = window\n self._weaksession = 
weakref.ref(session)\n self._side_by_side = side_by_side\n self._items = locations\n self._highlighted_view = None # type: Optional[sublime.View]\n manager = session.manager()\n base_dir = manager.get_project_path(view.file_name() or \"\") if manager else None\n self._window.show_quick_panel(\n items=[location_to_human_readable(session.config, base_dir, location) for location in locations],\n on_select=self._select_entry,\n on_highlight=self._highlight_entry,\n flags=sublime.KEEP_OPEN_ON_FOCUS_LOST\n )\n\n def _unpack(self, index: int) -> Tuple[Optional[Session], Union[Location, LocationLink], DocumentUri, Position]:\n location = self._items[index]\n uri, position = get_uri_and_position_from_location(location)\n return self._weaksession(), location, uri, position\n\n def _select_entry(self, index: int) -> None:\n if index >= 0 and self._view.is_valid():\n session, location, uri, position = self._unpack(index)\n if not session:\n return\n # Note: this has to run on the main thread (and not via open_location_async)\n # otherwise the bevior feels weird. It's the only reason why open_basic_file exists.\n if uri.startswith(\"file:\"):\n flags = sublime.ENCODED_POSITION\n if not self._side_by_side:\n open_basic_file(session, uri, position, flags)\n else:\n sublime.set_timeout_async(\n functools.partial(open_location_async, session, location, self._side_by_side, True))\n else:\n self._window.focus_view(self._view)\n # When in side-by-side mode close the current highlighted\n # sheet upon canceling if the sheet is semi-transient\n if self._side_by_side and self._highlighted_view:\n sheet = self._highlighted_view.sheet()\n if sheet and sheet.is_semi_transient():\n self._highlighted_view.close()\n\n def _highlight_entry(self, index: int) -> None:\n session, _, uri, position = self._unpack(index)\n if not session:\n return\n if uri.startswith(\"file:\"):\n flags = sublime.ENCODED_POSITION | sublime.FORCE_GROUP\n if self._side_by_side:\n if self._highlighted_view and self._highlighted_view.is_valid():\n # Replacing the MRU is done relative to the current highlighted sheet\n self._window.focus_view(self._highlighted_view)\n flags |= sublime.REPLACE_MRU | sublime.SEMI_TRANSIENT\n else:\n flags |= sublime.ADD_TO_SELECTION | sublime.SEMI_TRANSIENT\n else:\n flags |= sublime.TRANSIENT\n self._highlighted_view = open_basic_file(session, uri, position, flags, self._window.active_group())\n else:\n # TODO: Preview non-file uris?\n debug(\"no preview for\", uri)\n", "path": "plugin/locationpicker.py"}], "after_files": [{"content": "from .core.logging import debug\nfrom .core.protocol import DocumentUri, Location, Position\nfrom .core.protocol import LocationLink\nfrom .core.sessions import Session\nfrom .core.typing import Union, List, Optional, Tuple\nfrom .core.views import get_uri_and_position_from_location\nfrom .core.views import location_to_human_readable\nfrom .core.views import to_encoded_filename\nimport functools\nimport sublime\nimport weakref\n\n\ndef open_location_async(\n session: Session,\n location: Union[Location, LocationLink],\n side_by_side: bool,\n force_group: bool\n) -> None:\n flags = sublime.ENCODED_POSITION\n if force_group:\n flags |= sublime.FORCE_GROUP\n if side_by_side:\n flags |= sublime.ADD_TO_SELECTION | sublime.SEMI_TRANSIENT\n\n def check_success_async(view: Optional[sublime.View]) -> None:\n if not view:\n sublime.error_message(\"Unable to open URI\")\n\n session.open_location_async(location, flags).then(check_success_async)\n\n\ndef open_basic_file(\n session: Session,\n 
uri: str,\n position: Position,\n flags: int = 0,\n group: Optional[int] = None\n) -> sublime.View:\n filename = session.config.map_server_uri_to_client_path(uri)\n if group is None:\n group = session.window.active_group()\n return session.window.open_file(to_encoded_filename(filename, position), flags=flags, group=group)\n\n\nclass LocationPicker:\n\n def __init__(\n self,\n view: sublime.View,\n session: Session,\n locations: Union[List[Location], List[LocationLink]],\n side_by_side: bool\n ) -> None:\n self._view = view\n self._view_states = ([r.to_tuple() for r in view.sel()], view.viewport_position())\n window = view.window()\n if not window:\n raise ValueError(\"missing window\")\n self._window = window\n self._weaksession = weakref.ref(session)\n self._side_by_side = side_by_side\n self._items = locations\n self._highlighted_view = None # type: Optional[sublime.View]\n manager = session.manager()\n base_dir = manager.get_project_path(view.file_name() or \"\") if manager else None\n self._window.show_quick_panel(\n items=[location_to_human_readable(session.config, base_dir, location) for location in locations],\n on_select=self._select_entry,\n on_highlight=self._highlight_entry,\n flags=sublime.KEEP_OPEN_ON_FOCUS_LOST\n )\n\n def _unpack(self, index: int) -> Tuple[Optional[Session], Union[Location, LocationLink], DocumentUri, Position]:\n location = self._items[index]\n uri, position = get_uri_and_position_from_location(location)\n return self._weaksession(), location, uri, position\n\n def _select_entry(self, index: int) -> None:\n if self._view.is_valid() and not self._side_by_side:\n self._view.set_viewport_position(self._view_states[1])\n self._view.run_command('lsp_selection_set', {'regions': self._view_states[0]})\n if index >= 0 and self._view.is_valid():\n session, location, uri, position = self._unpack(index)\n if not session:\n return\n # Note: this has to run on the main thread (and not via open_location_async)\n # otherwise the bevior feels weird. It's the only reason why open_basic_file exists.\n if uri.startswith(\"file:\"):\n flags = sublime.ENCODED_POSITION\n if not self._side_by_side:\n open_basic_file(session, uri, position, flags)\n else:\n sublime.set_timeout_async(\n functools.partial(open_location_async, session, location, self._side_by_side, True))\n else:\n self._window.focus_view(self._view)\n # When in side-by-side mode close the current highlighted\n # sheet upon canceling if the sheet is semi-transient\n if self._side_by_side and self._highlighted_view:\n sheet = self._highlighted_view.sheet()\n if sheet and sheet.is_semi_transient():\n self._highlighted_view.close()\n\n def _highlight_entry(self, index: int) -> None:\n session, _, uri, position = self._unpack(index)\n if not session:\n return\n if uri.startswith(\"file:\"):\n flags = sublime.ENCODED_POSITION | sublime.FORCE_GROUP\n if self._side_by_side:\n if self._highlighted_view and self._highlighted_view.is_valid():\n # Replacing the MRU is done relative to the current highlighted sheet\n self._window.focus_view(self._highlighted_view)\n flags |= sublime.REPLACE_MRU | sublime.SEMI_TRANSIENT\n else:\n flags |= sublime.ADD_TO_SELECTION | sublime.SEMI_TRANSIENT\n else:\n flags |= sublime.TRANSIENT\n self._highlighted_view = open_basic_file(session, uri, position, flags, self._window.active_group())\n else:\n # TODO: Preview non-file uris?\n debug(\"no preview for\", uri)\n", "path": "plugin/locationpicker.py"}]}
1,791
226
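The LSP record above is a snapshot-and-restore pattern: the golden diff captures the view's selection regions and viewport position before the quick panel opens, then replays them at the top of `_select_entry`, which also runs when the user cancels the panel. A short sketch of the same idea, assuming it executes inside Sublime Text (so the `sublime` module and the plugin's own `lsp_selection_set` command are available); the two helper names are illustrative:

```python
import sublime


def capture_view_state(view: sublime.View):
    """Snapshot selection regions and scroll position before showing a quick panel."""
    return [r.to_tuple() for r in view.sel()], view.viewport_position()


def restore_view_state(view: sublime.View, state) -> None:
    """Write the selection and viewport back, e.g. when the location pick is cancelled."""
    regions, viewport = state
    if view.is_valid():
        view.set_viewport_position(viewport)
        # lsp_selection_set is the command the golden diff itself uses for this.
        view.run_command('lsp_selection_set', {'regions': regions})
```

In the picker this corresponds to capturing the state in `__init__` and restoring it in `_select_entry` whenever side-by-side mode is off.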
gh_patches_debug_40931
rasdani/github-patches
git_diff
psychopy__psychopy-465
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Invalid literal for int() when assigning variable to "N Vertices" I'm trying to tell psychopy to display a polygon with a random number of vertices on each presentation by using randint. I keep running into this error: `invalid literal for int() with base 10: 'randint(3, 4, 1)'` This occurs before the script compiles, so it doesn't produce any output / traceback other than that. I've also tried this using $randint(3, 4, 1) but that doesn't work as well. In addition, my friend had this problem when specifying the number of angles via an excel file. When I just insert a number to the field, it works fine, so it seems like python is trying to interpret randint(3, 4, 1) literally, sees that it isn't an integer, and throws an error. Variable assignment: ![image](https://f.cloud.github.com/assets/1839645/1603669/14c4adc4-53ae-11e3-9c09-4e562ed69e8c.png) Error after clicking run: ![image](https://f.cloud.github.com/assets/1839645/1603672/27c7b952-53ae-11e3-8a0a-4402f6da4fb7.png) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `psychopy/app/builder/components/polygon.py` Content: ``` 1 # Part of the PsychoPy library 2 # Copyright (C) 2013 Jonathan Peirce 3 # Distributed under the terms of the GNU General Public License (GPL). 4 5 from _visual import * #to get the template visual component 6 from os import path 7 from psychopy.app.builder.components import getInitVals 8 9 thisFolder = path.abspath(path.dirname(__file__))#the absolute path to the folder containing this path 10 iconFile = path.join(thisFolder,'polygon.png') 11 tooltip = 'Polygon: any regular polygon (line, triangle, square...circle)' 12 13 class PolygonComponent(VisualComponent): 14 """A class for presenting grating stimuli""" 15 def __init__(self, exp, parentName, name='polygon', interpolate='linear', 16 units='from exp settings', 17 lineColor='$[1,1,1]', lineColorSpace='rgb', lineWidth=1, 18 fillColor='$[1,1,1]', fillColorSpace='rgb', 19 nVertices = 4, 20 pos=[0,0], size=[0.5,0.5], ori=0, 21 startType='time (s)', startVal=0.0, 22 stopType='duration (s)', stopVal=1.0, 23 startEstim='', durationEstim=''): 24 #initialise main parameters from base stimulus 25 VisualComponent.__init__(self,exp,parentName,name=name, units=units, 26 pos=pos, size=size, ori=ori, 27 startType=startType, startVal=startVal, 28 stopType=stopType, stopVal=stopVal, 29 startEstim=startEstim, durationEstim=durationEstim) 30 self.type='Polygon' 31 self.url="http://www.psychopy.org/builder/components/shape.html" 32 self.exp.requirePsychopyLibs(['visual']) 33 self.order=['nVertices'] 34 #params 35 self.params['nVertices']=Param(nVertices, valType='code', 36 updates='constant', allowedUpdates=['constant','set every repeat'], 37 hint="How many vertices? 2=line, 3=triangle... 
(90 approximates a circle)", 38 label="N Vertices") 39 self.params['fillColorSpace']=Param(fillColorSpace, valType='str', allowedVals=['rgb','dkl','lms'], 40 updates='constant', 41 hint="Choice of color space for the fill color (rgb, dkl, lms)", 42 label="Fill color space") 43 self.params['fillColor']=Param(fillColor, valType='str', allowedTypes=[], 44 updates='constant', allowedUpdates=['constant','set every repeat','set every frame'], 45 hint="Fill color of this shape; Right-click to bring up a color-picker (rgb only)", 46 label="Fill color") 47 self.params['lineColorSpace']=Param(lineColorSpace, valType='str', allowedVals=['rgb','dkl','lms'], 48 updates='constant', 49 hint="Choice of color space for the fill color (rgb, dkl, lms)", 50 label="Line color space") 51 self.params['lineColor']=Param(lineColor, valType='str', allowedTypes=[], 52 updates='constant', allowedUpdates=['constant','set every repeat','set every frame'], 53 hint="Line color of this shape; Right-click to bring up a color-picker (rgb only)", 54 label="Line color") 55 self.params['lineWidth']=Param(lineWidth, valType='code', allowedTypes=[], 56 updates='constant', allowedUpdates=['constant','set every repeat','set every frame'], 57 hint="Width of the shape's line (always in pixels - this does NOT use 'units')", 58 label="Line width") 59 self.params['interpolate']=Param(interpolate, valType='str', allowedVals=['linear','nearest'], 60 updates='constant', allowedUpdates=[], 61 hint="How should the image be interpolated if/when rescaled", 62 label="Interpolate") 63 self.params['size']=Param(size, valType='code', allowedTypes=[], 64 updates='constant', allowedUpdates=['constant','set every repeat','set every frame'], 65 hint="Size of this stimulus [w,h]. Note that for a line only the first value is used, for triangle and rect the [w,h] is as expected, but for higher-order polygons it represents the [w,h] of the ellipse that the polygon sits on!! ", 66 label="Size [w,h]") 67 del self.params['color'] 68 del self.params['colorSpace'] 69 70 def writeInitCode(self,buff): 71 #do we need units code? 
72 if self.params['units'].val=='from exp settings': unitsStr="" 73 else: unitsStr="units=%(units)s, " %self.params 74 inits = getInitVals(self.params)#replaces variable params with defaults 75 if int(self.params['nVertices'].val) == 2: 76 buff.writeIndented("%s = visual.Line(win=win, name='%s',%s\n" %(inits['name'],inits['name'],unitsStr)) 77 buff.writeIndented(" start=(-%(size)s[0]/2.0, 0), end=(+%(size)s[0]/2.0, 0),\n" %(inits) ) 78 elif int(self.params['nVertices'].val) == 3: 79 buff.writeIndented("%s = visual.ShapeStim(win=win, name='%s',%s\n" %(inits['name'],inits['name'],unitsStr)) 80 buff.writeIndented(" vertices = [[-%(size)s[0]/2.0,-%(size)s[1]/2.0], [+%(size)s[0]/2.0,-%(size)s[1]/2.0], [0,%(size)s[1]/2.0]],\n" %(inits) ) 81 elif int(self.params['nVertices'].val) == 4: 82 buff.writeIndented("%s = visual.Rect(win=win, name='%s',%s\n" %(inits['name'],inits['name'],unitsStr)) 83 buff.writeIndented(" width=%(size)s[0], height=%(size)s[1],\n" %(inits) ) 84 else: 85 buff.writeIndented("%s = visual.Polygon(win=win, name='%s',%s\n" %(inits['name'],inits['name'],unitsStr)) 86 buff.writeIndented(" edges = %(nVertices)s, size=%(size)s,\n" %(inits) ) 87 buff.writeIndented(" ori=%(ori)s, pos=%(pos)s,\n" %(inits) ) 88 buff.writeIndented(" lineWidth=%(lineWidth)s, lineColor=%(lineColor)s, lineColorSpace=%(lineColorSpace)s,\n" %(inits) ) 89 buff.writeIndented(" fillColor=%(fillColor)s, fillColorSpace=%(fillColorSpace)s,\n" %(inits) ) 90 buff.writeIndented(" opacity=%(opacity)s," %(inits) ) 91 if self.params['interpolate'].val=='linear': 92 buff.write("interpolate=True)\n") 93 else: buff.write("interpolate=False)\n") 94 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/psychopy/app/builder/components/polygon.py b/psychopy/app/builder/components/polygon.py --- a/psychopy/app/builder/components/polygon.py +++ b/psychopy/app/builder/components/polygon.py @@ -32,8 +32,8 @@ self.exp.requirePsychopyLibs(['visual']) self.order=['nVertices'] #params - self.params['nVertices']=Param(nVertices, valType='code', - updates='constant', allowedUpdates=['constant','set every repeat'], + self.params['nVertices']=Param(nVertices, valType='int', + updates='constant', allowedUpdates=['constant'], hint="How many vertices? 2=line, 3=triangle... (90 approximates a circle)", label="N Vertices") self.params['fillColorSpace']=Param(fillColorSpace, valType='str', allowedVals=['rgb','dkl','lms'], @@ -72,18 +72,19 @@ if self.params['units'].val=='from exp settings': unitsStr="" else: unitsStr="units=%(units)s, " %self.params inits = getInitVals(self.params)#replaces variable params with defaults - if int(self.params['nVertices'].val) == 2: + if self.params['nVertices'].val == '2': buff.writeIndented("%s = visual.Line(win=win, name='%s',%s\n" %(inits['name'],inits['name'],unitsStr)) buff.writeIndented(" start=(-%(size)s[0]/2.0, 0), end=(+%(size)s[0]/2.0, 0),\n" %(inits) ) - elif int(self.params['nVertices'].val) == 3: + elif self.params['nVertices'].val == '3': buff.writeIndented("%s = visual.ShapeStim(win=win, name='%s',%s\n" %(inits['name'],inits['name'],unitsStr)) buff.writeIndented(" vertices = [[-%(size)s[0]/2.0,-%(size)s[1]/2.0], [+%(size)s[0]/2.0,-%(size)s[1]/2.0], [0,%(size)s[1]/2.0]],\n" %(inits) ) - elif int(self.params['nVertices'].val) == 4: + elif self.params['nVertices'].val == '4': buff.writeIndented("%s = visual.Rect(win=win, name='%s',%s\n" %(inits['name'],inits['name'],unitsStr)) buff.writeIndented(" width=%(size)s[0], height=%(size)s[1],\n" %(inits) ) else: buff.writeIndented("%s = visual.Polygon(win=win, name='%s',%s\n" %(inits['name'],inits['name'],unitsStr)) - buff.writeIndented(" edges = %(nVertices)s, size=%(size)s,\n" %(inits) ) + buff.writeIndented(" edges = %s," % str(inits['nVertices'].val)) + buff.writeIndented(" size=%(size)s,\n" %(inits) ) buff.writeIndented(" ori=%(ori)s, pos=%(pos)s,\n" %(inits) ) buff.writeIndented(" lineWidth=%(lineWidth)s, lineColor=%(lineColor)s, lineColorSpace=%(lineColorSpace)s,\n" %(inits) ) buff.writeIndented(" fillColor=%(fillColor)s, fillColorSpace=%(fillColorSpace)s,\n" %(inits) )
{"golden_diff": "diff --git a/psychopy/app/builder/components/polygon.py b/psychopy/app/builder/components/polygon.py\n--- a/psychopy/app/builder/components/polygon.py\n+++ b/psychopy/app/builder/components/polygon.py\n@@ -32,8 +32,8 @@\n self.exp.requirePsychopyLibs(['visual'])\n self.order=['nVertices']\n #params\n- self.params['nVertices']=Param(nVertices, valType='code',\n- updates='constant', allowedUpdates=['constant','set every repeat'],\n+ self.params['nVertices']=Param(nVertices, valType='int',\n+ updates='constant', allowedUpdates=['constant'],\n hint=\"How many vertices? 2=line, 3=triangle... (90 approximates a circle)\",\n label=\"N Vertices\")\n self.params['fillColorSpace']=Param(fillColorSpace, valType='str', allowedVals=['rgb','dkl','lms'],\n@@ -72,18 +72,19 @@\n if self.params['units'].val=='from exp settings': unitsStr=\"\"\n else: unitsStr=\"units=%(units)s, \" %self.params\n inits = getInitVals(self.params)#replaces variable params with defaults\n- if int(self.params['nVertices'].val) == 2:\n+ if self.params['nVertices'].val == '2':\n buff.writeIndented(\"%s = visual.Line(win=win, name='%s',%s\\n\" %(inits['name'],inits['name'],unitsStr))\n buff.writeIndented(\" start=(-%(size)s[0]/2.0, 0), end=(+%(size)s[0]/2.0, 0),\\n\" %(inits) )\n- elif int(self.params['nVertices'].val) == 3:\n+ elif self.params['nVertices'].val == '3':\n buff.writeIndented(\"%s = visual.ShapeStim(win=win, name='%s',%s\\n\" %(inits['name'],inits['name'],unitsStr))\n buff.writeIndented(\" vertices = [[-%(size)s[0]/2.0,-%(size)s[1]/2.0], [+%(size)s[0]/2.0,-%(size)s[1]/2.0], [0,%(size)s[1]/2.0]],\\n\" %(inits) )\n- elif int(self.params['nVertices'].val) == 4:\n+ elif self.params['nVertices'].val == '4':\n buff.writeIndented(\"%s = visual.Rect(win=win, name='%s',%s\\n\" %(inits['name'],inits['name'],unitsStr))\n buff.writeIndented(\" width=%(size)s[0], height=%(size)s[1],\\n\" %(inits) )\n else:\n buff.writeIndented(\"%s = visual.Polygon(win=win, name='%s',%s\\n\" %(inits['name'],inits['name'],unitsStr))\n- buff.writeIndented(\" edges = %(nVertices)s, size=%(size)s,\\n\" %(inits) )\n+ buff.writeIndented(\" edges = %s,\" % str(inits['nVertices'].val))\n+ buff.writeIndented(\" size=%(size)s,\\n\" %(inits) )\n buff.writeIndented(\" ori=%(ori)s, pos=%(pos)s,\\n\" %(inits) )\n buff.writeIndented(\" lineWidth=%(lineWidth)s, lineColor=%(lineColor)s, lineColorSpace=%(lineColorSpace)s,\\n\" %(inits) )\n buff.writeIndented(\" fillColor=%(fillColor)s, fillColorSpace=%(fillColorSpace)s,\\n\" %(inits) )\n", "issue": "Invalid literal for int() when assigning variable to \"N Vertices\"\nI'm trying to tell psychopy to display a polygon with a random number of vertices on each presentation by using randint. I keep running into this error:\n\n`invalid literal for int() with base 10: 'randint(3, 4, 1)'`\n\nThis occurs before the script compiles, so it doesn't produce any output / traceback other than that. \n\nI've also tried this using $randint(3, 4, 1) but that doesn't work as well. 
In addition, my friend had this problem when specifying the number of angles via an excel file.\n\nWhen I just insert a number to the field, it works fine, so it seems like python is trying to interpret randint(3, 4, 1) literally, sees that it isn't an integer, and throws an error.\n\nVariable assignment:\n![image](https://f.cloud.github.com/assets/1839645/1603669/14c4adc4-53ae-11e3-9c09-4e562ed69e8c.png)\n\nError after clicking run:\n![image](https://f.cloud.github.com/assets/1839645/1603672/27c7b952-53ae-11e3-8a0a-4402f6da4fb7.png)\n\n", "before_files": [{"content": "# Part of the PsychoPy library\n# Copyright (C) 2013 Jonathan Peirce\n# Distributed under the terms of the GNU General Public License (GPL).\n\nfrom _visual import * #to get the template visual component\nfrom os import path\nfrom psychopy.app.builder.components import getInitVals\n\nthisFolder = path.abspath(path.dirname(__file__))#the absolute path to the folder containing this path\niconFile = path.join(thisFolder,'polygon.png')\ntooltip = 'Polygon: any regular polygon (line, triangle, square...circle)'\n\nclass PolygonComponent(VisualComponent):\n \"\"\"A class for presenting grating stimuli\"\"\"\n def __init__(self, exp, parentName, name='polygon', interpolate='linear',\n units='from exp settings',\n lineColor='$[1,1,1]', lineColorSpace='rgb', lineWidth=1,\n fillColor='$[1,1,1]', fillColorSpace='rgb',\n nVertices = 4,\n pos=[0,0], size=[0.5,0.5], ori=0,\n startType='time (s)', startVal=0.0,\n stopType='duration (s)', stopVal=1.0,\n startEstim='', durationEstim=''):\n #initialise main parameters from base stimulus\n VisualComponent.__init__(self,exp,parentName,name=name, units=units,\n pos=pos, size=size, ori=ori,\n startType=startType, startVal=startVal,\n stopType=stopType, stopVal=stopVal,\n startEstim=startEstim, durationEstim=durationEstim)\n self.type='Polygon'\n self.url=\"http://www.psychopy.org/builder/components/shape.html\"\n self.exp.requirePsychopyLibs(['visual'])\n self.order=['nVertices']\n #params\n self.params['nVertices']=Param(nVertices, valType='code',\n updates='constant', allowedUpdates=['constant','set every repeat'],\n hint=\"How many vertices? 2=line, 3=triangle... 
(90 approximates a circle)\",\n label=\"N Vertices\")\n self.params['fillColorSpace']=Param(fillColorSpace, valType='str', allowedVals=['rgb','dkl','lms'],\n updates='constant',\n hint=\"Choice of color space for the fill color (rgb, dkl, lms)\",\n label=\"Fill color space\")\n self.params['fillColor']=Param(fillColor, valType='str', allowedTypes=[],\n updates='constant', allowedUpdates=['constant','set every repeat','set every frame'],\n hint=\"Fill color of this shape; Right-click to bring up a color-picker (rgb only)\",\n label=\"Fill color\")\n self.params['lineColorSpace']=Param(lineColorSpace, valType='str', allowedVals=['rgb','dkl','lms'],\n updates='constant',\n hint=\"Choice of color space for the fill color (rgb, dkl, lms)\",\n label=\"Line color space\")\n self.params['lineColor']=Param(lineColor, valType='str', allowedTypes=[],\n updates='constant', allowedUpdates=['constant','set every repeat','set every frame'],\n hint=\"Line color of this shape; Right-click to bring up a color-picker (rgb only)\",\n label=\"Line color\")\n self.params['lineWidth']=Param(lineWidth, valType='code', allowedTypes=[],\n updates='constant', allowedUpdates=['constant','set every repeat','set every frame'],\n hint=\"Width of the shape's line (always in pixels - this does NOT use 'units')\",\n label=\"Line width\")\n self.params['interpolate']=Param(interpolate, valType='str', allowedVals=['linear','nearest'],\n updates='constant', allowedUpdates=[],\n hint=\"How should the image be interpolated if/when rescaled\",\n label=\"Interpolate\")\n self.params['size']=Param(size, valType='code', allowedTypes=[],\n updates='constant', allowedUpdates=['constant','set every repeat','set every frame'],\n hint=\"Size of this stimulus [w,h]. Note that for a line only the first value is used, for triangle and rect the [w,h] is as expected, but for higher-order polygons it represents the [w,h] of the ellipse that the polygon sits on!! 
\",\n label=\"Size [w,h]\")\n del self.params['color']\n del self.params['colorSpace']\n\n def writeInitCode(self,buff):\n #do we need units code?\n if self.params['units'].val=='from exp settings': unitsStr=\"\"\n else: unitsStr=\"units=%(units)s, \" %self.params\n inits = getInitVals(self.params)#replaces variable params with defaults\n if int(self.params['nVertices'].val) == 2:\n buff.writeIndented(\"%s = visual.Line(win=win, name='%s',%s\\n\" %(inits['name'],inits['name'],unitsStr))\n buff.writeIndented(\" start=(-%(size)s[0]/2.0, 0), end=(+%(size)s[0]/2.0, 0),\\n\" %(inits) )\n elif int(self.params['nVertices'].val) == 3:\n buff.writeIndented(\"%s = visual.ShapeStim(win=win, name='%s',%s\\n\" %(inits['name'],inits['name'],unitsStr))\n buff.writeIndented(\" vertices = [[-%(size)s[0]/2.0,-%(size)s[1]/2.0], [+%(size)s[0]/2.0,-%(size)s[1]/2.0], [0,%(size)s[1]/2.0]],\\n\" %(inits) )\n elif int(self.params['nVertices'].val) == 4:\n buff.writeIndented(\"%s = visual.Rect(win=win, name='%s',%s\\n\" %(inits['name'],inits['name'],unitsStr))\n buff.writeIndented(\" width=%(size)s[0], height=%(size)s[1],\\n\" %(inits) )\n else:\n buff.writeIndented(\"%s = visual.Polygon(win=win, name='%s',%s\\n\" %(inits['name'],inits['name'],unitsStr))\n buff.writeIndented(\" edges = %(nVertices)s, size=%(size)s,\\n\" %(inits) )\n buff.writeIndented(\" ori=%(ori)s, pos=%(pos)s,\\n\" %(inits) )\n buff.writeIndented(\" lineWidth=%(lineWidth)s, lineColor=%(lineColor)s, lineColorSpace=%(lineColorSpace)s,\\n\" %(inits) )\n buff.writeIndented(\" fillColor=%(fillColor)s, fillColorSpace=%(fillColorSpace)s,\\n\" %(inits) )\n buff.writeIndented(\" opacity=%(opacity)s,\" %(inits) )\n if self.params['interpolate'].val=='linear':\n buff.write(\"interpolate=True)\\n\")\n else: buff.write(\"interpolate=False)\\n\")\n", "path": "psychopy/app/builder/components/polygon.py"}], "after_files": [{"content": "# Part of the PsychoPy library\n# Copyright (C) 2013 Jonathan Peirce\n# Distributed under the terms of the GNU General Public License (GPL).\n\nfrom _visual import * #to get the template visual component\nfrom os import path\nfrom psychopy.app.builder.components import getInitVals\n\nthisFolder = path.abspath(path.dirname(__file__))#the absolute path to the folder containing this path\niconFile = path.join(thisFolder,'polygon.png')\ntooltip = 'Polygon: any regular polygon (line, triangle, square...circle)'\n\nclass PolygonComponent(VisualComponent):\n \"\"\"A class for presenting grating stimuli\"\"\"\n def __init__(self, exp, parentName, name='polygon', interpolate='linear',\n units='from exp settings',\n lineColor='$[1,1,1]', lineColorSpace='rgb', lineWidth=1,\n fillColor='$[1,1,1]', fillColorSpace='rgb',\n nVertices = 4,\n pos=[0,0], size=[0.5,0.5], ori=0,\n startType='time (s)', startVal=0.0,\n stopType='duration (s)', stopVal=1.0,\n startEstim='', durationEstim=''):\n #initialise main parameters from base stimulus\n VisualComponent.__init__(self,exp,parentName,name=name, units=units,\n pos=pos, size=size, ori=ori,\n startType=startType, startVal=startVal,\n stopType=stopType, stopVal=stopVal,\n startEstim=startEstim, durationEstim=durationEstim)\n self.type='Polygon'\n self.url=\"http://www.psychopy.org/builder/components/shape.html\"\n self.exp.requirePsychopyLibs(['visual'])\n self.order=['nVertices']\n #params\n self.params['nVertices']=Param(nVertices, valType='int',\n updates='constant', allowedUpdates=['constant'],\n hint=\"How many vertices? 2=line, 3=triangle... 
(90 approximates a circle)\",\n label=\"N Vertices\")\n self.params['fillColorSpace']=Param(fillColorSpace, valType='str', allowedVals=['rgb','dkl','lms'],\n updates='constant',\n hint=\"Choice of color space for the fill color (rgb, dkl, lms)\",\n label=\"Fill color space\")\n self.params['fillColor']=Param(fillColor, valType='str', allowedTypes=[],\n updates='constant', allowedUpdates=['constant','set every repeat','set every frame'],\n hint=\"Fill color of this shape; Right-click to bring up a color-picker (rgb only)\",\n label=\"Fill color\")\n self.params['lineColorSpace']=Param(lineColorSpace, valType='str', allowedVals=['rgb','dkl','lms'],\n updates='constant',\n hint=\"Choice of color space for the fill color (rgb, dkl, lms)\",\n label=\"Line color space\")\n self.params['lineColor']=Param(lineColor, valType='str', allowedTypes=[],\n updates='constant', allowedUpdates=['constant','set every repeat','set every frame'],\n hint=\"Line color of this shape; Right-click to bring up a color-picker (rgb only)\",\n label=\"Line color\")\n self.params['lineWidth']=Param(lineWidth, valType='code', allowedTypes=[],\n updates='constant', allowedUpdates=['constant','set every repeat','set every frame'],\n hint=\"Width of the shape's line (always in pixels - this does NOT use 'units')\",\n label=\"Line width\")\n self.params['interpolate']=Param(interpolate, valType='str', allowedVals=['linear','nearest'],\n updates='constant', allowedUpdates=[],\n hint=\"How should the image be interpolated if/when rescaled\",\n label=\"Interpolate\")\n self.params['size']=Param(size, valType='code', allowedTypes=[],\n updates='constant', allowedUpdates=['constant','set every repeat','set every frame'],\n hint=\"Size of this stimulus [w,h]. Note that for a line only the first value is used, for triangle and rect the [w,h] is as expected, but for higher-order polygons it represents the [w,h] of the ellipse that the polygon sits on!! 
\",\n label=\"Size [w,h]\")\n del self.params['color']\n del self.params['colorSpace']\n\n def writeInitCode(self,buff):\n #do we need units code?\n if self.params['units'].val=='from exp settings': unitsStr=\"\"\n else: unitsStr=\"units=%(units)s, \" %self.params\n inits = getInitVals(self.params)#replaces variable params with defaults\n if self.params['nVertices'].val == '2':\n buff.writeIndented(\"%s = visual.Line(win=win, name='%s',%s\\n\" %(inits['name'],inits['name'],unitsStr))\n buff.writeIndented(\" start=(-%(size)s[0]/2.0, 0), end=(+%(size)s[0]/2.0, 0),\\n\" %(inits) )\n elif self.params['nVertices'].val == '3':\n buff.writeIndented(\"%s = visual.ShapeStim(win=win, name='%s',%s\\n\" %(inits['name'],inits['name'],unitsStr))\n buff.writeIndented(\" vertices = [[-%(size)s[0]/2.0,-%(size)s[1]/2.0], [+%(size)s[0]/2.0,-%(size)s[1]/2.0], [0,%(size)s[1]/2.0]],\\n\" %(inits) )\n elif self.params['nVertices'].val == '4':\n buff.writeIndented(\"%s = visual.Rect(win=win, name='%s',%s\\n\" %(inits['name'],inits['name'],unitsStr))\n buff.writeIndented(\" width=%(size)s[0], height=%(size)s[1],\\n\" %(inits) )\n else:\n buff.writeIndented(\"%s = visual.Polygon(win=win, name='%s',%s\\n\" %(inits['name'],inits['name'],unitsStr))\n buff.writeIndented(\" edges = %s,\" % str(inits['nVertices'].val))\n buff.writeIndented(\" size=%(size)s,\\n\" %(inits) )\n buff.writeIndented(\" ori=%(ori)s, pos=%(pos)s,\\n\" %(inits) )\n buff.writeIndented(\" lineWidth=%(lineWidth)s, lineColor=%(lineColor)s, lineColorSpace=%(lineColorSpace)s,\\n\" %(inits) )\n buff.writeIndented(\" fillColor=%(fillColor)s, fillColorSpace=%(fillColorSpace)s,\\n\" %(inits) )\n buff.writeIndented(\" opacity=%(opacity)s,\" %(inits) )\n if self.params['interpolate'].val=='linear':\n buff.write(\"interpolate=True)\\n\")\n else: buff.write(\"interpolate=False)\\n\")\n", "path": "psychopy/app/builder/components/polygon.py"}]}
2,274
813
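The psychopy failure is reproducible outside Builder: the "N Vertices" field stores whatever was typed as a string, so calling `int()` on an expression such as `randint(3, 4, 1)` raises before any experiment script is generated. The golden diff therefore compares against the literal strings `'2'`, `'3'`, `'4'` and writes any other value verbatim into the generated `visual.Polygon(edges=...)` call. A standalone illustration of the error and of the string-comparison workaround (the branch results here are placeholder strings, not real PsychoPy calls):

```python
val = 'randint(3, 4, 1)'  # what the Builder dialog stores for "N Vertices"

try:
    int(val)
except ValueError as err:
    print(err)  # invalid literal for int() with base 10: 'randint(3, 4, 1)'

# The patched component never converts; it branches on the literal string and
# otherwise emits the expression unchanged into the generated script.
if val == '2':
    stim = 'visual.Line(...)'
elif val == '3':
    stim = 'visual.ShapeStim(...)  # triangle'
elif val == '4':
    stim = 'visual.Rect(...)'
else:
    stim = 'visual.Polygon(..., edges=%s, ...)' % val
print(stim)
```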
gh_patches_debug_17636
rasdani/github-patches
git_diff
svthalia__concrexit-3528
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Albums pagination doesn't maintain search terms ### Describe the bug If you open https://thalia.nu/members/photos/?keywords=borrel#photos-albums, then go to the second page using the pagination buttons, the search term is dropped. ### Expected behaviour <!-- A clear and concise description of what you expected to happen. --> The search term remains ### Additional context <!-- Add any other context about the problem here. --> Could be since we introduced the shared paginated view template? So it's quite likely this occurs for other paginated filterable/searchable views as well. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `website/thaliawebsite/views.py` Content: ``` 1 """General views for the website.""" 2 3 from django.contrib.admin.views.decorators import staff_member_required 4 from django.contrib.auth.views import LoginView, PasswordResetView 5 from django.core.exceptions import PermissionDenied 6 from django.http import HttpResponse, HttpResponseForbidden 7 from django.shortcuts import redirect 8 from django.utils.decorators import method_decorator 9 from django.views.generic import ListView, TemplateView 10 from django.views.generic.base import View 11 12 from django_ratelimit.decorators import ratelimit 13 14 15 class IndexView(TemplateView): 16 template_name = "index.html" 17 18 19 @method_decorator(staff_member_required, "dispatch") 20 class TestCrashView(View): 21 """Test view to intentionally crash to test the error handling.""" 22 23 def dispatch(self, request, *args, **kwargs) -> HttpResponse: 24 if not request.user.is_superuser: 25 return HttpResponseForbidden("This is not for you") 26 raise Exception("Test exception") 27 28 29 class PagedView(ListView): 30 """A ListView with automatic pagination.""" 31 32 def get_context_data(self, **kwargs) -> dict: 33 context = super().get_context_data(**kwargs) 34 page = context["page_obj"].number 35 paginator = context["paginator"] 36 37 # Show the two pages before and after the current page 38 page_range_start = max(1, page - 2) 39 page_range_stop = min(page + 3, paginator.num_pages + 1) 40 41 # Add extra pages if we show less than 5 pages 42 page_range_start = min(page_range_start, page_range_stop - 5) 43 page_range_start = max(1, page_range_start) 44 45 # Add extra pages if we still show less than 5 pages 46 page_range_stop = max(page_range_stop, page_range_start + 5) 47 page_range_stop = min(page_range_stop, paginator.num_pages + 1) 48 49 page_range = range(page_range_start, page_range_stop) 50 51 context.update( 52 { 53 "page_range": page_range, 54 } 55 ) 56 57 return context 58 59 60 class RateLimitedPasswordResetView(PasswordResetView): 61 @method_decorator(ratelimit(key="ip", rate="5/h")) 62 def post(self, request, *args, **kwargs): 63 return super().post(request, *args, **kwargs) 64 65 66 class RateLimitedLoginView(LoginView): 67 @method_decorator(ratelimit(key="ip", rate="30/h")) 68 @method_decorator(ratelimit(key="post:username", rate="30/h")) 69 def post(self, request, *args, **kwargs): 70 return super().post(request, *args, **kwargs) 71 72 73 def rate_limited_view(request, *args, **kwargs): 74 return HttpResponse("You are rate limited", status=429) 75 76 77 def admin_unauthorized_view(request): 78 if not request.member: 79 url = "/user/login" 80 args = request.META.get("QUERY_STRING", "") 81 if args: 82 url = 
f"{url}?{args}" 83 return redirect(url) 84 elif not request.member.is_staff and not request.member.is_superuser: 85 raise PermissionDenied("You are not allowed to access the administration page.") 86 else: 87 return redirect(request.GET.get("next", "/")) 88 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/website/thaliawebsite/views.py b/website/thaliawebsite/views.py --- a/website/thaliawebsite/views.py +++ b/website/thaliawebsite/views.py @@ -31,6 +31,7 @@ def get_context_data(self, **kwargs) -> dict: context = super().get_context_data(**kwargs) + print(kwargs) page = context["page_obj"].number paginator = context["paginator"] @@ -48,9 +49,17 @@ page_range = range(page_range_start, page_range_stop) + querydict = self.request.GET.copy() + + if "page" in querydict: + del querydict["page"] + context.update( { "page_range": page_range, + "base_url": f"{self.request.path}?{querydict.urlencode()}&" + if querydict + else f"{self.request.path}?", } )
{"golden_diff": "diff --git a/website/thaliawebsite/views.py b/website/thaliawebsite/views.py\n--- a/website/thaliawebsite/views.py\n+++ b/website/thaliawebsite/views.py\n@@ -31,6 +31,7 @@\n \n def get_context_data(self, **kwargs) -> dict:\n context = super().get_context_data(**kwargs)\n+ print(kwargs)\n page = context[\"page_obj\"].number\n paginator = context[\"paginator\"]\n \n@@ -48,9 +49,17 @@\n \n page_range = range(page_range_start, page_range_stop)\n \n+ querydict = self.request.GET.copy()\n+\n+ if \"page\" in querydict:\n+ del querydict[\"page\"]\n+\n context.update(\n {\n \"page_range\": page_range,\n+ \"base_url\": f\"{self.request.path}?{querydict.urlencode()}&\"\n+ if querydict\n+ else f\"{self.request.path}?\",\n }\n )\n", "issue": "Albums pagination doesn't maintain search terms\n### Describe the bug\r\nIf you open https://thalia.nu/members/photos/?keywords=borrel#photos-albums, then go to the second page using the pagination buttons, the search term is dropped.\r\n\r\n### Expected behaviour\r\n<!-- A clear and concise description of what you expected to happen. -->\r\nThe search term remains\r\n\r\n### Additional context\r\n<!-- Add any other context about the problem here. -->\r\nCould be since we introduced the shared paginated view template? So it's quite likely this occurs for other paginated filterable/searchable views as well.\n", "before_files": [{"content": "\"\"\"General views for the website.\"\"\"\n\nfrom django.contrib.admin.views.decorators import staff_member_required\nfrom django.contrib.auth.views import LoginView, PasswordResetView\nfrom django.core.exceptions import PermissionDenied\nfrom django.http import HttpResponse, HttpResponseForbidden\nfrom django.shortcuts import redirect\nfrom django.utils.decorators import method_decorator\nfrom django.views.generic import ListView, TemplateView\nfrom django.views.generic.base import View\n\nfrom django_ratelimit.decorators import ratelimit\n\n\nclass IndexView(TemplateView):\n template_name = \"index.html\"\n\n\n@method_decorator(staff_member_required, \"dispatch\")\nclass TestCrashView(View):\n \"\"\"Test view to intentionally crash to test the error handling.\"\"\"\n\n def dispatch(self, request, *args, **kwargs) -> HttpResponse:\n if not request.user.is_superuser:\n return HttpResponseForbidden(\"This is not for you\")\n raise Exception(\"Test exception\")\n\n\nclass PagedView(ListView):\n \"\"\"A ListView with automatic pagination.\"\"\"\n\n def get_context_data(self, **kwargs) -> dict:\n context = super().get_context_data(**kwargs)\n page = context[\"page_obj\"].number\n paginator = context[\"paginator\"]\n\n # Show the two pages before and after the current page\n page_range_start = max(1, page - 2)\n page_range_stop = min(page + 3, paginator.num_pages + 1)\n\n # Add extra pages if we show less than 5 pages\n page_range_start = min(page_range_start, page_range_stop - 5)\n page_range_start = max(1, page_range_start)\n\n # Add extra pages if we still show less than 5 pages\n page_range_stop = max(page_range_stop, page_range_start + 5)\n page_range_stop = min(page_range_stop, paginator.num_pages + 1)\n\n page_range = range(page_range_start, page_range_stop)\n\n context.update(\n {\n \"page_range\": page_range,\n }\n )\n\n return context\n\n\nclass RateLimitedPasswordResetView(PasswordResetView):\n @method_decorator(ratelimit(key=\"ip\", rate=\"5/h\"))\n def post(self, request, *args, **kwargs):\n return super().post(request, *args, **kwargs)\n\n\nclass RateLimitedLoginView(LoginView):\n 
@method_decorator(ratelimit(key=\"ip\", rate=\"30/h\"))\n @method_decorator(ratelimit(key=\"post:username\", rate=\"30/h\"))\n def post(self, request, *args, **kwargs):\n return super().post(request, *args, **kwargs)\n\n\ndef rate_limited_view(request, *args, **kwargs):\n return HttpResponse(\"You are rate limited\", status=429)\n\n\ndef admin_unauthorized_view(request):\n if not request.member:\n url = \"/user/login\"\n args = request.META.get(\"QUERY_STRING\", \"\")\n if args:\n url = f\"{url}?{args}\"\n return redirect(url)\n elif not request.member.is_staff and not request.member.is_superuser:\n raise PermissionDenied(\"You are not allowed to access the administration page.\")\n else:\n return redirect(request.GET.get(\"next\", \"/\"))\n", "path": "website/thaliawebsite/views.py"}], "after_files": [{"content": "\"\"\"General views for the website.\"\"\"\n\nfrom django.contrib.admin.views.decorators import staff_member_required\nfrom django.contrib.auth.views import LoginView, PasswordResetView\nfrom django.core.exceptions import PermissionDenied\nfrom django.http import HttpResponse, HttpResponseForbidden\nfrom django.shortcuts import redirect\nfrom django.utils.decorators import method_decorator\nfrom django.views.generic import ListView, TemplateView\nfrom django.views.generic.base import View\n\nfrom django_ratelimit.decorators import ratelimit\n\n\nclass IndexView(TemplateView):\n template_name = \"index.html\"\n\n\n@method_decorator(staff_member_required, \"dispatch\")\nclass TestCrashView(View):\n \"\"\"Test view to intentionally crash to test the error handling.\"\"\"\n\n def dispatch(self, request, *args, **kwargs) -> HttpResponse:\n if not request.user.is_superuser:\n return HttpResponseForbidden(\"This is not for you\")\n raise Exception(\"Test exception\")\n\n\nclass PagedView(ListView):\n \"\"\"A ListView with automatic pagination.\"\"\"\n\n def get_context_data(self, **kwargs) -> dict:\n context = super().get_context_data(**kwargs)\n print(kwargs)\n page = context[\"page_obj\"].number\n paginator = context[\"paginator\"]\n\n # Show the two pages before and after the current page\n page_range_start = max(1, page - 2)\n page_range_stop = min(page + 3, paginator.num_pages + 1)\n\n # Add extra pages if we show less than 5 pages\n page_range_start = min(page_range_start, page_range_stop - 5)\n page_range_start = max(1, page_range_start)\n\n # Add extra pages if we still show less than 5 pages\n page_range_stop = max(page_range_stop, page_range_start + 5)\n page_range_stop = min(page_range_stop, paginator.num_pages + 1)\n\n page_range = range(page_range_start, page_range_stop)\n\n querydict = self.request.GET.copy()\n\n if \"page\" in querydict:\n del querydict[\"page\"]\n\n context.update(\n {\n \"page_range\": page_range,\n \"base_url\": f\"{self.request.path}?{querydict.urlencode()}&\"\n if querydict\n else f\"{self.request.path}?\",\n }\n )\n\n return context\n\n\nclass RateLimitedPasswordResetView(PasswordResetView):\n @method_decorator(ratelimit(key=\"ip\", rate=\"5/h\"))\n def post(self, request, *args, **kwargs):\n return super().post(request, *args, **kwargs)\n\n\nclass RateLimitedLoginView(LoginView):\n @method_decorator(ratelimit(key=\"ip\", rate=\"30/h\"))\n @method_decorator(ratelimit(key=\"post:username\", rate=\"30/h\"))\n def post(self, request, *args, **kwargs):\n return super().post(request, *args, **kwargs)\n\n\ndef rate_limited_view(request, *args, **kwargs):\n return HttpResponse(\"You are rate limited\", status=429)\n\n\ndef 
admin_unauthorized_view(request):\n if not request.member:\n url = \"/user/login\"\n args = request.META.get(\"QUERY_STRING\", \"\")\n if args:\n url = f\"{url}?{args}\"\n return redirect(url)\n elif not request.member.is_staff and not request.member.is_superuser:\n raise PermissionDenied(\"You are not allowed to access the administration page.\")\n else:\n return redirect(request.GET.get(\"next\", \"/\"))\n", "path": "website/thaliawebsite/views.py"}]}
1,228
216
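The golden diff above fixes the lost search terms by rebuilding the query string without the `page` parameter and exposing it as `base_url`, so pagination links can append their own `page=<n>` while keeping filters such as `keywords=borrel`. The sketch below reproduces that query-string handling with only the Python standard library; it is an illustration of the idea, not the project's Django code, and the function name is made up for the example.

```python
from urllib.parse import parse_qsl, urlencode

def build_pagination_base_url(path: str, query_string: str) -> str:
    """Keep every query parameter except 'page' so links can append their own page number."""
    params = [(key, value) for key, value in parse_qsl(query_string) if key != "page"]
    if params:
        return f"{path}?{urlencode(params)}&"
    return f"{path}?"

# The filtered photo albums listing from the issue keeps its search term:
print(build_pagination_base_url("/members/photos/", "keywords=borrel&page=2"))
# -> /members/photos/?keywords=borrel&
print(build_pagination_base_url("/members/photos/", "page=3"))
# -> /members/photos/?
```

A pagination template can then emit links as `base_url` followed by `page=2`, `page=3`, and so on, which is presumably how the shared paginated template consumes the new context value.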
gh_patches_debug_23410
rasdani/github-patches
git_diff
OCA__bank-payment-630
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [12.0][BUG] account_payment_sale Hi I have found a bug in module account_payment_sale, but I am not sure how to fix it nicely. The payment_mode_id does not propagate from the sale order to the invoice. I guess the tests are a bit to naive, that is why they pass anyway. Here we try to propagate the payment mode : https://github.com/OCA/bank-payment/blob/12.0/account_payment_sale/models/sale_order.py#L35 Here, the invoice is created with the right value (coming from the SO) : https://github.com/OCA/OCB/blob/12.0/addons/sale/models/sale.py#L521 And it is overriden here https://github.com/OCA/OCB/blob/12.0/addons/sale/models/sale.py#L570 I really don't get why they have refactored it this way, they create the invoice and then they override a lot of values... And I do not really see a clean solution to solve this. Any idea? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `account_payment_sale/models/sale_order.py` Content: ``` 1 # Copyright 2014-2016 Akretion - Alexis de Lattre 2 # License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl). 3 4 from odoo import models, fields, api 5 6 7 class SaleOrder(models.Model): 8 _inherit = "sale.order" 9 10 payment_mode_id = fields.Many2one( 11 'account.payment.mode', string='Payment Mode', 12 domain=[('payment_type', '=', 'inbound')]) 13 14 def _get_payment_mode_vals(self, vals): 15 if self.payment_mode_id: 16 vals['payment_mode_id'] = self.payment_mode_id.id 17 if self.payment_mode_id.bank_account_link == 'fixed': 18 vals['partner_bank_id'] =\ 19 self.payment_mode_id.fixed_journal_id.bank_account_id.id 20 return vals 21 22 @api.onchange('partner_id') 23 def onchange_partner_id(self): 24 res = super().onchange_partner_id() 25 if self.partner_id: 26 self.payment_mode_id = self.partner_id.customer_payment_mode_id 27 else: 28 self.payment_mode_id = False 29 return res 30 31 @api.multi 32 def _prepare_invoice(self): 33 """Copy bank partner from sale order to invoice""" 34 vals = super()._prepare_invoice() 35 return self._get_payment_mode_vals(vals) 36 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/account_payment_sale/models/sale_order.py b/account_payment_sale/models/sale_order.py --- a/account_payment_sale/models/sale_order.py +++ b/account_payment_sale/models/sale_order.py @@ -33,3 +33,31 @@ """Copy bank partner from sale order to invoice""" vals = super()._prepare_invoice() return self._get_payment_mode_vals(vals) + + def _finalize_invoices(self, invoices, references): + """ + Invoked after creating invoices at the end of action_invoice_create. + + We must override this method since the onchange on partner is called by + the base method and therefore will change the specific payment_mode set + on the SO if one is defined on the partner.. + + :param invoices: {group_key: invoice} + :param references: {invoice: order} + """ + payment_vals_by_invoice = {} + for invoice in invoices.values(): + payment_vals_by_invoice[invoice] = { + 'payment_mode_id': invoice.payment_mode_id.id, + 'partner_bank_id': invoice.partner_bank_id.id + } + res = super()._finalize_invoices(invoices, references) + for invoice in invoices.values(): + payment_vals = payment_vals_by_invoice[invoice] + if invoice.payment_mode_id.id == payment_vals['payment_mode_id']: + payment_vals.pop("payment_mode_id") + if invoice.partner_bank_id.id == payment_vals["partner_bank_id"]: + payment_vals.pop("partner_bank_id") + if payment_vals: + invoice.write(payment_vals) + return res
{"golden_diff": "diff --git a/account_payment_sale/models/sale_order.py b/account_payment_sale/models/sale_order.py\n--- a/account_payment_sale/models/sale_order.py\n+++ b/account_payment_sale/models/sale_order.py\n@@ -33,3 +33,31 @@\n \"\"\"Copy bank partner from sale order to invoice\"\"\"\n vals = super()._prepare_invoice()\n return self._get_payment_mode_vals(vals)\n+\n+ def _finalize_invoices(self, invoices, references):\n+ \"\"\"\n+ Invoked after creating invoices at the end of action_invoice_create.\n+\n+ We must override this method since the onchange on partner is called by\n+ the base method and therefore will change the specific payment_mode set\n+ on the SO if one is defined on the partner..\n+\n+ :param invoices: {group_key: invoice}\n+ :param references: {invoice: order}\n+ \"\"\"\n+ payment_vals_by_invoice = {}\n+ for invoice in invoices.values():\n+ payment_vals_by_invoice[invoice] = {\n+ 'payment_mode_id': invoice.payment_mode_id.id,\n+ 'partner_bank_id': invoice.partner_bank_id.id\n+ }\n+ res = super()._finalize_invoices(invoices, references)\n+ for invoice in invoices.values():\n+ payment_vals = payment_vals_by_invoice[invoice]\n+ if invoice.payment_mode_id.id == payment_vals['payment_mode_id']:\n+ payment_vals.pop(\"payment_mode_id\")\n+ if invoice.partner_bank_id.id == payment_vals[\"partner_bank_id\"]:\n+ payment_vals.pop(\"partner_bank_id\")\n+ if payment_vals:\n+ invoice.write(payment_vals)\n+ return res\n", "issue": "[12.0][BUG] account_payment_sale\nHi\r\nI have found a bug in module account_payment_sale, but I am not sure how to fix it nicely.\r\nThe payment_mode_id does not propagate from the sale order to the invoice. \r\nI guess the tests are a bit to naive, that is why they pass anyway.\r\nHere we try to propagate the payment mode : https://github.com/OCA/bank-payment/blob/12.0/account_payment_sale/models/sale_order.py#L35\r\nHere, the invoice is created with the right value (coming from the SO) : https://github.com/OCA/OCB/blob/12.0/addons/sale/models/sale.py#L521\r\nAnd it is overriden here https://github.com/OCA/OCB/blob/12.0/addons/sale/models/sale.py#L570\r\n\r\nI really don't get why they have refactored it this way, they create the invoice and then they override a lot of values...\r\nAnd I do not really see a clean solution to solve this.\r\nAny idea?\n", "before_files": [{"content": "# Copyright 2014-2016 Akretion - Alexis de Lattre\n# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).\n\nfrom odoo import models, fields, api\n\n\nclass SaleOrder(models.Model):\n _inherit = \"sale.order\"\n\n payment_mode_id = fields.Many2one(\n 'account.payment.mode', string='Payment Mode',\n domain=[('payment_type', '=', 'inbound')])\n\n def _get_payment_mode_vals(self, vals):\n if self.payment_mode_id:\n vals['payment_mode_id'] = self.payment_mode_id.id\n if self.payment_mode_id.bank_account_link == 'fixed':\n vals['partner_bank_id'] =\\\n self.payment_mode_id.fixed_journal_id.bank_account_id.id\n return vals\n\n @api.onchange('partner_id')\n def onchange_partner_id(self):\n res = super().onchange_partner_id()\n if self.partner_id:\n self.payment_mode_id = self.partner_id.customer_payment_mode_id\n else:\n self.payment_mode_id = False\n return res\n\n @api.multi\n def _prepare_invoice(self):\n \"\"\"Copy bank partner from sale order to invoice\"\"\"\n vals = super()._prepare_invoice()\n return self._get_payment_mode_vals(vals)\n", "path": "account_payment_sale/models/sale_order.py"}], "after_files": [{"content": "# Copyright 2014-2016 Akretion - Alexis de 
Lattre\n# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).\n\nfrom odoo import models, fields, api\n\n\nclass SaleOrder(models.Model):\n _inherit = \"sale.order\"\n\n payment_mode_id = fields.Many2one(\n 'account.payment.mode', string='Payment Mode',\n domain=[('payment_type', '=', 'inbound')])\n\n def _get_payment_mode_vals(self, vals):\n if self.payment_mode_id:\n vals['payment_mode_id'] = self.payment_mode_id.id\n if self.payment_mode_id.bank_account_link == 'fixed':\n vals['partner_bank_id'] =\\\n self.payment_mode_id.fixed_journal_id.bank_account_id.id\n return vals\n\n @api.onchange('partner_id')\n def onchange_partner_id(self):\n res = super().onchange_partner_id()\n if self.partner_id:\n self.payment_mode_id = self.partner_id.customer_payment_mode_id\n else:\n self.payment_mode_id = False\n return res\n\n @api.multi\n def _prepare_invoice(self):\n \"\"\"Copy bank partner from sale order to invoice\"\"\"\n vals = super()._prepare_invoice()\n return self._get_payment_mode_vals(vals)\n\n def _finalize_invoices(self, invoices, references):\n \"\"\"\n Invoked after creating invoices at the end of action_invoice_create.\n\n We must override this method since the onchange on partner is called by\n the base method and therefore will change the specific payment_mode set\n on the SO if one is defined on the partner..\n\n :param invoices: {group_key: invoice}\n :param references: {invoice: order}\n \"\"\"\n payment_vals_by_invoice = {}\n for invoice in invoices.values():\n payment_vals_by_invoice[invoice] = {\n 'payment_mode_id': invoice.payment_mode_id.id,\n 'partner_bank_id': invoice.partner_bank_id.id\n }\n res = super()._finalize_invoices(invoices, references)\n for invoice in invoices.values():\n payment_vals = payment_vals_by_invoice[invoice]\n if invoice.payment_mode_id.id == payment_vals['payment_mode_id']:\n payment_vals.pop(\"payment_mode_id\")\n if invoice.partner_bank_id.id == payment_vals[\"partner_bank_id\"]:\n payment_vals.pop(\"partner_bank_id\")\n if payment_vals:\n invoice.write(payment_vals)\n return res\n", "path": "account_payment_sale/models/sale_order.py"}]}
836
350
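The fix above snapshots `payment_mode_id` and `partner_bank_id` right after invoice creation and writes them back once the partner onchange inside `_finalize_invoices` has run. A regression test along the following lines would have caught the original bug. This is only a sketch under Odoo 12 conventions: the partner, product and the two inbound payment modes are assumed to be created in `setUpClass` (not shown), and the exact mandatory fields depend on the modules installed.

```python
from odoo.tests.common import SavepointCase

class TestSalePaymentModePropagation(SavepointCase):
    # Assumed from setUpClass: cls.partner (customer_payment_mode_id = cls.mode_partner),
    # cls.product, and a second inbound payment mode cls.mode_order.

    def test_payment_mode_survives_invoicing(self):
        order = self.env["sale.order"].create({
            "partner_id": self.partner.id,
            # Deliberately different from the partner's default mode.
            "payment_mode_id": self.mode_order.id,
            "order_line": [(0, 0, {
                "product_id": self.product.id,
                "name": self.product.name,
                "product_uom": self.product.uom_id.id,
                "product_uom_qty": 1,
                "price_unit": 100.0,
            })],
        })
        order.action_confirm()
        order.action_invoice_create()
        invoice = order.invoice_ids[:1]
        # Without the _finalize_invoices override, the partner onchange resets the
        # invoice back to the partner's default mode and this assertion fails.
        self.assertEqual(invoice.payment_mode_id, order.payment_mode_id)
```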
gh_patches_debug_15077
rasdani/github-patches
git_diff
microsoft__botbuilder-python-1748
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- botbuilder-core library is missing the botframework-streaming dependency ## Version 4.14.0.20210616.dev252366 ## Describe the bug The botbuilder-core library is missing the botframework-streaming dependency. When running a python bot with the botbuilder-core library installed, it won't run because it is missing the botframework-streaming dependency. The dependency reference is missing from the requirements.txt file, and this new library is not published in any of the regular packages indexes ([test.pypi](https://test.pypi.org/), [pypi](https://pypi.org/) and [azure artifacts](https://dev.azure.com/ConversationalAI/BotFramework/_packaging?_a=feed&feed=SDK%40Local)), so it can't be installed manually. When running the bots locally it is possible to install the dependency from a local folder with the code cloned from the repo. ## To Reproduce 1. Open a bot that uses the botbuilder-core library. 2. Install a preview version (4.14.x). 3. Run the bot. ## Expected behavior The dependencies being installed should install all the required sub-dependencies or have them available for manual installation. ## Screenshots ![image](https://user-images.githubusercontent.com/38112957/122459875-d79f2d80-cf87-11eb-93dd-d6aaf5128fbd.png) ## Additional context This issue is blocking the pipelines from the [BotFramework-FunctionalTests](https://github.com/microsoft/BotFramework-FunctionalTests/) repository from testing preview versions of the BotBuilder Python libraries. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `libraries/botframework-streaming/setup.py` Content: ``` 1 # Copyright (c) Microsoft Corporation. All rights reserved. 2 # Licensed under the MIT License. 
3 4 import os 5 from setuptools import setup 6 7 VERSION = os.environ["packageVersion"] if "packageVersion" in os.environ else "4.12.0" 8 REQUIRES = [ 9 "botbuilder-schema>=4.12.0", 10 "botframework-connector>=4.12.0", 11 "botbuilder-core>=4.12.0", 12 ] 13 14 root = os.path.abspath(os.path.dirname(__file__)) 15 16 with open(os.path.join(root, "botframework", "streaming", "about.py")) as f: 17 package_info = {} 18 info = f.read() 19 exec(info, package_info) 20 21 with open(os.path.join(root, "README.rst"), encoding="utf-8") as f: 22 long_description = f.read() 23 24 setup( 25 name=package_info["__title__"], 26 version=package_info["__version__"], 27 url=package_info["__uri__"], 28 author=package_info["__author__"], 29 description=package_info["__description__"], 30 keywords=["BotFrameworkStreaming", "bots", "ai", "botframework", "botframework",], 31 long_description=long_description, 32 long_description_content_type="text/x-rst", 33 license=package_info["__license__"], 34 packages=[ 35 "botframework.streaming", 36 "botframework.streaming.payloads", 37 "botframework.streaming.payloads.models", 38 "botframework.streaming.payload_transport", 39 "botframework.streaming.transport", 40 "botframework.streaming.transport.web_socket", 41 ], 42 install_requires=REQUIRES, 43 classifiers=[ 44 "Programming Language :: Python :: 3.7", 45 "Intended Audience :: Developers", 46 "License :: OSI Approved :: MIT License", 47 "Operating System :: OS Independent", 48 "Development Status :: 5 - Production/Stable", 49 "Topic :: Scientific/Engineering :: Artificial Intelligence", 50 ], 51 ) 52 ``` Path: `libraries/botbuilder-core/setup.py` Content: ``` 1 # Copyright (c) Microsoft Corporation. All rights reserved. 2 # Licensed under the MIT License. 3 4 import os 5 from setuptools import setup 6 7 VERSION = os.environ["packageVersion"] if "packageVersion" in os.environ else "4.14.0" 8 REQUIRES = [ 9 "botbuilder-schema==4.14.0", 10 "botframework-connector==4.14.0", 11 "jsonpickle>=1.2,<1.5", 12 ] 13 14 root = os.path.abspath(os.path.dirname(__file__)) 15 16 with open(os.path.join(root, "botbuilder", "core", "about.py")) as f: 17 package_info = {} 18 info = f.read() 19 exec(info, package_info) 20 21 with open(os.path.join(root, "README.rst"), encoding="utf-8") as f: 22 long_description = f.read() 23 24 setup( 25 name=package_info["__title__"], 26 version=package_info["__version__"], 27 url=package_info["__uri__"], 28 author=package_info["__author__"], 29 description=package_info["__description__"], 30 keywords=["BotBuilderCore", "bots", "ai", "botframework", "botbuilder"], 31 long_description=long_description, 32 long_description_content_type="text/x-rst", 33 license=package_info["__license__"], 34 packages=[ 35 "botbuilder.core", 36 "botbuilder.core.adapters", 37 "botbuilder.core.inspection", 38 "botbuilder.core.integration", 39 "botbuilder.core.skills", 40 "botbuilder.core.streaming", 41 "botbuilder.core.teams", 42 "botbuilder.core.oauth", 43 ], 44 install_requires=REQUIRES, 45 classifiers=[ 46 "Programming Language :: Python :: 3.7", 47 "Intended Audience :: Developers", 48 "License :: OSI Approved :: MIT License", 49 "Operating System :: OS Independent", 50 "Development Status :: 5 - Production/Stable", 51 "Topic :: Scientific/Engineering :: Artificial Intelligence", 52 ], 53 ) 54 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/libraries/botbuilder-core/setup.py b/libraries/botbuilder-core/setup.py --- a/libraries/botbuilder-core/setup.py +++ b/libraries/botbuilder-core/setup.py @@ -8,6 +8,7 @@ REQUIRES = [ "botbuilder-schema==4.14.0", "botframework-connector==4.14.0", + "botframework-streaming==4.14.0", "jsonpickle>=1.2,<1.5", ] diff --git a/libraries/botframework-streaming/setup.py b/libraries/botframework-streaming/setup.py --- a/libraries/botframework-streaming/setup.py +++ b/libraries/botframework-streaming/setup.py @@ -4,11 +4,10 @@ import os from setuptools import setup -VERSION = os.environ["packageVersion"] if "packageVersion" in os.environ else "4.12.0" +VERSION = os.environ["packageVersion"] if "packageVersion" in os.environ else "4.14.0" REQUIRES = [ "botbuilder-schema>=4.12.0", "botframework-connector>=4.12.0", - "botbuilder-core>=4.12.0", ] root = os.path.abspath(os.path.dirname(__file__))
{"golden_diff": "diff --git a/libraries/botbuilder-core/setup.py b/libraries/botbuilder-core/setup.py\n--- a/libraries/botbuilder-core/setup.py\n+++ b/libraries/botbuilder-core/setup.py\n@@ -8,6 +8,7 @@\n REQUIRES = [\n \"botbuilder-schema==4.14.0\",\n \"botframework-connector==4.14.0\",\n+ \"botframework-streaming==4.14.0\",\n \"jsonpickle>=1.2,<1.5\",\n ]\n \ndiff --git a/libraries/botframework-streaming/setup.py b/libraries/botframework-streaming/setup.py\n--- a/libraries/botframework-streaming/setup.py\n+++ b/libraries/botframework-streaming/setup.py\n@@ -4,11 +4,10 @@\n import os\n from setuptools import setup\n \n-VERSION = os.environ[\"packageVersion\"] if \"packageVersion\" in os.environ else \"4.12.0\"\n+VERSION = os.environ[\"packageVersion\"] if \"packageVersion\" in os.environ else \"4.14.0\"\n REQUIRES = [\n \"botbuilder-schema>=4.12.0\",\n \"botframework-connector>=4.12.0\",\n- \"botbuilder-core>=4.12.0\",\n ]\n \n root = os.path.abspath(os.path.dirname(__file__))\n", "issue": "botbuilder-core library is missing the botframework-streaming dependency\n## Version\r\n4.14.0.20210616.dev252366\r\n\r\n## Describe the bug\r\nThe botbuilder-core library is missing the botframework-streaming dependency.\r\nWhen running a python bot with the botbuilder-core library installed, it won't run because it is missing the botframework-streaming dependency.\r\nThe dependency reference is missing from the requirements.txt file, and this new library is not published in any of the regular packages indexes ([test.pypi](https://test.pypi.org/), [pypi](https://pypi.org/) and [azure artifacts](https://dev.azure.com/ConversationalAI/BotFramework/_packaging?_a=feed&feed=SDK%40Local)), so it can't be installed manually.\r\nWhen running the bots locally it is possible to install the dependency from a local folder with the code cloned from the repo.\r\n\r\n## To Reproduce\r\n1. Open a bot that uses the botbuilder-core library.\r\n2. Install a preview version (4.14.x).\r\n3. Run the bot.\r\n\r\n## Expected behavior\r\nThe dependencies being installed should install all the required sub-dependencies or have them available for manual installation.\r\n\r\n## Screenshots\r\n![image](https://user-images.githubusercontent.com/38112957/122459875-d79f2d80-cf87-11eb-93dd-d6aaf5128fbd.png)\r\n\r\n## Additional context\r\nThis issue is blocking the pipelines from the [BotFramework-FunctionalTests](https://github.com/microsoft/BotFramework-FunctionalTests/) repository from testing preview versions of the BotBuilder Python libraries.\r\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# Licensed under the MIT License.\n\nimport os\nfrom setuptools import setup\n\nVERSION = os.environ[\"packageVersion\"] if \"packageVersion\" in os.environ else \"4.12.0\"\nREQUIRES = [\n \"botbuilder-schema>=4.12.0\",\n \"botframework-connector>=4.12.0\",\n \"botbuilder-core>=4.12.0\",\n]\n\nroot = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(root, \"botframework\", \"streaming\", \"about.py\")) as f:\n package_info = {}\n info = f.read()\n exec(info, package_info)\n\nwith open(os.path.join(root, \"README.rst\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nsetup(\n name=package_info[\"__title__\"],\n version=package_info[\"__version__\"],\n url=package_info[\"__uri__\"],\n author=package_info[\"__author__\"],\n description=package_info[\"__description__\"],\n keywords=[\"BotFrameworkStreaming\", \"bots\", \"ai\", \"botframework\", \"botframework\",],\n long_description=long_description,\n long_description_content_type=\"text/x-rst\",\n license=package_info[\"__license__\"],\n packages=[\n \"botframework.streaming\",\n \"botframework.streaming.payloads\",\n \"botframework.streaming.payloads.models\",\n \"botframework.streaming.payload_transport\",\n \"botframework.streaming.transport\",\n \"botframework.streaming.transport.web_socket\",\n ],\n install_requires=REQUIRES,\n classifiers=[\n \"Programming Language :: Python :: 3.7\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Development Status :: 5 - Production/Stable\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n)\n", "path": "libraries/botframework-streaming/setup.py"}, {"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\nimport os\nfrom setuptools import setup\n\nVERSION = os.environ[\"packageVersion\"] if \"packageVersion\" in os.environ else \"4.14.0\"\nREQUIRES = [\n \"botbuilder-schema==4.14.0\",\n \"botframework-connector==4.14.0\",\n \"jsonpickle>=1.2,<1.5\",\n]\n\nroot = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(root, \"botbuilder\", \"core\", \"about.py\")) as f:\n package_info = {}\n info = f.read()\n exec(info, package_info)\n\nwith open(os.path.join(root, \"README.rst\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nsetup(\n name=package_info[\"__title__\"],\n version=package_info[\"__version__\"],\n url=package_info[\"__uri__\"],\n author=package_info[\"__author__\"],\n description=package_info[\"__description__\"],\n keywords=[\"BotBuilderCore\", \"bots\", \"ai\", \"botframework\", \"botbuilder\"],\n long_description=long_description,\n long_description_content_type=\"text/x-rst\",\n license=package_info[\"__license__\"],\n packages=[\n \"botbuilder.core\",\n \"botbuilder.core.adapters\",\n \"botbuilder.core.inspection\",\n \"botbuilder.core.integration\",\n \"botbuilder.core.skills\",\n \"botbuilder.core.streaming\",\n \"botbuilder.core.teams\",\n \"botbuilder.core.oauth\",\n ],\n install_requires=REQUIRES,\n classifiers=[\n \"Programming Language :: Python :: 3.7\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Development Status :: 5 - Production/Stable\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n)\n", "path": "libraries/botbuilder-core/setup.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# Licensed under the MIT License.\n\nimport os\nfrom setuptools import setup\n\nVERSION = os.environ[\"packageVersion\"] if \"packageVersion\" in os.environ else \"4.14.0\"\nREQUIRES = [\n \"botbuilder-schema>=4.12.0\",\n \"botframework-connector>=4.12.0\",\n]\n\nroot = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(root, \"botframework\", \"streaming\", \"about.py\")) as f:\n package_info = {}\n info = f.read()\n exec(info, package_info)\n\nwith open(os.path.join(root, \"README.rst\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nsetup(\n name=package_info[\"__title__\"],\n version=package_info[\"__version__\"],\n url=package_info[\"__uri__\"],\n author=package_info[\"__author__\"],\n description=package_info[\"__description__\"],\n keywords=[\"BotFrameworkStreaming\", \"bots\", \"ai\", \"botframework\", \"botframework\",],\n long_description=long_description,\n long_description_content_type=\"text/x-rst\",\n license=package_info[\"__license__\"],\n packages=[\n \"botframework.streaming\",\n \"botframework.streaming.payloads\",\n \"botframework.streaming.payloads.models\",\n \"botframework.streaming.payload_transport\",\n \"botframework.streaming.transport\",\n \"botframework.streaming.transport.web_socket\",\n ],\n install_requires=REQUIRES,\n classifiers=[\n \"Programming Language :: Python :: 3.7\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Development Status :: 5 - Production/Stable\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n)\n", "path": "libraries/botframework-streaming/setup.py"}, {"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\nimport os\nfrom setuptools import setup\n\nVERSION = os.environ[\"packageVersion\"] if \"packageVersion\" in os.environ else \"4.14.0\"\nREQUIRES = [\n \"botbuilder-schema==4.14.0\",\n \"botframework-connector==4.14.0\",\n \"botframework-streaming==4.14.0\",\n \"jsonpickle>=1.2,<1.5\",\n]\n\nroot = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(root, \"botbuilder\", \"core\", \"about.py\")) as f:\n package_info = {}\n info = f.read()\n exec(info, package_info)\n\nwith open(os.path.join(root, \"README.rst\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nsetup(\n name=package_info[\"__title__\"],\n version=package_info[\"__version__\"],\n url=package_info[\"__uri__\"],\n author=package_info[\"__author__\"],\n description=package_info[\"__description__\"],\n keywords=[\"BotBuilderCore\", \"bots\", \"ai\", \"botframework\", \"botbuilder\"],\n long_description=long_description,\n long_description_content_type=\"text/x-rst\",\n license=package_info[\"__license__\"],\n packages=[\n \"botbuilder.core\",\n \"botbuilder.core.adapters\",\n \"botbuilder.core.inspection\",\n \"botbuilder.core.integration\",\n \"botbuilder.core.skills\",\n \"botbuilder.core.streaming\",\n \"botbuilder.core.teams\",\n \"botbuilder.core.oauth\",\n ],\n install_requires=REQUIRES,\n classifiers=[\n \"Programming Language :: Python :: 3.7\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Development Status :: 5 - Production/Stable\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n)\n", "path": "libraries/botbuilder-core/setup.py"}]}
1,674
293
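The patch resolves the issue in two moves: `botbuilder-core` now pins `botframework-streaming==4.14.0`, and the streaming package drops its own `botbuilder-core` requirement so the dependency no longer points both ways. Once the patched 4.14 packages are installed, the declared dependency direction can be spot-checked from the package metadata; the snippet below uses only the standard library (Python 3.8+) and is a quick sanity check rather than part of the SDK.

```python
from importlib.metadata import requires

core_reqs = requires("botbuilder-core") or []
streaming_reqs = requires("botframework-streaming") or []

# botbuilder-core must now declare the streaming package...
assert any(req.startswith("botframework-streaming") for req in core_reqs), core_reqs
# ...and botframework-streaming must no longer depend back on botbuilder-core.
assert not any(req.startswith("botbuilder-core") for req in streaming_reqs), streaming_reqs
print("dependency direction looks correct")
```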
gh_patches_debug_30090
rasdani/github-patches
git_diff
Textualize__textual-4299
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- `Placeholder` has no `disabled` `__init__` keyword parameter It is intended that `disabled` is one of the "standard" keyword parameters for widgets in Textual; this seems to have never been added to `Placeholder`. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/textual/widgets/_placeholder.py` Content: ``` 1 """Provides a Textual placeholder widget; useful when designing an app's layout.""" 2 3 from __future__ import annotations 4 5 from itertools import cycle 6 from typing import TYPE_CHECKING, Iterator 7 from weakref import WeakKeyDictionary 8 9 from typing_extensions import Literal, Self 10 11 from .. import events 12 13 if TYPE_CHECKING: 14 from ..app import RenderResult 15 16 from ..css._error_tools import friendly_list 17 from ..reactive import Reactive, reactive 18 from ..widget import Widget 19 20 if TYPE_CHECKING: 21 from textual.app import App 22 23 PlaceholderVariant = Literal["default", "size", "text"] 24 """The different variants of placeholder.""" 25 26 _VALID_PLACEHOLDER_VARIANTS_ORDERED: list[PlaceholderVariant] = [ 27 "default", 28 "size", 29 "text", 30 ] 31 _VALID_PLACEHOLDER_VARIANTS: set[PlaceholderVariant] = set( 32 _VALID_PLACEHOLDER_VARIANTS_ORDERED 33 ) 34 _PLACEHOLDER_BACKGROUND_COLORS = [ 35 "#881177", 36 "#aa3355", 37 "#cc6666", 38 "#ee9944", 39 "#eedd00", 40 "#99dd55", 41 "#44dd88", 42 "#22ccbb", 43 "#00bbcc", 44 "#0099cc", 45 "#3366bb", 46 "#663399", 47 ] 48 _LOREM_IPSUM_PLACEHOLDER_TEXT = "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Etiam feugiat ac elit sit amet accumsan. Suspendisse bibendum nec libero quis gravida. Phasellus id eleifend ligula. Nullam imperdiet sem tellus, sed vehicula nisl faucibus sit amet. Praesent iaculis tempor ultricies. Sed lacinia, tellus id rutrum lacinia, sapien sapien congue mauris, sit amet pellentesque quam quam vel nisl. Curabitur vulputate erat pellentesque mauris posuere, non dictum risus mattis." 49 50 51 class InvalidPlaceholderVariant(Exception): 52 """Raised when an invalid Placeholder variant is set.""" 53 54 55 class Placeholder(Widget): 56 """A simple placeholder widget to use before you build your custom widgets. 57 58 This placeholder has a couple of variants that show different data. 59 Clicking the placeholder cycles through the available variants, but a placeholder 60 can also be initialised in a specific variant. 61 62 The variants available are: 63 64 | Variant | Placeholder shows | 65 |---------|------------------------------------------------| 66 | default | Identifier label or the ID of the placeholder. | 67 | size | Size of the placeholder. | 68 | text | Lorem Ipsum text. | 69 """ 70 71 DEFAULT_CSS = """ 72 Placeholder { 73 content-align: center middle; 74 overflow: hidden; 75 color: $text; 76 } 77 Placeholder.-text { 78 padding: 1; 79 } 80 """ 81 82 # Consecutive placeholders get assigned consecutive colors. 
83 _COLORS: WeakKeyDictionary[App, Iterator[str]] = WeakKeyDictionary() 84 _SIZE_RENDER_TEMPLATE = "[b]{} x {}[/b]" 85 86 variant: Reactive[PlaceholderVariant] = reactive[PlaceholderVariant]("default") 87 88 _renderables: dict[PlaceholderVariant, str] 89 90 def __init__( 91 self, 92 label: str | None = None, 93 variant: PlaceholderVariant = "default", 94 *, 95 name: str | None = None, 96 id: str | None = None, 97 classes: str | None = None, 98 ) -> None: 99 """Create a Placeholder widget. 100 101 Args: 102 label: The label to identify the placeholder. 103 If no label is present, uses the placeholder ID instead. 104 variant: The variant of the placeholder. 105 name: The name of the placeholder. 106 id: The ID of the placeholder in the DOM. 107 classes: A space separated string with the CSS classes 108 of the placeholder, if any. 109 """ 110 # Create and cache renderables for all the variants. 111 self._renderables = { 112 "default": label if label else f"#{id}" if id else "Placeholder", 113 "size": "", 114 "text": "\n\n".join(_LOREM_IPSUM_PLACEHOLDER_TEXT for _ in range(5)), 115 } 116 117 super().__init__(name=name, id=id, classes=classes) 118 119 self.variant = self.validate_variant(variant) 120 """The current variant of the placeholder.""" 121 122 # Set a cycle through the variants with the correct starting point. 123 self._variants_cycle = cycle(_VALID_PLACEHOLDER_VARIANTS_ORDERED) 124 while next(self._variants_cycle) != self.variant: 125 pass 126 127 async def _on_compose(self, event: events.Compose) -> None: 128 """Set the color for this placeholder.""" 129 colors = Placeholder._COLORS.setdefault( 130 self.app, cycle(_PLACEHOLDER_BACKGROUND_COLORS) 131 ) 132 self.styles.background = f"{next(colors)} 50%" 133 134 def render(self) -> RenderResult: 135 """Render the placeholder. 136 137 Returns: 138 The value to render. 139 """ 140 return self._renderables[self.variant] 141 142 def cycle_variant(self) -> Self: 143 """Get the next variant in the cycle. 144 145 Returns: 146 The `Placeholder` instance. 147 """ 148 self.variant = next(self._variants_cycle) 149 return self 150 151 def watch_variant( 152 self, old_variant: PlaceholderVariant, variant: PlaceholderVariant 153 ) -> None: 154 self.remove_class(f"-{old_variant}") 155 self.add_class(f"-{variant}") 156 157 def validate_variant(self, variant: PlaceholderVariant) -> PlaceholderVariant: 158 """Validate the variant to which the placeholder was set.""" 159 if variant not in _VALID_PLACEHOLDER_VARIANTS: 160 raise InvalidPlaceholderVariant( 161 "Valid placeholder variants are " 162 + f"{friendly_list(_VALID_PLACEHOLDER_VARIANTS)}" 163 ) 164 return variant 165 166 async def _on_click(self, _: events.Click) -> None: 167 """Click handler to cycle through the placeholder variants.""" 168 self.cycle_variant() 169 170 def _on_resize(self, event: events.Resize) -> None: 171 """Update the placeholder "size" variant with the new placeholder size.""" 172 self._renderables["size"] = self._SIZE_RENDER_TEMPLATE.format(*event.size) 173 if self.variant == "size": 174 self.refresh() 175 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/textual/widgets/_placeholder.py b/src/textual/widgets/_placeholder.py --- a/src/textual/widgets/_placeholder.py +++ b/src/textual/widgets/_placeholder.py @@ -73,6 +73,10 @@ content-align: center middle; overflow: hidden; color: $text; + + &:disabled { + opacity: 0.7; + } } Placeholder.-text { padding: 1; @@ -95,6 +99,7 @@ name: str | None = None, id: str | None = None, classes: str | None = None, + disabled: bool = False, ) -> None: """Create a Placeholder widget. @@ -106,6 +111,7 @@ id: The ID of the placeholder in the DOM. classes: A space separated string with the CSS classes of the placeholder, if any. + disabled: Whether the placeholder is disabled or not. """ # Create and cache renderables for all the variants. self._renderables = { @@ -114,7 +120,7 @@ "text": "\n\n".join(_LOREM_IPSUM_PLACEHOLDER_TEXT for _ in range(5)), } - super().__init__(name=name, id=id, classes=classes) + super().__init__(name=name, id=id, classes=classes, disabled=disabled) self.variant = self.validate_variant(variant) """The current variant of the placeholder."""
{"golden_diff": "diff --git a/src/textual/widgets/_placeholder.py b/src/textual/widgets/_placeholder.py\n--- a/src/textual/widgets/_placeholder.py\n+++ b/src/textual/widgets/_placeholder.py\n@@ -73,6 +73,10 @@\n content-align: center middle;\n overflow: hidden;\n color: $text;\n+\n+ &:disabled {\n+ opacity: 0.7;\n+ }\n }\n Placeholder.-text {\n padding: 1;\n@@ -95,6 +99,7 @@\n name: str | None = None,\n id: str | None = None,\n classes: str | None = None,\n+ disabled: bool = False,\n ) -> None:\n \"\"\"Create a Placeholder widget.\n \n@@ -106,6 +111,7 @@\n id: The ID of the placeholder in the DOM.\n classes: A space separated string with the CSS classes\n of the placeholder, if any.\n+ disabled: Whether the placeholder is disabled or not.\n \"\"\"\n # Create and cache renderables for all the variants.\n self._renderables = {\n@@ -114,7 +120,7 @@\n \"text\": \"\\n\\n\".join(_LOREM_IPSUM_PLACEHOLDER_TEXT for _ in range(5)),\n }\n \n- super().__init__(name=name, id=id, classes=classes)\n+ super().__init__(name=name, id=id, classes=classes, disabled=disabled)\n \n self.variant = self.validate_variant(variant)\n \"\"\"The current variant of the placeholder.\"\"\"\n", "issue": "`Placeholder` has no `disabled` `__init__` keyword parameter\nIt is intended that `disabled` is one of the \"standard\" keyword parameters for widgets in Textual; this seems to have never been added to `Placeholder`.\n", "before_files": [{"content": "\"\"\"Provides a Textual placeholder widget; useful when designing an app's layout.\"\"\"\n\nfrom __future__ import annotations\n\nfrom itertools import cycle\nfrom typing import TYPE_CHECKING, Iterator\nfrom weakref import WeakKeyDictionary\n\nfrom typing_extensions import Literal, Self\n\nfrom .. import events\n\nif TYPE_CHECKING:\n from ..app import RenderResult\n\nfrom ..css._error_tools import friendly_list\nfrom ..reactive import Reactive, reactive\nfrom ..widget import Widget\n\nif TYPE_CHECKING:\n from textual.app import App\n\nPlaceholderVariant = Literal[\"default\", \"size\", \"text\"]\n\"\"\"The different variants of placeholder.\"\"\"\n\n_VALID_PLACEHOLDER_VARIANTS_ORDERED: list[PlaceholderVariant] = [\n \"default\",\n \"size\",\n \"text\",\n]\n_VALID_PLACEHOLDER_VARIANTS: set[PlaceholderVariant] = set(\n _VALID_PLACEHOLDER_VARIANTS_ORDERED\n)\n_PLACEHOLDER_BACKGROUND_COLORS = [\n \"#881177\",\n \"#aa3355\",\n \"#cc6666\",\n \"#ee9944\",\n \"#eedd00\",\n \"#99dd55\",\n \"#44dd88\",\n \"#22ccbb\",\n \"#00bbcc\",\n \"#0099cc\",\n \"#3366bb\",\n \"#663399\",\n]\n_LOREM_IPSUM_PLACEHOLDER_TEXT = \"Lorem ipsum dolor sit amet, consectetur adipiscing elit. Etiam feugiat ac elit sit amet accumsan. Suspendisse bibendum nec libero quis gravida. Phasellus id eleifend ligula. Nullam imperdiet sem tellus, sed vehicula nisl faucibus sit amet. Praesent iaculis tempor ultricies. Sed lacinia, tellus id rutrum lacinia, sapien sapien congue mauris, sit amet pellentesque quam quam vel nisl. 
Curabitur vulputate erat pellentesque mauris posuere, non dictum risus mattis.\"\n\n\nclass InvalidPlaceholderVariant(Exception):\n \"\"\"Raised when an invalid Placeholder variant is set.\"\"\"\n\n\nclass Placeholder(Widget):\n \"\"\"A simple placeholder widget to use before you build your custom widgets.\n\n This placeholder has a couple of variants that show different data.\n Clicking the placeholder cycles through the available variants, but a placeholder\n can also be initialised in a specific variant.\n\n The variants available are:\n\n | Variant | Placeholder shows |\n |---------|------------------------------------------------|\n | default | Identifier label or the ID of the placeholder. |\n | size | Size of the placeholder. |\n | text | Lorem Ipsum text. |\n \"\"\"\n\n DEFAULT_CSS = \"\"\"\n Placeholder {\n content-align: center middle;\n overflow: hidden;\n color: $text;\n }\n Placeholder.-text {\n padding: 1;\n }\n \"\"\"\n\n # Consecutive placeholders get assigned consecutive colors.\n _COLORS: WeakKeyDictionary[App, Iterator[str]] = WeakKeyDictionary()\n _SIZE_RENDER_TEMPLATE = \"[b]{} x {}[/b]\"\n\n variant: Reactive[PlaceholderVariant] = reactive[PlaceholderVariant](\"default\")\n\n _renderables: dict[PlaceholderVariant, str]\n\n def __init__(\n self,\n label: str | None = None,\n variant: PlaceholderVariant = \"default\",\n *,\n name: str | None = None,\n id: str | None = None,\n classes: str | None = None,\n ) -> None:\n \"\"\"Create a Placeholder widget.\n\n Args:\n label: The label to identify the placeholder.\n If no label is present, uses the placeholder ID instead.\n variant: The variant of the placeholder.\n name: The name of the placeholder.\n id: The ID of the placeholder in the DOM.\n classes: A space separated string with the CSS classes\n of the placeholder, if any.\n \"\"\"\n # Create and cache renderables for all the variants.\n self._renderables = {\n \"default\": label if label else f\"#{id}\" if id else \"Placeholder\",\n \"size\": \"\",\n \"text\": \"\\n\\n\".join(_LOREM_IPSUM_PLACEHOLDER_TEXT for _ in range(5)),\n }\n\n super().__init__(name=name, id=id, classes=classes)\n\n self.variant = self.validate_variant(variant)\n \"\"\"The current variant of the placeholder.\"\"\"\n\n # Set a cycle through the variants with the correct starting point.\n self._variants_cycle = cycle(_VALID_PLACEHOLDER_VARIANTS_ORDERED)\n while next(self._variants_cycle) != self.variant:\n pass\n\n async def _on_compose(self, event: events.Compose) -> None:\n \"\"\"Set the color for this placeholder.\"\"\"\n colors = Placeholder._COLORS.setdefault(\n self.app, cycle(_PLACEHOLDER_BACKGROUND_COLORS)\n )\n self.styles.background = f\"{next(colors)} 50%\"\n\n def render(self) -> RenderResult:\n \"\"\"Render the placeholder.\n\n Returns:\n The value to render.\n \"\"\"\n return self._renderables[self.variant]\n\n def cycle_variant(self) -> Self:\n \"\"\"Get the next variant in the cycle.\n\n Returns:\n The `Placeholder` instance.\n \"\"\"\n self.variant = next(self._variants_cycle)\n return self\n\n def watch_variant(\n self, old_variant: PlaceholderVariant, variant: PlaceholderVariant\n ) -> None:\n self.remove_class(f\"-{old_variant}\")\n self.add_class(f\"-{variant}\")\n\n def validate_variant(self, variant: PlaceholderVariant) -> PlaceholderVariant:\n \"\"\"Validate the variant to which the placeholder was set.\"\"\"\n if variant not in _VALID_PLACEHOLDER_VARIANTS:\n raise InvalidPlaceholderVariant(\n \"Valid placeholder variants are \"\n + 
f\"{friendly_list(_VALID_PLACEHOLDER_VARIANTS)}\"\n )\n return variant\n\n async def _on_click(self, _: events.Click) -> None:\n \"\"\"Click handler to cycle through the placeholder variants.\"\"\"\n self.cycle_variant()\n\n def _on_resize(self, event: events.Resize) -> None:\n \"\"\"Update the placeholder \"size\" variant with the new placeholder size.\"\"\"\n self._renderables[\"size\"] = self._SIZE_RENDER_TEMPLATE.format(*event.size)\n if self.variant == \"size\":\n self.refresh()\n", "path": "src/textual/widgets/_placeholder.py"}], "after_files": [{"content": "\"\"\"Provides a Textual placeholder widget; useful when designing an app's layout.\"\"\"\n\nfrom __future__ import annotations\n\nfrom itertools import cycle\nfrom typing import TYPE_CHECKING, Iterator\nfrom weakref import WeakKeyDictionary\n\nfrom typing_extensions import Literal, Self\n\nfrom .. import events\n\nif TYPE_CHECKING:\n from ..app import RenderResult\n\nfrom ..css._error_tools import friendly_list\nfrom ..reactive import Reactive, reactive\nfrom ..widget import Widget\n\nif TYPE_CHECKING:\n from textual.app import App\n\nPlaceholderVariant = Literal[\"default\", \"size\", \"text\"]\n\"\"\"The different variants of placeholder.\"\"\"\n\n_VALID_PLACEHOLDER_VARIANTS_ORDERED: list[PlaceholderVariant] = [\n \"default\",\n \"size\",\n \"text\",\n]\n_VALID_PLACEHOLDER_VARIANTS: set[PlaceholderVariant] = set(\n _VALID_PLACEHOLDER_VARIANTS_ORDERED\n)\n_PLACEHOLDER_BACKGROUND_COLORS = [\n \"#881177\",\n \"#aa3355\",\n \"#cc6666\",\n \"#ee9944\",\n \"#eedd00\",\n \"#99dd55\",\n \"#44dd88\",\n \"#22ccbb\",\n \"#00bbcc\",\n \"#0099cc\",\n \"#3366bb\",\n \"#663399\",\n]\n_LOREM_IPSUM_PLACEHOLDER_TEXT = \"Lorem ipsum dolor sit amet, consectetur adipiscing elit. Etiam feugiat ac elit sit amet accumsan. Suspendisse bibendum nec libero quis gravida. Phasellus id eleifend ligula. Nullam imperdiet sem tellus, sed vehicula nisl faucibus sit amet. Praesent iaculis tempor ultricies. Sed lacinia, tellus id rutrum lacinia, sapien sapien congue mauris, sit amet pellentesque quam quam vel nisl. Curabitur vulputate erat pellentesque mauris posuere, non dictum risus mattis.\"\n\n\nclass InvalidPlaceholderVariant(Exception):\n \"\"\"Raised when an invalid Placeholder variant is set.\"\"\"\n\n\nclass Placeholder(Widget):\n \"\"\"A simple placeholder widget to use before you build your custom widgets.\n\n This placeholder has a couple of variants that show different data.\n Clicking the placeholder cycles through the available variants, but a placeholder\n can also be initialised in a specific variant.\n\n The variants available are:\n\n | Variant | Placeholder shows |\n |---------|------------------------------------------------|\n | default | Identifier label or the ID of the placeholder. |\n | size | Size of the placeholder. |\n | text | Lorem Ipsum text. 
|\n \"\"\"\n\n DEFAULT_CSS = \"\"\"\n Placeholder {\n content-align: center middle;\n overflow: hidden;\n color: $text;\n\n &:disabled {\n opacity: 0.7;\n }\n }\n Placeholder.-text {\n padding: 1;\n }\n \"\"\"\n\n # Consecutive placeholders get assigned consecutive colors.\n _COLORS: WeakKeyDictionary[App, Iterator[str]] = WeakKeyDictionary()\n _SIZE_RENDER_TEMPLATE = \"[b]{} x {}[/b]\"\n\n variant: Reactive[PlaceholderVariant] = reactive[PlaceholderVariant](\"default\")\n\n _renderables: dict[PlaceholderVariant, str]\n\n def __init__(\n self,\n label: str | None = None,\n variant: PlaceholderVariant = \"default\",\n *,\n name: str | None = None,\n id: str | None = None,\n classes: str | None = None,\n disabled: bool = False,\n ) -> None:\n \"\"\"Create a Placeholder widget.\n\n Args:\n label: The label to identify the placeholder.\n If no label is present, uses the placeholder ID instead.\n variant: The variant of the placeholder.\n name: The name of the placeholder.\n id: The ID of the placeholder in the DOM.\n classes: A space separated string with the CSS classes\n of the placeholder, if any.\n disabled: Whether the placeholder is disabled or not.\n \"\"\"\n # Create and cache renderables for all the variants.\n self._renderables = {\n \"default\": label if label else f\"#{id}\" if id else \"Placeholder\",\n \"size\": \"\",\n \"text\": \"\\n\\n\".join(_LOREM_IPSUM_PLACEHOLDER_TEXT for _ in range(5)),\n }\n\n super().__init__(name=name, id=id, classes=classes, disabled=disabled)\n\n self.variant = self.validate_variant(variant)\n \"\"\"The current variant of the placeholder.\"\"\"\n\n # Set a cycle through the variants with the correct starting point.\n self._variants_cycle = cycle(_VALID_PLACEHOLDER_VARIANTS_ORDERED)\n while next(self._variants_cycle) != self.variant:\n pass\n\n async def _on_compose(self, event: events.Compose) -> None:\n \"\"\"Set the color for this placeholder.\"\"\"\n colors = Placeholder._COLORS.setdefault(\n self.app, cycle(_PLACEHOLDER_BACKGROUND_COLORS)\n )\n self.styles.background = f\"{next(colors)} 50%\"\n\n def render(self) -> RenderResult:\n \"\"\"Render the placeholder.\n\n Returns:\n The value to render.\n \"\"\"\n return self._renderables[self.variant]\n\n def cycle_variant(self) -> Self:\n \"\"\"Get the next variant in the cycle.\n\n Returns:\n The `Placeholder` instance.\n \"\"\"\n self.variant = next(self._variants_cycle)\n return self\n\n def watch_variant(\n self, old_variant: PlaceholderVariant, variant: PlaceholderVariant\n ) -> None:\n self.remove_class(f\"-{old_variant}\")\n self.add_class(f\"-{variant}\")\n\n def validate_variant(self, variant: PlaceholderVariant) -> PlaceholderVariant:\n \"\"\"Validate the variant to which the placeholder was set.\"\"\"\n if variant not in _VALID_PLACEHOLDER_VARIANTS:\n raise InvalidPlaceholderVariant(\n \"Valid placeholder variants are \"\n + f\"{friendly_list(_VALID_PLACEHOLDER_VARIANTS)}\"\n )\n return variant\n\n async def _on_click(self, _: events.Click) -> None:\n \"\"\"Click handler to cycle through the placeholder variants.\"\"\"\n self.cycle_variant()\n\n def _on_resize(self, event: events.Resize) -> None:\n \"\"\"Update the placeholder \"size\" variant with the new placeholder size.\"\"\"\n self._renderables[\"size\"] = self._SIZE_RENDER_TEMPLATE.format(*event.size)\n if self.variant == \"size\":\n self.refresh()\n", "path": "src/textual/widgets/_placeholder.py"}]}
2,088
338
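With the change above, `disabled` joins `name`, `id` and `classes` as a standard constructor argument and is forwarded to `Widget.__init__`, while the new `:disabled` style dims the widget. A minimal usage sketch, assuming a Textual release that includes this change:

```python
from textual.app import App, ComposeResult
from textual.widgets import Placeholder

class PlaceholderDemo(App):
    def compose(self) -> ComposeResult:
        yield Placeholder("active pane")
        # Constructing the widget directly in its disabled state is exactly
        # what the missing keyword parameter previously prevented.
        yield Placeholder("disabled pane", disabled=True)

if __name__ == "__main__":
    PlaceholderDemo().run()
```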
gh_patches_debug_8950
rasdani/github-patches
git_diff
gratipay__gratipay.com-3047
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Cannot close (delete) my account I works for a while but then I see an "Application Error" page from Heroku. Ostensibly the operation takes too long and Heroku kills the request. Cannot close (delete) my account I works for a while but then I see an "Application Error" page from Heroku. Ostensibly the operation takes too long and Heroku kills the request. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `gratipay/models/_mixin_team.py` Content: ``` 1 """Teams on Gratipay are plural participants with members. 2 """ 3 from collections import OrderedDict 4 from decimal import Decimal 5 6 from aspen.utils import typecheck 7 8 9 class MemberLimitReached(Exception): pass 10 11 class StubParticipantAdded(Exception): pass 12 13 class MixinTeam(object): 14 """This class provides methods for working with a Participant as a Team. 15 16 :param Participant participant: the underlying :py:class:`~gratipay.participant.Participant` object for this team 17 18 """ 19 20 # XXX These were all written with the ORM and need to be converted. 21 22 def __init__(self, participant): 23 self.participant = participant 24 25 def show_as_team(self, user): 26 """Return a boolean, whether to show this participant as a team. 27 """ 28 if not self.IS_PLURAL: 29 return False 30 if user.ADMIN: 31 return True 32 if not self.get_current_takes(): 33 if self == user.participant: 34 return True 35 return False 36 return True 37 38 def add_member(self, member): 39 """Add a member to this team. 40 """ 41 assert self.IS_PLURAL 42 if len(self.get_current_takes()) == 149: 43 raise MemberLimitReached 44 if not member.is_claimed: 45 raise StubParticipantAdded 46 self.__set_take_for(member, Decimal('0.01'), self) 47 48 def remove_member(self, member): 49 """Remove a member from this team. 50 """ 51 assert self.IS_PLURAL 52 self.__set_take_for(member, Decimal('0.00'), self) 53 54 def remove_all_members(self, cursor=None): 55 (cursor or self.db).run(""" 56 INSERT INTO takes (ctime, member, team, amount, recorder) ( 57 SELECT ctime, member, %(username)s, 0.00, %(username)s 58 FROM current_takes 59 WHERE team=%(username)s 60 AND amount > 0 61 ); 62 """, dict(username=self.username)) 63 64 def member_of(self, team): 65 """Given a Participant object, return a boolean. 66 """ 67 assert team.IS_PLURAL 68 for take in team.get_current_takes(): 69 if take['member'] == self.username: 70 return True 71 return False 72 73 def get_take_last_week_for(self, member): 74 """Get the user's nominal take last week. Used in throttling. 75 """ 76 assert self.IS_PLURAL 77 membername = member.username if hasattr(member, 'username') \ 78 else member['username'] 79 return self.db.one(""" 80 81 SELECT amount 82 FROM takes 83 WHERE team=%s AND member=%s 84 AND mtime < ( 85 SELECT ts_start 86 FROM paydays 87 WHERE ts_end > ts_start 88 ORDER BY ts_start DESC LIMIT 1 89 ) 90 ORDER BY mtime DESC LIMIT 1 91 92 """, (self.username, membername), default=Decimal('0.00')) 93 94 def get_take_for(self, member): 95 """Return a Decimal representation of the take for this member, or 0. 
96 """ 97 assert self.IS_PLURAL 98 return self.db.one( "SELECT amount FROM current_takes " 99 "WHERE member=%s AND team=%s" 100 , (member.username, self.username) 101 , default=Decimal('0.00') 102 ) 103 104 def compute_max_this_week(self, last_week): 105 """2x last week's take, but at least a dollar. 106 """ 107 return max(last_week * Decimal('2'), Decimal('1.00')) 108 109 def set_take_for(self, member, take, recorder, cursor=None): 110 """Sets member's take from the team pool. 111 """ 112 assert self.IS_PLURAL 113 114 # lazy import to avoid circular import 115 from gratipay.security.user import User 116 from gratipay.models.participant import Participant 117 118 typecheck( member, Participant 119 , take, Decimal 120 , recorder, (Participant, User) 121 ) 122 123 last_week = self.get_take_last_week_for(member) 124 max_this_week = self.compute_max_this_week(last_week) 125 if take > max_this_week: 126 take = max_this_week 127 128 self.__set_take_for(member, take, recorder, cursor) 129 return take 130 131 def __set_take_for(self, member, amount, recorder, cursor=None): 132 assert self.IS_PLURAL 133 # XXX Factored out for testing purposes only! :O Use .set_take_for. 134 with self.db.get_cursor(cursor) as cursor: 135 # Lock to avoid race conditions 136 cursor.run("LOCK TABLE takes IN EXCLUSIVE MODE") 137 # Compute the current takes 138 old_takes = self.compute_actual_takes(cursor) 139 # Insert the new take 140 cursor.run(""" 141 142 INSERT INTO takes (ctime, member, team, amount, recorder) 143 VALUES ( COALESCE (( SELECT ctime 144 FROM takes 145 WHERE member=%(member)s 146 AND team=%(team)s 147 LIMIT 1 148 ), CURRENT_TIMESTAMP) 149 , %(member)s 150 , %(team)s 151 , %(amount)s 152 , %(recorder)s 153 ) 154 155 """, dict(member=member.username, team=self.username, amount=amount, 156 recorder=recorder.username)) 157 # Compute the new takes 158 new_takes = self.compute_actual_takes(cursor) 159 # Update receiving amounts in the participants table 160 self.update_taking(old_takes, new_takes, cursor, member) 161 # Update is_funded on member's tips 162 member.update_giving(cursor) 163 164 def update_taking(self, old_takes, new_takes, cursor=None, member=None): 165 """Update `taking` amounts based on the difference between `old_takes` 166 and `new_takes`. 167 """ 168 for username in set(old_takes.keys()).union(new_takes.keys()): 169 if username == self.username: 170 continue 171 old = old_takes.get(username, {}).get('actual_amount', Decimal(0)) 172 new = new_takes.get(username, {}).get('actual_amount', Decimal(0)) 173 diff = new - old 174 if diff != 0: 175 r = (self.db or cursor).one(""" 176 UPDATE participants 177 SET taking = (taking + %(diff)s) 178 , receiving = (receiving + %(diff)s) 179 WHERE username=%(username)s 180 RETURNING taking, receiving 181 """, dict(username=username, diff=diff)) 182 if member and username == member.username: 183 member.set_attributes(**r._asdict()) 184 185 def get_current_takes(self, cursor=None): 186 """Return a list of member takes for a team. 187 """ 188 assert self.IS_PLURAL 189 TAKES = """ 190 SELECT member, amount, ctime, mtime 191 FROM current_takes 192 WHERE team=%(team)s 193 ORDER BY ctime DESC 194 """ 195 records = (cursor or self.db).all(TAKES, dict(team=self.username)) 196 return [r._asdict() for r in records] 197 198 def get_team_take(self, cursor=None): 199 """Return a single take for a team, the team itself's take. 
200 """ 201 assert self.IS_PLURAL 202 TAKE = "SELECT sum(amount) FROM current_takes WHERE team=%s" 203 total_take = (cursor or self.db).one(TAKE, (self.username,), default=0) 204 team_take = max(self.receiving - total_take, 0) 205 membership = { "ctime": None 206 , "mtime": None 207 , "member": self.username 208 , "amount": team_take 209 } 210 return membership 211 212 def compute_actual_takes(self, cursor=None): 213 """Get the takes, compute the actual amounts, and return an OrderedDict. 214 """ 215 actual_takes = OrderedDict() 216 nominal_takes = self.get_current_takes(cursor=cursor) 217 nominal_takes.append(self.get_team_take(cursor=cursor)) 218 budget = balance = self.balance + self.receiving - self.giving 219 for take in nominal_takes: 220 nominal_amount = take['nominal_amount'] = take.pop('amount') 221 actual_amount = take['actual_amount'] = min(nominal_amount, balance) 222 if take['member'] != self.username: 223 balance -= actual_amount 224 take['balance'] = balance 225 take['percentage'] = (actual_amount / budget) if budget > 0 else 0 226 actual_takes[take['member']] = take 227 return actual_takes 228 229 def get_members(self, current_participant): 230 """Return a list of member dicts. 231 """ 232 assert self.IS_PLURAL 233 takes = self.compute_actual_takes() 234 members = [] 235 for take in takes.values(): 236 member = {} 237 member['username'] = take['member'] 238 member['take'] = take['nominal_amount'] 239 member['balance'] = take['balance'] 240 member['percentage'] = take['percentage'] 241 242 member['removal_allowed'] = current_participant == self 243 member['editing_allowed'] = False 244 member['is_current_user'] = False 245 if current_participant is not None: 246 if member['username'] == current_participant.username: 247 member['is_current_user'] = True 248 if take['ctime'] is not None: 249 # current user, but not the team itself 250 member['editing_allowed']= True 251 252 member['last_week'] = last_week = self.get_take_last_week_for(member) 253 member['max_this_week'] = self.compute_max_this_week(last_week) 254 members.append(member) 255 return members 256 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/gratipay/models/_mixin_team.py b/gratipay/models/_mixin_team.py --- a/gratipay/models/_mixin_team.py +++ b/gratipay/models/_mixin_team.py @@ -172,7 +172,7 @@ new = new_takes.get(username, {}).get('actual_amount', Decimal(0)) diff = new - old if diff != 0: - r = (self.db or cursor).one(""" + r = (cursor or self.db).one(""" UPDATE participants SET taking = (taking + %(diff)s) , receiving = (receiving + %(diff)s)
{"golden_diff": "diff --git a/gratipay/models/_mixin_team.py b/gratipay/models/_mixin_team.py\n--- a/gratipay/models/_mixin_team.py\n+++ b/gratipay/models/_mixin_team.py\n@@ -172,7 +172,7 @@\n new = new_takes.get(username, {}).get('actual_amount', Decimal(0))\n diff = new - old\n if diff != 0:\n- r = (self.db or cursor).one(\"\"\"\n+ r = (cursor or self.db).one(\"\"\"\n UPDATE participants\n SET taking = (taking + %(diff)s)\n , receiving = (receiving + %(diff)s)\n", "issue": "Cannot close (delete) my account\nI works for a while but then I see an \"Application Error\" page from Heroku. Ostensibly the operation takes too long and Heroku kills the request.\n\nCannot close (delete) my account\nI works for a while but then I see an \"Application Error\" page from Heroku. Ostensibly the operation takes too long and Heroku kills the request.\n\n", "before_files": [{"content": "\"\"\"Teams on Gratipay are plural participants with members.\n\"\"\"\nfrom collections import OrderedDict\nfrom decimal import Decimal\n\nfrom aspen.utils import typecheck\n\n\nclass MemberLimitReached(Exception): pass\n\nclass StubParticipantAdded(Exception): pass\n\nclass MixinTeam(object):\n \"\"\"This class provides methods for working with a Participant as a Team.\n\n :param Participant participant: the underlying :py:class:`~gratipay.participant.Participant` object for this team\n\n \"\"\"\n\n # XXX These were all written with the ORM and need to be converted.\n\n def __init__(self, participant):\n self.participant = participant\n\n def show_as_team(self, user):\n \"\"\"Return a boolean, whether to show this participant as a team.\n \"\"\"\n if not self.IS_PLURAL:\n return False\n if user.ADMIN:\n return True\n if not self.get_current_takes():\n if self == user.participant:\n return True\n return False\n return True\n\n def add_member(self, member):\n \"\"\"Add a member to this team.\n \"\"\"\n assert self.IS_PLURAL\n if len(self.get_current_takes()) == 149:\n raise MemberLimitReached\n if not member.is_claimed:\n raise StubParticipantAdded\n self.__set_take_for(member, Decimal('0.01'), self)\n\n def remove_member(self, member):\n \"\"\"Remove a member from this team.\n \"\"\"\n assert self.IS_PLURAL\n self.__set_take_for(member, Decimal('0.00'), self)\n\n def remove_all_members(self, cursor=None):\n (cursor or self.db).run(\"\"\"\n INSERT INTO takes (ctime, member, team, amount, recorder) (\n SELECT ctime, member, %(username)s, 0.00, %(username)s\n FROM current_takes\n WHERE team=%(username)s\n AND amount > 0\n );\n \"\"\", dict(username=self.username))\n\n def member_of(self, team):\n \"\"\"Given a Participant object, return a boolean.\n \"\"\"\n assert team.IS_PLURAL\n for take in team.get_current_takes():\n if take['member'] == self.username:\n return True\n return False\n\n def get_take_last_week_for(self, member):\n \"\"\"Get the user's nominal take last week. 
Used in throttling.\n \"\"\"\n assert self.IS_PLURAL\n membername = member.username if hasattr(member, 'username') \\\n else member['username']\n return self.db.one(\"\"\"\n\n SELECT amount\n FROM takes\n WHERE team=%s AND member=%s\n AND mtime < (\n SELECT ts_start\n FROM paydays\n WHERE ts_end > ts_start\n ORDER BY ts_start DESC LIMIT 1\n )\n ORDER BY mtime DESC LIMIT 1\n\n \"\"\", (self.username, membername), default=Decimal('0.00'))\n\n def get_take_for(self, member):\n \"\"\"Return a Decimal representation of the take for this member, or 0.\n \"\"\"\n assert self.IS_PLURAL\n return self.db.one( \"SELECT amount FROM current_takes \"\n \"WHERE member=%s AND team=%s\"\n , (member.username, self.username)\n , default=Decimal('0.00')\n )\n\n def compute_max_this_week(self, last_week):\n \"\"\"2x last week's take, but at least a dollar.\n \"\"\"\n return max(last_week * Decimal('2'), Decimal('1.00'))\n\n def set_take_for(self, member, take, recorder, cursor=None):\n \"\"\"Sets member's take from the team pool.\n \"\"\"\n assert self.IS_PLURAL\n\n # lazy import to avoid circular import\n from gratipay.security.user import User\n from gratipay.models.participant import Participant\n\n typecheck( member, Participant\n , take, Decimal\n , recorder, (Participant, User)\n )\n\n last_week = self.get_take_last_week_for(member)\n max_this_week = self.compute_max_this_week(last_week)\n if take > max_this_week:\n take = max_this_week\n\n self.__set_take_for(member, take, recorder, cursor)\n return take\n\n def __set_take_for(self, member, amount, recorder, cursor=None):\n assert self.IS_PLURAL\n # XXX Factored out for testing purposes only! :O Use .set_take_for.\n with self.db.get_cursor(cursor) as cursor:\n # Lock to avoid race conditions\n cursor.run(\"LOCK TABLE takes IN EXCLUSIVE MODE\")\n # Compute the current takes\n old_takes = self.compute_actual_takes(cursor)\n # Insert the new take\n cursor.run(\"\"\"\n\n INSERT INTO takes (ctime, member, team, amount, recorder)\n VALUES ( COALESCE (( SELECT ctime\n FROM takes\n WHERE member=%(member)s\n AND team=%(team)s\n LIMIT 1\n ), CURRENT_TIMESTAMP)\n , %(member)s\n , %(team)s\n , %(amount)s\n , %(recorder)s\n )\n\n \"\"\", dict(member=member.username, team=self.username, amount=amount,\n recorder=recorder.username))\n # Compute the new takes\n new_takes = self.compute_actual_takes(cursor)\n # Update receiving amounts in the participants table\n self.update_taking(old_takes, new_takes, cursor, member)\n # Update is_funded on member's tips\n member.update_giving(cursor)\n\n def update_taking(self, old_takes, new_takes, cursor=None, member=None):\n \"\"\"Update `taking` amounts based on the difference between `old_takes`\n and `new_takes`.\n \"\"\"\n for username in set(old_takes.keys()).union(new_takes.keys()):\n if username == self.username:\n continue\n old = old_takes.get(username, {}).get('actual_amount', Decimal(0))\n new = new_takes.get(username, {}).get('actual_amount', Decimal(0))\n diff = new - old\n if diff != 0:\n r = (self.db or cursor).one(\"\"\"\n UPDATE participants\n SET taking = (taking + %(diff)s)\n , receiving = (receiving + %(diff)s)\n WHERE username=%(username)s\n RETURNING taking, receiving\n \"\"\", dict(username=username, diff=diff))\n if member and username == member.username:\n member.set_attributes(**r._asdict())\n\n def get_current_takes(self, cursor=None):\n \"\"\"Return a list of member takes for a team.\n \"\"\"\n assert self.IS_PLURAL\n TAKES = \"\"\"\n SELECT member, amount, ctime, mtime\n FROM current_takes\n WHERE 
team=%(team)s\n ORDER BY ctime DESC\n \"\"\"\n records = (cursor or self.db).all(TAKES, dict(team=self.username))\n return [r._asdict() for r in records]\n\n def get_team_take(self, cursor=None):\n \"\"\"Return a single take for a team, the team itself's take.\n \"\"\"\n assert self.IS_PLURAL\n TAKE = \"SELECT sum(amount) FROM current_takes WHERE team=%s\"\n total_take = (cursor or self.db).one(TAKE, (self.username,), default=0)\n team_take = max(self.receiving - total_take, 0)\n membership = { \"ctime\": None\n , \"mtime\": None\n , \"member\": self.username\n , \"amount\": team_take\n }\n return membership\n\n def compute_actual_takes(self, cursor=None):\n \"\"\"Get the takes, compute the actual amounts, and return an OrderedDict.\n \"\"\"\n actual_takes = OrderedDict()\n nominal_takes = self.get_current_takes(cursor=cursor)\n nominal_takes.append(self.get_team_take(cursor=cursor))\n budget = balance = self.balance + self.receiving - self.giving\n for take in nominal_takes:\n nominal_amount = take['nominal_amount'] = take.pop('amount')\n actual_amount = take['actual_amount'] = min(nominal_amount, balance)\n if take['member'] != self.username:\n balance -= actual_amount\n take['balance'] = balance\n take['percentage'] = (actual_amount / budget) if budget > 0 else 0\n actual_takes[take['member']] = take\n return actual_takes\n\n def get_members(self, current_participant):\n \"\"\"Return a list of member dicts.\n \"\"\"\n assert self.IS_PLURAL\n takes = self.compute_actual_takes()\n members = []\n for take in takes.values():\n member = {}\n member['username'] = take['member']\n member['take'] = take['nominal_amount']\n member['balance'] = take['balance']\n member['percentage'] = take['percentage']\n\n member['removal_allowed'] = current_participant == self\n member['editing_allowed'] = False\n member['is_current_user'] = False\n if current_participant is not None:\n if member['username'] == current_participant.username:\n member['is_current_user'] = True\n if take['ctime'] is not None:\n # current user, but not the team itself\n member['editing_allowed']= True\n\n member['last_week'] = last_week = self.get_take_last_week_for(member)\n member['max_this_week'] = self.compute_max_this_week(last_week)\n members.append(member)\n return members\n", "path": "gratipay/models/_mixin_team.py"}], "after_files": [{"content": "\"\"\"Teams on Gratipay are plural participants with members.\n\"\"\"\nfrom collections import OrderedDict\nfrom decimal import Decimal\n\nfrom aspen.utils import typecheck\n\n\nclass MemberLimitReached(Exception): pass\n\nclass StubParticipantAdded(Exception): pass\n\nclass MixinTeam(object):\n \"\"\"This class provides methods for working with a Participant as a Team.\n\n :param Participant participant: the underlying :py:class:`~gratipay.participant.Participant` object for this team\n\n \"\"\"\n\n # XXX These were all written with the ORM and need to be converted.\n\n def __init__(self, participant):\n self.participant = participant\n\n def show_as_team(self, user):\n \"\"\"Return a boolean, whether to show this participant as a team.\n \"\"\"\n if not self.IS_PLURAL:\n return False\n if user.ADMIN:\n return True\n if not self.get_current_takes():\n if self == user.participant:\n return True\n return False\n return True\n\n def add_member(self, member):\n \"\"\"Add a member to this team.\n \"\"\"\n assert self.IS_PLURAL\n if len(self.get_current_takes()) == 149:\n raise MemberLimitReached\n if not member.is_claimed:\n raise StubParticipantAdded\n self.__set_take_for(member, 
Decimal('0.01'), self)\n\n def remove_member(self, member):\n \"\"\"Remove a member from this team.\n \"\"\"\n assert self.IS_PLURAL\n self.__set_take_for(member, Decimal('0.00'), self)\n\n def remove_all_members(self, cursor=None):\n (cursor or self.db).run(\"\"\"\n INSERT INTO takes (ctime, member, team, amount, recorder) (\n SELECT ctime, member, %(username)s, 0.00, %(username)s\n FROM current_takes\n WHERE team=%(username)s\n AND amount > 0\n );\n \"\"\", dict(username=self.username))\n\n def member_of(self, team):\n \"\"\"Given a Participant object, return a boolean.\n \"\"\"\n assert team.IS_PLURAL\n for take in team.get_current_takes():\n if take['member'] == self.username:\n return True\n return False\n\n def get_take_last_week_for(self, member):\n \"\"\"Get the user's nominal take last week. Used in throttling.\n \"\"\"\n assert self.IS_PLURAL\n membername = member.username if hasattr(member, 'username') \\\n else member['username']\n return self.db.one(\"\"\"\n\n SELECT amount\n FROM takes\n WHERE team=%s AND member=%s\n AND mtime < (\n SELECT ts_start\n FROM paydays\n WHERE ts_end > ts_start\n ORDER BY ts_start DESC LIMIT 1\n )\n ORDER BY mtime DESC LIMIT 1\n\n \"\"\", (self.username, membername), default=Decimal('0.00'))\n\n def get_take_for(self, member):\n \"\"\"Return a Decimal representation of the take for this member, or 0.\n \"\"\"\n assert self.IS_PLURAL\n return self.db.one( \"SELECT amount FROM current_takes \"\n \"WHERE member=%s AND team=%s\"\n , (member.username, self.username)\n , default=Decimal('0.00')\n )\n\n def compute_max_this_week(self, last_week):\n \"\"\"2x last week's take, but at least a dollar.\n \"\"\"\n return max(last_week * Decimal('2'), Decimal('1.00'))\n\n def set_take_for(self, member, take, recorder, cursor=None):\n \"\"\"Sets member's take from the team pool.\n \"\"\"\n assert self.IS_PLURAL\n\n # lazy import to avoid circular import\n from gratipay.security.user import User\n from gratipay.models.participant import Participant\n\n typecheck( member, Participant\n , take, Decimal\n , recorder, (Participant, User)\n )\n\n last_week = self.get_take_last_week_for(member)\n max_this_week = self.compute_max_this_week(last_week)\n if take > max_this_week:\n take = max_this_week\n\n self.__set_take_for(member, take, recorder, cursor)\n return take\n\n def __set_take_for(self, member, amount, recorder, cursor=None):\n assert self.IS_PLURAL\n # XXX Factored out for testing purposes only! 
:O Use .set_take_for.\n with self.db.get_cursor(cursor) as cursor:\n # Lock to avoid race conditions\n cursor.run(\"LOCK TABLE takes IN EXCLUSIVE MODE\")\n # Compute the current takes\n old_takes = self.compute_actual_takes(cursor)\n # Insert the new take\n cursor.run(\"\"\"\n\n INSERT INTO takes (ctime, member, team, amount, recorder)\n VALUES ( COALESCE (( SELECT ctime\n FROM takes\n WHERE member=%(member)s\n AND team=%(team)s\n LIMIT 1\n ), CURRENT_TIMESTAMP)\n , %(member)s\n , %(team)s\n , %(amount)s\n , %(recorder)s\n )\n\n \"\"\", dict(member=member.username, team=self.username, amount=amount,\n recorder=recorder.username))\n # Compute the new takes\n new_takes = self.compute_actual_takes(cursor)\n # Update receiving amounts in the participants table\n self.update_taking(old_takes, new_takes, cursor, member)\n # Update is_funded on member's tips\n member.update_giving(cursor)\n\n def update_taking(self, old_takes, new_takes, cursor=None, member=None):\n \"\"\"Update `taking` amounts based on the difference between `old_takes`\n and `new_takes`.\n \"\"\"\n for username in set(old_takes.keys()).union(new_takes.keys()):\n if username == self.username:\n continue\n old = old_takes.get(username, {}).get('actual_amount', Decimal(0))\n new = new_takes.get(username, {}).get('actual_amount', Decimal(0))\n diff = new - old\n if diff != 0:\n r = (cursor or self.db).one(\"\"\"\n UPDATE participants\n SET taking = (taking + %(diff)s)\n , receiving = (receiving + %(diff)s)\n WHERE username=%(username)s\n RETURNING taking, receiving\n \"\"\", dict(username=username, diff=diff))\n if member and username == member.username:\n member.set_attributes(**r._asdict())\n\n def get_current_takes(self, cursor=None):\n \"\"\"Return a list of member takes for a team.\n \"\"\"\n assert self.IS_PLURAL\n TAKES = \"\"\"\n SELECT member, amount, ctime, mtime\n FROM current_takes\n WHERE team=%(team)s\n ORDER BY ctime DESC\n \"\"\"\n records = (cursor or self.db).all(TAKES, dict(team=self.username))\n return [r._asdict() for r in records]\n\n def get_team_take(self, cursor=None):\n \"\"\"Return a single take for a team, the team itself's take.\n \"\"\"\n assert self.IS_PLURAL\n TAKE = \"SELECT sum(amount) FROM current_takes WHERE team=%s\"\n total_take = (cursor or self.db).one(TAKE, (self.username,), default=0)\n team_take = max(self.receiving - total_take, 0)\n membership = { \"ctime\": None\n , \"mtime\": None\n , \"member\": self.username\n , \"amount\": team_take\n }\n return membership\n\n def compute_actual_takes(self, cursor=None):\n \"\"\"Get the takes, compute the actual amounts, and return an OrderedDict.\n \"\"\"\n actual_takes = OrderedDict()\n nominal_takes = self.get_current_takes(cursor=cursor)\n nominal_takes.append(self.get_team_take(cursor=cursor))\n budget = balance = self.balance + self.receiving - self.giving\n for take in nominal_takes:\n nominal_amount = take['nominal_amount'] = take.pop('amount')\n actual_amount = take['actual_amount'] = min(nominal_amount, balance)\n if take['member'] != self.username:\n balance -= actual_amount\n take['balance'] = balance\n take['percentage'] = (actual_amount / budget) if budget > 0 else 0\n actual_takes[take['member']] = take\n return actual_takes\n\n def get_members(self, current_participant):\n \"\"\"Return a list of member dicts.\n \"\"\"\n assert self.IS_PLURAL\n takes = self.compute_actual_takes()\n members = []\n for take in takes.values():\n member = {}\n member['username'] = take['member']\n member['take'] = take['nominal_amount']\n 
member['balance'] = take['balance']\n member['percentage'] = take['percentage']\n\n member['removal_allowed'] = current_participant == self\n member['editing_allowed'] = False\n member['is_current_user'] = False\n if current_participant is not None:\n if member['username'] == current_participant.username:\n member['is_current_user'] = True\n if take['ctime'] is not None:\n # current user, but not the team itself\n member['editing_allowed']= True\n\n member['last_week'] = last_week = self.get_take_last_week_for(member)\n member['max_this_week'] = self.compute_max_this_week(last_week)\n members.append(member)\n return members\n", "path": "gratipay/models/_mixin_team.py"}]}
num_tokens: 3,053
num_tokens_diff: 146
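In the gratipay record above, the golden diff changes `(self.db or cursor)` to `(cursor or self.db)` inside `update_taking`. Because `self.db` is always truthy, the buggy order silently ignores the cursor that `__set_take_for` opened (the one holding the `LOCK TABLE takes IN EXCLUSIVE MODE` transaction), so the `UPDATE participants` statements run on a separate connection outside that transaction, which is a plausible reason closing an account could drag past Heroku's request timeout. The snippet below is a minimal, stand-alone illustration of the truthiness mistake; `FakeDB` and `FakeCursor` are invented stand-ins, not gratipay classes.

```python
# Minimal sketch of the bug fixed by the gratipay golden diff above.
# FakeDB / FakeCursor are hypothetical stand-ins; only the `or` logic matters.

class FakeDB:
    """Stand-in for self.db: each statement gets its own fresh connection."""
    def one(self, sql, params=None):
        return "ran OUTSIDE the open transaction"

class FakeCursor:
    """Stand-in for the cursor bound to the locked, in-flight transaction."""
    def one(self, sql, params=None):
        return "ran INSIDE the open transaction"

db, cursor = FakeDB(), FakeCursor()
sql = "UPDATE participants SET taking = taking + %(diff)s ..."

# Buggy order from before the fix: db is always truthy, so the cursor
# argument passed into update_taking() is silently ignored.
print((db or cursor).one(sql))   # -> ran OUTSIDE the open transaction

# Fixed order from the golden diff: prefer the cursor when one is given,
# and fall back to db only when no cursor was passed.
print((cursor or db).one(sql))   # -> ran INSIDE the open transaction
```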
problem_id: gh_patches_debug_10452
source: rasdani/github-patches
task_type: git_diff
in_source_id: sublimelsp__LSP-285
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Feature request: Toggle diagnostic panel Hi Tom, I have a suggestion. Right now I have a keyboard shortcut to open the diagnostic panel `ctrl+shift+m`. As you can see. When I open the panel with that keybinding, and press the keybinding again the panel is still visible. ![notoggle](https://user-images.githubusercontent.com/22029477/36727088-45b28d4a-1bbc-11e8-9f72-3a6d7f611f09.gif) Wouldn't it be better if the panel could be toggled like this? :) ![output3](https://user-images.githubusercontent.com/22029477/36727446-8e5a78d6-1bbd-11e8-8968-a914264a20da.gif) If the answer is yes? I have already done that and I could create a pull request if you want :) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `plugin/diagnostics.py` Content: ``` 1 import html 2 import os 3 import sublime 4 import sublime_plugin 5 6 try: 7 from typing import Any, List, Dict, Tuple, Callable, Optional 8 assert Any and List and Dict and Tuple and Callable and Optional 9 except ImportError: 10 pass 11 12 from .core.settings import settings, PLUGIN_NAME 13 from .core.protocol import Diagnostic, DiagnosticSeverity 14 from .core.events import Events 15 from .core.configurations import is_supported_syntax 16 from .core.diagnostics import DiagnosticsUpdate, get_window_diagnostics, get_line_diagnostics 17 from .core.workspace import get_project_path 18 from .core.panels import create_output_panel 19 20 diagnostic_severity_names = { 21 DiagnosticSeverity.Error: "error", 22 DiagnosticSeverity.Warning: "warning", 23 DiagnosticSeverity.Information: "info", 24 DiagnosticSeverity.Hint: "hint" 25 } 26 27 diagnostic_severity_scopes = { 28 DiagnosticSeverity.Error: 'markup.deleted.lsp sublimelinter.mark.error markup.error.lsp', 29 DiagnosticSeverity.Warning: 'markup.changed.lsp sublimelinter.mark.warning markup.warning.lsp', 30 DiagnosticSeverity.Information: 'markup.inserted.lsp sublimelinter.gutter-mark markup.info.lsp', 31 DiagnosticSeverity.Hint: 'markup.inserted.lsp sublimelinter.gutter-mark markup.info.suggestion.lsp' 32 } 33 34 stylesheet = ''' 35 <style> 36 div.error-arrow { 37 border-top: 0.4rem solid transparent; 38 border-left: 0.5rem solid color(var(--redish) blend(var(--background) 30%)); 39 width: 0; 40 height: 0; 41 } 42 div.error { 43 padding: 0.4rem 0 0.4rem 0.7rem; 44 margin: 0 0 0.2rem; 45 border-radius: 0 0.2rem 0.2rem 0.2rem; 46 } 47 48 div.error span.message { 49 padding-right: 0.7rem; 50 } 51 52 div.error a { 53 text-decoration: inherit; 54 padding: 0.35rem 0.7rem 0.45rem 0.8rem; 55 position: relative; 56 bottom: 0.05rem; 57 border-radius: 0 0.2rem 0.2rem 0; 58 font-weight: bold; 59 } 60 html.dark div.error a { 61 background-color: #00000018; 62 } 63 html.light div.error a { 64 background-color: #ffffff18; 65 } 66 </style> 67 ''' 68 69 UNDERLINE_FLAGS = (sublime.DRAW_SQUIGGLY_UNDERLINE 70 | sublime.DRAW_NO_OUTLINE 71 | sublime.DRAW_NO_FILL 72 | sublime.DRAW_EMPTY_AS_OVERWRITE) 73 74 BOX_FLAGS = sublime.DRAW_NO_FILL | sublime.DRAW_EMPTY_AS_OVERWRITE 75 76 77 def create_phantom_html(text: str) -> str: 78 global stylesheet 79 return """<body id=inline-error>{} 80 <div class="error-arrow"></div> 81 <div class="error"> 82 <span class="message">{}</span> 83 <a href="code-actions">Code Actions</a> 84 </div> 85 </body>""".format(stylesheet, html.escape(text, quote=False)) 86 87 88 def on_phantom_navigate(view: 
sublime.View, href: str, point: int): 89 # TODO: don't mess with the user's cursor. 90 sel = view.sel() 91 sel.clear() 92 sel.add(sublime.Region(point)) 93 view.run_command("lsp_code_actions") 94 95 96 def create_phantom(view: sublime.View, diagnostic: Diagnostic) -> sublime.Phantom: 97 region = diagnostic.range.to_region(view) 98 # TODO: hook up hide phantom (if keeping them) 99 content = create_phantom_html(diagnostic.message) 100 return sublime.Phantom( 101 region, 102 '<p>' + content + '</p>', 103 sublime.LAYOUT_BELOW, 104 lambda href: on_phantom_navigate(view, href, region.begin()) 105 ) 106 107 108 def format_severity(severity: int) -> str: 109 return diagnostic_severity_names.get(severity, "???") 110 111 112 def format_diagnostic(diagnostic: Diagnostic) -> str: 113 location = "{:>8}:{:<4}".format( 114 diagnostic.range.start.row + 1, diagnostic.range.start.col + 1) 115 message = diagnostic.message.replace("\n", " ").replace("\r", "") 116 return " {}\t{:<12}\t{:<10}\t{}".format( 117 location, diagnostic.source, format_severity(diagnostic.severity), message) 118 119 120 phantom_sets_by_buffer = {} # type: Dict[int, sublime.PhantomSet] 121 122 123 def update_diagnostics_phantoms(view: sublime.View, diagnostics: 'List[Diagnostic]'): 124 global phantom_sets_by_buffer 125 126 buffer_id = view.buffer_id() 127 if not settings.show_diagnostics_phantoms or view.is_dirty(): 128 phantoms = None 129 else: 130 phantoms = list( 131 create_phantom(view, diagnostic) for diagnostic in diagnostics) 132 if phantoms: 133 phantom_set = phantom_sets_by_buffer.get(buffer_id) 134 if not phantom_set: 135 phantom_set = sublime.PhantomSet(view, "lsp_diagnostics") 136 phantom_sets_by_buffer[buffer_id] = phantom_set 137 phantom_set.update(phantoms) 138 else: 139 phantom_sets_by_buffer.pop(buffer_id, None) 140 141 142 def update_diagnostics_regions(view: sublime.View, diagnostics: 'List[Diagnostic]', severity: int): 143 region_name = "lsp_" + format_severity(severity) 144 if settings.show_diagnostics_phantoms and not view.is_dirty(): 145 regions = None 146 else: 147 regions = list(diagnostic.range.to_region(view) for diagnostic in diagnostics 148 if diagnostic.severity == severity) 149 if regions: 150 scope_name = diagnostic_severity_scopes[severity] 151 view.add_regions( 152 region_name, regions, scope_name, settings.diagnostics_gutter_marker, 153 UNDERLINE_FLAGS if settings.diagnostics_highlight_style == "underline" else BOX_FLAGS) 154 else: 155 view.erase_regions(region_name) 156 157 158 def update_diagnostics_in_view(view: sublime.View, diagnostics: 'List[Diagnostic]'): 159 if view and view.is_valid(): 160 update_diagnostics_phantoms(view, diagnostics) 161 for severity in range(DiagnosticSeverity.Error, DiagnosticSeverity.Information): 162 update_diagnostics_regions(view, diagnostics, severity) 163 164 165 Events.subscribe("document.diagnostics", 166 lambda update: handle_diagnostics(update)) 167 168 169 def handle_diagnostics(update: DiagnosticsUpdate): 170 window = update.window 171 view = window.find_open_file(update.file_path) 172 if view: 173 update_diagnostics_in_view(view, update.diagnostics) 174 update_diagnostics_panel(window) 175 176 177 class DiagnosticsCursorListener(sublime_plugin.ViewEventListener): 178 def __init__(self, view): 179 self.view = view 180 self.has_status = False 181 182 @classmethod 183 def is_applicable(cls, view_settings): 184 syntax = view_settings.get('syntax') 185 return settings.show_diagnostics_in_view_status and syntax and is_supported_syntax(syntax) 186 187 def 
on_selection_modified_async(self): 188 selections = self.view.sel() 189 if len(selections) > 0: 190 pos = selections[0].begin() 191 line_diagnostics = get_line_diagnostics(self.view, pos) 192 if len(line_diagnostics) > 0: 193 self.show_diagnostics_status(line_diagnostics) 194 elif self.has_status: 195 self.clear_diagnostics_status() 196 197 def show_diagnostics_status(self, line_diagnostics): 198 self.has_status = True 199 self.view.set_status('lsp_diagnostics', line_diagnostics[0].message) 200 201 def clear_diagnostics_status(self): 202 self.view.erase_status('lsp_diagnostics') 203 self.has_status = False 204 205 206 class LspShowDiagnosticsPanelCommand(sublime_plugin.WindowCommand): 207 def run(self): 208 ensure_diagnostics_panel(self.window) 209 self.window.run_command("show_panel", {"panel": "output.diagnostics"}) 210 211 212 def create_diagnostics_panel(window): 213 panel = create_output_panel(window, "diagnostics") 214 panel.settings().set("result_file_regex", r"^\s*\S\s+(\S.*):$") 215 panel.settings().set("result_line_regex", r"^\s+([0-9]+):?([0-9]+).*$") 216 panel.assign_syntax("Packages/" + PLUGIN_NAME + 217 "/Syntaxes/Diagnostics.sublime-syntax") 218 # Call create_output_panel a second time after assigning the above 219 # settings, so that it'll be picked up as a result buffer 220 # see: Packages/Default/exec.py#L228-L230 221 panel = window.create_output_panel("diagnostics") 222 return panel 223 224 225 def ensure_diagnostics_panel(window: sublime.Window): 226 return window.find_output_panel("diagnostics") or create_diagnostics_panel(window) 227 228 229 def update_diagnostics_panel(window: sublime.Window): 230 assert window, "missing window!" 231 base_dir = get_project_path(window) 232 233 panel = ensure_diagnostics_panel(window) 234 assert panel, "must have a panel now!" 235 236 diagnostics_by_file = get_window_diagnostics(window) 237 if diagnostics_by_file is not None: 238 active_panel = window.active_panel() 239 is_active_panel = (active_panel == "output.diagnostics") 240 panel.settings().set("result_base_dir", base_dir) 241 panel.set_read_only(False) 242 if diagnostics_by_file: 243 to_render = [] 244 for file_path, source_diagnostics in diagnostics_by_file.items(): 245 relative_file_path = os.path.relpath(file_path, base_dir) if base_dir else file_path 246 if source_diagnostics: 247 to_render.append(format_diagnostics(relative_file_path, source_diagnostics)) 248 panel.run_command("lsp_update_panel", {"characters": "\n".join(to_render)}) 249 if settings.auto_show_diagnostics_panel and not active_panel: 250 window.run_command("show_panel", 251 {"panel": "output.diagnostics"}) 252 else: 253 panel.run_command("lsp_clear_panel") 254 if settings.auto_show_diagnostics_panel and is_active_panel: 255 window.run_command("hide_panel", 256 {"panel": "output.diagnostics"}) 257 panel.set_read_only(True) 258 259 260 def format_diagnostics(file_path, origin_diagnostics): 261 content = " ◌ {}:\n".format(file_path) 262 for origin, diagnostics in origin_diagnostics.items(): 263 for diagnostic in diagnostics: 264 item = format_diagnostic(diagnostic) 265 content += item + "\n" 266 return content 267 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/plugin/diagnostics.py b/plugin/diagnostics.py --- a/plugin/diagnostics.py +++ b/plugin/diagnostics.py @@ -206,7 +206,13 @@ class LspShowDiagnosticsPanelCommand(sublime_plugin.WindowCommand): def run(self): ensure_diagnostics_panel(self.window) - self.window.run_command("show_panel", {"panel": "output.diagnostics"}) + active_panel = self.window.active_panel() + is_active_panel = (active_panel == "output.diagnostics") + + if is_active_panel: + self.window.run_command("hide_panel", {"panel": "output.diagnostics"}) + else: + self.window.run_command("show_panel", {"panel": "output.diagnostics"}) def create_diagnostics_panel(window):
{"golden_diff": "diff --git a/plugin/diagnostics.py b/plugin/diagnostics.py\n--- a/plugin/diagnostics.py\n+++ b/plugin/diagnostics.py\n@@ -206,7 +206,13 @@\n class LspShowDiagnosticsPanelCommand(sublime_plugin.WindowCommand):\n def run(self):\n ensure_diagnostics_panel(self.window)\n- self.window.run_command(\"show_panel\", {\"panel\": \"output.diagnostics\"})\n+ active_panel = self.window.active_panel()\n+ is_active_panel = (active_panel == \"output.diagnostics\")\n+\n+ if is_active_panel:\n+ self.window.run_command(\"hide_panel\", {\"panel\": \"output.diagnostics\"})\n+ else:\n+ self.window.run_command(\"show_panel\", {\"panel\": \"output.diagnostics\"})\n \n \n def create_diagnostics_panel(window):\n", "issue": "Feature request: Toggle diagnostic panel\nHi Tom, I have a suggestion.\r\n\r\nRight now I have a keyboard shortcut to open the diagnostic panel `ctrl+shift+m`.\r\nAs you can see. When I open the panel with that keybinding, and press the keybinding again the panel is still visible. \r\n\r\n![notoggle](https://user-images.githubusercontent.com/22029477/36727088-45b28d4a-1bbc-11e8-9f72-3a6d7f611f09.gif)\r\n\r\nWouldn't it be better if the panel could be toggled like this? :)\r\n\r\n![output3](https://user-images.githubusercontent.com/22029477/36727446-8e5a78d6-1bbd-11e8-8968-a914264a20da.gif)\r\n\r\n\r\nIf the answer is yes? I have already done that and I could create a pull request if you want :)\n", "before_files": [{"content": "import html\nimport os\nimport sublime\nimport sublime_plugin\n\ntry:\n from typing import Any, List, Dict, Tuple, Callable, Optional\n assert Any and List and Dict and Tuple and Callable and Optional\nexcept ImportError:\n pass\n\nfrom .core.settings import settings, PLUGIN_NAME\nfrom .core.protocol import Diagnostic, DiagnosticSeverity\nfrom .core.events import Events\nfrom .core.configurations import is_supported_syntax\nfrom .core.diagnostics import DiagnosticsUpdate, get_window_diagnostics, get_line_diagnostics\nfrom .core.workspace import get_project_path\nfrom .core.panels import create_output_panel\n\ndiagnostic_severity_names = {\n DiagnosticSeverity.Error: \"error\",\n DiagnosticSeverity.Warning: \"warning\",\n DiagnosticSeverity.Information: \"info\",\n DiagnosticSeverity.Hint: \"hint\"\n}\n\ndiagnostic_severity_scopes = {\n DiagnosticSeverity.Error: 'markup.deleted.lsp sublimelinter.mark.error markup.error.lsp',\n DiagnosticSeverity.Warning: 'markup.changed.lsp sublimelinter.mark.warning markup.warning.lsp',\n DiagnosticSeverity.Information: 'markup.inserted.lsp sublimelinter.gutter-mark markup.info.lsp',\n DiagnosticSeverity.Hint: 'markup.inserted.lsp sublimelinter.gutter-mark markup.info.suggestion.lsp'\n}\n\nstylesheet = '''\n <style>\n div.error-arrow {\n border-top: 0.4rem solid transparent;\n border-left: 0.5rem solid color(var(--redish) blend(var(--background) 30%));\n width: 0;\n height: 0;\n }\n div.error {\n padding: 0.4rem 0 0.4rem 0.7rem;\n margin: 0 0 0.2rem;\n border-radius: 0 0.2rem 0.2rem 0.2rem;\n }\n\n div.error span.message {\n padding-right: 0.7rem;\n }\n\n div.error a {\n text-decoration: inherit;\n padding: 0.35rem 0.7rem 0.45rem 0.8rem;\n position: relative;\n bottom: 0.05rem;\n border-radius: 0 0.2rem 0.2rem 0;\n font-weight: bold;\n }\n html.dark div.error a {\n background-color: #00000018;\n }\n html.light div.error a {\n background-color: #ffffff18;\n }\n </style>\n '''\n\nUNDERLINE_FLAGS = (sublime.DRAW_SQUIGGLY_UNDERLINE\n | sublime.DRAW_NO_OUTLINE\n | sublime.DRAW_NO_FILL\n | 
sublime.DRAW_EMPTY_AS_OVERWRITE)\n\nBOX_FLAGS = sublime.DRAW_NO_FILL | sublime.DRAW_EMPTY_AS_OVERWRITE\n\n\ndef create_phantom_html(text: str) -> str:\n global stylesheet\n return \"\"\"<body id=inline-error>{}\n <div class=\"error-arrow\"></div>\n <div class=\"error\">\n <span class=\"message\">{}</span>\n <a href=\"code-actions\">Code Actions</a>\n </div>\n </body>\"\"\".format(stylesheet, html.escape(text, quote=False))\n\n\ndef on_phantom_navigate(view: sublime.View, href: str, point: int):\n # TODO: don't mess with the user's cursor.\n sel = view.sel()\n sel.clear()\n sel.add(sublime.Region(point))\n view.run_command(\"lsp_code_actions\")\n\n\ndef create_phantom(view: sublime.View, diagnostic: Diagnostic) -> sublime.Phantom:\n region = diagnostic.range.to_region(view)\n # TODO: hook up hide phantom (if keeping them)\n content = create_phantom_html(diagnostic.message)\n return sublime.Phantom(\n region,\n '<p>' + content + '</p>',\n sublime.LAYOUT_BELOW,\n lambda href: on_phantom_navigate(view, href, region.begin())\n )\n\n\ndef format_severity(severity: int) -> str:\n return diagnostic_severity_names.get(severity, \"???\")\n\n\ndef format_diagnostic(diagnostic: Diagnostic) -> str:\n location = \"{:>8}:{:<4}\".format(\n diagnostic.range.start.row + 1, diagnostic.range.start.col + 1)\n message = diagnostic.message.replace(\"\\n\", \" \").replace(\"\\r\", \"\")\n return \" {}\\t{:<12}\\t{:<10}\\t{}\".format(\n location, diagnostic.source, format_severity(diagnostic.severity), message)\n\n\nphantom_sets_by_buffer = {} # type: Dict[int, sublime.PhantomSet]\n\n\ndef update_diagnostics_phantoms(view: sublime.View, diagnostics: 'List[Diagnostic]'):\n global phantom_sets_by_buffer\n\n buffer_id = view.buffer_id()\n if not settings.show_diagnostics_phantoms or view.is_dirty():\n phantoms = None\n else:\n phantoms = list(\n create_phantom(view, diagnostic) for diagnostic in diagnostics)\n if phantoms:\n phantom_set = phantom_sets_by_buffer.get(buffer_id)\n if not phantom_set:\n phantom_set = sublime.PhantomSet(view, \"lsp_diagnostics\")\n phantom_sets_by_buffer[buffer_id] = phantom_set\n phantom_set.update(phantoms)\n else:\n phantom_sets_by_buffer.pop(buffer_id, None)\n\n\ndef update_diagnostics_regions(view: sublime.View, diagnostics: 'List[Diagnostic]', severity: int):\n region_name = \"lsp_\" + format_severity(severity)\n if settings.show_diagnostics_phantoms and not view.is_dirty():\n regions = None\n else:\n regions = list(diagnostic.range.to_region(view) for diagnostic in diagnostics\n if diagnostic.severity == severity)\n if regions:\n scope_name = diagnostic_severity_scopes[severity]\n view.add_regions(\n region_name, regions, scope_name, settings.diagnostics_gutter_marker,\n UNDERLINE_FLAGS if settings.diagnostics_highlight_style == \"underline\" else BOX_FLAGS)\n else:\n view.erase_regions(region_name)\n\n\ndef update_diagnostics_in_view(view: sublime.View, diagnostics: 'List[Diagnostic]'):\n if view and view.is_valid():\n update_diagnostics_phantoms(view, diagnostics)\n for severity in range(DiagnosticSeverity.Error, DiagnosticSeverity.Information):\n update_diagnostics_regions(view, diagnostics, severity)\n\n\nEvents.subscribe(\"document.diagnostics\",\n lambda update: handle_diagnostics(update))\n\n\ndef handle_diagnostics(update: DiagnosticsUpdate):\n window = update.window\n view = window.find_open_file(update.file_path)\n if view:\n update_diagnostics_in_view(view, update.diagnostics)\n update_diagnostics_panel(window)\n\n\nclass 
DiagnosticsCursorListener(sublime_plugin.ViewEventListener):\n def __init__(self, view):\n self.view = view\n self.has_status = False\n\n @classmethod\n def is_applicable(cls, view_settings):\n syntax = view_settings.get('syntax')\n return settings.show_diagnostics_in_view_status and syntax and is_supported_syntax(syntax)\n\n def on_selection_modified_async(self):\n selections = self.view.sel()\n if len(selections) > 0:\n pos = selections[0].begin()\n line_diagnostics = get_line_diagnostics(self.view, pos)\n if len(line_diagnostics) > 0:\n self.show_diagnostics_status(line_diagnostics)\n elif self.has_status:\n self.clear_diagnostics_status()\n\n def show_diagnostics_status(self, line_diagnostics):\n self.has_status = True\n self.view.set_status('lsp_diagnostics', line_diagnostics[0].message)\n\n def clear_diagnostics_status(self):\n self.view.erase_status('lsp_diagnostics')\n self.has_status = False\n\n\nclass LspShowDiagnosticsPanelCommand(sublime_plugin.WindowCommand):\n def run(self):\n ensure_diagnostics_panel(self.window)\n self.window.run_command(\"show_panel\", {\"panel\": \"output.diagnostics\"})\n\n\ndef create_diagnostics_panel(window):\n panel = create_output_panel(window, \"diagnostics\")\n panel.settings().set(\"result_file_regex\", r\"^\\s*\\S\\s+(\\S.*):$\")\n panel.settings().set(\"result_line_regex\", r\"^\\s+([0-9]+):?([0-9]+).*$\")\n panel.assign_syntax(\"Packages/\" + PLUGIN_NAME +\n \"/Syntaxes/Diagnostics.sublime-syntax\")\n # Call create_output_panel a second time after assigning the above\n # settings, so that it'll be picked up as a result buffer\n # see: Packages/Default/exec.py#L228-L230\n panel = window.create_output_panel(\"diagnostics\")\n return panel\n\n\ndef ensure_diagnostics_panel(window: sublime.Window):\n return window.find_output_panel(\"diagnostics\") or create_diagnostics_panel(window)\n\n\ndef update_diagnostics_panel(window: sublime.Window):\n assert window, \"missing window!\"\n base_dir = get_project_path(window)\n\n panel = ensure_diagnostics_panel(window)\n assert panel, \"must have a panel now!\"\n\n diagnostics_by_file = get_window_diagnostics(window)\n if diagnostics_by_file is not None:\n active_panel = window.active_panel()\n is_active_panel = (active_panel == \"output.diagnostics\")\n panel.settings().set(\"result_base_dir\", base_dir)\n panel.set_read_only(False)\n if diagnostics_by_file:\n to_render = []\n for file_path, source_diagnostics in diagnostics_by_file.items():\n relative_file_path = os.path.relpath(file_path, base_dir) if base_dir else file_path\n if source_diagnostics:\n to_render.append(format_diagnostics(relative_file_path, source_diagnostics))\n panel.run_command(\"lsp_update_panel\", {\"characters\": \"\\n\".join(to_render)})\n if settings.auto_show_diagnostics_panel and not active_panel:\n window.run_command(\"show_panel\",\n {\"panel\": \"output.diagnostics\"})\n else:\n panel.run_command(\"lsp_clear_panel\")\n if settings.auto_show_diagnostics_panel and is_active_panel:\n window.run_command(\"hide_panel\",\n {\"panel\": \"output.diagnostics\"})\n panel.set_read_only(True)\n\n\ndef format_diagnostics(file_path, origin_diagnostics):\n content = \" \u25cc {}:\\n\".format(file_path)\n for origin, diagnostics in origin_diagnostics.items():\n for diagnostic in diagnostics:\n item = format_diagnostic(diagnostic)\n content += item + \"\\n\"\n return content\n", "path": "plugin/diagnostics.py"}], "after_files": [{"content": "import html\nimport os\nimport sublime\nimport sublime_plugin\n\ntry:\n from typing import Any, List, 
Dict, Tuple, Callable, Optional\n assert Any and List and Dict and Tuple and Callable and Optional\nexcept ImportError:\n pass\n\nfrom .core.settings import settings, PLUGIN_NAME\nfrom .core.protocol import Diagnostic, DiagnosticSeverity\nfrom .core.events import Events\nfrom .core.configurations import is_supported_syntax\nfrom .core.diagnostics import DiagnosticsUpdate, get_window_diagnostics, get_line_diagnostics\nfrom .core.workspace import get_project_path\nfrom .core.panels import create_output_panel\n\ndiagnostic_severity_names = {\n DiagnosticSeverity.Error: \"error\",\n DiagnosticSeverity.Warning: \"warning\",\n DiagnosticSeverity.Information: \"info\",\n DiagnosticSeverity.Hint: \"hint\"\n}\n\ndiagnostic_severity_scopes = {\n DiagnosticSeverity.Error: 'markup.deleted.lsp sublimelinter.mark.error markup.error.lsp',\n DiagnosticSeverity.Warning: 'markup.changed.lsp sublimelinter.mark.warning markup.warning.lsp',\n DiagnosticSeverity.Information: 'markup.inserted.lsp sublimelinter.gutter-mark markup.info.lsp',\n DiagnosticSeverity.Hint: 'markup.inserted.lsp sublimelinter.gutter-mark markup.info.suggestion.lsp'\n}\n\nstylesheet = '''\n <style>\n div.error-arrow {\n border-top: 0.4rem solid transparent;\n border-left: 0.5rem solid color(var(--redish) blend(var(--background) 30%));\n width: 0;\n height: 0;\n }\n div.error {\n padding: 0.4rem 0 0.4rem 0.7rem;\n margin: 0 0 0.2rem;\n border-radius: 0 0.2rem 0.2rem 0.2rem;\n }\n\n div.error span.message {\n padding-right: 0.7rem;\n }\n\n div.error a {\n text-decoration: inherit;\n padding: 0.35rem 0.7rem 0.45rem 0.8rem;\n position: relative;\n bottom: 0.05rem;\n border-radius: 0 0.2rem 0.2rem 0;\n font-weight: bold;\n }\n html.dark div.error a {\n background-color: #00000018;\n }\n html.light div.error a {\n background-color: #ffffff18;\n }\n </style>\n '''\n\nUNDERLINE_FLAGS = (sublime.DRAW_SQUIGGLY_UNDERLINE\n | sublime.DRAW_NO_OUTLINE\n | sublime.DRAW_NO_FILL\n | sublime.DRAW_EMPTY_AS_OVERWRITE)\n\nBOX_FLAGS = sublime.DRAW_NO_FILL | sublime.DRAW_EMPTY_AS_OVERWRITE\n\n\ndef create_phantom_html(text: str) -> str:\n global stylesheet\n return \"\"\"<body id=inline-error>{}\n <div class=\"error-arrow\"></div>\n <div class=\"error\">\n <span class=\"message\">{}</span>\n <a href=\"code-actions\">Code Actions</a>\n </div>\n </body>\"\"\".format(stylesheet, html.escape(text, quote=False))\n\n\ndef on_phantom_navigate(view: sublime.View, href: str, point: int):\n # TODO: don't mess with the user's cursor.\n sel = view.sel()\n sel.clear()\n sel.add(sublime.Region(point))\n view.run_command(\"lsp_code_actions\")\n\n\ndef create_phantom(view: sublime.View, diagnostic: Diagnostic) -> sublime.Phantom:\n region = diagnostic.range.to_region(view)\n # TODO: hook up hide phantom (if keeping them)\n content = create_phantom_html(diagnostic.message)\n return sublime.Phantom(\n region,\n '<p>' + content + '</p>',\n sublime.LAYOUT_BELOW,\n lambda href: on_phantom_navigate(view, href, region.begin())\n )\n\n\ndef format_severity(severity: int) -> str:\n return diagnostic_severity_names.get(severity, \"???\")\n\n\ndef format_diagnostic(diagnostic: Diagnostic) -> str:\n location = \"{:>8}:{:<4}\".format(\n diagnostic.range.start.row + 1, diagnostic.range.start.col + 1)\n message = diagnostic.message.replace(\"\\n\", \" \").replace(\"\\r\", \"\")\n return \" {}\\t{:<12}\\t{:<10}\\t{}\".format(\n location, diagnostic.source, format_severity(diagnostic.severity), message)\n\n\nphantom_sets_by_buffer = {} # type: Dict[int, sublime.PhantomSet]\n\n\ndef 
update_diagnostics_phantoms(view: sublime.View, diagnostics: 'List[Diagnostic]'):\n global phantom_sets_by_buffer\n\n buffer_id = view.buffer_id()\n if not settings.show_diagnostics_phantoms or view.is_dirty():\n phantoms = None\n else:\n phantoms = list(\n create_phantom(view, diagnostic) for diagnostic in diagnostics)\n if phantoms:\n phantom_set = phantom_sets_by_buffer.get(buffer_id)\n if not phantom_set:\n phantom_set = sublime.PhantomSet(view, \"lsp_diagnostics\")\n phantom_sets_by_buffer[buffer_id] = phantom_set\n phantom_set.update(phantoms)\n else:\n phantom_sets_by_buffer.pop(buffer_id, None)\n\n\ndef update_diagnostics_regions(view: sublime.View, diagnostics: 'List[Diagnostic]', severity: int):\n region_name = \"lsp_\" + format_severity(severity)\n if settings.show_diagnostics_phantoms and not view.is_dirty():\n regions = None\n else:\n regions = list(diagnostic.range.to_region(view) for diagnostic in diagnostics\n if diagnostic.severity == severity)\n if regions:\n scope_name = diagnostic_severity_scopes[severity]\n view.add_regions(\n region_name, regions, scope_name, settings.diagnostics_gutter_marker,\n UNDERLINE_FLAGS if settings.diagnostics_highlight_style == \"underline\" else BOX_FLAGS)\n else:\n view.erase_regions(region_name)\n\n\ndef update_diagnostics_in_view(view: sublime.View, diagnostics: 'List[Diagnostic]'):\n if view and view.is_valid():\n update_diagnostics_phantoms(view, diagnostics)\n for severity in range(DiagnosticSeverity.Error, DiagnosticSeverity.Information):\n update_diagnostics_regions(view, diagnostics, severity)\n\n\nEvents.subscribe(\"document.diagnostics\",\n lambda update: handle_diagnostics(update))\n\n\ndef handle_diagnostics(update: DiagnosticsUpdate):\n window = update.window\n view = window.find_open_file(update.file_path)\n if view:\n update_diagnostics_in_view(view, update.diagnostics)\n update_diagnostics_panel(window)\n\n\nclass DiagnosticsCursorListener(sublime_plugin.ViewEventListener):\n def __init__(self, view):\n self.view = view\n self.has_status = False\n\n @classmethod\n def is_applicable(cls, view_settings):\n syntax = view_settings.get('syntax')\n return settings.show_diagnostics_in_view_status and syntax and is_supported_syntax(syntax)\n\n def on_selection_modified_async(self):\n selections = self.view.sel()\n if len(selections) > 0:\n pos = selections[0].begin()\n line_diagnostics = get_line_diagnostics(self.view, pos)\n if len(line_diagnostics) > 0:\n self.show_diagnostics_status(line_diagnostics)\n elif self.has_status:\n self.clear_diagnostics_status()\n\n def show_diagnostics_status(self, line_diagnostics):\n self.has_status = True\n self.view.set_status('lsp_diagnostics', line_diagnostics[0].message)\n\n def clear_diagnostics_status(self):\n self.view.erase_status('lsp_diagnostics')\n self.has_status = False\n\n\nclass LspShowDiagnosticsPanelCommand(sublime_plugin.WindowCommand):\n def run(self):\n ensure_diagnostics_panel(self.window)\n active_panel = self.window.active_panel()\n is_active_panel = (active_panel == \"output.diagnostics\")\n\n if is_active_panel:\n self.window.run_command(\"hide_panel\", {\"panel\": \"output.diagnostics\"})\n else:\n self.window.run_command(\"show_panel\", {\"panel\": \"output.diagnostics\"})\n\n\ndef create_diagnostics_panel(window):\n panel = create_output_panel(window, \"diagnostics\")\n panel.settings().set(\"result_file_regex\", r\"^\\s*\\S\\s+(\\S.*):$\")\n panel.settings().set(\"result_line_regex\", r\"^\\s+([0-9]+):?([0-9]+).*$\")\n panel.assign_syntax(\"Packages/\" + PLUGIN_NAME 
+\n \"/Syntaxes/Diagnostics.sublime-syntax\")\n # Call create_output_panel a second time after assigning the above\n # settings, so that it'll be picked up as a result buffer\n # see: Packages/Default/exec.py#L228-L230\n panel = window.create_output_panel(\"diagnostics\")\n return panel\n\n\ndef ensure_diagnostics_panel(window: sublime.Window):\n return window.find_output_panel(\"diagnostics\") or create_diagnostics_panel(window)\n\n\ndef update_diagnostics_panel(window: sublime.Window):\n assert window, \"missing window!\"\n base_dir = get_project_path(window)\n\n panel = ensure_diagnostics_panel(window)\n assert panel, \"must have a panel now!\"\n\n diagnostics_by_file = get_window_diagnostics(window)\n if diagnostics_by_file is not None:\n active_panel = window.active_panel()\n is_active_panel = (active_panel == \"output.diagnostics\")\n panel.settings().set(\"result_base_dir\", base_dir)\n panel.set_read_only(False)\n if diagnostics_by_file:\n to_render = []\n for file_path, source_diagnostics in diagnostics_by_file.items():\n relative_file_path = os.path.relpath(file_path, base_dir) if base_dir else file_path\n if source_diagnostics:\n to_render.append(format_diagnostics(relative_file_path, source_diagnostics))\n panel.run_command(\"lsp_update_panel\", {\"characters\": \"\\n\".join(to_render)})\n if settings.auto_show_diagnostics_panel and not active_panel:\n window.run_command(\"show_panel\",\n {\"panel\": \"output.diagnostics\"})\n else:\n panel.run_command(\"lsp_clear_panel\")\n if settings.auto_show_diagnostics_panel and is_active_panel:\n window.run_command(\"hide_panel\",\n {\"panel\": \"output.diagnostics\"})\n panel.set_read_only(True)\n\n\ndef format_diagnostics(file_path, origin_diagnostics):\n content = \" \u25cc {}:\\n\".format(file_path)\n for origin, diagnostics in origin_diagnostics.items():\n for diagnostic in diagnostics:\n item = format_diagnostic(diagnostic)\n content += item + \"\\n\"\n return content\n", "path": "plugin/diagnostics.py"}]}
num_tokens: 3,427
num_tokens_diff: 169
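The sublimelsp record's golden diff turns `LspShowDiagnosticsPanelCommand` into a toggle: it asks `window.active_panel()` whether `output.diagnostics` is already in front, then dispatches either `hide_panel` or `show_panel`. Below is a stripped-down sketch of that pattern as a standalone `WindowCommand`; the class name is made up, but every API call is one the diff itself uses.

```python
# Sketch of the show/hide toggle pattern from the golden diff above.
# The command class name is hypothetical; active_panel(), show_panel and
# hide_panel are the calls the patched plugin code already relies on.
import sublime_plugin


class ToggleDiagnosticsPanelCommand(sublime_plugin.WindowCommand):
    """Show the diagnostics output panel, or hide it if it is already up."""

    PANEL = "output.diagnostics"

    def run(self):
        if self.window.active_panel() == self.PANEL:
            # Second keypress: the panel is already in front, so hide it.
            self.window.run_command("hide_panel", {"panel": self.PANEL})
        else:
            self.window.run_command("show_panel", {"panel": self.PANEL})
```

Bound to a key such as the issue's `ctrl+shift+m`, this command would be referenced in a keymap as `toggle_diagnostics_panel` (Sublime's snake_case form of the class name), giving the press-again-to-hide behaviour the reporter asked for.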
problem_id: gh_patches_debug_39569
source: rasdani/github-patches
task_type: git_diff
in_source_id: celery__celery-6917
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- I can‘t stop a task by its task_id [2018-12-02 23:53:58,955: INFO/MainProcess] Received task: tasks.add[bb1fe102-c1f9-4361-9370-1129900c0d52] [2018-12-02 23:54:02,479: INFO/MainProcess] Terminating bb1fe102-c1f9-4361-9370-1129900c0d52 (Signals.SIGTERM) [2018-12-02 23:54:02,490: ERROR/MainProcess] pidbox command error: NotImplementedError("<class 'celery.concurrency.eventlet.TaskPool'> does not implement kill_job",) Traceback (most recent call last): File "d:\envs\aidcs\lib\site-packages\kombu\pidbox.py", line 101, in dispatch reply = handle(method, arguments) File "d:\envs\aidcs\lib\site-packages\kombu\pidbox.py", line 122, in handle_cast return self.handle(method, arguments) File "d:\envs\aidcs\lib\site-packages\kombu\pidbox.py", line 116, in handle return self.handlers[method](self.state, **arguments) File "d:\envs\aidcs\lib\site-packages\celery\worker\control.py", line 163, in revoke request.terminate(state.consumer.pool, signal=signum) File "d:\envs\aidcs\lib\site-packages\celery\worker\request.py", line 249, in terminate pool.terminate_job(self.worker_pid, signal) File "d:\envs\aidcs\lib\site-packages\celery\concurrency\base.py", line 115, in terminate_job '{0} does not implement kill_job'.format(type(self))) NotImplementedError: <class 'celery.concurrency.eventlet.TaskPool'> does not implement kill_job [2018-12-02 23:55:38,956: INFO/MainProcess] Task tasks.add[bb1fe102-c1f9-4361-9370-1129900c0d52] succeeded in 100.0s: 8 this is my main code: from celery.app.control import Control from tasks import add, app myControl=Control(app) myControl.revoke(task_id="b11729b0-6272-4527-af9d-dc24c0ad492d", terminate=True) finally,if i want to look at the state of the task only by task_id (just like above), how . --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `celery/concurrency/eventlet.py` Content: ``` 1 """Eventlet execution pool.""" 2 import sys 3 from time import monotonic 4 5 from kombu.asynchronous import timer as _timer 6 7 from celery import signals 8 9 from . import base 10 11 __all__ = ('TaskPool',) 12 13 W_RACE = """\ 14 Celery module with %s imported before eventlet patched\ 15 """ 16 RACE_MODS = ('billiard.', 'celery.', 'kombu.') 17 18 19 #: Warn if we couldn't patch early enough, 20 #: and thread/socket depending celery modules have already been loaded. 
21 for mod in (mod for mod in sys.modules if mod.startswith(RACE_MODS)): 22 for side in ('thread', 'threading', 'socket'): # pragma: no cover 23 if getattr(mod, side, None): 24 import warnings 25 warnings.warn(RuntimeWarning(W_RACE % side)) 26 27 28 def apply_target(target, args=(), kwargs=None, callback=None, 29 accept_callback=None, getpid=None): 30 kwargs = {} if not kwargs else kwargs 31 return base.apply_target(target, args, kwargs, callback, accept_callback, 32 pid=getpid()) 33 34 35 class Timer(_timer.Timer): 36 """Eventlet Timer.""" 37 38 def __init__(self, *args, **kwargs): 39 from eventlet.greenthread import spawn_after 40 from greenlet import GreenletExit 41 super().__init__(*args, **kwargs) 42 43 self.GreenletExit = GreenletExit 44 self._spawn_after = spawn_after 45 self._queue = set() 46 47 def _enter(self, eta, priority, entry, **kwargs): 48 secs = max(eta - monotonic(), 0) 49 g = self._spawn_after(secs, entry) 50 self._queue.add(g) 51 g.link(self._entry_exit, entry) 52 g.entry = entry 53 g.eta = eta 54 g.priority = priority 55 g.canceled = False 56 return g 57 58 def _entry_exit(self, g, entry): 59 try: 60 try: 61 g.wait() 62 except self.GreenletExit: 63 entry.cancel() 64 g.canceled = True 65 finally: 66 self._queue.discard(g) 67 68 def clear(self): 69 queue = self._queue 70 while queue: 71 try: 72 queue.pop().cancel() 73 except (KeyError, self.GreenletExit): 74 pass 75 76 def cancel(self, tref): 77 try: 78 tref.cancel() 79 except self.GreenletExit: 80 pass 81 82 @property 83 def queue(self): 84 return self._queue 85 86 87 class TaskPool(base.BasePool): 88 """Eventlet Task Pool.""" 89 90 Timer = Timer 91 92 signal_safe = False 93 is_green = True 94 task_join_will_block = False 95 _pool = None 96 _quick_put = None 97 98 def __init__(self, *args, **kwargs): 99 from eventlet import greenthread 100 from eventlet.greenpool import GreenPool 101 self.Pool = GreenPool 102 self.getcurrent = greenthread.getcurrent 103 self.getpid = lambda: id(greenthread.getcurrent()) 104 self.spawn_n = greenthread.spawn_n 105 106 super().__init__(*args, **kwargs) 107 108 def on_start(self): 109 self._pool = self.Pool(self.limit) 110 signals.eventlet_pool_started.send(sender=self) 111 self._quick_put = self._pool.spawn_n 112 self._quick_apply_sig = signals.eventlet_pool_apply.send 113 114 def on_stop(self): 115 signals.eventlet_pool_preshutdown.send(sender=self) 116 if self._pool is not None: 117 self._pool.waitall() 118 signals.eventlet_pool_postshutdown.send(sender=self) 119 120 def on_apply(self, target, args=None, kwargs=None, callback=None, 121 accept_callback=None, **_): 122 self._quick_apply_sig( 123 sender=self, target=target, args=args, kwargs=kwargs, 124 ) 125 self._quick_put(apply_target, target, args, kwargs, 126 callback, accept_callback, 127 self.getpid) 128 129 def grow(self, n=1): 130 limit = self.limit + n 131 self._pool.resize(limit) 132 self.limit = limit 133 134 def shrink(self, n=1): 135 limit = self.limit - n 136 self._pool.resize(limit) 137 self.limit = limit 138 139 def _get_info(self): 140 info = super()._get_info() 141 info.update({ 142 'max-concurrency': self.limit, 143 'free-threads': self._pool.free(), 144 'running-threads': self._pool.running(), 145 }) 146 return info 147 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/celery/concurrency/eventlet.py b/celery/concurrency/eventlet.py --- a/celery/concurrency/eventlet.py +++ b/celery/concurrency/eventlet.py @@ -2,6 +2,7 @@ import sys from time import monotonic +from greenlet import GreenletExit from kombu.asynchronous import timer as _timer from celery import signals @@ -93,6 +94,7 @@ is_green = True task_join_will_block = False _pool = None + _pool_map = None _quick_put = None def __init__(self, *args, **kwargs): @@ -107,8 +109,9 @@ def on_start(self): self._pool = self.Pool(self.limit) + self._pool_map = {} signals.eventlet_pool_started.send(sender=self) - self._quick_put = self._pool.spawn_n + self._quick_put = self._pool.spawn self._quick_apply_sig = signals.eventlet_pool_apply.send def on_stop(self): @@ -119,12 +122,17 @@ def on_apply(self, target, args=None, kwargs=None, callback=None, accept_callback=None, **_): - self._quick_apply_sig( - sender=self, target=target, args=args, kwargs=kwargs, + target = TaskPool._make_killable_target(target) + self._quick_apply_sig(sender=self, target=target, args=args, kwargs=kwargs,) + greenlet = self._quick_put( + apply_target, + target, args, + kwargs, + callback, + accept_callback, + self.getpid ) - self._quick_put(apply_target, target, args, kwargs, - callback, accept_callback, - self.getpid) + self._add_to_pool_map(id(greenlet), greenlet) def grow(self, n=1): limit = self.limit + n @@ -136,6 +144,12 @@ self._pool.resize(limit) self.limit = limit + def terminate_job(self, pid, signal=None): + if pid in self._pool_map.keys(): + greenlet = self._pool_map[pid] + greenlet.kill() + greenlet.wait() + def _get_info(self): info = super()._get_info() info.update({ @@ -144,3 +158,24 @@ 'running-threads': self._pool.running(), }) return info + + @staticmethod + def _make_killable_target(target): + def killable_target(*args, **kwargs): + try: + return target(*args, **kwargs) + except GreenletExit: + return (False, None, None) + return killable_target + + def _add_to_pool_map(self, pid, greenlet): + self._pool_map[pid] = greenlet + greenlet.link( + TaskPool._cleanup_after_job_finish, + self._pool_map, + pid + ) + + @staticmethod + def _cleanup_after_job_finish(greenlet, pool_map, pid): + del pool_map[pid]
{"golden_diff": "diff --git a/celery/concurrency/eventlet.py b/celery/concurrency/eventlet.py\n--- a/celery/concurrency/eventlet.py\n+++ b/celery/concurrency/eventlet.py\n@@ -2,6 +2,7 @@\n import sys\n from time import monotonic\n \n+from greenlet import GreenletExit\n from kombu.asynchronous import timer as _timer\n \n from celery import signals\n@@ -93,6 +94,7 @@\n is_green = True\n task_join_will_block = False\n _pool = None\n+ _pool_map = None\n _quick_put = None\n \n def __init__(self, *args, **kwargs):\n@@ -107,8 +109,9 @@\n \n def on_start(self):\n self._pool = self.Pool(self.limit)\n+ self._pool_map = {}\n signals.eventlet_pool_started.send(sender=self)\n- self._quick_put = self._pool.spawn_n\n+ self._quick_put = self._pool.spawn\n self._quick_apply_sig = signals.eventlet_pool_apply.send\n \n def on_stop(self):\n@@ -119,12 +122,17 @@\n \n def on_apply(self, target, args=None, kwargs=None, callback=None,\n accept_callback=None, **_):\n- self._quick_apply_sig(\n- sender=self, target=target, args=args, kwargs=kwargs,\n+ target = TaskPool._make_killable_target(target)\n+ self._quick_apply_sig(sender=self, target=target, args=args, kwargs=kwargs,)\n+ greenlet = self._quick_put(\n+ apply_target,\n+ target, args,\n+ kwargs,\n+ callback,\n+ accept_callback,\n+ self.getpid\n )\n- self._quick_put(apply_target, target, args, kwargs,\n- callback, accept_callback,\n- self.getpid)\n+ self._add_to_pool_map(id(greenlet), greenlet)\n \n def grow(self, n=1):\n limit = self.limit + n\n@@ -136,6 +144,12 @@\n self._pool.resize(limit)\n self.limit = limit\n \n+ def terminate_job(self, pid, signal=None):\n+ if pid in self._pool_map.keys():\n+ greenlet = self._pool_map[pid]\n+ greenlet.kill()\n+ greenlet.wait()\n+\n def _get_info(self):\n info = super()._get_info()\n info.update({\n@@ -144,3 +158,24 @@\n 'running-threads': self._pool.running(),\n })\n return info\n+\n+ @staticmethod\n+ def _make_killable_target(target):\n+ def killable_target(*args, **kwargs):\n+ try:\n+ return target(*args, **kwargs)\n+ except GreenletExit:\n+ return (False, None, None)\n+ return killable_target\n+\n+ def _add_to_pool_map(self, pid, greenlet):\n+ self._pool_map[pid] = greenlet\n+ greenlet.link(\n+ TaskPool._cleanup_after_job_finish,\n+ self._pool_map,\n+ pid\n+ )\n+\n+ @staticmethod\n+ def _cleanup_after_job_finish(greenlet, pool_map, pid):\n+ del pool_map[pid]\n", "issue": "I can\u2018t stop a task by its task_id\n[2018-12-02 23:53:58,955: INFO/MainProcess] Received task: tasks.add[bb1fe102-c1f9-4361-9370-1129900c0d52]\r\n[2018-12-02 23:54:02,479: INFO/MainProcess] Terminating bb1fe102-c1f9-4361-9370-1129900c0d52 (Signals.SIGTERM)\r\n[2018-12-02 23:54:02,490: ERROR/MainProcess] pidbox command error: NotImplementedError(\"<class 'celery.concurrency.eventlet.TaskPool'> does not implement kill_job\",)\r\nTraceback (most recent call last):\r\n File \"d:\\envs\\aidcs\\lib\\site-packages\\kombu\\pidbox.py\", line 101, in dispatch\r\n reply = handle(method, arguments)\r\n File \"d:\\envs\\aidcs\\lib\\site-packages\\kombu\\pidbox.py\", line 122, in handle_cast\r\n return self.handle(method, arguments)\r\n File \"d:\\envs\\aidcs\\lib\\site-packages\\kombu\\pidbox.py\", line 116, in handle\r\n return self.handlers[method](self.state, **arguments)\r\n File \"d:\\envs\\aidcs\\lib\\site-packages\\celery\\worker\\control.py\", line 163, in revoke\r\n request.terminate(state.consumer.pool, signal=signum)\r\n File \"d:\\envs\\aidcs\\lib\\site-packages\\celery\\worker\\request.py\", line 249, in terminate\r\n 
pool.terminate_job(self.worker_pid, signal)\r\n File \"d:\\envs\\aidcs\\lib\\site-packages\\celery\\concurrency\\base.py\", line 115, in terminate_job\r\n '{0} does not implement kill_job'.format(type(self)))\r\nNotImplementedError: <class 'celery.concurrency.eventlet.TaskPool'> does not implement kill_job\r\n[2018-12-02 23:55:38,956: INFO/MainProcess] Task tasks.add[bb1fe102-c1f9-4361-9370-1129900c0d52] succeeded in 100.0s: 8\r\n\r\n\r\n\r\n\r\nthis is my main code:\r\n\r\nfrom celery.app.control import Control\r\nfrom tasks import add, app\r\n\r\nmyControl=Control(app)\r\nmyControl.revoke(task_id=\"b11729b0-6272-4527-af9d-dc24c0ad492d\", terminate=True)\r\n\r\n\r\n\r\nfinally\uff0cif i want to look at the state of the task only by task_id (just like above), how .\n", "before_files": [{"content": "\"\"\"Eventlet execution pool.\"\"\"\nimport sys\nfrom time import monotonic\n\nfrom kombu.asynchronous import timer as _timer\n\nfrom celery import signals\n\nfrom . import base\n\n__all__ = ('TaskPool',)\n\nW_RACE = \"\"\"\\\nCelery module with %s imported before eventlet patched\\\n\"\"\"\nRACE_MODS = ('billiard.', 'celery.', 'kombu.')\n\n\n#: Warn if we couldn't patch early enough,\n#: and thread/socket depending celery modules have already been loaded.\nfor mod in (mod for mod in sys.modules if mod.startswith(RACE_MODS)):\n for side in ('thread', 'threading', 'socket'): # pragma: no cover\n if getattr(mod, side, None):\n import warnings\n warnings.warn(RuntimeWarning(W_RACE % side))\n\n\ndef apply_target(target, args=(), kwargs=None, callback=None,\n accept_callback=None, getpid=None):\n kwargs = {} if not kwargs else kwargs\n return base.apply_target(target, args, kwargs, callback, accept_callback,\n pid=getpid())\n\n\nclass Timer(_timer.Timer):\n \"\"\"Eventlet Timer.\"\"\"\n\n def __init__(self, *args, **kwargs):\n from eventlet.greenthread import spawn_after\n from greenlet import GreenletExit\n super().__init__(*args, **kwargs)\n\n self.GreenletExit = GreenletExit\n self._spawn_after = spawn_after\n self._queue = set()\n\n def _enter(self, eta, priority, entry, **kwargs):\n secs = max(eta - monotonic(), 0)\n g = self._spawn_after(secs, entry)\n self._queue.add(g)\n g.link(self._entry_exit, entry)\n g.entry = entry\n g.eta = eta\n g.priority = priority\n g.canceled = False\n return g\n\n def _entry_exit(self, g, entry):\n try:\n try:\n g.wait()\n except self.GreenletExit:\n entry.cancel()\n g.canceled = True\n finally:\n self._queue.discard(g)\n\n def clear(self):\n queue = self._queue\n while queue:\n try:\n queue.pop().cancel()\n except (KeyError, self.GreenletExit):\n pass\n\n def cancel(self, tref):\n try:\n tref.cancel()\n except self.GreenletExit:\n pass\n\n @property\n def queue(self):\n return self._queue\n\n\nclass TaskPool(base.BasePool):\n \"\"\"Eventlet Task Pool.\"\"\"\n\n Timer = Timer\n\n signal_safe = False\n is_green = True\n task_join_will_block = False\n _pool = None\n _quick_put = None\n\n def __init__(self, *args, **kwargs):\n from eventlet import greenthread\n from eventlet.greenpool import GreenPool\n self.Pool = GreenPool\n self.getcurrent = greenthread.getcurrent\n self.getpid = lambda: id(greenthread.getcurrent())\n self.spawn_n = greenthread.spawn_n\n\n super().__init__(*args, **kwargs)\n\n def on_start(self):\n self._pool = self.Pool(self.limit)\n signals.eventlet_pool_started.send(sender=self)\n self._quick_put = self._pool.spawn_n\n self._quick_apply_sig = signals.eventlet_pool_apply.send\n\n def on_stop(self):\n 
signals.eventlet_pool_preshutdown.send(sender=self)\n if self._pool is not None:\n self._pool.waitall()\n signals.eventlet_pool_postshutdown.send(sender=self)\n\n def on_apply(self, target, args=None, kwargs=None, callback=None,\n accept_callback=None, **_):\n self._quick_apply_sig(\n sender=self, target=target, args=args, kwargs=kwargs,\n )\n self._quick_put(apply_target, target, args, kwargs,\n callback, accept_callback,\n self.getpid)\n\n def grow(self, n=1):\n limit = self.limit + n\n self._pool.resize(limit)\n self.limit = limit\n\n def shrink(self, n=1):\n limit = self.limit - n\n self._pool.resize(limit)\n self.limit = limit\n\n def _get_info(self):\n info = super()._get_info()\n info.update({\n 'max-concurrency': self.limit,\n 'free-threads': self._pool.free(),\n 'running-threads': self._pool.running(),\n })\n return info\n", "path": "celery/concurrency/eventlet.py"}], "after_files": [{"content": "\"\"\"Eventlet execution pool.\"\"\"\nimport sys\nfrom time import monotonic\n\nfrom greenlet import GreenletExit\nfrom kombu.asynchronous import timer as _timer\n\nfrom celery import signals\n\nfrom . import base\n\n__all__ = ('TaskPool',)\n\nW_RACE = \"\"\"\\\nCelery module with %s imported before eventlet patched\\\n\"\"\"\nRACE_MODS = ('billiard.', 'celery.', 'kombu.')\n\n\n#: Warn if we couldn't patch early enough,\n#: and thread/socket depending celery modules have already been loaded.\nfor mod in (mod for mod in sys.modules if mod.startswith(RACE_MODS)):\n for side in ('thread', 'threading', 'socket'): # pragma: no cover\n if getattr(mod, side, None):\n import warnings\n warnings.warn(RuntimeWarning(W_RACE % side))\n\n\ndef apply_target(target, args=(), kwargs=None, callback=None,\n accept_callback=None, getpid=None):\n kwargs = {} if not kwargs else kwargs\n return base.apply_target(target, args, kwargs, callback, accept_callback,\n pid=getpid())\n\n\nclass Timer(_timer.Timer):\n \"\"\"Eventlet Timer.\"\"\"\n\n def __init__(self, *args, **kwargs):\n from eventlet.greenthread import spawn_after\n from greenlet import GreenletExit\n super().__init__(*args, **kwargs)\n\n self.GreenletExit = GreenletExit\n self._spawn_after = spawn_after\n self._queue = set()\n\n def _enter(self, eta, priority, entry, **kwargs):\n secs = max(eta - monotonic(), 0)\n g = self._spawn_after(secs, entry)\n self._queue.add(g)\n g.link(self._entry_exit, entry)\n g.entry = entry\n g.eta = eta\n g.priority = priority\n g.canceled = False\n return g\n\n def _entry_exit(self, g, entry):\n try:\n try:\n g.wait()\n except self.GreenletExit:\n entry.cancel()\n g.canceled = True\n finally:\n self._queue.discard(g)\n\n def clear(self):\n queue = self._queue\n while queue:\n try:\n queue.pop().cancel()\n except (KeyError, self.GreenletExit):\n pass\n\n def cancel(self, tref):\n try:\n tref.cancel()\n except self.GreenletExit:\n pass\n\n @property\n def queue(self):\n return self._queue\n\n\nclass TaskPool(base.BasePool):\n \"\"\"Eventlet Task Pool.\"\"\"\n\n Timer = Timer\n\n signal_safe = False\n is_green = True\n task_join_will_block = False\n _pool = None\n _pool_map = None\n _quick_put = None\n\n def __init__(self, *args, **kwargs):\n from eventlet import greenthread\n from eventlet.greenpool import GreenPool\n self.Pool = GreenPool\n self.getcurrent = greenthread.getcurrent\n self.getpid = lambda: id(greenthread.getcurrent())\n self.spawn_n = greenthread.spawn_n\n\n super().__init__(*args, **kwargs)\n\n def on_start(self):\n self._pool = self.Pool(self.limit)\n self._pool_map = {}\n 
signals.eventlet_pool_started.send(sender=self)\n self._quick_put = self._pool.spawn\n self._quick_apply_sig = signals.eventlet_pool_apply.send\n\n def on_stop(self):\n signals.eventlet_pool_preshutdown.send(sender=self)\n if self._pool is not None:\n self._pool.waitall()\n signals.eventlet_pool_postshutdown.send(sender=self)\n\n def on_apply(self, target, args=None, kwargs=None, callback=None,\n accept_callback=None, **_):\n target = TaskPool._make_killable_target(target)\n self._quick_apply_sig(sender=self, target=target, args=args, kwargs=kwargs,)\n greenlet = self._quick_put(\n apply_target,\n target, args,\n kwargs,\n callback,\n accept_callback,\n self.getpid\n )\n self._add_to_pool_map(id(greenlet), greenlet)\n\n def grow(self, n=1):\n limit = self.limit + n\n self._pool.resize(limit)\n self.limit = limit\n\n def shrink(self, n=1):\n limit = self.limit - n\n self._pool.resize(limit)\n self.limit = limit\n\n def terminate_job(self, pid, signal=None):\n if pid in self._pool_map.keys():\n greenlet = self._pool_map[pid]\n greenlet.kill()\n greenlet.wait()\n\n def _get_info(self):\n info = super()._get_info()\n info.update({\n 'max-concurrency': self.limit,\n 'free-threads': self._pool.free(),\n 'running-threads': self._pool.running(),\n })\n return info\n\n @staticmethod\n def _make_killable_target(target):\n def killable_target(*args, **kwargs):\n try:\n return target(*args, **kwargs)\n except GreenletExit:\n return (False, None, None)\n return killable_target\n\n def _add_to_pool_map(self, pid, greenlet):\n self._pool_map[pid] = greenlet\n greenlet.link(\n TaskPool._cleanup_after_job_finish,\n self._pool_map,\n pid\n )\n\n @staticmethod\n def _cleanup_after_job_finish(greenlet, pool_map, pid):\n del pool_map[pid]\n", "path": "celery/concurrency/eventlet.py"}]}
num_tokens: 2,242
num_tokens_diff: 729
problem_id: gh_patches_debug_18536
source: rasdani/github-patches
task_type: git_diff
in_source_id: learningequality__kolibri-2113
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- print detailed exception info to server console on 500 errors Currently, the web server middleware swallows all Python exceptions and returns the traceback information to the client in a 500 error. This makes debugging difficult. It should be printed to the console and saved to log files. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `kolibri/deployment/default/settings/base.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 """ 3 Django settings for kolibri project. 4 5 For more information on this file, see 6 https://docs.djangoproject.com/en/1.9/topics/settings/ 7 8 For the full list of settings and their values, see 9 https://docs.djangoproject.com/en/1.9/ref/settings/ 10 """ 11 from __future__ import absolute_import, print_function, unicode_literals 12 13 # Build paths inside the project like this: os.path.join(BASE_DIR, ...) 14 import os 15 16 # import kolibri, so we can get the path to the module. 17 import kolibri 18 # we load other utilities related to i18n 19 # This is essential! We load the kolibri conf INSIDE the Django conf 20 from kolibri.utils import conf, i18n 21 from tzlocal import get_localzone 22 23 KOLIBRI_MODULE_PATH = os.path.dirname(kolibri.__file__) 24 25 BASE_DIR = os.path.abspath(os.path.dirname(__name__)) 26 27 KOLIBRI_HOME = os.environ['KOLIBRI_HOME'] 28 29 KOLIBRI_CORE_JS_NAME = 'kolibriGlobal' 30 31 LOCALE_PATHS = [ 32 os.path.join(KOLIBRI_MODULE_PATH, "locale"), 33 ] 34 35 # Quick-start development settings - unsuitable for production 36 # See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/ 37 38 # SECURITY WARNING: keep the secret key used in production secret! 39 SECRET_KEY = 'f@ey3)y^03r9^@mou97apom*+c1m#b1!cwbm50^s4yk72xce27' 40 41 # SECURITY WARNING: don't run with debug turned on in production! 42 DEBUG = False 43 44 ALLOWED_HOSTS = ['*'] 45 46 # Application definition 47 48 INSTALLED_APPS = [ 49 'kolibri.core', 50 'django.contrib.admin', 51 'django.contrib.auth', 52 'django.contrib.contenttypes', 53 'django.contrib.sessions', 54 'django.contrib.messages', 55 'django.contrib.staticfiles', 56 'kolibri.auth.apps.KolibriAuthConfig', 57 'kolibri.content', 58 'kolibri.logger', 59 'kolibri.tasks.apps.KolibriTasksConfig', 60 'kolibri.core.webpack', 61 'kolibri.core.exams', 62 'kolibri.core.device', 63 'kolibri.core.discovery', 64 'rest_framework', 65 'django_js_reverse', 66 'jsonfield', 67 'morango', 68 ] + conf.config['INSTALLED_APPS'] 69 70 # Add in the external plugins' locale paths. Our frontend messages depends 71 # specifically on the value of LOCALE_PATHS to find its catalog files. 
72 LOCALE_PATHS += [ 73 i18n.get_installed_app_locale_path(app) for app in INSTALLED_APPS 74 if i18n.is_external_plugin(app) 75 ] 76 77 MIDDLEWARE_CLASSES = ( 78 'django.contrib.sessions.middleware.SessionMiddleware', 79 'django.middleware.locale.LocaleMiddleware', 80 'django.middleware.common.CommonMiddleware', 81 'django.middleware.csrf.CsrfViewMiddleware', 82 'kolibri.plugins.setup_wizard.middleware.SetupWizardMiddleware', 83 'kolibri.auth.middleware.CustomAuthenticationMiddleware', 84 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 85 'django.contrib.messages.middleware.MessageMiddleware', 86 'django.middleware.clickjacking.XFrameOptionsMiddleware', 87 'django.middleware.security.SecurityMiddleware', 88 ) 89 90 QUEUE_JOB_STORAGE_PATH = os.path.join(KOLIBRI_HOME, "job_storage.sqlite3") 91 92 ROOT_URLCONF = 'kolibri.deployment.default.urls' 93 94 TEMPLATES = [ 95 { 96 'BACKEND': 'django.template.backends.django.DjangoTemplates', 97 'DIRS': [], 98 'APP_DIRS': True, 99 'OPTIONS': { 100 'context_processors': [ 101 'django.template.context_processors.debug', 102 'django.template.context_processors.request', 103 'django.contrib.auth.context_processors.auth', 104 'django.contrib.messages.context_processors.messages', 105 'kolibri.core.context_processors.custom_context_processor.return_session', 106 ], 107 }, 108 }, 109 ] 110 111 WSGI_APPLICATION = 'kolibri.deployment.default.wsgi.application' 112 113 114 # Database 115 # https://docs.djangoproject.com/en/1.9/ref/settings/#databases 116 117 DATABASES = { 118 'default': { 119 'ENGINE': 'django.db.backends.sqlite3', 120 'NAME': os.path.join(KOLIBRI_HOME, 'db.sqlite3'), 121 'OPTIONS': { 122 'timeout': 100, 123 } 124 }, 125 } 126 127 # Content directories and URLs for channel metadata and content files 128 129 # Directory and URL for storing content databases for channel data 130 CONTENT_DATABASE_DIR = os.path.join(KOLIBRI_HOME, 'content', 'databases') 131 if not os.path.exists(CONTENT_DATABASE_DIR): 132 os.makedirs(CONTENT_DATABASE_DIR) 133 134 # Directory and URL for storing de-duped content files for all channels 135 CONTENT_STORAGE_DIR = os.path.join(KOLIBRI_HOME, 'content', 'storage') 136 if not os.path.exists(CONTENT_STORAGE_DIR): 137 os.makedirs(CONTENT_STORAGE_DIR) 138 139 # Base default URL for downloading content from an online server 140 CENTRAL_CONTENT_DOWNLOAD_BASE_URL = "https://contentworkshop.learningequality.org" 141 142 # Internationalization 143 # https://docs.djangoproject.com/en/1.9/topics/i18n/ 144 145 LANGUAGES = [ 146 ('en', 'English'), 147 ('sw-tz', 'Kiswahili'), 148 ('es-es', 'Español'), 149 ('es-mx', 'Español (México)'), 150 ('fr-fr', 'Français, langue française'), 151 ('pt-pt', 'Português'), 152 ('hi-in', 'हिंदी'), 153 ] 154 155 LANGUAGE_CODE = conf.config.get("LANGUAGE_CODE") or "en" 156 157 TIME_ZONE = get_localzone().zone 158 159 USE_I18N = True 160 161 USE_L10N = True 162 163 USE_TZ = True 164 165 # Static files (CSS, JavaScript, Images) 166 # https://docs.djangoproject.com/en/1.9/howto/static-files/ 167 168 STATIC_URL = '/static/' 169 STATIC_ROOT = os.path.join(KOLIBRI_HOME, "static") 170 171 # https://docs.djangoproject.com/en/1.9/ref/settings/#std:setting-LOGGING 172 # https://docs.djangoproject.com/en/1.9/topics/logging/ 173 174 LOGGING = { 175 'version': 1, 176 'disable_existing_loggers': False, 177 'formatters': { 178 'verbose': { 179 'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s' 180 }, 181 'simple': { 182 'format': '%(levelname)s %(message)s' 183 }, 184 
'simple_date': { 185 'format': '%(levelname)s %(asctime)s %(module)s %(message)s' 186 }, 187 'color': { 188 '()': 'colorlog.ColoredFormatter', 189 'format': '%(log_color)s%(levelname)-8s %(message)s', 190 'log_colors': { 191 'DEBUG': 'bold_black', 192 'INFO': 'white', 193 'WARNING': 'yellow', 194 'ERROR': 'red', 195 'CRITICAL': 'bold_red', 196 }, 197 } 198 }, 199 'filters': { 200 'require_debug_true': { 201 '()': 'django.utils.log.RequireDebugTrue', 202 }, 203 'require_debug_false': { 204 '()': 'django.utils.log.RequireDebugFalse', 205 }, 206 }, 207 'handlers': { 208 'console': { 209 'level': 'INFO', 210 'class': 'logging.StreamHandler', 211 'formatter': 'color' 212 }, 213 'mail_admins': { 214 'level': 'ERROR', 215 'class': 'django.utils.log.AdminEmailHandler', 216 'filters': ['require_debug_false'], 217 }, 218 'file_debug': { 219 'level': 'DEBUG', 220 'filters': ['require_debug_true'], 221 'class': 'logging.FileHandler', 222 'filename': os.path.join(KOLIBRI_HOME, 'debug.log'), 223 'formatter': 'simple_date', 224 }, 225 'file': { 226 'level': 'INFO', 227 'filters': [], 228 'class': 'logging.FileHandler', 229 'filename': os.path.join(KOLIBRI_HOME, 'kolibri.log'), 230 'formatter': 'simple_date', 231 }, 232 }, 233 'loggers': { 234 'django': { 235 'handlers': ['console', 'file'], 236 'propagate': True, 237 }, 238 'django.request': { 239 'handlers': ['mail_admins', 'file'], 240 'level': 'ERROR', 241 'propagate': False, 242 }, 243 'kolibri': { 244 'handlers': ['console', 'mail_admins', 'file', 'file_debug'], 245 'level': 'INFO', 246 } 247 } 248 } 249 250 251 # Customizing Django auth system 252 # https://docs.djangoproject.com/en/1.9/topics/auth/customizing/ 253 254 AUTH_USER_MODEL = 'kolibriauth.FacilityUser' 255 256 AUTHENTICATION_BACKENDS = ['kolibri.auth.backends.FacilityUserBackend'] 257 258 259 # Django REST Framework 260 # http://www.django-rest-framework.org/api-guide/settings/ 261 262 REST_FRAMEWORK = { 263 "UNAUTHENTICATED_USER": "kolibri.auth.models.KolibriAnonymousUser", 264 'DEFAULT_RENDERER_CLASSES': ( 265 'rest_framework.renderers.JSONRenderer', 266 'rest_framework.renderers.BrowsableAPIRenderer', 267 'rest_framework_csv.renderers.CSVRenderer', 268 ), 269 } 270 271 # System warnings to disable 272 # see https://docs.djangoproject.com/en/1.9/ref/settings/#silenced-system-checks 273 SILENCED_SYSTEM_CHECKS = ["auth.W004"] 274 275 # Configuration for Django JS Reverse 276 # https://github.com/ierror/django-js-reverse#options 277 278 JS_REVERSE_JS_VAR_NAME = 'kolibriUrls' 279 280 JS_REVERSE_EXCLUDE_NAMESPACES = ['admin', ] 281 282 ENABLE_DATA_BOOTSTRAPPING = True 283 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/kolibri/deployment/default/settings/base.py b/kolibri/deployment/default/settings/base.py --- a/kolibri/deployment/default/settings/base.py +++ b/kolibri/deployment/default/settings/base.py @@ -215,6 +215,12 @@ 'class': 'django.utils.log.AdminEmailHandler', 'filters': ['require_debug_false'], }, + 'request_debug': { + 'level': 'ERROR', + 'class': 'logging.StreamHandler', + 'formatter': 'color', + 'filters': ['require_debug_true'], + }, 'file_debug': { 'level': 'DEBUG', 'filters': ['require_debug_true'], @@ -236,7 +242,7 @@ 'propagate': True, }, 'django.request': { - 'handlers': ['mail_admins', 'file'], + 'handlers': ['mail_admins', 'file', 'request_debug'], 'level': 'ERROR', 'propagate': False, },
{"golden_diff": "diff --git a/kolibri/deployment/default/settings/base.py b/kolibri/deployment/default/settings/base.py\n--- a/kolibri/deployment/default/settings/base.py\n+++ b/kolibri/deployment/default/settings/base.py\n@@ -215,6 +215,12 @@\n 'class': 'django.utils.log.AdminEmailHandler',\n 'filters': ['require_debug_false'],\n },\n+ 'request_debug': {\n+ 'level': 'ERROR',\n+ 'class': 'logging.StreamHandler',\n+ 'formatter': 'color',\n+ 'filters': ['require_debug_true'],\n+ },\n 'file_debug': {\n 'level': 'DEBUG',\n 'filters': ['require_debug_true'],\n@@ -236,7 +242,7 @@\n 'propagate': True,\n },\n 'django.request': {\n- 'handlers': ['mail_admins', 'file'],\n+ 'handlers': ['mail_admins', 'file', 'request_debug'],\n 'level': 'ERROR',\n 'propagate': False,\n },\n", "issue": "print detailed exception info to server console on 500 errors\n\r\nCurrently, the web server middleware swallows all Python exceptions and returns the traceback information to the client in a 500 error. This makes debugging difficult.\r\n\r\nIt should be printed to the console and saved to log files.\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nDjango settings for kolibri project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.9/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.9/ref/settings/\n\"\"\"\nfrom __future__ import absolute_import, print_function, unicode_literals\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\n\n# import kolibri, so we can get the path to the module.\nimport kolibri\n# we load other utilities related to i18n\n# This is essential! We load the kolibri conf INSIDE the Django conf\nfrom kolibri.utils import conf, i18n\nfrom tzlocal import get_localzone\n\nKOLIBRI_MODULE_PATH = os.path.dirname(kolibri.__file__)\n\nBASE_DIR = os.path.abspath(os.path.dirname(__name__))\n\nKOLIBRI_HOME = os.environ['KOLIBRI_HOME']\n\nKOLIBRI_CORE_JS_NAME = 'kolibriGlobal'\n\nLOCALE_PATHS = [\n os.path.join(KOLIBRI_MODULE_PATH, \"locale\"),\n]\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = 'f@ey3)y^03r9^@mou97apom*+c1m#b1!cwbm50^s4yk72xce27'\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = False\n\nALLOWED_HOSTS = ['*']\n\n# Application definition\n\nINSTALLED_APPS = [\n 'kolibri.core',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'kolibri.auth.apps.KolibriAuthConfig',\n 'kolibri.content',\n 'kolibri.logger',\n 'kolibri.tasks.apps.KolibriTasksConfig',\n 'kolibri.core.webpack',\n 'kolibri.core.exams',\n 'kolibri.core.device',\n 'kolibri.core.discovery',\n 'rest_framework',\n 'django_js_reverse',\n 'jsonfield',\n 'morango',\n] + conf.config['INSTALLED_APPS']\n\n# Add in the external plugins' locale paths. 
Our frontend messages depends\n# specifically on the value of LOCALE_PATHS to find its catalog files.\nLOCALE_PATHS += [\n i18n.get_installed_app_locale_path(app) for app in INSTALLED_APPS\n if i18n.is_external_plugin(app)\n]\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'kolibri.plugins.setup_wizard.middleware.SetupWizardMiddleware',\n 'kolibri.auth.middleware.CustomAuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'django.middleware.security.SecurityMiddleware',\n)\n\nQUEUE_JOB_STORAGE_PATH = os.path.join(KOLIBRI_HOME, \"job_storage.sqlite3\")\n\nROOT_URLCONF = 'kolibri.deployment.default.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n 'kolibri.core.context_processors.custom_context_processor.return_session',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'kolibri.deployment.default.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/1.9/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(KOLIBRI_HOME, 'db.sqlite3'),\n 'OPTIONS': {\n 'timeout': 100,\n }\n },\n}\n\n# Content directories and URLs for channel metadata and content files\n\n# Directory and URL for storing content databases for channel data\nCONTENT_DATABASE_DIR = os.path.join(KOLIBRI_HOME, 'content', 'databases')\nif not os.path.exists(CONTENT_DATABASE_DIR):\n os.makedirs(CONTENT_DATABASE_DIR)\n\n# Directory and URL for storing de-duped content files for all channels\nCONTENT_STORAGE_DIR = os.path.join(KOLIBRI_HOME, 'content', 'storage')\nif not os.path.exists(CONTENT_STORAGE_DIR):\n os.makedirs(CONTENT_STORAGE_DIR)\n\n# Base default URL for downloading content from an online server\nCENTRAL_CONTENT_DOWNLOAD_BASE_URL = \"https://contentworkshop.learningequality.org\"\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.9/topics/i18n/\n\nLANGUAGES = [\n ('en', 'English'),\n ('sw-tz', 'Kiswahili'),\n ('es-es', 'Espa\u00f1ol'),\n ('es-mx', 'Espa\u00f1ol (M\u00e9xico)'),\n ('fr-fr', 'Fran\u00e7ais, langue fran\u00e7aise'),\n ('pt-pt', 'Portugu\u00eas'),\n ('hi-in', '\u0939\u093f\u0902\u0926\u0940'),\n]\n\nLANGUAGE_CODE = conf.config.get(\"LANGUAGE_CODE\") or \"en\"\n\nTIME_ZONE = get_localzone().zone\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.9/howto/static-files/\n\nSTATIC_URL = '/static/'\nSTATIC_ROOT = os.path.join(KOLIBRI_HOME, \"static\")\n\n# https://docs.djangoproject.com/en/1.9/ref/settings/#std:setting-LOGGING\n# https://docs.djangoproject.com/en/1.9/topics/logging/\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'verbose': {\n 'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'\n },\n 'simple': {\n 'format': '%(levelname)s %(message)s'\n },\n 'simple_date': {\n 'format': '%(levelname)s %(asctime)s %(module)s 
%(message)s'\n },\n 'color': {\n '()': 'colorlog.ColoredFormatter',\n 'format': '%(log_color)s%(levelname)-8s %(message)s',\n 'log_colors': {\n 'DEBUG': 'bold_black',\n 'INFO': 'white',\n 'WARNING': 'yellow',\n 'ERROR': 'red',\n 'CRITICAL': 'bold_red',\n },\n }\n },\n 'filters': {\n 'require_debug_true': {\n '()': 'django.utils.log.RequireDebugTrue',\n },\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse',\n },\n },\n 'handlers': {\n 'console': {\n 'level': 'INFO',\n 'class': 'logging.StreamHandler',\n 'formatter': 'color'\n },\n 'mail_admins': {\n 'level': 'ERROR',\n 'class': 'django.utils.log.AdminEmailHandler',\n 'filters': ['require_debug_false'],\n },\n 'file_debug': {\n 'level': 'DEBUG',\n 'filters': ['require_debug_true'],\n 'class': 'logging.FileHandler',\n 'filename': os.path.join(KOLIBRI_HOME, 'debug.log'),\n 'formatter': 'simple_date',\n },\n 'file': {\n 'level': 'INFO',\n 'filters': [],\n 'class': 'logging.FileHandler',\n 'filename': os.path.join(KOLIBRI_HOME, 'kolibri.log'),\n 'formatter': 'simple_date',\n },\n },\n 'loggers': {\n 'django': {\n 'handlers': ['console', 'file'],\n 'propagate': True,\n },\n 'django.request': {\n 'handlers': ['mail_admins', 'file'],\n 'level': 'ERROR',\n 'propagate': False,\n },\n 'kolibri': {\n 'handlers': ['console', 'mail_admins', 'file', 'file_debug'],\n 'level': 'INFO',\n }\n }\n}\n\n\n# Customizing Django auth system\n# https://docs.djangoproject.com/en/1.9/topics/auth/customizing/\n\nAUTH_USER_MODEL = 'kolibriauth.FacilityUser'\n\nAUTHENTICATION_BACKENDS = ['kolibri.auth.backends.FacilityUserBackend']\n\n\n# Django REST Framework\n# http://www.django-rest-framework.org/api-guide/settings/\n\nREST_FRAMEWORK = {\n \"UNAUTHENTICATED_USER\": \"kolibri.auth.models.KolibriAnonymousUser\",\n 'DEFAULT_RENDERER_CLASSES': (\n 'rest_framework.renderers.JSONRenderer',\n 'rest_framework.renderers.BrowsableAPIRenderer',\n 'rest_framework_csv.renderers.CSVRenderer',\n ),\n}\n\n# System warnings to disable\n# see https://docs.djangoproject.com/en/1.9/ref/settings/#silenced-system-checks\nSILENCED_SYSTEM_CHECKS = [\"auth.W004\"]\n\n# Configuration for Django JS Reverse\n# https://github.com/ierror/django-js-reverse#options\n\nJS_REVERSE_JS_VAR_NAME = 'kolibriUrls'\n\nJS_REVERSE_EXCLUDE_NAMESPACES = ['admin', ]\n\nENABLE_DATA_BOOTSTRAPPING = True\n", "path": "kolibri/deployment/default/settings/base.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nDjango settings for kolibri project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.9/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.9/ref/settings/\n\"\"\"\nfrom __future__ import absolute_import, print_function, unicode_literals\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\n\n# import kolibri, so we can get the path to the module.\nimport kolibri\n# we load other utilities related to i18n\n# This is essential! 
We load the kolibri conf INSIDE the Django conf\nfrom kolibri.utils import conf, i18n\nfrom tzlocal import get_localzone\n\nKOLIBRI_MODULE_PATH = os.path.dirname(kolibri.__file__)\n\nBASE_DIR = os.path.abspath(os.path.dirname(__name__))\n\nKOLIBRI_HOME = os.environ['KOLIBRI_HOME']\n\nKOLIBRI_CORE_JS_NAME = 'kolibriGlobal'\n\nLOCALE_PATHS = [\n os.path.join(KOLIBRI_MODULE_PATH, \"locale\"),\n]\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = 'f@ey3)y^03r9^@mou97apom*+c1m#b1!cwbm50^s4yk72xce27'\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = False\n\nALLOWED_HOSTS = ['*']\n\n# Application definition\n\nINSTALLED_APPS = [\n 'kolibri.core',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'kolibri.auth.apps.KolibriAuthConfig',\n 'kolibri.content',\n 'kolibri.logger',\n 'kolibri.tasks.apps.KolibriTasksConfig',\n 'kolibri.core.webpack',\n 'kolibri.core.exams',\n 'kolibri.core.device',\n 'kolibri.core.discovery',\n 'rest_framework',\n 'django_js_reverse',\n 'jsonfield',\n 'morango',\n] + conf.config['INSTALLED_APPS']\n\n# Add in the external plugins' locale paths. Our frontend messages depends\n# specifically on the value of LOCALE_PATHS to find its catalog files.\nLOCALE_PATHS += [\n i18n.get_installed_app_locale_path(app) for app in INSTALLED_APPS\n if i18n.is_external_plugin(app)\n]\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'kolibri.plugins.setup_wizard.middleware.SetupWizardMiddleware',\n 'kolibri.auth.middleware.CustomAuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'django.middleware.security.SecurityMiddleware',\n)\n\nQUEUE_JOB_STORAGE_PATH = os.path.join(KOLIBRI_HOME, \"job_storage.sqlite3\")\n\nROOT_URLCONF = 'kolibri.deployment.default.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n 'kolibri.core.context_processors.custom_context_processor.return_session',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'kolibri.deployment.default.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/1.9/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(KOLIBRI_HOME, 'db.sqlite3'),\n 'OPTIONS': {\n 'timeout': 100,\n }\n },\n}\n\n# Content directories and URLs for channel metadata and content files\n\n# Directory and URL for storing content databases for channel data\nCONTENT_DATABASE_DIR = os.path.join(KOLIBRI_HOME, 'content', 'databases')\nif not os.path.exists(CONTENT_DATABASE_DIR):\n os.makedirs(CONTENT_DATABASE_DIR)\n\n# Directory and URL for storing de-duped content files for all channels\nCONTENT_STORAGE_DIR = os.path.join(KOLIBRI_HOME, 
'content', 'storage')\nif not os.path.exists(CONTENT_STORAGE_DIR):\n os.makedirs(CONTENT_STORAGE_DIR)\n\n# Base default URL for downloading content from an online server\nCENTRAL_CONTENT_DOWNLOAD_BASE_URL = \"https://contentworkshop.learningequality.org\"\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.9/topics/i18n/\n\nLANGUAGES = [\n ('en', 'English'),\n ('sw-tz', 'Kiswahili'),\n ('es-es', 'Espa\u00f1ol'),\n ('es-mx', 'Espa\u00f1ol (M\u00e9xico)'),\n ('fr-fr', 'Fran\u00e7ais, langue fran\u00e7aise'),\n ('pt-pt', 'Portugu\u00eas'),\n ('hi-in', '\u0939\u093f\u0902\u0926\u0940'),\n]\n\nLANGUAGE_CODE = conf.config.get(\"LANGUAGE_CODE\") or \"en\"\n\nTIME_ZONE = get_localzone().zone\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.9/howto/static-files/\n\nSTATIC_URL = '/static/'\nSTATIC_ROOT = os.path.join(KOLIBRI_HOME, \"static\")\n\n# https://docs.djangoproject.com/en/1.9/ref/settings/#std:setting-LOGGING\n# https://docs.djangoproject.com/en/1.9/topics/logging/\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'verbose': {\n 'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'\n },\n 'simple': {\n 'format': '%(levelname)s %(message)s'\n },\n 'simple_date': {\n 'format': '%(levelname)s %(asctime)s %(module)s %(message)s'\n },\n 'color': {\n '()': 'colorlog.ColoredFormatter',\n 'format': '%(log_color)s%(levelname)-8s %(message)s',\n 'log_colors': {\n 'DEBUG': 'bold_black',\n 'INFO': 'white',\n 'WARNING': 'yellow',\n 'ERROR': 'red',\n 'CRITICAL': 'bold_red',\n },\n }\n },\n 'filters': {\n 'require_debug_true': {\n '()': 'django.utils.log.RequireDebugTrue',\n },\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse',\n },\n },\n 'handlers': {\n 'console': {\n 'level': 'INFO',\n 'class': 'logging.StreamHandler',\n 'formatter': 'color'\n },\n 'mail_admins': {\n 'level': 'ERROR',\n 'class': 'django.utils.log.AdminEmailHandler',\n 'filters': ['require_debug_false'],\n },\n 'request_debug': {\n 'level': 'ERROR',\n 'class': 'logging.StreamHandler',\n 'formatter': 'color',\n 'filters': ['require_debug_true'],\n },\n 'file_debug': {\n 'level': 'DEBUG',\n 'filters': ['require_debug_true'],\n 'class': 'logging.FileHandler',\n 'filename': os.path.join(KOLIBRI_HOME, 'debug.log'),\n 'formatter': 'simple_date',\n },\n 'file': {\n 'level': 'INFO',\n 'filters': [],\n 'class': 'logging.FileHandler',\n 'filename': os.path.join(KOLIBRI_HOME, 'kolibri.log'),\n 'formatter': 'simple_date',\n },\n },\n 'loggers': {\n 'django': {\n 'handlers': ['console', 'file'],\n 'propagate': True,\n },\n 'django.request': {\n 'handlers': ['mail_admins', 'file', 'request_debug'],\n 'level': 'ERROR',\n 'propagate': False,\n },\n 'kolibri': {\n 'handlers': ['console', 'mail_admins', 'file', 'file_debug'],\n 'level': 'INFO',\n }\n }\n}\n\n\n# Customizing Django auth system\n# https://docs.djangoproject.com/en/1.9/topics/auth/customizing/\n\nAUTH_USER_MODEL = 'kolibriauth.FacilityUser'\n\nAUTHENTICATION_BACKENDS = ['kolibri.auth.backends.FacilityUserBackend']\n\n\n# Django REST Framework\n# http://www.django-rest-framework.org/api-guide/settings/\n\nREST_FRAMEWORK = {\n \"UNAUTHENTICATED_USER\": \"kolibri.auth.models.KolibriAnonymousUser\",\n 'DEFAULT_RENDERER_CLASSES': (\n 'rest_framework.renderers.JSONRenderer',\n 'rest_framework.renderers.BrowsableAPIRenderer',\n 'rest_framework_csv.renderers.CSVRenderer',\n ),\n}\n\n# System warnings to 
disable\n# see https://docs.djangoproject.com/en/1.9/ref/settings/#silenced-system-checks\nSILENCED_SYSTEM_CHECKS = [\"auth.W004\"]\n\n# Configuration for Django JS Reverse\n# https://github.com/ierror/django-js-reverse#options\n\nJS_REVERSE_JS_VAR_NAME = 'kolibriUrls'\n\nJS_REVERSE_EXCLUDE_NAMESPACES = ['admin', ]\n\nENABLE_DATA_BOOTSTRAPPING = True\n", "path": "kolibri/deployment/default/settings/base.py"}]}
num_tokens: 3,152
num_tokens_diff: 229
problem_id: gh_patches_debug_18689
source: rasdani/github-patches
task_type: git_diff
in_source_id: sanic-org__sanic-1553
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Unable to start server -- Running run_async.py failed **Describe the bug** [2019-04-14 19:22:02 +0800] [21512] [INFO] Goin' Fast @ http://0.0.0.0:8000 [2019-04-14 19:22:02 +0800] [21512] [ERROR] Unable to start server Traceback (most recent call last): File "C:\ProgramData\Anaconda3\envs\venom\lib\site-packages\sanic\server.py", line 745, in serve http_server = loop.run_until_complete(server_coroutine) File "C:\ProgramData\Anaconda3\envs\venom\lib\asyncio\base_events.py", line 571, in run_until_complete self.run_forever() File "C:\ProgramData\Anaconda3\envs\venom\lib\asyncio\base_events.py", line 529, in run_forever 'Cannot run the event loop while another loop is running') RuntimeError: Cannot run the event loop while another loop is running **Code snippet** Relevant source code, make sure to remove what is not necessary. https://github.com/huge-success/sanic/blob/master/examples/run_async.py **Expected behavior** A clear and concise description of what you expected to happen. **Environment (please complete the following information):** - OS: [e.g. iOS] - Version [e.g. 0.8.3] Window and Linux, Python 3.6 or 3.7 don't work **Additional context** Add any other context about the problem here. Is this example still work ? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `examples/log_request_id.py` Content: ``` 1 ''' 2 Based on example from https://github.com/Skyscanner/aiotask-context 3 and `examples/{override_logging,run_async}.py`. 4 5 Needs https://github.com/Skyscanner/aiotask-context/tree/52efbc21e2e1def2d52abb9a8e951f3ce5e6f690 or newer 6 7 $ pip install git+https://github.com/Skyscanner/aiotask-context.git 8 ''' 9 10 import asyncio 11 import uuid 12 import logging 13 from signal import signal, SIGINT 14 15 from sanic import Sanic 16 from sanic import response 17 18 import uvloop 19 import aiotask_context as context 20 21 log = logging.getLogger(__name__) 22 23 24 class RequestIdFilter(logging.Filter): 25 def filter(self, record): 26 record.request_id = context.get('X-Request-ID') 27 return True 28 29 30 LOG_SETTINGS = { 31 'version': 1, 32 'disable_existing_loggers': False, 33 'handlers': { 34 'console': { 35 'class': 'logging.StreamHandler', 36 'level': 'DEBUG', 37 'formatter': 'default', 38 'filters': ['requestid'], 39 }, 40 }, 41 'filters': { 42 'requestid': { 43 '()': RequestIdFilter, 44 }, 45 }, 46 'formatters': { 47 'default': { 48 'format': '%(asctime)s %(levelname)s %(name)s:%(lineno)d %(request_id)s | %(message)s', 49 }, 50 }, 51 'loggers': { 52 '': { 53 'level': 'DEBUG', 54 'handlers': ['console'], 55 'propagate': True 56 }, 57 } 58 } 59 60 61 app = Sanic(__name__, log_config=LOG_SETTINGS) 62 63 64 @app.middleware('request') 65 async def set_request_id(request): 66 request_id = request.headers.get('X-Request-ID') or str(uuid.uuid4()) 67 context.set("X-Request-ID", request_id) 68 69 70 @app.route("/") 71 async def test(request): 72 log.debug('X-Request-ID: %s', context.get('X-Request-ID')) 73 log.info('Hello from test!') 74 return response.json({"test": True}) 75 76 77 if __name__ == '__main__': 78 asyncio.set_event_loop(uvloop.new_event_loop()) 79 server = app.create_server(host="0.0.0.0", port=8000) 80 loop = asyncio.get_event_loop() 81 loop.set_task_factory(context.task_factory) 82 task = asyncio.ensure_future(server) 83 try: 84 loop.run_forever() 85 except: 86 
loop.stop() 87 ``` Path: `examples/run_async.py` Content: ``` 1 from sanic import Sanic 2 from sanic import response 3 from signal import signal, SIGINT 4 import asyncio 5 import uvloop 6 7 app = Sanic(__name__) 8 9 10 @app.route("/") 11 async def test(request): 12 return response.json({"answer": "42"}) 13 14 asyncio.set_event_loop(uvloop.new_event_loop()) 15 server = app.create_server(host="0.0.0.0", port=8000) 16 loop = asyncio.get_event_loop() 17 task = asyncio.ensure_future(server) 18 signal(SIGINT, lambda s, f: loop.stop()) 19 try: 20 loop.run_forever() 21 except: 22 loop.stop() 23 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/examples/log_request_id.py b/examples/log_request_id.py --- a/examples/log_request_id.py +++ b/examples/log_request_id.py @@ -76,7 +76,7 @@ if __name__ == '__main__': asyncio.set_event_loop(uvloop.new_event_loop()) - server = app.create_server(host="0.0.0.0", port=8000) + server = app.create_server(host="0.0.0.0", port=8000, return_asyncio_server=True) loop = asyncio.get_event_loop() loop.set_task_factory(context.task_factory) task = asyncio.ensure_future(server) diff --git a/examples/run_async.py b/examples/run_async.py --- a/examples/run_async.py +++ b/examples/run_async.py @@ -12,7 +12,7 @@ return response.json({"answer": "42"}) asyncio.set_event_loop(uvloop.new_event_loop()) -server = app.create_server(host="0.0.0.0", port=8000) +server = app.create_server(host="0.0.0.0", port=8000, return_asyncio_server=True) loop = asyncio.get_event_loop() task = asyncio.ensure_future(server) signal(SIGINT, lambda s, f: loop.stop())
{"golden_diff": "diff --git a/examples/log_request_id.py b/examples/log_request_id.py\n--- a/examples/log_request_id.py\n+++ b/examples/log_request_id.py\n@@ -76,7 +76,7 @@\n \n if __name__ == '__main__':\n asyncio.set_event_loop(uvloop.new_event_loop())\n- server = app.create_server(host=\"0.0.0.0\", port=8000)\n+ server = app.create_server(host=\"0.0.0.0\", port=8000, return_asyncio_server=True)\n loop = asyncio.get_event_loop()\n loop.set_task_factory(context.task_factory)\n task = asyncio.ensure_future(server)\ndiff --git a/examples/run_async.py b/examples/run_async.py\n--- a/examples/run_async.py\n+++ b/examples/run_async.py\n@@ -12,7 +12,7 @@\n return response.json({\"answer\": \"42\"})\n \n asyncio.set_event_loop(uvloop.new_event_loop())\n-server = app.create_server(host=\"0.0.0.0\", port=8000)\n+server = app.create_server(host=\"0.0.0.0\", port=8000, return_asyncio_server=True)\n loop = asyncio.get_event_loop()\n task = asyncio.ensure_future(server)\n signal(SIGINT, lambda s, f: loop.stop())\n", "issue": "Unable to start server -- Running run_async.py failed\n**Describe the bug**\r\n[2019-04-14 19:22:02 +0800] [21512] [INFO] Goin' Fast @ http://0.0.0.0:8000\r\n[2019-04-14 19:22:02 +0800] [21512] [ERROR] Unable to start server\r\nTraceback (most recent call last):\r\n File \"C:\\ProgramData\\Anaconda3\\envs\\venom\\lib\\site-packages\\sanic\\server.py\", line 745, in serve\r\n http_server = loop.run_until_complete(server_coroutine)\r\n File \"C:\\ProgramData\\Anaconda3\\envs\\venom\\lib\\asyncio\\base_events.py\", line 571, in run_until_complete\r\n self.run_forever()\r\n File \"C:\\ProgramData\\Anaconda3\\envs\\venom\\lib\\asyncio\\base_events.py\", line 529, in run_forever\r\n 'Cannot run the event loop while another loop is running')\r\nRuntimeError: Cannot run the event loop while another loop is running\r\n\r\n**Code snippet**\r\nRelevant source code, make sure to remove what is not necessary.\r\n\r\nhttps://github.com/huge-success/sanic/blob/master/examples/run_async.py\r\n\r\n**Expected behavior**\r\nA clear and concise description of what you expected to happen.\r\n\r\n\r\n**Environment (please complete the following information):**\r\n - OS: [e.g. iOS]\r\n - Version [e.g. 
0.8.3]\r\nWindow and Linux, Python 3.6 or 3.7 don't work\r\n\r\n**Additional context**\r\nAdd any other context about the problem here.\r\n\r\n\r\nIs this example still work ?\n", "before_files": [{"content": "'''\nBased on example from https://github.com/Skyscanner/aiotask-context\nand `examples/{override_logging,run_async}.py`.\n\nNeeds https://github.com/Skyscanner/aiotask-context/tree/52efbc21e2e1def2d52abb9a8e951f3ce5e6f690 or newer\n\n$ pip install git+https://github.com/Skyscanner/aiotask-context.git\n'''\n\nimport asyncio\nimport uuid\nimport logging\nfrom signal import signal, SIGINT\n\nfrom sanic import Sanic\nfrom sanic import response\n\nimport uvloop\nimport aiotask_context as context\n\nlog = logging.getLogger(__name__)\n\n\nclass RequestIdFilter(logging.Filter):\n def filter(self, record):\n record.request_id = context.get('X-Request-ID')\n return True\n\n\nLOG_SETTINGS = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'handlers': {\n 'console': {\n 'class': 'logging.StreamHandler',\n 'level': 'DEBUG',\n 'formatter': 'default',\n 'filters': ['requestid'],\n },\n },\n 'filters': {\n 'requestid': {\n '()': RequestIdFilter,\n },\n },\n 'formatters': {\n 'default': {\n 'format': '%(asctime)s %(levelname)s %(name)s:%(lineno)d %(request_id)s | %(message)s',\n },\n },\n 'loggers': {\n '': {\n 'level': 'DEBUG',\n 'handlers': ['console'],\n 'propagate': True\n },\n }\n}\n\n\napp = Sanic(__name__, log_config=LOG_SETTINGS)\n\n\[email protected]('request')\nasync def set_request_id(request):\n request_id = request.headers.get('X-Request-ID') or str(uuid.uuid4())\n context.set(\"X-Request-ID\", request_id)\n\n\[email protected](\"/\")\nasync def test(request):\n log.debug('X-Request-ID: %s', context.get('X-Request-ID'))\n log.info('Hello from test!')\n return response.json({\"test\": True})\n\n\nif __name__ == '__main__':\n asyncio.set_event_loop(uvloop.new_event_loop())\n server = app.create_server(host=\"0.0.0.0\", port=8000)\n loop = asyncio.get_event_loop()\n loop.set_task_factory(context.task_factory)\n task = asyncio.ensure_future(server)\n try:\n loop.run_forever()\n except:\n loop.stop()\n", "path": "examples/log_request_id.py"}, {"content": "from sanic import Sanic\nfrom sanic import response\nfrom signal import signal, SIGINT\nimport asyncio\nimport uvloop\n\napp = Sanic(__name__)\n\n\[email protected](\"/\")\nasync def test(request):\n return response.json({\"answer\": \"42\"})\n\nasyncio.set_event_loop(uvloop.new_event_loop())\nserver = app.create_server(host=\"0.0.0.0\", port=8000)\nloop = asyncio.get_event_loop()\ntask = asyncio.ensure_future(server)\nsignal(SIGINT, lambda s, f: loop.stop())\ntry:\n loop.run_forever()\nexcept:\n loop.stop()\n", "path": "examples/run_async.py"}], "after_files": [{"content": "'''\nBased on example from https://github.com/Skyscanner/aiotask-context\nand `examples/{override_logging,run_async}.py`.\n\nNeeds https://github.com/Skyscanner/aiotask-context/tree/52efbc21e2e1def2d52abb9a8e951f3ce5e6f690 or newer\n\n$ pip install git+https://github.com/Skyscanner/aiotask-context.git\n'''\n\nimport asyncio\nimport uuid\nimport logging\nfrom signal import signal, SIGINT\n\nfrom sanic import Sanic\nfrom sanic import response\n\nimport uvloop\nimport aiotask_context as context\n\nlog = logging.getLogger(__name__)\n\n\nclass RequestIdFilter(logging.Filter):\n def filter(self, record):\n record.request_id = context.get('X-Request-ID')\n return True\n\n\nLOG_SETTINGS = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'handlers': {\n 
'console': {\n 'class': 'logging.StreamHandler',\n 'level': 'DEBUG',\n 'formatter': 'default',\n 'filters': ['requestid'],\n },\n },\n 'filters': {\n 'requestid': {\n '()': RequestIdFilter,\n },\n },\n 'formatters': {\n 'default': {\n 'format': '%(asctime)s %(levelname)s %(name)s:%(lineno)d %(request_id)s | %(message)s',\n },\n },\n 'loggers': {\n '': {\n 'level': 'DEBUG',\n 'handlers': ['console'],\n 'propagate': True\n },\n }\n}\n\n\napp = Sanic(__name__, log_config=LOG_SETTINGS)\n\n\[email protected]('request')\nasync def set_request_id(request):\n request_id = request.headers.get('X-Request-ID') or str(uuid.uuid4())\n context.set(\"X-Request-ID\", request_id)\n\n\[email protected](\"/\")\nasync def test(request):\n log.debug('X-Request-ID: %s', context.get('X-Request-ID'))\n log.info('Hello from test!')\n return response.json({\"test\": True})\n\n\nif __name__ == '__main__':\n asyncio.set_event_loop(uvloop.new_event_loop())\n server = app.create_server(host=\"0.0.0.0\", port=8000, return_asyncio_server=True)\n loop = asyncio.get_event_loop()\n loop.set_task_factory(context.task_factory)\n task = asyncio.ensure_future(server)\n try:\n loop.run_forever()\n except:\n loop.stop()\n", "path": "examples/log_request_id.py"}, {"content": "from sanic import Sanic\nfrom sanic import response\nfrom signal import signal, SIGINT\nimport asyncio\nimport uvloop\n\napp = Sanic(__name__)\n\n\[email protected](\"/\")\nasync def test(request):\n return response.json({\"answer\": \"42\"})\n\nasyncio.set_event_loop(uvloop.new_event_loop())\nserver = app.create_server(host=\"0.0.0.0\", port=8000, return_asyncio_server=True)\nloop = asyncio.get_event_loop()\ntask = asyncio.ensure_future(server)\nsignal(SIGINT, lambda s, f: loop.stop())\ntry:\n loop.run_forever()\nexcept:\n loop.stop()\n", "path": "examples/run_async.py"}]}
1,564
284
gh_patches_debug_4598
rasdani/github-patches
git_diff
vispy__vispy-2223
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- scene.visuals.Graph is not working with directed = True I am trying to render an directed graph but I am getting the error. Code (based on [example from gallery](https://vispy.org/gallery/scene/graph.html#sphx-glr-gallery-scene-graph-py), I just set directed=True): ```py import sys import networkx as nx from vispy import app, scene from vispy.visuals.graphs import layouts canvas = scene.SceneCanvas(title='Simple NetworkX Graph', size=(600, 600), bgcolor='white', show=True) view = canvas.central_widget.add_view('panzoom') graph = nx.adjacency_matrix( nx.fast_gnp_random_graph(500, 0.005, directed=True) ) layout = layouts.get_layout('force_directed', iterations=100) visual = scene.visuals.Graph( graph, layout=layout, line_color='black', arrow_type="stealth", arrow_size=30, node_symbol="disc", node_size=20, face_color=(1, 0, 0, 0.2), border_width=0.0, animate=True, directed=True, parent=view.scene) @canvas.events.draw.connect def on_draw(event): if not visual.animate_layout(): canvas.update() if __name__ == '__main__': if sys.flags.interactive != 1: app.run() ``` Error: ``` << caught exception here: >> File "C:\Users\maxim\AppData\Local\Programs\Python\Python39\lib\site-packages\vispy\util\event.py", line 469, in _invoke_callback cb(event) File "D:\dev\university\UniversityProjects\3\alg_and_struct\2\demo.py", line 27, in on_draw if not visual.animate_layout(): File "C:\Users\maxim\AppData\Local\Programs\Python\Python39\lib\site-packages\vispy\visuals\graphs\graph.py", line 143, in animate_layout node_vertices, line_vertices, arrows = next(self._layout_iter) File "C:\Users\maxim\AppData\Local\Programs\Python\Python39\lib\site-packages\vispy\visuals\graphs\layouts\force_directed.py", line 95, in __call__ for result in solver(adjacency_mat, directed): File "C:\Users\maxim\AppData\Local\Programs\Python\Python39\lib\site-packages\vispy\visuals\graphs\layouts\force_directed.py", line 162, in _sparse_fruchterman_reingold line_vertices, arrows = _straight_line_vertices(adjacency_coo, pos, File "C:\Users\maxim\AppData\Local\Programs\Python\Python39\lib\site-packages\vispy\visuals\graphs\util.py", line 92, in _straight_line_vertices arrow_vertices = arrow_vertices.reshape((len(arrow_vertices)/2, 4)) TypeError: 'float' object cannot be interpreted as an integer ERROR: Invoking <function on_draw at 0x000001EB3573EDC0> for DrawEvent ``` May be typecasting or `//` at [this line](https://github.com/vispy/vispy/blob/feeaf8afa99ddbbac86a03e3e611a52c1c89584d/vispy/visuals/graphs/util.py#L92) is needed. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `vispy/visuals/graphs/util.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 # Copyright (c) Vispy Development Team. All Rights Reserved. 3 # Distributed under the (new) BSD License. See LICENSE.txt for more info. 4 """ 5 Graph utilities 6 =============== 7 8 A module containing several graph utility functions. 
9 """ 10 11 import numpy as np 12 13 try: 14 from scipy.sparse import issparse 15 from scipy import sparse 16 except ImportError: 17 def issparse(*args, **kwargs): 18 return False 19 20 21 def _get_edges(adjacency_mat): 22 func = _sparse_get_edges if issparse(adjacency_mat) else _ndarray_get_edges 23 return func(adjacency_mat) 24 25 26 def _sparse_get_edges(adjacency_mat): 27 return np.concatenate((adjacency_mat.row[:, np.newaxis], 28 adjacency_mat.col[:, np.newaxis]), axis=-1) 29 30 31 def _ndarray_get_edges(adjacency_mat): 32 # Get indices of all non zero values 33 i, j = np.where(adjacency_mat) 34 35 return np.concatenate((i[:, np.newaxis], j[:, np.newaxis]), axis=-1) 36 37 38 def _get_directed_edges(adjacency_mat): 39 func = _sparse_get_edges if issparse(adjacency_mat) else _ndarray_get_edges 40 41 if issparse(adjacency_mat): 42 triu = sparse.triu 43 tril = sparse.tril 44 else: 45 triu = np.triu 46 tril = np.tril 47 48 upper = triu(adjacency_mat) 49 lower = tril(adjacency_mat) 50 51 return np.concatenate((func(upper), func(lower))) 52 53 54 def _straight_line_vertices(adjacency_mat, node_coords, directed=False): 55 """ 56 Generate the vertices for straight lines between nodes. 57 58 If it is a directed graph, it also generates the vertices which can be 59 passed to an :class:`ArrowVisual`. 60 61 Parameters 62 ---------- 63 adjacency_mat : array 64 The adjacency matrix of the graph 65 node_coords : array 66 The current coordinates of all nodes in the graph 67 directed : bool 68 Wether the graph is directed. If this is true it will also generate 69 the vertices for arrows which can be passed to :class:`ArrowVisual`. 70 71 Returns 72 ------- 73 vertices : tuple 74 Returns a tuple containing containing (`line_vertices`, 75 `arrow_vertices`) 76 """ 77 if not issparse(adjacency_mat): 78 adjacency_mat = np.asarray(adjacency_mat, float) 79 80 if (adjacency_mat.ndim != 2 or adjacency_mat.shape[0] != 81 adjacency_mat.shape[1]): 82 raise ValueError("Adjacency matrix should be square.") 83 84 arrow_vertices = np.array([]) 85 86 edges = _get_edges(adjacency_mat) 87 line_vertices = node_coords[edges.ravel()] 88 89 if directed: 90 arrows = np.array(list(_get_directed_edges(adjacency_mat))) 91 arrow_vertices = node_coords[arrows.ravel()] 92 arrow_vertices = arrow_vertices.reshape((len(arrow_vertices)/2, 4)) 93 94 return line_vertices, arrow_vertices 95 96 97 def _rescale_layout(pos, scale=1): 98 """ 99 Normalize the given coordinate list to the range [0, `scale`]. 100 101 Parameters 102 ---------- 103 pos : array 104 Coordinate list 105 scale : number 106 The upperbound value for the coordinates range 107 108 Returns 109 ------- 110 pos : array 111 The rescaled (normalized) coordinates in the range [0, `scale`]. 112 113 Notes 114 ----- 115 Changes `pos` in place. 116 """ 117 pos -= pos.min(axis=0) 118 pos *= scale / pos.max() 119 120 return pos 121 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/vispy/visuals/graphs/util.py b/vispy/visuals/graphs/util.py --- a/vispy/visuals/graphs/util.py +++ b/vispy/visuals/graphs/util.py @@ -89,7 +89,7 @@ if directed: arrows = np.array(list(_get_directed_edges(adjacency_mat))) arrow_vertices = node_coords[arrows.ravel()] - arrow_vertices = arrow_vertices.reshape((len(arrow_vertices)/2, 4)) + arrow_vertices = arrow_vertices.reshape((len(arrow_vertices)//2, 4)) return line_vertices, arrow_vertices
{"golden_diff": "diff --git a/vispy/visuals/graphs/util.py b/vispy/visuals/graphs/util.py\n--- a/vispy/visuals/graphs/util.py\n+++ b/vispy/visuals/graphs/util.py\n@@ -89,7 +89,7 @@\n if directed:\n arrows = np.array(list(_get_directed_edges(adjacency_mat)))\n arrow_vertices = node_coords[arrows.ravel()]\n- arrow_vertices = arrow_vertices.reshape((len(arrow_vertices)/2, 4))\n+ arrow_vertices = arrow_vertices.reshape((len(arrow_vertices)//2, 4))\n \n return line_vertices, arrow_vertices\n", "issue": "scene.visuals.Graph is not working with directed = True\nI am trying to render an directed graph but I am getting the error.\r\n\r\nCode (based on [example from gallery](https://vispy.org/gallery/scene/graph.html#sphx-glr-gallery-scene-graph-py), I just set directed=True):\r\n```py\r\nimport sys\r\n\r\nimport networkx as nx\r\n\r\nfrom vispy import app, scene\r\nfrom vispy.visuals.graphs import layouts\r\n\r\n\r\ncanvas = scene.SceneCanvas(title='Simple NetworkX Graph', size=(600, 600),\r\n bgcolor='white', show=True)\r\nview = canvas.central_widget.add_view('panzoom')\r\n\r\ngraph = nx.adjacency_matrix(\r\n nx.fast_gnp_random_graph(500, 0.005, directed=True)\r\n)\r\nlayout = layouts.get_layout('force_directed', iterations=100)\r\n\r\nvisual = scene.visuals.Graph(\r\n graph, layout=layout, line_color='black', arrow_type=\"stealth\",\r\n arrow_size=30, node_symbol=\"disc\", node_size=20,\r\n face_color=(1, 0, 0, 0.2), border_width=0.0, animate=True, directed=True,\r\n parent=view.scene)\r\n\r\n\r\[email protected]\r\ndef on_draw(event):\r\n if not visual.animate_layout():\r\n canvas.update()\r\n\r\nif __name__ == '__main__':\r\n if sys.flags.interactive != 1:\r\n app.run()\r\n```\r\n\r\nError:\r\n```\r\n<< caught exception here: >>\r\n File \"C:\\Users\\maxim\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\vispy\\util\\event.py\", line 469, in _invoke_callback\r\n cb(event)\r\n File \"D:\\dev\\university\\UniversityProjects\\3\\alg_and_struct\\2\\demo.py\", line 27, in on_draw\r\n if not visual.animate_layout():\r\n File \"C:\\Users\\maxim\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\vispy\\visuals\\graphs\\graph.py\", line 143, in animate_layout\r\n node_vertices, line_vertices, arrows = next(self._layout_iter)\r\n File \"C:\\Users\\maxim\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\vispy\\visuals\\graphs\\layouts\\force_directed.py\", line 95, in __call__\r\n for result in solver(adjacency_mat, directed):\r\n File \"C:\\Users\\maxim\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\vispy\\visuals\\graphs\\layouts\\force_directed.py\", line 162, in _sparse_fruchterman_reingold\r\n line_vertices, arrows = _straight_line_vertices(adjacency_coo, pos,\r\n File \"C:\\Users\\maxim\\AppData\\Local\\Programs\\Python\\Python39\\lib\\site-packages\\vispy\\visuals\\graphs\\util.py\", line 92, in _straight_line_vertices\r\n arrow_vertices = arrow_vertices.reshape((len(arrow_vertices)/2, 4))\r\nTypeError: 'float' object cannot be interpreted as an integer\r\nERROR: Invoking <function on_draw at 0x000001EB3573EDC0> for DrawEvent\r\n```\r\n\r\nMay be typecasting or `//` at [this line](https://github.com/vispy/vispy/blob/feeaf8afa99ddbbac86a03e3e611a52c1c89584d/vispy/visuals/graphs/util.py#L92) is needed.\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright (c) Vispy Development Team. All Rights Reserved.\n# Distributed under the (new) BSD License. 
See LICENSE.txt for more info.\n\"\"\"\nGraph utilities\n===============\n\nA module containing several graph utility functions.\n\"\"\"\n\nimport numpy as np\n\ntry:\n from scipy.sparse import issparse\n from scipy import sparse\nexcept ImportError:\n def issparse(*args, **kwargs):\n return False\n\n\ndef _get_edges(adjacency_mat):\n func = _sparse_get_edges if issparse(adjacency_mat) else _ndarray_get_edges\n return func(adjacency_mat)\n\n\ndef _sparse_get_edges(adjacency_mat):\n return np.concatenate((adjacency_mat.row[:, np.newaxis],\n adjacency_mat.col[:, np.newaxis]), axis=-1)\n\n\ndef _ndarray_get_edges(adjacency_mat):\n # Get indices of all non zero values\n i, j = np.where(adjacency_mat)\n\n return np.concatenate((i[:, np.newaxis], j[:, np.newaxis]), axis=-1)\n\n\ndef _get_directed_edges(adjacency_mat):\n func = _sparse_get_edges if issparse(adjacency_mat) else _ndarray_get_edges\n\n if issparse(adjacency_mat):\n triu = sparse.triu\n tril = sparse.tril\n else:\n triu = np.triu\n tril = np.tril\n\n upper = triu(adjacency_mat)\n lower = tril(adjacency_mat)\n\n return np.concatenate((func(upper), func(lower)))\n\n\ndef _straight_line_vertices(adjacency_mat, node_coords, directed=False):\n \"\"\"\n Generate the vertices for straight lines between nodes.\n\n If it is a directed graph, it also generates the vertices which can be\n passed to an :class:`ArrowVisual`.\n\n Parameters\n ----------\n adjacency_mat : array\n The adjacency matrix of the graph\n node_coords : array\n The current coordinates of all nodes in the graph\n directed : bool\n Wether the graph is directed. If this is true it will also generate\n the vertices for arrows which can be passed to :class:`ArrowVisual`.\n\n Returns\n -------\n vertices : tuple\n Returns a tuple containing containing (`line_vertices`,\n `arrow_vertices`)\n \"\"\"\n if not issparse(adjacency_mat):\n adjacency_mat = np.asarray(adjacency_mat, float)\n\n if (adjacency_mat.ndim != 2 or adjacency_mat.shape[0] !=\n adjacency_mat.shape[1]):\n raise ValueError(\"Adjacency matrix should be square.\")\n\n arrow_vertices = np.array([])\n\n edges = _get_edges(adjacency_mat)\n line_vertices = node_coords[edges.ravel()]\n\n if directed:\n arrows = np.array(list(_get_directed_edges(adjacency_mat)))\n arrow_vertices = node_coords[arrows.ravel()]\n arrow_vertices = arrow_vertices.reshape((len(arrow_vertices)/2, 4))\n\n return line_vertices, arrow_vertices\n\n\ndef _rescale_layout(pos, scale=1):\n \"\"\"\n Normalize the given coordinate list to the range [0, `scale`].\n\n Parameters\n ----------\n pos : array\n Coordinate list\n scale : number\n The upperbound value for the coordinates range\n\n Returns\n -------\n pos : array\n The rescaled (normalized) coordinates in the range [0, `scale`].\n\n Notes\n -----\n Changes `pos` in place.\n \"\"\"\n pos -= pos.min(axis=0)\n pos *= scale / pos.max()\n\n return pos\n", "path": "vispy/visuals/graphs/util.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright (c) Vispy Development Team. All Rights Reserved.\n# Distributed under the (new) BSD License. 
See LICENSE.txt for more info.\n\"\"\"\nGraph utilities\n===============\n\nA module containing several graph utility functions.\n\"\"\"\n\nimport numpy as np\n\ntry:\n from scipy.sparse import issparse\n from scipy import sparse\nexcept ImportError:\n def issparse(*args, **kwargs):\n return False\n\n\ndef _get_edges(adjacency_mat):\n func = _sparse_get_edges if issparse(adjacency_mat) else _ndarray_get_edges\n return func(adjacency_mat)\n\n\ndef _sparse_get_edges(adjacency_mat):\n return np.concatenate((adjacency_mat.row[:, np.newaxis],\n adjacency_mat.col[:, np.newaxis]), axis=-1)\n\n\ndef _ndarray_get_edges(adjacency_mat):\n # Get indices of all non zero values\n i, j = np.where(adjacency_mat)\n\n return np.concatenate((i[:, np.newaxis], j[:, np.newaxis]), axis=-1)\n\n\ndef _get_directed_edges(adjacency_mat):\n func = _sparse_get_edges if issparse(adjacency_mat) else _ndarray_get_edges\n\n if issparse(adjacency_mat):\n triu = sparse.triu\n tril = sparse.tril\n else:\n triu = np.triu\n tril = np.tril\n\n upper = triu(adjacency_mat)\n lower = tril(adjacency_mat)\n\n return np.concatenate((func(upper), func(lower)))\n\n\ndef _straight_line_vertices(adjacency_mat, node_coords, directed=False):\n \"\"\"\n Generate the vertices for straight lines between nodes.\n\n If it is a directed graph, it also generates the vertices which can be\n passed to an :class:`ArrowVisual`.\n\n Parameters\n ----------\n adjacency_mat : array\n The adjacency matrix of the graph\n node_coords : array\n The current coordinates of all nodes in the graph\n directed : bool\n Wether the graph is directed. If this is true it will also generate\n the vertices for arrows which can be passed to :class:`ArrowVisual`.\n\n Returns\n -------\n vertices : tuple\n Returns a tuple containing containing (`line_vertices`,\n `arrow_vertices`)\n \"\"\"\n if not issparse(adjacency_mat):\n adjacency_mat = np.asarray(adjacency_mat, float)\n\n if (adjacency_mat.ndim != 2 or adjacency_mat.shape[0] !=\n adjacency_mat.shape[1]):\n raise ValueError(\"Adjacency matrix should be square.\")\n\n arrow_vertices = np.array([])\n\n edges = _get_edges(adjacency_mat)\n line_vertices = node_coords[edges.ravel()]\n\n if directed:\n arrows = np.array(list(_get_directed_edges(adjacency_mat)))\n arrow_vertices = node_coords[arrows.ravel()]\n arrow_vertices = arrow_vertices.reshape((len(arrow_vertices)//2, 4))\n\n return line_vertices, arrow_vertices\n\n\ndef _rescale_layout(pos, scale=1):\n \"\"\"\n Normalize the given coordinate list to the range [0, `scale`].\n\n Parameters\n ----------\n pos : array\n Coordinate list\n scale : number\n The upperbound value for the coordinates range\n\n Returns\n -------\n pos : array\n The rescaled (normalized) coordinates in the range [0, `scale`].\n\n Notes\n -----\n Changes `pos` in place.\n \"\"\"\n pos -= pos.min(axis=0)\n pos *= scale / pos.max()\n\n return pos\n", "path": "vispy/visuals/graphs/util.py"}]}
2,081
141
gh_patches_debug_27242
rasdani/github-patches
git_diff
google__openhtf-473
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Update documentation and examples/measurement.py Ran into some issues on a fresh install from the documentation. I needed to add the package libprotobuf-dev to the apt-get install line in CONTRIBUTING.md to get protobufs to build and got an error when trying to run the example measurements.py that units could not be found, resolved by importing openhtf.utils.units --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `examples/measurements.py` Content: ``` 1 # Copyright 2016 Google Inc. All Rights Reserved. 2 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 7 # http://www.apache.org/licenses/LICENSE-2.0 8 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 """Example OpenHTF test demonstrating use of measurements. 16 17 Run with (your virtualenv must be activated first): 18 19 python measurements.py 20 21 Afterwards, check out the output in measurements.json. If you open both this 22 example test and that output file and compare them, you should be able to see 23 where measurement values end up in the output and what the corresponding code 24 looks like that sets them. 25 26 TODO(someone): Write these examples. 27 For more complex topics, see the validators.py and dimensions.py examples. 28 29 For a simpler example, see the hello_world.py example. If the output of this 30 test is confusing, start with the hello_world.py output and compare it to this 31 test's output. 32 33 Some constraints on measurements: 34 35 - Measurement names must be valid python variable names. This is mostly for 36 sanity, but also ensures you can access them via attribute access in phases. 37 This applies *after* any with_args() substitution (not covered in this 38 tutorial, see the phases.py example for more details). 39 40 - You cannot declare the same measurement name multiple times on the same 41 phase. Technically, you *can* declare the same measurement on multiple 42 phases; measurements are attached to a specific phase in the output. This 43 isn't recommended, though, because it makes it difficult to flatten a test's 44 measurements, which some output formats require. 45 """ 46 47 # Import openhtf with an abbreviated name, as we'll be using a bunch of stuff 48 # from it throughout our test scripts. See __all__ at the top of 49 # openhtf/__init__.py for details on what's in top-of-module namespace. 50 import openhtf as htf 51 52 # Import this output mechanism as it's the specific one we want to use. 53 from openhtf.output.callbacks import json_factory 54 55 # You won't normally need to import this, see validators.py example for 56 # more details. It's used for the inline measurement declaration example 57 # below, but normally you'll only import it when you want to define custom 58 # measurement validators. 59 from openhtf.util import validators 60 61 62 # Simple example of measurement use, similar to hello_world.py usage. 
63 @htf.measures(htf.Measurement('hello_world_measurement')) 64 def hello_phase(test): 65 test.measurements.hello_world_measurement = 'Hello!' 66 67 68 # An alternative simpler syntax that creates the Measurement for you. 69 @htf.measures('hello_again_measurement') 70 def again_phase(test): 71 test.measurements.hello_again_measurement = 'Again!' 72 73 74 # Multiple measurements can be specified in a single decorator, using either of 75 # the above syntaxes. Technically, these syntaxes can be mixed and matched, but 76 # as a matter of convention you should always use one or the other within a 77 # single decorator call. You'll also note that you can stack multiple 78 # decorations on a single phase. This is useful if you have a handful of simple 79 # measurements, and then one or two with more complex declarations (see below). 80 @htf.measures('first_measurement', 'second_measurement') 81 @htf.measures(htf.Measurement('third'), htf.Measurement('fourth')) 82 def lots_of_measurements(test): 83 test.measurements.first_measurement = 'First!' 84 # Measurements can also be access via indexing rather than attributes. 85 test.measurements['second_measurement'] = 'Second :(' 86 # This can be handy for iterating over measurements. 87 for measurement in ('third', 'fourth'): 88 test.measurements[measurement] = measurement + ' is the best!' 89 90 91 # Basic key/value measurements are handy, but we may also want to validate a 92 # measurement against some criteria, or specify additional information 93 # describing the measurement. Validators can get quite complex, for more 94 # details, see the validators.py example. 95 @htf.measures(htf.Measurement('validated_measurement').in_range(0, 10).doc( 96 'This measurement is validated.').with_units(units.SECOND)) 97 def measure_seconds(test): 98 # The 'outcome' of this measurement in the test_record result will be a PASS 99 # because its value passes the validator specified (0 <= 5 <= 10). 100 test.measurements.validated_measurement = 5 101 102 103 # These additional attributes can also be specified inline as kwargs passed 104 # directly to the @measures decorator. If you do so, however, you must 105 # specify exactly one measurement with that decorator (ie. the first argument 106 # must be a string containing the measurement name). If you want to specify 107 # multiple measurements this way, you can stack multiple decorators. 108 @htf.measures('inline_kwargs', docstring='This measurement is declared inline!', 109 units=units.HERTZ, validators=[validators.in_range(0, 10)]) 110 @htf.measures('another_inline', docstring='Because why not?') 111 def inline_phase(test): 112 # This measurement will have an outcome of FAIL, because the set value of 15 113 # will not pass the 0 <= x <= 10 validator. 114 test.measurements.inline_kwargs = 15 115 test.measurements.another_inline = 'This one is unvalidated.' 116 117 # Let's log a message so the operator knows the test should fail. 118 test.logger.info('Set inline_kwargs to a failing value, test should FAIL!') 119 120 121 if __name__ == '__main__': 122 # We instantiate our OpenHTF test with the phases we want to run as args. 123 test = htf.Test(hello_phase, again_phase, lots_of_measurements, 124 measure_seconds, inline_phase) 125 126 # In order to view the result of the test, we have to output it somewhere, 127 # and a local JSON file is a convenient way to do this. Custom output 128 # mechanisms can be implemented, but for now we'll just keep it simple. 
129 # This will always output to the same ./measurements.json file, formatted 130 # slightly for human readability. 131 test.add_output_callbacks( 132 json_factory.OutputToJSON('./measurements.json', indent=2)) 133 134 # Unlike hello_world.py, where we prompt for a DUT ID, here we'll just 135 # use an arbitrary one. 136 test.execute(test_start=lambda: 'MyDutId') 137 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/examples/measurements.py b/examples/measurements.py --- a/examples/measurements.py +++ b/examples/measurements.py @@ -93,7 +93,7 @@ # describing the measurement. Validators can get quite complex, for more # details, see the validators.py example. @htf.measures(htf.Measurement('validated_measurement').in_range(0, 10).doc( - 'This measurement is validated.').with_units(units.SECOND)) + 'This measurement is validated.').with_units(htf.units.SECOND)) def measure_seconds(test): # The 'outcome' of this measurement in the test_record result will be a PASS # because its value passes the validator specified (0 <= 5 <= 10). @@ -106,7 +106,7 @@ # must be a string containing the measurement name). If you want to specify # multiple measurements this way, you can stack multiple decorators. @htf.measures('inline_kwargs', docstring='This measurement is declared inline!', - units=units.HERTZ, validators=[validators.in_range(0, 10)]) + units=htf.units.HERTZ, validators=[validators.in_range(0, 10)]) @htf.measures('another_inline', docstring='Because why not?') def inline_phase(test): # This measurement will have an outcome of FAIL, because the set value of 15
{"golden_diff": "diff --git a/examples/measurements.py b/examples/measurements.py\n--- a/examples/measurements.py\n+++ b/examples/measurements.py\n@@ -93,7 +93,7 @@\n # describing the measurement. Validators can get quite complex, for more\n # details, see the validators.py example.\n @htf.measures(htf.Measurement('validated_measurement').in_range(0, 10).doc(\n- 'This measurement is validated.').with_units(units.SECOND))\n+ 'This measurement is validated.').with_units(htf.units.SECOND))\n def measure_seconds(test):\n # The 'outcome' of this measurement in the test_record result will be a PASS\n # because its value passes the validator specified (0 <= 5 <= 10).\n@@ -106,7 +106,7 @@\n # must be a string containing the measurement name). If you want to specify\n # multiple measurements this way, you can stack multiple decorators.\n @htf.measures('inline_kwargs', docstring='This measurement is declared inline!',\n- units=units.HERTZ, validators=[validators.in_range(0, 10)])\n+ units=htf.units.HERTZ, validators=[validators.in_range(0, 10)])\n @htf.measures('another_inline', docstring='Because why not?')\n def inline_phase(test):\n # This measurement will have an outcome of FAIL, because the set value of 15\n", "issue": "Update documentation and examples/measurement.py\nRan into some issues on a fresh install from the documentation. I needed to add the package libprotobuf-dev to the apt-get install line in CONTRIBUTING.md to get protobufs to build and got an error when trying to run the example measurements.py that units could not be found, resolved by importing openhtf.utils.units\n", "before_files": [{"content": "# Copyright 2016 Google Inc. All Rights Reserved.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Example OpenHTF test demonstrating use of measurements.\n\nRun with (your virtualenv must be activated first):\n\n python measurements.py\n\nAfterwards, check out the output in measurements.json. If you open both this\nexample test and that output file and compare them, you should be able to see\nwhere measurement values end up in the output and what the corresponding code\nlooks like that sets them.\n\nTODO(someone): Write these examples.\nFor more complex topics, see the validators.py and dimensions.py examples.\n\nFor a simpler example, see the hello_world.py example. If the output of this\ntest is confusing, start with the hello_world.py output and compare it to this\ntest's output.\n\nSome constraints on measurements:\n\n - Measurement names must be valid python variable names. This is mostly for\n sanity, but also ensures you can access them via attribute access in phases.\n This applies *after* any with_args() substitution (not covered in this\n tutorial, see the phases.py example for more details).\n\n - You cannot declare the same measurement name multiple times on the same\n phase. Technically, you *can* declare the same measurement on multiple\n phases; measurements are attached to a specific phase in the output. 
This\n isn't recommended, though, because it makes it difficult to flatten a test's\n measurements, which some output formats require.\n\"\"\"\n\n# Import openhtf with an abbreviated name, as we'll be using a bunch of stuff\n# from it throughout our test scripts. See __all__ at the top of\n# openhtf/__init__.py for details on what's in top-of-module namespace.\nimport openhtf as htf\n\n# Import this output mechanism as it's the specific one we want to use.\nfrom openhtf.output.callbacks import json_factory\n\n# You won't normally need to import this, see validators.py example for\n# more details. It's used for the inline measurement declaration example\n# below, but normally you'll only import it when you want to define custom\n# measurement validators.\nfrom openhtf.util import validators\n\n\n# Simple example of measurement use, similar to hello_world.py usage.\[email protected](htf.Measurement('hello_world_measurement'))\ndef hello_phase(test):\n test.measurements.hello_world_measurement = 'Hello!'\n\n\n# An alternative simpler syntax that creates the Measurement for you.\[email protected]('hello_again_measurement')\ndef again_phase(test):\n test.measurements.hello_again_measurement = 'Again!'\n\n\n# Multiple measurements can be specified in a single decorator, using either of\n# the above syntaxes. Technically, these syntaxes can be mixed and matched, but\n# as a matter of convention you should always use one or the other within a\n# single decorator call. You'll also note that you can stack multiple\n# decorations on a single phase. This is useful if you have a handful of simple\n# measurements, and then one or two with more complex declarations (see below).\[email protected]('first_measurement', 'second_measurement')\[email protected](htf.Measurement('third'), htf.Measurement('fourth'))\ndef lots_of_measurements(test):\n test.measurements.first_measurement = 'First!'\n # Measurements can also be access via indexing rather than attributes.\n test.measurements['second_measurement'] = 'Second :('\n # This can be handy for iterating over measurements.\n for measurement in ('third', 'fourth'):\n test.measurements[measurement] = measurement + ' is the best!'\n\n\n# Basic key/value measurements are handy, but we may also want to validate a\n# measurement against some criteria, or specify additional information\n# describing the measurement. Validators can get quite complex, for more\n# details, see the validators.py example.\[email protected](htf.Measurement('validated_measurement').in_range(0, 10).doc(\n 'This measurement is validated.').with_units(units.SECOND))\ndef measure_seconds(test):\n # The 'outcome' of this measurement in the test_record result will be a PASS\n # because its value passes the validator specified (0 <= 5 <= 10).\n test.measurements.validated_measurement = 5\n\n\n# These additional attributes can also be specified inline as kwargs passed\n# directly to the @measures decorator. If you do so, however, you must\n# specify exactly one measurement with that decorator (ie. the first argument\n# must be a string containing the measurement name). 
If you want to specify\n# multiple measurements this way, you can stack multiple decorators.\[email protected]('inline_kwargs', docstring='This measurement is declared inline!',\n units=units.HERTZ, validators=[validators.in_range(0, 10)])\[email protected]('another_inline', docstring='Because why not?')\ndef inline_phase(test):\n # This measurement will have an outcome of FAIL, because the set value of 15\n # will not pass the 0 <= x <= 10 validator.\n test.measurements.inline_kwargs = 15\n test.measurements.another_inline = 'This one is unvalidated.'\n\n # Let's log a message so the operator knows the test should fail.\n test.logger.info('Set inline_kwargs to a failing value, test should FAIL!')\n\n\nif __name__ == '__main__':\n # We instantiate our OpenHTF test with the phases we want to run as args.\n test = htf.Test(hello_phase, again_phase, lots_of_measurements,\n measure_seconds, inline_phase)\n\n # In order to view the result of the test, we have to output it somewhere,\n # and a local JSON file is a convenient way to do this. Custom output\n # mechanisms can be implemented, but for now we'll just keep it simple.\n # This will always output to the same ./measurements.json file, formatted\n # slightly for human readability.\n test.add_output_callbacks(\n json_factory.OutputToJSON('./measurements.json', indent=2))\n\n # Unlike hello_world.py, where we prompt for a DUT ID, here we'll just\n # use an arbitrary one.\n test.execute(test_start=lambda: 'MyDutId')\n", "path": "examples/measurements.py"}], "after_files": [{"content": "# Copyright 2016 Google Inc. All Rights Reserved.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Example OpenHTF test demonstrating use of measurements.\n\nRun with (your virtualenv must be activated first):\n\n python measurements.py\n\nAfterwards, check out the output in measurements.json. If you open both this\nexample test and that output file and compare them, you should be able to see\nwhere measurement values end up in the output and what the corresponding code\nlooks like that sets them.\n\nTODO(someone): Write these examples.\nFor more complex topics, see the validators.py and dimensions.py examples.\n\nFor a simpler example, see the hello_world.py example. If the output of this\ntest is confusing, start with the hello_world.py output and compare it to this\ntest's output.\n\nSome constraints on measurements:\n\n - Measurement names must be valid python variable names. This is mostly for\n sanity, but also ensures you can access them via attribute access in phases.\n This applies *after* any with_args() substitution (not covered in this\n tutorial, see the phases.py example for more details).\n\n - You cannot declare the same measurement name multiple times on the same\n phase. Technically, you *can* declare the same measurement on multiple\n phases; measurements are attached to a specific phase in the output. 
This\n isn't recommended, though, because it makes it difficult to flatten a test's\n measurements, which some output formats require.\n\"\"\"\n\n# Import openhtf with an abbreviated name, as we'll be using a bunch of stuff\n# from it throughout our test scripts. See __all__ at the top of\n# openhtf/__init__.py for details on what's in top-of-module namespace.\nimport openhtf as htf\n\n# Import this output mechanism as it's the specific one we want to use.\nfrom openhtf.output.callbacks import json_factory\n\n# You won't normally need to import this, see validators.py example for\n# more details. It's used for the inline measurement declaration example\n# below, but normally you'll only import it when you want to define custom\n# measurement validators.\nfrom openhtf.util import validators\n\n\n# Simple example of measurement use, similar to hello_world.py usage.\[email protected](htf.Measurement('hello_world_measurement'))\ndef hello_phase(test):\n test.measurements.hello_world_measurement = 'Hello!'\n\n\n# An alternative simpler syntax that creates the Measurement for you.\[email protected]('hello_again_measurement')\ndef again_phase(test):\n test.measurements.hello_again_measurement = 'Again!'\n\n\n# Multiple measurements can be specified in a single decorator, using either of\n# the above syntaxes. Technically, these syntaxes can be mixed and matched, but\n# as a matter of convention you should always use one or the other within a\n# single decorator call. You'll also note that you can stack multiple\n# decorations on a single phase. This is useful if you have a handful of simple\n# measurements, and then one or two with more complex declarations (see below).\[email protected]('first_measurement', 'second_measurement')\[email protected](htf.Measurement('third'), htf.Measurement('fourth'))\ndef lots_of_measurements(test):\n test.measurements.first_measurement = 'First!'\n # Measurements can also be access via indexing rather than attributes.\n test.measurements['second_measurement'] = 'Second :('\n # This can be handy for iterating over measurements.\n for measurement in ('third', 'fourth'):\n test.measurements[measurement] = measurement + ' is the best!'\n\n\n# Basic key/value measurements are handy, but we may also want to validate a\n# measurement against some criteria, or specify additional information\n# describing the measurement. Validators can get quite complex, for more\n# details, see the validators.py example.\[email protected](htf.Measurement('validated_measurement').in_range(0, 10).doc(\n 'This measurement is validated.').with_units(htf.units.SECOND))\ndef measure_seconds(test):\n # The 'outcome' of this measurement in the test_record result will be a PASS\n # because its value passes the validator specified (0 <= 5 <= 10).\n test.measurements.validated_measurement = 5\n\n\n# These additional attributes can also be specified inline as kwargs passed\n# directly to the @measures decorator. If you do so, however, you must\n# specify exactly one measurement with that decorator (ie. the first argument\n# must be a string containing the measurement name). 
If you want to specify\n# multiple measurements this way, you can stack multiple decorators.\[email protected]('inline_kwargs', docstring='This measurement is declared inline!',\n units=htf.units.HERTZ, validators=[validators.in_range(0, 10)])\[email protected]('another_inline', docstring='Because why not?')\ndef inline_phase(test):\n # This measurement will have an outcome of FAIL, because the set value of 15\n # will not pass the 0 <= x <= 10 validator.\n test.measurements.inline_kwargs = 15\n test.measurements.another_inline = 'This one is unvalidated.'\n\n # Let's log a message so the operator knows the test should fail.\n test.logger.info('Set inline_kwargs to a failing value, test should FAIL!')\n\n\nif __name__ == '__main__':\n # We instantiate our OpenHTF test with the phases we want to run as args.\n test = htf.Test(hello_phase, again_phase, lots_of_measurements,\n measure_seconds, inline_phase)\n\n # In order to view the result of the test, we have to output it somewhere,\n # and a local JSON file is a convenient way to do this. Custom output\n # mechanisms can be implemented, but for now we'll just keep it simple.\n # This will always output to the same ./measurements.json file, formatted\n # slightly for human readability.\n test.add_output_callbacks(\n json_factory.OutputToJSON('./measurements.json', indent=2))\n\n # Unlike hello_world.py, where we prompt for a DUT ID, here we'll just\n # use an arbitrary one.\n test.execute(test_start=lambda: 'MyDutId')\n", "path": "examples/measurements.py"}]}
2,107
314
gh_patches_debug_32683
rasdani/github-patches
git_diff
rotki__rotki-152
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- BTG or other assets with no market in bittrex crash the app ## Problem Definition If a user holds an asset in bittrex that does not have a market in the exchange, like say `BTG`, then during balances query rotkehlchen will crash with `ValueError: Bittrex: Could not find BTC market for "BTG"` ## Task Fix the crash, and use other sources for market price data in the case this happens. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `rotkehlchen/bittrex.py` Content: ``` 1 import time 2 import hmac 3 import hashlib 4 from urllib.parse import urlencode 5 from json.decoder import JSONDecodeError 6 7 from typing import Dict, Tuple, Optional, Union, List, cast 8 from rotkehlchen.utils import ( 9 createTimeStamp, 10 get_pair_position, 11 rlk_jsonloads, 12 cache_response_timewise, 13 ) 14 from rotkehlchen.exchange import Exchange 15 from rotkehlchen.order_formatting import Trade 16 from rotkehlchen.fval import FVal 17 from rotkehlchen.errors import RemoteError 18 from rotkehlchen.inquirer import Inquirer 19 from rotkehlchen import typing 20 21 import logging 22 logger = logging.getLogger(__name__) 23 24 BITTREX_MARKET_METHODS = { 25 'getopenorders', 26 'cancel', 27 'sellmarket', 28 'selllimit', 29 'buymarket', 30 'buylimit' 31 } 32 BITTREX_ACCOUNT_METHODS = { 33 'getbalances', 34 'getbalance', 35 'getdepositaddress', 36 'withdraw', 37 'getorderhistory' 38 } 39 40 41 def bittrex_pair_to_world(pair: str) -> str: 42 return pair.replace('-', '_') 43 44 45 def world_pair_to_bittrex(pair: str) -> str: 46 return pair.replace('_', '-') 47 48 49 def trade_from_bittrex(bittrex_trade: Dict) -> Trade: 50 """Turn a bittrex trade returned from bittrex trade history to our common trade 51 history format""" 52 amount = FVal(bittrex_trade['Quantity']) - FVal(bittrex_trade['QuantityRemaining']) 53 rate = FVal(bittrex_trade['PricePerUnit']) 54 order_type = bittrex_trade['OrderType'] 55 bittrex_price = FVal(bittrex_trade['Price']) 56 bittrex_commission = FVal(bittrex_trade['Commission']) 57 pair = bittrex_pair_to_world(bittrex_trade['Exchange']) 58 base_currency = get_pair_position(pair, 'first') 59 if order_type == 'LIMIT_BUY': 60 order_type = 'buy' 61 cost = bittrex_price + bittrex_commission 62 fee = bittrex_commission 63 elif order_type == 'LIMIT_SEL': 64 order_type = 'sell' 65 cost = bittrex_price - bittrex_commission 66 fee = bittrex_commission 67 else: 68 raise ValueError('Got unexpected order type "{}" for bittrex trade'.format(order_type)) 69 70 return Trade( 71 timestamp=bittrex_trade['TimeStamp'], 72 pair=pair, 73 type=order_type, 74 rate=rate, 75 cost=cost, 76 cost_currency=base_currency, 77 fee=fee, 78 fee_currency=base_currency, 79 amount=amount, 80 location='bittrex' 81 ) 82 83 84 class Bittrex(Exchange): 85 def __init__( 86 self, 87 api_key: typing.ApiKey, 88 secret: typing.ApiSecret, 89 inquirer: Inquirer, 90 data_dir: typing.FilePath 91 ): 92 super(Bittrex, self).__init__('bittrex', api_key, secret, data_dir) 93 self.apiversion = 'v1.1' 94 self.uri = 'https://bittrex.com/api/{}/'.format(self.apiversion) 95 self.inquirer = inquirer 96 97 def first_connection(self): 98 self.first_connection_made = True 99 100 def validate_api_key(self) -> Tuple[bool, str]: 101 try: 102 self.api_query('getbalance', {'currency': 'BTC'}) 103 except ValueError as e: 104 error = str(e) 105 if error == 'APIKEY_INVALID': 
106 return False, 'Provided API Key is invalid' 107 elif error == 'INVALID_SIGNATURE': 108 return False, 'Provided API Secret is invalid' 109 else: 110 raise 111 return True, '' 112 113 def api_query( 114 self, 115 method: str, 116 options: Optional[Dict] = None, 117 ) -> Union[List, Dict]: 118 """ 119 Queries Bittrex with given method and options 120 """ 121 if not options: 122 options = {} 123 nonce = str(int(time.time() * 1000)) 124 method_type = 'public' 125 126 if method in BITTREX_MARKET_METHODS: 127 method_type = 'market' 128 elif method in BITTREX_ACCOUNT_METHODS: 129 method_type = 'account' 130 131 request_url = self.uri + method_type + '/' + method + '?' 132 133 if method_type != 'public': 134 request_url += 'apikey=' + self.api_key.decode() + "&nonce=" + nonce + '&' 135 136 request_url += urlencode(options) 137 signature = hmac.new( 138 self.secret, 139 request_url.encode(), 140 hashlib.sha512 141 ).hexdigest() 142 self.session.headers.update({'apisign': signature}) 143 response = self.session.get(request_url) 144 try: 145 json_ret = rlk_jsonloads(response.text) 146 except JSONDecodeError: 147 raise RemoteError('Bittrex returned invalid JSON response') 148 149 if json_ret['success'] is not True: 150 raise RemoteError(json_ret['message']) 151 return json_ret['result'] 152 153 def get_btc_price(self, asset: typing.BlockchainAsset) -> Optional[FVal]: 154 if asset == 'BTC': 155 return None 156 btc_price = None 157 btc_pair = 'BTC-' + asset 158 for market in self.markets: 159 if market['MarketName'] == btc_pair: 160 btc_price = FVal(market['Last']) 161 break 162 163 if btc_price is None: 164 raise ValueError('Bittrex: Could not find BTC market for "{}"'.format(asset)) 165 166 return btc_price 167 168 @cache_response_timewise() 169 def query_balances(self) -> Tuple[Optional[dict], str]: 170 try: 171 self.markets = self.api_query('getmarketsummaries') 172 resp = self.api_query('getbalances') 173 except RemoteError as e: 174 msg = ( 175 'Bittrex API request failed. 
Could not reach bittrex due ' 176 'to {}'.format(e) 177 ) 178 logger.error(msg) 179 return None, msg 180 181 returned_balances = dict() 182 for entry in resp: 183 currency = entry['Currency'] 184 usd_price = self.inquirer.find_usd_price( 185 asset=currency, 186 asset_btc_price=self.get_btc_price(currency) 187 ) 188 189 balance = dict() 190 balance['amount'] = FVal(entry['Balance']) 191 balance['usd_value'] = FVal(balance['amount']) * usd_price 192 returned_balances[currency] = balance 193 194 return returned_balances, '' 195 196 def query_trade_history( 197 self, 198 start_ts: typing.Timestamp, 199 end_ts: typing.Timestamp, 200 end_at_least_ts: typing.Timestamp, 201 market: Optional[str] = None, 202 count: Optional[int] = None, 203 ) -> List: 204 205 options: Dict[str, Union[str, int]] = dict() 206 cache = self.check_trades_cache(start_ts, end_at_least_ts) 207 cache = cast(List, cache) 208 if market is not None: 209 options['market'] = world_pair_to_bittrex(market) 210 elif cache is not None: 211 return cache 212 213 if count is not None: 214 options['count'] = count 215 order_history = self.api_query('getorderhistory', options) 216 217 returned_history = list() 218 for order in order_history: 219 order_timestamp = createTimeStamp(order['TimeStamp'], formatstr="%Y-%m-%dT%H:%M:%S.%f") 220 if start_ts is not None and order_timestamp < start_ts: 221 continue 222 if end_ts is not None and order_timestamp > end_ts: 223 break 224 order['TimeStamp'] = order_timestamp 225 returned_history.append(order) 226 227 self.update_trades_cache(returned_history, start_ts, end_ts) 228 return returned_history 229 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/rotkehlchen/bittrex.py b/rotkehlchen/bittrex.py --- a/rotkehlchen/bittrex.py +++ b/rotkehlchen/bittrex.py @@ -1,24 +1,24 @@ -import time -import hmac import hashlib -from urllib.parse import urlencode +import hmac +import logging +import time from json.decoder import JSONDecodeError +from typing import Dict, List, Optional, Tuple, Union, cast +from urllib.parse import urlencode -from typing import Dict, Tuple, Optional, Union, List, cast +from rotkehlchen import typing +from rotkehlchen.errors import RemoteError +from rotkehlchen.exchange import Exchange +from rotkehlchen.fval import FVal +from rotkehlchen.inquirer import Inquirer +from rotkehlchen.order_formatting import Trade from rotkehlchen.utils import ( + cache_response_timewise, createTimeStamp, get_pair_position, rlk_jsonloads, - cache_response_timewise, ) -from rotkehlchen.exchange import Exchange -from rotkehlchen.order_formatting import Trade -from rotkehlchen.fval import FVal -from rotkehlchen.errors import RemoteError -from rotkehlchen.inquirer import Inquirer -from rotkehlchen import typing -import logging logger = logging.getLogger(__name__) BITTREX_MARKET_METHODS = { @@ -160,9 +160,6 @@ btc_price = FVal(market['Last']) break - if btc_price is None: - raise ValueError('Bittrex: Could not find BTC market for "{}"'.format(asset)) - return btc_price @cache_response_timewise() @@ -181,9 +178,10 @@ returned_balances = dict() for entry in resp: currency = entry['Currency'] + asset_btc_price = self.get_btc_price(currency) usd_price = self.inquirer.find_usd_price( asset=currency, - asset_btc_price=self.get_btc_price(currency) + asset_btc_price=asset_btc_price ) balance = dict()
{"golden_diff": "diff --git a/rotkehlchen/bittrex.py b/rotkehlchen/bittrex.py\n--- a/rotkehlchen/bittrex.py\n+++ b/rotkehlchen/bittrex.py\n@@ -1,24 +1,24 @@\n-import time\n-import hmac\n import hashlib\n-from urllib.parse import urlencode\n+import hmac\n+import logging\n+import time\n from json.decoder import JSONDecodeError\n+from typing import Dict, List, Optional, Tuple, Union, cast\n+from urllib.parse import urlencode\n \n-from typing import Dict, Tuple, Optional, Union, List, cast\n+from rotkehlchen import typing\n+from rotkehlchen.errors import RemoteError\n+from rotkehlchen.exchange import Exchange\n+from rotkehlchen.fval import FVal\n+from rotkehlchen.inquirer import Inquirer\n+from rotkehlchen.order_formatting import Trade\n from rotkehlchen.utils import (\n+ cache_response_timewise,\n createTimeStamp,\n get_pair_position,\n rlk_jsonloads,\n- cache_response_timewise,\n )\n-from rotkehlchen.exchange import Exchange\n-from rotkehlchen.order_formatting import Trade\n-from rotkehlchen.fval import FVal\n-from rotkehlchen.errors import RemoteError\n-from rotkehlchen.inquirer import Inquirer\n-from rotkehlchen import typing\n \n-import logging\n logger = logging.getLogger(__name__)\n \n BITTREX_MARKET_METHODS = {\n@@ -160,9 +160,6 @@\n btc_price = FVal(market['Last'])\n break\n \n- if btc_price is None:\n- raise ValueError('Bittrex: Could not find BTC market for \"{}\"'.format(asset))\n-\n return btc_price\n \n @cache_response_timewise()\n@@ -181,9 +178,10 @@\n returned_balances = dict()\n for entry in resp:\n currency = entry['Currency']\n+ asset_btc_price = self.get_btc_price(currency)\n usd_price = self.inquirer.find_usd_price(\n asset=currency,\n- asset_btc_price=self.get_btc_price(currency)\n+ asset_btc_price=asset_btc_price\n )\n \n balance = dict()\n", "issue": "BTG or other assets with no market in bittrex crash the app\n## Problem Definition\r\n\r\nIf a user holds an asset in bittrex that does not have a market in the exchange, like say `BTG`, then during balances query rotkehlchen will crash with `ValueError: Bittrex: Could not find BTC market for \"BTG\"`\r\n\r\n## Task\r\n\r\nFix the crash, and use other sources for market price data in the case this happens.\n", "before_files": [{"content": "import time\nimport hmac\nimport hashlib\nfrom urllib.parse import urlencode\nfrom json.decoder import JSONDecodeError\n\nfrom typing import Dict, Tuple, Optional, Union, List, cast\nfrom rotkehlchen.utils import (\n createTimeStamp,\n get_pair_position,\n rlk_jsonloads,\n cache_response_timewise,\n)\nfrom rotkehlchen.exchange import Exchange\nfrom rotkehlchen.order_formatting import Trade\nfrom rotkehlchen.fval import FVal\nfrom rotkehlchen.errors import RemoteError\nfrom rotkehlchen.inquirer import Inquirer\nfrom rotkehlchen import typing\n\nimport logging\nlogger = logging.getLogger(__name__)\n\nBITTREX_MARKET_METHODS = {\n 'getopenorders',\n 'cancel',\n 'sellmarket',\n 'selllimit',\n 'buymarket',\n 'buylimit'\n}\nBITTREX_ACCOUNT_METHODS = {\n 'getbalances',\n 'getbalance',\n 'getdepositaddress',\n 'withdraw',\n 'getorderhistory'\n}\n\n\ndef bittrex_pair_to_world(pair: str) -> str:\n return pair.replace('-', '_')\n\n\ndef world_pair_to_bittrex(pair: str) -> str:\n return pair.replace('_', '-')\n\n\ndef trade_from_bittrex(bittrex_trade: Dict) -> Trade:\n \"\"\"Turn a bittrex trade returned from bittrex trade history to our common trade\n history format\"\"\"\n amount = FVal(bittrex_trade['Quantity']) - FVal(bittrex_trade['QuantityRemaining'])\n rate = 
FVal(bittrex_trade['PricePerUnit'])\n order_type = bittrex_trade['OrderType']\n bittrex_price = FVal(bittrex_trade['Price'])\n bittrex_commission = FVal(bittrex_trade['Commission'])\n pair = bittrex_pair_to_world(bittrex_trade['Exchange'])\n base_currency = get_pair_position(pair, 'first')\n if order_type == 'LIMIT_BUY':\n order_type = 'buy'\n cost = bittrex_price + bittrex_commission\n fee = bittrex_commission\n elif order_type == 'LIMIT_SEL':\n order_type = 'sell'\n cost = bittrex_price - bittrex_commission\n fee = bittrex_commission\n else:\n raise ValueError('Got unexpected order type \"{}\" for bittrex trade'.format(order_type))\n\n return Trade(\n timestamp=bittrex_trade['TimeStamp'],\n pair=pair,\n type=order_type,\n rate=rate,\n cost=cost,\n cost_currency=base_currency,\n fee=fee,\n fee_currency=base_currency,\n amount=amount,\n location='bittrex'\n )\n\n\nclass Bittrex(Exchange):\n def __init__(\n self,\n api_key: typing.ApiKey,\n secret: typing.ApiSecret,\n inquirer: Inquirer,\n data_dir: typing.FilePath\n ):\n super(Bittrex, self).__init__('bittrex', api_key, secret, data_dir)\n self.apiversion = 'v1.1'\n self.uri = 'https://bittrex.com/api/{}/'.format(self.apiversion)\n self.inquirer = inquirer\n\n def first_connection(self):\n self.first_connection_made = True\n\n def validate_api_key(self) -> Tuple[bool, str]:\n try:\n self.api_query('getbalance', {'currency': 'BTC'})\n except ValueError as e:\n error = str(e)\n if error == 'APIKEY_INVALID':\n return False, 'Provided API Key is invalid'\n elif error == 'INVALID_SIGNATURE':\n return False, 'Provided API Secret is invalid'\n else:\n raise\n return True, ''\n\n def api_query(\n self,\n method: str,\n options: Optional[Dict] = None,\n ) -> Union[List, Dict]:\n \"\"\"\n Queries Bittrex with given method and options\n \"\"\"\n if not options:\n options = {}\n nonce = str(int(time.time() * 1000))\n method_type = 'public'\n\n if method in BITTREX_MARKET_METHODS:\n method_type = 'market'\n elif method in BITTREX_ACCOUNT_METHODS:\n method_type = 'account'\n\n request_url = self.uri + method_type + '/' + method + '?'\n\n if method_type != 'public':\n request_url += 'apikey=' + self.api_key.decode() + \"&nonce=\" + nonce + '&'\n\n request_url += urlencode(options)\n signature = hmac.new(\n self.secret,\n request_url.encode(),\n hashlib.sha512\n ).hexdigest()\n self.session.headers.update({'apisign': signature})\n response = self.session.get(request_url)\n try:\n json_ret = rlk_jsonloads(response.text)\n except JSONDecodeError:\n raise RemoteError('Bittrex returned invalid JSON response')\n\n if json_ret['success'] is not True:\n raise RemoteError(json_ret['message'])\n return json_ret['result']\n\n def get_btc_price(self, asset: typing.BlockchainAsset) -> Optional[FVal]:\n if asset == 'BTC':\n return None\n btc_price = None\n btc_pair = 'BTC-' + asset\n for market in self.markets:\n if market['MarketName'] == btc_pair:\n btc_price = FVal(market['Last'])\n break\n\n if btc_price is None:\n raise ValueError('Bittrex: Could not find BTC market for \"{}\"'.format(asset))\n\n return btc_price\n\n @cache_response_timewise()\n def query_balances(self) -> Tuple[Optional[dict], str]:\n try:\n self.markets = self.api_query('getmarketsummaries')\n resp = self.api_query('getbalances')\n except RemoteError as e:\n msg = (\n 'Bittrex API request failed. 
Could not reach bittrex due '\n 'to {}'.format(e)\n )\n logger.error(msg)\n return None, msg\n\n returned_balances = dict()\n for entry in resp:\n currency = entry['Currency']\n usd_price = self.inquirer.find_usd_price(\n asset=currency,\n asset_btc_price=self.get_btc_price(currency)\n )\n\n balance = dict()\n balance['amount'] = FVal(entry['Balance'])\n balance['usd_value'] = FVal(balance['amount']) * usd_price\n returned_balances[currency] = balance\n\n return returned_balances, ''\n\n def query_trade_history(\n self,\n start_ts: typing.Timestamp,\n end_ts: typing.Timestamp,\n end_at_least_ts: typing.Timestamp,\n market: Optional[str] = None,\n count: Optional[int] = None,\n ) -> List:\n\n options: Dict[str, Union[str, int]] = dict()\n cache = self.check_trades_cache(start_ts, end_at_least_ts)\n cache = cast(List, cache)\n if market is not None:\n options['market'] = world_pair_to_bittrex(market)\n elif cache is not None:\n return cache\n\n if count is not None:\n options['count'] = count\n order_history = self.api_query('getorderhistory', options)\n\n returned_history = list()\n for order in order_history:\n order_timestamp = createTimeStamp(order['TimeStamp'], formatstr=\"%Y-%m-%dT%H:%M:%S.%f\")\n if start_ts is not None and order_timestamp < start_ts:\n continue\n if end_ts is not None and order_timestamp > end_ts:\n break\n order['TimeStamp'] = order_timestamp\n returned_history.append(order)\n\n self.update_trades_cache(returned_history, start_ts, end_ts)\n return returned_history\n", "path": "rotkehlchen/bittrex.py"}], "after_files": [{"content": "import hashlib\nimport hmac\nimport logging\nimport time\nfrom json.decoder import JSONDecodeError\nfrom typing import Dict, List, Optional, Tuple, Union, cast\nfrom urllib.parse import urlencode\n\nfrom rotkehlchen import typing\nfrom rotkehlchen.errors import RemoteError\nfrom rotkehlchen.exchange import Exchange\nfrom rotkehlchen.fval import FVal\nfrom rotkehlchen.inquirer import Inquirer\nfrom rotkehlchen.order_formatting import Trade\nfrom rotkehlchen.utils import (\n cache_response_timewise,\n createTimeStamp,\n get_pair_position,\n rlk_jsonloads,\n)\n\nlogger = logging.getLogger(__name__)\n\nBITTREX_MARKET_METHODS = {\n 'getopenorders',\n 'cancel',\n 'sellmarket',\n 'selllimit',\n 'buymarket',\n 'buylimit'\n}\nBITTREX_ACCOUNT_METHODS = {\n 'getbalances',\n 'getbalance',\n 'getdepositaddress',\n 'withdraw',\n 'getorderhistory'\n}\n\n\ndef bittrex_pair_to_world(pair: str) -> str:\n return pair.replace('-', '_')\n\n\ndef world_pair_to_bittrex(pair: str) -> str:\n return pair.replace('_', '-')\n\n\ndef trade_from_bittrex(bittrex_trade: Dict) -> Trade:\n \"\"\"Turn a bittrex trade returned from bittrex trade history to our common trade\n history format\"\"\"\n amount = FVal(bittrex_trade['Quantity']) - FVal(bittrex_trade['QuantityRemaining'])\n rate = FVal(bittrex_trade['PricePerUnit'])\n order_type = bittrex_trade['OrderType']\n bittrex_price = FVal(bittrex_trade['Price'])\n bittrex_commission = FVal(bittrex_trade['Commission'])\n pair = bittrex_pair_to_world(bittrex_trade['Exchange'])\n base_currency = get_pair_position(pair, 'first')\n if order_type == 'LIMIT_BUY':\n order_type = 'buy'\n cost = bittrex_price + bittrex_commission\n fee = bittrex_commission\n elif order_type == 'LIMIT_SEL':\n order_type = 'sell'\n cost = bittrex_price - bittrex_commission\n fee = bittrex_commission\n else:\n raise ValueError('Got unexpected order type \"{}\" for bittrex trade'.format(order_type))\n\n return Trade(\n 
timestamp=bittrex_trade['TimeStamp'],\n pair=pair,\n type=order_type,\n rate=rate,\n cost=cost,\n cost_currency=base_currency,\n fee=fee,\n fee_currency=base_currency,\n amount=amount,\n location='bittrex'\n )\n\n\nclass Bittrex(Exchange):\n def __init__(\n self,\n api_key: typing.ApiKey,\n secret: typing.ApiSecret,\n inquirer: Inquirer,\n data_dir: typing.FilePath\n ):\n super(Bittrex, self).__init__('bittrex', api_key, secret, data_dir)\n self.apiversion = 'v1.1'\n self.uri = 'https://bittrex.com/api/{}/'.format(self.apiversion)\n self.inquirer = inquirer\n\n def first_connection(self):\n self.first_connection_made = True\n\n def validate_api_key(self) -> Tuple[bool, str]:\n try:\n self.api_query('getbalance', {'currency': 'BTC'})\n except ValueError as e:\n error = str(e)\n if error == 'APIKEY_INVALID':\n return False, 'Provided API Key is invalid'\n elif error == 'INVALID_SIGNATURE':\n return False, 'Provided API Secret is invalid'\n else:\n raise\n return True, ''\n\n def api_query(\n self,\n method: str,\n options: Optional[Dict] = None,\n ) -> Union[List, Dict]:\n \"\"\"\n Queries Bittrex with given method and options\n \"\"\"\n if not options:\n options = {}\n nonce = str(int(time.time() * 1000))\n method_type = 'public'\n\n if method in BITTREX_MARKET_METHODS:\n method_type = 'market'\n elif method in BITTREX_ACCOUNT_METHODS:\n method_type = 'account'\n\n request_url = self.uri + method_type + '/' + method + '?'\n\n if method_type != 'public':\n request_url += 'apikey=' + self.api_key.decode() + \"&nonce=\" + nonce + '&'\n\n request_url += urlencode(options)\n signature = hmac.new(\n self.secret,\n request_url.encode(),\n hashlib.sha512\n ).hexdigest()\n self.session.headers.update({'apisign': signature})\n response = self.session.get(request_url)\n try:\n json_ret = rlk_jsonloads(response.text)\n except JSONDecodeError:\n raise RemoteError('Bittrex returned invalid JSON response')\n\n if json_ret['success'] is not True:\n raise RemoteError(json_ret['message'])\n return json_ret['result']\n\n def get_btc_price(self, asset: typing.BlockchainAsset) -> Optional[FVal]:\n if asset == 'BTC':\n return None\n btc_price = None\n btc_pair = 'BTC-' + asset\n for market in self.markets:\n if market['MarketName'] == btc_pair:\n btc_price = FVal(market['Last'])\n break\n\n return btc_price\n\n @cache_response_timewise()\n def query_balances(self) -> Tuple[Optional[dict], str]:\n try:\n self.markets = self.api_query('getmarketsummaries')\n resp = self.api_query('getbalances')\n except RemoteError as e:\n msg = (\n 'Bittrex API request failed. 
Could not reach bittrex due '\n 'to {}'.format(e)\n )\n logger.error(msg)\n return None, msg\n\n returned_balances = dict()\n for entry in resp:\n currency = entry['Currency']\n asset_btc_price = self.get_btc_price(currency)\n usd_price = self.inquirer.find_usd_price(\n asset=currency,\n asset_btc_price=asset_btc_price\n )\n\n balance = dict()\n balance['amount'] = FVal(entry['Balance'])\n balance['usd_value'] = FVal(balance['amount']) * usd_price\n returned_balances[currency] = balance\n\n return returned_balances, ''\n\n def query_trade_history(\n self,\n start_ts: typing.Timestamp,\n end_ts: typing.Timestamp,\n end_at_least_ts: typing.Timestamp,\n market: Optional[str] = None,\n count: Optional[int] = None,\n ) -> List:\n\n options: Dict[str, Union[str, int]] = dict()\n cache = self.check_trades_cache(start_ts, end_at_least_ts)\n cache = cast(List, cache)\n if market is not None:\n options['market'] = world_pair_to_bittrex(market)\n elif cache is not None:\n return cache\n\n if count is not None:\n options['count'] = count\n order_history = self.api_query('getorderhistory', options)\n\n returned_history = list()\n for order in order_history:\n order_timestamp = createTimeStamp(order['TimeStamp'], formatstr=\"%Y-%m-%dT%H:%M:%S.%f\")\n if start_ts is not None and order_timestamp < start_ts:\n continue\n if end_ts is not None and order_timestamp > end_ts:\n break\n order['TimeStamp'] = order_timestamp\n returned_history.append(order)\n\n self.update_trades_cache(returned_history, start_ts, end_ts)\n return returned_history\n", "path": "rotkehlchen/bittrex.py"}]}
2,618
488
gh_patches_debug_6849
rasdani/github-patches
git_diff
WordPress__openverse-api-233
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [Bug] A circular import prevents starting the project correctly ## Description <!-- Concisely describe the bug. --> There is a problem with models imports, run the project and see: ``` web_1 | Exception in thread django-main-thread: web_1 | Traceback (most recent call last): web_1 | File "/usr/local/lib/python3.9/threading.py", line 973, in _bootstrap_inner web_1 | self.run() web_1 | File "/usr/local/lib/python3.9/threading.py", line 910, in run web_1 | self._target(*self._args, **self._kwargs) web_1 | File "/usr/local/lib/python3.9/site-packages/django/utils/autoreload.py", line 64, in wrapper web_1 | fn(*args, **kwargs) web_1 | File "/usr/local/lib/python3.9/site-packages/django/core/management/commands/runserver.py", line 110, in inner_run web_1 | autoreload.raise_last_exception() web_1 | File "/usr/local/lib/python3.9/site-packages/django/utils/autoreload.py", line 87, in raise_last_exception web_1 | raise _exception[1] web_1 | File "/usr/local/lib/python3.9/site-packages/django/core/management/__init__.py", line 375, in execute web_1 | autoreload.check_errors(django.setup)() web_1 | File "/usr/local/lib/python3.9/site-packages/django/utils/autoreload.py", line 64, in wrapper web_1 | fn(*args, **kwargs) web_1 | File "/usr/local/lib/python3.9/site-packages/django/__init__.py", line 24, in setup web_1 | apps.populate(settings.INSTALLED_APPS) web_1 | File "/usr/local/lib/python3.9/site-packages/django/apps/registry.py", line 114, in populate web_1 | app_config.import_models() web_1 | File "/usr/local/lib/python3.9/site-packages/django/apps/config.py", line 301, in import_models web_1 | self.models_module = import_module(models_module_name) web_1 | File "/usr/local/lib/python3.9/importlib/__init__.py", line 127, in import_module web_1 | return _bootstrap._gcd_import(name[level:], package, level) web_1 | File "<frozen importlib._bootstrap>", line 1030, in _gcd_import web_1 | File "<frozen importlib._bootstrap>", line 1007, in _find_and_load web_1 | File "<frozen importlib._bootstrap>", line 986, in _find_and_load_unlocked web_1 | File "<frozen importlib._bootstrap>", line 680, in _load_unlocked web_1 | File "<frozen importlib._bootstrap_external>", line 850, in exec_module web_1 | File "<frozen importlib._bootstrap>", line 228, in _call_with_frames_removed web_1 | File "/openverse-api/catalog/api/models/__init__.py", line 1, in <module> web_1 | from catalog.api.models.audio import ( web_1 | File "/openverse-api/catalog/api/models/audio.py", line 2, in <module> web_1 | from catalog.api.models import OpenLedgerModel web_1 | ImportError: cannot import name 'OpenLedgerModel' from partially initialized module 'catalog.api.models' (most likely due to a circular import) (/openverse-api/catalog/api/models/__init__.py) ``` ## Expectation <!-- Concisely describe what you expected to happen. --> The project should start without errors and run normally, passing tests. ## Additional context <!-- Add any other context about the problem here; or delete the section entirely. --> The wrong order is introduced due to the `isort` rules so we should make an exception for these lines or the file. ## Resolution <!-- Replace the [ ] with [x] to check the box. --> - [ ] 🙋 I would be interested in resolving this bug. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
--- BEGIN FILES --- Path: `openverse-api/catalog/api/models/__init__.py` Content: ``` 1 from catalog.api.models.audio import ( 2 AltAudioFile, 3 Audio, 4 AudioList, 5 AudioReport, 6 AudioSet, 7 DeletedAudio, 8 MatureAudio, 9 ) 10 from catalog.api.models.base import OpenLedgerModel 11 from catalog.api.models.image import ( 12 DeletedImage, 13 Image, 14 ImageList, 15 ImageReport, 16 MatureImage, 17 ) 18 from catalog.api.models.media import ( 19 DEINDEXED, 20 DMCA, 21 MATURE, 22 MATURE_FILTERED, 23 NO_ACTION, 24 OTHER, 25 PENDING, 26 ) 27 from catalog.api.models.models import ContentProvider, ShortenedLink, SourceLogo, Tag 28 from catalog.api.models.oauth import ( 29 OAuth2Registration, 30 OAuth2Verification, 31 ThrottledApplication, 32 ) 33 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/openverse-api/catalog/api/models/__init__.py b/openverse-api/catalog/api/models/__init__.py --- a/openverse-api/catalog/api/models/__init__.py +++ b/openverse-api/catalog/api/models/__init__.py @@ -1,3 +1,4 @@ +from catalog.api.models.base import OpenLedgerModel # isort:skip from catalog.api.models.audio import ( AltAudioFile, Audio, @@ -7,7 +8,6 @@ DeletedAudio, MatureAudio, ) -from catalog.api.models.base import OpenLedgerModel from catalog.api.models.image import ( DeletedImage, Image,
{"golden_diff": "diff --git a/openverse-api/catalog/api/models/__init__.py b/openverse-api/catalog/api/models/__init__.py\n--- a/openverse-api/catalog/api/models/__init__.py\n+++ b/openverse-api/catalog/api/models/__init__.py\n@@ -1,3 +1,4 @@\n+from catalog.api.models.base import OpenLedgerModel # isort:skip\n from catalog.api.models.audio import (\n AltAudioFile,\n Audio,\n@@ -7,7 +8,6 @@\n DeletedAudio,\n MatureAudio,\n )\n-from catalog.api.models.base import OpenLedgerModel\n from catalog.api.models.image import (\n DeletedImage,\n Image,\n", "issue": "[Bug] A circular import prevents starting the project correctly\n## Description\r\n<!-- Concisely describe the bug. -->\r\nThere is a problem with models imports, run the project and see:\r\n\r\n```\r\nweb_1 | Exception in thread django-main-thread:\r\nweb_1 | Traceback (most recent call last):\r\nweb_1 | File \"/usr/local/lib/python3.9/threading.py\", line 973, in _bootstrap_inner\r\nweb_1 | self.run()\r\nweb_1 | File \"/usr/local/lib/python3.9/threading.py\", line 910, in run\r\nweb_1 | self._target(*self._args, **self._kwargs)\r\nweb_1 | File \"/usr/local/lib/python3.9/site-packages/django/utils/autoreload.py\", line 64, in wrapper\r\nweb_1 | fn(*args, **kwargs)\r\nweb_1 | File \"/usr/local/lib/python3.9/site-packages/django/core/management/commands/runserver.py\", line 110, in inner_run\r\nweb_1 | autoreload.raise_last_exception()\r\nweb_1 | File \"/usr/local/lib/python3.9/site-packages/django/utils/autoreload.py\", line 87, in raise_last_exception\r\nweb_1 | raise _exception[1]\r\nweb_1 | File \"/usr/local/lib/python3.9/site-packages/django/core/management/__init__.py\", line 375, in execute\r\nweb_1 | autoreload.check_errors(django.setup)()\r\nweb_1 | File \"/usr/local/lib/python3.9/site-packages/django/utils/autoreload.py\", line 64, in wrapper\r\nweb_1 | fn(*args, **kwargs)\r\nweb_1 | File \"/usr/local/lib/python3.9/site-packages/django/__init__.py\", line 24, in setup\r\nweb_1 | apps.populate(settings.INSTALLED_APPS)\r\nweb_1 | File \"/usr/local/lib/python3.9/site-packages/django/apps/registry.py\", line 114, in populate\r\nweb_1 | app_config.import_models()\r\nweb_1 | File \"/usr/local/lib/python3.9/site-packages/django/apps/config.py\", line 301, in import_models\r\nweb_1 | self.models_module = import_module(models_module_name)\r\nweb_1 | File \"/usr/local/lib/python3.9/importlib/__init__.py\", line 127, in import_module\r\nweb_1 | return _bootstrap._gcd_import(name[level:], package, level)\r\nweb_1 | File \"<frozen importlib._bootstrap>\", line 1030, in _gcd_import\r\nweb_1 | File \"<frozen importlib._bootstrap>\", line 1007, in _find_and_load\r\nweb_1 | File \"<frozen importlib._bootstrap>\", line 986, in _find_and_load_unlocked\r\nweb_1 | File \"<frozen importlib._bootstrap>\", line 680, in _load_unlocked\r\nweb_1 | File \"<frozen importlib._bootstrap_external>\", line 850, in exec_module\r\nweb_1 | File \"<frozen importlib._bootstrap>\", line 228, in _call_with_frames_removed\r\nweb_1 | File \"/openverse-api/catalog/api/models/__init__.py\", line 1, in <module>\r\nweb_1 | from catalog.api.models.audio import (\r\nweb_1 | File \"/openverse-api/catalog/api/models/audio.py\", line 2, in <module>\r\nweb_1 | from catalog.api.models import OpenLedgerModel\r\nweb_1 | ImportError: cannot import name 'OpenLedgerModel' from partially initialized module 'catalog.api.models' (most likely due to a circular import) (/openverse-api/catalog/api/models/__init__.py)\r\n```\r\n\r\n## Expectation\r\n<!-- Concisely describe what you expected to 
happen. -->\r\nThe project should start without errors and run normally, passing tests.\r\n\r\n## Additional context\r\n<!-- Add any other context about the problem here; or delete the section entirely. -->\r\nThe wrong order is introduced due to the `isort` rules so we should make an exception for these lines or the file.\r\n\r\n## Resolution\r\n<!-- Replace the [ ] with [x] to check the box. -->\r\n- [ ] \ud83d\ude4b I would be interested in resolving this bug.\r\n\n", "before_files": [{"content": "from catalog.api.models.audio import (\n AltAudioFile,\n Audio,\n AudioList,\n AudioReport,\n AudioSet,\n DeletedAudio,\n MatureAudio,\n)\nfrom catalog.api.models.base import OpenLedgerModel\nfrom catalog.api.models.image import (\n DeletedImage,\n Image,\n ImageList,\n ImageReport,\n MatureImage,\n)\nfrom catalog.api.models.media import (\n DEINDEXED,\n DMCA,\n MATURE,\n MATURE_FILTERED,\n NO_ACTION,\n OTHER,\n PENDING,\n)\nfrom catalog.api.models.models import ContentProvider, ShortenedLink, SourceLogo, Tag\nfrom catalog.api.models.oauth import (\n OAuth2Registration,\n OAuth2Verification,\n ThrottledApplication,\n)\n", "path": "openverse-api/catalog/api/models/__init__.py"}], "after_files": [{"content": "from catalog.api.models.base import OpenLedgerModel # isort:skip\nfrom catalog.api.models.audio import (\n AltAudioFile,\n Audio,\n AudioList,\n AudioReport,\n AudioSet,\n DeletedAudio,\n MatureAudio,\n)\nfrom catalog.api.models.image import (\n DeletedImage,\n Image,\n ImageList,\n ImageReport,\n MatureImage,\n)\nfrom catalog.api.models.media import (\n DEINDEXED,\n DMCA,\n MATURE,\n MATURE_FILTERED,\n NO_ACTION,\n OTHER,\n PENDING,\n)\nfrom catalog.api.models.models import ContentProvider, ShortenedLink, SourceLogo, Tag\nfrom catalog.api.models.oauth import (\n OAuth2Registration,\n OAuth2Verification,\n ThrottledApplication,\n)\n", "path": "openverse-api/catalog/api/models/__init__.py"}]}
1,466
139
gh_patches_debug_25787
rasdani/github-patches
git_diff
pypa__setuptools-1905
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- TestDepends.testRequire regression in 41.6.0 (py3) While trying to build the new release of setuptools, I get the following test failure: ``` ==================================================================== FAILURES ===================================================================== _____________________________________________________________ TestDepends.testRequire _____________________________________________________________ self = <setuptools.tests.test_setuptools.TestDepends object at 0x7fbfae31d710> @needs_bytecode def testRequire(self): req = Require('Json', '1.0.3', 'json') assert req.name == 'Json' assert req.module == 'json' assert req.requested_version == '1.0.3' assert req.attribute == '__version__' assert req.full_name() == 'Json-1.0.3' from json import __version__ assert req.get_version() == __version__ assert req.version_ok('1.0.9') assert not req.version_ok('0.9.1') assert not req.version_ok('unknown') assert req.is_present() assert req.is_current() req = Require('Json 3000', '03000', 'json', format=LooseVersion) assert req.is_present() assert not req.is_current() assert not req.version_ok('unknown') req = Require('Do-what-I-mean', '1.0', 'd-w-i-m') assert not req.is_present() assert not req.is_current() req = Require('Tests', None, 'tests', homepage="http://example.com") assert req.format is None assert req.attribute is None assert req.requested_version is None assert req.full_name() == 'Tests' assert req.homepage == 'http://example.com' from setuptools.tests import __path__ paths = [os.path.dirname(p) for p in __path__] > assert req.is_present(paths) E AssertionError: assert False E + where False = <bound method Require.is_present of <setuptools.depends.Require object at 0x7fbfae0d0b38>>(['/tmp/portage/dev-python/setuptools-41.6.0/work/setuptools-41.6.0-python3_5/setuptools']) E + where <bound method Require.is_present of <setuptools.depends.Require object at 0x7fbfae0d0b38>> = <setuptools.depends.Require object at 0x7fbfae0d0b38>.is_present setuptools/tests/test_setuptools.py:120: AssertionError ``` I can reproduce it reliably with at least pypy3.6 (7.2.0) & python3.5 (3.5.7). I haven't tested other versions yet. Full build log: [dev-python:setuptools-41.6.0:20191030-083347.log](https://github.com/pypa/setuptools/files/3787797/dev-python.setuptools-41.6.0.20191030-083347.log) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setuptools/_imp.py` Content: ``` 1 """ 2 Re-implementation of find_module and get_frozen_object 3 from the deprecated imp module. 
4 """ 5 6 import os 7 import importlib.util 8 import importlib.machinery 9 10 from .py34compat import module_from_spec 11 12 13 PY_SOURCE = 1 14 PY_COMPILED = 2 15 C_EXTENSION = 3 16 C_BUILTIN = 6 17 PY_FROZEN = 7 18 19 20 def find_module(module, paths=None): 21 """Just like 'imp.find_module()', but with package support""" 22 spec = importlib.util.find_spec(module, paths) 23 if spec is None: 24 raise ImportError("Can't find %s" % module) 25 if not spec.has_location and hasattr(spec, 'submodule_search_locations'): 26 spec = importlib.util.spec_from_loader('__init__.py', spec.loader) 27 28 kind = -1 29 file = None 30 static = isinstance(spec.loader, type) 31 if spec.origin == 'frozen' or static and issubclass( 32 spec.loader, importlib.machinery.FrozenImporter): 33 kind = PY_FROZEN 34 path = None # imp compabilty 35 suffix = mode = '' # imp compability 36 elif spec.origin == 'built-in' or static and issubclass( 37 spec.loader, importlib.machinery.BuiltinImporter): 38 kind = C_BUILTIN 39 path = None # imp compabilty 40 suffix = mode = '' # imp compability 41 elif spec.has_location: 42 path = spec.origin 43 suffix = os.path.splitext(path)[1] 44 mode = 'r' if suffix in importlib.machinery.SOURCE_SUFFIXES else 'rb' 45 46 if suffix in importlib.machinery.SOURCE_SUFFIXES: 47 kind = PY_SOURCE 48 elif suffix in importlib.machinery.BYTECODE_SUFFIXES: 49 kind = PY_COMPILED 50 elif suffix in importlib.machinery.EXTENSION_SUFFIXES: 51 kind = C_EXTENSION 52 53 if kind in {PY_SOURCE, PY_COMPILED}: 54 file = open(path, mode) 55 else: 56 path = None 57 suffix = mode = '' 58 59 return file, path, (suffix, mode, kind) 60 61 62 def get_frozen_object(module, paths=None): 63 spec = importlib.util.find_spec(module, paths) 64 if not spec: 65 raise ImportError("Can't find %s" % module) 66 return spec.loader.get_code(module) 67 68 69 def get_module(module, paths, info): 70 spec = importlib.util.find_spec(module, paths) 71 if not spec: 72 raise ImportError("Can't find %s" % module) 73 return module_from_spec(spec) 74 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setuptools/_imp.py b/setuptools/_imp.py --- a/setuptools/_imp.py +++ b/setuptools/_imp.py @@ -17,9 +17,18 @@ PY_FROZEN = 7 +def find_spec(module, paths): + finder = ( + importlib.machinery.PathFinder().find_spec + if isinstance(paths, list) else + importlib.util.find_spec + ) + return finder(module, paths) + + def find_module(module, paths=None): """Just like 'imp.find_module()', but with package support""" - spec = importlib.util.find_spec(module, paths) + spec = find_spec(module, paths) if spec is None: raise ImportError("Can't find %s" % module) if not spec.has_location and hasattr(spec, 'submodule_search_locations'): @@ -60,14 +69,14 @@ def get_frozen_object(module, paths=None): - spec = importlib.util.find_spec(module, paths) + spec = find_spec(module, paths) if not spec: raise ImportError("Can't find %s" % module) return spec.loader.get_code(module) def get_module(module, paths, info): - spec = importlib.util.find_spec(module, paths) + spec = find_spec(module, paths) if not spec: raise ImportError("Can't find %s" % module) return module_from_spec(spec)
{"golden_diff": "diff --git a/setuptools/_imp.py b/setuptools/_imp.py\n--- a/setuptools/_imp.py\n+++ b/setuptools/_imp.py\n@@ -17,9 +17,18 @@\n PY_FROZEN = 7\n \n \n+def find_spec(module, paths):\n+ finder = (\n+ importlib.machinery.PathFinder().find_spec\n+ if isinstance(paths, list) else\n+ importlib.util.find_spec\n+ )\n+ return finder(module, paths)\n+\n+\n def find_module(module, paths=None):\n \"\"\"Just like 'imp.find_module()', but with package support\"\"\"\n- spec = importlib.util.find_spec(module, paths)\n+ spec = find_spec(module, paths)\n if spec is None:\n raise ImportError(\"Can't find %s\" % module)\n if not spec.has_location and hasattr(spec, 'submodule_search_locations'):\n@@ -60,14 +69,14 @@\n \n \n def get_frozen_object(module, paths=None):\n- spec = importlib.util.find_spec(module, paths)\n+ spec = find_spec(module, paths)\n if not spec:\n raise ImportError(\"Can't find %s\" % module)\n return spec.loader.get_code(module)\n \n \n def get_module(module, paths, info):\n- spec = importlib.util.find_spec(module, paths)\n+ spec = find_spec(module, paths)\n if not spec:\n raise ImportError(\"Can't find %s\" % module)\n return module_from_spec(spec)\n", "issue": "TestDepends.testRequire regression in 41.6.0 (py3)\nWhile trying to build the new release of setuptools, I get the following test failure:\r\n\r\n```\r\n==================================================================== FAILURES =====================================================================\r\n_____________________________________________________________ TestDepends.testRequire _____________________________________________________________\r\n\r\nself = <setuptools.tests.test_setuptools.TestDepends object at 0x7fbfae31d710>\r\n\r\n @needs_bytecode\r\n def testRequire(self):\r\n req = Require('Json', '1.0.3', 'json')\r\n \r\n assert req.name == 'Json'\r\n assert req.module == 'json'\r\n assert req.requested_version == '1.0.3'\r\n assert req.attribute == '__version__'\r\n assert req.full_name() == 'Json-1.0.3'\r\n \r\n from json import __version__\r\n assert req.get_version() == __version__\r\n assert req.version_ok('1.0.9')\r\n assert not req.version_ok('0.9.1')\r\n assert not req.version_ok('unknown')\r\n \r\n assert req.is_present()\r\n assert req.is_current()\r\n \r\n req = Require('Json 3000', '03000', 'json', format=LooseVersion)\r\n assert req.is_present()\r\n assert not req.is_current()\r\n assert not req.version_ok('unknown')\r\n \r\n req = Require('Do-what-I-mean', '1.0', 'd-w-i-m')\r\n assert not req.is_present()\r\n assert not req.is_current()\r\n \r\n req = Require('Tests', None, 'tests', homepage=\"http://example.com\")\r\n assert req.format is None\r\n assert req.attribute is None\r\n assert req.requested_version is None\r\n assert req.full_name() == 'Tests'\r\n assert req.homepage == 'http://example.com'\r\n \r\n from setuptools.tests import __path__\r\n paths = [os.path.dirname(p) for p in __path__]\r\n> assert req.is_present(paths)\r\nE AssertionError: assert False\r\nE + where False = <bound method Require.is_present of <setuptools.depends.Require object at 0x7fbfae0d0b38>>(['/tmp/portage/dev-python/setuptools-41.6.0/work/setuptools-41.6.0-python3_5/setuptools'])\r\nE + where <bound method Require.is_present of <setuptools.depends.Require object at 0x7fbfae0d0b38>> = <setuptools.depends.Require object at 0x7fbfae0d0b38>.is_present\r\n\r\nsetuptools/tests/test_setuptools.py:120: AssertionError\r\n```\r\n\r\nI can reproduce it reliably with at least pypy3.6 (7.2.0) & python3.5 (3.5.7). 
I haven't tested other versions yet.\r\n\r\nFull build log: [dev-python:setuptools-41.6.0:20191030-083347.log](https://github.com/pypa/setuptools/files/3787797/dev-python.setuptools-41.6.0.20191030-083347.log)\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nRe-implementation of find_module and get_frozen_object\nfrom the deprecated imp module.\n\"\"\"\n\nimport os\nimport importlib.util\nimport importlib.machinery\n\nfrom .py34compat import module_from_spec\n\n\nPY_SOURCE = 1\nPY_COMPILED = 2\nC_EXTENSION = 3\nC_BUILTIN = 6\nPY_FROZEN = 7\n\n\ndef find_module(module, paths=None):\n \"\"\"Just like 'imp.find_module()', but with package support\"\"\"\n spec = importlib.util.find_spec(module, paths)\n if spec is None:\n raise ImportError(\"Can't find %s\" % module)\n if not spec.has_location and hasattr(spec, 'submodule_search_locations'):\n spec = importlib.util.spec_from_loader('__init__.py', spec.loader)\n\n kind = -1\n file = None\n static = isinstance(spec.loader, type)\n if spec.origin == 'frozen' or static and issubclass(\n spec.loader, importlib.machinery.FrozenImporter):\n kind = PY_FROZEN\n path = None # imp compabilty\n suffix = mode = '' # imp compability\n elif spec.origin == 'built-in' or static and issubclass(\n spec.loader, importlib.machinery.BuiltinImporter):\n kind = C_BUILTIN\n path = None # imp compabilty\n suffix = mode = '' # imp compability\n elif spec.has_location:\n path = spec.origin\n suffix = os.path.splitext(path)[1]\n mode = 'r' if suffix in importlib.machinery.SOURCE_SUFFIXES else 'rb'\n\n if suffix in importlib.machinery.SOURCE_SUFFIXES:\n kind = PY_SOURCE\n elif suffix in importlib.machinery.BYTECODE_SUFFIXES:\n kind = PY_COMPILED\n elif suffix in importlib.machinery.EXTENSION_SUFFIXES:\n kind = C_EXTENSION\n\n if kind in {PY_SOURCE, PY_COMPILED}:\n file = open(path, mode)\n else:\n path = None\n suffix = mode = ''\n\n return file, path, (suffix, mode, kind)\n\n\ndef get_frozen_object(module, paths=None):\n spec = importlib.util.find_spec(module, paths)\n if not spec:\n raise ImportError(\"Can't find %s\" % module)\n return spec.loader.get_code(module)\n\n\ndef get_module(module, paths, info):\n spec = importlib.util.find_spec(module, paths)\n if not spec:\n raise ImportError(\"Can't find %s\" % module)\n return module_from_spec(spec)\n", "path": "setuptools/_imp.py"}], "after_files": [{"content": "\"\"\"\nRe-implementation of find_module and get_frozen_object\nfrom the deprecated imp module.\n\"\"\"\n\nimport os\nimport importlib.util\nimport importlib.machinery\n\nfrom .py34compat import module_from_spec\n\n\nPY_SOURCE = 1\nPY_COMPILED = 2\nC_EXTENSION = 3\nC_BUILTIN = 6\nPY_FROZEN = 7\n\n\ndef find_spec(module, paths):\n finder = (\n importlib.machinery.PathFinder().find_spec\n if isinstance(paths, list) else\n importlib.util.find_spec\n )\n return finder(module, paths)\n\n\ndef find_module(module, paths=None):\n \"\"\"Just like 'imp.find_module()', but with package support\"\"\"\n spec = find_spec(module, paths)\n if spec is None:\n raise ImportError(\"Can't find %s\" % module)\n if not spec.has_location and hasattr(spec, 'submodule_search_locations'):\n spec = importlib.util.spec_from_loader('__init__.py', spec.loader)\n\n kind = -1\n file = None\n static = isinstance(spec.loader, type)\n if spec.origin == 'frozen' or static and issubclass(\n spec.loader, importlib.machinery.FrozenImporter):\n kind = PY_FROZEN\n path = None # imp compabilty\n suffix = mode = '' # imp compability\n elif spec.origin == 'built-in' or static and issubclass(\n spec.loader, 
importlib.machinery.BuiltinImporter):\n kind = C_BUILTIN\n path = None # imp compabilty\n suffix = mode = '' # imp compability\n elif spec.has_location:\n path = spec.origin\n suffix = os.path.splitext(path)[1]\n mode = 'r' if suffix in importlib.machinery.SOURCE_SUFFIXES else 'rb'\n\n if suffix in importlib.machinery.SOURCE_SUFFIXES:\n kind = PY_SOURCE\n elif suffix in importlib.machinery.BYTECODE_SUFFIXES:\n kind = PY_COMPILED\n elif suffix in importlib.machinery.EXTENSION_SUFFIXES:\n kind = C_EXTENSION\n\n if kind in {PY_SOURCE, PY_COMPILED}:\n file = open(path, mode)\n else:\n path = None\n suffix = mode = ''\n\n return file, path, (suffix, mode, kind)\n\n\ndef get_frozen_object(module, paths=None):\n spec = find_spec(module, paths)\n if not spec:\n raise ImportError(\"Can't find %s\" % module)\n return spec.loader.get_code(module)\n\n\ndef get_module(module, paths, info):\n spec = find_spec(module, paths)\n if not spec:\n raise ImportError(\"Can't find %s\" % module)\n return module_from_spec(spec)\n", "path": "setuptools/_imp.py"}]}
1,659
323
gh_patches_debug_27632
rasdani/github-patches
git_diff
AUTOMATIC1111__stable-diffusion-webui-3364
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [Bug]: TypeError: 'NoneType' object is not subscriptable when using img2img alternative test ### Is there an existing issue for this? - [X] I have searched the existing issues and checked the recent builds/commits ### What happened? Webui fails to generate an image if using the img2img alternative test since the runway inpainting support update. Unrelated to this, it does not support the unlimited tokens either. ### Steps to reproduce the problem 1. Go to imgimg 2. Choose img2img alternative test 3. Upload image and write prompt, parameters, settings, etc. 4. Generate. ### What should have happened? An image should generate. ### Commit where the problem happens bf30673f5132c8f28357b31224c54331e788d3e7 ### What platforms do you use to access UI ? Windows ### What browsers do you use to access the UI ? Google Chrome ### Command Line Arguments ```Shell --deepdanbooru --xformers --gradio-img2img-tool color-sketch ``` ### Additional information, context and logs Traceback (most recent call last): File "G:\stable-webui\modules\ui.py", line 212, in f res = list(func(*args, **kwargs)) File "G:\stable-webui\webui.py", line 63, in f res = func(*args, **kwargs) File "G:\stable-webui\modules\img2img.py", line 124, in img2img processed = modules.scripts.scripts_img2img.run(p, *args) File "G:\stable-webui\modules\scripts.py", line 173, in run processed = script.run(p, *script_args) File "G:\stable-webui\scripts\img2imgalt.py", line 208, in run processed = processing.process_images(p) File "G:\stable-webui\modules\processing.py", line 411, in process_images samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength) File "G:\stable-webui\scripts\img2imgalt.py", line 197, in sample_extra return sampler.sample_img2img(p, p.init_latent, noise_dt, conditioning, unconditional_conditioning) File "G:\stable-webui\modules\sd_samplers.py", line 423, in sample_img2img samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, xi, extra_args={ File "G:\stable-webui\modules\sd_samplers.py", line 356, in launch_sampling return func() File "G:\stable-webui\modules\sd_samplers.py", line 423, in <lambda> samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, xi, extra_args={ File "G:\stable-webui\venv\lib\site-packages\torch\autograd\grad_mode.py", line 27, in decorate_context return func(*args, **kwargs) File "G:\stable-webui\repositories\k-diffusion\k_diffusion\sampling.py", line 64, in sample_euler denoised = model(x, sigma_hat * s_in, **extra_args) File "G:\stable-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1130, in _call_impl return forward_call(*input, **kwargs) File "G:\stable-webui\modules\sd_samplers.py", line 269, in forward image_cond_in = torch.cat([torch.stack([image_cond[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [image_cond]) File "G:\stable-webui\modules\sd_samplers.py", line 269, in <listcomp> image_cond_in = torch.cat([torch.stack([image_cond[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [image_cond]) File "G:\stable-webui\modules\sd_samplers.py", line 269, in <listcomp> image_cond_in = torch.cat([torch.stack([image_cond[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [image_cond]) TypeError: 'NoneType' object is not subscriptable --- END ISSUE --- Below are some code segments, each from a relevant file. 
One or more of these files may contain bugs. --- BEGIN FILES --- Path: `scripts/img2imgalt.py` Content: ``` 1 from collections import namedtuple 2 3 import numpy as np 4 from tqdm import trange 5 6 import modules.scripts as scripts 7 import gradio as gr 8 9 from modules import processing, shared, sd_samplers, prompt_parser 10 from modules.processing import Processed 11 from modules.shared import opts, cmd_opts, state 12 13 import torch 14 import k_diffusion as K 15 16 from PIL import Image 17 from torch import autocast 18 from einops import rearrange, repeat 19 20 21 def find_noise_for_image(p, cond, uncond, cfg_scale, steps): 22 x = p.init_latent 23 24 s_in = x.new_ones([x.shape[0]]) 25 dnw = K.external.CompVisDenoiser(shared.sd_model) 26 sigmas = dnw.get_sigmas(steps).flip(0) 27 28 shared.state.sampling_steps = steps 29 30 for i in trange(1, len(sigmas)): 31 shared.state.sampling_step += 1 32 33 x_in = torch.cat([x] * 2) 34 sigma_in = torch.cat([sigmas[i] * s_in] * 2) 35 cond_in = torch.cat([uncond, cond]) 36 37 c_out, c_in = [K.utils.append_dims(k, x_in.ndim) for k in dnw.get_scalings(sigma_in)] 38 t = dnw.sigma_to_t(sigma_in) 39 40 eps = shared.sd_model.apply_model(x_in * c_in, t, cond=cond_in) 41 denoised_uncond, denoised_cond = (x_in + eps * c_out).chunk(2) 42 43 denoised = denoised_uncond + (denoised_cond - denoised_uncond) * cfg_scale 44 45 d = (x - denoised) / sigmas[i] 46 dt = sigmas[i] - sigmas[i - 1] 47 48 x = x + d * dt 49 50 sd_samplers.store_latent(x) 51 52 # This shouldn't be necessary, but solved some VRAM issues 53 del x_in, sigma_in, cond_in, c_out, c_in, t, 54 del eps, denoised_uncond, denoised_cond, denoised, d, dt 55 56 shared.state.nextjob() 57 58 return x / x.std() 59 60 61 Cached = namedtuple("Cached", ["noise", "cfg_scale", "steps", "latent", "original_prompt", "original_negative_prompt", "sigma_adjustment"]) 62 63 64 # Based on changes suggested by briansemrau in https://github.com/AUTOMATIC1111/stable-diffusion-webui/issues/736 65 def find_noise_for_image_sigma_adjustment(p, cond, uncond, cfg_scale, steps): 66 x = p.init_latent 67 68 s_in = x.new_ones([x.shape[0]]) 69 dnw = K.external.CompVisDenoiser(shared.sd_model) 70 sigmas = dnw.get_sigmas(steps).flip(0) 71 72 shared.state.sampling_steps = steps 73 74 for i in trange(1, len(sigmas)): 75 shared.state.sampling_step += 1 76 77 x_in = torch.cat([x] * 2) 78 sigma_in = torch.cat([sigmas[i - 1] * s_in] * 2) 79 cond_in = torch.cat([uncond, cond]) 80 81 c_out, c_in = [K.utils.append_dims(k, x_in.ndim) for k in dnw.get_scalings(sigma_in)] 82 83 if i == 1: 84 t = dnw.sigma_to_t(torch.cat([sigmas[i] * s_in] * 2)) 85 else: 86 t = dnw.sigma_to_t(sigma_in) 87 88 eps = shared.sd_model.apply_model(x_in * c_in, t, cond=cond_in) 89 denoised_uncond, denoised_cond = (x_in + eps * c_out).chunk(2) 90 91 denoised = denoised_uncond + (denoised_cond - denoised_uncond) * cfg_scale 92 93 if i == 1: 94 d = (x - denoised) / (2 * sigmas[i]) 95 else: 96 d = (x - denoised) / sigmas[i - 1] 97 98 dt = sigmas[i] - sigmas[i - 1] 99 x = x + d * dt 100 101 sd_samplers.store_latent(x) 102 103 # This shouldn't be necessary, but solved some VRAM issues 104 del x_in, sigma_in, cond_in, c_out, c_in, t, 105 del eps, denoised_uncond, denoised_cond, denoised, d, dt 106 107 shared.state.nextjob() 108 109 return x / sigmas[-1] 110 111 112 class Script(scripts.Script): 113 def __init__(self): 114 self.cache = None 115 116 def title(self): 117 return "img2img alternative test" 118 119 def show(self, is_img2img): 120 return is_img2img 121 122 def ui(self, 
is_img2img): 123 info = gr.Markdown(''' 124 * `CFG Scale` should be 2 or lower. 125 ''') 126 127 override_sampler = gr.Checkbox(label="Override `Sampling method` to Euler?(this method is built for it)", value=True) 128 129 override_prompt = gr.Checkbox(label="Override `prompt` to the same value as `original prompt`?(and `negative prompt`)", value=True) 130 original_prompt = gr.Textbox(label="Original prompt", lines=1) 131 original_negative_prompt = gr.Textbox(label="Original negative prompt", lines=1) 132 133 override_steps = gr.Checkbox(label="Override `Sampling Steps` to the same value as `Decode steps`?", value=True) 134 st = gr.Slider(label="Decode steps", minimum=1, maximum=150, step=1, value=50) 135 136 override_strength = gr.Checkbox(label="Override `Denoising strength` to 1?", value=True) 137 138 cfg = gr.Slider(label="Decode CFG scale", minimum=0.0, maximum=15.0, step=0.1, value=1.0) 139 randomness = gr.Slider(label="Randomness", minimum=0.0, maximum=1.0, step=0.01, value=0.0) 140 sigma_adjustment = gr.Checkbox(label="Sigma adjustment for finding noise for image", value=False) 141 142 return [ 143 info, 144 override_sampler, 145 override_prompt, original_prompt, original_negative_prompt, 146 override_steps, st, 147 override_strength, 148 cfg, randomness, sigma_adjustment, 149 ] 150 151 def run(self, p, _, override_sampler, override_prompt, original_prompt, original_negative_prompt, override_steps, st, override_strength, cfg, randomness, sigma_adjustment): 152 # Override 153 if override_sampler: 154 p.sampler_index = [sampler.name for sampler in sd_samplers.samplers].index("Euler") 155 if override_prompt: 156 p.prompt = original_prompt 157 p.negative_prompt = original_negative_prompt 158 if override_steps: 159 p.steps = st 160 if override_strength: 161 p.denoising_strength = 1.0 162 163 164 def sample_extra(conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength): 165 lat = (p.init_latent.cpu().numpy() * 10).astype(int) 166 167 same_params = self.cache is not None and self.cache.cfg_scale == cfg and self.cache.steps == st \ 168 and self.cache.original_prompt == original_prompt \ 169 and self.cache.original_negative_prompt == original_negative_prompt \ 170 and self.cache.sigma_adjustment == sigma_adjustment 171 same_everything = same_params and self.cache.latent.shape == lat.shape and np.abs(self.cache.latent-lat).sum() < 100 172 173 if same_everything: 174 rec_noise = self.cache.noise 175 else: 176 shared.state.job_count += 1 177 cond = p.sd_model.get_learned_conditioning(p.batch_size * [original_prompt]) 178 uncond = p.sd_model.get_learned_conditioning(p.batch_size * [original_negative_prompt]) 179 if sigma_adjustment: 180 rec_noise = find_noise_for_image_sigma_adjustment(p, cond, uncond, cfg, st) 181 else: 182 rec_noise = find_noise_for_image(p, cond, uncond, cfg, st) 183 self.cache = Cached(rec_noise, cfg, st, lat, original_prompt, original_negative_prompt, sigma_adjustment) 184 185 rand_noise = processing.create_random_tensors(p.init_latent.shape[1:], seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength, seed_resize_from_h=p.seed_resize_from_h, seed_resize_from_w=p.seed_resize_from_w, p=p) 186 187 combined_noise = ((1 - randomness) * rec_noise + randomness * rand_noise) / ((randomness**2 + (1-randomness)**2) ** 0.5) 188 189 sampler = sd_samplers.create_sampler_with_index(sd_samplers.samplers, p.sampler_index, p.sd_model) 190 191 sigmas = sampler.model_wrap.get_sigmas(p.steps) 192 193 noise_dt = combined_noise - (p.init_latent / sigmas[0]) 194 
195 p.seed = p.seed + 1 196 197 return sampler.sample_img2img(p, p.init_latent, noise_dt, conditioning, unconditional_conditioning) 198 199 p.sample = sample_extra 200 201 p.extra_generation_params["Decode prompt"] = original_prompt 202 p.extra_generation_params["Decode negative prompt"] = original_negative_prompt 203 p.extra_generation_params["Decode CFG scale"] = cfg 204 p.extra_generation_params["Decode steps"] = st 205 p.extra_generation_params["Randomness"] = randomness 206 p.extra_generation_params["Sigma Adjustment"] = sigma_adjustment 207 208 processed = processing.process_images(p) 209 210 return processed 211 212 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/scripts/img2imgalt.py b/scripts/img2imgalt.py --- a/scripts/img2imgalt.py +++ b/scripts/img2imgalt.py @@ -34,6 +34,9 @@ sigma_in = torch.cat([sigmas[i] * s_in] * 2) cond_in = torch.cat([uncond, cond]) + image_conditioning = torch.cat([p.image_conditioning] * 2) + cond_in = {"c_concat": [image_conditioning], "c_crossattn": [cond_in]} + c_out, c_in = [K.utils.append_dims(k, x_in.ndim) for k in dnw.get_scalings(sigma_in)] t = dnw.sigma_to_t(sigma_in) @@ -78,6 +81,9 @@ sigma_in = torch.cat([sigmas[i - 1] * s_in] * 2) cond_in = torch.cat([uncond, cond]) + image_conditioning = torch.cat([p.image_conditioning] * 2) + cond_in = {"c_concat": [image_conditioning], "c_crossattn": [cond_in]} + c_out, c_in = [K.utils.append_dims(k, x_in.ndim) for k in dnw.get_scalings(sigma_in)] if i == 1: @@ -194,7 +200,7 @@ p.seed = p.seed + 1 - return sampler.sample_img2img(p, p.init_latent, noise_dt, conditioning, unconditional_conditioning) + return sampler.sample_img2img(p, p.init_latent, noise_dt, conditioning, unconditional_conditioning, image_conditioning=p.image_conditioning) p.sample = sample_extra
{"golden_diff": "diff --git a/scripts/img2imgalt.py b/scripts/img2imgalt.py\n--- a/scripts/img2imgalt.py\n+++ b/scripts/img2imgalt.py\n@@ -34,6 +34,9 @@\n sigma_in = torch.cat([sigmas[i] * s_in] * 2)\r\n cond_in = torch.cat([uncond, cond])\r\n \r\n+ image_conditioning = torch.cat([p.image_conditioning] * 2)\r\n+ cond_in = {\"c_concat\": [image_conditioning], \"c_crossattn\": [cond_in]}\r\n+\r\n c_out, c_in = [K.utils.append_dims(k, x_in.ndim) for k in dnw.get_scalings(sigma_in)]\r\n t = dnw.sigma_to_t(sigma_in)\r\n \r\n@@ -78,6 +81,9 @@\n sigma_in = torch.cat([sigmas[i - 1] * s_in] * 2)\r\n cond_in = torch.cat([uncond, cond])\r\n \r\n+ image_conditioning = torch.cat([p.image_conditioning] * 2)\r\n+ cond_in = {\"c_concat\": [image_conditioning], \"c_crossattn\": [cond_in]}\r\n+\r\n c_out, c_in = [K.utils.append_dims(k, x_in.ndim) for k in dnw.get_scalings(sigma_in)]\r\n \r\n if i == 1:\r\n@@ -194,7 +200,7 @@\n \r\n p.seed = p.seed + 1\r\n \r\n- return sampler.sample_img2img(p, p.init_latent, noise_dt, conditioning, unconditional_conditioning)\r\n+ return sampler.sample_img2img(p, p.init_latent, noise_dt, conditioning, unconditional_conditioning, image_conditioning=p.image_conditioning)\r\n \r\n p.sample = sample_extra\n", "issue": "[Bug]: TypeError: 'NoneType' object is not subscriptable when using img2img alternative test\n### Is there an existing issue for this?\n\n- [X] I have searched the existing issues and checked the recent builds/commits\n\n### What happened?\n\nWebui fails to generate an image if using the img2img alternative test since the runway inpainting support update.\r\nUnrelated to this, it does not support the unlimited tokens either.\n\n### Steps to reproduce the problem\n\n1. Go to imgimg\r\n2. Choose img2img alternative test\r\n3. Upload image and write prompt, parameters, settings, etc.\r\n4. 
Generate.\r\n\n\n### What should have happened?\n\nAn image should generate.\n\n### Commit where the problem happens\n\nbf30673f5132c8f28357b31224c54331e788d3e7\n\n### What platforms do you use to access UI ?\n\nWindows\n\n### What browsers do you use to access the UI ?\n\nGoogle Chrome\n\n### Command Line Arguments\n\n```Shell\n--deepdanbooru --xformers --gradio-img2img-tool color-sketch\n```\n\n\n### Additional information, context and logs\n\nTraceback (most recent call last):\r\n File \"G:\\stable-webui\\modules\\ui.py\", line 212, in f\r\n res = list(func(*args, **kwargs))\r\n File \"G:\\stable-webui\\webui.py\", line 63, in f\r\n res = func(*args, **kwargs)\r\n File \"G:\\stable-webui\\modules\\img2img.py\", line 124, in img2img\r\n processed = modules.scripts.scripts_img2img.run(p, *args)\r\n File \"G:\\stable-webui\\modules\\scripts.py\", line 173, in run\r\n processed = script.run(p, *script_args)\r\n File \"G:\\stable-webui\\scripts\\img2imgalt.py\", line 208, in run\r\n processed = processing.process_images(p)\r\n File \"G:\\stable-webui\\modules\\processing.py\", line 411, in process_images\r\n samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength)\r\n File \"G:\\stable-webui\\scripts\\img2imgalt.py\", line 197, in sample_extra\r\n return sampler.sample_img2img(p, p.init_latent, noise_dt, conditioning, unconditional_conditioning)\r\n File \"G:\\stable-webui\\modules\\sd_samplers.py\", line 423, in sample_img2img\r\n samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, xi, extra_args={\r\n File \"G:\\stable-webui\\modules\\sd_samplers.py\", line 356, in launch_sampling\r\n return func()\r\n File \"G:\\stable-webui\\modules\\sd_samplers.py\", line 423, in <lambda>\r\n samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, xi, extra_args={\r\n File \"G:\\stable-webui\\venv\\lib\\site-packages\\torch\\autograd\\grad_mode.py\", line 27, in decorate_context\r\n return func(*args, **kwargs)\r\n File \"G:\\stable-webui\\repositories\\k-diffusion\\k_diffusion\\sampling.py\", line 64, in sample_euler\r\n denoised = model(x, sigma_hat * s_in, **extra_args)\r\n File \"G:\\stable-webui\\venv\\lib\\site-packages\\torch\\nn\\modules\\module.py\", line 1130, in _call_impl\r\n return forward_call(*input, **kwargs)\r\n File \"G:\\stable-webui\\modules\\sd_samplers.py\", line 269, in forward\r\n image_cond_in = torch.cat([torch.stack([image_cond[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [image_cond])\r\n File \"G:\\stable-webui\\modules\\sd_samplers.py\", line 269, in <listcomp>\r\n image_cond_in = torch.cat([torch.stack([image_cond[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [image_cond])\r\n File \"G:\\stable-webui\\modules\\sd_samplers.py\", line 269, in <listcomp>\r\n image_cond_in = torch.cat([torch.stack([image_cond[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [image_cond])\r\nTypeError: 'NoneType' object is not subscriptable\n", "before_files": [{"content": "from collections import namedtuple\r\n\r\nimport numpy as np\r\nfrom tqdm import trange\r\n\r\nimport modules.scripts as scripts\r\nimport gradio as gr\r\n\r\nfrom modules import processing, shared, sd_samplers, prompt_parser\r\nfrom modules.processing import Processed\r\nfrom modules.shared import opts, cmd_opts, state\r\n\r\nimport torch\r\nimport k_diffusion as K\r\n\r\nfrom PIL import Image\r\nfrom torch import autocast\r\nfrom einops import rearrange, 
repeat\r\n\r\n\r\ndef find_noise_for_image(p, cond, uncond, cfg_scale, steps):\r\n x = p.init_latent\r\n\r\n s_in = x.new_ones([x.shape[0]])\r\n dnw = K.external.CompVisDenoiser(shared.sd_model)\r\n sigmas = dnw.get_sigmas(steps).flip(0)\r\n\r\n shared.state.sampling_steps = steps\r\n\r\n for i in trange(1, len(sigmas)):\r\n shared.state.sampling_step += 1\r\n\r\n x_in = torch.cat([x] * 2)\r\n sigma_in = torch.cat([sigmas[i] * s_in] * 2)\r\n cond_in = torch.cat([uncond, cond])\r\n\r\n c_out, c_in = [K.utils.append_dims(k, x_in.ndim) for k in dnw.get_scalings(sigma_in)]\r\n t = dnw.sigma_to_t(sigma_in)\r\n\r\n eps = shared.sd_model.apply_model(x_in * c_in, t, cond=cond_in)\r\n denoised_uncond, denoised_cond = (x_in + eps * c_out).chunk(2)\r\n\r\n denoised = denoised_uncond + (denoised_cond - denoised_uncond) * cfg_scale\r\n\r\n d = (x - denoised) / sigmas[i]\r\n dt = sigmas[i] - sigmas[i - 1]\r\n\r\n x = x + d * dt\r\n\r\n sd_samplers.store_latent(x)\r\n\r\n # This shouldn't be necessary, but solved some VRAM issues\r\n del x_in, sigma_in, cond_in, c_out, c_in, t,\r\n del eps, denoised_uncond, denoised_cond, denoised, d, dt\r\n\r\n shared.state.nextjob()\r\n\r\n return x / x.std()\r\n\r\n\r\nCached = namedtuple(\"Cached\", [\"noise\", \"cfg_scale\", \"steps\", \"latent\", \"original_prompt\", \"original_negative_prompt\", \"sigma_adjustment\"])\r\n\r\n\r\n# Based on changes suggested by briansemrau in https://github.com/AUTOMATIC1111/stable-diffusion-webui/issues/736\r\ndef find_noise_for_image_sigma_adjustment(p, cond, uncond, cfg_scale, steps):\r\n x = p.init_latent\r\n\r\n s_in = x.new_ones([x.shape[0]])\r\n dnw = K.external.CompVisDenoiser(shared.sd_model)\r\n sigmas = dnw.get_sigmas(steps).flip(0)\r\n\r\n shared.state.sampling_steps = steps\r\n\r\n for i in trange(1, len(sigmas)):\r\n shared.state.sampling_step += 1\r\n\r\n x_in = torch.cat([x] * 2)\r\n sigma_in = torch.cat([sigmas[i - 1] * s_in] * 2)\r\n cond_in = torch.cat([uncond, cond])\r\n\r\n c_out, c_in = [K.utils.append_dims(k, x_in.ndim) for k in dnw.get_scalings(sigma_in)]\r\n\r\n if i == 1:\r\n t = dnw.sigma_to_t(torch.cat([sigmas[i] * s_in] * 2))\r\n else:\r\n t = dnw.sigma_to_t(sigma_in)\r\n\r\n eps = shared.sd_model.apply_model(x_in * c_in, t, cond=cond_in)\r\n denoised_uncond, denoised_cond = (x_in + eps * c_out).chunk(2)\r\n\r\n denoised = denoised_uncond + (denoised_cond - denoised_uncond) * cfg_scale\r\n\r\n if i == 1:\r\n d = (x - denoised) / (2 * sigmas[i])\r\n else:\r\n d = (x - denoised) / sigmas[i - 1]\r\n\r\n dt = sigmas[i] - sigmas[i - 1]\r\n x = x + d * dt\r\n\r\n sd_samplers.store_latent(x)\r\n\r\n # This shouldn't be necessary, but solved some VRAM issues\r\n del x_in, sigma_in, cond_in, c_out, c_in, t,\r\n del eps, denoised_uncond, denoised_cond, denoised, d, dt\r\n\r\n shared.state.nextjob()\r\n\r\n return x / sigmas[-1]\r\n\r\n\r\nclass Script(scripts.Script):\r\n def __init__(self):\r\n self.cache = None\r\n\r\n def title(self):\r\n return \"img2img alternative test\"\r\n\r\n def show(self, is_img2img):\r\n return is_img2img\r\n\r\n def ui(self, is_img2img):\r\n info = gr.Markdown('''\r\n * `CFG Scale` should be 2 or lower.\r\n ''')\r\n\r\n override_sampler = gr.Checkbox(label=\"Override `Sampling method` to Euler?(this method is built for it)\", value=True)\r\n\r\n override_prompt = gr.Checkbox(label=\"Override `prompt` to the same value as `original prompt`?(and `negative prompt`)\", value=True)\r\n original_prompt = gr.Textbox(label=\"Original prompt\", lines=1)\r\n original_negative_prompt = 
gr.Textbox(label=\"Original negative prompt\", lines=1)\r\n\r\n override_steps = gr.Checkbox(label=\"Override `Sampling Steps` to the same value as `Decode steps`?\", value=True)\r\n st = gr.Slider(label=\"Decode steps\", minimum=1, maximum=150, step=1, value=50)\r\n\r\n override_strength = gr.Checkbox(label=\"Override `Denoising strength` to 1?\", value=True)\r\n\r\n cfg = gr.Slider(label=\"Decode CFG scale\", minimum=0.0, maximum=15.0, step=0.1, value=1.0)\r\n randomness = gr.Slider(label=\"Randomness\", minimum=0.0, maximum=1.0, step=0.01, value=0.0)\r\n sigma_adjustment = gr.Checkbox(label=\"Sigma adjustment for finding noise for image\", value=False)\r\n\r\n return [\r\n info, \r\n override_sampler,\r\n override_prompt, original_prompt, original_negative_prompt, \r\n override_steps, st,\r\n override_strength,\r\n cfg, randomness, sigma_adjustment,\r\n ]\r\n\r\n def run(self, p, _, override_sampler, override_prompt, original_prompt, original_negative_prompt, override_steps, st, override_strength, cfg, randomness, sigma_adjustment):\r\n # Override\r\n if override_sampler:\r\n p.sampler_index = [sampler.name for sampler in sd_samplers.samplers].index(\"Euler\")\r\n if override_prompt:\r\n p.prompt = original_prompt\r\n p.negative_prompt = original_negative_prompt\r\n if override_steps:\r\n p.steps = st\r\n if override_strength:\r\n p.denoising_strength = 1.0\r\n\r\n\r\n def sample_extra(conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength):\r\n lat = (p.init_latent.cpu().numpy() * 10).astype(int)\r\n\r\n same_params = self.cache is not None and self.cache.cfg_scale == cfg and self.cache.steps == st \\\r\n and self.cache.original_prompt == original_prompt \\\r\n and self.cache.original_negative_prompt == original_negative_prompt \\\r\n and self.cache.sigma_adjustment == sigma_adjustment\r\n same_everything = same_params and self.cache.latent.shape == lat.shape and np.abs(self.cache.latent-lat).sum() < 100\r\n\r\n if same_everything:\r\n rec_noise = self.cache.noise\r\n else:\r\n shared.state.job_count += 1\r\n cond = p.sd_model.get_learned_conditioning(p.batch_size * [original_prompt])\r\n uncond = p.sd_model.get_learned_conditioning(p.batch_size * [original_negative_prompt])\r\n if sigma_adjustment:\r\n rec_noise = find_noise_for_image_sigma_adjustment(p, cond, uncond, cfg, st)\r\n else:\r\n rec_noise = find_noise_for_image(p, cond, uncond, cfg, st)\r\n self.cache = Cached(rec_noise, cfg, st, lat, original_prompt, original_negative_prompt, sigma_adjustment)\r\n\r\n rand_noise = processing.create_random_tensors(p.init_latent.shape[1:], seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength, seed_resize_from_h=p.seed_resize_from_h, seed_resize_from_w=p.seed_resize_from_w, p=p)\r\n \r\n combined_noise = ((1 - randomness) * rec_noise + randomness * rand_noise) / ((randomness**2 + (1-randomness)**2) ** 0.5)\r\n \r\n sampler = sd_samplers.create_sampler_with_index(sd_samplers.samplers, p.sampler_index, p.sd_model)\r\n\r\n sigmas = sampler.model_wrap.get_sigmas(p.steps)\r\n \r\n noise_dt = combined_noise - (p.init_latent / sigmas[0])\r\n \r\n p.seed = p.seed + 1\r\n \r\n return sampler.sample_img2img(p, p.init_latent, noise_dt, conditioning, unconditional_conditioning)\r\n\r\n p.sample = sample_extra\r\n\r\n p.extra_generation_params[\"Decode prompt\"] = original_prompt\r\n p.extra_generation_params[\"Decode negative prompt\"] = original_negative_prompt\r\n p.extra_generation_params[\"Decode CFG scale\"] = cfg\r\n p.extra_generation_params[\"Decode steps\"] = 
st\r\n p.extra_generation_params[\"Randomness\"] = randomness\r\n p.extra_generation_params[\"Sigma Adjustment\"] = sigma_adjustment\r\n\r\n processed = processing.process_images(p)\r\n\r\n return processed\r\n\r\n", "path": "scripts/img2imgalt.py"}], "after_files": [{"content": "from collections import namedtuple\r\n\r\nimport numpy as np\r\nfrom tqdm import trange\r\n\r\nimport modules.scripts as scripts\r\nimport gradio as gr\r\n\r\nfrom modules import processing, shared, sd_samplers, prompt_parser\r\nfrom modules.processing import Processed\r\nfrom modules.shared import opts, cmd_opts, state\r\n\r\nimport torch\r\nimport k_diffusion as K\r\n\r\nfrom PIL import Image\r\nfrom torch import autocast\r\nfrom einops import rearrange, repeat\r\n\r\n\r\ndef find_noise_for_image(p, cond, uncond, cfg_scale, steps):\r\n x = p.init_latent\r\n\r\n s_in = x.new_ones([x.shape[0]])\r\n dnw = K.external.CompVisDenoiser(shared.sd_model)\r\n sigmas = dnw.get_sigmas(steps).flip(0)\r\n\r\n shared.state.sampling_steps = steps\r\n\r\n for i in trange(1, len(sigmas)):\r\n shared.state.sampling_step += 1\r\n\r\n x_in = torch.cat([x] * 2)\r\n sigma_in = torch.cat([sigmas[i] * s_in] * 2)\r\n cond_in = torch.cat([uncond, cond])\r\n\r\n image_conditioning = torch.cat([p.image_conditioning] * 2)\r\n cond_in = {\"c_concat\": [image_conditioning], \"c_crossattn\": [cond_in]}\r\n\r\n c_out, c_in = [K.utils.append_dims(k, x_in.ndim) for k in dnw.get_scalings(sigma_in)]\r\n t = dnw.sigma_to_t(sigma_in)\r\n\r\n eps = shared.sd_model.apply_model(x_in * c_in, t, cond=cond_in)\r\n denoised_uncond, denoised_cond = (x_in + eps * c_out).chunk(2)\r\n\r\n denoised = denoised_uncond + (denoised_cond - denoised_uncond) * cfg_scale\r\n\r\n d = (x - denoised) / sigmas[i]\r\n dt = sigmas[i] - sigmas[i - 1]\r\n\r\n x = x + d * dt\r\n\r\n sd_samplers.store_latent(x)\r\n\r\n # This shouldn't be necessary, but solved some VRAM issues\r\n del x_in, sigma_in, cond_in, c_out, c_in, t,\r\n del eps, denoised_uncond, denoised_cond, denoised, d, dt\r\n\r\n shared.state.nextjob()\r\n\r\n return x / x.std()\r\n\r\n\r\nCached = namedtuple(\"Cached\", [\"noise\", \"cfg_scale\", \"steps\", \"latent\", \"original_prompt\", \"original_negative_prompt\", \"sigma_adjustment\"])\r\n\r\n\r\n# Based on changes suggested by briansemrau in https://github.com/AUTOMATIC1111/stable-diffusion-webui/issues/736\r\ndef find_noise_for_image_sigma_adjustment(p, cond, uncond, cfg_scale, steps):\r\n x = p.init_latent\r\n\r\n s_in = x.new_ones([x.shape[0]])\r\n dnw = K.external.CompVisDenoiser(shared.sd_model)\r\n sigmas = dnw.get_sigmas(steps).flip(0)\r\n\r\n shared.state.sampling_steps = steps\r\n\r\n for i in trange(1, len(sigmas)):\r\n shared.state.sampling_step += 1\r\n\r\n x_in = torch.cat([x] * 2)\r\n sigma_in = torch.cat([sigmas[i - 1] * s_in] * 2)\r\n cond_in = torch.cat([uncond, cond])\r\n\r\n image_conditioning = torch.cat([p.image_conditioning] * 2)\r\n cond_in = {\"c_concat\": [image_conditioning], \"c_crossattn\": [cond_in]}\r\n\r\n c_out, c_in = [K.utils.append_dims(k, x_in.ndim) for k in dnw.get_scalings(sigma_in)]\r\n\r\n if i == 1:\r\n t = dnw.sigma_to_t(torch.cat([sigmas[i] * s_in] * 2))\r\n else:\r\n t = dnw.sigma_to_t(sigma_in)\r\n\r\n eps = shared.sd_model.apply_model(x_in * c_in, t, cond=cond_in)\r\n denoised_uncond, denoised_cond = (x_in + eps * c_out).chunk(2)\r\n\r\n denoised = denoised_uncond + (denoised_cond - denoised_uncond) * cfg_scale\r\n\r\n if i == 1:\r\n d = (x - denoised) / (2 * sigmas[i])\r\n else:\r\n d = (x - denoised) / sigmas[i - 
1]\r\n\r\n dt = sigmas[i] - sigmas[i - 1]\r\n x = x + d * dt\r\n\r\n sd_samplers.store_latent(x)\r\n\r\n # This shouldn't be necessary, but solved some VRAM issues\r\n del x_in, sigma_in, cond_in, c_out, c_in, t,\r\n del eps, denoised_uncond, denoised_cond, denoised, d, dt\r\n\r\n shared.state.nextjob()\r\n\r\n return x / sigmas[-1]\r\n\r\n\r\nclass Script(scripts.Script):\r\n def __init__(self):\r\n self.cache = None\r\n\r\n def title(self):\r\n return \"img2img alternative test\"\r\n\r\n def show(self, is_img2img):\r\n return is_img2img\r\n\r\n def ui(self, is_img2img):\r\n info = gr.Markdown('''\r\n * `CFG Scale` should be 2 or lower.\r\n ''')\r\n\r\n override_sampler = gr.Checkbox(label=\"Override `Sampling method` to Euler?(this method is built for it)\", value=True)\r\n\r\n override_prompt = gr.Checkbox(label=\"Override `prompt` to the same value as `original prompt`?(and `negative prompt`)\", value=True)\r\n original_prompt = gr.Textbox(label=\"Original prompt\", lines=1)\r\n original_negative_prompt = gr.Textbox(label=\"Original negative prompt\", lines=1)\r\n\r\n override_steps = gr.Checkbox(label=\"Override `Sampling Steps` to the same value as `Decode steps`?\", value=True)\r\n st = gr.Slider(label=\"Decode steps\", minimum=1, maximum=150, step=1, value=50)\r\n\r\n override_strength = gr.Checkbox(label=\"Override `Denoising strength` to 1?\", value=True)\r\n\r\n cfg = gr.Slider(label=\"Decode CFG scale\", minimum=0.0, maximum=15.0, step=0.1, value=1.0)\r\n randomness = gr.Slider(label=\"Randomness\", minimum=0.0, maximum=1.0, step=0.01, value=0.0)\r\n sigma_adjustment = gr.Checkbox(label=\"Sigma adjustment for finding noise for image\", value=False)\r\n\r\n return [\r\n info, \r\n override_sampler,\r\n override_prompt, original_prompt, original_negative_prompt, \r\n override_steps, st,\r\n override_strength,\r\n cfg, randomness, sigma_adjustment,\r\n ]\r\n\r\n def run(self, p, _, override_sampler, override_prompt, original_prompt, original_negative_prompt, override_steps, st, override_strength, cfg, randomness, sigma_adjustment):\r\n # Override\r\n if override_sampler:\r\n p.sampler_index = [sampler.name for sampler in sd_samplers.samplers].index(\"Euler\")\r\n if override_prompt:\r\n p.prompt = original_prompt\r\n p.negative_prompt = original_negative_prompt\r\n if override_steps:\r\n p.steps = st\r\n if override_strength:\r\n p.denoising_strength = 1.0\r\n\r\n\r\n def sample_extra(conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength):\r\n lat = (p.init_latent.cpu().numpy() * 10).astype(int)\r\n\r\n same_params = self.cache is not None and self.cache.cfg_scale == cfg and self.cache.steps == st \\\r\n and self.cache.original_prompt == original_prompt \\\r\n and self.cache.original_negative_prompt == original_negative_prompt \\\r\n and self.cache.sigma_adjustment == sigma_adjustment\r\n same_everything = same_params and self.cache.latent.shape == lat.shape and np.abs(self.cache.latent-lat).sum() < 100\r\n\r\n if same_everything:\r\n rec_noise = self.cache.noise\r\n else:\r\n shared.state.job_count += 1\r\n cond = p.sd_model.get_learned_conditioning(p.batch_size * [original_prompt])\r\n uncond = p.sd_model.get_learned_conditioning(p.batch_size * [original_negative_prompt])\r\n if sigma_adjustment:\r\n rec_noise = find_noise_for_image_sigma_adjustment(p, cond, uncond, cfg, st)\r\n else:\r\n rec_noise = find_noise_for_image(p, cond, uncond, cfg, st)\r\n self.cache = Cached(rec_noise, cfg, st, lat, original_prompt, original_negative_prompt, 
sigma_adjustment)\r\n\r\n rand_noise = processing.create_random_tensors(p.init_latent.shape[1:], seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength, seed_resize_from_h=p.seed_resize_from_h, seed_resize_from_w=p.seed_resize_from_w, p=p)\r\n \r\n combined_noise = ((1 - randomness) * rec_noise + randomness * rand_noise) / ((randomness**2 + (1-randomness)**2) ** 0.5)\r\n \r\n sampler = sd_samplers.create_sampler_with_index(sd_samplers.samplers, p.sampler_index, p.sd_model)\r\n\r\n sigmas = sampler.model_wrap.get_sigmas(p.steps)\r\n \r\n noise_dt = combined_noise - (p.init_latent / sigmas[0])\r\n \r\n p.seed = p.seed + 1\r\n \r\n return sampler.sample_img2img(p, p.init_latent, noise_dt, conditioning, unconditional_conditioning, image_conditioning=p.image_conditioning)\r\n\r\n p.sample = sample_extra\r\n\r\n p.extra_generation_params[\"Decode prompt\"] = original_prompt\r\n p.extra_generation_params[\"Decode negative prompt\"] = original_negative_prompt\r\n p.extra_generation_params[\"Decode CFG scale\"] = cfg\r\n p.extra_generation_params[\"Decode steps\"] = st\r\n p.extra_generation_params[\"Randomness\"] = randomness\r\n p.extra_generation_params[\"Sigma Adjustment\"] = sigma_adjustment\r\n\r\n processed = processing.process_images(p)\r\n\r\n return processed\r\n\r\n", "path": "scripts/img2imgalt.py"}]}
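Comparing the before/after file contents in this record, the substantive change is that the alternative img2img script now forwards the image-conditioning tensor: the noise-reconstruction loops wrap it into a `{"c_concat": ..., "c_crossattn": ...}` conditioning dict, and `sample_extra` passes it through to `sample_img2img`. A minimal sketch of the changed call, using only names that appear in the record and not verified against any particular webui version:

```python
# Before: the per-image conditioning prepared elsewhere in processing was dropped
# when handing control back to the sampler for the decode pass.
return sampler.sample_img2img(p, p.init_latent, noise_dt,
                              conditioning, unconditional_conditioning)

# After: the conditioning is threaded through explicitly.
return sampler.sample_img2img(p, p.init_latent, noise_dt,
                              conditioning, unconditional_conditioning,
                              image_conditioning=p.image_conditioning)
```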
3,939
387
gh_patches_debug_7219
rasdani/github-patches
git_diff
spack__spack-18478
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- nn-c uses invalid self.compiler.pic_flag? (breaks nn-c build, via elmerfem build) These lines fail, because there is no such member, and looking at other packages, it seems that flags like ``` self.compiler.cc_pic_flag self.compiler.cxx_pic_flag self.compiler.fc_pic_flag #or ? self.compiler.f77_pic_flag ``` would be appropriate. https://github.com/spack/spack/blob/601f97d8a50b1840df9b056a34256b6dd2b54ce3/var/spack/repos/builtin/packages/nn-c/package.py#L29-L31 I triggered this on recent `devel` (today) by ``` spack install --test=root elmerfem@devel +mpi +hypre +lua +mumps +openmp +scatt2d +trilinos +zoltan ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `var/spack/repos/builtin/packages/nn-c/package.py` Content: ``` 1 # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other 2 # Spack Project Developers. See the top-level COPYRIGHT file for details. 3 # 4 # SPDX-License-Identifier: (Apache-2.0 OR MIT) 5 6 from spack import * 7 8 9 class NnC(AutotoolsPackage): 10 """nn: Natural Neighbours interpolation. nn is a C code 11 for Natural Neighbours interpolation of 2D scattered data. 12 It provides a C library and a command line utility nnbathy.""" 13 14 homepage = "https://github.com/sakov/nn-c" 15 git = "https://github.com/sakov/nn-c.git" 16 17 version('master', branch='master') 18 version('1.86.2', commit='343c7784d38d3270d75d450569fc0b64767c37e9') 19 20 variant('pic', default=True, 21 description='Produce position-independent code (for shared libs)') 22 23 configure_directory = 'nn' 24 25 def configure_args(self): 26 args = [] 27 if '+pic' in self.spec: 28 args.extend([ 29 'CFLAGS={0}'.format(self.compiler.pic_flag), 30 'CXXFLAGS={0}'.format(self.compiler.pic_flag), 31 'FFLAGS={0}'.format(self.compiler.pic_flag) 32 ]) 33 return args 34 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/var/spack/repos/builtin/packages/nn-c/package.py b/var/spack/repos/builtin/packages/nn-c/package.py --- a/var/spack/repos/builtin/packages/nn-c/package.py +++ b/var/spack/repos/builtin/packages/nn-c/package.py @@ -26,8 +26,8 @@ args = [] if '+pic' in self.spec: args.extend([ - 'CFLAGS={0}'.format(self.compiler.pic_flag), - 'CXXFLAGS={0}'.format(self.compiler.pic_flag), - 'FFLAGS={0}'.format(self.compiler.pic_flag) + 'CFLAGS={0}'.format(self.compiler.cc_pic_flag), + 'CXXFLAGS={0}'.format(self.compiler.cxx_pic_flag), + 'FFLAGS={0}'.format(self.compiler.fc_pic_flag) ]) return args
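The golden diff swaps the nonexistent `self.compiler.pic_flag` attribute for the per-language flags the issue points to. Assembled from the diff, the corrected `configure_args` reads as below; this assumes Spack compiler objects expose `cc_pic_flag`, `cxx_pic_flag` and `fc_pic_flag`, as the issue suggests and the accepted patch relies on:

```python
def configure_args(self):
    args = []
    if '+pic' in self.spec:
        # One PIC flag per language front end; the old code reused a single
        # `pic_flag` attribute that the compiler object does not define.
        args.extend([
            'CFLAGS={0}'.format(self.compiler.cc_pic_flag),
            'CXXFLAGS={0}'.format(self.compiler.cxx_pic_flag),
            'FFLAGS={0}'.format(self.compiler.fc_pic_flag)
        ])
    return args
```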
{"golden_diff": "diff --git a/var/spack/repos/builtin/packages/nn-c/package.py b/var/spack/repos/builtin/packages/nn-c/package.py\n--- a/var/spack/repos/builtin/packages/nn-c/package.py\n+++ b/var/spack/repos/builtin/packages/nn-c/package.py\n@@ -26,8 +26,8 @@\n args = []\n if '+pic' in self.spec:\n args.extend([\n- 'CFLAGS={0}'.format(self.compiler.pic_flag),\n- 'CXXFLAGS={0}'.format(self.compiler.pic_flag),\n- 'FFLAGS={0}'.format(self.compiler.pic_flag)\n+ 'CFLAGS={0}'.format(self.compiler.cc_pic_flag),\n+ 'CXXFLAGS={0}'.format(self.compiler.cxx_pic_flag),\n+ 'FFLAGS={0}'.format(self.compiler.fc_pic_flag)\n ])\n return args\n", "issue": "nn-c uses invalid self.compiler.pic_flag? (breaks nn-c build, via elmerfem build)\nThese lines fail, because there is no such member, and looking at other packages, it seems that flags like\r\n```\r\nself.compiler.cc_pic_flag\r\nself.compiler.cxx_pic_flag\r\nself.compiler.fc_pic_flag\r\n#or ?\r\nself.compiler.f77_pic_flag\r\n```\r\nwould be appropriate.\r\n\r\nhttps://github.com/spack/spack/blob/601f97d8a50b1840df9b056a34256b6dd2b54ce3/var/spack/repos/builtin/packages/nn-c/package.py#L29-L31\r\n\r\nI triggered this on recent `devel` (today) by\r\n```\r\nspack install --test=root elmerfem@devel +mpi +hypre +lua +mumps +openmp +scatt2d +trilinos +zoltan\r\n```\n", "before_files": [{"content": "# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\nfrom spack import *\n\n\nclass NnC(AutotoolsPackage):\n \"\"\"nn: Natural Neighbours interpolation. nn is a C code\n for Natural Neighbours interpolation of 2D scattered data.\n It provides a C library and a command line utility nnbathy.\"\"\"\n\n homepage = \"https://github.com/sakov/nn-c\"\n git = \"https://github.com/sakov/nn-c.git\"\n\n version('master', branch='master')\n version('1.86.2', commit='343c7784d38d3270d75d450569fc0b64767c37e9')\n\n variant('pic', default=True,\n description='Produce position-independent code (for shared libs)')\n\n configure_directory = 'nn'\n\n def configure_args(self):\n args = []\n if '+pic' in self.spec:\n args.extend([\n 'CFLAGS={0}'.format(self.compiler.pic_flag),\n 'CXXFLAGS={0}'.format(self.compiler.pic_flag),\n 'FFLAGS={0}'.format(self.compiler.pic_flag)\n ])\n return args\n", "path": "var/spack/repos/builtin/packages/nn-c/package.py"}], "after_files": [{"content": "# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\nfrom spack import *\n\n\nclass NnC(AutotoolsPackage):\n \"\"\"nn: Natural Neighbours interpolation. 
nn is a C code\n for Natural Neighbours interpolation of 2D scattered data.\n It provides a C library and a command line utility nnbathy.\"\"\"\n\n homepage = \"https://github.com/sakov/nn-c\"\n git = \"https://github.com/sakov/nn-c.git\"\n\n version('master', branch='master')\n version('1.86.2', commit='343c7784d38d3270d75d450569fc0b64767c37e9')\n\n variant('pic', default=True,\n description='Produce position-independent code (for shared libs)')\n\n configure_directory = 'nn'\n\n def configure_args(self):\n args = []\n if '+pic' in self.spec:\n args.extend([\n 'CFLAGS={0}'.format(self.compiler.cc_pic_flag),\n 'CXXFLAGS={0}'.format(self.compiler.cxx_pic_flag),\n 'FFLAGS={0}'.format(self.compiler.fc_pic_flag)\n ])\n return args\n", "path": "var/spack/repos/builtin/packages/nn-c/package.py"}]}
843
187
gh_patches_debug_2611
rasdani/github-patches
git_diff
freedomofpress__securedrop-703
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Don't armor encrypted submissions SecureDrop currently armors encrypted submissions. This bloats the size of stored submissions significantly due to the encoding. For example, a 93 MB upload results in a 125.7 MB submission for the journalist to download. Downloading anything over Tor is very slow (the aforementioned download took me, on average, 9 minutes to download). Therefore, unnecessarily increasing the size of submissions severely impacts usability. There is no reason that I can think of to ascii armor submissions - they are uploaded and downloaded over HTTP, which automatically handles encoding and de-encoding binary data. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `securedrop/crypto_util.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 import os 3 import subprocess 4 from base64 import b32encode 5 6 from Crypto.Random import random 7 import gnupg 8 import scrypt 9 10 import config 11 import store 12 13 # to fix gpg error #78 on production 14 os.environ['USERNAME'] = 'www-data' 15 16 GPG_KEY_TYPE = "RSA" 17 if os.environ.get('SECUREDROP_ENV') == 'test': 18 # Optiimize crypto to speed up tests (at the expense of security - DO NOT 19 # use these settings in production) 20 GPG_KEY_LENGTH = 1024 21 SCRYPT_PARAMS = dict(N=2**1, r=1, p=1) 22 else: 23 GPG_KEY_LENGTH = 4096 24 SCRYPT_PARAMS = config.SCRYPT_PARAMS 25 26 SCRYPT_ID_PEPPER = config.SCRYPT_ID_PEPPER 27 SCRYPT_GPG_PEPPER = config.SCRYPT_GPG_PEPPER 28 29 DEFAULT_WORDS_IN_RANDOM_ID = 8 30 31 # Make sure these pass before the app can run 32 # TODO: Add more tests 33 def do_runtime_tests(): 34 assert(config.SCRYPT_ID_PEPPER != config.SCRYPT_GPG_PEPPER) 35 # crash if we don't have srm: 36 try: 37 subprocess.check_call(['srm'], stdout=subprocess.PIPE) 38 except subprocess.CalledProcessError: 39 pass 40 41 do_runtime_tests() 42 43 GPG_BINARY = 'gpg2' 44 try: 45 p = subprocess.Popen([GPG_BINARY, '--version'], stdout=subprocess.PIPE) 46 except OSError: 47 GPG_BINARY = 'gpg' 48 p = subprocess.Popen([GPG_BINARY, '--version'], stdout=subprocess.PIPE) 49 50 assert p.stdout.readline().split()[ 51 -1].split('.')[0] == '2', "upgrade GPG to 2.0" 52 del p 53 54 gpg = gnupg.GPG(binary=GPG_BINARY, homedir=config.GPG_KEY_DIR) 55 56 words = file(config.WORD_LIST).read().split('\n') 57 nouns = file(config.NOUNS).read().split('\n') 58 adjectives = file(config.ADJECTIVES).read().split('\n') 59 60 61 class CryptoException(Exception): 62 pass 63 64 65 def clean(s, also=''): 66 """ 67 >>> clean("Hello, world!") 68 Traceback (most recent call last): 69 ... 70 CryptoException: invalid input 71 >>> clean("Helloworld") 72 'Helloworld' 73 """ 74 # safe characters for every possible word in the wordlist includes capital 75 # letters because codename hashes are base32-encoded with capital letters 76 ok = ' !#%$&)(+*-1032547698;:=?@acbedgfihkjmlonqpsrutwvyxzABCDEFGHIJKLMNOPQRSTUVWXYZ' 77 for c in s: 78 if c not in ok and c not in also: 79 raise CryptoException("invalid input: %s" % s) 80 # scrypt.hash requires input of type str. 
Since the wordlist is all ASCII 81 # characters, this conversion is not problematic 82 return str(s) 83 84 85 def genrandomid(words_in_random_id=DEFAULT_WORDS_IN_RANDOM_ID): 86 return ' '.join(random.choice(words) for x in range(words_in_random_id)) 87 88 89 def display_id(): 90 return ' '.join([random.choice(adjectives), random.choice(nouns)]) 91 92 93 def hash_codename(codename, salt=SCRYPT_ID_PEPPER): 94 """ 95 >>> hash_codename('Hello, world!') 96 'EQZGCJBRGISGOTC2NZVWG6LILJBHEV3CINNEWSCLLFTUWZLFHBTS6WLCHFHTOLRSGQXUQLRQHFMXKOKKOQ4WQ6SXGZXDAS3Z' 97 """ 98 return b32encode(scrypt.hash(clean(codename), salt, **SCRYPT_PARAMS)) 99 100 101 def genkeypair(name, secret): 102 """ 103 >>> if not gpg.list_keys(hash_codename('randomid')): 104 ... genkeypair(hash_codename('randomid'), 'randomid').type 105 ... else: 106 ... u'P' 107 u'P' 108 """ 109 name = clean(name) 110 secret = hash_codename(secret, salt=SCRYPT_GPG_PEPPER) 111 return gpg.gen_key(gpg.gen_key_input( 112 key_type=GPG_KEY_TYPE, key_length=GPG_KEY_LENGTH, 113 passphrase=secret, 114 name_email=name 115 )) 116 117 118 def delete_reply_keypair(source_id): 119 key = getkey(source_id) 120 # If this source was never flagged for reivew, they won't have a reply keypair 121 if not key: return 122 # The private key needs to be deleted before the public key can be deleted 123 # http://pythonhosted.org/python-gnupg/#deleting-keys 124 gpg.delete_keys(key, True) # private key 125 gpg.delete_keys(key) # public key 126 # TODO: srm? 127 128 129 def getkey(name): 130 for key in gpg.list_keys(): 131 for uid in key['uids']: 132 if name in uid: 133 return key['fingerprint'] 134 return None 135 136 137 def get_key_by_fingerprint(fingerprint): 138 matches = filter(lambda k: k['fingerprint'] == fingerprint, gpg.list_keys()) 139 return matches[0] if matches else None 140 141 142 def encrypt(plaintext, fingerprints, output=None): 143 # Verify the output path 144 if output: 145 store.verify(output) 146 147 # Remove any spaces from provided fingerpints 148 # GPG outputs fingerprints with spaces for readability, but requires the 149 # spaces to be removed when using fingerprints to specify recipients. 150 if not isinstance(fingerprints, (list, tuple)): 151 fingerprints = [fingerprints,] 152 fingerprints = [ fpr.replace(' ', '') for fpr in fingerprints ] 153 154 if isinstance(plaintext, unicode): 155 plaintext = plaintext.encode('utf8') 156 157 encrypt_fn = gpg.encrypt if isinstance(plaintext, str) else gpg.encrypt_file 158 out = encrypt_fn(plaintext, 159 *fingerprints, 160 output=output, 161 always_trust=True) 162 if out.ok: 163 return out.data 164 else: 165 raise CryptoException(out.stderr) 166 167 168 def decrypt(secret, plain_text): 169 """ 170 >>> key = genkeypair('randomid', 'randomid') 171 >>> decrypt('randomid', 'randomid', 172 ... encrypt('randomid', 'Goodbye, cruel world!') 173 ... ) 174 'Goodbye, cruel world!' 175 """ 176 hashed_codename = hash_codename(secret, salt=SCRYPT_GPG_PEPPER) 177 return gpg.decrypt(plain_text, passphrase=hashed_codename).data 178 179 180 if __name__ == "__main__": 181 import doctest 182 doctest.testmod() 183 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/securedrop/crypto_util.py b/securedrop/crypto_util.py --- a/securedrop/crypto_util.py +++ b/securedrop/crypto_util.py @@ -158,7 +158,8 @@ out = encrypt_fn(plaintext, *fingerprints, output=output, - always_trust=True) + always_trust=True, + armor=False) if out.ok: return out.data else:
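The one-line fix simply turns off ASCII armoring on the encryption call. OpenPGP armor is radix-64 (base64 plus headers and a checksum), so an armored ciphertext is roughly a third larger than the binary form, which is consistent with the 93 MB upload becoming a 125.7 MB submission in the report. A sketch of the changed call; the `armor` keyword is taken from the gnupg wrapper already used in this file:

```python
out = encrypt_fn(plaintext,
                 *fingerprints,
                 output=output,
                 always_trust=True,
                 armor=False)  # store binary OpenPGP data; HTTP transports it as-is
```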
{"golden_diff": "diff --git a/securedrop/crypto_util.py b/securedrop/crypto_util.py\n--- a/securedrop/crypto_util.py\n+++ b/securedrop/crypto_util.py\n@@ -158,7 +158,8 @@\n out = encrypt_fn(plaintext,\n *fingerprints,\n output=output,\n- always_trust=True)\n+ always_trust=True,\n+ armor=False)\n if out.ok:\n return out.data\n else:\n", "issue": "Don't armor encrypted submissions\nSecureDrop currently armors encrypted submissions. This bloats the size of stored submissions significantly due to the encoding. For example, a 93 MB upload results in a 125.7 MB submission for the journalist to download.\n\nDownloading anything over Tor is very slow (the aforementioned download took me, on average, 9 minutes to download). Therefore, unnecessarily increasing the size of submissions severely impacts usability. There is no reason that I can think of to ascii armor submissions - they are uploaded and downloaded over HTTP, which automatically handles encoding and de-encoding binary data.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport os\nimport subprocess\nfrom base64 import b32encode\n\nfrom Crypto.Random import random\nimport gnupg\nimport scrypt\n\nimport config\nimport store\n\n# to fix gpg error #78 on production\nos.environ['USERNAME'] = 'www-data'\n\nGPG_KEY_TYPE = \"RSA\"\nif os.environ.get('SECUREDROP_ENV') == 'test':\n # Optiimize crypto to speed up tests (at the expense of security - DO NOT\n # use these settings in production)\n GPG_KEY_LENGTH = 1024\n SCRYPT_PARAMS = dict(N=2**1, r=1, p=1)\nelse:\n GPG_KEY_LENGTH = 4096\n SCRYPT_PARAMS = config.SCRYPT_PARAMS\n\nSCRYPT_ID_PEPPER = config.SCRYPT_ID_PEPPER\nSCRYPT_GPG_PEPPER = config.SCRYPT_GPG_PEPPER\n\nDEFAULT_WORDS_IN_RANDOM_ID = 8\n\n# Make sure these pass before the app can run\n# TODO: Add more tests\ndef do_runtime_tests():\n assert(config.SCRYPT_ID_PEPPER != config.SCRYPT_GPG_PEPPER)\n # crash if we don't have srm:\n try:\n subprocess.check_call(['srm'], stdout=subprocess.PIPE)\n except subprocess.CalledProcessError:\n pass\n\ndo_runtime_tests()\n\nGPG_BINARY = 'gpg2'\ntry:\n p = subprocess.Popen([GPG_BINARY, '--version'], stdout=subprocess.PIPE)\nexcept OSError:\n GPG_BINARY = 'gpg'\n p = subprocess.Popen([GPG_BINARY, '--version'], stdout=subprocess.PIPE)\n\nassert p.stdout.readline().split()[\n -1].split('.')[0] == '2', \"upgrade GPG to 2.0\"\ndel p\n\ngpg = gnupg.GPG(binary=GPG_BINARY, homedir=config.GPG_KEY_DIR)\n\nwords = file(config.WORD_LIST).read().split('\\n')\nnouns = file(config.NOUNS).read().split('\\n')\nadjectives = file(config.ADJECTIVES).read().split('\\n')\n\n\nclass CryptoException(Exception):\n pass\n\n\ndef clean(s, also=''):\n \"\"\"\n >>> clean(\"Hello, world!\")\n Traceback (most recent call last):\n ...\n CryptoException: invalid input\n >>> clean(\"Helloworld\")\n 'Helloworld'\n \"\"\"\n # safe characters for every possible word in the wordlist includes capital\n # letters because codename hashes are base32-encoded with capital letters\n ok = ' !#%$&)(+*-1032547698;:=?@acbedgfihkjmlonqpsrutwvyxzABCDEFGHIJKLMNOPQRSTUVWXYZ'\n for c in s:\n if c not in ok and c not in also:\n raise CryptoException(\"invalid input: %s\" % s)\n # scrypt.hash requires input of type str. 
Since the wordlist is all ASCII\n # characters, this conversion is not problematic\n return str(s)\n\n\ndef genrandomid(words_in_random_id=DEFAULT_WORDS_IN_RANDOM_ID):\n return ' '.join(random.choice(words) for x in range(words_in_random_id))\n\n\ndef display_id():\n return ' '.join([random.choice(adjectives), random.choice(nouns)])\n\n\ndef hash_codename(codename, salt=SCRYPT_ID_PEPPER):\n \"\"\"\n >>> hash_codename('Hello, world!')\n 'EQZGCJBRGISGOTC2NZVWG6LILJBHEV3CINNEWSCLLFTUWZLFHBTS6WLCHFHTOLRSGQXUQLRQHFMXKOKKOQ4WQ6SXGZXDAS3Z'\n \"\"\"\n return b32encode(scrypt.hash(clean(codename), salt, **SCRYPT_PARAMS))\n\n\ndef genkeypair(name, secret):\n \"\"\"\n >>> if not gpg.list_keys(hash_codename('randomid')):\n ... genkeypair(hash_codename('randomid'), 'randomid').type\n ... else:\n ... u'P'\n u'P'\n \"\"\"\n name = clean(name)\n secret = hash_codename(secret, salt=SCRYPT_GPG_PEPPER)\n return gpg.gen_key(gpg.gen_key_input(\n key_type=GPG_KEY_TYPE, key_length=GPG_KEY_LENGTH,\n passphrase=secret,\n name_email=name\n ))\n\n\ndef delete_reply_keypair(source_id):\n key = getkey(source_id)\n # If this source was never flagged for reivew, they won't have a reply keypair\n if not key: return\n # The private key needs to be deleted before the public key can be deleted\n # http://pythonhosted.org/python-gnupg/#deleting-keys\n gpg.delete_keys(key, True) # private key\n gpg.delete_keys(key) # public key\n # TODO: srm?\n\n\ndef getkey(name):\n for key in gpg.list_keys():\n for uid in key['uids']:\n if name in uid:\n return key['fingerprint']\n return None\n\n\ndef get_key_by_fingerprint(fingerprint):\n matches = filter(lambda k: k['fingerprint'] == fingerprint, gpg.list_keys())\n return matches[0] if matches else None\n\n\ndef encrypt(plaintext, fingerprints, output=None):\n # Verify the output path\n if output:\n store.verify(output)\n\n # Remove any spaces from provided fingerpints\n # GPG outputs fingerprints with spaces for readability, but requires the\n # spaces to be removed when using fingerprints to specify recipients.\n if not isinstance(fingerprints, (list, tuple)):\n fingerprints = [fingerprints,]\n fingerprints = [ fpr.replace(' ', '') for fpr in fingerprints ]\n\n if isinstance(plaintext, unicode):\n plaintext = plaintext.encode('utf8')\n\n encrypt_fn = gpg.encrypt if isinstance(plaintext, str) else gpg.encrypt_file\n out = encrypt_fn(plaintext,\n *fingerprints,\n output=output,\n always_trust=True)\n if out.ok:\n return out.data\n else:\n raise CryptoException(out.stderr)\n\n\ndef decrypt(secret, plain_text):\n \"\"\"\n >>> key = genkeypair('randomid', 'randomid')\n >>> decrypt('randomid', 'randomid',\n ... encrypt('randomid', 'Goodbye, cruel world!')\n ... 
)\n 'Goodbye, cruel world!'\n \"\"\"\n hashed_codename = hash_codename(secret, salt=SCRYPT_GPG_PEPPER)\n return gpg.decrypt(plain_text, passphrase=hashed_codename).data\n\n\nif __name__ == \"__main__\":\n import doctest\n doctest.testmod()\n", "path": "securedrop/crypto_util.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport os\nimport subprocess\nfrom base64 import b32encode\n\nfrom Crypto.Random import random\nimport gnupg\nimport scrypt\n\nimport config\nimport store\n\n# to fix gpg error #78 on production\nos.environ['USERNAME'] = 'www-data'\n\nGPG_KEY_TYPE = \"RSA\"\nif os.environ.get('SECUREDROP_ENV') == 'test':\n # Optiimize crypto to speed up tests (at the expense of security - DO NOT\n # use these settings in production)\n GPG_KEY_LENGTH = 1024\n SCRYPT_PARAMS = dict(N=2**1, r=1, p=1)\nelse:\n GPG_KEY_LENGTH = 4096\n SCRYPT_PARAMS = config.SCRYPT_PARAMS\n\nSCRYPT_ID_PEPPER = config.SCRYPT_ID_PEPPER\nSCRYPT_GPG_PEPPER = config.SCRYPT_GPG_PEPPER\n\nDEFAULT_WORDS_IN_RANDOM_ID = 8\n\n# Make sure these pass before the app can run\n# TODO: Add more tests\ndef do_runtime_tests():\n assert(config.SCRYPT_ID_PEPPER != config.SCRYPT_GPG_PEPPER)\n # crash if we don't have srm:\n try:\n subprocess.check_call(['srm'], stdout=subprocess.PIPE)\n except subprocess.CalledProcessError:\n pass\n\ndo_runtime_tests()\n\nGPG_BINARY = 'gpg2'\ntry:\n p = subprocess.Popen([GPG_BINARY, '--version'], stdout=subprocess.PIPE)\nexcept OSError:\n GPG_BINARY = 'gpg'\n p = subprocess.Popen([GPG_BINARY, '--version'], stdout=subprocess.PIPE)\n\nassert p.stdout.readline().split()[\n -1].split('.')[0] == '2', \"upgrade GPG to 2.0\"\ndel p\n\ngpg = gnupg.GPG(binary=GPG_BINARY, homedir=config.GPG_KEY_DIR)\n\nwords = file(config.WORD_LIST).read().split('\\n')\nnouns = file(config.NOUNS).read().split('\\n')\nadjectives = file(config.ADJECTIVES).read().split('\\n')\n\n\nclass CryptoException(Exception):\n pass\n\n\ndef clean(s, also=''):\n \"\"\"\n >>> clean(\"Hello, world!\")\n Traceback (most recent call last):\n ...\n CryptoException: invalid input\n >>> clean(\"Helloworld\")\n 'Helloworld'\n \"\"\"\n # safe characters for every possible word in the wordlist includes capital\n # letters because codename hashes are base32-encoded with capital letters\n ok = ' !#%$&)(+*-1032547698;:=?@acbedgfihkjmlonqpsrutwvyxzABCDEFGHIJKLMNOPQRSTUVWXYZ'\n for c in s:\n if c not in ok and c not in also:\n raise CryptoException(\"invalid input: %s\" % s)\n # scrypt.hash requires input of type str. Since the wordlist is all ASCII\n # characters, this conversion is not problematic\n return str(s)\n\n\ndef genrandomid(words_in_random_id=DEFAULT_WORDS_IN_RANDOM_ID):\n return ' '.join(random.choice(words) for x in range(words_in_random_id))\n\n\ndef display_id():\n return ' '.join([random.choice(adjectives), random.choice(nouns)])\n\n\ndef hash_codename(codename, salt=SCRYPT_ID_PEPPER):\n \"\"\"\n >>> hash_codename('Hello, world!')\n 'EQZGCJBRGISGOTC2NZVWG6LILJBHEV3CINNEWSCLLFTUWZLFHBTS6WLCHFHTOLRSGQXUQLRQHFMXKOKKOQ4WQ6SXGZXDAS3Z'\n \"\"\"\n return b32encode(scrypt.hash(clean(codename), salt, **SCRYPT_PARAMS))\n\n\ndef genkeypair(name, secret):\n \"\"\"\n >>> if not gpg.list_keys(hash_codename('randomid')):\n ... genkeypair(hash_codename('randomid'), 'randomid').type\n ... else:\n ... 
u'P'\n u'P'\n \"\"\"\n name = clean(name)\n secret = hash_codename(secret, salt=SCRYPT_GPG_PEPPER)\n return gpg.gen_key(gpg.gen_key_input(\n key_type=GPG_KEY_TYPE, key_length=GPG_KEY_LENGTH,\n passphrase=secret,\n name_email=name\n ))\n\n\ndef delete_reply_keypair(source_id):\n key = getkey(source_id)\n # If this source was never flagged for reivew, they won't have a reply keypair\n if not key: return\n # The private key needs to be deleted before the public key can be deleted\n # http://pythonhosted.org/python-gnupg/#deleting-keys\n gpg.delete_keys(key, True) # private key\n gpg.delete_keys(key) # public key\n # TODO: srm?\n\n\ndef getkey(name):\n for key in gpg.list_keys():\n for uid in key['uids']:\n if name in uid:\n return key['fingerprint']\n return None\n\n\ndef get_key_by_fingerprint(fingerprint):\n matches = filter(lambda k: k['fingerprint'] == fingerprint, gpg.list_keys())\n return matches[0] if matches else None\n\n\ndef encrypt(plaintext, fingerprints, output=None):\n # Verify the output path\n if output:\n store.verify(output)\n\n # Remove any spaces from provided fingerpints\n # GPG outputs fingerprints with spaces for readability, but requires the\n # spaces to be removed when using fingerprints to specify recipients.\n if not isinstance(fingerprints, (list, tuple)):\n fingerprints = [fingerprints,]\n fingerprints = [ fpr.replace(' ', '') for fpr in fingerprints ]\n\n if isinstance(plaintext, unicode):\n plaintext = plaintext.encode('utf8')\n\n encrypt_fn = gpg.encrypt if isinstance(plaintext, str) else gpg.encrypt_file\n out = encrypt_fn(plaintext,\n *fingerprints,\n output=output,\n always_trust=True,\n armor=False)\n if out.ok:\n return out.data\n else:\n raise CryptoException(out.stderr)\n\n\ndef decrypt(secret, plain_text):\n \"\"\"\n >>> key = genkeypair('randomid', 'randomid')\n >>> decrypt('randomid', 'randomid',\n ... encrypt('randomid', 'Goodbye, cruel world!')\n ... )\n 'Goodbye, cruel world!'\n \"\"\"\n hashed_codename = hash_codename(secret, salt=SCRYPT_GPG_PEPPER)\n return gpg.decrypt(plain_text, passphrase=hashed_codename).data\n\n\nif __name__ == \"__main__\":\n import doctest\n doctest.testmod()\n", "path": "securedrop/crypto_util.py"}]}
2,288
99
gh_patches_debug_57938
rasdani/github-patches
git_diff
coreruleset__coreruleset-3500
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Google link/crawler blocked at PL2 ### Description Hello everyone, Here is another false positive found in our production. The `ARGS:gclid` contains a token in URL when someone visits a website by clicking a shared link on google/youtube. However, it matches the following rules: 942440 PL2 SQL Comment Sequence Detected 949110 PL1 Inbound Anomaly Score Exceeded (Total Score: 5) 980170 PL1 Anomaly Scores: (Inbound Scores: blocking=5, detection=5, per_pl=0-5-0-0, threshold=5) - (Outbound Scores: blocking=0, detection=0, per_pl=0-0-0-0, threshold=4) - (SQLI=5, XSS=0, RFI=0, LFI=0, RCE=0, PHPI=0, HTTP=0, SESS=0) Example: `example.com/file/?gclid=j0KCQiA1NebBhDDARIsAANiDD3_RJeMv8zScF--mC1jf8fO8PDYJCxD9xdwT7iQ59QIIwL-86ncQtMaAh0lEALw_wcB` Test on sandbox: `curl -s -H "x-format-output: txt-matched-rules" -H 'x-crs-paranoia-level: 2' 'https://sandbox.coreruleset.org/file/?gclid=Cj0KCQiA1NebBhDDARIsAANiDD3_RJeMv8zScF--mC1jf8fO8PDYJCxD9xdwT7iQ59QIIwL-86ncQtMaAh0lEALw_wcB'` We excluded following way: ``` SecRule &ARGS:gclid "@gt 0" "id:xxxxxxxx,\ ....,\ ....,\ ctl:ruleRemoveTargetById=942440;ARGS:gclid,\ chain" SecRule ARGS:gclid "@rx ^[a-zA-Z0-9_-]{0,100}$" "t:none" ``` ### Confirmation - [x] I have removed any personal data (email addresses, IP addresses, passwords, domain names) from any logs posted. Thanks as always, @theMiddleBlue --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `util/find-rules-without-test/find-rules-without-test.py` Content: ``` 1 #!/usr/bin/env python3 2 3 # This file helps to find the rules which does not have any test cases. 4 # 5 # You just have to pass the CORERULESET_ROOT as argument. 6 # 7 # At the end, the script will print the list of rules without any tests. 8 # 9 # Please note, that there are some exclusions: 10 # * only REQUEST-NNN rules are checked 11 # * there are some hardcoded exlucions: 12 # * REQUEST-900- 13 # * REQUEST-901- 14 # * REQUEST-905- 15 # * REQUEST-910- 16 # * REQUEST-912. 17 # * REQUEST-949- 18 # 19 # and the rule 921170 20 21 import sys 22 import glob 23 import msc_pyparser 24 import argparse 25 26 EXCLUSION_LIST = ["900", "901", "905", "910", "912", "949", "921170"] 27 oformat = "native" 28 29 def find_ids(s, test_cases): 30 """ 31 s: the parsed structure 32 test_cases: all available test cases 33 """ 34 rids = {} 35 for i in s: 36 # only SecRule counts 37 if i['type'] == "SecRule": 38 for a in i['actions']: 39 # find the `id` action 40 if a['act_name'] == "id": 41 # get the argument of the action 42 rid = int(a['act_arg']) # int 43 srid = a['act_arg'] # string 44 if (rid%1000) >= 100: # skip the PL control rules 45 # also skip these hardcoded rules 46 need_check = True 47 for excl in EXCLUSION_LIST: 48 if srid[:len(excl)] == excl: 49 need_check = False 50 if need_check: 51 # if there is no test cases, just print it 52 if rid not in test_cases: 53 rids[rid] = a['lineno'] 54 return rids 55 56 def errmsgf(msg): 57 if oformat == "github": 58 print("::error file={file},line={line},endLine={endLine},title={title}::{message}".format(**msg)) 59 else: 60 print("file={file}, line={line}, endLine={endLine}, title={title}: {message}".format(**msg)) 61 62 if __name__ == "__main__": 63 64 desc = """This script helps to find the rules without test cases. It needs a mandatory 65 argument where you pass the path to your coreruleset. 
The tool collects the 66 tests with name REQUEST-*, but not with RESPONSE-*. Then reads the rule id's, 67 and check which rule does not have any test. Some rules does not need test 68 case, these are hardcoded as exclusions: 900NNN, 901NNN, 905NNN, 910NNN, 69 912NNN, 949NNN.""" 70 71 parser = argparse.ArgumentParser(description=desc, formatter_class=argparse.RawTextHelpFormatter) 72 parser.add_argument("--output", dest="output", help="Output format native[default]|github", required=False) 73 parser.add_argument('crspath', metavar='/path/to/coreruleset', type=str, 74 help='Directory path to CRS') 75 args = parser.parse_args() 76 77 if args.output is not None: 78 if args.output not in ["native", "github"]: 79 print("--output can be one of the 'native' or 'github'. Default value is 'native'") 80 sys.exit(1) 81 oformat = args.output 82 83 test_cases = {} 84 # from argument, build the rules path and regression test paths 85 crspath = args.crspath.rstrip("/") + "/rules/*.conf" 86 testpath = args.crspath.rstrip("/") + "/tests/regression/tests/*" 87 retval = 0 88 # collect rules 89 flist = glob.glob(crspath) 90 flist.sort() 91 if len(flist) == 0: 92 print("Can't open files in given path!") 93 sys.exit(1) 94 95 # collect test cases 96 tlist = glob.glob(testpath) 97 tlist.sort() 98 if len(tlist) == 0: 99 print("Can't open files in given path (%s)!" % (testpath)) 100 sys.exit(1) 101 # find the yaml files with name REQUEST at the begin 102 # collect them in a dictionary 103 for t in tlist: 104 tname = t.split("/")[-1] 105 if tname[:7] == "REQUEST": 106 testlist = glob.glob(t + "/*.yaml") 107 testlist.sort() 108 for tc in testlist: 109 tcname = tc.split("/")[-1].split(".")[0] 110 test_cases[int(tcname)] = 1 111 112 # iterate the rule files 113 for f in flist: 114 fname = f.split("/")[-1] 115 if fname[:7] == "REQUEST": 116 try: 117 with open(f, 'r') as inputfile: 118 data = inputfile.read() 119 except: 120 print("Can't open file: %s" % f) 121 print(sys.exc_info()) 122 sys.exit(1) 123 124 try: 125 # make a structure 126 mparser = msc_pyparser.MSCParser() 127 mparser.parser.parse(data) 128 # add the parsed structure to a function, which finds the 'id'-s, 129 # and the collected test cases 130 rids = find_ids(mparser.configlines, test_cases) 131 for k in rids.keys(): 132 errmsgf({'file': f, 'line': rids[k], 'endLine': rids[k], 'title': "Test file missing", 'message': ("rule %d does not have any regression test" % k)}) 133 except: 134 print("Can't parse config file: %s" % (f)) 135 print(sys.exc_info()[1]) 136 sys.exit(1) 137 sys.exit(retval) 138 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/util/find-rules-without-test/find-rules-without-test.py b/util/find-rules-without-test/find-rules-without-test.py --- a/util/find-rules-without-test/find-rules-without-test.py +++ b/util/find-rules-without-test/find-rules-without-test.py @@ -23,7 +23,7 @@ import msc_pyparser import argparse -EXCLUSION_LIST = ["900", "901", "905", "910", "912", "949", "921170"] +EXCLUSION_LIST = ["900", "901", "905", "910", "912", "949", "921170", "942441", "942442"] oformat = "native" def find_ids(s, test_cases):
{"golden_diff": "diff --git a/util/find-rules-without-test/find-rules-without-test.py b/util/find-rules-without-test/find-rules-without-test.py\n--- a/util/find-rules-without-test/find-rules-without-test.py\n+++ b/util/find-rules-without-test/find-rules-without-test.py\n@@ -23,7 +23,7 @@\n import msc_pyparser\n import argparse\n \n-EXCLUSION_LIST = [\"900\", \"901\", \"905\", \"910\", \"912\", \"949\", \"921170\"]\n+EXCLUSION_LIST = [\"900\", \"901\", \"905\", \"910\", \"912\", \"949\", \"921170\", \"942441\", \"942442\"]\n oformat = \"native\"\n \n def find_ids(s, test_cases):\n", "issue": "Google link/crawler blocked at PL2\n### Description\r\nHello everyone,\r\n\r\nHere is another false positive found in our production.\r\nThe `ARGS:gclid` contains a token in URL when someone visits a website by clicking a shared link on google/youtube.\r\nHowever, it matches the following rules:\r\n\r\n942440 PL2 SQL Comment Sequence Detected\r\n949110 PL1 Inbound Anomaly Score Exceeded (Total Score: 5)\r\n980170 PL1 Anomaly Scores: (Inbound Scores: blocking=5, detection=5, per_pl=0-5-0-0, threshold=5) - (Outbound Scores: blocking=0, detection=0, per_pl=0-0-0-0, threshold=4) - (SQLI=5, XSS=0, RFI=0, LFI=0, RCE=0, PHPI=0, HTTP=0, SESS=0)\r\n\r\nExample:\r\n`example.com/file/?gclid=j0KCQiA1NebBhDDARIsAANiDD3_RJeMv8zScF--mC1jf8fO8PDYJCxD9xdwT7iQ59QIIwL-86ncQtMaAh0lEALw_wcB`\r\n\r\nTest on sandbox:\r\n`curl -s -H \"x-format-output: txt-matched-rules\" -H 'x-crs-paranoia-level: 2' 'https://sandbox.coreruleset.org/file/?gclid=Cj0KCQiA1NebBhDDARIsAANiDD3_RJeMv8zScF--mC1jf8fO8PDYJCxD9xdwT7iQ59QIIwL-86ncQtMaAh0lEALw_wcB'`\r\n\r\nWe excluded following way:\r\n```\r\nSecRule &ARGS:gclid \"@gt 0\" \"id:xxxxxxxx,\\\r\n ....,\\\r\n ....,\\\r\n ctl:ruleRemoveTargetById=942440;ARGS:gclid,\\\r\n chain\"\r\n SecRule ARGS:gclid \"@rx ^[a-zA-Z0-9_-]{0,100}$\" \"t:none\"\r\n\r\n```\r\n### Confirmation\r\n\r\n- [x] I have removed any personal data (email addresses, IP addresses,\r\n passwords, domain names) from any logs posted.\r\n\r\nThanks as always, @theMiddleBlue \r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n\n# This file helps to find the rules which does not have any test cases.\n#\n# You just have to pass the CORERULESET_ROOT as argument.\n#\n# At the end, the script will print the list of rules without any tests.\n#\n# Please note, that there are some exclusions:\n# * only REQUEST-NNN rules are checked\n# * there are some hardcoded exlucions:\n# * REQUEST-900-\n# * REQUEST-901-\n# * REQUEST-905-\n# * REQUEST-910-\n# * REQUEST-912.\n# * REQUEST-949-\n#\n# and the rule 921170\n\nimport sys\nimport glob\nimport msc_pyparser\nimport argparse\n\nEXCLUSION_LIST = [\"900\", \"901\", \"905\", \"910\", \"912\", \"949\", \"921170\"]\noformat = \"native\"\n\ndef find_ids(s, test_cases):\n \"\"\"\n s: the parsed structure\n test_cases: all available test cases\n \"\"\"\n rids = {}\n for i in s:\n # only SecRule counts\n if i['type'] == \"SecRule\":\n for a in i['actions']:\n # find the `id` action\n if a['act_name'] == \"id\":\n # get the argument of the action\n rid = int(a['act_arg']) # int\n srid = a['act_arg'] # string\n if (rid%1000) >= 100: # skip the PL control rules\n # also skip these hardcoded rules\n need_check = True\n for excl in EXCLUSION_LIST:\n if srid[:len(excl)] == excl:\n need_check = False\n if need_check:\n # if there is no test cases, just print it\n if rid not in test_cases:\n rids[rid] = a['lineno']\n return rids\n\ndef errmsgf(msg):\n if oformat == \"github\":\n print(\"::error 
file={file},line={line},endLine={endLine},title={title}::{message}\".format(**msg))\n else:\n print(\"file={file}, line={line}, endLine={endLine}, title={title}: {message}\".format(**msg))\n\nif __name__ == \"__main__\":\n\n desc = \"\"\"This script helps to find the rules without test cases. It needs a mandatory\nargument where you pass the path to your coreruleset. The tool collects the\ntests with name REQUEST-*, but not with RESPONSE-*. Then reads the rule id's,\nand check which rule does not have any test. Some rules does not need test\ncase, these are hardcoded as exclusions: 900NNN, 901NNN, 905NNN, 910NNN,\n912NNN, 949NNN.\"\"\"\n\n parser = argparse.ArgumentParser(description=desc, formatter_class=argparse.RawTextHelpFormatter)\n parser.add_argument(\"--output\", dest=\"output\", help=\"Output format native[default]|github\", required=False)\n parser.add_argument('crspath', metavar='/path/to/coreruleset', type=str,\n help='Directory path to CRS')\n args = parser.parse_args()\n\n if args.output is not None:\n if args.output not in [\"native\", \"github\"]:\n print(\"--output can be one of the 'native' or 'github'. Default value is 'native'\")\n sys.exit(1)\n oformat = args.output\n\n test_cases = {}\n # from argument, build the rules path and regression test paths\n crspath = args.crspath.rstrip(\"/\") + \"/rules/*.conf\"\n testpath = args.crspath.rstrip(\"/\") + \"/tests/regression/tests/*\"\n retval = 0\n # collect rules\n flist = glob.glob(crspath)\n flist.sort()\n if len(flist) == 0:\n print(\"Can't open files in given path!\")\n sys.exit(1)\n\n # collect test cases\n tlist = glob.glob(testpath)\n tlist.sort()\n if len(tlist) == 0:\n print(\"Can't open files in given path (%s)!\" % (testpath))\n sys.exit(1)\n # find the yaml files with name REQUEST at the begin\n # collect them in a dictionary\n for t in tlist:\n tname = t.split(\"/\")[-1]\n if tname[:7] == \"REQUEST\":\n testlist = glob.glob(t + \"/*.yaml\")\n testlist.sort()\n for tc in testlist:\n tcname = tc.split(\"/\")[-1].split(\".\")[0]\n test_cases[int(tcname)] = 1\n\n # iterate the rule files\n for f in flist:\n fname = f.split(\"/\")[-1]\n if fname[:7] == \"REQUEST\":\n try:\n with open(f, 'r') as inputfile:\n data = inputfile.read()\n except:\n print(\"Can't open file: %s\" % f)\n print(sys.exc_info())\n sys.exit(1)\n\n try:\n # make a structure\n mparser = msc_pyparser.MSCParser()\n mparser.parser.parse(data)\n # add the parsed structure to a function, which finds the 'id'-s,\n # and the collected test cases\n rids = find_ids(mparser.configlines, test_cases)\n for k in rids.keys():\n errmsgf({'file': f, 'line': rids[k], 'endLine': rids[k], 'title': \"Test file missing\", 'message': (\"rule %d does not have any regression test\" % k)})\n except:\n print(\"Can't parse config file: %s\" % (f))\n print(sys.exc_info()[1])\n sys.exit(1)\n sys.exit(retval)\n", "path": "util/find-rules-without-test/find-rules-without-test.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\n# This file helps to find the rules which does not have any test cases.\n#\n# You just have to pass the CORERULESET_ROOT as argument.\n#\n# At the end, the script will print the list of rules without any tests.\n#\n# Please note, that there are some exclusions:\n# * only REQUEST-NNN rules are checked\n# * there are some hardcoded exlucions:\n# * REQUEST-900-\n# * REQUEST-901-\n# * REQUEST-905-\n# * REQUEST-910-\n# * REQUEST-912.\n# * REQUEST-949-\n#\n# and the rule 921170\n\nimport sys\nimport glob\nimport msc_pyparser\nimport 
argparse\n\nEXCLUSION_LIST = [\"900\", \"901\", \"905\", \"910\", \"912\", \"949\", \"921170\", \"942441\", \"942442\"]\noformat = \"native\"\n\ndef find_ids(s, test_cases):\n \"\"\"\n s: the parsed structure\n test_cases: all available test cases\n \"\"\"\n rids = {}\n for i in s:\n # only SecRule counts\n if i['type'] == \"SecRule\":\n for a in i['actions']:\n # find the `id` action\n if a['act_name'] == \"id\":\n # get the argument of the action\n rid = int(a['act_arg']) # int\n srid = a['act_arg'] # string\n if (rid%1000) >= 100: # skip the PL control rules\n # also skip these hardcoded rules\n need_check = True\n for excl in EXCLUSION_LIST:\n if srid[:len(excl)] == excl:\n need_check = False\n if need_check:\n # if there is no test cases, just print it\n if rid not in test_cases:\n rids[rid] = a['lineno']\n return rids\n\ndef errmsgf(msg):\n if oformat == \"github\":\n print(\"::error file={file},line={line},endLine={endLine},title={title}::{message}\".format(**msg))\n else:\n print(\"file={file}, line={line}, endLine={endLine}, title={title}: {message}\".format(**msg))\n\nif __name__ == \"__main__\":\n\n desc = \"\"\"This script helps to find the rules without test cases. It needs a mandatory\nargument where you pass the path to your coreruleset. The tool collects the\ntests with name REQUEST-*, but not with RESPONSE-*. Then reads the rule id's,\nand check which rule does not have any test. Some rules does not need test\ncase, these are hardcoded as exclusions: 900NNN, 901NNN, 905NNN, 910NNN,\n912NNN, 949NNN.\"\"\"\n\n parser = argparse.ArgumentParser(description=desc, formatter_class=argparse.RawTextHelpFormatter)\n parser.add_argument(\"--output\", dest=\"output\", help=\"Output format native[default]|github\", required=False)\n parser.add_argument('crspath', metavar='/path/to/coreruleset', type=str,\n help='Directory path to CRS')\n args = parser.parse_args()\n\n if args.output is not None:\n if args.output not in [\"native\", \"github\"]:\n print(\"--output can be one of the 'native' or 'github'. 
Default value is 'native'\")\n sys.exit(1)\n oformat = args.output\n\n test_cases = {}\n # from argument, build the rules path and regression test paths\n crspath = args.crspath.rstrip(\"/\") + \"/rules/*.conf\"\n testpath = args.crspath.rstrip(\"/\") + \"/tests/regression/tests/*\"\n retval = 0\n # collect rules\n flist = glob.glob(crspath)\n flist.sort()\n if len(flist) == 0:\n print(\"Can't open files in given path!\")\n sys.exit(1)\n\n # collect test cases\n tlist = glob.glob(testpath)\n tlist.sort()\n if len(tlist) == 0:\n print(\"Can't open files in given path (%s)!\" % (testpath))\n sys.exit(1)\n # find the yaml files with name REQUEST at the begin\n # collect them in a dictionary\n for t in tlist:\n tname = t.split(\"/\")[-1]\n if tname[:7] == \"REQUEST\":\n testlist = glob.glob(t + \"/*.yaml\")\n testlist.sort()\n for tc in testlist:\n tcname = tc.split(\"/\")[-1].split(\".\")[0]\n test_cases[int(tcname)] = 1\n\n # iterate the rule files\n for f in flist:\n fname = f.split(\"/\")[-1]\n if fname[:7] == \"REQUEST\":\n try:\n with open(f, 'r') as inputfile:\n data = inputfile.read()\n except:\n print(\"Can't open file: %s\" % f)\n print(sys.exc_info())\n sys.exit(1)\n\n try:\n # make a structure\n mparser = msc_pyparser.MSCParser()\n mparser.parser.parse(data)\n # add the parsed structure to a function, which finds the 'id'-s,\n # and the collected test cases\n rids = find_ids(mparser.configlines, test_cases)\n for k in rids.keys():\n errmsgf({'file': f, 'line': rids[k], 'endLine': rids[k], 'title': \"Test file missing\", 'message': (\"rule %d does not have any regression test\" % k)})\n except:\n print(\"Can't parse config file: %s\" % (f))\n print(sys.exc_info()[1])\n sys.exit(1)\n sys.exit(retval)\n", "path": "util/find-rules-without-test/find-rules-without-test.py"}]}
2,408
208
gh_patches_debug_392
rasdani/github-patches
git_diff
Nitrate__Nitrate-527
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Remove dependency mock Use `unittest.mock` instead. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 3 from setuptools import setup, find_packages 4 5 6 with open('VERSION.txt', 'r') as f: 7 pkg_version = f.read().strip() 8 9 10 def get_long_description(): 11 with open('README.rst', 'r') as f: 12 return f.read() 13 14 15 install_requires = [ 16 'beautifulsoup4 >= 4.1.1', 17 'django >= 2.0,<3.0', 18 'django-contrib-comments == 1.9.1', 19 'django-tinymce == 2.7.0', 20 'django-uuslug == 1.1.8', 21 'html2text', 22 'odfpy >= 0.9.6', 23 'python-bugzilla', 24 'xmltodict', 25 'kobo == 0.9.0' 26 ] 27 28 extras_require = { 29 'mysql': ['mysqlclient >= 1.2.3'], 30 'pgsql': ['psycopg2 == 2.7.5'], 31 32 # Required for tcms.auth.backends.KerberosBackend 33 'krbauth': [ 34 'kerberos == 1.2.5' 35 ], 36 37 # Packages for building documentation 38 'docs': [ 39 'Sphinx >= 1.1.2', 40 'sphinx_rtd_theme', 41 ], 42 43 # Necessary packages for running tests 44 'tests': [ 45 'beautifulsoup4', 46 'coverage', 47 'factory_boy', 48 'flake8', 49 'mock', 50 'pytest', 51 'pytest-cov', 52 'pytest-django', 53 ], 54 55 # Contain tools that assists the development 56 'devtools': [ 57 'django-debug-toolbar', 58 'tox', 59 'django-extensions', 60 'pygraphviz', 61 ], 62 63 # Required packages required to run async tasks 64 'async': [ 65 'celery == 4.2.0', 66 ], 67 68 'multiauth': [ 69 'social-auth-app-django == 3.1.0', 70 ] 71 } 72 73 setup( 74 name='nitrate-tcms', 75 version=pkg_version, 76 description='A full-featured Test Case Management System', 77 long_description=get_long_description(), 78 author='Nitrate Team', 79 maintainer='Chenxiong Qi', 80 maintainer_email='[email protected]', 81 url='https://github.com/Nitrate/Nitrate/', 82 license='GPLv2+', 83 keywords='test case', 84 install_requires=install_requires, 85 extras_require=extras_require, 86 python_requires='>=3.6', 87 package_dir={'': 'src'}, 88 packages=find_packages('src', exclude=['test*']), 89 include_package_data=True, 90 zip_safe=False, 91 classifiers=[ 92 'Framework :: Django', 93 'Framework :: Django :: 2.0', 94 'Framework :: Django :: 2.1', 95 'Framework :: Django :: 2.2', 96 'Intended Audience :: Developers', 97 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)', 98 'Programming Language :: Python :: 3', 99 'Programming Language :: Python :: 3.6', 100 'Programming Language :: Python :: 3.7', 101 'Programming Language :: Python :: 3 :: Only', 102 'Topic :: Software Development :: Quality Assurance', 103 'Topic :: Software Development :: Testing', 104 ], 105 project_urls={ 106 'Issue Tracker': 'https://github.com/Nitrate/Nitrate/issues', 107 'Source Code': 'https://github.com/Nitrate/Nitrate', 108 'Documentation': 'https://nitrate.readthedocs.io/', 109 }, 110 ) 111 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -46,7 +46,6 @@ 'coverage', 'factory_boy', 'flake8', - 'mock', 'pytest', 'pytest-cov', 'pytest-django',
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -46,7 +46,6 @@\n 'coverage',\n 'factory_boy',\n 'flake8',\n- 'mock',\n 'pytest',\n 'pytest-cov',\n 'pytest-django',\n", "issue": "Remove dependency mock\nUse `unittest.mock` instead.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom setuptools import setup, find_packages\n\n\nwith open('VERSION.txt', 'r') as f:\n pkg_version = f.read().strip()\n\n\ndef get_long_description():\n with open('README.rst', 'r') as f:\n return f.read()\n\n\ninstall_requires = [\n 'beautifulsoup4 >= 4.1.1',\n 'django >= 2.0,<3.0',\n 'django-contrib-comments == 1.9.1',\n 'django-tinymce == 2.7.0',\n 'django-uuslug == 1.1.8',\n 'html2text',\n 'odfpy >= 0.9.6',\n 'python-bugzilla',\n 'xmltodict',\n 'kobo == 0.9.0'\n]\n\nextras_require = {\n 'mysql': ['mysqlclient >= 1.2.3'],\n 'pgsql': ['psycopg2 == 2.7.5'],\n\n # Required for tcms.auth.backends.KerberosBackend\n 'krbauth': [\n 'kerberos == 1.2.5'\n ],\n\n # Packages for building documentation\n 'docs': [\n 'Sphinx >= 1.1.2',\n 'sphinx_rtd_theme',\n ],\n\n # Necessary packages for running tests\n 'tests': [\n 'beautifulsoup4',\n 'coverage',\n 'factory_boy',\n 'flake8',\n 'mock',\n 'pytest',\n 'pytest-cov',\n 'pytest-django',\n ],\n\n # Contain tools that assists the development\n 'devtools': [\n 'django-debug-toolbar',\n 'tox',\n 'django-extensions',\n 'pygraphviz',\n ],\n\n # Required packages required to run async tasks\n 'async': [\n 'celery == 4.2.0',\n ],\n\n 'multiauth': [\n 'social-auth-app-django == 3.1.0',\n ]\n}\n\nsetup(\n name='nitrate-tcms',\n version=pkg_version,\n description='A full-featured Test Case Management System',\n long_description=get_long_description(),\n author='Nitrate Team',\n maintainer='Chenxiong Qi',\n maintainer_email='[email protected]',\n url='https://github.com/Nitrate/Nitrate/',\n license='GPLv2+',\n keywords='test case',\n install_requires=install_requires,\n extras_require=extras_require,\n python_requires='>=3.6',\n package_dir={'': 'src'},\n packages=find_packages('src', exclude=['test*']),\n include_package_data=True,\n zip_safe=False,\n classifiers=[\n 'Framework :: Django',\n 'Framework :: Django :: 2.0',\n 'Framework :: Django :: 2.1',\n 'Framework :: Django :: 2.2',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3 :: Only',\n 'Topic :: Software Development :: Quality Assurance',\n 'Topic :: Software Development :: Testing',\n ],\n project_urls={\n 'Issue Tracker': 'https://github.com/Nitrate/Nitrate/issues',\n 'Source Code': 'https://github.com/Nitrate/Nitrate',\n 'Documentation': 'https://nitrate.readthedocs.io/',\n },\n)\n", "path": "setup.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom setuptools import setup, find_packages\n\n\nwith open('VERSION.txt', 'r') as f:\n pkg_version = f.read().strip()\n\n\ndef get_long_description():\n with open('README.rst', 'r') as f:\n return f.read()\n\n\ninstall_requires = [\n 'beautifulsoup4 >= 4.1.1',\n 'django >= 2.0,<3.0',\n 'django-contrib-comments == 1.9.1',\n 'django-tinymce == 2.7.0',\n 'django-uuslug == 1.1.8',\n 'html2text',\n 'odfpy >= 0.9.6',\n 'python-bugzilla',\n 'xmltodict',\n 'kobo == 0.9.0'\n]\n\nextras_require = {\n 'mysql': ['mysqlclient >= 1.2.3'],\n 'pgsql': ['psycopg2 == 2.7.5'],\n\n # Required for 
tcms.auth.backends.KerberosBackend\n 'krbauth': [\n 'kerberos == 1.2.5'\n ],\n\n # Packages for building documentation\n 'docs': [\n 'Sphinx >= 1.1.2',\n 'sphinx_rtd_theme',\n ],\n\n # Necessary packages for running tests\n 'tests': [\n 'beautifulsoup4',\n 'coverage',\n 'factory_boy',\n 'flake8',\n 'pytest',\n 'pytest-cov',\n 'pytest-django',\n ],\n\n # Contain tools that assists the development\n 'devtools': [\n 'django-debug-toolbar',\n 'tox',\n 'django-extensions',\n 'pygraphviz',\n ],\n\n # Required packages required to run async tasks\n 'async': [\n 'celery == 4.2.0',\n ],\n\n 'multiauth': [\n 'social-auth-app-django == 3.1.0',\n ]\n}\n\nsetup(\n name='nitrate-tcms',\n version=pkg_version,\n description='A full-featured Test Case Management System',\n long_description=get_long_description(),\n author='Nitrate Team',\n maintainer='Chenxiong Qi',\n maintainer_email='[email protected]',\n url='https://github.com/Nitrate/Nitrate/',\n license='GPLv2+',\n keywords='test case',\n install_requires=install_requires,\n extras_require=extras_require,\n python_requires='>=3.6',\n package_dir={'': 'src'},\n packages=find_packages('src', exclude=['test*']),\n include_package_data=True,\n zip_safe=False,\n classifiers=[\n 'Framework :: Django',\n 'Framework :: Django :: 2.0',\n 'Framework :: Django :: 2.1',\n 'Framework :: Django :: 2.2',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3 :: Only',\n 'Topic :: Software Development :: Quality Assurance',\n 'Topic :: Software Development :: Testing',\n ],\n project_urls={\n 'Issue Tracker': 'https://github.com/Nitrate/Nitrate/issues',\n 'Source Code': 'https://github.com/Nitrate/Nitrate',\n 'Documentation': 'https://nitrate.readthedocs.io/',\n },\n)\n", "path": "setup.py"}]}
1,278
68
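The entry above drops the third-party `mock` package from the test extras because, as its issue text notes, the same API ships in the standard library as `unittest.mock`. A minimal sketch of what that switch looks like in a test file; it patches a real standard-library call (`os.getcwd`) purely so the example runs as-is and is not taken from the Nitrate sources:

```python
import os
from unittest import mock  # standard-library replacement for the removed 'mock' package


def test_fake_working_directory():
    # mock.patch behaves the same as the third-party mock's patch did.
    with mock.patch("os.getcwd", return_value="/tmp/build"):
        assert os.getcwd() == "/tmp/build"


test_fake_working_directory()
```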
gh_patches_debug_1370
rasdani/github-patches
git_diff
pystiche__pystiche-103
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- ZeroDivisionError with default_epoch_optim_loop I get an `ZeroDivisionError: integer division or modulo by zero` when using the `default_transformer_epoch_optim_loop`. This is probably because the `num_batches` of the `batch_sampler` is much smaller than in the `default_transformer_optim_loop` which results in `log_freq=0` in `default_transformer_optim_log_fn.` Below is a minimal example to reproduce the error: ```python from pystiche.optim.log import default_transformer_optim_log_fn, OptimLogger logger = OptimLogger() num_batches = 300 log_fn = default_transformer_optim_log_fn(logger, num_batches) image_loading_velocity = 1 image_processing_velocity = 1 batch = 1 loss = 1 log_fn(batch, loss, image_loading_velocity, image_processing_velocity) ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pystiche/optim/log.py` Content: ``` 1 from typing import Union, Optional, Tuple, Callable 2 import contextlib 3 import sys 4 import logging 5 import torch 6 from torch.optim.optimizer import Optimizer 7 from torch.optim.lr_scheduler import _LRScheduler as LRScheduler 8 import pystiche 9 from pystiche.pyramid.level import PyramidLevel 10 from .meter import FloatMeter, LossMeter, ProgressMeter 11 12 __all__ = [ 13 "default_logger", 14 "OptimLogger", 15 "default_image_optim_log_fn", 16 "default_pyramid_level_header", 17 "default_transformer_optim_log_fn", 18 ] 19 20 21 def default_logger(name: Optional[str] = None, log_file: Optional[str] = None): 22 logger = logging.getLogger(name) 23 logger.setLevel(logging.INFO) 24 25 fmt = logging.Formatter( 26 fmt="|%(asctime)s| %(message)s", datefmt="%d.%m.%Y %H:%M:%S" 27 ) 28 29 sh = logging.StreamHandler(sys.stdout) 30 sh.setLevel(logging.INFO) 31 sh.addFilter(lambda record: record.levelno <= logging.INFO) 32 sh.setFormatter(fmt) 33 logger.addHandler(sh) 34 35 sh = logging.StreamHandler(sys.stderr) 36 sh.setLevel(logging.WARNING) 37 sh.setFormatter(fmt) 38 logger.addHandler(sh) 39 40 if log_file is not None: 41 fh = logging.FileHandler(log_file) 42 fh.setLevel(logging.INFO) 43 fh.setFormatter(fmt) 44 logger.addHandler(fh) 45 46 return logger 47 48 49 class OptimLogger: 50 INDENT = 2 51 SEP_LINE_LENGTH = 80 52 SEP_CHARS = ("#", "=", "-", ".") 53 54 def __init__(self, logger: Optional[logging.Logger] = None): 55 if logger is None: 56 logger = default_logger() 57 self.logger = logger 58 59 self._environ_indent_offset = 0 60 self._environ_level_offset = 0 61 62 def _calc_abs_indent(self, indent: int, rel: bool): 63 abs_indent = indent 64 if rel: 65 abs_indent += self._environ_indent_offset 66 return abs_indent 67 68 def _calc_abs_level(self, level: int, rel: bool): 69 abs_level = level 70 if rel: 71 abs_level += self._environ_level_offset 72 return abs_level 73 74 def message(self, msg: str, indent: int = 0, rel=True) -> None: 75 abs_indent = self._calc_abs_indent(indent, rel) 76 for line in msg.splitlines(): 77 self.logger.info(" " * abs_indent + line) 78 79 def sepline(self, level: int = 0, rel=True): 80 abs_level = self._calc_abs_level(level, rel) 81 self.message(self.SEP_CHARS[abs_level] * self.SEP_LINE_LENGTH) 82 83 def sep_message( 84 self, msg: str, level: int = 0, rel=True, top_sep=True, bottom_sep=True 85 ): 86 if top_sep: 87 self.sepline(level=level, rel=rel) 88 self.message(msg, rel=rel) 89 if bottom_sep: 90 self.sepline(level=level, rel=rel) 91 92 
@contextlib.contextmanager 93 def environment(self, header: str): 94 self.sep_message(header) 95 self._environ_indent_offset += self.INDENT 96 self._environ_level_offset += 1 97 try: 98 yield 99 finally: 100 self._environ_level_offset -= 1 101 self._environ_indent_offset -= self.INDENT 102 103 104 def default_image_optim_log_fn( 105 optim_logger: OptimLogger, log_freq: int = 50, max_depth: int = 1 106 ) -> Callable[[int, Union[torch.Tensor, pystiche.LossDict]], None]: 107 def log_fn(step: int, loss: Union[torch.Tensor, pystiche.LossDict]) -> None: 108 if step % log_freq == 0: 109 with optim_logger.environment(f"Step {step}"): 110 if isinstance(loss, torch.Tensor): 111 optim_logger.message(f"loss: {loss.item():.3e}") 112 else: # isinstance(loss, pystiche.LossDict) 113 optim_logger.message(loss.aggregate(max_depth).format()) 114 115 return log_fn 116 117 118 def default_pyramid_level_header( 119 num: int, level: PyramidLevel, input_image_size: Tuple[int, int] 120 ): 121 height, width = input_image_size 122 return f"Pyramid level {num} with {level.num_steps} steps " f"({width} x {height})" 123 124 125 def default_transformer_optim_log_fn( 126 optim_logger: OptimLogger, 127 num_batches: int, 128 log_freq: Optional[int] = None, 129 show_loading_velocity: bool = True, 130 show_processing_velocity: bool = True, 131 show_running_means: bool = True, 132 ): 133 if log_freq is None: 134 log_freq = min(round(1e-3 * num_batches) * 10, 50) 135 136 window_size = min(10 * log_freq, 1000) 137 138 meters = [LossMeter(show_avg=show_running_means, window_size=window_size)] 139 if show_loading_velocity: 140 meters.append( 141 FloatMeter( 142 name="loading_velocity", 143 fmt="{:3.1f} img/s", 144 show_avg=show_running_means, 145 window_size=window_size, 146 ) 147 ) 148 if show_processing_velocity: 149 meters.append( 150 FloatMeter( 151 name="processing_velocity", 152 fmt="{:3.1f} img/s", 153 show_avg=show_running_means, 154 window_size=window_size, 155 ) 156 ) 157 158 progress_meter = ProgressMeter(num_batches, *meters) 159 160 def log_fn(batch, loss, loading_velocity, processing_velocity): 161 progress_meter.update( 162 batch, 163 loss=loss, 164 loading_velocity=loading_velocity, 165 processing_velocity=processing_velocity, 166 ) 167 168 if batch % log_freq == 0: 169 optim_logger.message(str(progress_meter)) 170 171 return log_fn 172 173 174 def default_epoch_header_fn( 175 epoch: int, optimizer: Optimizer, lr_scheduler: Optional[LRScheduler] 176 ): 177 return f"Epoch {epoch}" 178 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pystiche/optim/log.py b/pystiche/optim/log.py --- a/pystiche/optim/log.py +++ b/pystiche/optim/log.py @@ -131,7 +131,7 @@ show_running_means: bool = True, ): if log_freq is None: - log_freq = min(round(1e-3 * num_batches) * 10, 50) + log_freq = max(min(round(1e-3 * num_batches) * 10, 50), 1) window_size = min(10 * log_freq, 1000)
{"golden_diff": "diff --git a/pystiche/optim/log.py b/pystiche/optim/log.py\n--- a/pystiche/optim/log.py\n+++ b/pystiche/optim/log.py\n@@ -131,7 +131,7 @@\n show_running_means: bool = True,\n ):\n if log_freq is None:\n- log_freq = min(round(1e-3 * num_batches) * 10, 50)\n+ log_freq = max(min(round(1e-3 * num_batches) * 10, 50), 1)\n \n window_size = min(10 * log_freq, 1000)\n", "issue": "ZeroDivisionError with default_epoch_optim_loop\nI get an `ZeroDivisionError: integer division or modulo by zero` when using the `default_transformer_epoch_optim_loop`. This is probably because the `num_batches` of the `batch_sampler` is much smaller than in the `default_transformer_optim_loop` which results in `log_freq=0` in `default_transformer_optim_log_fn.` \r\n\r\nBelow is a minimal example to reproduce the error: \r\n```python\r\nfrom pystiche.optim.log import default_transformer_optim_log_fn, OptimLogger\r\n\r\nlogger = OptimLogger()\r\nnum_batches = 300\r\nlog_fn = default_transformer_optim_log_fn(logger, num_batches)\r\nimage_loading_velocity = 1\r\nimage_processing_velocity = 1\r\nbatch = 1\r\nloss = 1\r\nlog_fn(batch, loss, image_loading_velocity, image_processing_velocity)\r\n```\n", "before_files": [{"content": "from typing import Union, Optional, Tuple, Callable\nimport contextlib\nimport sys\nimport logging\nimport torch\nfrom torch.optim.optimizer import Optimizer\nfrom torch.optim.lr_scheduler import _LRScheduler as LRScheduler\nimport pystiche\nfrom pystiche.pyramid.level import PyramidLevel\nfrom .meter import FloatMeter, LossMeter, ProgressMeter\n\n__all__ = [\n \"default_logger\",\n \"OptimLogger\",\n \"default_image_optim_log_fn\",\n \"default_pyramid_level_header\",\n \"default_transformer_optim_log_fn\",\n]\n\n\ndef default_logger(name: Optional[str] = None, log_file: Optional[str] = None):\n logger = logging.getLogger(name)\n logger.setLevel(logging.INFO)\n\n fmt = logging.Formatter(\n fmt=\"|%(asctime)s| %(message)s\", datefmt=\"%d.%m.%Y %H:%M:%S\"\n )\n\n sh = logging.StreamHandler(sys.stdout)\n sh.setLevel(logging.INFO)\n sh.addFilter(lambda record: record.levelno <= logging.INFO)\n sh.setFormatter(fmt)\n logger.addHandler(sh)\n\n sh = logging.StreamHandler(sys.stderr)\n sh.setLevel(logging.WARNING)\n sh.setFormatter(fmt)\n logger.addHandler(sh)\n\n if log_file is not None:\n fh = logging.FileHandler(log_file)\n fh.setLevel(logging.INFO)\n fh.setFormatter(fmt)\n logger.addHandler(fh)\n\n return logger\n\n\nclass OptimLogger:\n INDENT = 2\n SEP_LINE_LENGTH = 80\n SEP_CHARS = (\"#\", \"=\", \"-\", \".\")\n\n def __init__(self, logger: Optional[logging.Logger] = None):\n if logger is None:\n logger = default_logger()\n self.logger = logger\n\n self._environ_indent_offset = 0\n self._environ_level_offset = 0\n\n def _calc_abs_indent(self, indent: int, rel: bool):\n abs_indent = indent\n if rel:\n abs_indent += self._environ_indent_offset\n return abs_indent\n\n def _calc_abs_level(self, level: int, rel: bool):\n abs_level = level\n if rel:\n abs_level += self._environ_level_offset\n return abs_level\n\n def message(self, msg: str, indent: int = 0, rel=True) -> None:\n abs_indent = self._calc_abs_indent(indent, rel)\n for line in msg.splitlines():\n self.logger.info(\" \" * abs_indent + line)\n\n def sepline(self, level: int = 0, rel=True):\n abs_level = self._calc_abs_level(level, rel)\n self.message(self.SEP_CHARS[abs_level] * self.SEP_LINE_LENGTH)\n\n def sep_message(\n self, msg: str, level: int = 0, rel=True, top_sep=True, bottom_sep=True\n ):\n if top_sep:\n 
self.sepline(level=level, rel=rel)\n self.message(msg, rel=rel)\n if bottom_sep:\n self.sepline(level=level, rel=rel)\n\n @contextlib.contextmanager\n def environment(self, header: str):\n self.sep_message(header)\n self._environ_indent_offset += self.INDENT\n self._environ_level_offset += 1\n try:\n yield\n finally:\n self._environ_level_offset -= 1\n self._environ_indent_offset -= self.INDENT\n\n\ndef default_image_optim_log_fn(\n optim_logger: OptimLogger, log_freq: int = 50, max_depth: int = 1\n) -> Callable[[int, Union[torch.Tensor, pystiche.LossDict]], None]:\n def log_fn(step: int, loss: Union[torch.Tensor, pystiche.LossDict]) -> None:\n if step % log_freq == 0:\n with optim_logger.environment(f\"Step {step}\"):\n if isinstance(loss, torch.Tensor):\n optim_logger.message(f\"loss: {loss.item():.3e}\")\n else: # isinstance(loss, pystiche.LossDict)\n optim_logger.message(loss.aggregate(max_depth).format())\n\n return log_fn\n\n\ndef default_pyramid_level_header(\n num: int, level: PyramidLevel, input_image_size: Tuple[int, int]\n):\n height, width = input_image_size\n return f\"Pyramid level {num} with {level.num_steps} steps \" f\"({width} x {height})\"\n\n\ndef default_transformer_optim_log_fn(\n optim_logger: OptimLogger,\n num_batches: int,\n log_freq: Optional[int] = None,\n show_loading_velocity: bool = True,\n show_processing_velocity: bool = True,\n show_running_means: bool = True,\n):\n if log_freq is None:\n log_freq = min(round(1e-3 * num_batches) * 10, 50)\n\n window_size = min(10 * log_freq, 1000)\n\n meters = [LossMeter(show_avg=show_running_means, window_size=window_size)]\n if show_loading_velocity:\n meters.append(\n FloatMeter(\n name=\"loading_velocity\",\n fmt=\"{:3.1f} img/s\",\n show_avg=show_running_means,\n window_size=window_size,\n )\n )\n if show_processing_velocity:\n meters.append(\n FloatMeter(\n name=\"processing_velocity\",\n fmt=\"{:3.1f} img/s\",\n show_avg=show_running_means,\n window_size=window_size,\n )\n )\n\n progress_meter = ProgressMeter(num_batches, *meters)\n\n def log_fn(batch, loss, loading_velocity, processing_velocity):\n progress_meter.update(\n batch,\n loss=loss,\n loading_velocity=loading_velocity,\n processing_velocity=processing_velocity,\n )\n\n if batch % log_freq == 0:\n optim_logger.message(str(progress_meter))\n\n return log_fn\n\n\ndef default_epoch_header_fn(\n epoch: int, optimizer: Optimizer, lr_scheduler: Optional[LRScheduler]\n):\n return f\"Epoch {epoch}\"\n", "path": "pystiche/optim/log.py"}], "after_files": [{"content": "from typing import Union, Optional, Tuple, Callable\nimport contextlib\nimport sys\nimport logging\nimport torch\nfrom torch.optim.optimizer import Optimizer\nfrom torch.optim.lr_scheduler import _LRScheduler as LRScheduler\nimport pystiche\nfrom pystiche.pyramid.level import PyramidLevel\nfrom .meter import FloatMeter, LossMeter, ProgressMeter\n\n__all__ = [\n \"default_logger\",\n \"OptimLogger\",\n \"default_image_optim_log_fn\",\n \"default_pyramid_level_header\",\n \"default_transformer_optim_log_fn\",\n]\n\n\ndef default_logger(name: Optional[str] = None, log_file: Optional[str] = None):\n logger = logging.getLogger(name)\n logger.setLevel(logging.INFO)\n\n fmt = logging.Formatter(\n fmt=\"|%(asctime)s| %(message)s\", datefmt=\"%d.%m.%Y %H:%M:%S\"\n )\n\n sh = logging.StreamHandler(sys.stdout)\n sh.setLevel(logging.INFO)\n sh.addFilter(lambda record: record.levelno <= logging.INFO)\n sh.setFormatter(fmt)\n logger.addHandler(sh)\n\n sh = logging.StreamHandler(sys.stderr)\n 
sh.setLevel(logging.WARNING)\n sh.setFormatter(fmt)\n logger.addHandler(sh)\n\n if log_file is not None:\n fh = logging.FileHandler(log_file)\n fh.setLevel(logging.INFO)\n fh.setFormatter(fmt)\n logger.addHandler(fh)\n\n return logger\n\n\nclass OptimLogger:\n INDENT = 2\n SEP_LINE_LENGTH = 80\n SEP_CHARS = (\"#\", \"=\", \"-\", \".\")\n\n def __init__(self, logger: Optional[logging.Logger] = None):\n if logger is None:\n logger = default_logger()\n self.logger = logger\n\n self._environ_indent_offset = 0\n self._environ_level_offset = 0\n\n def _calc_abs_indent(self, indent: int, rel: bool):\n abs_indent = indent\n if rel:\n abs_indent += self._environ_indent_offset\n return abs_indent\n\n def _calc_abs_level(self, level: int, rel: bool):\n abs_level = level\n if rel:\n abs_level += self._environ_level_offset\n return abs_level\n\n def message(self, msg: str, indent: int = 0, rel=True) -> None:\n abs_indent = self._calc_abs_indent(indent, rel)\n for line in msg.splitlines():\n self.logger.info(\" \" * abs_indent + line)\n\n def sepline(self, level: int = 0, rel=True):\n abs_level = self._calc_abs_level(level, rel)\n self.message(self.SEP_CHARS[abs_level] * self.SEP_LINE_LENGTH)\n\n def sep_message(\n self, msg: str, level: int = 0, rel=True, top_sep=True, bottom_sep=True\n ):\n if top_sep:\n self.sepline(level=level, rel=rel)\n self.message(msg, rel=rel)\n if bottom_sep:\n self.sepline(level=level, rel=rel)\n\n @contextlib.contextmanager\n def environment(self, header: str):\n self.sep_message(header)\n self._environ_indent_offset += self.INDENT\n self._environ_level_offset += 1\n try:\n yield\n finally:\n self._environ_level_offset -= 1\n self._environ_indent_offset -= self.INDENT\n\n\ndef default_image_optim_log_fn(\n optim_logger: OptimLogger, log_freq: int = 50, max_depth: int = 1\n) -> Callable[[int, Union[torch.Tensor, pystiche.LossDict]], None]:\n def log_fn(step: int, loss: Union[torch.Tensor, pystiche.LossDict]) -> None:\n if step % log_freq == 0:\n with optim_logger.environment(f\"Step {step}\"):\n if isinstance(loss, torch.Tensor):\n optim_logger.message(f\"loss: {loss.item():.3e}\")\n else: # isinstance(loss, pystiche.LossDict)\n optim_logger.message(loss.aggregate(max_depth).format())\n\n return log_fn\n\n\ndef default_pyramid_level_header(\n num: int, level: PyramidLevel, input_image_size: Tuple[int, int]\n):\n height, width = input_image_size\n return f\"Pyramid level {num} with {level.num_steps} steps \" f\"({width} x {height})\"\n\n\ndef default_transformer_optim_log_fn(\n optim_logger: OptimLogger,\n num_batches: int,\n log_freq: Optional[int] = None,\n show_loading_velocity: bool = True,\n show_processing_velocity: bool = True,\n show_running_means: bool = True,\n):\n if log_freq is None:\n log_freq = max(min(round(1e-3 * num_batches) * 10, 50), 1)\n\n window_size = min(10 * log_freq, 1000)\n\n meters = [LossMeter(show_avg=show_running_means, window_size=window_size)]\n if show_loading_velocity:\n meters.append(\n FloatMeter(\n name=\"loading_velocity\",\n fmt=\"{:3.1f} img/s\",\n show_avg=show_running_means,\n window_size=window_size,\n )\n )\n if show_processing_velocity:\n meters.append(\n FloatMeter(\n name=\"processing_velocity\",\n fmt=\"{:3.1f} img/s\",\n show_avg=show_running_means,\n window_size=window_size,\n )\n )\n\n progress_meter = ProgressMeter(num_batches, *meters)\n\n def log_fn(batch, loss, loading_velocity, processing_velocity):\n progress_meter.update(\n batch,\n loss=loss,\n loading_velocity=loading_velocity,\n 
processing_velocity=processing_velocity,\n )\n\n if batch % log_freq == 0:\n optim_logger.message(str(progress_meter))\n\n return log_fn\n\n\ndef default_epoch_header_fn(\n epoch: int, optimizer: Optimizer, lr_scheduler: Optional[LRScheduler]\n):\n return f\"Epoch {epoch}\"\n", "path": "pystiche/optim/log.py"}]}
2,184
144
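The patch above clamps `log_freq` to at least 1, so the later `batch % log_freq` check can no longer divide by zero. A standalone sketch of the arithmetic behind the reported `ZeroDivisionError`, using the `num_batches = 300` value from the issue (plain Python, not pystiche code):

```python
num_batches = 300  # value from the bug report

# Old default: round(1e-3 * 300) == 0, so the logging frequency collapses to 0.
old_log_freq = min(round(1e-3 * num_batches) * 10, 50)

# Patched default: identical expression, clamped to a minimum of 1.
new_log_freq = max(min(round(1e-3 * num_batches) * 10, 50), 1)

print(old_log_freq, new_log_freq)  # -> 0 1

batch = 1
# batch % old_log_freq   # ZeroDivisionError: integer division or modulo by zero
print(batch % new_log_freq)  # -> 0, so the progress meter is logged every batch
```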
gh_patches_debug_5653
rasdani/github-patches
git_diff
open-telemetry__opentelemetry-python-contrib-537
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Problem with using domain socket as bind address **Describe your environment** The OT-wsgi library throws error if domain socket is used for the bind address. **Steps to reproduce** Here is a test program: ``` import web from time import sleep from opentelemetry import trace from opentelemetry.sdk.trace import TracerProvider from opentelemetry.sdk.trace.export import ( ConsoleSpanExporter, SimpleSpanProcessor, ) from opentelemetry.instrumentation.wsgi import OpenTelemetryMiddleware from cheroot import wsgi trace.set_tracer_provider(TracerProvider()) trace.get_tracer_provider().add_span_processor( SimpleSpanProcessor(ConsoleSpanExporter()) ) #tracer = trace.get_tracer(__name__) urls = ( '/', 'index' ) class index: def GET(self): return "Hello, world!" if __name__ == "__main__": app = web.application(urls, globals()) func = app.wsgifunc() func = OpenTelemetryMiddleware(func) server = wsgi.WSGIServer("/tmp/lr.sock", func, server_name="localhost") server.start() ``` invocation: ``` (base) kamalh-mbp:~ kamalh$ echo -ne 'GET / HTTP/1.1\r\nHost: test.com\r\n\r\n' | nc -U /tmp/lr.sock HTTP/1.1 500 Internal Server Error Content-Length: 0 Content-Type: text/plain ``` Error from the program ``` (base) kamalh-mbp:opentelemetry kamalh$ python3 wsgi-lr.py Overriding of current TracerProvider is not allowed ValueError("invalid literal for int() with base 10: ''") Traceback (most recent call last): File "/Users/kamalh/miniconda3/lib/python3.7/site-packages/cheroot/server.py", line 1287, in communicate req.respond() File "/Users/kamalh/miniconda3/lib/python3.7/site-packages/cheroot/server.py", line 1077, in respond self.server.gateway(self).respond() File "/Users/kamalh/miniconda3/lib/python3.7/site-packages/cheroot/wsgi.py", line 140, in respond response = self.req.server.wsgi_app(self.env, self.start_response) File "/Users/kamalh/miniconda3/lib/python3.7/site-packages/opentelemetry/instrumentation/wsgi/__init__.py", line 229, in __call__ attributes=collect_request_attributes(environ), File "/Users/kamalh/miniconda3/lib/python3.7/site-packages/opentelemetry/instrumentation/wsgi/__init__.py", line 122, in collect_request_attributes result.update({SpanAttributes.NET_HOST_PORT: int(host_port)}) ValueError: invalid literal for int() with base 10: '' ``` **What is the expected behavior?** Expect to see the server returning normally as in TCP sockets. **What is the actual behavior?** Error message. Please see the paste above. **Additional context** Add any other context about the problem here. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `instrumentation/opentelemetry-instrumentation-wsgi/src/opentelemetry/instrumentation/wsgi/__init__.py` Content: ``` 1 # Copyright The OpenTelemetry Authors 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 
14 """ 15 This library provides a WSGI middleware that can be used on any WSGI framework 16 (such as Django / Flask) to track requests timing through OpenTelemetry. 17 18 Usage (Flask) 19 ------------- 20 21 .. code-block:: python 22 23 from flask import Flask 24 from opentelemetry.instrumentation.wsgi import OpenTelemetryMiddleware 25 26 app = Flask(__name__) 27 app.wsgi_app = OpenTelemetryMiddleware(app.wsgi_app) 28 29 @app.route("/") 30 def hello(): 31 return "Hello!" 32 33 if __name__ == "__main__": 34 app.run(debug=True) 35 36 37 Usage (Django) 38 -------------- 39 40 Modify the application's ``wsgi.py`` file as shown below. 41 42 .. code-block:: python 43 44 import os 45 from opentelemetry.instrumentation.wsgi import OpenTelemetryMiddleware 46 from django.core.wsgi import get_wsgi_application 47 48 os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'application.settings') 49 50 application = get_wsgi_application() 51 application = OpenTelemetryMiddleware(application) 52 53 API 54 --- 55 """ 56 57 import functools 58 import typing 59 import wsgiref.util as wsgiref_util 60 61 from opentelemetry import context, trace 62 from opentelemetry.instrumentation.utils import http_status_to_status_code 63 from opentelemetry.instrumentation.wsgi.version import __version__ 64 from opentelemetry.propagate import extract 65 from opentelemetry.propagators.textmap import Getter 66 from opentelemetry.semconv.trace import SpanAttributes 67 from opentelemetry.trace.status import Status, StatusCode 68 69 _HTTP_VERSION_PREFIX = "HTTP/" 70 _CARRIER_KEY_PREFIX = "HTTP_" 71 _CARRIER_KEY_PREFIX_LEN = len(_CARRIER_KEY_PREFIX) 72 73 74 class WSGIGetter(Getter): 75 def get( 76 self, carrier: dict, key: str 77 ) -> typing.Optional[typing.List[str]]: 78 """Getter implementation to retrieve a HTTP header value from the 79 PEP3333-conforming WSGI environ 80 81 Args: 82 carrier: WSGI environ object 83 key: header name in environ object 84 Returns: 85 A list with a single string with the header value if it exists, 86 else None. 
87 """ 88 environ_key = "HTTP_" + key.upper().replace("-", "_") 89 value = carrier.get(environ_key) 90 if value is not None: 91 return [value] 92 return None 93 94 def keys(self, carrier): 95 return [ 96 key[_CARRIER_KEY_PREFIX_LEN:].lower().replace("_", "-") 97 for key in carrier 98 if key.startswith(_CARRIER_KEY_PREFIX) 99 ] 100 101 102 wsgi_getter = WSGIGetter() 103 104 105 def setifnotnone(dic, key, value): 106 if value is not None: 107 dic[key] = value 108 109 110 def collect_request_attributes(environ): 111 """Collects HTTP request attributes from the PEP3333-conforming 112 WSGI environ and returns a dictionary to be used as span creation attributes.""" 113 114 result = { 115 SpanAttributes.HTTP_METHOD: environ.get("REQUEST_METHOD"), 116 SpanAttributes.HTTP_SERVER_NAME: environ.get("SERVER_NAME"), 117 SpanAttributes.HTTP_SCHEME: environ.get("wsgi.url_scheme"), 118 } 119 120 host_port = environ.get("SERVER_PORT") 121 if host_port is not None: 122 result.update({SpanAttributes.NET_HOST_PORT: int(host_port)}) 123 124 setifnotnone(result, SpanAttributes.HTTP_HOST, environ.get("HTTP_HOST")) 125 target = environ.get("RAW_URI") 126 if target is None: # Note: `"" or None is None` 127 target = environ.get("REQUEST_URI") 128 if target is not None: 129 result[SpanAttributes.HTTP_TARGET] = target 130 else: 131 result[SpanAttributes.HTTP_URL] = wsgiref_util.request_uri(environ) 132 133 remote_addr = environ.get("REMOTE_ADDR") 134 if remote_addr: 135 result[SpanAttributes.NET_PEER_IP] = remote_addr 136 remote_host = environ.get("REMOTE_HOST") 137 if remote_host and remote_host != remote_addr: 138 result[SpanAttributes.NET_PEER_NAME] = remote_host 139 140 user_agent = environ.get("HTTP_USER_AGENT") 141 if user_agent is not None and len(user_agent) > 0: 142 result[SpanAttributes.HTTP_USER_AGENT] = user_agent 143 144 setifnotnone( 145 result, SpanAttributes.NET_PEER_PORT, environ.get("REMOTE_PORT") 146 ) 147 flavor = environ.get("SERVER_PROTOCOL", "") 148 if flavor.upper().startswith(_HTTP_VERSION_PREFIX): 149 flavor = flavor[len(_HTTP_VERSION_PREFIX) :] 150 if flavor: 151 result[SpanAttributes.HTTP_FLAVOR] = flavor 152 153 return result 154 155 156 def add_response_attributes( 157 span, start_response_status, response_headers 158 ): # pylint: disable=unused-argument 159 """Adds HTTP response attributes to span using the arguments 160 passed to a PEP3333-conforming start_response callable.""" 161 if not span.is_recording(): 162 return 163 status_code, _ = start_response_status.split(" ", 1) 164 165 try: 166 status_code = int(status_code) 167 except ValueError: 168 span.set_status( 169 Status( 170 StatusCode.ERROR, 171 "Non-integer HTTP status: " + repr(status_code), 172 ) 173 ) 174 else: 175 span.set_attribute(SpanAttributes.HTTP_STATUS_CODE, status_code) 176 span.set_status(Status(http_status_to_status_code(status_code))) 177 178 179 def get_default_span_name(environ): 180 """Default implementation for name_callback, returns HTTP {METHOD_NAME}.""" 181 return "HTTP {}".format(environ.get("REQUEST_METHOD", "")).strip() 182 183 184 class OpenTelemetryMiddleware: 185 """The WSGI application middleware. 186 187 This class is a PEP 3333 conforming WSGI middleware that starts and 188 annotates spans for any requests it is invoked with. 189 190 Args: 191 wsgi: The WSGI application callable to forward requests to. 192 request_hook: Optional callback which is called with the server span and WSGI 193 environ object for every incoming request. 
194 response_hook: Optional callback which is called with the server span, 195 WSGI environ, status_code and response_headers for every 196 incoming request. 197 tracer_provider: Optional tracer provider to use. If omitted the current 198 globally configured one is used. 199 """ 200 201 def __init__( 202 self, wsgi, request_hook=None, response_hook=None, tracer_provider=None 203 ): 204 self.wsgi = wsgi 205 self.tracer = trace.get_tracer(__name__, __version__, tracer_provider) 206 self.request_hook = request_hook 207 self.response_hook = response_hook 208 209 @staticmethod 210 def _create_start_response(span, start_response, response_hook): 211 @functools.wraps(start_response) 212 def _start_response(status, response_headers, *args, **kwargs): 213 add_response_attributes(span, status, response_headers) 214 if response_hook: 215 response_hook(status, response_headers) 216 return start_response(status, response_headers, *args, **kwargs) 217 218 return _start_response 219 220 def __call__(self, environ, start_response): 221 """The WSGI application 222 223 Args: 224 environ: A WSGI environment. 225 start_response: The WSGI start_response callable. 226 """ 227 228 token = context.attach(extract(environ, getter=wsgi_getter)) 229 230 span = self.tracer.start_span( 231 get_default_span_name(environ), 232 kind=trace.SpanKind.SERVER, 233 attributes=collect_request_attributes(environ), 234 ) 235 236 if self.request_hook: 237 self.request_hook(span, environ) 238 239 response_hook = self.response_hook 240 if response_hook: 241 response_hook = functools.partial(response_hook, span, environ) 242 243 try: 244 with trace.use_span(span): 245 start_response = self._create_start_response( 246 span, start_response, response_hook 247 ) 248 iterable = self.wsgi(environ, start_response) 249 return _end_span_after_iterating( 250 iterable, span, self.tracer, token 251 ) 252 except Exception as ex: 253 if span.is_recording(): 254 span.set_status(Status(StatusCode.ERROR, str(ex))) 255 span.end() 256 context.detach(token) 257 raise 258 259 260 # Put this in a subfunction to not delay the call to the wrapped 261 # WSGI application (instrumentation should change the application 262 # behavior as little as possible). 263 def _end_span_after_iterating(iterable, span, tracer, token): 264 try: 265 with trace.use_span(span): 266 for yielded in iterable: 267 yield yielded 268 finally: 269 close = getattr(iterable, "close", None) 270 if close: 271 close() 272 span.end() 273 context.detach(token) 274 275 276 # TODO: inherit from opentelemetry.instrumentation.propagators.Setter 277 278 279 class ResponsePropagationSetter: 280 def set(self, carrier, key, value): # pylint: disable=no-self-use 281 carrier.append((key, value)) 282 283 284 default_response_propagation_setter = ResponsePropagationSetter() 285 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/instrumentation/opentelemetry-instrumentation-wsgi/src/opentelemetry/instrumentation/wsgi/__init__.py b/instrumentation/opentelemetry-instrumentation-wsgi/src/opentelemetry/instrumentation/wsgi/__init__.py --- a/instrumentation/opentelemetry-instrumentation-wsgi/src/opentelemetry/instrumentation/wsgi/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-wsgi/src/opentelemetry/instrumentation/wsgi/__init__.py @@ -118,7 +118,7 @@ } host_port = environ.get("SERVER_PORT") - if host_port is not None: + if host_port is not None and not host_port == "": result.update({SpanAttributes.NET_HOST_PORT: int(host_port)}) setifnotnone(result, SpanAttributes.HTTP_HOST, environ.get("HTTP_HOST"))
{"golden_diff": "diff --git a/instrumentation/opentelemetry-instrumentation-wsgi/src/opentelemetry/instrumentation/wsgi/__init__.py b/instrumentation/opentelemetry-instrumentation-wsgi/src/opentelemetry/instrumentation/wsgi/__init__.py\n--- a/instrumentation/opentelemetry-instrumentation-wsgi/src/opentelemetry/instrumentation/wsgi/__init__.py\n+++ b/instrumentation/opentelemetry-instrumentation-wsgi/src/opentelemetry/instrumentation/wsgi/__init__.py\n@@ -118,7 +118,7 @@\n }\n \n host_port = environ.get(\"SERVER_PORT\")\n- if host_port is not None:\n+ if host_port is not None and not host_port == \"\":\n result.update({SpanAttributes.NET_HOST_PORT: int(host_port)})\n \n setifnotnone(result, SpanAttributes.HTTP_HOST, environ.get(\"HTTP_HOST\"))\n", "issue": "Problem with using domain socket as bind address\n**Describe your environment** \r\nThe OT-wsgi library throws error if domain socket is used for the bind address.\r\n\r\n**Steps to reproduce**\r\nHere is a test program:\r\n\r\n```\r\nimport web\r\nfrom time import sleep\r\nfrom opentelemetry import trace\r\nfrom opentelemetry.sdk.trace import TracerProvider\r\nfrom opentelemetry.sdk.trace.export import (\r\n ConsoleSpanExporter,\r\n SimpleSpanProcessor,\r\n)\r\nfrom opentelemetry.instrumentation.wsgi import OpenTelemetryMiddleware\r\nfrom cheroot import wsgi\r\n\r\ntrace.set_tracer_provider(TracerProvider())\r\ntrace.get_tracer_provider().add_span_processor(\r\n SimpleSpanProcessor(ConsoleSpanExporter())\r\n)\r\n\r\n#tracer = trace.get_tracer(__name__)\r\n\r\nurls = (\r\n '/', 'index'\r\n)\r\nclass index:\r\n def GET(self):\r\n return \"Hello, world!\"\r\n\r\nif __name__ == \"__main__\":\r\n app = web.application(urls, globals())\r\n func = app.wsgifunc()\r\n\r\n func = OpenTelemetryMiddleware(func)\r\n\r\n server = wsgi.WSGIServer(\"/tmp/lr.sock\", func, server_name=\"localhost\")\r\n server.start()\r\n```\r\n\r\ninvocation:\r\n```\r\n(base) kamalh-mbp:~ kamalh$ echo -ne 'GET / HTTP/1.1\\r\\nHost: test.com\\r\\n\\r\\n' | nc -U /tmp/lr.sock\r\nHTTP/1.1 500 Internal Server Error\r\nContent-Length: 0\r\nContent-Type: text/plain\r\n```\r\n\r\nError from the program\r\n```\r\n(base) kamalh-mbp:opentelemetry kamalh$ python3 wsgi-lr.py\r\nOverriding of current TracerProvider is not allowed\r\nValueError(\"invalid literal for int() with base 10: ''\")\r\nTraceback (most recent call last):\r\n File \"/Users/kamalh/miniconda3/lib/python3.7/site-packages/cheroot/server.py\", line 1287, in communicate\r\n req.respond()\r\n File \"/Users/kamalh/miniconda3/lib/python3.7/site-packages/cheroot/server.py\", line 1077, in respond\r\n self.server.gateway(self).respond()\r\n File \"/Users/kamalh/miniconda3/lib/python3.7/site-packages/cheroot/wsgi.py\", line 140, in respond\r\n response = self.req.server.wsgi_app(self.env, self.start_response)\r\n File \"/Users/kamalh/miniconda3/lib/python3.7/site-packages/opentelemetry/instrumentation/wsgi/__init__.py\", line 229, in __call__\r\n attributes=collect_request_attributes(environ),\r\n File \"/Users/kamalh/miniconda3/lib/python3.7/site-packages/opentelemetry/instrumentation/wsgi/__init__.py\", line 122, in collect_request_attributes\r\n result.update({SpanAttributes.NET_HOST_PORT: int(host_port)})\r\nValueError: invalid literal for int() with base 10: ''\r\n```\r\n\r\n**What is the expected behavior?**\r\nExpect to see the server returning normally as in TCP sockets.\r\n\r\n**What is the actual behavior?**\r\nError message. 
Please see the paste above.\r\n\r\n**Additional context**\r\nAdd any other context about the problem here.\r\n\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nThis library provides a WSGI middleware that can be used on any WSGI framework\n(such as Django / Flask) to track requests timing through OpenTelemetry.\n\nUsage (Flask)\n-------------\n\n.. code-block:: python\n\n from flask import Flask\n from opentelemetry.instrumentation.wsgi import OpenTelemetryMiddleware\n\n app = Flask(__name__)\n app.wsgi_app = OpenTelemetryMiddleware(app.wsgi_app)\n\n @app.route(\"/\")\n def hello():\n return \"Hello!\"\n\n if __name__ == \"__main__\":\n app.run(debug=True)\n\n\nUsage (Django)\n--------------\n\nModify the application's ``wsgi.py`` file as shown below.\n\n.. code-block:: python\n\n import os\n from opentelemetry.instrumentation.wsgi import OpenTelemetryMiddleware\n from django.core.wsgi import get_wsgi_application\n\n os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'application.settings')\n\n application = get_wsgi_application()\n application = OpenTelemetryMiddleware(application)\n\nAPI\n---\n\"\"\"\n\nimport functools\nimport typing\nimport wsgiref.util as wsgiref_util\n\nfrom opentelemetry import context, trace\nfrom opentelemetry.instrumentation.utils import http_status_to_status_code\nfrom opentelemetry.instrumentation.wsgi.version import __version__\nfrom opentelemetry.propagate import extract\nfrom opentelemetry.propagators.textmap import Getter\nfrom opentelemetry.semconv.trace import SpanAttributes\nfrom opentelemetry.trace.status import Status, StatusCode\n\n_HTTP_VERSION_PREFIX = \"HTTP/\"\n_CARRIER_KEY_PREFIX = \"HTTP_\"\n_CARRIER_KEY_PREFIX_LEN = len(_CARRIER_KEY_PREFIX)\n\n\nclass WSGIGetter(Getter):\n def get(\n self, carrier: dict, key: str\n ) -> typing.Optional[typing.List[str]]:\n \"\"\"Getter implementation to retrieve a HTTP header value from the\n PEP3333-conforming WSGI environ\n\n Args:\n carrier: WSGI environ object\n key: header name in environ object\n Returns:\n A list with a single string with the header value if it exists,\n else None.\n \"\"\"\n environ_key = \"HTTP_\" + key.upper().replace(\"-\", \"_\")\n value = carrier.get(environ_key)\n if value is not None:\n return [value]\n return None\n\n def keys(self, carrier):\n return [\n key[_CARRIER_KEY_PREFIX_LEN:].lower().replace(\"_\", \"-\")\n for key in carrier\n if key.startswith(_CARRIER_KEY_PREFIX)\n ]\n\n\nwsgi_getter = WSGIGetter()\n\n\ndef setifnotnone(dic, key, value):\n if value is not None:\n dic[key] = value\n\n\ndef collect_request_attributes(environ):\n \"\"\"Collects HTTP request attributes from the PEP3333-conforming\n WSGI environ and returns a dictionary to be used as span creation attributes.\"\"\"\n\n result = {\n SpanAttributes.HTTP_METHOD: environ.get(\"REQUEST_METHOD\"),\n SpanAttributes.HTTP_SERVER_NAME: environ.get(\"SERVER_NAME\"),\n SpanAttributes.HTTP_SCHEME: environ.get(\"wsgi.url_scheme\"),\n }\n\n 
host_port = environ.get(\"SERVER_PORT\")\n if host_port is not None:\n result.update({SpanAttributes.NET_HOST_PORT: int(host_port)})\n\n setifnotnone(result, SpanAttributes.HTTP_HOST, environ.get(\"HTTP_HOST\"))\n target = environ.get(\"RAW_URI\")\n if target is None: # Note: `\"\" or None is None`\n target = environ.get(\"REQUEST_URI\")\n if target is not None:\n result[SpanAttributes.HTTP_TARGET] = target\n else:\n result[SpanAttributes.HTTP_URL] = wsgiref_util.request_uri(environ)\n\n remote_addr = environ.get(\"REMOTE_ADDR\")\n if remote_addr:\n result[SpanAttributes.NET_PEER_IP] = remote_addr\n remote_host = environ.get(\"REMOTE_HOST\")\n if remote_host and remote_host != remote_addr:\n result[SpanAttributes.NET_PEER_NAME] = remote_host\n\n user_agent = environ.get(\"HTTP_USER_AGENT\")\n if user_agent is not None and len(user_agent) > 0:\n result[SpanAttributes.HTTP_USER_AGENT] = user_agent\n\n setifnotnone(\n result, SpanAttributes.NET_PEER_PORT, environ.get(\"REMOTE_PORT\")\n )\n flavor = environ.get(\"SERVER_PROTOCOL\", \"\")\n if flavor.upper().startswith(_HTTP_VERSION_PREFIX):\n flavor = flavor[len(_HTTP_VERSION_PREFIX) :]\n if flavor:\n result[SpanAttributes.HTTP_FLAVOR] = flavor\n\n return result\n\n\ndef add_response_attributes(\n span, start_response_status, response_headers\n): # pylint: disable=unused-argument\n \"\"\"Adds HTTP response attributes to span using the arguments\n passed to a PEP3333-conforming start_response callable.\"\"\"\n if not span.is_recording():\n return\n status_code, _ = start_response_status.split(\" \", 1)\n\n try:\n status_code = int(status_code)\n except ValueError:\n span.set_status(\n Status(\n StatusCode.ERROR,\n \"Non-integer HTTP status: \" + repr(status_code),\n )\n )\n else:\n span.set_attribute(SpanAttributes.HTTP_STATUS_CODE, status_code)\n span.set_status(Status(http_status_to_status_code(status_code)))\n\n\ndef get_default_span_name(environ):\n \"\"\"Default implementation for name_callback, returns HTTP {METHOD_NAME}.\"\"\"\n return \"HTTP {}\".format(environ.get(\"REQUEST_METHOD\", \"\")).strip()\n\n\nclass OpenTelemetryMiddleware:\n \"\"\"The WSGI application middleware.\n\n This class is a PEP 3333 conforming WSGI middleware that starts and\n annotates spans for any requests it is invoked with.\n\n Args:\n wsgi: The WSGI application callable to forward requests to.\n request_hook: Optional callback which is called with the server span and WSGI\n environ object for every incoming request.\n response_hook: Optional callback which is called with the server span,\n WSGI environ, status_code and response_headers for every\n incoming request.\n tracer_provider: Optional tracer provider to use. 
If omitted the current\n globally configured one is used.\n \"\"\"\n\n def __init__(\n self, wsgi, request_hook=None, response_hook=None, tracer_provider=None\n ):\n self.wsgi = wsgi\n self.tracer = trace.get_tracer(__name__, __version__, tracer_provider)\n self.request_hook = request_hook\n self.response_hook = response_hook\n\n @staticmethod\n def _create_start_response(span, start_response, response_hook):\n @functools.wraps(start_response)\n def _start_response(status, response_headers, *args, **kwargs):\n add_response_attributes(span, status, response_headers)\n if response_hook:\n response_hook(status, response_headers)\n return start_response(status, response_headers, *args, **kwargs)\n\n return _start_response\n\n def __call__(self, environ, start_response):\n \"\"\"The WSGI application\n\n Args:\n environ: A WSGI environment.\n start_response: The WSGI start_response callable.\n \"\"\"\n\n token = context.attach(extract(environ, getter=wsgi_getter))\n\n span = self.tracer.start_span(\n get_default_span_name(environ),\n kind=trace.SpanKind.SERVER,\n attributes=collect_request_attributes(environ),\n )\n\n if self.request_hook:\n self.request_hook(span, environ)\n\n response_hook = self.response_hook\n if response_hook:\n response_hook = functools.partial(response_hook, span, environ)\n\n try:\n with trace.use_span(span):\n start_response = self._create_start_response(\n span, start_response, response_hook\n )\n iterable = self.wsgi(environ, start_response)\n return _end_span_after_iterating(\n iterable, span, self.tracer, token\n )\n except Exception as ex:\n if span.is_recording():\n span.set_status(Status(StatusCode.ERROR, str(ex)))\n span.end()\n context.detach(token)\n raise\n\n\n# Put this in a subfunction to not delay the call to the wrapped\n# WSGI application (instrumentation should change the application\n# behavior as little as possible).\ndef _end_span_after_iterating(iterable, span, tracer, token):\n try:\n with trace.use_span(span):\n for yielded in iterable:\n yield yielded\n finally:\n close = getattr(iterable, \"close\", None)\n if close:\n close()\n span.end()\n context.detach(token)\n\n\n# TODO: inherit from opentelemetry.instrumentation.propagators.Setter\n\n\nclass ResponsePropagationSetter:\n def set(self, carrier, key, value): # pylint: disable=no-self-use\n carrier.append((key, value))\n\n\ndefault_response_propagation_setter = ResponsePropagationSetter()\n", "path": "instrumentation/opentelemetry-instrumentation-wsgi/src/opentelemetry/instrumentation/wsgi/__init__.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nThis library provides a WSGI middleware that can be used on any WSGI framework\n(such as Django / Flask) to track requests timing through OpenTelemetry.\n\nUsage (Flask)\n-------------\n\n.. 
code-block:: python\n\n from flask import Flask\n from opentelemetry.instrumentation.wsgi import OpenTelemetryMiddleware\n\n app = Flask(__name__)\n app.wsgi_app = OpenTelemetryMiddleware(app.wsgi_app)\n\n @app.route(\"/\")\n def hello():\n return \"Hello!\"\n\n if __name__ == \"__main__\":\n app.run(debug=True)\n\n\nUsage (Django)\n--------------\n\nModify the application's ``wsgi.py`` file as shown below.\n\n.. code-block:: python\n\n import os\n from opentelemetry.instrumentation.wsgi import OpenTelemetryMiddleware\n from django.core.wsgi import get_wsgi_application\n\n os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'application.settings')\n\n application = get_wsgi_application()\n application = OpenTelemetryMiddleware(application)\n\nAPI\n---\n\"\"\"\n\nimport functools\nimport typing\nimport wsgiref.util as wsgiref_util\n\nfrom opentelemetry import context, trace\nfrom opentelemetry.instrumentation.utils import http_status_to_status_code\nfrom opentelemetry.instrumentation.wsgi.version import __version__\nfrom opentelemetry.propagate import extract\nfrom opentelemetry.propagators.textmap import Getter\nfrom opentelemetry.semconv.trace import SpanAttributes\nfrom opentelemetry.trace.status import Status, StatusCode\n\n_HTTP_VERSION_PREFIX = \"HTTP/\"\n_CARRIER_KEY_PREFIX = \"HTTP_\"\n_CARRIER_KEY_PREFIX_LEN = len(_CARRIER_KEY_PREFIX)\n\n\nclass WSGIGetter(Getter):\n def get(\n self, carrier: dict, key: str\n ) -> typing.Optional[typing.List[str]]:\n \"\"\"Getter implementation to retrieve a HTTP header value from the\n PEP3333-conforming WSGI environ\n\n Args:\n carrier: WSGI environ object\n key: header name in environ object\n Returns:\n A list with a single string with the header value if it exists,\n else None.\n \"\"\"\n environ_key = \"HTTP_\" + key.upper().replace(\"-\", \"_\")\n value = carrier.get(environ_key)\n if value is not None:\n return [value]\n return None\n\n def keys(self, carrier):\n return [\n key[_CARRIER_KEY_PREFIX_LEN:].lower().replace(\"_\", \"-\")\n for key in carrier\n if key.startswith(_CARRIER_KEY_PREFIX)\n ]\n\n\nwsgi_getter = WSGIGetter()\n\n\ndef setifnotnone(dic, key, value):\n if value is not None:\n dic[key] = value\n\n\ndef collect_request_attributes(environ):\n \"\"\"Collects HTTP request attributes from the PEP3333-conforming\n WSGI environ and returns a dictionary to be used as span creation attributes.\"\"\"\n\n result = {\n SpanAttributes.HTTP_METHOD: environ.get(\"REQUEST_METHOD\"),\n SpanAttributes.HTTP_SERVER_NAME: environ.get(\"SERVER_NAME\"),\n SpanAttributes.HTTP_SCHEME: environ.get(\"wsgi.url_scheme\"),\n }\n\n host_port = environ.get(\"SERVER_PORT\")\n if host_port is not None and not host_port == \"\":\n result.update({SpanAttributes.NET_HOST_PORT: int(host_port)})\n\n setifnotnone(result, SpanAttributes.HTTP_HOST, environ.get(\"HTTP_HOST\"))\n target = environ.get(\"RAW_URI\")\n if target is None: # Note: `\"\" or None is None`\n target = environ.get(\"REQUEST_URI\")\n if target is not None:\n result[SpanAttributes.HTTP_TARGET] = target\n else:\n result[SpanAttributes.HTTP_URL] = wsgiref_util.request_uri(environ)\n\n remote_addr = environ.get(\"REMOTE_ADDR\")\n if remote_addr:\n result[SpanAttributes.NET_PEER_IP] = remote_addr\n remote_host = environ.get(\"REMOTE_HOST\")\n if remote_host and remote_host != remote_addr:\n result[SpanAttributes.NET_PEER_NAME] = remote_host\n\n user_agent = environ.get(\"HTTP_USER_AGENT\")\n if user_agent is not None and len(user_agent) > 0:\n result[SpanAttributes.HTTP_USER_AGENT] = user_agent\n\n 
setifnotnone(\n result, SpanAttributes.NET_PEER_PORT, environ.get(\"REMOTE_PORT\")\n )\n flavor = environ.get(\"SERVER_PROTOCOL\", \"\")\n if flavor.upper().startswith(_HTTP_VERSION_PREFIX):\n flavor = flavor[len(_HTTP_VERSION_PREFIX) :]\n if flavor:\n result[SpanAttributes.HTTP_FLAVOR] = flavor\n\n return result\n\n\ndef add_response_attributes(\n span, start_response_status, response_headers\n): # pylint: disable=unused-argument\n \"\"\"Adds HTTP response attributes to span using the arguments\n passed to a PEP3333-conforming start_response callable.\"\"\"\n if not span.is_recording():\n return\n status_code, _ = start_response_status.split(\" \", 1)\n\n try:\n status_code = int(status_code)\n except ValueError:\n span.set_status(\n Status(\n StatusCode.ERROR,\n \"Non-integer HTTP status: \" + repr(status_code),\n )\n )\n else:\n span.set_attribute(SpanAttributes.HTTP_STATUS_CODE, status_code)\n span.set_status(Status(http_status_to_status_code(status_code)))\n\n\ndef get_default_span_name(environ):\n \"\"\"Default implementation for name_callback, returns HTTP {METHOD_NAME}.\"\"\"\n return \"HTTP {}\".format(environ.get(\"REQUEST_METHOD\", \"\")).strip()\n\n\nclass OpenTelemetryMiddleware:\n \"\"\"The WSGI application middleware.\n\n This class is a PEP 3333 conforming WSGI middleware that starts and\n annotates spans for any requests it is invoked with.\n\n Args:\n wsgi: The WSGI application callable to forward requests to.\n request_hook: Optional callback which is called with the server span and WSGI\n environ object for every incoming request.\n response_hook: Optional callback which is called with the server span,\n WSGI environ, status_code and response_headers for every\n incoming request.\n tracer_provider: Optional tracer provider to use. 
If omitted the current\n globally configured one is used.\n \"\"\"\n\n def __init__(\n self, wsgi, request_hook=None, response_hook=None, tracer_provider=None\n ):\n self.wsgi = wsgi\n self.tracer = trace.get_tracer(__name__, __version__, tracer_provider)\n self.request_hook = request_hook\n self.response_hook = response_hook\n\n @staticmethod\n def _create_start_response(span, start_response, response_hook):\n @functools.wraps(start_response)\n def _start_response(status, response_headers, *args, **kwargs):\n add_response_attributes(span, status, response_headers)\n if response_hook:\n response_hook(status, response_headers)\n return start_response(status, response_headers, *args, **kwargs)\n\n return _start_response\n\n def __call__(self, environ, start_response):\n \"\"\"The WSGI application\n\n Args:\n environ: A WSGI environment.\n start_response: The WSGI start_response callable.\n \"\"\"\n\n token = context.attach(extract(environ, getter=wsgi_getter))\n\n span = self.tracer.start_span(\n get_default_span_name(environ),\n kind=trace.SpanKind.SERVER,\n attributes=collect_request_attributes(environ),\n )\n\n if self.request_hook:\n self.request_hook(span, environ)\n\n response_hook = self.response_hook\n if response_hook:\n response_hook = functools.partial(response_hook, span, environ)\n\n try:\n with trace.use_span(span):\n start_response = self._create_start_response(\n span, start_response, response_hook\n )\n iterable = self.wsgi(environ, start_response)\n return _end_span_after_iterating(\n iterable, span, self.tracer, token\n )\n except Exception as ex:\n if span.is_recording():\n span.set_status(Status(StatusCode.ERROR, str(ex)))\n span.end()\n context.detach(token)\n raise\n\n\n# Put this in a subfunction to not delay the call to the wrapped\n# WSGI application (instrumentation should change the application\n# behavior as little as possible).\ndef _end_span_after_iterating(iterable, span, tracer, token):\n try:\n with trace.use_span(span):\n for yielded in iterable:\n yield yielded\n finally:\n close = getattr(iterable, \"close\", None)\n if close:\n close()\n span.end()\n context.detach(token)\n\n\n# TODO: inherit from opentelemetry.instrumentation.propagators.Setter\n\n\nclass ResponsePropagationSetter:\n def set(self, carrier, key, value): # pylint: disable=no-self-use\n carrier.append((key, value))\n\n\ndefault_response_propagation_setter = ResponsePropagationSetter()\n", "path": "instrumentation/opentelemetry-instrumentation-wsgi/src/opentelemetry/instrumentation/wsgi/__init__.py"}]}
3,767
191
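The diff above skips the `NET_HOST_PORT` attribute when `SERVER_PORT` is an empty string, which is what a WSGI server bound to a Unix domain socket (cheroot in the report) places in the environ. A minimal sketch of the failing and fixed checks, independent of the instrumentation package; the plain string key below merely stands in for `SpanAttributes.NET_HOST_PORT`:

```python
# Typical environ entry when the server listens on a unix socket instead of TCP.
environ = {"SERVER_PORT": ""}

host_port = environ.get("SERVER_PORT")
result = {}

# Unpatched check: '' is not None, so int('') raises
# "ValueError: invalid literal for int() with base 10: ''"
# if host_port is not None:
#     result["net.host.port"] = int(host_port)

# Patched check mirrors the golden diff: also skip the empty string.
if host_port is not None and not host_port == "":
    result["net.host.port"] = int(host_port)

print(result)  # -> {}, the port attribute is simply omitted for domain sockets
```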
gh_patches_debug_34727
rasdani/github-patches
git_diff
ansible__ansible-43542
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- 'slack' callback plugin not working <!--- Verify first that your issue/request is not already reported on GitHub. THIS FORM WILL BE READ BY A MACHINE, COMPLETE ALL SECTIONS AS DESCRIBED. Also test if the latest release, and devel branch are affected too. ALWAYS add information AFTER (OUTSIDE) these html comments. Otherwise it may end up being automatically closed by our bot. --> ##### SUMMARY ansible-playbook with slack module produces an error in callback plugin ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME slack.CallbackModule ##### ANSIBLE VERSION <!--- Paste, BELOW THIS COMMENT, verbatim output from "ansible --version" between quotes below --> ``` ansible 2.6.1 config file = /Users/mikejames/GitHub/ConfigurationManagement/ansible/ansible.cfg configured module search path = [u'/Users/mikejames/.ansible/plugins/modules', u'/usr/share/ansible/plugins/modules'] ansible python module location = /usr/local/Cellar/ansible/2.6.1/libexec/lib/python2.7/site-packages/ansible executable location = /usr/local/bin/ansible python version = 2.7.15 (default, Jun 17 2018, 12:46:58) [GCC 4.2.1 Compatible Apple LLVM 9.1.0 (clang-902.0.39.2)] ``` ##### CONFIGURATION <!--- If using Ansible 2.4 or above, paste, BELOW THIS COMMENT, the results of "ansible-config dump --only-changed" Otherwise, mention any settings you have changed/added/removed in ansible.cfg (or using the ANSIBLE_* environment variables).--> ``` DEFAULT_CALLBACK_PLUGIN_PATH(/Users/mikejames/GitHub/ConfigurationManagement/ansible/ansible.cfg) = [u'/Users/mikejames/GitHub/ConfigurationManagement/ansible/playbooks/callback_plugins'] DEFAULT_CALLBACK_WHITELIST(/Users/mikejames/GitHub/ConfigurationManagement/ansible/ansible.cfg) = ['profile_tasks', 'timer', 'slack'] DEFAULT_HOST_LIST(/Users/mikejames/GitHub/ConfigurationManagement/ansible/ansible.cfg) = [u'/Users/mikejames/GitHub/ConfigurationManagement/ansible/inventory'] DEFAULT_LOG_PATH(/Users/mikejames/GitHub/ConfigurationManagement/ansible/ansible.cfg) = /Users/mikejames/GitHub/ConfigurationManagement/ansible/logs/ansible.log DEFAULT_ROLES_PATH(/Users/mikejames/GitHub/ConfigurationManagement/ansible/ansible.cfg) = [u'/Users/mikejames/GitHub/ConfigurationManagement/ansible/playbooks/roles'] HOST_KEY_CHECKING(/Users/mikejames/GitHub/ConfigurationManagement/ansible/ansible.cfg) = False ``` ##### OS / ENVIRONMENT <!--- Mention, BELOW THIS COMMENT, the OS you are running Ansible from, and the OS you are managing, or say "N/A" for anything that is not platform-specific. Also mention the specific version of what you are trying to control, e.g. if this is a network bug the version of firmware on the network device.--> macOS High Sierra 10.13.6 ##### STEPS TO REPRODUCE <!--- For bugs, show exactly how to reproduce the problem, using a minimal test-case. For new features, show how the feature would be used. --> https://gist.github.com/tightly-clutched/05a40814d3271b51a6530163e3209299 <!--- You can also paste gist.github.com links for larger files --> ##### EXPECTED RESULTS <!--- What did you expect to happen when running the steps above? --> I expected a result to be posted to slack channel ##### ACTUAL RESULTS <!--- What actually happened? 
If possible run with extra verbosity (-vvvv) --> playbook execution failed <!--- Paste verbatim command output between quotes below --> https://gist.github.com/tightly-clutched/05a40814d3271b51a6530163e3209299 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `lib/ansible/plugins/callback/slack.py` Content: ``` 1 # (C) 2014-2015, Matt Martz <[email protected]> 2 # (C) 2017 Ansible Project 3 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) 4 5 # Make coding more python3-ish 6 from __future__ import (absolute_import, division, print_function) 7 __metaclass__ = type 8 9 DOCUMENTATION = ''' 10 callback: slack 11 callback_type: notification 12 requirements: 13 - whitelist in configuration 14 - prettytable (python library) 15 short_description: Sends play events to a Slack channel 16 version_added: "2.1" 17 description: 18 - This is an ansible callback plugin that sends status updates to a Slack channel during playbook execution. 19 - Before 2.4 only environment variables were available for configuring this plugin 20 options: 21 webhook_url: 22 required: True 23 description: Slack Webhook URL 24 env: 25 - name: SLACK_WEBHOOK_URL 26 ini: 27 - section: callback_slack 28 key: webhook_url 29 channel: 30 default: "#ansible" 31 description: Slack room to post in. 32 env: 33 - name: SLACK_CHANNEL 34 ini: 35 - section: callback_slack 36 key: channel 37 username: 38 description: Username to post as. 39 env: 40 - name: SLACK_USERNAME 41 default: ansible 42 ini: 43 - section: callback_slack 44 key: username 45 ''' 46 47 import json 48 import os 49 import uuid 50 51 try: 52 from __main__ import cli 53 except ImportError: 54 cli = None 55 56 from ansible.module_utils.urls import open_url 57 from ansible.plugins.callback import CallbackBase 58 59 try: 60 import prettytable 61 HAS_PRETTYTABLE = True 62 except ImportError: 63 HAS_PRETTYTABLE = False 64 65 66 class CallbackModule(CallbackBase): 67 """This is an ansible callback plugin that sends status 68 updates to a Slack channel during playbook execution. 69 """ 70 CALLBACK_VERSION = 2.0 71 CALLBACK_TYPE = 'notification' 72 CALLBACK_NAME = 'slack' 73 CALLBACK_NEEDS_WHITELIST = True 74 75 def __init__(self, display=None): 76 77 super(CallbackModule, self).__init__(display=display) 78 79 if not HAS_PRETTYTABLE: 80 self.disabled = True 81 self._display.warning('The `prettytable` python module is not ' 82 'installed. Disabling the Slack callback ' 83 'plugin.') 84 85 self.playbook_name = None 86 87 # This is a 6 character identifier provided with each message 88 # This makes it easier to correlate messages when there are more 89 # than 1 simultaneous playbooks running 90 self.guid = uuid.uuid4().hex[:6] 91 92 def set_options(self, task_keys=None, var_options=None, direct=None): 93 94 super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct) 95 96 self.webhook_url = self.get_option('webhook_url') 97 self.channel = self.get_option('channel') 98 self.username = self.get_option('username') 99 self.show_invocation = (self._display.verbosity > 1) 100 101 if self.webhook_url is None: 102 self.disabled = True 103 self._display.warning('Slack Webhook URL was not provided. 
The ' 104 'Slack Webhook URL can be provided using ' 105 'the `SLACK_WEBHOOK_URL` environment ' 106 'variable.') 107 108 def send_msg(self, attachments): 109 payload = { 110 'channel': self.channel, 111 'username': self.username, 112 'attachments': attachments, 113 'parse': 'none', 114 'icon_url': ('http://cdn2.hubspot.net/hub/330046/' 115 'file-449187601-png/ansible_badge.png'), 116 } 117 118 data = json.dumps(payload) 119 self._display.debug(data) 120 self._display.debug(self.webhook_url) 121 try: 122 response = open_url(self.webhook_url, data=data) 123 return response.read() 124 except Exception as e: 125 self._display.warning('Could not submit message to Slack: %s' % 126 str(e)) 127 128 def v2_playbook_on_start(self, playbook): 129 self.playbook_name = os.path.basename(playbook._file_name) 130 131 title = [ 132 '*Playbook initiated* (_%s_)' % self.guid 133 ] 134 invocation_items = [] 135 if self._plugin_options and self.show_invocation: 136 tags = self.get_option('tags') 137 skip_tags = self.get_option('skip_tags') 138 extra_vars = self.get_option('extra_vars') 139 subset = self.get_option('subset') 140 inventory = os.path.basename( 141 os.path.realpath(self.get_option('inventory')) 142 ) 143 144 invocation_items.append('Inventory: %s' % inventory) 145 if tags and tags != 'all': 146 invocation_items.append('Tags: %s' % tags) 147 if skip_tags: 148 invocation_items.append('Skip Tags: %s' % skip_tags) 149 if subset: 150 invocation_items.append('Limit: %s' % subset) 151 if extra_vars: 152 invocation_items.append('Extra Vars: %s' % 153 ' '.join(extra_vars)) 154 155 title.append('by *%s*' % self.get_option('remote_user')) 156 157 title.append('\n\n*%s*' % self.playbook_name) 158 msg_items = [' '.join(title)] 159 if invocation_items: 160 msg_items.append('```\n%s\n```' % '\n'.join(invocation_items)) 161 162 msg = '\n'.join(msg_items) 163 164 attachments = [{ 165 'fallback': msg, 166 'fields': [ 167 { 168 'value': msg 169 } 170 ], 171 'color': 'warning', 172 'mrkdwn_in': ['text', 'fallback', 'fields'], 173 }] 174 175 self.send_msg(attachments=attachments) 176 177 def v2_playbook_on_play_start(self, play): 178 """Display Play start messages""" 179 180 name = play.name or 'Play name not specified (%s)' % play._uuid 181 msg = '*Starting play* (_%s_)\n\n*%s*' % (self.guid, name) 182 attachments = [ 183 { 184 'fallback': msg, 185 'text': msg, 186 'color': 'warning', 187 'mrkdwn_in': ['text', 'fallback', 'fields'], 188 } 189 ] 190 self.send_msg(attachments=attachments) 191 192 def v2_playbook_on_stats(self, stats): 193 """Display info about playbook statistics""" 194 195 hosts = sorted(stats.processed.keys()) 196 197 t = prettytable.PrettyTable(['Host', 'Ok', 'Changed', 'Unreachable', 198 'Failures']) 199 200 failures = False 201 unreachable = False 202 203 for h in hosts: 204 s = stats.summarize(h) 205 206 if s['failures'] > 0: 207 failures = True 208 if s['unreachable'] > 0: 209 unreachable = True 210 211 t.add_row([h] + [s[k] for k in ['ok', 'changed', 'unreachable', 212 'failures']]) 213 214 attachments = [] 215 msg_items = [ 216 '*Playbook Complete* (_%s_)' % self.guid 217 ] 218 if failures or unreachable: 219 color = 'danger' 220 msg_items.append('\n*Failed!*') 221 else: 222 color = 'good' 223 msg_items.append('\n*Success!*') 224 225 msg_items.append('```\n%s\n```' % t) 226 227 msg = '\n'.join(msg_items) 228 229 attachments.append({ 230 'fallback': msg, 231 'fields': [ 232 { 233 'value': msg 234 } 235 ], 236 'color': color, 237 'mrkdwn_in': ['text', 'fallback', 'fields'] 238 }) 239 240 
self.send_msg(attachments=attachments) 241 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/lib/ansible/plugins/callback/slack.py b/lib/ansible/plugins/callback/slack.py --- a/lib/ansible/plugins/callback/slack.py +++ b/lib/ansible/plugins/callback/slack.py @@ -76,6 +76,8 @@ super(CallbackModule, self).__init__(display=display) + self._options = cli.options + if not HAS_PRETTYTABLE: self.disabled = True self._display.warning('The `prettytable` python module is not ' @@ -132,27 +134,25 @@ '*Playbook initiated* (_%s_)' % self.guid ] invocation_items = [] - if self._plugin_options and self.show_invocation: - tags = self.get_option('tags') - skip_tags = self.get_option('skip_tags') - extra_vars = self.get_option('extra_vars') - subset = self.get_option('subset') - inventory = os.path.basename( - os.path.realpath(self.get_option('inventory')) - ) - - invocation_items.append('Inventory: %s' % inventory) - if tags and tags != 'all': - invocation_items.append('Tags: %s' % tags) + if self._options and self.show_invocation: + tags = self._options.tags + skip_tags = self._options.skip_tags + extra_vars = self._options.extra_vars + subset = self._options.subset + inventory = [os.path.abspath(i) for i in self._options.inventory] + + invocation_items.append('Inventory: %s' % ', '.join(inventory)) + if tags and tags != ['all']: + invocation_items.append('Tags: %s' % ', '.join(tags)) if skip_tags: - invocation_items.append('Skip Tags: %s' % skip_tags) + invocation_items.append('Skip Tags: %s' % ', '.join(skip_tags)) if subset: invocation_items.append('Limit: %s' % subset) if extra_vars: invocation_items.append('Extra Vars: %s' % ' '.join(extra_vars)) - title.append('by *%s*' % self.get_option('remote_user')) + title.append('by *%s*' % self._options.remote_user) title.append('\n\n*%s*' % self.playbook_name) msg_items = [' '.join(title)]
{"golden_diff": "diff --git a/lib/ansible/plugins/callback/slack.py b/lib/ansible/plugins/callback/slack.py\n--- a/lib/ansible/plugins/callback/slack.py\n+++ b/lib/ansible/plugins/callback/slack.py\n@@ -76,6 +76,8 @@\n \n super(CallbackModule, self).__init__(display=display)\n \n+ self._options = cli.options\n+\n if not HAS_PRETTYTABLE:\n self.disabled = True\n self._display.warning('The `prettytable` python module is not '\n@@ -132,27 +134,25 @@\n '*Playbook initiated* (_%s_)' % self.guid\n ]\n invocation_items = []\n- if self._plugin_options and self.show_invocation:\n- tags = self.get_option('tags')\n- skip_tags = self.get_option('skip_tags')\n- extra_vars = self.get_option('extra_vars')\n- subset = self.get_option('subset')\n- inventory = os.path.basename(\n- os.path.realpath(self.get_option('inventory'))\n- )\n-\n- invocation_items.append('Inventory: %s' % inventory)\n- if tags and tags != 'all':\n- invocation_items.append('Tags: %s' % tags)\n+ if self._options and self.show_invocation:\n+ tags = self._options.tags\n+ skip_tags = self._options.skip_tags\n+ extra_vars = self._options.extra_vars\n+ subset = self._options.subset\n+ inventory = [os.path.abspath(i) for i in self._options.inventory]\n+\n+ invocation_items.append('Inventory: %s' % ', '.join(inventory))\n+ if tags and tags != ['all']:\n+ invocation_items.append('Tags: %s' % ', '.join(tags))\n if skip_tags:\n- invocation_items.append('Skip Tags: %s' % skip_tags)\n+ invocation_items.append('Skip Tags: %s' % ', '.join(skip_tags))\n if subset:\n invocation_items.append('Limit: %s' % subset)\n if extra_vars:\n invocation_items.append('Extra Vars: %s' %\n ' '.join(extra_vars))\n \n- title.append('by *%s*' % self.get_option('remote_user'))\n+ title.append('by *%s*' % self._options.remote_user)\n \n title.append('\\n\\n*%s*' % self.playbook_name)\n msg_items = [' '.join(title)]\n", "issue": "'slack' callback plugin not working\n<!---\r\nVerify first that your issue/request is not already reported on GitHub.\r\nTHIS FORM WILL BE READ BY A MACHINE, COMPLETE ALL SECTIONS AS DESCRIBED.\r\nAlso test if the latest release, and devel branch are affected too.\r\nALWAYS add information AFTER (OUTSIDE) these html comments.\r\nOtherwise it may end up being automatically closed by our bot. 
-->\r\n\r\n##### SUMMARY \r\nansible-playbook with slack module produces an error in callback plugin\r\n\r\n##### ISSUE TYPE\r\n - Bug Report\r\n\r\n##### COMPONENT NAME\r\nslack.CallbackModule\r\n\r\n##### ANSIBLE VERSION\r\n<!--- Paste, BELOW THIS COMMENT, verbatim output from \"ansible --version\" between quotes below -->\r\n```\r\nansible 2.6.1\r\n config file = /Users/mikejames/GitHub/ConfigurationManagement/ansible/ansible.cfg\r\n configured module search path = [u'/Users/mikejames/.ansible/plugins/modules', u'/usr/share/ansible/plugins/modules']\r\n ansible python module location = /usr/local/Cellar/ansible/2.6.1/libexec/lib/python2.7/site-packages/ansible\r\n executable location = /usr/local/bin/ansible\r\n python version = 2.7.15 (default, Jun 17 2018, 12:46:58) [GCC 4.2.1 Compatible Apple LLVM 9.1.0 (clang-902.0.39.2)]\r\n```\r\n\r\n##### CONFIGURATION\r\n<!--- If using Ansible 2.4 or above, paste, BELOW THIS COMMENT, the results of \"ansible-config dump --only-changed\"\r\nOtherwise, mention any settings you have changed/added/removed in ansible.cfg\r\n(or using the ANSIBLE_* environment variables).-->\r\n```\r\nDEFAULT_CALLBACK_PLUGIN_PATH(/Users/mikejames/GitHub/ConfigurationManagement/ansible/ansible.cfg) = [u'/Users/mikejames/GitHub/ConfigurationManagement/ansible/playbooks/callback_plugins']\r\nDEFAULT_CALLBACK_WHITELIST(/Users/mikejames/GitHub/ConfigurationManagement/ansible/ansible.cfg) = ['profile_tasks', 'timer', 'slack']\r\nDEFAULT_HOST_LIST(/Users/mikejames/GitHub/ConfigurationManagement/ansible/ansible.cfg) = [u'/Users/mikejames/GitHub/ConfigurationManagement/ansible/inventory']\r\nDEFAULT_LOG_PATH(/Users/mikejames/GitHub/ConfigurationManagement/ansible/ansible.cfg) = /Users/mikejames/GitHub/ConfigurationManagement/ansible/logs/ansible.log\r\nDEFAULT_ROLES_PATH(/Users/mikejames/GitHub/ConfigurationManagement/ansible/ansible.cfg) = [u'/Users/mikejames/GitHub/ConfigurationManagement/ansible/playbooks/roles']\r\nHOST_KEY_CHECKING(/Users/mikejames/GitHub/ConfigurationManagement/ansible/ansible.cfg) = False\r\n```\r\n\r\n##### OS / ENVIRONMENT\r\n<!--- Mention, BELOW THIS COMMENT, the OS you are running Ansible from, and the OS you are\r\nmanaging, or say \"N/A\" for anything that is not platform-specific.\r\nAlso mention the specific version of what you are trying to control,\r\ne.g. if this is a network bug the version of firmware on the network device.-->\r\nmacOS High Sierra 10.13.6\r\n\r\n##### STEPS TO REPRODUCE\r\n<!--- For bugs, show exactly how to reproduce the problem, using a minimal test-case.\r\nFor new features, show how the feature would be used. -->\r\n\r\nhttps://gist.github.com/tightly-clutched/05a40814d3271b51a6530163e3209299\r\n<!--- You can also paste gist.github.com links for larger files -->\r\n\r\n##### EXPECTED RESULTS\r\n<!--- What did you expect to happen when running the steps above? -->\r\nI expected a result to be posted to slack channel\r\n\r\n##### ACTUAL RESULTS\r\n<!--- What actually happened? 
If possible run with extra verbosity (-vvvv) -->\r\nplaybook execution failed \r\n\r\n<!--- Paste verbatim command output between quotes below -->\r\nhttps://gist.github.com/tightly-clutched/05a40814d3271b51a6530163e3209299\n", "before_files": [{"content": "# (C) 2014-2015, Matt Martz <[email protected]>\n# (C) 2017 Ansible Project\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\n# Make coding more python3-ish\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nDOCUMENTATION = '''\n callback: slack\n callback_type: notification\n requirements:\n - whitelist in configuration\n - prettytable (python library)\n short_description: Sends play events to a Slack channel\n version_added: \"2.1\"\n description:\n - This is an ansible callback plugin that sends status updates to a Slack channel during playbook execution.\n - Before 2.4 only environment variables were available for configuring this plugin\n options:\n webhook_url:\n required: True\n description: Slack Webhook URL\n env:\n - name: SLACK_WEBHOOK_URL\n ini:\n - section: callback_slack\n key: webhook_url\n channel:\n default: \"#ansible\"\n description: Slack room to post in.\n env:\n - name: SLACK_CHANNEL\n ini:\n - section: callback_slack\n key: channel\n username:\n description: Username to post as.\n env:\n - name: SLACK_USERNAME\n default: ansible\n ini:\n - section: callback_slack\n key: username\n'''\n\nimport json\nimport os\nimport uuid\n\ntry:\n from __main__ import cli\nexcept ImportError:\n cli = None\n\nfrom ansible.module_utils.urls import open_url\nfrom ansible.plugins.callback import CallbackBase\n\ntry:\n import prettytable\n HAS_PRETTYTABLE = True\nexcept ImportError:\n HAS_PRETTYTABLE = False\n\n\nclass CallbackModule(CallbackBase):\n \"\"\"This is an ansible callback plugin that sends status\n updates to a Slack channel during playbook execution.\n \"\"\"\n CALLBACK_VERSION = 2.0\n CALLBACK_TYPE = 'notification'\n CALLBACK_NAME = 'slack'\n CALLBACK_NEEDS_WHITELIST = True\n\n def __init__(self, display=None):\n\n super(CallbackModule, self).__init__(display=display)\n\n if not HAS_PRETTYTABLE:\n self.disabled = True\n self._display.warning('The `prettytable` python module is not '\n 'installed. Disabling the Slack callback '\n 'plugin.')\n\n self.playbook_name = None\n\n # This is a 6 character identifier provided with each message\n # This makes it easier to correlate messages when there are more\n # than 1 simultaneous playbooks running\n self.guid = uuid.uuid4().hex[:6]\n\n def set_options(self, task_keys=None, var_options=None, direct=None):\n\n super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)\n\n self.webhook_url = self.get_option('webhook_url')\n self.channel = self.get_option('channel')\n self.username = self.get_option('username')\n self.show_invocation = (self._display.verbosity > 1)\n\n if self.webhook_url is None:\n self.disabled = True\n self._display.warning('Slack Webhook URL was not provided. 
The '\n 'Slack Webhook URL can be provided using '\n 'the `SLACK_WEBHOOK_URL` environment '\n 'variable.')\n\n def send_msg(self, attachments):\n payload = {\n 'channel': self.channel,\n 'username': self.username,\n 'attachments': attachments,\n 'parse': 'none',\n 'icon_url': ('http://cdn2.hubspot.net/hub/330046/'\n 'file-449187601-png/ansible_badge.png'),\n }\n\n data = json.dumps(payload)\n self._display.debug(data)\n self._display.debug(self.webhook_url)\n try:\n response = open_url(self.webhook_url, data=data)\n return response.read()\n except Exception as e:\n self._display.warning('Could not submit message to Slack: %s' %\n str(e))\n\n def v2_playbook_on_start(self, playbook):\n self.playbook_name = os.path.basename(playbook._file_name)\n\n title = [\n '*Playbook initiated* (_%s_)' % self.guid\n ]\n invocation_items = []\n if self._plugin_options and self.show_invocation:\n tags = self.get_option('tags')\n skip_tags = self.get_option('skip_tags')\n extra_vars = self.get_option('extra_vars')\n subset = self.get_option('subset')\n inventory = os.path.basename(\n os.path.realpath(self.get_option('inventory'))\n )\n\n invocation_items.append('Inventory: %s' % inventory)\n if tags and tags != 'all':\n invocation_items.append('Tags: %s' % tags)\n if skip_tags:\n invocation_items.append('Skip Tags: %s' % skip_tags)\n if subset:\n invocation_items.append('Limit: %s' % subset)\n if extra_vars:\n invocation_items.append('Extra Vars: %s' %\n ' '.join(extra_vars))\n\n title.append('by *%s*' % self.get_option('remote_user'))\n\n title.append('\\n\\n*%s*' % self.playbook_name)\n msg_items = [' '.join(title)]\n if invocation_items:\n msg_items.append('```\\n%s\\n```' % '\\n'.join(invocation_items))\n\n msg = '\\n'.join(msg_items)\n\n attachments = [{\n 'fallback': msg,\n 'fields': [\n {\n 'value': msg\n }\n ],\n 'color': 'warning',\n 'mrkdwn_in': ['text', 'fallback', 'fields'],\n }]\n\n self.send_msg(attachments=attachments)\n\n def v2_playbook_on_play_start(self, play):\n \"\"\"Display Play start messages\"\"\"\n\n name = play.name or 'Play name not specified (%s)' % play._uuid\n msg = '*Starting play* (_%s_)\\n\\n*%s*' % (self.guid, name)\n attachments = [\n {\n 'fallback': msg,\n 'text': msg,\n 'color': 'warning',\n 'mrkdwn_in': ['text', 'fallback', 'fields'],\n }\n ]\n self.send_msg(attachments=attachments)\n\n def v2_playbook_on_stats(self, stats):\n \"\"\"Display info about playbook statistics\"\"\"\n\n hosts = sorted(stats.processed.keys())\n\n t = prettytable.PrettyTable(['Host', 'Ok', 'Changed', 'Unreachable',\n 'Failures'])\n\n failures = False\n unreachable = False\n\n for h in hosts:\n s = stats.summarize(h)\n\n if s['failures'] > 0:\n failures = True\n if s['unreachable'] > 0:\n unreachable = True\n\n t.add_row([h] + [s[k] for k in ['ok', 'changed', 'unreachable',\n 'failures']])\n\n attachments = []\n msg_items = [\n '*Playbook Complete* (_%s_)' % self.guid\n ]\n if failures or unreachable:\n color = 'danger'\n msg_items.append('\\n*Failed!*')\n else:\n color = 'good'\n msg_items.append('\\n*Success!*')\n\n msg_items.append('```\\n%s\\n```' % t)\n\n msg = '\\n'.join(msg_items)\n\n attachments.append({\n 'fallback': msg,\n 'fields': [\n {\n 'value': msg\n }\n ],\n 'color': color,\n 'mrkdwn_in': ['text', 'fallback', 'fields']\n })\n\n self.send_msg(attachments=attachments)\n", "path": "lib/ansible/plugins/callback/slack.py"}], "after_files": [{"content": "# (C) 2014-2015, Matt Martz <[email protected]>\n# (C) 2017 Ansible Project\n# GNU General Public License v3.0+ (see COPYING or 
https://www.gnu.org/licenses/gpl-3.0.txt)\n\n# Make coding more python3-ish\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nDOCUMENTATION = '''\n callback: slack\n callback_type: notification\n requirements:\n - whitelist in configuration\n - prettytable (python library)\n short_description: Sends play events to a Slack channel\n version_added: \"2.1\"\n description:\n - This is an ansible callback plugin that sends status updates to a Slack channel during playbook execution.\n - Before 2.4 only environment variables were available for configuring this plugin\n options:\n webhook_url:\n required: True\n description: Slack Webhook URL\n env:\n - name: SLACK_WEBHOOK_URL\n ini:\n - section: callback_slack\n key: webhook_url\n channel:\n default: \"#ansible\"\n description: Slack room to post in.\n env:\n - name: SLACK_CHANNEL\n ini:\n - section: callback_slack\n key: channel\n username:\n description: Username to post as.\n env:\n - name: SLACK_USERNAME\n default: ansible\n ini:\n - section: callback_slack\n key: username\n'''\n\nimport json\nimport os\nimport uuid\n\ntry:\n from __main__ import cli\nexcept ImportError:\n cli = None\n\nfrom ansible.module_utils.urls import open_url\nfrom ansible.plugins.callback import CallbackBase\n\ntry:\n import prettytable\n HAS_PRETTYTABLE = True\nexcept ImportError:\n HAS_PRETTYTABLE = False\n\n\nclass CallbackModule(CallbackBase):\n \"\"\"This is an ansible callback plugin that sends status\n updates to a Slack channel during playbook execution.\n \"\"\"\n CALLBACK_VERSION = 2.0\n CALLBACK_TYPE = 'notification'\n CALLBACK_NAME = 'slack'\n CALLBACK_NEEDS_WHITELIST = True\n\n def __init__(self, display=None):\n\n super(CallbackModule, self).__init__(display=display)\n\n self._options = cli.options\n\n if not HAS_PRETTYTABLE:\n self.disabled = True\n self._display.warning('The `prettytable` python module is not '\n 'installed. Disabling the Slack callback '\n 'plugin.')\n\n self.playbook_name = None\n\n # This is a 6 character identifier provided with each message\n # This makes it easier to correlate messages when there are more\n # than 1 simultaneous playbooks running\n self.guid = uuid.uuid4().hex[:6]\n\n def set_options(self, task_keys=None, var_options=None, direct=None):\n\n super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)\n\n self.webhook_url = self.get_option('webhook_url')\n self.channel = self.get_option('channel')\n self.username = self.get_option('username')\n self.show_invocation = (self._display.verbosity > 1)\n\n if self.webhook_url is None:\n self.disabled = True\n self._display.warning('Slack Webhook URL was not provided. 
The '\n 'Slack Webhook URL can be provided using '\n 'the `SLACK_WEBHOOK_URL` environment '\n 'variable.')\n\n def send_msg(self, attachments):\n payload = {\n 'channel': self.channel,\n 'username': self.username,\n 'attachments': attachments,\n 'parse': 'none',\n 'icon_url': ('http://cdn2.hubspot.net/hub/330046/'\n 'file-449187601-png/ansible_badge.png'),\n }\n\n data = json.dumps(payload)\n self._display.debug(data)\n self._display.debug(self.webhook_url)\n try:\n response = open_url(self.webhook_url, data=data)\n return response.read()\n except Exception as e:\n self._display.warning('Could not submit message to Slack: %s' %\n str(e))\n\n def v2_playbook_on_start(self, playbook):\n self.playbook_name = os.path.basename(playbook._file_name)\n\n title = [\n '*Playbook initiated* (_%s_)' % self.guid\n ]\n invocation_items = []\n if self._options and self.show_invocation:\n tags = self._options.tags\n skip_tags = self._options.skip_tags\n extra_vars = self._options.extra_vars\n subset = self._options.subset\n inventory = [os.path.abspath(i) for i in self._options.inventory]\n\n invocation_items.append('Inventory: %s' % ', '.join(inventory))\n if tags and tags != ['all']:\n invocation_items.append('Tags: %s' % ', '.join(tags))\n if skip_tags:\n invocation_items.append('Skip Tags: %s' % ', '.join(skip_tags))\n if subset:\n invocation_items.append('Limit: %s' % subset)\n if extra_vars:\n invocation_items.append('Extra Vars: %s' %\n ' '.join(extra_vars))\n\n title.append('by *%s*' % self._options.remote_user)\n\n title.append('\\n\\n*%s*' % self.playbook_name)\n msg_items = [' '.join(title)]\n if invocation_items:\n msg_items.append('```\\n%s\\n```' % '\\n'.join(invocation_items))\n\n msg = '\\n'.join(msg_items)\n\n attachments = [{\n 'fallback': msg,\n 'fields': [\n {\n 'value': msg\n }\n ],\n 'color': 'warning',\n 'mrkdwn_in': ['text', 'fallback', 'fields'],\n }]\n\n self.send_msg(attachments=attachments)\n\n def v2_playbook_on_play_start(self, play):\n \"\"\"Display Play start messages\"\"\"\n\n name = play.name or 'Play name not specified (%s)' % play._uuid\n msg = '*Starting play* (_%s_)\\n\\n*%s*' % (self.guid, name)\n attachments = [\n {\n 'fallback': msg,\n 'text': msg,\n 'color': 'warning',\n 'mrkdwn_in': ['text', 'fallback', 'fields'],\n }\n ]\n self.send_msg(attachments=attachments)\n\n def v2_playbook_on_stats(self, stats):\n \"\"\"Display info about playbook statistics\"\"\"\n\n hosts = sorted(stats.processed.keys())\n\n t = prettytable.PrettyTable(['Host', 'Ok', 'Changed', 'Unreachable',\n 'Failures'])\n\n failures = False\n unreachable = False\n\n for h in hosts:\n s = stats.summarize(h)\n\n if s['failures'] > 0:\n failures = True\n if s['unreachable'] > 0:\n unreachable = True\n\n t.add_row([h] + [s[k] for k in ['ok', 'changed', 'unreachable',\n 'failures']])\n\n attachments = []\n msg_items = [\n '*Playbook Complete* (_%s_)' % self.guid\n ]\n if failures or unreachable:\n color = 'danger'\n msg_items.append('\\n*Failed!*')\n else:\n color = 'good'\n msg_items.append('\\n*Success!*')\n\n msg_items.append('```\\n%s\\n```' % t)\n\n msg = '\\n'.join(msg_items)\n\n attachments.append({\n 'fallback': msg,\n 'fields': [\n {\n 'value': msg\n }\n ],\n 'color': color,\n 'mrkdwn_in': ['text', 'fallback', 'fields']\n })\n\n self.send_msg(attachments=attachments)\n", "path": "lib/ansible/plugins/callback/slack.py"}]}
3,503
536
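The accepted patch in the record above stops calling `self.get_option('tags')` and similar keys (which are not declared in the plugin's DOCUMENTATION options, so the lookup fails at playbook start) and instead reads the CLI options object captured at init via `self._options = cli.options`. Below is a minimal standalone sketch of that patched logic; it is not code from the dataset, and the plain `argparse.Namespace` with sample values is a hypothetical stand-in for ansible's real `cli.options` object.

```python
# Hedged sketch of the patched invocation-summary logic from gh_patches_debug_34727.
# Namespace and the sample option values below are stand-ins, not ansible internals.
from argparse import Namespace
import os


def build_invocation_summary(options: Namespace) -> list:
    """Collect playbook invocation details from CLI options, mirroring the patched callback."""
    items = []
    # On the CLI options object, inventory is a list of paths, not a single string.
    inventory = [os.path.abspath(i) for i in options.inventory]
    items.append('Inventory: %s' % ', '.join(inventory))
    # tags/skip_tags are lists on the options object, so the "all" default is ['all'].
    if options.tags and options.tags != ['all']:
        items.append('Tags: %s' % ', '.join(options.tags))
    if options.skip_tags:
        items.append('Skip Tags: %s' % ', '.join(options.skip_tags))
    if options.subset:
        items.append('Limit: %s' % options.subset)
    return items


if __name__ == '__main__':
    opts = Namespace(inventory=['inventory'], tags=['web'], skip_tags=[], subset=None)
    print(build_invocation_summary(opts))
```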
gh_patches_debug_23512
rasdani/github-patches
git_diff
Lightning-AI__torchmetrics-173
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- MetricCollection should enforce order when passed a dict ## 🐛 Bug Not a clear bug yet, but just thinking about distributed metric computation, in here: https://github.com/PyTorchLightning/metrics/blob/53d570158a503497351ae45ec895ca44a0546068/torchmetrics/collections.py#L81 we should make sure to sort the names before the insert so that we insert things in the same order (ModuleDict is already OrderedDict otherwise). If we don't we will get deadlocks when doing distributed metric updates. Additionally, we might want to enforce sorting when passed list/tuple, but that might be more on the user end. ### To Reproduce On each of the workers, pass dictionary with same metrics but in different order, try compute and observe deadlock. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `torchmetrics/collections.py` Content: ``` 1 # Copyright The PyTorch Lightning team. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 from copy import deepcopy 16 from typing import Any, Dict, List, Optional, Tuple, Union 17 18 from torch import nn 19 20 from torchmetrics.metric import Metric 21 22 23 class MetricCollection(nn.ModuleDict): 24 """ 25 MetricCollection class can be used to chain metrics that have the same 26 call pattern into one single class. 27 28 Args: 29 metrics: One of the following 30 31 * list or tuple: if metrics are passed in as a list, will use the 32 metrics class name as key for output dict. Therefore, two metrics 33 of the same class cannot be chained this way. 34 35 * dict: if metrics are passed in as a dict, will use each key in the 36 dict as key for output dict. Use this format if you want to chain 37 together multiple of the same metric with different parameters. 38 39 prefix: a string to append in front of the keys of the output dict 40 41 Raises: 42 ValueError: 43 If one of the elements of ``metrics`` is not an instance of ``pl.metrics.Metric``. 44 ValueError: 45 If two elements in ``metrics`` have the same ``name``. 46 ValueError: 47 If ``metrics`` is not a ``list``, ``tuple`` or a ``dict``. 48 49 Example (input as list): 50 >>> import torch 51 >>> from pprint import pprint 52 >>> from torchmetrics import MetricCollection, Accuracy, Precision, Recall 53 >>> target = torch.tensor([0, 2, 0, 2, 0, 1, 0, 2]) 54 >>> preds = torch.tensor([2, 1, 2, 0, 1, 2, 2, 2]) 55 >>> metrics = MetricCollection([Accuracy(), 56 ... Precision(num_classes=3, average='macro'), 57 ... Recall(num_classes=3, average='macro')]) 58 >>> metrics(preds, target) 59 {'Accuracy': tensor(0.1250), 'Precision': tensor(0.0667), 'Recall': tensor(0.1111)} 60 61 Example (input as dict): 62 >>> metrics = MetricCollection({'micro_recall': Recall(num_classes=3, average='micro'), 63 ... 
'macro_recall': Recall(num_classes=3, average='macro')}) 64 >>> same_metric = metrics.clone() 65 >>> pprint(metrics(preds, target)) 66 {'macro_recall': tensor(0.1111), 'micro_recall': tensor(0.1250)} 67 >>> pprint(same_metric(preds, target)) 68 {'macro_recall': tensor(0.1111), 'micro_recall': tensor(0.1250)} 69 >>> metrics.persistent() 70 71 """ 72 73 def __init__( 74 self, 75 metrics: Union[List[Metric], Tuple[Metric], Dict[str, Metric]], 76 prefix: Optional[str] = None, 77 ): 78 super().__init__() 79 if isinstance(metrics, dict): 80 # Check all values are metrics 81 for name, metric in metrics.items(): 82 if not isinstance(metric, Metric): 83 raise ValueError( 84 f"Value {metric} belonging to key {name}" 85 " is not an instance of `pl.metrics.Metric`" 86 ) 87 self[name] = metric 88 elif isinstance(metrics, (tuple, list)): 89 for metric in metrics: 90 if not isinstance(metric, Metric): 91 raise ValueError( 92 f"Input {metric} to `MetricCollection` is not a instance" 93 " of `pl.metrics.Metric`" 94 ) 95 name = metric.__class__.__name__ 96 if name in self: 97 raise ValueError(f"Encountered two metrics both named {name}") 98 self[name] = metric 99 else: 100 raise ValueError("Unknown input to MetricCollection.") 101 102 self.prefix = self._check_prefix_arg(prefix) 103 104 def forward(self, *args, **kwargs) -> Dict[str, Any]: # pylint: disable=E0202 105 """ 106 Iteratively call forward for each metric. Positional arguments (args) will 107 be passed to every metric in the collection, while keyword arguments (kwargs) 108 will be filtered based on the signature of the individual metric. 109 """ 110 return {self._set_prefix(k): m(*args, **m._filter_kwargs(**kwargs)) for k, m in self.items()} 111 112 def update(self, *args, **kwargs): # pylint: disable=E0202 113 """ 114 Iteratively call update for each metric. Positional arguments (args) will 115 be passed to every metric in the collection, while keyword arguments (kwargs) 116 will be filtered based on the signature of the individual metric. 117 """ 118 for _, m in self.items(): 119 m_kwargs = m._filter_kwargs(**kwargs) 120 m.update(*args, **m_kwargs) 121 122 def compute(self) -> Dict[str, Any]: 123 return {self._set_prefix(k): m.compute() for k, m in self.items()} 124 125 def reset(self) -> None: 126 """ Iteratively call reset for each metric """ 127 for _, m in self.items(): 128 m.reset() 129 130 def clone(self, prefix: Optional[str] = None) -> 'MetricCollection': 131 """ Make a copy of the metric collection 132 Args: 133 prefix: a string to append in front of the metric keys 134 """ 135 mc = deepcopy(self) 136 mc.prefix = self._check_prefix_arg(prefix) 137 return mc 138 139 def persistent(self, mode: bool = True) -> None: 140 """Method for post-init to change if metric states should be saved to 141 its state_dict 142 """ 143 for _, m in self.items(): 144 m.persistent(mode) 145 146 def _set_prefix(self, k: str) -> str: 147 return k if self.prefix is None else self.prefix + k 148 149 @staticmethod 150 def _check_prefix_arg(prefix: str) -> Optional[str]: 151 if prefix is not None: 152 if isinstance(prefix, str): 153 return prefix 154 else: 155 raise ValueError('Expected input `prefix` to be a string') 156 return None 157 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/torchmetrics/collections.py b/torchmetrics/collections.py --- a/torchmetrics/collections.py +++ b/torchmetrics/collections.py @@ -35,6 +35,7 @@ * dict: if metrics are passed in as a dict, will use each key in the dict as key for output dict. Use this format if you want to chain together multiple of the same metric with different parameters. + Note that the keys in the output dict will be sorted alphabetically. prefix: a string to append in front of the keys of the output dict @@ -78,7 +79,9 @@ super().__init__() if isinstance(metrics, dict): # Check all values are metrics - for name, metric in metrics.items(): + # Make sure that metrics are added in deterministic order + for name in sorted(metrics.keys()): + metric = metrics[name] if not isinstance(metric, Metric): raise ValueError( f"Value {metric} belonging to key {name}"
{"golden_diff": "diff --git a/torchmetrics/collections.py b/torchmetrics/collections.py\n--- a/torchmetrics/collections.py\n+++ b/torchmetrics/collections.py\n@@ -35,6 +35,7 @@\n * dict: if metrics are passed in as a dict, will use each key in the\n dict as key for output dict. Use this format if you want to chain\n together multiple of the same metric with different parameters.\n+ Note that the keys in the output dict will be sorted alphabetically.\n \n prefix: a string to append in front of the keys of the output dict\n \n@@ -78,7 +79,9 @@\n super().__init__()\n if isinstance(metrics, dict):\n # Check all values are metrics\n- for name, metric in metrics.items():\n+ # Make sure that metrics are added in deterministic order\n+ for name in sorted(metrics.keys()):\n+ metric = metrics[name]\n if not isinstance(metric, Metric):\n raise ValueError(\n f\"Value {metric} belonging to key {name}\"\n", "issue": "MetricCollection should enforce order when passed a dict\n## \ud83d\udc1b Bug\r\n\r\nNot a clear bug yet, but just thinking about distributed metric computation, in here: https://github.com/PyTorchLightning/metrics/blob/53d570158a503497351ae45ec895ca44a0546068/torchmetrics/collections.py#L81\r\nwe should make sure to sort the names before the insert so that we insert things in the same order (ModuleDict is already OrderedDict otherwise). If we don't we will get deadlocks when doing distributed metric updates.\r\n\r\nAdditionally, we might want to enforce sorting when passed list/tuple, but that might be more on the user end.\r\n\r\n\r\n### To Reproduce\r\n\r\nOn each of the workers, pass dictionary with same metrics but in different order, try compute and observe deadlock.\r\n\r\n\n", "before_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom copy import deepcopy\nfrom typing import Any, Dict, List, Optional, Tuple, Union\n\nfrom torch import nn\n\nfrom torchmetrics.metric import Metric\n\n\nclass MetricCollection(nn.ModuleDict):\n \"\"\"\n MetricCollection class can be used to chain metrics that have the same\n call pattern into one single class.\n\n Args:\n metrics: One of the following\n\n * list or tuple: if metrics are passed in as a list, will use the\n metrics class name as key for output dict. Therefore, two metrics\n of the same class cannot be chained this way.\n\n * dict: if metrics are passed in as a dict, will use each key in the\n dict as key for output dict. 
Use this format if you want to chain\n together multiple of the same metric with different parameters.\n\n prefix: a string to append in front of the keys of the output dict\n\n Raises:\n ValueError:\n If one of the elements of ``metrics`` is not an instance of ``pl.metrics.Metric``.\n ValueError:\n If two elements in ``metrics`` have the same ``name``.\n ValueError:\n If ``metrics`` is not a ``list``, ``tuple`` or a ``dict``.\n\n Example (input as list):\n >>> import torch\n >>> from pprint import pprint\n >>> from torchmetrics import MetricCollection, Accuracy, Precision, Recall\n >>> target = torch.tensor([0, 2, 0, 2, 0, 1, 0, 2])\n >>> preds = torch.tensor([2, 1, 2, 0, 1, 2, 2, 2])\n >>> metrics = MetricCollection([Accuracy(),\n ... Precision(num_classes=3, average='macro'),\n ... Recall(num_classes=3, average='macro')])\n >>> metrics(preds, target)\n {'Accuracy': tensor(0.1250), 'Precision': tensor(0.0667), 'Recall': tensor(0.1111)}\n\n Example (input as dict):\n >>> metrics = MetricCollection({'micro_recall': Recall(num_classes=3, average='micro'),\n ... 'macro_recall': Recall(num_classes=3, average='macro')})\n >>> same_metric = metrics.clone()\n >>> pprint(metrics(preds, target))\n {'macro_recall': tensor(0.1111), 'micro_recall': tensor(0.1250)}\n >>> pprint(same_metric(preds, target))\n {'macro_recall': tensor(0.1111), 'micro_recall': tensor(0.1250)}\n >>> metrics.persistent()\n\n \"\"\"\n\n def __init__(\n self,\n metrics: Union[List[Metric], Tuple[Metric], Dict[str, Metric]],\n prefix: Optional[str] = None,\n ):\n super().__init__()\n if isinstance(metrics, dict):\n # Check all values are metrics\n for name, metric in metrics.items():\n if not isinstance(metric, Metric):\n raise ValueError(\n f\"Value {metric} belonging to key {name}\"\n \" is not an instance of `pl.metrics.Metric`\"\n )\n self[name] = metric\n elif isinstance(metrics, (tuple, list)):\n for metric in metrics:\n if not isinstance(metric, Metric):\n raise ValueError(\n f\"Input {metric} to `MetricCollection` is not a instance\"\n \" of `pl.metrics.Metric`\"\n )\n name = metric.__class__.__name__\n if name in self:\n raise ValueError(f\"Encountered two metrics both named {name}\")\n self[name] = metric\n else:\n raise ValueError(\"Unknown input to MetricCollection.\")\n\n self.prefix = self._check_prefix_arg(prefix)\n\n def forward(self, *args, **kwargs) -> Dict[str, Any]: # pylint: disable=E0202\n \"\"\"\n Iteratively call forward for each metric. Positional arguments (args) will\n be passed to every metric in the collection, while keyword arguments (kwargs)\n will be filtered based on the signature of the individual metric.\n \"\"\"\n return {self._set_prefix(k): m(*args, **m._filter_kwargs(**kwargs)) for k, m in self.items()}\n\n def update(self, *args, **kwargs): # pylint: disable=E0202\n \"\"\"\n Iteratively call update for each metric. 
Positional arguments (args) will\n be passed to every metric in the collection, while keyword arguments (kwargs)\n will be filtered based on the signature of the individual metric.\n \"\"\"\n for _, m in self.items():\n m_kwargs = m._filter_kwargs(**kwargs)\n m.update(*args, **m_kwargs)\n\n def compute(self) -> Dict[str, Any]:\n return {self._set_prefix(k): m.compute() for k, m in self.items()}\n\n def reset(self) -> None:\n \"\"\" Iteratively call reset for each metric \"\"\"\n for _, m in self.items():\n m.reset()\n\n def clone(self, prefix: Optional[str] = None) -> 'MetricCollection':\n \"\"\" Make a copy of the metric collection\n Args:\n prefix: a string to append in front of the metric keys\n \"\"\"\n mc = deepcopy(self)\n mc.prefix = self._check_prefix_arg(prefix)\n return mc\n\n def persistent(self, mode: bool = True) -> None:\n \"\"\"Method for post-init to change if metric states should be saved to\n its state_dict\n \"\"\"\n for _, m in self.items():\n m.persistent(mode)\n\n def _set_prefix(self, k: str) -> str:\n return k if self.prefix is None else self.prefix + k\n\n @staticmethod\n def _check_prefix_arg(prefix: str) -> Optional[str]:\n if prefix is not None:\n if isinstance(prefix, str):\n return prefix\n else:\n raise ValueError('Expected input `prefix` to be a string')\n return None\n", "path": "torchmetrics/collections.py"}], "after_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom copy import deepcopy\nfrom typing import Any, Dict, List, Optional, Tuple, Union\n\nfrom torch import nn\n\nfrom torchmetrics.metric import Metric\n\n\nclass MetricCollection(nn.ModuleDict):\n \"\"\"\n MetricCollection class can be used to chain metrics that have the same\n call pattern into one single class.\n\n Args:\n metrics: One of the following\n\n * list or tuple: if metrics are passed in as a list, will use the\n metrics class name as key for output dict. Therefore, two metrics\n of the same class cannot be chained this way.\n\n * dict: if metrics are passed in as a dict, will use each key in the\n dict as key for output dict. Use this format if you want to chain\n together multiple of the same metric with different parameters.\n Note that the keys in the output dict will be sorted alphabetically.\n\n prefix: a string to append in front of the keys of the output dict\n\n Raises:\n ValueError:\n If one of the elements of ``metrics`` is not an instance of ``pl.metrics.Metric``.\n ValueError:\n If two elements in ``metrics`` have the same ``name``.\n ValueError:\n If ``metrics`` is not a ``list``, ``tuple`` or a ``dict``.\n\n Example (input as list):\n >>> import torch\n >>> from pprint import pprint\n >>> from torchmetrics import MetricCollection, Accuracy, Precision, Recall\n >>> target = torch.tensor([0, 2, 0, 2, 0, 1, 0, 2])\n >>> preds = torch.tensor([2, 1, 2, 0, 1, 2, 2, 2])\n >>> metrics = MetricCollection([Accuracy(),\n ... Precision(num_classes=3, average='macro'),\n ... 
Recall(num_classes=3, average='macro')])\n >>> metrics(preds, target)\n {'Accuracy': tensor(0.1250), 'Precision': tensor(0.0667), 'Recall': tensor(0.1111)}\n\n Example (input as dict):\n >>> metrics = MetricCollection({'micro_recall': Recall(num_classes=3, average='micro'),\n ... 'macro_recall': Recall(num_classes=3, average='macro')})\n >>> same_metric = metrics.clone()\n >>> pprint(metrics(preds, target))\n {'macro_recall': tensor(0.1111), 'micro_recall': tensor(0.1250)}\n >>> pprint(same_metric(preds, target))\n {'macro_recall': tensor(0.1111), 'micro_recall': tensor(0.1250)}\n >>> metrics.persistent()\n\n \"\"\"\n\n def __init__(\n self,\n metrics: Union[List[Metric], Tuple[Metric], Dict[str, Metric]],\n prefix: Optional[str] = None,\n ):\n super().__init__()\n if isinstance(metrics, dict):\n # Check all values are metrics\n # Make sure that metrics are added in deterministic order\n for name in sorted(metrics.keys()):\n metric = metrics[name]\n if not isinstance(metric, Metric):\n raise ValueError(\n f\"Value {metric} belonging to key {name}\"\n \" is not an instance of `pl.metrics.Metric`\"\n )\n self[name] = metric\n elif isinstance(metrics, (tuple, list)):\n for metric in metrics:\n if not isinstance(metric, Metric):\n raise ValueError(\n f\"Input {metric} to `MetricCollection` is not a instance\"\n \" of `pl.metrics.Metric`\"\n )\n name = metric.__class__.__name__\n if name in self:\n raise ValueError(f\"Encountered two metrics both named {name}\")\n self[name] = metric\n else:\n raise ValueError(\"Unknown input to MetricCollection.\")\n\n self.prefix = self._check_prefix_arg(prefix)\n\n def forward(self, *args, **kwargs) -> Dict[str, Any]: # pylint: disable=E0202\n \"\"\"\n Iteratively call forward for each metric. Positional arguments (args) will\n be passed to every metric in the collection, while keyword arguments (kwargs)\n will be filtered based on the signature of the individual metric.\n \"\"\"\n return {self._set_prefix(k): m(*args, **m._filter_kwargs(**kwargs)) for k, m in self.items()}\n\n def update(self, *args, **kwargs): # pylint: disable=E0202\n \"\"\"\n Iteratively call update for each metric. Positional arguments (args) will\n be passed to every metric in the collection, while keyword arguments (kwargs)\n will be filtered based on the signature of the individual metric.\n \"\"\"\n for _, m in self.items():\n m_kwargs = m._filter_kwargs(**kwargs)\n m.update(*args, **m_kwargs)\n\n def compute(self) -> Dict[str, Any]:\n return {self._set_prefix(k): m.compute() for k, m in self.items()}\n\n def reset(self) -> None:\n \"\"\" Iteratively call reset for each metric \"\"\"\n for _, m in self.items():\n m.reset()\n\n def clone(self, prefix: Optional[str] = None) -> 'MetricCollection':\n \"\"\" Make a copy of the metric collection\n Args:\n prefix: a string to append in front of the metric keys\n \"\"\"\n mc = deepcopy(self)\n mc.prefix = self._check_prefix_arg(prefix)\n return mc\n\n def persistent(self, mode: bool = True) -> None:\n \"\"\"Method for post-init to change if metric states should be saved to\n its state_dict\n \"\"\"\n for _, m in self.items():\n m.persistent(mode)\n\n def _set_prefix(self, k: str) -> str:\n return k if self.prefix is None else self.prefix + k\n\n @staticmethod\n def _check_prefix_arg(prefix: str) -> Optional[str]:\n if prefix is not None:\n if isinstance(prefix, str):\n return prefix\n else:\n raise ValueError('Expected input `prefix` to be a string')\n return None\n", "path": "torchmetrics/collections.py"}]}
2,232
226
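The patch in the torchmetrics record above makes dict-based construction deterministic by looping over `sorted(metrics.keys())` before inserting into the `nn.ModuleDict`, so every distributed worker registers, and later iterates, the metrics in the same order and synchronized compute cannot deadlock. A short sketch of that idea follows; plain strings stand in for `Metric` instances and an `OrderedDict` stands in for the `ModuleDict`, so this is an illustration of the ordering rule rather than the real class.

```python
# Hedged sketch of the deterministic-ordering fix from the torchmetrics record.
# Values are placeholder strings, not torchmetrics Metric objects.
from collections import OrderedDict


def build_collection(metrics: dict) -> OrderedDict:
    """Insert metrics in sorted-key order so every worker iterates identically."""
    collection = OrderedDict()
    for name in sorted(metrics.keys()):
        collection[name] = metrics[name]
    return collection


# Two workers construct the collection from dicts with different insertion order...
worker_a = build_collection({'micro_recall': 'RecallMicro', 'macro_recall': 'RecallMacro'})
worker_b = build_collection({'macro_recall': 'RecallMacro', 'micro_recall': 'RecallMicro'})
# ...yet both end up iterating keys in the same order.
assert list(worker_a) == list(worker_b)
```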
gh_patches_debug_21464
rasdani/github-patches
git_diff
netbox-community__netbox-9547
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Provide Markdown help with links to local documentation pages ### NetBox version v3.2.4 ### Feature type New functionality ### Proposed functionality Currently netbox supports a documentation package as part of the main release due to https://github.com/netbox-community/netbox/issues/6328 I propose to change the Markdown assistance available in some text areas ( for example in comments fields) that is currently going to "https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet" to another URL as part of the offline documentation package ![image](https://user-images.githubusercontent.com/44149262/173758856-79739e48-1eb3-4173-9b09-9908fa1c92df.png) I propose that a new section in the documentation package is created, based in the github url above, and use the corresponding link within this assistance fields. The final url could be something like, if this section is placed under references. https://netboxfqdn/static/docs/reference/markdown/ ### Use case The following use cases are applicable: Provide the correct documentation that is always related to the specific version being used, instead of the online version that refers the latest version. Provide access to the documentation to system installed in a isolated management environment that do not have internet access. ### Database changes none ### External dependencies none --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `netbox/utilities/forms/fields/fields.py` Content: ``` 1 import json 2 3 from django import forms 4 from django.db.models import Count 5 from django.forms.fields import JSONField as _JSONField, InvalidJSONInput 6 from netaddr import AddrFormatError, EUI 7 8 from utilities.forms import widgets 9 from utilities.validators import EnhancedURLValidator 10 11 __all__ = ( 12 'ChoiceField', 13 'ColorField', 14 'CommentField', 15 'JSONField', 16 'LaxURLField', 17 'MACAddressField', 18 'MultipleChoiceField', 19 'SlugField', 20 'TagFilterField', 21 ) 22 23 24 class CommentField(forms.CharField): 25 """ 26 A textarea with support for Markdown rendering. Exists mostly just to add a standard `help_text`. 27 """ 28 widget = forms.Textarea 29 # TODO: Port Markdown cheat sheet to internal documentation 30 help_text = """ 31 <i class="mdi mdi-information-outline"></i> 32 <a href="https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet" target="_blank" tabindex="-1"> 33 Markdown</a> syntax is supported 34 """ 35 36 def __init__(self, *, label='', help_text=help_text, required=False, **kwargs): 37 super().__init__(label=label, help_text=help_text, required=required, **kwargs) 38 39 40 class SlugField(forms.SlugField): 41 """ 42 Extend Django's built-in SlugField to automatically populate from a field called `name` unless otherwise specified. 43 44 Parameters: 45 slug_source: Name of the form field from which the slug value will be derived 46 """ 47 widget = widgets.SlugWidget 48 help_text = "URL-friendly unique shorthand" 49 50 def __init__(self, *, slug_source='name', help_text=help_text, **kwargs): 51 super().__init__(help_text=help_text, **kwargs) 52 53 self.widget.attrs['slug-source'] = slug_source 54 55 56 class ColorField(forms.CharField): 57 """ 58 A field which represents a color value in hexadecimal `RRGGBB` format. Utilizes NetBox's `ColorSelect` widget to 59 render choices. 
60 """ 61 widget = widgets.ColorSelect 62 63 64 class TagFilterField(forms.MultipleChoiceField): 65 """ 66 A filter field for the tags of a model. Only the tags used by a model are displayed. 67 68 :param model: The model of the filter 69 """ 70 widget = widgets.StaticSelectMultiple 71 72 def __init__(self, model, *args, **kwargs): 73 def get_choices(): 74 tags = model.tags.annotate( 75 count=Count('extras_taggeditem_items') 76 ).order_by('name') 77 return [ 78 (str(tag.slug), '{} ({})'.format(tag.name, tag.count)) for tag in tags 79 ] 80 81 # Choices are fetched each time the form is initialized 82 super().__init__(label='Tags', choices=get_choices, required=False, *args, **kwargs) 83 84 85 class LaxURLField(forms.URLField): 86 """ 87 Modifies Django's built-in URLField to remove the requirement for fully-qualified domain names 88 (e.g. http://myserver/ is valid) 89 """ 90 default_validators = [EnhancedURLValidator()] 91 92 93 class JSONField(_JSONField): 94 """ 95 Custom wrapper around Django's built-in JSONField to avoid presenting "null" as the default text. 96 """ 97 def __init__(self, *args, **kwargs): 98 super().__init__(*args, **kwargs) 99 if not self.help_text: 100 self.help_text = 'Enter context data in <a href="https://json.org/">JSON</a> format.' 101 self.widget.attrs['placeholder'] = '' 102 103 def prepare_value(self, value): 104 if isinstance(value, InvalidJSONInput): 105 return value 106 if value is None: 107 return '' 108 return json.dumps(value, sort_keys=True, indent=4) 109 110 111 class MACAddressField(forms.Field): 112 """ 113 Validates a 48-bit MAC address. 114 """ 115 widget = forms.CharField 116 default_error_messages = { 117 'invalid': 'MAC address must be in EUI-48 format', 118 } 119 120 def to_python(self, value): 121 value = super().to_python(value) 122 123 # Validate MAC address format 124 try: 125 value = EUI(value.strip()) 126 except AddrFormatError: 127 raise forms.ValidationError(self.error_messages['invalid'], code='invalid') 128 129 return value 130 131 132 # 133 # Choice fields 134 # 135 136 class ChoiceField(forms.ChoiceField): 137 """ 138 Overrides Django's built-in `ChoiceField` to use NetBox's `StaticSelect` widget 139 """ 140 widget = widgets.StaticSelect 141 142 143 class MultipleChoiceField(forms.MultipleChoiceField): 144 """ 145 Overrides Django's built-in `MultipleChoiceField` to use NetBox's `StaticSelectMultiple` widget 146 """ 147 widget = widgets.StaticSelectMultiple 148 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/netbox/utilities/forms/fields/fields.py b/netbox/utilities/forms/fields/fields.py --- a/netbox/utilities/forms/fields/fields.py +++ b/netbox/utilities/forms/fields/fields.py @@ -3,6 +3,7 @@ from django import forms from django.db.models import Count from django.forms.fields import JSONField as _JSONField, InvalidJSONInput +from django.templatetags.static import static from netaddr import AddrFormatError, EUI from utilities.forms import widgets @@ -26,10 +27,9 @@ A textarea with support for Markdown rendering. Exists mostly just to add a standard `help_text`. """ widget = forms.Textarea - # TODO: Port Markdown cheat sheet to internal documentation - help_text = """ + help_text = f""" <i class="mdi mdi-information-outline"></i> - <a href="https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet" target="_blank" tabindex="-1"> + <a href="{static('docs/reference/markdown/')}" target="_blank" tabindex="-1"> Markdown</a> syntax is supported """
{"golden_diff": "diff --git a/netbox/utilities/forms/fields/fields.py b/netbox/utilities/forms/fields/fields.py\n--- a/netbox/utilities/forms/fields/fields.py\n+++ b/netbox/utilities/forms/fields/fields.py\n@@ -3,6 +3,7 @@\n from django import forms\n from django.db.models import Count\n from django.forms.fields import JSONField as _JSONField, InvalidJSONInput\n+from django.templatetags.static import static\n from netaddr import AddrFormatError, EUI\n \n from utilities.forms import widgets\n@@ -26,10 +27,9 @@\n A textarea with support for Markdown rendering. Exists mostly just to add a standard `help_text`.\n \"\"\"\n widget = forms.Textarea\n- # TODO: Port Markdown cheat sheet to internal documentation\n- help_text = \"\"\"\n+ help_text = f\"\"\"\n <i class=\"mdi mdi-information-outline\"></i>\n- <a href=\"https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet\" target=\"_blank\" tabindex=\"-1\">\n+ <a href=\"{static('docs/reference/markdown/')}\" target=\"_blank\" tabindex=\"-1\">\n Markdown</a> syntax is supported\n \"\"\"\n", "issue": "Provide Markdown help with links to local documentation pages\n### NetBox version\n\nv3.2.4\n\n### Feature type\n\nNew functionality\n\n### Proposed functionality\n\nCurrently netbox supports a documentation package as part of the main release due to https://github.com/netbox-community/netbox/issues/6328\r\n\r\nI propose to change the Markdown assistance available in some text areas ( for example in comments fields) that is currently going to \"https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet\" to another URL as part of the offline documentation package \r\n![image](https://user-images.githubusercontent.com/44149262/173758856-79739e48-1eb3-4173-9b09-9908fa1c92df.png)\r\n\r\nI propose that a new section in the documentation package is created, based in the github url above, and use the corresponding link within this assistance fields. \r\n\r\nThe final url could be something like, if this section is placed under references. \r\nhttps://netboxfqdn/static/docs/reference/markdown/\n\n### Use case\n\n\r\n\r\nThe following use cases are applicable:\r\n\r\n Provide the correct documentation that is always related to the specific version being used, instead of the online version that refers the latest version.\r\n Provide access to the documentation to system installed in a isolated management environment that do not have internet access.\r\n\n\n### Database changes\n\nnone\n\n### External dependencies\n\nnone\n", "before_files": [{"content": "import json\n\nfrom django import forms\nfrom django.db.models import Count\nfrom django.forms.fields import JSONField as _JSONField, InvalidJSONInput\nfrom netaddr import AddrFormatError, EUI\n\nfrom utilities.forms import widgets\nfrom utilities.validators import EnhancedURLValidator\n\n__all__ = (\n 'ChoiceField',\n 'ColorField',\n 'CommentField',\n 'JSONField',\n 'LaxURLField',\n 'MACAddressField',\n 'MultipleChoiceField',\n 'SlugField',\n 'TagFilterField',\n)\n\n\nclass CommentField(forms.CharField):\n \"\"\"\n A textarea with support for Markdown rendering. 
Exists mostly just to add a standard `help_text`.\n \"\"\"\n widget = forms.Textarea\n # TODO: Port Markdown cheat sheet to internal documentation\n help_text = \"\"\"\n <i class=\"mdi mdi-information-outline\"></i>\n <a href=\"https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet\" target=\"_blank\" tabindex=\"-1\">\n Markdown</a> syntax is supported\n \"\"\"\n\n def __init__(self, *, label='', help_text=help_text, required=False, **kwargs):\n super().__init__(label=label, help_text=help_text, required=required, **kwargs)\n\n\nclass SlugField(forms.SlugField):\n \"\"\"\n Extend Django's built-in SlugField to automatically populate from a field called `name` unless otherwise specified.\n\n Parameters:\n slug_source: Name of the form field from which the slug value will be derived\n \"\"\"\n widget = widgets.SlugWidget\n help_text = \"URL-friendly unique shorthand\"\n\n def __init__(self, *, slug_source='name', help_text=help_text, **kwargs):\n super().__init__(help_text=help_text, **kwargs)\n\n self.widget.attrs['slug-source'] = slug_source\n\n\nclass ColorField(forms.CharField):\n \"\"\"\n A field which represents a color value in hexadecimal `RRGGBB` format. Utilizes NetBox's `ColorSelect` widget to\n render choices.\n \"\"\"\n widget = widgets.ColorSelect\n\n\nclass TagFilterField(forms.MultipleChoiceField):\n \"\"\"\n A filter field for the tags of a model. Only the tags used by a model are displayed.\n\n :param model: The model of the filter\n \"\"\"\n widget = widgets.StaticSelectMultiple\n\n def __init__(self, model, *args, **kwargs):\n def get_choices():\n tags = model.tags.annotate(\n count=Count('extras_taggeditem_items')\n ).order_by('name')\n return [\n (str(tag.slug), '{} ({})'.format(tag.name, tag.count)) for tag in tags\n ]\n\n # Choices are fetched each time the form is initialized\n super().__init__(label='Tags', choices=get_choices, required=False, *args, **kwargs)\n\n\nclass LaxURLField(forms.URLField):\n \"\"\"\n Modifies Django's built-in URLField to remove the requirement for fully-qualified domain names\n (e.g. 
http://myserver/ is valid)\n \"\"\"\n default_validators = [EnhancedURLValidator()]\n\n\nclass JSONField(_JSONField):\n \"\"\"\n Custom wrapper around Django's built-in JSONField to avoid presenting \"null\" as the default text.\n \"\"\"\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n if not self.help_text:\n self.help_text = 'Enter context data in <a href=\"https://json.org/\">JSON</a> format.'\n self.widget.attrs['placeholder'] = ''\n\n def prepare_value(self, value):\n if isinstance(value, InvalidJSONInput):\n return value\n if value is None:\n return ''\n return json.dumps(value, sort_keys=True, indent=4)\n\n\nclass MACAddressField(forms.Field):\n \"\"\"\n Validates a 48-bit MAC address.\n \"\"\"\n widget = forms.CharField\n default_error_messages = {\n 'invalid': 'MAC address must be in EUI-48 format',\n }\n\n def to_python(self, value):\n value = super().to_python(value)\n\n # Validate MAC address format\n try:\n value = EUI(value.strip())\n except AddrFormatError:\n raise forms.ValidationError(self.error_messages['invalid'], code='invalid')\n\n return value\n\n\n#\n# Choice fields\n#\n\nclass ChoiceField(forms.ChoiceField):\n \"\"\"\n Overrides Django's built-in `ChoiceField` to use NetBox's `StaticSelect` widget\n \"\"\"\n widget = widgets.StaticSelect\n\n\nclass MultipleChoiceField(forms.MultipleChoiceField):\n \"\"\"\n Overrides Django's built-in `MultipleChoiceField` to use NetBox's `StaticSelectMultiple` widget\n \"\"\"\n widget = widgets.StaticSelectMultiple\n", "path": "netbox/utilities/forms/fields/fields.py"}], "after_files": [{"content": "import json\n\nfrom django import forms\nfrom django.db.models import Count\nfrom django.forms.fields import JSONField as _JSONField, InvalidJSONInput\nfrom django.templatetags.static import static\nfrom netaddr import AddrFormatError, EUI\n\nfrom utilities.forms import widgets\nfrom utilities.validators import EnhancedURLValidator\n\n__all__ = (\n 'ChoiceField',\n 'ColorField',\n 'CommentField',\n 'JSONField',\n 'LaxURLField',\n 'MACAddressField',\n 'MultipleChoiceField',\n 'SlugField',\n 'TagFilterField',\n)\n\n\nclass CommentField(forms.CharField):\n \"\"\"\n A textarea with support for Markdown rendering. Exists mostly just to add a standard `help_text`.\n \"\"\"\n widget = forms.Textarea\n help_text = f\"\"\"\n <i class=\"mdi mdi-information-outline\"></i>\n <a href=\"{static('docs/reference/markdown/')}\" target=\"_blank\" tabindex=\"-1\">\n Markdown</a> syntax is supported\n \"\"\"\n\n def __init__(self, *, label='', help_text=help_text, required=False, **kwargs):\n super().__init__(label=label, help_text=help_text, required=required, **kwargs)\n\n\nclass SlugField(forms.SlugField):\n \"\"\"\n Extend Django's built-in SlugField to automatically populate from a field called `name` unless otherwise specified.\n\n Parameters:\n slug_source: Name of the form field from which the slug value will be derived\n \"\"\"\n widget = widgets.SlugWidget\n help_text = \"URL-friendly unique shorthand\"\n\n def __init__(self, *, slug_source='name', help_text=help_text, **kwargs):\n super().__init__(help_text=help_text, **kwargs)\n\n self.widget.attrs['slug-source'] = slug_source\n\n\nclass ColorField(forms.CharField):\n \"\"\"\n A field which represents a color value in hexadecimal `RRGGBB` format. Utilizes NetBox's `ColorSelect` widget to\n render choices.\n \"\"\"\n widget = widgets.ColorSelect\n\n\nclass TagFilterField(forms.MultipleChoiceField):\n \"\"\"\n A filter field for the tags of a model. 
Only the tags used by a model are displayed.\n\n :param model: The model of the filter\n \"\"\"\n widget = widgets.StaticSelectMultiple\n\n def __init__(self, model, *args, **kwargs):\n def get_choices():\n tags = model.tags.annotate(\n count=Count('extras_taggeditem_items')\n ).order_by('name')\n return [\n (str(tag.slug), '{} ({})'.format(tag.name, tag.count)) for tag in tags\n ]\n\n # Choices are fetched each time the form is initialized\n super().__init__(label='Tags', choices=get_choices, required=False, *args, **kwargs)\n\n\nclass LaxURLField(forms.URLField):\n \"\"\"\n Modifies Django's built-in URLField to remove the requirement for fully-qualified domain names\n (e.g. http://myserver/ is valid)\n \"\"\"\n default_validators = [EnhancedURLValidator()]\n\n\nclass JSONField(_JSONField):\n \"\"\"\n Custom wrapper around Django's built-in JSONField to avoid presenting \"null\" as the default text.\n \"\"\"\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n if not self.help_text:\n self.help_text = 'Enter context data in <a href=\"https://json.org/\">JSON</a> format.'\n self.widget.attrs['placeholder'] = ''\n\n def prepare_value(self, value):\n if isinstance(value, InvalidJSONInput):\n return value\n if value is None:\n return ''\n return json.dumps(value, sort_keys=True, indent=4)\n\n\nclass MACAddressField(forms.Field):\n \"\"\"\n Validates a 48-bit MAC address.\n \"\"\"\n widget = forms.CharField\n default_error_messages = {\n 'invalid': 'MAC address must be in EUI-48 format',\n }\n\n def to_python(self, value):\n value = super().to_python(value)\n\n # Validate MAC address format\n try:\n value = EUI(value.strip())\n except AddrFormatError:\n raise forms.ValidationError(self.error_messages['invalid'], code='invalid')\n\n return value\n\n\n#\n# Choice fields\n#\n\nclass ChoiceField(forms.ChoiceField):\n \"\"\"\n Overrides Django's built-in `ChoiceField` to use NetBox's `StaticSelect` widget\n \"\"\"\n widget = widgets.StaticSelect\n\n\nclass MultipleChoiceField(forms.MultipleChoiceField):\n \"\"\"\n Overrides Django's built-in `MultipleChoiceField` to use NetBox's `StaticSelectMultiple` widget\n \"\"\"\n widget = widgets.StaticSelectMultiple\n", "path": "netbox/utilities/forms/fields/fields.py"}]}
1,945
265
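The diff in the record above replaces the hard-coded GitHub cheatsheet URL with a link built by Django's `static()` helper, so the Markdown help points at the locally served documentation. A minimal sketch of that pattern follows; the form class, field name and docs path are illustrative stand-ins rather than NetBox code, and it assumes a configured Django project with the staticfiles app enabled.

```python
# Sketch: pointing a form field's Markdown help link at locally served documentation.
# The form class, field name and docs path are illustrative; only the static() call
# mirrors the pattern used in the diff above.
from django import forms
from django.templatetags.static import static


class CommentForm(forms.Form):
    comments = forms.CharField(
        required=False,
        widget=forms.Textarea,
        # static() joins the path onto STATIC_URL, so the help link tracks the
        # deployment's own static files instead of an external wiki page.
        help_text=f'<a href="{static("docs/reference/markdown/")}" target="_blank">Markdown</a> syntax is supported',
    )
```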
gh_patches_debug_9398
rasdani/github-patches
git_diff
saulpw__visidata-1890
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- fixed width saver truncates data if columns are not fully expanded **Small description** If you save or syscopy a table as `fixed` format, and the visible column width is less than the width of the data in the column, the data is truncated. Also, the resulting file is not a valid fixed width format file as the columns are not aligned with the headers. **Expected result** Saving or copying a table when the columns are not fully expanded should yield the same result as when the columns are expanded. **Actual result with screenshot** ![image](https://user-images.githubusercontent.com/2499066/231616002-553a1a46-0b56-4950-b3eb-90507a77b823.png) **Steps to reproduce with sample data and a .vd** [test-vd-fixed.zip](https://github.com/saulpw/visidata/files/11217144/test-vd-fixed.zip) **Additional context** saul.pw/VisiData v2.11 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `visidata/loaders/fixed_width.py` Content: ``` 1 2 from visidata import VisiData, vd, Sheet, Column, Progress, SequenceSheet 3 4 5 vd.option('fixed_rows', 1000, 'number of rows to check for fixed width columns') 6 vd.option('fixed_maxcols', 0, 'max number of fixed-width columns to create (0 is no max)') 7 8 @VisiData.api 9 def open_fixed(vd, p): 10 return FixedWidthColumnsSheet(p.name, source=p, headerlines=[]) 11 12 class FixedWidthColumn(Column): 13 def __init__(self, name, i, j, **kwargs): 14 super().__init__(name, **kwargs) 15 self.i, self.j = i, j 16 17 def calcValue(self, row): 18 return row[0][self.i:self.j] 19 20 def putValue(self, row, value): 21 value = str(value)[:self.j-self.i] 22 j = self.j or len(row) 23 row[0] = row[0][:self.i] + '%-*s' % (j-self.i, value) + row[0][self.j:] 24 25 def columnize(rows): 26 'Generate (i,j) indexes for fixed-width columns found in rows' 27 28 ## find all character columns that are not spaces ever 29 allNonspaces = set() 30 for r in rows: 31 for i, ch in enumerate(r): 32 if not ch.isspace(): 33 allNonspaces.add(i) 34 35 colstart = 0 36 prev = 0 37 38 # collapse fields 39 for i in allNonspaces: 40 if i > prev+1: 41 yield colstart, i 42 colstart = i 43 prev = i 44 45 yield colstart, prev+1 # final column gets rest of line 46 47 48 class FixedWidthColumnsSheet(SequenceSheet): 49 rowtype = 'lines' # rowdef: [line] (wrapping in list makes it unique and modifiable) 50 def addRow(self, row, index=None): 51 Sheet.addRow(self, row, index=index) 52 53 def iterload(self): 54 itsource = iter(self.source) 55 56 # compute fixed width columns from first fixed_rows lines 57 maxcols = self.options.fixed_maxcols 58 self.columns = [] 59 fixedRows = list([x] for x in self.optlines(itsource, 'fixed_rows')) 60 for i, j in columnize(list(r[0] for r in fixedRows)): 61 if maxcols and self.nCols >= maxcols-1: 62 self.addColumn(FixedWidthColumn('', i, None)) 63 break 64 else: 65 self.addColumn(FixedWidthColumn('', i, j)) 66 67 yield from fixedRows 68 69 self.setColNames(self.headerlines) 70 71 yield from ([line] for line in itsource) 72 73 def setCols(self, headerlines): 74 self.headerlines = headerlines 75 76 77 @VisiData.api 78 def save_fixed(vd, p, *vsheets): 79 with p.open(mode='w', encoding=vsheets[0].options.save_encoding) as fp: 80 for sheet in vsheets: 81 if len(vsheets) > 1: 82 fp.write('%s\n\n' % sheet.name) 83 84 widths = {} # Column -> width:int 85 # headers 86 for col in 
Progress(sheet.visibleCols, gerund='sizing'): 87 maxWidth = col.getMaxWidth(sheet.rows) 88 widths[col] = col.width if col.width >= maxWidth else sheet.options.default_width or maxWidth 89 fp.write(('{0:%s} ' % widths[col]).format(col.name)) 90 fp.write('\n') 91 92 # rows 93 with Progress(gerund='saving'): 94 for dispvals in sheet.iterdispvals(format=True): 95 for col, val in dispvals.items(): 96 fp.write(('{0:%s%s.%s} ' % ('>' if vd.isNumeric(col) else '<', widths[col], widths[col])).format(val)) 97 fp.write('\n') 98 99 vd.status('%s save finished' % p) 100 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/visidata/loaders/fixed_width.py b/visidata/loaders/fixed_width.py --- a/visidata/loaders/fixed_width.py +++ b/visidata/loaders/fixed_width.py @@ -84,8 +84,7 @@ widths = {} # Column -> width:int # headers for col in Progress(sheet.visibleCols, gerund='sizing'): - maxWidth = col.getMaxWidth(sheet.rows) - widths[col] = col.width if col.width >= maxWidth else sheet.options.default_width or maxWidth + widths[col] = col.getMaxWidth(sheet.rows) #1849 fp.write(('{0:%s} ' % widths[col]).format(col.name)) fp.write('\n')
{"golden_diff": "diff --git a/visidata/loaders/fixed_width.py b/visidata/loaders/fixed_width.py\n--- a/visidata/loaders/fixed_width.py\n+++ b/visidata/loaders/fixed_width.py\n@@ -84,8 +84,7 @@\n widths = {} # Column -> width:int\n # headers\n for col in Progress(sheet.visibleCols, gerund='sizing'):\n- maxWidth = col.getMaxWidth(sheet.rows)\n- widths[col] = col.width if col.width >= maxWidth else sheet.options.default_width or maxWidth\n+ widths[col] = col.getMaxWidth(sheet.rows) #1849 \n fp.write(('{0:%s} ' % widths[col]).format(col.name))\n fp.write('\\n')\n", "issue": "fixed width saver truncates data if columns are not fully expanded\n**Small description**\r\n\r\nIf you save or syscopy a table as `fixed` format, and the visible column width is less than the width of the data in the column, the data is truncated. Also, the resulting file is not a valid fixed width format file as the columns are not aligned with the headers.\r\n\r\n**Expected result**\r\n\r\nSaving or copying a table when the columns are not fully expanded should yield the same result as when the columns are expanded.\r\n\r\n**Actual result with screenshot**\r\n![image](https://user-images.githubusercontent.com/2499066/231616002-553a1a46-0b56-4950-b3eb-90507a77b823.png)\r\n\r\n**Steps to reproduce with sample data and a .vd**\r\n[test-vd-fixed.zip](https://github.com/saulpw/visidata/files/11217144/test-vd-fixed.zip)\r\n\r\n**Additional context**\r\nsaul.pw/VisiData v2.11\r\n\n", "before_files": [{"content": "\nfrom visidata import VisiData, vd, Sheet, Column, Progress, SequenceSheet\n\n\nvd.option('fixed_rows', 1000, 'number of rows to check for fixed width columns')\nvd.option('fixed_maxcols', 0, 'max number of fixed-width columns to create (0 is no max)')\n\[email protected]\ndef open_fixed(vd, p):\n return FixedWidthColumnsSheet(p.name, source=p, headerlines=[])\n\nclass FixedWidthColumn(Column):\n def __init__(self, name, i, j, **kwargs):\n super().__init__(name, **kwargs)\n self.i, self.j = i, j\n\n def calcValue(self, row):\n return row[0][self.i:self.j]\n\n def putValue(self, row, value):\n value = str(value)[:self.j-self.i]\n j = self.j or len(row)\n row[0] = row[0][:self.i] + '%-*s' % (j-self.i, value) + row[0][self.j:]\n\ndef columnize(rows):\n 'Generate (i,j) indexes for fixed-width columns found in rows'\n\n ## find all character columns that are not spaces ever\n allNonspaces = set()\n for r in rows:\n for i, ch in enumerate(r):\n if not ch.isspace():\n allNonspaces.add(i)\n\n colstart = 0\n prev = 0\n\n # collapse fields\n for i in allNonspaces:\n if i > prev+1:\n yield colstart, i\n colstart = i\n prev = i\n\n yield colstart, prev+1 # final column gets rest of line\n\n\nclass FixedWidthColumnsSheet(SequenceSheet):\n rowtype = 'lines' # rowdef: [line] (wrapping in list makes it unique and modifiable)\n def addRow(self, row, index=None):\n Sheet.addRow(self, row, index=index)\n\n def iterload(self):\n itsource = iter(self.source)\n\n # compute fixed width columns from first fixed_rows lines\n maxcols = self.options.fixed_maxcols\n self.columns = []\n fixedRows = list([x] for x in self.optlines(itsource, 'fixed_rows'))\n for i, j in columnize(list(r[0] for r in fixedRows)):\n if maxcols and self.nCols >= maxcols-1:\n self.addColumn(FixedWidthColumn('', i, None))\n break\n else:\n self.addColumn(FixedWidthColumn('', i, j))\n\n yield from fixedRows\n\n self.setColNames(self.headerlines)\n\n yield from ([line] for line in itsource)\n\n def setCols(self, headerlines):\n self.headerlines = headerlines\n\n\[email 
protected]\ndef save_fixed(vd, p, *vsheets):\n with p.open(mode='w', encoding=vsheets[0].options.save_encoding) as fp:\n for sheet in vsheets:\n if len(vsheets) > 1:\n fp.write('%s\\n\\n' % sheet.name)\n\n widths = {} # Column -> width:int\n # headers\n for col in Progress(sheet.visibleCols, gerund='sizing'):\n maxWidth = col.getMaxWidth(sheet.rows)\n widths[col] = col.width if col.width >= maxWidth else sheet.options.default_width or maxWidth\n fp.write(('{0:%s} ' % widths[col]).format(col.name))\n fp.write('\\n')\n\n # rows\n with Progress(gerund='saving'):\n for dispvals in sheet.iterdispvals(format=True):\n for col, val in dispvals.items():\n fp.write(('{0:%s%s.%s} ' % ('>' if vd.isNumeric(col) else '<', widths[col], widths[col])).format(val))\n fp.write('\\n')\n\n vd.status('%s save finished' % p)\n", "path": "visidata/loaders/fixed_width.py"}], "after_files": [{"content": "\nfrom visidata import VisiData, vd, Sheet, Column, Progress, SequenceSheet\n\n\nvd.option('fixed_rows', 1000, 'number of rows to check for fixed width columns')\nvd.option('fixed_maxcols', 0, 'max number of fixed-width columns to create (0 is no max)')\n\[email protected]\ndef open_fixed(vd, p):\n return FixedWidthColumnsSheet(p.name, source=p, headerlines=[])\n\nclass FixedWidthColumn(Column):\n def __init__(self, name, i, j, **kwargs):\n super().__init__(name, **kwargs)\n self.i, self.j = i, j\n\n def calcValue(self, row):\n return row[0][self.i:self.j]\n\n def putValue(self, row, value):\n value = str(value)[:self.j-self.i]\n j = self.j or len(row)\n row[0] = row[0][:self.i] + '%-*s' % (j-self.i, value) + row[0][self.j:]\n\ndef columnize(rows):\n 'Generate (i,j) indexes for fixed-width columns found in rows'\n\n ## find all character columns that are not spaces ever\n allNonspaces = set()\n for r in rows:\n for i, ch in enumerate(r):\n if not ch.isspace():\n allNonspaces.add(i)\n\n colstart = 0\n prev = 0\n\n # collapse fields\n for i in allNonspaces:\n if i > prev+1:\n yield colstart, i\n colstart = i\n prev = i\n\n yield colstart, prev+1 # final column gets rest of line\n\n\nclass FixedWidthColumnsSheet(SequenceSheet):\n rowtype = 'lines' # rowdef: [line] (wrapping in list makes it unique and modifiable)\n def addRow(self, row, index=None):\n Sheet.addRow(self, row, index=index)\n\n def iterload(self):\n itsource = iter(self.source)\n\n # compute fixed width columns from first fixed_rows lines\n maxcols = self.options.fixed_maxcols\n self.columns = []\n fixedRows = list([x] for x in self.optlines(itsource, 'fixed_rows'))\n for i, j in columnize(list(r[0] for r in fixedRows)):\n if maxcols and self.nCols >= maxcols-1:\n self.addColumn(FixedWidthColumn('', i, None))\n break\n else:\n self.addColumn(FixedWidthColumn('', i, j))\n\n yield from fixedRows\n\n self.setColNames(self.headerlines)\n\n yield from ([line] for line in itsource)\n\n def setCols(self, headerlines):\n self.headerlines = headerlines\n\n\[email protected]\ndef save_fixed(vd, p, *vsheets):\n with p.open(mode='w', encoding=vsheets[0].options.save_encoding) as fp:\n for sheet in vsheets:\n if len(vsheets) > 1:\n fp.write('%s\\n\\n' % sheet.name)\n\n widths = {} # Column -> width:int\n # headers\n for col in Progress(sheet.visibleCols, gerund='sizing'):\n widths[col] = col.getMaxWidth(sheet.rows) #1849 \n fp.write(('{0:%s} ' % widths[col]).format(col.name))\n fp.write('\\n')\n\n # rows\n with Progress(gerund='saving'):\n for dispvals in sheet.iterdispvals(format=True):\n for col, val in dispvals.items():\n fp.write(('{0:%s%s.%s} ' % ('>' if 
vd.isNumeric(col) else '<', widths[col], widths[col])).format(val))\n fp.write('\\n')\n\n vd.status('%s save finished' % p)\n", "path": "visidata/loaders/fixed_width.py"}]}
1,544
164
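The golden diff for the record above sizes every saved column from the widest cell in the data instead of the on-screen column width. The short, dependency-free sketch below illustrates that sizing rule with made-up rows; it is not the VisiData implementation, just the padding idea the fix relies on. Keying the width on the data (and the header) is what keeps the saved file aligned even when the sheet's columns are collapsed on screen.

```python
# Sketch: pad fixed-width output to the widest value per column, not the display width.
rows = [
    {"name": "alpha", "qty": "1"},
    {"name": "a-much-longer-name", "qty": "12345"},
]
cols = ["name", "qty"]

# Width comes from the data and the header, so nothing gets truncated or misaligned.
widths = {c: max(len(c), *(len(r[c]) for r in rows)) for c in cols}

header = "  ".join(f"{c:<{widths[c]}}" for c in cols)
lines = ["  ".join(f"{r[c]:<{widths[c]}}" for c in cols) for r in rows]
print("\n".join([header, *lines]))
```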
gh_patches_debug_4240
rasdani/github-patches
git_diff
liqd__adhocracy4-210
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Keep html time field optional even if a DateTimeField is set to be required Time is optional in the backend but the html input field still gets the required attribute if the the DateTimeField is initializes with `required=True` The time Widget should always be initialized without required. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `adhocracy4/forms/widgets.py` Content: ``` 1 import datetime 2 3 from django.contrib.staticfiles.storage import staticfiles_storage 4 from django.forms import widgets as form_widgets 5 from django.template.loader import render_to_string 6 from django.utils.timezone import localtime 7 8 9 class DateTimeInput(form_widgets.SplitDateTimeWidget): 10 def __init__(self, time_label='', time_default=None, *args, **kwargs): 11 super().__init__(*args, **kwargs) 12 self.time_label = time_label 13 self.time_default = time_default or datetime.time(hour=0, minute=0) 14 15 class Media: 16 js = ( 17 staticfiles_storage.url('datepicker.js'), 18 ) 19 css = {'all': [ 20 staticfiles_storage.url('datepicker.css'), 21 ]} 22 23 def render(self, name, value, attrs=None): 24 date_attrs = self.build_attrs(attrs) 25 date_attrs.update({ 26 'class': 'datepicker', 27 'placeholder': self.widgets[0].format_value(datetime.date.today()), 28 'id': attrs['id'] + '_date' 29 }) 30 time_attrs = self.build_attrs(attrs) 31 time_attrs.update({ 32 'class': 'timepicker', 33 'placeholder': self.widgets[1].format_value( 34 self.get_default_time()), 35 'id': attrs['id'] + '_time' 36 }) 37 38 if isinstance(value, datetime.datetime): 39 value = localtime(value) 40 date = value.date() 41 time = value.time() 42 else: 43 # value's just a list in case of an error 44 date = value[0] if value else None 45 time = value[1] if value else None 46 47 return render_to_string( 48 'a4forms/datetime_input.html', { 49 'date': self.widgets[0].render( 50 name + '_0', 51 date, 52 date_attrs 53 ), 54 'time': self.widgets[1].render( 55 name + '_1', 56 time, 57 time_attrs 58 ), 59 'time_label': { 60 'label': self.time_label, 61 'id_for_label': attrs['id'] + '_time' 62 }, 63 }) 64 65 def id_for_label(self, id_): 66 if id_: 67 id_ += '_date' 68 return id_ 69 70 def get_default_time(self): 71 time_widget = self.widgets[1] 72 73 if not self.time_default: 74 return time_widget.format_value(datetime.time(hour=0, minute=0)) 75 elif isinstance(self.time_default, (datetime.time, datetime.datetime)): 76 return time_widget.format_value(self.time_default) 77 else: 78 return self.time_default 79 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/adhocracy4/forms/widgets.py b/adhocracy4/forms/widgets.py --- a/adhocracy4/forms/widgets.py +++ b/adhocracy4/forms/widgets.py @@ -32,7 +32,8 @@ 'class': 'timepicker', 'placeholder': self.widgets[1].format_value( self.get_default_time()), - 'id': attrs['id'] + '_time' + 'id': attrs['id'] + '_time', + 'required': False }) if isinstance(value, datetime.datetime):
{"golden_diff": "diff --git a/adhocracy4/forms/widgets.py b/adhocracy4/forms/widgets.py\n--- a/adhocracy4/forms/widgets.py\n+++ b/adhocracy4/forms/widgets.py\n@@ -32,7 +32,8 @@\n 'class': 'timepicker',\n 'placeholder': self.widgets[1].format_value(\n self.get_default_time()),\n- 'id': attrs['id'] + '_time'\n+ 'id': attrs['id'] + '_time',\n+ 'required': False\n })\n \n if isinstance(value, datetime.datetime):\n", "issue": "Keep html time field optional even if a DateTimeField is set to be required\nTime is optional in the backend but the html input field still gets the required attribute if the the DateTimeField is initializes with `required=True`\r\nThe time Widget should always be initialized without required.\n", "before_files": [{"content": "import datetime\n\nfrom django.contrib.staticfiles.storage import staticfiles_storage\nfrom django.forms import widgets as form_widgets\nfrom django.template.loader import render_to_string\nfrom django.utils.timezone import localtime\n\n\nclass DateTimeInput(form_widgets.SplitDateTimeWidget):\n def __init__(self, time_label='', time_default=None, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.time_label = time_label\n self.time_default = time_default or datetime.time(hour=0, minute=0)\n\n class Media:\n js = (\n staticfiles_storage.url('datepicker.js'),\n )\n css = {'all': [\n staticfiles_storage.url('datepicker.css'),\n ]}\n\n def render(self, name, value, attrs=None):\n date_attrs = self.build_attrs(attrs)\n date_attrs.update({\n 'class': 'datepicker',\n 'placeholder': self.widgets[0].format_value(datetime.date.today()),\n 'id': attrs['id'] + '_date'\n })\n time_attrs = self.build_attrs(attrs)\n time_attrs.update({\n 'class': 'timepicker',\n 'placeholder': self.widgets[1].format_value(\n self.get_default_time()),\n 'id': attrs['id'] + '_time'\n })\n\n if isinstance(value, datetime.datetime):\n value = localtime(value)\n date = value.date()\n time = value.time()\n else:\n # value's just a list in case of an error\n date = value[0] if value else None\n time = value[1] if value else None\n\n return render_to_string(\n 'a4forms/datetime_input.html', {\n 'date': self.widgets[0].render(\n name + '_0',\n date,\n date_attrs\n ),\n 'time': self.widgets[1].render(\n name + '_1',\n time,\n time_attrs\n ),\n 'time_label': {\n 'label': self.time_label,\n 'id_for_label': attrs['id'] + '_time'\n },\n })\n\n def id_for_label(self, id_):\n if id_:\n id_ += '_date'\n return id_\n\n def get_default_time(self):\n time_widget = self.widgets[1]\n\n if not self.time_default:\n return time_widget.format_value(datetime.time(hour=0, minute=0))\n elif isinstance(self.time_default, (datetime.time, datetime.datetime)):\n return time_widget.format_value(self.time_default)\n else:\n return self.time_default\n", "path": "adhocracy4/forms/widgets.py"}], "after_files": [{"content": "import datetime\n\nfrom django.contrib.staticfiles.storage import staticfiles_storage\nfrom django.forms import widgets as form_widgets\nfrom django.template.loader import render_to_string\nfrom django.utils.timezone import localtime\n\n\nclass DateTimeInput(form_widgets.SplitDateTimeWidget):\n def __init__(self, time_label='', time_default=None, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.time_label = time_label\n self.time_default = time_default or datetime.time(hour=0, minute=0)\n\n class Media:\n js = (\n staticfiles_storage.url('datepicker.js'),\n )\n css = {'all': [\n staticfiles_storage.url('datepicker.css'),\n ]}\n\n def render(self, name, value, attrs=None):\n 
date_attrs = self.build_attrs(attrs)\n date_attrs.update({\n 'class': 'datepicker',\n 'placeholder': self.widgets[0].format_value(datetime.date.today()),\n 'id': attrs['id'] + '_date'\n })\n time_attrs = self.build_attrs(attrs)\n time_attrs.update({\n 'class': 'timepicker',\n 'placeholder': self.widgets[1].format_value(\n self.get_default_time()),\n 'id': attrs['id'] + '_time',\n 'required': False\n })\n\n if isinstance(value, datetime.datetime):\n value = localtime(value)\n date = value.date()\n time = value.time()\n else:\n # value's just a list in case of an error\n date = value[0] if value else None\n time = value[1] if value else None\n\n return render_to_string(\n 'a4forms/datetime_input.html', {\n 'date': self.widgets[0].render(\n name + '_0',\n date,\n date_attrs\n ),\n 'time': self.widgets[1].render(\n name + '_1',\n time,\n time_attrs\n ),\n 'time_label': {\n 'label': self.time_label,\n 'id_for_label': attrs['id'] + '_time'\n },\n })\n\n def id_for_label(self, id_):\n if id_:\n id_ += '_date'\n return id_\n\n def get_default_time(self):\n time_widget = self.widgets[1]\n\n if not self.time_default:\n return time_widget.format_value(datetime.time(hour=0, minute=0))\n elif isinstance(self.time_default, (datetime.time, datetime.datetime)):\n return time_widget.format_value(self.time_default)\n else:\n return self.time_default\n", "path": "adhocracy4/forms/widgets.py"}]}
995
122
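The patch in the record above adds `'required': False` to the time sub-widget's attributes so the rendered `<input>` never carries the HTML `required` flag, even when the form field itself is required. Below is a trimmed, hypothetical sketch of a split date/time widget doing the same thing; the ids and list fallback are illustrative, and the real widget renders through a template rather than concatenating HTML.

```python
# Sketch: a split date/time widget whose time <input> is never marked required.
# Field ids and the list fallback are illustrative; the real widget renders via a template.
import datetime

from django import forms


class OptionalTimeDateTimeInput(forms.SplitDateTimeWidget):
    def render(self, name, value, attrs=None, renderer=None):
        attrs = self.build_attrs(self.attrs, attrs)
        date_attrs = {**attrs, 'id': attrs.get('id', 'id_field') + '_date'}
        time_attrs = {
            **attrs,
            'id': attrs.get('id', 'id_field') + '_time',
            'required': False,  # attributes set to False are dropped from the rendered HTML
        }
        if isinstance(value, datetime.datetime):
            date_value, time_value = value.date(), value.time()
        else:  # on validation errors the value arrives as a [date, time] list
            date_value = value[0] if value else None
            time_value = value[1] if value else None
        return (
            self.widgets[0].render(name + '_0', date_value, date_attrs, renderer)
            + self.widgets[1].render(name + '_1', time_value, time_attrs, renderer)
        )
```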
gh_patches_debug_15813
rasdani/github-patches
git_diff
netbox-community__netbox-14367
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Bulk edit of Interfaces of VMs without cluster fails ### NetBox version v3.6.5 ### Python version 3.8 ### Steps to Reproduce 1. Create VM and assign only a Site, not a Cluster 2. Create Interface for VM 3. Mark Interface and click on "Edit Selected" ### Expected Behavior Edit form for selected VM Interface(s) appear ### Observed Behavior Exception Window with the following Content: ``` <class 'AttributeError'> 'NoneType' object has no attribute 'site' Python version: 3.8.10 NetBox version: 3.6.5 Plugins: netbox_demo: 0.3.1 ``` When generating the bulk edit form, the site is extracted from the cluster of the virtual machine, which fails if no cluster is assigned to the VM: ``` File "/opt/netbox/netbox/virtualization/forms/bulk_edit.py", line 272, in __init__ site = interface.virtual_machine.cluster.site AttributeError: 'NoneType' object has no attribute 'site' ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `netbox/virtualization/forms/bulk_edit.py` Content: ``` 1 from django import forms 2 from django.utils.translation import gettext_lazy as _ 3 4 from dcim.choices import InterfaceModeChoices 5 from dcim.constants import INTERFACE_MTU_MAX, INTERFACE_MTU_MIN 6 from dcim.models import Device, DeviceRole, Platform, Region, Site, SiteGroup 7 from extras.models import ConfigTemplate 8 from ipam.models import VLAN, VLANGroup, VRF 9 from netbox.forms import NetBoxModelBulkEditForm 10 from tenancy.models import Tenant 11 from utilities.forms import BulkRenameForm, add_blank_choice 12 from utilities.forms.fields import CommentField, DynamicModelChoiceField, DynamicModelMultipleChoiceField 13 from utilities.forms.widgets import BulkEditNullBooleanSelect 14 from virtualization.choices import * 15 from virtualization.models import * 16 17 __all__ = ( 18 'ClusterBulkEditForm', 19 'ClusterGroupBulkEditForm', 20 'ClusterTypeBulkEditForm', 21 'VirtualMachineBulkEditForm', 22 'VMInterfaceBulkEditForm', 23 'VMInterfaceBulkRenameForm', 24 ) 25 26 27 class ClusterTypeBulkEditForm(NetBoxModelBulkEditForm): 28 description = forms.CharField( 29 label=_('Description'), 30 max_length=200, 31 required=False 32 ) 33 34 model = ClusterType 35 fieldsets = ( 36 (None, ('description',)), 37 ) 38 nullable_fields = ('description',) 39 40 41 class ClusterGroupBulkEditForm(NetBoxModelBulkEditForm): 42 description = forms.CharField( 43 label=_('Description'), 44 max_length=200, 45 required=False 46 ) 47 48 model = ClusterGroup 49 fieldsets = ( 50 (None, ('description',)), 51 ) 52 nullable_fields = ('description',) 53 54 55 class ClusterBulkEditForm(NetBoxModelBulkEditForm): 56 type = DynamicModelChoiceField( 57 label=_('Type'), 58 queryset=ClusterType.objects.all(), 59 required=False 60 ) 61 group = DynamicModelChoiceField( 62 label=_('Group'), 63 queryset=ClusterGroup.objects.all(), 64 required=False 65 ) 66 status = forms.ChoiceField( 67 label=_('Status'), 68 choices=add_blank_choice(ClusterStatusChoices), 69 required=False, 70 initial='' 71 ) 72 tenant = DynamicModelChoiceField( 73 label=_('Tenant'), 74 queryset=Tenant.objects.all(), 75 required=False 76 ) 77 region = DynamicModelChoiceField( 78 label=_('Region'), 79 queryset=Region.objects.all(), 80 required=False, 81 ) 82 site_group = DynamicModelChoiceField( 83 label=_('Site group'), 84 queryset=SiteGroup.objects.all(), 85 required=False, 86 ) 87 site = 
DynamicModelChoiceField( 88 label=_('Site'), 89 queryset=Site.objects.all(), 90 required=False, 91 query_params={ 92 'region_id': '$region', 93 'group_id': '$site_group', 94 } 95 ) 96 description = forms.CharField( 97 label=_('Site'), 98 max_length=200, 99 required=False 100 ) 101 comments = CommentField() 102 103 model = Cluster 104 fieldsets = ( 105 (None, ('type', 'group', 'status', 'tenant', 'description')), 106 (_('Site'), ('region', 'site_group', 'site')), 107 ) 108 nullable_fields = ( 109 'group', 'site', 'tenant', 'description', 'comments', 110 ) 111 112 113 class VirtualMachineBulkEditForm(NetBoxModelBulkEditForm): 114 status = forms.ChoiceField( 115 label=_('Status'), 116 choices=add_blank_choice(VirtualMachineStatusChoices), 117 required=False, 118 initial='', 119 ) 120 site = DynamicModelChoiceField( 121 label=_('Site'), 122 queryset=Site.objects.all(), 123 required=False 124 ) 125 cluster = DynamicModelChoiceField( 126 label=_('Cluster'), 127 queryset=Cluster.objects.all(), 128 required=False, 129 query_params={ 130 'site_id': '$site' 131 } 132 ) 133 device = DynamicModelChoiceField( 134 label=_('Device'), 135 queryset=Device.objects.all(), 136 required=False, 137 query_params={ 138 'cluster_id': '$cluster' 139 } 140 ) 141 role = DynamicModelChoiceField( 142 label=_('Role'), 143 queryset=DeviceRole.objects.filter( 144 vm_role=True 145 ), 146 required=False, 147 query_params={ 148 "vm_role": "True" 149 } 150 ) 151 tenant = DynamicModelChoiceField( 152 label=_('Tenant'), 153 queryset=Tenant.objects.all(), 154 required=False 155 ) 156 platform = DynamicModelChoiceField( 157 label=_('Platform'), 158 queryset=Platform.objects.all(), 159 required=False 160 ) 161 vcpus = forms.IntegerField( 162 required=False, 163 label=_('vCPUs') 164 ) 165 memory = forms.IntegerField( 166 required=False, 167 label=_('Memory (MB)') 168 ) 169 disk = forms.IntegerField( 170 required=False, 171 label=_('Disk (GB)') 172 ) 173 description = forms.CharField( 174 label=_('Description'), 175 max_length=200, 176 required=False 177 ) 178 config_template = DynamicModelChoiceField( 179 queryset=ConfigTemplate.objects.all(), 180 required=False 181 ) 182 comments = CommentField() 183 184 model = VirtualMachine 185 fieldsets = ( 186 (None, ('site', 'cluster', 'device', 'status', 'role', 'tenant', 'platform', 'description')), 187 (_('Resources'), ('vcpus', 'memory', 'disk')), 188 ('Configuration', ('config_template',)), 189 ) 190 nullable_fields = ( 191 'site', 'cluster', 'device', 'role', 'tenant', 'platform', 'vcpus', 'memory', 'disk', 'description', 'comments', 192 ) 193 194 195 class VMInterfaceBulkEditForm(NetBoxModelBulkEditForm): 196 virtual_machine = forms.ModelChoiceField( 197 label=_('Virtual machine'), 198 queryset=VirtualMachine.objects.all(), 199 required=False, 200 disabled=True, 201 widget=forms.HiddenInput() 202 ) 203 parent = DynamicModelChoiceField( 204 label=_('Parent'), 205 queryset=VMInterface.objects.all(), 206 required=False 207 ) 208 bridge = DynamicModelChoiceField( 209 label=_('Bridge'), 210 queryset=VMInterface.objects.all(), 211 required=False 212 ) 213 enabled = forms.NullBooleanField( 214 label=_('Enabled'), 215 required=False, 216 widget=BulkEditNullBooleanSelect() 217 ) 218 mtu = forms.IntegerField( 219 required=False, 220 min_value=INTERFACE_MTU_MIN, 221 max_value=INTERFACE_MTU_MAX, 222 label=_('MTU') 223 ) 224 description = forms.CharField( 225 label=_('Description'), 226 max_length=100, 227 required=False 228 ) 229 mode = forms.ChoiceField( 230 label=_('Mode'), 231 
choices=add_blank_choice(InterfaceModeChoices), 232 required=False 233 ) 234 vlan_group = DynamicModelChoiceField( 235 queryset=VLANGroup.objects.all(), 236 required=False, 237 label=_('VLAN group') 238 ) 239 untagged_vlan = DynamicModelChoiceField( 240 queryset=VLAN.objects.all(), 241 required=False, 242 query_params={ 243 'group_id': '$vlan_group', 244 }, 245 label=_('Untagged VLAN') 246 ) 247 tagged_vlans = DynamicModelMultipleChoiceField( 248 queryset=VLAN.objects.all(), 249 required=False, 250 query_params={ 251 'group_id': '$vlan_group', 252 }, 253 label=_('Tagged VLANs') 254 ) 255 vrf = DynamicModelChoiceField( 256 queryset=VRF.objects.all(), 257 required=False, 258 label=_('VRF') 259 ) 260 261 model = VMInterface 262 fieldsets = ( 263 (None, ('mtu', 'enabled', 'vrf', 'description')), 264 (_('Related Interfaces'), ('parent', 'bridge')), 265 (_('802.1Q Switching'), ('mode', 'vlan_group', 'untagged_vlan', 'tagged_vlans')), 266 ) 267 nullable_fields = ( 268 'parent', 'bridge', 'mtu', 'vrf', 'description', 269 ) 270 271 def __init__(self, *args, **kwargs): 272 super().__init__(*args, **kwargs) 273 if 'virtual_machine' in self.initial: 274 vm_id = self.initial.get('virtual_machine') 275 276 # Restrict parent/bridge interface assignment by VM 277 self.fields['parent'].widget.add_query_param('virtual_machine_id', vm_id) 278 self.fields['bridge'].widget.add_query_param('virtual_machine_id', vm_id) 279 280 # Limit VLAN choices by virtual machine 281 self.fields['untagged_vlan'].widget.add_query_param('available_on_virtualmachine', vm_id) 282 self.fields['tagged_vlans'].widget.add_query_param('available_on_virtualmachine', vm_id) 283 284 else: 285 # See 5643 286 if 'pk' in self.initial: 287 site = None 288 interfaces = VMInterface.objects.filter( 289 pk__in=self.initial['pk'] 290 ).prefetch_related( 291 'virtual_machine__site' 292 ) 293 294 # Check interface sites. First interface should set site, further interfaces will either continue the 295 # loop or reset back to no site and break the loop. 296 for interface in interfaces: 297 if site is None: 298 site = interface.virtual_machine.cluster.site 299 elif interface.virtual_machine.cluster.site is not site: 300 site = None 301 break 302 303 if site is not None: 304 self.fields['untagged_vlan'].widget.add_query_param('site_id', site.pk) 305 self.fields['tagged_vlans'].widget.add_query_param('site_id', site.pk) 306 307 self.fields['parent'].choices = () 308 self.fields['parent'].widget.attrs['disabled'] = True 309 self.fields['bridge'].choices = () 310 self.fields['bridge'].widget.attrs['disabled'] = True 311 312 313 class VMInterfaceBulkRenameForm(BulkRenameForm): 314 pk = forms.ModelMultipleChoiceField( 315 queryset=VMInterface.objects.all(), 316 widget=forms.MultipleHiddenInput() 317 ) 318 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/netbox/virtualization/forms/bulk_edit.py b/netbox/virtualization/forms/bulk_edit.py --- a/netbox/virtualization/forms/bulk_edit.py +++ b/netbox/virtualization/forms/bulk_edit.py @@ -294,9 +294,10 @@ # Check interface sites. First interface should set site, further interfaces will either continue the # loop or reset back to no site and break the loop. for interface in interfaces: + vm_site = interface.virtual_machine.site or interface.virtual_machine.cluster.site if site is None: - site = interface.virtual_machine.cluster.site - elif interface.virtual_machine.cluster.site is not site: + site = vm_site + elif vm_site is not site: site = None break
{"golden_diff": "diff --git a/netbox/virtualization/forms/bulk_edit.py b/netbox/virtualization/forms/bulk_edit.py\n--- a/netbox/virtualization/forms/bulk_edit.py\n+++ b/netbox/virtualization/forms/bulk_edit.py\n@@ -294,9 +294,10 @@\n # Check interface sites. First interface should set site, further interfaces will either continue the\n # loop or reset back to no site and break the loop.\n for interface in interfaces:\n+ vm_site = interface.virtual_machine.site or interface.virtual_machine.cluster.site\n if site is None:\n- site = interface.virtual_machine.cluster.site\n- elif interface.virtual_machine.cluster.site is not site:\n+ site = vm_site\n+ elif vm_site is not site:\n site = None\n break\n", "issue": "Bulk edit of Interfaces of VMs without cluster fails\n### NetBox version\n\nv3.6.5\n\n### Python version\n\n3.8\n\n### Steps to Reproduce\n\n1. Create VM and assign only a Site, not a Cluster\r\n2. Create Interface for VM\r\n3. Mark Interface and click on \"Edit Selected\"\n\n### Expected Behavior\n\nEdit form for selected VM Interface(s) appear\n\n### Observed Behavior\n\nException Window with the following Content:\r\n```\r\n<class 'AttributeError'>\r\n\r\n'NoneType' object has no attribute 'site'\r\n\r\nPython version: 3.8.10\r\nNetBox version: 3.6.5\r\nPlugins: \r\n netbox_demo: 0.3.1\r\n```\r\n\r\nWhen generating the bulk edit form, the site is extracted from the cluster of the virtual machine, which fails if no cluster is assigned to the VM:\r\n```\r\n File \"/opt/netbox/netbox/virtualization/forms/bulk_edit.py\", line 272, in __init__\r\n site = interface.virtual_machine.cluster.site\r\nAttributeError: 'NoneType' object has no attribute 'site'\r\n```\n", "before_files": [{"content": "from django import forms\nfrom django.utils.translation import gettext_lazy as _\n\nfrom dcim.choices import InterfaceModeChoices\nfrom dcim.constants import INTERFACE_MTU_MAX, INTERFACE_MTU_MIN\nfrom dcim.models import Device, DeviceRole, Platform, Region, Site, SiteGroup\nfrom extras.models import ConfigTemplate\nfrom ipam.models import VLAN, VLANGroup, VRF\nfrom netbox.forms import NetBoxModelBulkEditForm\nfrom tenancy.models import Tenant\nfrom utilities.forms import BulkRenameForm, add_blank_choice\nfrom utilities.forms.fields import CommentField, DynamicModelChoiceField, DynamicModelMultipleChoiceField\nfrom utilities.forms.widgets import BulkEditNullBooleanSelect\nfrom virtualization.choices import *\nfrom virtualization.models import *\n\n__all__ = (\n 'ClusterBulkEditForm',\n 'ClusterGroupBulkEditForm',\n 'ClusterTypeBulkEditForm',\n 'VirtualMachineBulkEditForm',\n 'VMInterfaceBulkEditForm',\n 'VMInterfaceBulkRenameForm',\n)\n\n\nclass ClusterTypeBulkEditForm(NetBoxModelBulkEditForm):\n description = forms.CharField(\n label=_('Description'),\n max_length=200,\n required=False\n )\n\n model = ClusterType\n fieldsets = (\n (None, ('description',)),\n )\n nullable_fields = ('description',)\n\n\nclass ClusterGroupBulkEditForm(NetBoxModelBulkEditForm):\n description = forms.CharField(\n label=_('Description'),\n max_length=200,\n required=False\n )\n\n model = ClusterGroup\n fieldsets = (\n (None, ('description',)),\n )\n nullable_fields = ('description',)\n\n\nclass ClusterBulkEditForm(NetBoxModelBulkEditForm):\n type = DynamicModelChoiceField(\n label=_('Type'),\n queryset=ClusterType.objects.all(),\n required=False\n )\n group = DynamicModelChoiceField(\n label=_('Group'),\n queryset=ClusterGroup.objects.all(),\n required=False\n )\n status = forms.ChoiceField(\n label=_('Status'),\n 
choices=add_blank_choice(ClusterStatusChoices),\n required=False,\n initial=''\n )\n tenant = DynamicModelChoiceField(\n label=_('Tenant'),\n queryset=Tenant.objects.all(),\n required=False\n )\n region = DynamicModelChoiceField(\n label=_('Region'),\n queryset=Region.objects.all(),\n required=False,\n )\n site_group = DynamicModelChoiceField(\n label=_('Site group'),\n queryset=SiteGroup.objects.all(),\n required=False,\n )\n site = DynamicModelChoiceField(\n label=_('Site'),\n queryset=Site.objects.all(),\n required=False,\n query_params={\n 'region_id': '$region',\n 'group_id': '$site_group',\n }\n )\n description = forms.CharField(\n label=_('Site'),\n max_length=200,\n required=False\n )\n comments = CommentField()\n\n model = Cluster\n fieldsets = (\n (None, ('type', 'group', 'status', 'tenant', 'description')),\n (_('Site'), ('region', 'site_group', 'site')),\n )\n nullable_fields = (\n 'group', 'site', 'tenant', 'description', 'comments',\n )\n\n\nclass VirtualMachineBulkEditForm(NetBoxModelBulkEditForm):\n status = forms.ChoiceField(\n label=_('Status'),\n choices=add_blank_choice(VirtualMachineStatusChoices),\n required=False,\n initial='',\n )\n site = DynamicModelChoiceField(\n label=_('Site'),\n queryset=Site.objects.all(),\n required=False\n )\n cluster = DynamicModelChoiceField(\n label=_('Cluster'),\n queryset=Cluster.objects.all(),\n required=False,\n query_params={\n 'site_id': '$site'\n }\n )\n device = DynamicModelChoiceField(\n label=_('Device'),\n queryset=Device.objects.all(),\n required=False,\n query_params={\n 'cluster_id': '$cluster'\n }\n )\n role = DynamicModelChoiceField(\n label=_('Role'),\n queryset=DeviceRole.objects.filter(\n vm_role=True\n ),\n required=False,\n query_params={\n \"vm_role\": \"True\"\n }\n )\n tenant = DynamicModelChoiceField(\n label=_('Tenant'),\n queryset=Tenant.objects.all(),\n required=False\n )\n platform = DynamicModelChoiceField(\n label=_('Platform'),\n queryset=Platform.objects.all(),\n required=False\n )\n vcpus = forms.IntegerField(\n required=False,\n label=_('vCPUs')\n )\n memory = forms.IntegerField(\n required=False,\n label=_('Memory (MB)')\n )\n disk = forms.IntegerField(\n required=False,\n label=_('Disk (GB)')\n )\n description = forms.CharField(\n label=_('Description'),\n max_length=200,\n required=False\n )\n config_template = DynamicModelChoiceField(\n queryset=ConfigTemplate.objects.all(),\n required=False\n )\n comments = CommentField()\n\n model = VirtualMachine\n fieldsets = (\n (None, ('site', 'cluster', 'device', 'status', 'role', 'tenant', 'platform', 'description')),\n (_('Resources'), ('vcpus', 'memory', 'disk')),\n ('Configuration', ('config_template',)),\n )\n nullable_fields = (\n 'site', 'cluster', 'device', 'role', 'tenant', 'platform', 'vcpus', 'memory', 'disk', 'description', 'comments',\n )\n\n\nclass VMInterfaceBulkEditForm(NetBoxModelBulkEditForm):\n virtual_machine = forms.ModelChoiceField(\n label=_('Virtual machine'),\n queryset=VirtualMachine.objects.all(),\n required=False,\n disabled=True,\n widget=forms.HiddenInput()\n )\n parent = DynamicModelChoiceField(\n label=_('Parent'),\n queryset=VMInterface.objects.all(),\n required=False\n )\n bridge = DynamicModelChoiceField(\n label=_('Bridge'),\n queryset=VMInterface.objects.all(),\n required=False\n )\n enabled = forms.NullBooleanField(\n label=_('Enabled'),\n required=False,\n widget=BulkEditNullBooleanSelect()\n )\n mtu = forms.IntegerField(\n required=False,\n min_value=INTERFACE_MTU_MIN,\n max_value=INTERFACE_MTU_MAX,\n label=_('MTU')\n 
)\n description = forms.CharField(\n label=_('Description'),\n max_length=100,\n required=False\n )\n mode = forms.ChoiceField(\n label=_('Mode'),\n choices=add_blank_choice(InterfaceModeChoices),\n required=False\n )\n vlan_group = DynamicModelChoiceField(\n queryset=VLANGroup.objects.all(),\n required=False,\n label=_('VLAN group')\n )\n untagged_vlan = DynamicModelChoiceField(\n queryset=VLAN.objects.all(),\n required=False,\n query_params={\n 'group_id': '$vlan_group',\n },\n label=_('Untagged VLAN')\n )\n tagged_vlans = DynamicModelMultipleChoiceField(\n queryset=VLAN.objects.all(),\n required=False,\n query_params={\n 'group_id': '$vlan_group',\n },\n label=_('Tagged VLANs')\n )\n vrf = DynamicModelChoiceField(\n queryset=VRF.objects.all(),\n required=False,\n label=_('VRF')\n )\n\n model = VMInterface\n fieldsets = (\n (None, ('mtu', 'enabled', 'vrf', 'description')),\n (_('Related Interfaces'), ('parent', 'bridge')),\n (_('802.1Q Switching'), ('mode', 'vlan_group', 'untagged_vlan', 'tagged_vlans')),\n )\n nullable_fields = (\n 'parent', 'bridge', 'mtu', 'vrf', 'description',\n )\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n if 'virtual_machine' in self.initial:\n vm_id = self.initial.get('virtual_machine')\n\n # Restrict parent/bridge interface assignment by VM\n self.fields['parent'].widget.add_query_param('virtual_machine_id', vm_id)\n self.fields['bridge'].widget.add_query_param('virtual_machine_id', vm_id)\n\n # Limit VLAN choices by virtual machine\n self.fields['untagged_vlan'].widget.add_query_param('available_on_virtualmachine', vm_id)\n self.fields['tagged_vlans'].widget.add_query_param('available_on_virtualmachine', vm_id)\n\n else:\n # See 5643\n if 'pk' in self.initial:\n site = None\n interfaces = VMInterface.objects.filter(\n pk__in=self.initial['pk']\n ).prefetch_related(\n 'virtual_machine__site'\n )\n\n # Check interface sites. 
First interface should set site, further interfaces will either continue the\n # loop or reset back to no site and break the loop.\n for interface in interfaces:\n if site is None:\n site = interface.virtual_machine.cluster.site\n elif interface.virtual_machine.cluster.site is not site:\n site = None\n break\n\n if site is not None:\n self.fields['untagged_vlan'].widget.add_query_param('site_id', site.pk)\n self.fields['tagged_vlans'].widget.add_query_param('site_id', site.pk)\n\n self.fields['parent'].choices = ()\n self.fields['parent'].widget.attrs['disabled'] = True\n self.fields['bridge'].choices = ()\n self.fields['bridge'].widget.attrs['disabled'] = True\n\n\nclass VMInterfaceBulkRenameForm(BulkRenameForm):\n pk = forms.ModelMultipleChoiceField(\n queryset=VMInterface.objects.all(),\n widget=forms.MultipleHiddenInput()\n )\n", "path": "netbox/virtualization/forms/bulk_edit.py"}], "after_files": [{"content": "from django import forms\nfrom django.utils.translation import gettext_lazy as _\n\nfrom dcim.choices import InterfaceModeChoices\nfrom dcim.constants import INTERFACE_MTU_MAX, INTERFACE_MTU_MIN\nfrom dcim.models import Device, DeviceRole, Platform, Region, Site, SiteGroup\nfrom extras.models import ConfigTemplate\nfrom ipam.models import VLAN, VLANGroup, VRF\nfrom netbox.forms import NetBoxModelBulkEditForm\nfrom tenancy.models import Tenant\nfrom utilities.forms import BulkRenameForm, add_blank_choice\nfrom utilities.forms.fields import CommentField, DynamicModelChoiceField, DynamicModelMultipleChoiceField\nfrom utilities.forms.widgets import BulkEditNullBooleanSelect\nfrom virtualization.choices import *\nfrom virtualization.models import *\n\n__all__ = (\n 'ClusterBulkEditForm',\n 'ClusterGroupBulkEditForm',\n 'ClusterTypeBulkEditForm',\n 'VirtualMachineBulkEditForm',\n 'VMInterfaceBulkEditForm',\n 'VMInterfaceBulkRenameForm',\n)\n\n\nclass ClusterTypeBulkEditForm(NetBoxModelBulkEditForm):\n description = forms.CharField(\n label=_('Description'),\n max_length=200,\n required=False\n )\n\n model = ClusterType\n fieldsets = (\n (None, ('description',)),\n )\n nullable_fields = ('description',)\n\n\nclass ClusterGroupBulkEditForm(NetBoxModelBulkEditForm):\n description = forms.CharField(\n label=_('Description'),\n max_length=200,\n required=False\n )\n\n model = ClusterGroup\n fieldsets = (\n (None, ('description',)),\n )\n nullable_fields = ('description',)\n\n\nclass ClusterBulkEditForm(NetBoxModelBulkEditForm):\n type = DynamicModelChoiceField(\n label=_('Type'),\n queryset=ClusterType.objects.all(),\n required=False\n )\n group = DynamicModelChoiceField(\n label=_('Group'),\n queryset=ClusterGroup.objects.all(),\n required=False\n )\n status = forms.ChoiceField(\n label=_('Status'),\n choices=add_blank_choice(ClusterStatusChoices),\n required=False,\n initial=''\n )\n tenant = DynamicModelChoiceField(\n label=_('Tenant'),\n queryset=Tenant.objects.all(),\n required=False\n )\n region = DynamicModelChoiceField(\n label=_('Region'),\n queryset=Region.objects.all(),\n required=False,\n )\n site_group = DynamicModelChoiceField(\n label=_('Site group'),\n queryset=SiteGroup.objects.all(),\n required=False,\n )\n site = DynamicModelChoiceField(\n label=_('Site'),\n queryset=Site.objects.all(),\n required=False,\n query_params={\n 'region_id': '$region',\n 'group_id': '$site_group',\n }\n )\n description = forms.CharField(\n label=_('Site'),\n max_length=200,\n required=False\n )\n comments = CommentField()\n\n model = Cluster\n fieldsets = (\n (None, ('type', 'group', 'status', 
'tenant', 'description')),\n (_('Site'), ('region', 'site_group', 'site')),\n )\n nullable_fields = (\n 'group', 'site', 'tenant', 'description', 'comments',\n )\n\n\nclass VirtualMachineBulkEditForm(NetBoxModelBulkEditForm):\n status = forms.ChoiceField(\n label=_('Status'),\n choices=add_blank_choice(VirtualMachineStatusChoices),\n required=False,\n initial='',\n )\n site = DynamicModelChoiceField(\n label=_('Site'),\n queryset=Site.objects.all(),\n required=False\n )\n cluster = DynamicModelChoiceField(\n label=_('Cluster'),\n queryset=Cluster.objects.all(),\n required=False,\n query_params={\n 'site_id': '$site'\n }\n )\n device = DynamicModelChoiceField(\n label=_('Device'),\n queryset=Device.objects.all(),\n required=False,\n query_params={\n 'cluster_id': '$cluster'\n }\n )\n role = DynamicModelChoiceField(\n label=_('Role'),\n queryset=DeviceRole.objects.filter(\n vm_role=True\n ),\n required=False,\n query_params={\n \"vm_role\": \"True\"\n }\n )\n tenant = DynamicModelChoiceField(\n label=_('Tenant'),\n queryset=Tenant.objects.all(),\n required=False\n )\n platform = DynamicModelChoiceField(\n label=_('Platform'),\n queryset=Platform.objects.all(),\n required=False\n )\n vcpus = forms.IntegerField(\n required=False,\n label=_('vCPUs')\n )\n memory = forms.IntegerField(\n required=False,\n label=_('Memory (MB)')\n )\n disk = forms.IntegerField(\n required=False,\n label=_('Disk (GB)')\n )\n description = forms.CharField(\n label=_('Description'),\n max_length=200,\n required=False\n )\n config_template = DynamicModelChoiceField(\n queryset=ConfigTemplate.objects.all(),\n required=False\n )\n comments = CommentField()\n\n model = VirtualMachine\n fieldsets = (\n (None, ('site', 'cluster', 'device', 'status', 'role', 'tenant', 'platform', 'description')),\n (_('Resources'), ('vcpus', 'memory', 'disk')),\n ('Configuration', ('config_template',)),\n )\n nullable_fields = (\n 'site', 'cluster', 'device', 'role', 'tenant', 'platform', 'vcpus', 'memory', 'disk', 'description', 'comments',\n )\n\n\nclass VMInterfaceBulkEditForm(NetBoxModelBulkEditForm):\n virtual_machine = forms.ModelChoiceField(\n label=_('Virtual machine'),\n queryset=VirtualMachine.objects.all(),\n required=False,\n disabled=True,\n widget=forms.HiddenInput()\n )\n parent = DynamicModelChoiceField(\n label=_('Parent'),\n queryset=VMInterface.objects.all(),\n required=False\n )\n bridge = DynamicModelChoiceField(\n label=_('Bridge'),\n queryset=VMInterface.objects.all(),\n required=False\n )\n enabled = forms.NullBooleanField(\n label=_('Enabled'),\n required=False,\n widget=BulkEditNullBooleanSelect()\n )\n mtu = forms.IntegerField(\n required=False,\n min_value=INTERFACE_MTU_MIN,\n max_value=INTERFACE_MTU_MAX,\n label=_('MTU')\n )\n description = forms.CharField(\n label=_('Description'),\n max_length=100,\n required=False\n )\n mode = forms.ChoiceField(\n label=_('Mode'),\n choices=add_blank_choice(InterfaceModeChoices),\n required=False\n )\n vlan_group = DynamicModelChoiceField(\n queryset=VLANGroup.objects.all(),\n required=False,\n label=_('VLAN group')\n )\n untagged_vlan = DynamicModelChoiceField(\n queryset=VLAN.objects.all(),\n required=False,\n query_params={\n 'group_id': '$vlan_group',\n },\n label=_('Untagged VLAN')\n )\n tagged_vlans = DynamicModelMultipleChoiceField(\n queryset=VLAN.objects.all(),\n required=False,\n query_params={\n 'group_id': '$vlan_group',\n },\n label=_('Tagged VLANs')\n )\n vrf = DynamicModelChoiceField(\n queryset=VRF.objects.all(),\n required=False,\n label=_('VRF')\n )\n\n 
model = VMInterface\n fieldsets = (\n (None, ('mtu', 'enabled', 'vrf', 'description')),\n (_('Related Interfaces'), ('parent', 'bridge')),\n (_('802.1Q Switching'), ('mode', 'vlan_group', 'untagged_vlan', 'tagged_vlans')),\n )\n nullable_fields = (\n 'parent', 'bridge', 'mtu', 'vrf', 'description',\n )\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n if 'virtual_machine' in self.initial:\n vm_id = self.initial.get('virtual_machine')\n\n # Restrict parent/bridge interface assignment by VM\n self.fields['parent'].widget.add_query_param('virtual_machine_id', vm_id)\n self.fields['bridge'].widget.add_query_param('virtual_machine_id', vm_id)\n\n # Limit VLAN choices by virtual machine\n self.fields['untagged_vlan'].widget.add_query_param('available_on_virtualmachine', vm_id)\n self.fields['tagged_vlans'].widget.add_query_param('available_on_virtualmachine', vm_id)\n\n else:\n # See 5643\n if 'pk' in self.initial:\n site = None\n interfaces = VMInterface.objects.filter(\n pk__in=self.initial['pk']\n ).prefetch_related(\n 'virtual_machine__site'\n )\n\n # Check interface sites. First interface should set site, further interfaces will either continue the\n # loop or reset back to no site and break the loop.\n for interface in interfaces:\n vm_site = interface.virtual_machine.site or interface.virtual_machine.cluster.site\n if site is None:\n site = vm_site\n elif vm_site is not site:\n site = None\n break\n\n if site is not None:\n self.fields['untagged_vlan'].widget.add_query_param('site_id', site.pk)\n self.fields['tagged_vlans'].widget.add_query_param('site_id', site.pk)\n\n self.fields['parent'].choices = ()\n self.fields['parent'].widget.attrs['disabled'] = True\n self.fields['bridge'].choices = ()\n self.fields['bridge'].widget.attrs['disabled'] = True\n\n\nclass VMInterfaceBulkRenameForm(BulkRenameForm):\n pk = forms.ModelMultipleChoiceField(\n queryset=VMInterface.objects.all(),\n widget=forms.MultipleHiddenInput()\n )\n", "path": "netbox/virtualization/forms/bulk_edit.py"}]}
3,403
175
gh_patches_debug_24120
rasdani/github-patches
git_diff
conan-io__conan-center-index-11233
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [request] kcov/40 ### Package Details * Package Name/Version: **kcov/40** * Changelog: **https://github.com/SimonKagstrom/kcov/blob/master/ChangeLog** Hello, Currently conan-center provides only 38 release, I would like to have latest release (40) also available. I'll provides a pull request. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `recipes/kcov/all/conanfile.py` Content: ``` 1 import os 2 from conans import ConanFile, CMake, tools 3 from conans.errors import ConanInvalidConfiguration 4 5 6 class KcovConan(ConanFile): 7 name = "kcov" 8 license = "GPL-2.0" 9 url = "https://github.com/conan-io/conan-center-index/" 10 homepage = "http://simonkagstrom.github.io/kcov/index.html" 11 description = "Code coverage tool for compiled programs, Python and Bash\ 12 which uses debugging information to collect and report data without\ 13 special compilation options" 14 topics = ("coverage", "linux", "debug") 15 settings = "os", "compiler", "build_type", "arch" 16 exports_sources = "CMakeLists.txt", "patches/**" 17 requires = ["zlib/1.2.11", 18 "libiberty/9.1.0", 19 "libcurl/7.64.1", 20 "elfutils/0.180"] 21 generators = "cmake" 22 _cmake = None 23 _source_subfolder = "source_subfolder" 24 _build_subfolder = "build_subfolder" 25 26 def configure(self): 27 if self.settings.os == "Windows": 28 raise ConanInvalidConfiguration( 29 "kcov can not be built on windows.") 30 31 def source(self): 32 tools.get(**self.conan_data["sources"][self.version]) 33 extracted_dir = self.name + "-" + self.version 34 os.rename(extracted_dir, self._source_subfolder) 35 36 def _patch_sources(self): 37 for patch in self.conan_data["patches"][self.version]: 38 tools.patch(**patch) 39 40 def _configure_cmake(self): 41 if self._cmake is not None: 42 return self._cmake 43 self._cmake = CMake(self) 44 self._cmake.configure(build_folder=self._build_subfolder) 45 return self._cmake 46 47 def build(self): 48 self._patch_sources() 49 cmake = self._configure_cmake() 50 cmake.build() 51 52 def package(self): 53 cmake = self._configure_cmake() 54 cmake.install() 55 tools.rmdir(os.path.join(self.package_folder, "share")) 56 self.copy("COPYING*", dst="licenses", src=self._source_subfolder) 57 58 def package_info(self): 59 bindir = os.path.join(self.package_folder, "bin") 60 self.output.info("Appending PATH environment variable: {}" 61 .format(bindir)) 62 self.env_info.PATH.append(bindir) 63 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/recipes/kcov/all/conanfile.py b/recipes/kcov/all/conanfile.py --- a/recipes/kcov/all/conanfile.py +++ b/recipes/kcov/all/conanfile.py @@ -1,8 +1,8 @@ import os -from conans import ConanFile, CMake, tools +from conan import ConanFile +from conans import CMake, tools from conans.errors import ConanInvalidConfiguration - class KcovConan(ConanFile): name = "kcov" license = "GPL-2.0" @@ -14,9 +14,9 @@ topics = ("coverage", "linux", "debug") settings = "os", "compiler", "build_type", "arch" exports_sources = "CMakeLists.txt", "patches/**" - requires = ["zlib/1.2.11", + requires = ["zlib/1.2.12", "libiberty/9.1.0", - "libcurl/7.64.1", + "libcurl/7.83.1", "elfutils/0.180"] generators = "cmake" _cmake = None @@ -60,3 +60,4 @@ self.output.info("Appending PATH environment variable: {}" .format(bindir)) self.env_info.PATH.append(bindir) + self.cpp_info.includedirs = []
{"golden_diff": "diff --git a/recipes/kcov/all/conanfile.py b/recipes/kcov/all/conanfile.py\n--- a/recipes/kcov/all/conanfile.py\n+++ b/recipes/kcov/all/conanfile.py\n@@ -1,8 +1,8 @@\n import os\n-from conans import ConanFile, CMake, tools\n+from conan import ConanFile\n+from conans import CMake, tools\n from conans.errors import ConanInvalidConfiguration\n \n-\n class KcovConan(ConanFile):\n name = \"kcov\"\n license = \"GPL-2.0\"\n@@ -14,9 +14,9 @@\n topics = (\"coverage\", \"linux\", \"debug\")\n settings = \"os\", \"compiler\", \"build_type\", \"arch\"\n exports_sources = \"CMakeLists.txt\", \"patches/**\"\n- requires = [\"zlib/1.2.11\",\n+ requires = [\"zlib/1.2.12\",\n \"libiberty/9.1.0\",\n- \"libcurl/7.64.1\",\n+ \"libcurl/7.83.1\",\n \"elfutils/0.180\"]\n generators = \"cmake\"\n _cmake = None\n@@ -60,3 +60,4 @@\n self.output.info(\"Appending PATH environment variable: {}\"\n .format(bindir))\n self.env_info.PATH.append(bindir)\n+ self.cpp_info.includedirs = []\n", "issue": "[request] kcov/40\n### Package Details\r\n * Package Name/Version: **kcov/40**\r\n * Changelog: **https://github.com/SimonKagstrom/kcov/blob/master/ChangeLog**\r\n\r\nHello,\r\n\r\nCurrently conan-center provides only 38 release, I would like to have latest release (40) also available.\r\nI'll provides a pull request.\r\n\n", "before_files": [{"content": "import os\nfrom conans import ConanFile, CMake, tools\nfrom conans.errors import ConanInvalidConfiguration\n\n\nclass KcovConan(ConanFile):\n name = \"kcov\"\n license = \"GPL-2.0\"\n url = \"https://github.com/conan-io/conan-center-index/\"\n homepage = \"http://simonkagstrom.github.io/kcov/index.html\"\n description = \"Code coverage tool for compiled programs, Python and Bash\\\n which uses debugging information to collect and report data without\\\n special compilation options\"\n topics = (\"coverage\", \"linux\", \"debug\")\n settings = \"os\", \"compiler\", \"build_type\", \"arch\"\n exports_sources = \"CMakeLists.txt\", \"patches/**\"\n requires = [\"zlib/1.2.11\",\n \"libiberty/9.1.0\",\n \"libcurl/7.64.1\",\n \"elfutils/0.180\"]\n generators = \"cmake\"\n _cmake = None\n _source_subfolder = \"source_subfolder\"\n _build_subfolder = \"build_subfolder\"\n\n def configure(self):\n if self.settings.os == \"Windows\":\n raise ConanInvalidConfiguration(\n \"kcov can not be built on windows.\")\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n extracted_dir = self.name + \"-\" + self.version\n os.rename(extracted_dir, self._source_subfolder)\n\n def _patch_sources(self):\n for patch in self.conan_data[\"patches\"][self.version]:\n tools.patch(**patch)\n\n def _configure_cmake(self):\n if self._cmake is not None:\n return self._cmake\n self._cmake = CMake(self)\n self._cmake.configure(build_folder=self._build_subfolder)\n return self._cmake\n\n def build(self):\n self._patch_sources()\n cmake = self._configure_cmake()\n cmake.build()\n\n def package(self):\n cmake = self._configure_cmake()\n cmake.install()\n tools.rmdir(os.path.join(self.package_folder, \"share\"))\n self.copy(\"COPYING*\", dst=\"licenses\", src=self._source_subfolder)\n\n def package_info(self):\n bindir = os.path.join(self.package_folder, \"bin\")\n self.output.info(\"Appending PATH environment variable: {}\"\n .format(bindir))\n self.env_info.PATH.append(bindir)\n", "path": "recipes/kcov/all/conanfile.py"}], "after_files": [{"content": "import os\nfrom conan import ConanFile\nfrom conans import CMake, tools\nfrom conans.errors import 
ConanInvalidConfiguration\n\nclass KcovConan(ConanFile):\n name = \"kcov\"\n license = \"GPL-2.0\"\n url = \"https://github.com/conan-io/conan-center-index/\"\n homepage = \"http://simonkagstrom.github.io/kcov/index.html\"\n description = \"Code coverage tool for compiled programs, Python and Bash\\\n which uses debugging information to collect and report data without\\\n special compilation options\"\n topics = (\"coverage\", \"linux\", \"debug\")\n settings = \"os\", \"compiler\", \"build_type\", \"arch\"\n exports_sources = \"CMakeLists.txt\", \"patches/**\"\n requires = [\"zlib/1.2.12\",\n \"libiberty/9.1.0\",\n \"libcurl/7.83.1\",\n \"elfutils/0.180\"]\n generators = \"cmake\"\n _cmake = None\n _source_subfolder = \"source_subfolder\"\n _build_subfolder = \"build_subfolder\"\n\n def configure(self):\n if self.settings.os == \"Windows\":\n raise ConanInvalidConfiguration(\n \"kcov can not be built on windows.\")\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n extracted_dir = self.name + \"-\" + self.version\n os.rename(extracted_dir, self._source_subfolder)\n\n def _patch_sources(self):\n for patch in self.conan_data[\"patches\"][self.version]:\n tools.patch(**patch)\n\n def _configure_cmake(self):\n if self._cmake is not None:\n return self._cmake\n self._cmake = CMake(self)\n self._cmake.configure(build_folder=self._build_subfolder)\n return self._cmake\n\n def build(self):\n self._patch_sources()\n cmake = self._configure_cmake()\n cmake.build()\n\n def package(self):\n cmake = self._configure_cmake()\n cmake.install()\n tools.rmdir(os.path.join(self.package_folder, \"share\"))\n self.copy(\"COPYING*\", dst=\"licenses\", src=self._source_subfolder)\n\n def package_info(self):\n bindir = os.path.join(self.package_folder, \"bin\")\n self.output.info(\"Appending PATH environment variable: {}\"\n .format(bindir))\n self.env_info.PATH.append(bindir)\n self.cpp_info.includedirs = []\n", "path": "recipes/kcov/all/conanfile.py"}]}
999
325
gh_patches_debug_33677
rasdani/github-patches
git_diff
kedro-org__kedro-3300
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Revisit: Make KedroContext a dataclass and add `config_loader` as a property # Description As `KedroSession` now control the lifecycle of Kedro's run, `KedroContext` act like a container and it stores important attributes. Since we now dropped Python 3.6 support, we can make use of Python's `dataclass` to further tidy up `KedroContext`'s constructor code. todo: - [x] Make `KedroContext` a dataclass(https://github.com/kedro-org/kedro/pull/1465) - [x] Add `config_loader` as @property instead of relying the private `context._get_config_loader()` https://github.com/kedro-org/kedro/pull/1505 - [x] Update corresponding document in Starter/Kedro --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `kedro/framework/context/context.py` Content: ``` 1 """This module provides context for Kedro project.""" 2 from __future__ import annotations 3 4 import logging 5 from copy import deepcopy 6 from pathlib import Path, PurePosixPath, PureWindowsPath 7 from typing import Any 8 from urllib.parse import urlparse 9 from warnings import warn 10 11 from attrs import field, frozen 12 from omegaconf import OmegaConf 13 from pluggy import PluginManager 14 15 from kedro.config import AbstractConfigLoader, MissingConfigException 16 from kedro.framework.project import settings 17 from kedro.io import DataCatalog 18 from kedro.pipeline.pipeline import _transcode_split 19 20 21 def _is_relative_path(path_string: str) -> bool: 22 """Checks whether a path string is a relative path. 23 24 Example: 25 :: 26 >>> _is_relative_path("data/01_raw") == True 27 >>> _is_relative_path("info.log") == True 28 >>> _is_relative_path("/tmp/data/01_raw") == False 29 >>> _is_relative_path(r"C:\\info.log") == False 30 >>> _is_relative_path(r"\\'info.log") == False 31 >>> _is_relative_path("c:/info.log") == False 32 >>> _is_relative_path("s3://info.log") == False 33 34 Args: 35 path_string: The path string to check. 36 Returns: 37 Whether the string is a relative path. 38 """ 39 # os.path.splitdrive does not reliably work on non-Windows systems 40 # breaking the coverage, using PureWindowsPath instead 41 is_full_windows_path_with_drive = bool(PureWindowsPath(path_string).drive) 42 if is_full_windows_path_with_drive: 43 return False 44 45 is_remote_path = bool(urlparse(path_string).scheme) 46 if is_remote_path: 47 return False 48 49 is_absolute_path = PurePosixPath(path_string).is_absolute() 50 if is_absolute_path: 51 return False 52 53 return True 54 55 56 def _convert_paths_to_absolute_posix( 57 project_path: Path, conf_dictionary: dict[str, Any] 58 ) -> dict[str, Any]: 59 """Turn all relative paths inside ``conf_dictionary`` into absolute paths by appending them 60 to ``project_path`` and convert absolute Windows paths to POSIX format. This is a hack to 61 make sure that we don't have to change user's working directory for logging and datasets to 62 work. It is important for non-standard workflows such as IPython notebook where users don't go 63 through `kedro run` or `__main__.py` entrypoints. 
64 65 Example: 66 :: 67 >>> conf = _convert_paths_to_absolute_posix( 68 >>> project_path=Path("/path/to/my/project"), 69 >>> conf_dictionary={ 70 >>> "handlers": { 71 >>> "info_file_handler": { 72 >>> "filename": "info.log" 73 >>> } 74 >>> } 75 >>> } 76 >>> ) 77 >>> print(conf['handlers']['info_file_handler']['filename']) 78 "/path/to/my/project/info.log" 79 80 Args: 81 project_path: The root directory to prepend to relative path to make absolute path. 82 conf_dictionary: The configuration containing paths to expand. 83 Returns: 84 A dictionary containing only absolute paths. 85 Raises: 86 ValueError: If the provided ``project_path`` is not an absolute path. 87 """ 88 if not project_path.is_absolute(): 89 raise ValueError( 90 f"project_path must be an absolute path. Received: {project_path}" 91 ) 92 93 # only check a few conf keys that are known to specify a path string as value 94 conf_keys_with_filepath = ("filename", "filepath", "path") 95 96 for conf_key, conf_value in conf_dictionary.items(): 97 # if the conf_value is another dictionary, absolutify its paths first. 98 if isinstance(conf_value, dict): 99 conf_dictionary[conf_key] = _convert_paths_to_absolute_posix( 100 project_path, conf_value 101 ) 102 continue 103 104 # if the conf_value is not a dictionary nor a string, skip 105 if not isinstance(conf_value, str): 106 continue 107 108 # if the conf_value is a string but the conf_key isn't one associated with filepath, skip 109 if conf_key not in conf_keys_with_filepath: 110 continue 111 112 if _is_relative_path(conf_value): 113 # Absolute local path should be in POSIX format 114 conf_value_absolute_path = (project_path / conf_value).as_posix() 115 conf_dictionary[conf_key] = conf_value_absolute_path 116 elif PureWindowsPath(conf_value).drive: 117 # Convert absolute Windows path to POSIX format 118 conf_dictionary[conf_key] = PureWindowsPath(conf_value).as_posix() 119 120 return conf_dictionary 121 122 123 def _validate_transcoded_datasets(catalog: DataCatalog): 124 """Validates transcoded datasets are correctly named 125 126 Args: 127 catalog (DataCatalog): The catalog object containing the 128 datasets to be validated. 129 130 Raises: 131 ValueError: If a dataset name does not conform to the expected 132 transcoding naming conventions,a ValueError is raised by the 133 `_transcode_split` function. 134 135 """ 136 # noqa: protected-access 137 for dataset_name in catalog._datasets.keys(): 138 _transcode_split(dataset_name) 139 140 141 def _expand_full_path(project_path: str | Path) -> Path: 142 return Path(project_path).expanduser().resolve() 143 144 145 @frozen 146 class KedroContext: 147 """``KedroContext`` is the base class which holds the configuration and 148 Kedro's main functionality. 149 """ 150 151 _package_name: str 152 project_path: Path = field(converter=_expand_full_path) 153 config_loader: AbstractConfigLoader 154 _hook_manager: PluginManager 155 env: str | None = None 156 _extra_params: dict[str, Any] | None = field(default=None, converter=deepcopy) 157 158 @property 159 def catalog(self) -> DataCatalog: 160 """Read-only property referring to Kedro's ``DataCatalog`` for this context. 161 162 Returns: 163 DataCatalog defined in `catalog.yml`. 164 Raises: 165 KedroContextError: Incorrect ``DataCatalog`` registered for the project. 166 167 """ 168 return self._get_catalog() 169 170 @property 171 def params(self) -> dict[str, Any]: 172 """Read-only property referring to Kedro's parameters for this context. 
173 174 Returns: 175 Parameters defined in `parameters.yml` with the addition of any 176 extra parameters passed at initialization. 177 """ 178 try: 179 params = self.config_loader["parameters"] 180 except MissingConfigException as exc: 181 warn(f"Parameters not found in your Kedro project config.\n{str(exc)}") 182 params = {} 183 184 if self._extra_params: 185 # Merge nested structures 186 params = OmegaConf.merge(params, self._extra_params) 187 188 return OmegaConf.to_container(params) if OmegaConf.is_config(params) else params 189 190 def _get_catalog( 191 self, 192 save_version: str = None, 193 load_versions: dict[str, str] = None, 194 ) -> DataCatalog: 195 """A hook for changing the creation of a DataCatalog instance. 196 197 Returns: 198 DataCatalog defined in `catalog.yml`. 199 Raises: 200 KedroContextError: Incorrect ``DataCatalog`` registered for the project. 201 202 """ 203 # '**/catalog*' reads modular pipeline configs 204 conf_catalog = self.config_loader["catalog"] 205 # turn relative paths in conf_catalog into absolute paths 206 # before initializing the catalog 207 conf_catalog = _convert_paths_to_absolute_posix( 208 project_path=self.project_path, conf_dictionary=conf_catalog 209 ) 210 conf_creds = self._get_config_credentials() 211 212 catalog = settings.DATA_CATALOG_CLASS.from_config( 213 catalog=conf_catalog, 214 credentials=conf_creds, 215 load_versions=load_versions, 216 save_version=save_version, 217 ) 218 219 feed_dict = self._get_feed_dict() 220 catalog.add_feed_dict(feed_dict) 221 _validate_transcoded_datasets(catalog) 222 self._hook_manager.hook.after_catalog_created( 223 catalog=catalog, 224 conf_catalog=conf_catalog, 225 conf_creds=conf_creds, 226 feed_dict=feed_dict, 227 save_version=save_version, 228 load_versions=load_versions, 229 ) 230 return catalog 231 232 def _get_feed_dict(self) -> dict[str, Any]: 233 """Get parameters and return the feed dictionary.""" 234 params = self.params 235 feed_dict = {"parameters": params} 236 237 def _add_param_to_feed_dict(param_name, param_value): 238 """This recursively adds parameter paths to the `feed_dict`, 239 whenever `param_value` is a dictionary itself, so that users can 240 specify specific nested parameters in their node inputs. 241 242 Example: 243 244 >>> param_name = "a" 245 >>> param_value = {"b": 1} 246 >>> _add_param_to_feed_dict(param_name, param_value) 247 >>> assert feed_dict["params:a"] == {"b": 1} 248 >>> assert feed_dict["params:a.b"] == 1 249 """ 250 key = f"params:{param_name}" 251 feed_dict[key] = param_value 252 if isinstance(param_value, dict): 253 for key, val in param_value.items(): 254 _add_param_to_feed_dict(f"{param_name}.{key}", val) 255 256 for param_name, param_value in params.items(): 257 _add_param_to_feed_dict(param_name, param_value) 258 259 return feed_dict 260 261 def _get_config_credentials(self) -> dict[str, Any]: 262 """Getter for credentials specified in credentials directory.""" 263 try: 264 conf_creds = self.config_loader["credentials"] 265 except MissingConfigException as exc: 266 logging.getLogger(__name__).debug( 267 "Credentials not found in your Kedro project config.\n %s", str(exc) 268 ) 269 conf_creds = {} 270 return conf_creds 271 272 273 class KedroContextError(Exception): 274 """Error occurred when loading project and running context pipeline.""" 275 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/kedro/framework/context/context.py b/kedro/framework/context/context.py --- a/kedro/framework/context/context.py +++ b/kedro/framework/context/context.py @@ -8,7 +8,7 @@ from urllib.parse import urlparse from warnings import warn -from attrs import field, frozen +from attrs import define, field from omegaconf import OmegaConf from pluggy import PluginManager @@ -142,18 +142,38 @@ return Path(project_path).expanduser().resolve() -@frozen +@define(slots=False) # Enable setting new attributes to `KedroContext` class KedroContext: """``KedroContext`` is the base class which holds the configuration and Kedro's main functionality. + + Create a context object by providing the root of a Kedro project and + the environment configuration subfolders (see ``kedro.config.OmegaConfigLoader``) + Raises: + KedroContextError: If there is a mismatch + between Kedro project version and package version. + Args: + project_path: Project path to define the context for. + config_loader: Kedro's ``OmegaConfigLoader`` for loading the configuration files. + env: Optional argument for configuration default environment to be used + for running the pipeline. If not specified, it defaults to "local". + package_name: Package name for the Kedro project the context is + created for. + hook_manager: The ``PluginManager`` to activate hooks, supplied by the session. + extra_params: Optional dictionary containing extra project parameters. + If specified, will update (and therefore take precedence over) + the parameters retrieved from the project configuration. + """ - _package_name: str - project_path: Path = field(converter=_expand_full_path) - config_loader: AbstractConfigLoader - _hook_manager: PluginManager - env: str | None = None - _extra_params: dict[str, Any] | None = field(default=None, converter=deepcopy) + project_path: Path = field(init=True, converter=_expand_full_path) + config_loader: AbstractConfigLoader = field(init=True) + env: str | None = field(init=True) + _package_name: str = field(init=True) + _hook_manager: PluginManager = field(init=True) + _extra_params: dict[str, Any] | None = field( + init=True, default=None, converter=deepcopy + ) @property def catalog(self) -> DataCatalog:
{"golden_diff": "diff --git a/kedro/framework/context/context.py b/kedro/framework/context/context.py\n--- a/kedro/framework/context/context.py\n+++ b/kedro/framework/context/context.py\n@@ -8,7 +8,7 @@\n from urllib.parse import urlparse\n from warnings import warn\n \n-from attrs import field, frozen\n+from attrs import define, field\n from omegaconf import OmegaConf\n from pluggy import PluginManager\n \n@@ -142,18 +142,38 @@\n return Path(project_path).expanduser().resolve()\n \n \n-@frozen\n+@define(slots=False) # Enable setting new attributes to `KedroContext`\n class KedroContext:\n \"\"\"``KedroContext`` is the base class which holds the configuration and\n Kedro's main functionality.\n+\n+ Create a context object by providing the root of a Kedro project and\n+ the environment configuration subfolders (see ``kedro.config.OmegaConfigLoader``)\n+ Raises:\n+ KedroContextError: If there is a mismatch\n+ between Kedro project version and package version.\n+ Args:\n+ project_path: Project path to define the context for.\n+ config_loader: Kedro's ``OmegaConfigLoader`` for loading the configuration files.\n+ env: Optional argument for configuration default environment to be used\n+ for running the pipeline. If not specified, it defaults to \"local\".\n+ package_name: Package name for the Kedro project the context is\n+ created for.\n+ hook_manager: The ``PluginManager`` to activate hooks, supplied by the session.\n+ extra_params: Optional dictionary containing extra project parameters.\n+ If specified, will update (and therefore take precedence over)\n+ the parameters retrieved from the project configuration.\n+\n \"\"\"\n \n- _package_name: str\n- project_path: Path = field(converter=_expand_full_path)\n- config_loader: AbstractConfigLoader\n- _hook_manager: PluginManager\n- env: str | None = None\n- _extra_params: dict[str, Any] | None = field(default=None, converter=deepcopy)\n+ project_path: Path = field(init=True, converter=_expand_full_path)\n+ config_loader: AbstractConfigLoader = field(init=True)\n+ env: str | None = field(init=True)\n+ _package_name: str = field(init=True)\n+ _hook_manager: PluginManager = field(init=True)\n+ _extra_params: dict[str, Any] | None = field(\n+ init=True, default=None, converter=deepcopy\n+ )\n \n @property\n def catalog(self) -> DataCatalog:\n", "issue": "Revisit: Make KedroContext a dataclass and add `config_loader` as a property\n# Description\r\nAs `KedroSession` now control the lifecycle of Kedro's run, `KedroContext` act like a container and it stores important attributes. 
Since we now dropped Python 3.6 support, we can make use of Python's `dataclass` to further tidy up `KedroContext`'s constructor code.\r\n\r\ntodo:\r\n- [x] Make `KedroContext` a dataclass(https://github.com/kedro-org/kedro/pull/1465)\r\n- [x] Add `config_loader` as @property instead of relying the private `context._get_config_loader()` https://github.com/kedro-org/kedro/pull/1505\r\n- [x] Update corresponding document in Starter/Kedro\r\n\n", "before_files": [{"content": "\"\"\"This module provides context for Kedro project.\"\"\"\nfrom __future__ import annotations\n\nimport logging\nfrom copy import deepcopy\nfrom pathlib import Path, PurePosixPath, PureWindowsPath\nfrom typing import Any\nfrom urllib.parse import urlparse\nfrom warnings import warn\n\nfrom attrs import field, frozen\nfrom omegaconf import OmegaConf\nfrom pluggy import PluginManager\n\nfrom kedro.config import AbstractConfigLoader, MissingConfigException\nfrom kedro.framework.project import settings\nfrom kedro.io import DataCatalog\nfrom kedro.pipeline.pipeline import _transcode_split\n\n\ndef _is_relative_path(path_string: str) -> bool:\n \"\"\"Checks whether a path string is a relative path.\n\n Example:\n ::\n >>> _is_relative_path(\"data/01_raw\") == True\n >>> _is_relative_path(\"info.log\") == True\n >>> _is_relative_path(\"/tmp/data/01_raw\") == False\n >>> _is_relative_path(r\"C:\\\\info.log\") == False\n >>> _is_relative_path(r\"\\\\'info.log\") == False\n >>> _is_relative_path(\"c:/info.log\") == False\n >>> _is_relative_path(\"s3://info.log\") == False\n\n Args:\n path_string: The path string to check.\n Returns:\n Whether the string is a relative path.\n \"\"\"\n # os.path.splitdrive does not reliably work on non-Windows systems\n # breaking the coverage, using PureWindowsPath instead\n is_full_windows_path_with_drive = bool(PureWindowsPath(path_string).drive)\n if is_full_windows_path_with_drive:\n return False\n\n is_remote_path = bool(urlparse(path_string).scheme)\n if is_remote_path:\n return False\n\n is_absolute_path = PurePosixPath(path_string).is_absolute()\n if is_absolute_path:\n return False\n\n return True\n\n\ndef _convert_paths_to_absolute_posix(\n project_path: Path, conf_dictionary: dict[str, Any]\n) -> dict[str, Any]:\n \"\"\"Turn all relative paths inside ``conf_dictionary`` into absolute paths by appending them\n to ``project_path`` and convert absolute Windows paths to POSIX format. This is a hack to\n make sure that we don't have to change user's working directory for logging and datasets to\n work. It is important for non-standard workflows such as IPython notebook where users don't go\n through `kedro run` or `__main__.py` entrypoints.\n\n Example:\n ::\n >>> conf = _convert_paths_to_absolute_posix(\n >>> project_path=Path(\"/path/to/my/project\"),\n >>> conf_dictionary={\n >>> \"handlers\": {\n >>> \"info_file_handler\": {\n >>> \"filename\": \"info.log\"\n >>> }\n >>> }\n >>> }\n >>> )\n >>> print(conf['handlers']['info_file_handler']['filename'])\n \"/path/to/my/project/info.log\"\n\n Args:\n project_path: The root directory to prepend to relative path to make absolute path.\n conf_dictionary: The configuration containing paths to expand.\n Returns:\n A dictionary containing only absolute paths.\n Raises:\n ValueError: If the provided ``project_path`` is not an absolute path.\n \"\"\"\n if not project_path.is_absolute():\n raise ValueError(\n f\"project_path must be an absolute path. 
Received: {project_path}\"\n )\n\n # only check a few conf keys that are known to specify a path string as value\n conf_keys_with_filepath = (\"filename\", \"filepath\", \"path\")\n\n for conf_key, conf_value in conf_dictionary.items():\n # if the conf_value is another dictionary, absolutify its paths first.\n if isinstance(conf_value, dict):\n conf_dictionary[conf_key] = _convert_paths_to_absolute_posix(\n project_path, conf_value\n )\n continue\n\n # if the conf_value is not a dictionary nor a string, skip\n if not isinstance(conf_value, str):\n continue\n\n # if the conf_value is a string but the conf_key isn't one associated with filepath, skip\n if conf_key not in conf_keys_with_filepath:\n continue\n\n if _is_relative_path(conf_value):\n # Absolute local path should be in POSIX format\n conf_value_absolute_path = (project_path / conf_value).as_posix()\n conf_dictionary[conf_key] = conf_value_absolute_path\n elif PureWindowsPath(conf_value).drive:\n # Convert absolute Windows path to POSIX format\n conf_dictionary[conf_key] = PureWindowsPath(conf_value).as_posix()\n\n return conf_dictionary\n\n\ndef _validate_transcoded_datasets(catalog: DataCatalog):\n \"\"\"Validates transcoded datasets are correctly named\n\n Args:\n catalog (DataCatalog): The catalog object containing the\n datasets to be validated.\n\n Raises:\n ValueError: If a dataset name does not conform to the expected\n transcoding naming conventions,a ValueError is raised by the\n `_transcode_split` function.\n\n \"\"\"\n # noqa: protected-access\n for dataset_name in catalog._datasets.keys():\n _transcode_split(dataset_name)\n\n\ndef _expand_full_path(project_path: str | Path) -> Path:\n return Path(project_path).expanduser().resolve()\n\n\n@frozen\nclass KedroContext:\n \"\"\"``KedroContext`` is the base class which holds the configuration and\n Kedro's main functionality.\n \"\"\"\n\n _package_name: str\n project_path: Path = field(converter=_expand_full_path)\n config_loader: AbstractConfigLoader\n _hook_manager: PluginManager\n env: str | None = None\n _extra_params: dict[str, Any] | None = field(default=None, converter=deepcopy)\n\n @property\n def catalog(self) -> DataCatalog:\n \"\"\"Read-only property referring to Kedro's ``DataCatalog`` for this context.\n\n Returns:\n DataCatalog defined in `catalog.yml`.\n Raises:\n KedroContextError: Incorrect ``DataCatalog`` registered for the project.\n\n \"\"\"\n return self._get_catalog()\n\n @property\n def params(self) -> dict[str, Any]:\n \"\"\"Read-only property referring to Kedro's parameters for this context.\n\n Returns:\n Parameters defined in `parameters.yml` with the addition of any\n extra parameters passed at initialization.\n \"\"\"\n try:\n params = self.config_loader[\"parameters\"]\n except MissingConfigException as exc:\n warn(f\"Parameters not found in your Kedro project config.\\n{str(exc)}\")\n params = {}\n\n if self._extra_params:\n # Merge nested structures\n params = OmegaConf.merge(params, self._extra_params)\n\n return OmegaConf.to_container(params) if OmegaConf.is_config(params) else params\n\n def _get_catalog(\n self,\n save_version: str = None,\n load_versions: dict[str, str] = None,\n ) -> DataCatalog:\n \"\"\"A hook for changing the creation of a DataCatalog instance.\n\n Returns:\n DataCatalog defined in `catalog.yml`.\n Raises:\n KedroContextError: Incorrect ``DataCatalog`` registered for the project.\n\n \"\"\"\n # '**/catalog*' reads modular pipeline configs\n conf_catalog = self.config_loader[\"catalog\"]\n # turn relative paths in 
conf_catalog into absolute paths\n # before initializing the catalog\n conf_catalog = _convert_paths_to_absolute_posix(\n project_path=self.project_path, conf_dictionary=conf_catalog\n )\n conf_creds = self._get_config_credentials()\n\n catalog = settings.DATA_CATALOG_CLASS.from_config(\n catalog=conf_catalog,\n credentials=conf_creds,\n load_versions=load_versions,\n save_version=save_version,\n )\n\n feed_dict = self._get_feed_dict()\n catalog.add_feed_dict(feed_dict)\n _validate_transcoded_datasets(catalog)\n self._hook_manager.hook.after_catalog_created(\n catalog=catalog,\n conf_catalog=conf_catalog,\n conf_creds=conf_creds,\n feed_dict=feed_dict,\n save_version=save_version,\n load_versions=load_versions,\n )\n return catalog\n\n def _get_feed_dict(self) -> dict[str, Any]:\n \"\"\"Get parameters and return the feed dictionary.\"\"\"\n params = self.params\n feed_dict = {\"parameters\": params}\n\n def _add_param_to_feed_dict(param_name, param_value):\n \"\"\"This recursively adds parameter paths to the `feed_dict`,\n whenever `param_value` is a dictionary itself, so that users can\n specify specific nested parameters in their node inputs.\n\n Example:\n\n >>> param_name = \"a\"\n >>> param_value = {\"b\": 1}\n >>> _add_param_to_feed_dict(param_name, param_value)\n >>> assert feed_dict[\"params:a\"] == {\"b\": 1}\n >>> assert feed_dict[\"params:a.b\"] == 1\n \"\"\"\n key = f\"params:{param_name}\"\n feed_dict[key] = param_value\n if isinstance(param_value, dict):\n for key, val in param_value.items():\n _add_param_to_feed_dict(f\"{param_name}.{key}\", val)\n\n for param_name, param_value in params.items():\n _add_param_to_feed_dict(param_name, param_value)\n\n return feed_dict\n\n def _get_config_credentials(self) -> dict[str, Any]:\n \"\"\"Getter for credentials specified in credentials directory.\"\"\"\n try:\n conf_creds = self.config_loader[\"credentials\"]\n except MissingConfigException as exc:\n logging.getLogger(__name__).debug(\n \"Credentials not found in your Kedro project config.\\n %s\", str(exc)\n )\n conf_creds = {}\n return conf_creds\n\n\nclass KedroContextError(Exception):\n \"\"\"Error occurred when loading project and running context pipeline.\"\"\"\n", "path": "kedro/framework/context/context.py"}], "after_files": [{"content": "\"\"\"This module provides context for Kedro project.\"\"\"\nfrom __future__ import annotations\n\nimport logging\nfrom copy import deepcopy\nfrom pathlib import Path, PurePosixPath, PureWindowsPath\nfrom typing import Any\nfrom urllib.parse import urlparse\nfrom warnings import warn\n\nfrom attrs import define, field\nfrom omegaconf import OmegaConf\nfrom pluggy import PluginManager\n\nfrom kedro.config import AbstractConfigLoader, MissingConfigException\nfrom kedro.framework.project import settings\nfrom kedro.io import DataCatalog\nfrom kedro.pipeline.pipeline import _transcode_split\n\n\ndef _is_relative_path(path_string: str) -> bool:\n \"\"\"Checks whether a path string is a relative path.\n\n Example:\n ::\n >>> _is_relative_path(\"data/01_raw\") == True\n >>> _is_relative_path(\"info.log\") == True\n >>> _is_relative_path(\"/tmp/data/01_raw\") == False\n >>> _is_relative_path(r\"C:\\\\info.log\") == False\n >>> _is_relative_path(r\"\\\\'info.log\") == False\n >>> _is_relative_path(\"c:/info.log\") == False\n >>> _is_relative_path(\"s3://info.log\") == False\n\n Args:\n path_string: The path string to check.\n Returns:\n Whether the string is a relative path.\n \"\"\"\n # os.path.splitdrive does not reliably work on non-Windows 
systems\n # breaking the coverage, using PureWindowsPath instead\n is_full_windows_path_with_drive = bool(PureWindowsPath(path_string).drive)\n if is_full_windows_path_with_drive:\n return False\n\n is_remote_path = bool(urlparse(path_string).scheme)\n if is_remote_path:\n return False\n\n is_absolute_path = PurePosixPath(path_string).is_absolute()\n if is_absolute_path:\n return False\n\n return True\n\n\ndef _convert_paths_to_absolute_posix(\n project_path: Path, conf_dictionary: dict[str, Any]\n) -> dict[str, Any]:\n \"\"\"Turn all relative paths inside ``conf_dictionary`` into absolute paths by appending them\n to ``project_path`` and convert absolute Windows paths to POSIX format. This is a hack to\n make sure that we don't have to change user's working directory for logging and datasets to\n work. It is important for non-standard workflows such as IPython notebook where users don't go\n through `kedro run` or `__main__.py` entrypoints.\n\n Example:\n ::\n >>> conf = _convert_paths_to_absolute_posix(\n >>> project_path=Path(\"/path/to/my/project\"),\n >>> conf_dictionary={\n >>> \"handlers\": {\n >>> \"info_file_handler\": {\n >>> \"filename\": \"info.log\"\n >>> }\n >>> }\n >>> }\n >>> )\n >>> print(conf['handlers']['info_file_handler']['filename'])\n \"/path/to/my/project/info.log\"\n\n Args:\n project_path: The root directory to prepend to relative path to make absolute path.\n conf_dictionary: The configuration containing paths to expand.\n Returns:\n A dictionary containing only absolute paths.\n Raises:\n ValueError: If the provided ``project_path`` is not an absolute path.\n \"\"\"\n if not project_path.is_absolute():\n raise ValueError(\n f\"project_path must be an absolute path. Received: {project_path}\"\n )\n\n # only check a few conf keys that are known to specify a path string as value\n conf_keys_with_filepath = (\"filename\", \"filepath\", \"path\")\n\n for conf_key, conf_value in conf_dictionary.items():\n # if the conf_value is another dictionary, absolutify its paths first.\n if isinstance(conf_value, dict):\n conf_dictionary[conf_key] = _convert_paths_to_absolute_posix(\n project_path, conf_value\n )\n continue\n\n # if the conf_value is not a dictionary nor a string, skip\n if not isinstance(conf_value, str):\n continue\n\n # if the conf_value is a string but the conf_key isn't one associated with filepath, skip\n if conf_key not in conf_keys_with_filepath:\n continue\n\n if _is_relative_path(conf_value):\n # Absolute local path should be in POSIX format\n conf_value_absolute_path = (project_path / conf_value).as_posix()\n conf_dictionary[conf_key] = conf_value_absolute_path\n elif PureWindowsPath(conf_value).drive:\n # Convert absolute Windows path to POSIX format\n conf_dictionary[conf_key] = PureWindowsPath(conf_value).as_posix()\n\n return conf_dictionary\n\n\ndef _validate_transcoded_datasets(catalog: DataCatalog):\n \"\"\"Validates transcoded datasets are correctly named\n\n Args:\n catalog (DataCatalog): The catalog object containing the\n datasets to be validated.\n\n Raises:\n ValueError: If a dataset name does not conform to the expected\n transcoding naming conventions,a ValueError is raised by the\n `_transcode_split` function.\n\n \"\"\"\n # noqa: protected-access\n for dataset_name in catalog._datasets.keys():\n _transcode_split(dataset_name)\n\n\ndef _expand_full_path(project_path: str | Path) -> Path:\n return Path(project_path).expanduser().resolve()\n\n\n@define(slots=False) # Enable setting new attributes to `KedroContext`\nclass KedroContext:\n 
\"\"\"``KedroContext`` is the base class which holds the configuration and\n Kedro's main functionality.\n\n Create a context object by providing the root of a Kedro project and\n the environment configuration subfolders (see ``kedro.config.OmegaConfigLoader``)\n Raises:\n KedroContextError: If there is a mismatch\n between Kedro project version and package version.\n Args:\n project_path: Project path to define the context for.\n config_loader: Kedro's ``OmegaConfigLoader`` for loading the configuration files.\n env: Optional argument for configuration default environment to be used\n for running the pipeline. If not specified, it defaults to \"local\".\n package_name: Package name for the Kedro project the context is\n created for.\n hook_manager: The ``PluginManager`` to activate hooks, supplied by the session.\n extra_params: Optional dictionary containing extra project parameters.\n If specified, will update (and therefore take precedence over)\n the parameters retrieved from the project configuration.\n\n \"\"\"\n\n project_path: Path = field(init=True, converter=_expand_full_path)\n config_loader: AbstractConfigLoader = field(init=True)\n env: str | None = field(init=True)\n _package_name: str = field(init=True)\n _hook_manager: PluginManager = field(init=True)\n _extra_params: dict[str, Any] | None = field(\n init=True, default=None, converter=deepcopy\n )\n\n @property\n def catalog(self) -> DataCatalog:\n \"\"\"Read-only property referring to Kedro's ``DataCatalog`` for this context.\n\n Returns:\n DataCatalog defined in `catalog.yml`.\n Raises:\n KedroContextError: Incorrect ``DataCatalog`` registered for the project.\n\n \"\"\"\n return self._get_catalog()\n\n @property\n def params(self) -> dict[str, Any]:\n \"\"\"Read-only property referring to Kedro's parameters for this context.\n\n Returns:\n Parameters defined in `parameters.yml` with the addition of any\n extra parameters passed at initialization.\n \"\"\"\n try:\n params = self.config_loader[\"parameters\"]\n except MissingConfigException as exc:\n warn(f\"Parameters not found in your Kedro project config.\\n{str(exc)}\")\n params = {}\n\n if self._extra_params:\n # Merge nested structures\n params = OmegaConf.merge(params, self._extra_params)\n\n return OmegaConf.to_container(params) if OmegaConf.is_config(params) else params\n\n def _get_catalog(\n self,\n save_version: str = None,\n load_versions: dict[str, str] = None,\n ) -> DataCatalog:\n \"\"\"A hook for changing the creation of a DataCatalog instance.\n\n Returns:\n DataCatalog defined in `catalog.yml`.\n Raises:\n KedroContextError: Incorrect ``DataCatalog`` registered for the project.\n\n \"\"\"\n # '**/catalog*' reads modular pipeline configs\n conf_catalog = self.config_loader[\"catalog\"]\n # turn relative paths in conf_catalog into absolute paths\n # before initializing the catalog\n conf_catalog = _convert_paths_to_absolute_posix(\n project_path=self.project_path, conf_dictionary=conf_catalog\n )\n conf_creds = self._get_config_credentials()\n\n catalog = settings.DATA_CATALOG_CLASS.from_config(\n catalog=conf_catalog,\n credentials=conf_creds,\n load_versions=load_versions,\n save_version=save_version,\n )\n\n feed_dict = self._get_feed_dict()\n catalog.add_feed_dict(feed_dict)\n _validate_transcoded_datasets(catalog)\n self._hook_manager.hook.after_catalog_created(\n catalog=catalog,\n conf_catalog=conf_catalog,\n conf_creds=conf_creds,\n feed_dict=feed_dict,\n save_version=save_version,\n load_versions=load_versions,\n )\n return catalog\n\n def 
_get_feed_dict(self) -> dict[str, Any]:\n \"\"\"Get parameters and return the feed dictionary.\"\"\"\n params = self.params\n feed_dict = {\"parameters\": params}\n\n def _add_param_to_feed_dict(param_name, param_value):\n \"\"\"This recursively adds parameter paths to the `feed_dict`,\n whenever `param_value` is a dictionary itself, so that users can\n specify specific nested parameters in their node inputs.\n\n Example:\n\n >>> param_name = \"a\"\n >>> param_value = {\"b\": 1}\n >>> _add_param_to_feed_dict(param_name, param_value)\n >>> assert feed_dict[\"params:a\"] == {\"b\": 1}\n >>> assert feed_dict[\"params:a.b\"] == 1\n \"\"\"\n key = f\"params:{param_name}\"\n feed_dict[key] = param_value\n if isinstance(param_value, dict):\n for key, val in param_value.items():\n _add_param_to_feed_dict(f\"{param_name}.{key}\", val)\n\n for param_name, param_value in params.items():\n _add_param_to_feed_dict(param_name, param_value)\n\n return feed_dict\n\n def _get_config_credentials(self) -> dict[str, Any]:\n \"\"\"Getter for credentials specified in credentials directory.\"\"\"\n try:\n conf_creds = self.config_loader[\"credentials\"]\n except MissingConfigException as exc:\n logging.getLogger(__name__).debug(\n \"Credentials not found in your Kedro project config.\\n %s\", str(exc)\n )\n conf_creds = {}\n return conf_creds\n\n\nclass KedroContextError(Exception):\n \"\"\"Error occurred when loading project and running context pipeline.\"\"\"\n", "path": "kedro/framework/context/context.py"}]}
3,280
579
gh_patches_debug_8416
rasdani/github-patches
git_diff
optuna__optuna-449
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- ValueError when calling suggest_categorical with int and str **Conditions** - Optuna version: 0.13.0 - Python version: 3.7.3 - OS: Windows 10 Education - Machine Learning library to be optimized: none **Code to reproduce** ``` def objective(trial: optuna.Trial): x = trial.suggest_categorical("x", [1, "0"]) print(x) optuna.create_study( study_name="test_" + now_string(), storage="sqlite:///tmp/example.db").optimize(objective, n_trials=10) ``` **Error messages, stack traces, or logs** ``` Traceback (most recent call last): File "C:\Users\imri\github\scoring-model\venv\lib\site-packages\optuna\study.py", line 468, in _run_trial result = func(trial) File "~\github\scoring-model\tests\TestOptuna.py", line 12, in objective x = trial.suggest_categorical("x", [1, "0"]) File "~\github\scoring-model\venv\lib\site-packages\optuna\trial.py", line 337, in suggest_categorical return self._suggest(name, distributions.CategoricalDistribution(choices=choices)) File "~\github\scoring-model\venv\lib\site-packages\optuna\trial.py", line 457, in _suggest return self._set_new_param_or_get_existing(name, param_value, distribution) File "~\github\scoring-model\venv\lib\site-packages\optuna\trial.py", line 462, in _set_new_param_or_get_existing param_value_in_internal_repr = distribution.to_internal_repr(param_value) File "~\github\scoring-model\venv\lib\site-packages\optuna\distributions.py", line 236, in to_internal_repr return self.choices.index(param_value_in_external_repr) ValueError: tuple.index(x): x not in tuple ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `optuna/samplers/random.py` Content: ``` 1 import numpy 2 3 from optuna import distributions 4 from optuna.samplers.base import BaseSampler 5 from optuna import types 6 7 if types.TYPE_CHECKING: 8 from typing import Any # NOQA 9 from typing import Dict # NOQA 10 from typing import Optional # NOQA 11 12 from optuna.distributions import BaseDistribution # NOQA 13 from optuna.structs import FrozenTrial # NOQA 14 from optuna.study import InTrialStudy # NOQA 15 16 17 class RandomSampler(BaseSampler): 18 """Sampler using random sampling. 19 20 Example: 21 22 .. code:: 23 24 >>> study = optuna.create_study(sampler=RandomSampler()) 25 >>> study.optimize(objective, direction='minimize') 26 27 Args: 28 seed: Seed for random number generator. 
29 """ 30 31 def __init__(self, seed=None): 32 # type: (Optional[int]) -> None 33 34 self.seed = seed 35 self.rng = numpy.random.RandomState(seed) 36 37 def infer_relative_search_space(self, study, trial): 38 # type: (InTrialStudy, FrozenTrial) -> Dict[str, BaseDistribution] 39 40 return {} 41 42 def sample_relative(self, study, trial, search_space): 43 # type: (InTrialStudy, FrozenTrial, Dict[str, BaseDistribution]) -> Dict[str, Any] 44 45 return {} 46 47 def sample_independent(self, study, trial, param_name, param_distribution): 48 # type: (InTrialStudy, FrozenTrial, str, distributions.BaseDistribution) -> Any 49 """Please consult the documentation for :func:`BaseSampler.sample_independent`.""" 50 51 if isinstance(param_distribution, distributions.UniformDistribution): 52 return self.rng.uniform(param_distribution.low, param_distribution.high) 53 elif isinstance(param_distribution, distributions.LogUniformDistribution): 54 log_low = numpy.log(param_distribution.low) 55 log_high = numpy.log(param_distribution.high) 56 return float(numpy.exp(self.rng.uniform(log_low, log_high))) 57 elif isinstance(param_distribution, distributions.DiscreteUniformDistribution): 58 q = param_distribution.q 59 r = param_distribution.high - param_distribution.low 60 # [low, high] is shifted to [0, r] to align sampled values at regular intervals. 61 low = 0 - 0.5 * q 62 high = r + 0.5 * q 63 s = self.rng.uniform(low, high) 64 v = numpy.round(s / q) * q + param_distribution.low 65 # v may slightly exceed range due to round-off errors. 66 return float(min(max(v, param_distribution.low), param_distribution.high)) 67 elif isinstance(param_distribution, distributions.IntUniformDistribution): 68 # numpy.random.randint includes low but excludes high. 69 return self.rng.randint(param_distribution.low, param_distribution.high + 1) 70 elif isinstance(param_distribution, distributions.CategoricalDistribution): 71 choices = param_distribution.choices 72 return self.rng.choice(choices) 73 else: 74 raise NotImplementedError 75 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/optuna/samplers/random.py b/optuna/samplers/random.py --- a/optuna/samplers/random.py +++ b/optuna/samplers/random.py @@ -69,6 +69,7 @@ return self.rng.randint(param_distribution.low, param_distribution.high + 1) elif isinstance(param_distribution, distributions.CategoricalDistribution): choices = param_distribution.choices - return self.rng.choice(choices) + index = self.rng.randint(0, len(choices)) + return choices[index] else: raise NotImplementedError
{"golden_diff": "diff --git a/optuna/samplers/random.py b/optuna/samplers/random.py\n--- a/optuna/samplers/random.py\n+++ b/optuna/samplers/random.py\n@@ -69,6 +69,7 @@\n return self.rng.randint(param_distribution.low, param_distribution.high + 1)\n elif isinstance(param_distribution, distributions.CategoricalDistribution):\n choices = param_distribution.choices\n- return self.rng.choice(choices)\n+ index = self.rng.randint(0, len(choices))\n+ return choices[index]\n else:\n raise NotImplementedError\n", "issue": "ValueError when calling suggest_categorical with int and str\n**Conditions**\r\n- Optuna version: 0.13.0\r\n- Python version: 3.7.3\r\n- OS: Windows 10 Education\r\n- Machine Learning library to be optimized: none\r\n\r\n**Code to reproduce**\r\n```\r\ndef objective(trial: optuna.Trial):\r\n x = trial.suggest_categorical(\"x\", [1, \"0\"])\r\n print(x)\r\noptuna.create_study( study_name=\"test_\" + now_string(), storage=\"sqlite:///tmp/example.db\").optimize(objective, n_trials=10)\r\n```\r\n\r\n**Error messages, stack traces, or logs**\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\imri\\github\\scoring-model\\venv\\lib\\site-packages\\optuna\\study.py\", line 468, in _run_trial\r\n result = func(trial)\r\n File \"~\\github\\scoring-model\\tests\\TestOptuna.py\", line 12, in objective\r\n x = trial.suggest_categorical(\"x\", [1, \"0\"])\r\n File \"~\\github\\scoring-model\\venv\\lib\\site-packages\\optuna\\trial.py\", line 337, in suggest_categorical\r\n return self._suggest(name, distributions.CategoricalDistribution(choices=choices))\r\n File \"~\\github\\scoring-model\\venv\\lib\\site-packages\\optuna\\trial.py\", line 457, in _suggest\r\n return self._set_new_param_or_get_existing(name, param_value, distribution)\r\n File \"~\\github\\scoring-model\\venv\\lib\\site-packages\\optuna\\trial.py\", line 462, in _set_new_param_or_get_existing\r\n param_value_in_internal_repr = distribution.to_internal_repr(param_value)\r\n File \"~\\github\\scoring-model\\venv\\lib\\site-packages\\optuna\\distributions.py\", line 236, in to_internal_repr\r\n return self.choices.index(param_value_in_external_repr)\r\nValueError: tuple.index(x): x not in tuple\r\n```\r\n\r\n\n", "before_files": [{"content": "import numpy\n\nfrom optuna import distributions\nfrom optuna.samplers.base import BaseSampler\nfrom optuna import types\n\nif types.TYPE_CHECKING:\n from typing import Any # NOQA\n from typing import Dict # NOQA\n from typing import Optional # NOQA\n\n from optuna.distributions import BaseDistribution # NOQA\n from optuna.structs import FrozenTrial # NOQA\n from optuna.study import InTrialStudy # NOQA\n\n\nclass RandomSampler(BaseSampler):\n \"\"\"Sampler using random sampling.\n\n Example:\n\n .. 
code::\n\n >>> study = optuna.create_study(sampler=RandomSampler())\n >>> study.optimize(objective, direction='minimize')\n\n Args:\n seed: Seed for random number generator.\n \"\"\"\n\n def __init__(self, seed=None):\n # type: (Optional[int]) -> None\n\n self.seed = seed\n self.rng = numpy.random.RandomState(seed)\n\n def infer_relative_search_space(self, study, trial):\n # type: (InTrialStudy, FrozenTrial) -> Dict[str, BaseDistribution]\n\n return {}\n\n def sample_relative(self, study, trial, search_space):\n # type: (InTrialStudy, FrozenTrial, Dict[str, BaseDistribution]) -> Dict[str, Any]\n\n return {}\n\n def sample_independent(self, study, trial, param_name, param_distribution):\n # type: (InTrialStudy, FrozenTrial, str, distributions.BaseDistribution) -> Any\n \"\"\"Please consult the documentation for :func:`BaseSampler.sample_independent`.\"\"\"\n\n if isinstance(param_distribution, distributions.UniformDistribution):\n return self.rng.uniform(param_distribution.low, param_distribution.high)\n elif isinstance(param_distribution, distributions.LogUniformDistribution):\n log_low = numpy.log(param_distribution.low)\n log_high = numpy.log(param_distribution.high)\n return float(numpy.exp(self.rng.uniform(log_low, log_high)))\n elif isinstance(param_distribution, distributions.DiscreteUniformDistribution):\n q = param_distribution.q\n r = param_distribution.high - param_distribution.low\n # [low, high] is shifted to [0, r] to align sampled values at regular intervals.\n low = 0 - 0.5 * q\n high = r + 0.5 * q\n s = self.rng.uniform(low, high)\n v = numpy.round(s / q) * q + param_distribution.low\n # v may slightly exceed range due to round-off errors.\n return float(min(max(v, param_distribution.low), param_distribution.high))\n elif isinstance(param_distribution, distributions.IntUniformDistribution):\n # numpy.random.randint includes low but excludes high.\n return self.rng.randint(param_distribution.low, param_distribution.high + 1)\n elif isinstance(param_distribution, distributions.CategoricalDistribution):\n choices = param_distribution.choices\n return self.rng.choice(choices)\n else:\n raise NotImplementedError\n", "path": "optuna/samplers/random.py"}], "after_files": [{"content": "import numpy\n\nfrom optuna import distributions\nfrom optuna.samplers.base import BaseSampler\nfrom optuna import types\n\nif types.TYPE_CHECKING:\n from typing import Any # NOQA\n from typing import Dict # NOQA\n from typing import Optional # NOQA\n\n from optuna.distributions import BaseDistribution # NOQA\n from optuna.structs import FrozenTrial # NOQA\n from optuna.study import InTrialStudy # NOQA\n\n\nclass RandomSampler(BaseSampler):\n \"\"\"Sampler using random sampling.\n\n Example:\n\n .. 
code::\n\n >>> study = optuna.create_study(sampler=RandomSampler())\n >>> study.optimize(objective, direction='minimize')\n\n Args:\n seed: Seed for random number generator.\n \"\"\"\n\n def __init__(self, seed=None):\n # type: (Optional[int]) -> None\n\n self.seed = seed\n self.rng = numpy.random.RandomState(seed)\n\n def infer_relative_search_space(self, study, trial):\n # type: (InTrialStudy, FrozenTrial) -> Dict[str, BaseDistribution]\n\n return {}\n\n def sample_relative(self, study, trial, search_space):\n # type: (InTrialStudy, FrozenTrial, Dict[str, BaseDistribution]) -> Dict[str, Any]\n\n return {}\n\n def sample_independent(self, study, trial, param_name, param_distribution):\n # type: (InTrialStudy, FrozenTrial, str, distributions.BaseDistribution) -> Any\n \"\"\"Please consult the documentation for :func:`BaseSampler.sample_independent`.\"\"\"\n\n if isinstance(param_distribution, distributions.UniformDistribution):\n return self.rng.uniform(param_distribution.low, param_distribution.high)\n elif isinstance(param_distribution, distributions.LogUniformDistribution):\n log_low = numpy.log(param_distribution.low)\n log_high = numpy.log(param_distribution.high)\n return float(numpy.exp(self.rng.uniform(log_low, log_high)))\n elif isinstance(param_distribution, distributions.DiscreteUniformDistribution):\n q = param_distribution.q\n r = param_distribution.high - param_distribution.low\n # [low, high] is shifted to [0, r] to align sampled values at regular intervals.\n low = 0 - 0.5 * q\n high = r + 0.5 * q\n s = self.rng.uniform(low, high)\n v = numpy.round(s / q) * q + param_distribution.low\n # v may slightly exceed range due to round-off errors.\n return float(min(max(v, param_distribution.low), param_distribution.high))\n elif isinstance(param_distribution, distributions.IntUniformDistribution):\n # numpy.random.randint includes low but excludes high.\n return self.rng.randint(param_distribution.low, param_distribution.high + 1)\n elif isinstance(param_distribution, distributions.CategoricalDistribution):\n choices = param_distribution.choices\n index = self.rng.randint(0, len(choices))\n return choices[index]\n else:\n raise NotImplementedError\n", "path": "optuna/samplers/random.py"}]}
1,477
123
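The `ValueError` in the record above comes from `numpy.random.RandomState.choice` coercing a mixed-type choices tuple such as `(1, "0")` to a single dtype, so the sampled value no longer equals any element of the original tuple; the golden diff avoids the coercion by drawing a random index and indexing the Python tuple directly. A minimal sketch of that difference, assuming only that numpy is installed:

```python
import numpy

# Sketch of the behaviour behind the fix: choice() coerces mixed-type
# choices to one dtype, while index-based selection keeps the originals.
rng = numpy.random.RandomState(0)
choices = (1, "0")

coerced = rng.choice(choices)          # numpy builds a string array, so this is a numpy.str_
index = rng.randint(0, len(choices))   # draw an index in [0, len(choices)) instead
preserved = choices[index]             # indexing the tuple returns the original int or str

print(type(coerced))                   # <class 'numpy.str_'>
print(type(preserved))                 # <class 'int'> or <class 'str'>, matching the declared choices
```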
gh_patches_debug_59763
rasdani/github-patches
git_diff
pretix__pretix-1120
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Not all Backend-Signals are displayed in documentation I'm not sure why, but when looking at https://docs.pretix.eu/en/latest/development/api/general.html#backend, it seems to me like quite a few signals are not being displayed here... Comparing to https://github.com/pretix/pretix/blob/master/doc/development/api/general.rst#backend, for example all the `html` and `navbar`-signals are missing... --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/pretix/presale/signals.py` Content: ``` 1 from pretix.base.signals import EventPluginSignal 2 3 html_head = EventPluginSignal( 4 providing_args=["request"] 5 ) 6 """ 7 This signal allows you to put code inside the HTML ``<head>`` tag 8 of every page in the frontend. You will get the request as the keyword argument 9 ``request`` and are expected to return plain HTML. 10 11 As with all plugin signals, the ``sender`` keyword argument will contain the event. 12 """ 13 14 html_footer = EventPluginSignal( 15 providing_args=["request"] 16 ) 17 """ 18 This signal allows you to put code before the end of the HTML ``<body>`` tag 19 of every page in the frontend. You will get the request as the keyword argument 20 ``request`` and are expected to return plain HTML. 21 22 As with all plugin signals, the ``sender`` keyword argument will contain the event. 23 """ 24 25 footer_link = EventPluginSignal( 26 providing_args=["request"] 27 ) 28 """ 29 The signal ``pretix.presale.signals.footer_links`` allows you to add links to the footer of an event page. You 30 are expected to return a dictionary containing the keys ``label`` and ``url``. 31 32 As with all plugin signals, the ``sender`` keyword argument will contain the event. 33 """ 34 35 checkout_confirm_messages = EventPluginSignal() 36 """ 37 This signal is sent out to retrieve short messages that need to be acknowledged by the user before the 38 order can be completed. This is typically used for something like "accept the terms and conditions". 39 Receivers are expected to return a dictionary where the keys are globally unique identifiers for the 40 message and the values can be arbitrary HTML. 41 42 As with all plugin signals, the ``sender`` keyword argument will contain the event. 43 """ 44 45 checkout_flow_steps = EventPluginSignal() 46 """ 47 This signal is sent out to retrieve pages for the checkout flow 48 49 As with all plugin signals, the ``sender`` keyword argument will contain the event. 50 """ 51 52 voucher_redeem_info = EventPluginSignal( 53 providing_args=["voucher"] 54 ) 55 """ 56 This signal is sent out to display additional information on the "redeem a voucher" page 57 58 As with all plugin signals, the ``sender`` keyword argument will contain the event. 59 """ 60 61 order_meta_from_request = EventPluginSignal( 62 providing_args=["request"] 63 ) 64 """ 65 This signal is sent before an order is created through the pretixpresale frontend. It allows you 66 to return a dictionary that will be merged in the meta_info attribute of the order. 67 You will receive the request triggering the order creation as the ``request`` keyword argument. 68 69 As with all event-plugin signals, the ``sender`` keyword argument will contain the event. 
70 """ 71 checkout_confirm_page_content = EventPluginSignal( 72 providing_args=['request'] 73 ) 74 """ 75 This signals allows you to add HTML content to the confirmation page that is presented at the 76 end of the checkout process, just before the order is being created. 77 78 As with all plugin signals, the ``sender`` keyword argument will contain the event. A ``request`` 79 argument will contain the request object. 80 """ 81 82 fee_calculation_for_cart = EventPluginSignal( 83 providing_args=['request', 'invoice_address', 'total'] 84 ) 85 """ 86 This signals allows you to add fees to a cart. You are expected to return a list of ``OrderFee`` 87 objects that are not yet saved to the database (because there is no order yet). 88 89 As with all plugin signals, the ``sender`` keyword argument will contain the event. A ``request`` 90 argument will contain the request object and ``invoice_address`` the invoice address (useful for 91 tax calculation). The ``total`` keyword argument will contain the total cart sum without any fees. 92 You should not rely on this ``total`` value for fee calculations as other fees might interfere. 93 """ 94 95 contact_form_fields = EventPluginSignal( 96 providing_args=[] 97 ) 98 """ 99 This signals allows you to add form fields to the contact form that is presented during checkout 100 and by default only asks for the email address. You are supposed to return a dictionary of 101 form fields with globally unique keys. The validated form results will be saved into the 102 ``contact_form_data`` entry of the order's meta_info dictionary. 103 104 As with all plugin signals, the ``sender`` keyword argument will contain the event. A ``request`` 105 argument will contain the request object. 106 """ 107 108 question_form_fields = EventPluginSignal( 109 providing_args=["position"] 110 ) 111 """ 112 This signals allows you to add form fields to the questions form that is presented during checkout 113 and by default asks for the questions configured in the backend. You are supposed to return a dictionary 114 of form fields with globally unique keys. The validated form results will be saved into the 115 ``question_form_data`` entry of the position's meta_info dictionary. 116 117 The ``position`` keyword argument will contain either a ``CartPosition`` object or an ``OrderPosition`` 118 object, depending on whether the form is called as part of the order checkout or for changing an order 119 later. 120 121 As with all plugin signals, the ``sender`` keyword argument will contain the event. 122 """ 123 124 order_info = EventPluginSignal( 125 providing_args=["order"] 126 ) 127 """ 128 This signal is sent out to display additional information on the order detail page 129 130 As with all plugin signals, the ``sender`` keyword argument will contain the event. 131 """ 132 133 process_request = EventPluginSignal( 134 providing_args=["request"] 135 ) 136 """ 137 This signal is sent out whenever a request is made to a event presale page. Most of the 138 time, this will be called from the middleware layer (except on plugin-provided pages 139 this will be called by the @event_view decorator). Similarly to Django's process_request 140 middleware method, if you return a Response, that response will be used and the request 141 won't be processed any further down the stack. 142 143 WARNING: Be very careful about using this signal as listening to it makes it really 144 easy to cause serious performance problems. 
145 146 As with all plugin signals, the ``sender`` keyword argument will contain the event. 147 """ 148 149 process_response = EventPluginSignal( 150 providing_args=["request", "response"] 151 ) 152 """ 153 This signal is sent out whenever a response is sent from a event presale page. Most of 154 the time, this will be called from the middleware layer (except on plugin-provided pages 155 this will be called by the @event_view decorator). Similarly to Django's process_response 156 middleware method you must return a response object, that will be passed further up the 157 stack to other handlers of the signal. If you do not want to alter the response, just 158 return the ``response`` parameter. 159 160 WARNING: Be very careful about using this signal as listening to it makes it really 161 easy to cause serious performance problems. 162 163 As with all plugin signals, the ``sender`` keyword argument will contain the event. 164 """ 165 166 front_page_top = EventPluginSignal( 167 providing_args=[] 168 ) 169 """ 170 This signal is sent out to display additional information on the frontpage above the list 171 of products and but below a custom frontpage text. 172 173 As with all plugin signals, the ``sender`` keyword argument will contain the event. The 174 receivers are expected to return HTML. 175 """ 176 177 front_page_bottom = EventPluginSignal( 178 providing_args=[] 179 ) 180 """ 181 This signal is sent out to display additional information on the frontpage below the list 182 of products. 183 184 As with all plugin signals, the ``sender`` keyword argument will contain the event. The 185 receivers are expected to return HTML. 186 """ 187 188 checkout_all_optional = EventPluginSignal( 189 providing_args=['request'] 190 ) 191 """ 192 If any receiver of this signal returns ``True``, all input fields during checkout (contact data, 193 invoice address, confirmations) will be optional, except for questions. Use with care! 194 195 As with all plugin signals, the ``sender`` keyword argument will contain the event. A ``request`` 196 argument will contain the request object. 197 """ 198 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/pretix/presale/signals.py b/src/pretix/presale/signals.py --- a/src/pretix/presale/signals.py +++ b/src/pretix/presale/signals.py @@ -26,7 +26,7 @@ providing_args=["request"] ) """ -The signal ``pretix.presale.signals.footer_links`` allows you to add links to the footer of an event page. You +The signal ``pretix.presale.signals.footer_link`` allows you to add links to the footer of an event page. You are expected to return a dictionary containing the keys ``label`` and ``url``. As with all plugin signals, the ``sender`` keyword argument will contain the event.
{"golden_diff": "diff --git a/src/pretix/presale/signals.py b/src/pretix/presale/signals.py\n--- a/src/pretix/presale/signals.py\n+++ b/src/pretix/presale/signals.py\n@@ -26,7 +26,7 @@\n providing_args=[\"request\"]\n )\n \"\"\"\n-The signal ``pretix.presale.signals.footer_links`` allows you to add links to the footer of an event page. You\n+The signal ``pretix.presale.signals.footer_link`` allows you to add links to the footer of an event page. You\n are expected to return a dictionary containing the keys ``label`` and ``url``.\n \n As with all plugin signals, the ``sender`` keyword argument will contain the event.\n", "issue": "Not all Backend-Signals are displayed in documentation\nI'm not sure why, but when looking at https://docs.pretix.eu/en/latest/development/api/general.html#backend, it seems to me like quite a few signals are not being displayed here...\r\n\r\nComparing to https://github.com/pretix/pretix/blob/master/doc/development/api/general.rst#backend, for example all the `html` and `navbar`-signals are missing...\n", "before_files": [{"content": "from pretix.base.signals import EventPluginSignal\n\nhtml_head = EventPluginSignal(\n providing_args=[\"request\"]\n)\n\"\"\"\nThis signal allows you to put code inside the HTML ``<head>`` tag\nof every page in the frontend. You will get the request as the keyword argument\n``request`` and are expected to return plain HTML.\n\nAs with all plugin signals, the ``sender`` keyword argument will contain the event.\n\"\"\"\n\nhtml_footer = EventPluginSignal(\n providing_args=[\"request\"]\n)\n\"\"\"\nThis signal allows you to put code before the end of the HTML ``<body>`` tag\nof every page in the frontend. You will get the request as the keyword argument\n``request`` and are expected to return plain HTML.\n\nAs with all plugin signals, the ``sender`` keyword argument will contain the event.\n\"\"\"\n\nfooter_link = EventPluginSignal(\n providing_args=[\"request\"]\n)\n\"\"\"\nThe signal ``pretix.presale.signals.footer_links`` allows you to add links to the footer of an event page. You\nare expected to return a dictionary containing the keys ``label`` and ``url``.\n\nAs with all plugin signals, the ``sender`` keyword argument will contain the event.\n\"\"\"\n\ncheckout_confirm_messages = EventPluginSignal()\n\"\"\"\nThis signal is sent out to retrieve short messages that need to be acknowledged by the user before the\norder can be completed. This is typically used for something like \"accept the terms and conditions\".\nReceivers are expected to return a dictionary where the keys are globally unique identifiers for the\nmessage and the values can be arbitrary HTML.\n\nAs with all plugin signals, the ``sender`` keyword argument will contain the event.\n\"\"\"\n\ncheckout_flow_steps = EventPluginSignal()\n\"\"\"\nThis signal is sent out to retrieve pages for the checkout flow\n\nAs with all plugin signals, the ``sender`` keyword argument will contain the event.\n\"\"\"\n\nvoucher_redeem_info = EventPluginSignal(\n providing_args=[\"voucher\"]\n)\n\"\"\"\nThis signal is sent out to display additional information on the \"redeem a voucher\" page\n\nAs with all plugin signals, the ``sender`` keyword argument will contain the event.\n\"\"\"\n\norder_meta_from_request = EventPluginSignal(\n providing_args=[\"request\"]\n)\n\"\"\"\nThis signal is sent before an order is created through the pretixpresale frontend. 
It allows you\nto return a dictionary that will be merged in the meta_info attribute of the order.\nYou will receive the request triggering the order creation as the ``request`` keyword argument.\n\nAs with all event-plugin signals, the ``sender`` keyword argument will contain the event.\n\"\"\"\ncheckout_confirm_page_content = EventPluginSignal(\n providing_args=['request']\n)\n\"\"\"\nThis signals allows you to add HTML content to the confirmation page that is presented at the\nend of the checkout process, just before the order is being created.\n\nAs with all plugin signals, the ``sender`` keyword argument will contain the event. A ``request``\nargument will contain the request object.\n\"\"\"\n\nfee_calculation_for_cart = EventPluginSignal(\n providing_args=['request', 'invoice_address', 'total']\n)\n\"\"\"\nThis signals allows you to add fees to a cart. You are expected to return a list of ``OrderFee``\nobjects that are not yet saved to the database (because there is no order yet).\n\nAs with all plugin signals, the ``sender`` keyword argument will contain the event. A ``request``\nargument will contain the request object and ``invoice_address`` the invoice address (useful for\ntax calculation). The ``total`` keyword argument will contain the total cart sum without any fees.\nYou should not rely on this ``total`` value for fee calculations as other fees might interfere.\n\"\"\"\n\ncontact_form_fields = EventPluginSignal(\n providing_args=[]\n)\n\"\"\"\nThis signals allows you to add form fields to the contact form that is presented during checkout\nand by default only asks for the email address. You are supposed to return a dictionary of\nform fields with globally unique keys. The validated form results will be saved into the\n``contact_form_data`` entry of the order's meta_info dictionary.\n\nAs with all plugin signals, the ``sender`` keyword argument will contain the event. A ``request``\nargument will contain the request object.\n\"\"\"\n\nquestion_form_fields = EventPluginSignal(\n providing_args=[\"position\"]\n)\n\"\"\"\nThis signals allows you to add form fields to the questions form that is presented during checkout\nand by default asks for the questions configured in the backend. You are supposed to return a dictionary\nof form fields with globally unique keys. The validated form results will be saved into the\n``question_form_data`` entry of the position's meta_info dictionary.\n\nThe ``position`` keyword argument will contain either a ``CartPosition`` object or an ``OrderPosition``\nobject, depending on whether the form is called as part of the order checkout or for changing an order\nlater.\n\nAs with all plugin signals, the ``sender`` keyword argument will contain the event.\n\"\"\"\n\norder_info = EventPluginSignal(\n providing_args=[\"order\"]\n)\n\"\"\"\nThis signal is sent out to display additional information on the order detail page\n\nAs with all plugin signals, the ``sender`` keyword argument will contain the event.\n\"\"\"\n\nprocess_request = EventPluginSignal(\n providing_args=[\"request\"]\n)\n\"\"\"\nThis signal is sent out whenever a request is made to a event presale page. Most of the\ntime, this will be called from the middleware layer (except on plugin-provided pages\nthis will be called by the @event_view decorator). 
Similarly to Django's process_request\nmiddleware method, if you return a Response, that response will be used and the request\nwon't be processed any further down the stack.\n\nWARNING: Be very careful about using this signal as listening to it makes it really\neasy to cause serious performance problems.\n\nAs with all plugin signals, the ``sender`` keyword argument will contain the event.\n\"\"\"\n\nprocess_response = EventPluginSignal(\n providing_args=[\"request\", \"response\"]\n)\n\"\"\"\nThis signal is sent out whenever a response is sent from a event presale page. Most of\nthe time, this will be called from the middleware layer (except on plugin-provided pages\nthis will be called by the @event_view decorator). Similarly to Django's process_response\nmiddleware method you must return a response object, that will be passed further up the\nstack to other handlers of the signal. If you do not want to alter the response, just\nreturn the ``response`` parameter.\n\nWARNING: Be very careful about using this signal as listening to it makes it really\neasy to cause serious performance problems.\n\nAs with all plugin signals, the ``sender`` keyword argument will contain the event.\n\"\"\"\n\nfront_page_top = EventPluginSignal(\n providing_args=[]\n)\n\"\"\"\nThis signal is sent out to display additional information on the frontpage above the list\nof products and but below a custom frontpage text.\n\nAs with all plugin signals, the ``sender`` keyword argument will contain the event. The\nreceivers are expected to return HTML.\n\"\"\"\n\nfront_page_bottom = EventPluginSignal(\n providing_args=[]\n)\n\"\"\"\nThis signal is sent out to display additional information on the frontpage below the list\nof products.\n\nAs with all plugin signals, the ``sender`` keyword argument will contain the event. The\nreceivers are expected to return HTML.\n\"\"\"\n\ncheckout_all_optional = EventPluginSignal(\n providing_args=['request']\n)\n\"\"\"\nIf any receiver of this signal returns ``True``, all input fields during checkout (contact data,\ninvoice address, confirmations) will be optional, except for questions. Use with care!\n\nAs with all plugin signals, the ``sender`` keyword argument will contain the event. A ``request``\nargument will contain the request object.\n\"\"\"\n", "path": "src/pretix/presale/signals.py"}], "after_files": [{"content": "from pretix.base.signals import EventPluginSignal\n\nhtml_head = EventPluginSignal(\n providing_args=[\"request\"]\n)\n\"\"\"\nThis signal allows you to put code inside the HTML ``<head>`` tag\nof every page in the frontend. You will get the request as the keyword argument\n``request`` and are expected to return plain HTML.\n\nAs with all plugin signals, the ``sender`` keyword argument will contain the event.\n\"\"\"\n\nhtml_footer = EventPluginSignal(\n providing_args=[\"request\"]\n)\n\"\"\"\nThis signal allows you to put code before the end of the HTML ``<body>`` tag\nof every page in the frontend. You will get the request as the keyword argument\n``request`` and are expected to return plain HTML.\n\nAs with all plugin signals, the ``sender`` keyword argument will contain the event.\n\"\"\"\n\nfooter_link = EventPluginSignal(\n providing_args=[\"request\"]\n)\n\"\"\"\nThe signal ``pretix.presale.signals.footer_link`` allows you to add links to the footer of an event page. 
You\nare expected to return a dictionary containing the keys ``label`` and ``url``.\n\nAs with all plugin signals, the ``sender`` keyword argument will contain the event.\n\"\"\"\n\ncheckout_confirm_messages = EventPluginSignal()\n\"\"\"\nThis signal is sent out to retrieve short messages that need to be acknowledged by the user before the\norder can be completed. This is typically used for something like \"accept the terms and conditions\".\nReceivers are expected to return a dictionary where the keys are globally unique identifiers for the\nmessage and the values can be arbitrary HTML.\n\nAs with all plugin signals, the ``sender`` keyword argument will contain the event.\n\"\"\"\n\ncheckout_flow_steps = EventPluginSignal()\n\"\"\"\nThis signal is sent out to retrieve pages for the checkout flow\n\nAs with all plugin signals, the ``sender`` keyword argument will contain the event.\n\"\"\"\n\nvoucher_redeem_info = EventPluginSignal(\n providing_args=[\"voucher\"]\n)\n\"\"\"\nThis signal is sent out to display additional information on the \"redeem a voucher\" page\n\nAs with all plugin signals, the ``sender`` keyword argument will contain the event.\n\"\"\"\n\norder_meta_from_request = EventPluginSignal(\n providing_args=[\"request\"]\n)\n\"\"\"\nThis signal is sent before an order is created through the pretixpresale frontend. It allows you\nto return a dictionary that will be merged in the meta_info attribute of the order.\nYou will receive the request triggering the order creation as the ``request`` keyword argument.\n\nAs with all event-plugin signals, the ``sender`` keyword argument will contain the event.\n\"\"\"\ncheckout_confirm_page_content = EventPluginSignal(\n providing_args=['request']\n)\n\"\"\"\nThis signals allows you to add HTML content to the confirmation page that is presented at the\nend of the checkout process, just before the order is being created.\n\nAs with all plugin signals, the ``sender`` keyword argument will contain the event. A ``request``\nargument will contain the request object.\n\"\"\"\n\nfee_calculation_for_cart = EventPluginSignal(\n providing_args=['request', 'invoice_address', 'total']\n)\n\"\"\"\nThis signals allows you to add fees to a cart. You are expected to return a list of ``OrderFee``\nobjects that are not yet saved to the database (because there is no order yet).\n\nAs with all plugin signals, the ``sender`` keyword argument will contain the event. A ``request``\nargument will contain the request object and ``invoice_address`` the invoice address (useful for\ntax calculation). The ``total`` keyword argument will contain the total cart sum without any fees.\nYou should not rely on this ``total`` value for fee calculations as other fees might interfere.\n\"\"\"\n\ncontact_form_fields = EventPluginSignal(\n providing_args=[]\n)\n\"\"\"\nThis signals allows you to add form fields to the contact form that is presented during checkout\nand by default only asks for the email address. You are supposed to return a dictionary of\nform fields with globally unique keys. The validated form results will be saved into the\n``contact_form_data`` entry of the order's meta_info dictionary.\n\nAs with all plugin signals, the ``sender`` keyword argument will contain the event. 
A ``request``\nargument will contain the request object.\n\"\"\"\n\nquestion_form_fields = EventPluginSignal(\n providing_args=[\"position\"]\n)\n\"\"\"\nThis signals allows you to add form fields to the questions form that is presented during checkout\nand by default asks for the questions configured in the backend. You are supposed to return a dictionary\nof form fields with globally unique keys. The validated form results will be saved into the\n``question_form_data`` entry of the position's meta_info dictionary.\n\nThe ``position`` keyword argument will contain either a ``CartPosition`` object or an ``OrderPosition``\nobject, depending on whether the form is called as part of the order checkout or for changing an order\nlater.\n\nAs with all plugin signals, the ``sender`` keyword argument will contain the event.\n\"\"\"\n\norder_info = EventPluginSignal(\n providing_args=[\"order\"]\n)\n\"\"\"\nThis signal is sent out to display additional information on the order detail page\n\nAs with all plugin signals, the ``sender`` keyword argument will contain the event.\n\"\"\"\n\nprocess_request = EventPluginSignal(\n providing_args=[\"request\"]\n)\n\"\"\"\nThis signal is sent out whenever a request is made to a event presale page. Most of the\ntime, this will be called from the middleware layer (except on plugin-provided pages\nthis will be called by the @event_view decorator). Similarly to Django's process_request\nmiddleware method, if you return a Response, that response will be used and the request\nwon't be processed any further down the stack.\n\nWARNING: Be very careful about using this signal as listening to it makes it really\neasy to cause serious performance problems.\n\nAs with all plugin signals, the ``sender`` keyword argument will contain the event.\n\"\"\"\n\nprocess_response = EventPluginSignal(\n providing_args=[\"request\", \"response\"]\n)\n\"\"\"\nThis signal is sent out whenever a response is sent from a event presale page. Most of\nthe time, this will be called from the middleware layer (except on plugin-provided pages\nthis will be called by the @event_view decorator). Similarly to Django's process_response\nmiddleware method you must return a response object, that will be passed further up the\nstack to other handlers of the signal. If you do not want to alter the response, just\nreturn the ``response`` parameter.\n\nWARNING: Be very careful about using this signal as listening to it makes it really\neasy to cause serious performance problems.\n\nAs with all plugin signals, the ``sender`` keyword argument will contain the event.\n\"\"\"\n\nfront_page_top = EventPluginSignal(\n providing_args=[]\n)\n\"\"\"\nThis signal is sent out to display additional information on the frontpage above the list\nof products and but below a custom frontpage text.\n\nAs with all plugin signals, the ``sender`` keyword argument will contain the event. The\nreceivers are expected to return HTML.\n\"\"\"\n\nfront_page_bottom = EventPluginSignal(\n providing_args=[]\n)\n\"\"\"\nThis signal is sent out to display additional information on the frontpage below the list\nof products.\n\nAs with all plugin signals, the ``sender`` keyword argument will contain the event. The\nreceivers are expected to return HTML.\n\"\"\"\n\ncheckout_all_optional = EventPluginSignal(\n providing_args=['request']\n)\n\"\"\"\nIf any receiver of this signal returns ``True``, all input fields during checkout (contact data,\ninvoice address, confirmations) will be optional, except for questions. 
Use with care!\n\nAs with all plugin signals, the ``sender`` keyword argument will contain the event. A ``request``\nargument will contain the request object.\n\"\"\"\n", "path": "src/pretix/presale/signals.py"}]}
2,449
160
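The fix in the record above is a one-word docstring correction: the signal variable is `footer_link`, but its docstring referred to `pretix.presale.signals.footer_links`, so generated signal documentation pointed at a name that does not exist. A hedged sketch of a plugin receiver using the corrected name, based only on the docstring shown in the record; the `dispatch_uid`, label, and URL below are made-up placeholders:

```python
from django.dispatch import receiver

from pretix.presale.signals import footer_link  # singular, matching the corrected docstring


@receiver(footer_link, dispatch_uid="example_plugin_footer_link")  # dispatch_uid is a placeholder
def add_footer_link(sender, request=None, **kwargs):
    # The signal's docstring says receivers return a dict with "label" and "url" keys.
    return {"label": "Example link", "url": "https://example.invalid/"}
```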
gh_patches_debug_24056
rasdani/github-patches
git_diff
pypi__warehouse-2574
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Improve sorting on simple page I'd like to submit a patch for this but I have a few questions :) First I'll describe what I'd like to do... ## sort by version number See https://pypi.org/simple/pre-commit/ You'll notice that `0.10.0` erroneously sorts *before* `0.2.0` (I'd like to fix this) ## investigation I've found the code which does this sorting [here](https://github.com/pypa/warehouse/blob/3bdfe5a89cc9a922ee97304c98384c24822a09ee/warehouse/legacy/api/simple.py#L76-L89) This seems to just sort by filename, but by inspecting and viewing [this page](https://pypi.org/simple/pre-commit-mirror-maker/) I notice it seems to ignore `_` vs. `-` (which is good, that's what I want to continue to happen but I'm just not seeing it from the code!) ## other questions The `File` objects which come back from the database contain a `.version` attribute that I'd like to use to participate in sorting, my main question is: **Can I depend on this version to be a valid [PEP440](https://www.python.org/dev/peps/pep-0440/) version and use something like `pkg_resources.parse_version`?** I'd basically like to replicate something close to the sorting which @chriskuehl's [dumb-pypi](https://github.com/chriskuehl/dumb-pypi) does [here](https://github.com/chriskuehl/dumb-pypi/blob/fd0f93fc2e82cbd9bae41b3c60c5f006b2319c60/dumb_pypi/main.py#L77-L91). Thanks in advance :) --- **Good First Issue**: This issue is good for first time contributors. If there is not a corresponding pull request for this issue, it is up for grabs. For directions for getting set up, see our [Getting Started Guide](https://warehouse.pypa.io/development/getting-started/). If you are working on this issue and have questions, please feel free to ask them here, [`#pypa-dev` on Freenode](https://webchat.freenode.net/?channels=%23pypa-dev), or the [pypa-dev mailing list](https://groups.google.com/forum/#!forum/pypa-dev). --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `warehouse/legacy/api/simple.py` Content: ``` 1 # Licensed under the Apache License, Version 2.0 (the "License"); 2 # you may not use this file except in compliance with the License. 3 # You may obtain a copy of the License at 4 # 5 # http://www.apache.org/licenses/LICENSE-2.0 6 # 7 # Unless required by applicable law or agreed to in writing, software 8 # distributed under the License is distributed on an "AS IS" BASIS, 9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 # See the License for the specific language governing permissions and 11 # limitations under the License. 
12 13 from pyramid.httpexceptions import HTTPMovedPermanently 14 from pyramid.view import view_config 15 from sqlalchemy import func 16 from sqlalchemy.orm import joinedload 17 18 from warehouse.cache.http import cache_control 19 from warehouse.cache.origin import origin_cache 20 from warehouse.packaging.models import JournalEntry, File, Project, Release 21 22 23 @view_config( 24 route_name="legacy.api.simple.index", 25 renderer="legacy/api/simple/index.html", 26 decorator=[ 27 cache_control(10 * 60), # 10 minutes 28 origin_cache( 29 1 * 24 * 60 * 60, # 1 day 30 stale_while_revalidate=5 * 60, # 5 minutes 31 stale_if_error=1 * 24 * 60 * 60, # 1 day 32 ), 33 ], 34 ) 35 def simple_index(request): 36 # Get the latest serial number 37 serial = request.db.query(func.max(JournalEntry.id)).scalar() or 0 38 request.response.headers["X-PyPI-Last-Serial"] = str(serial) 39 40 # Fetch the name and normalized name for all of our projects 41 projects = ( 42 request.db.query(Project.name, Project.normalized_name) 43 .order_by(Project.normalized_name) 44 .all() 45 ) 46 47 return {"projects": projects} 48 49 50 @view_config( 51 route_name="legacy.api.simple.detail", 52 renderer="legacy/api/simple/detail.html", 53 decorator=[ 54 cache_control(10 * 60), # 10 minutes 55 origin_cache( 56 1 * 24 * 60 * 60, # 1 day 57 stale_while_revalidate=5 * 60, # 5 minutes 58 stale_if_error=1 * 24 * 60 * 60, # 1 day 59 ), 60 ], 61 ) 62 def simple_detail(project, request): 63 # TODO: Handle files which are not hosted on PyPI 64 65 # Make sure that we're using the normalized version of the URL. 66 if (project.normalized_name != 67 request.matchdict.get("name", project.normalized_name)): 68 return HTTPMovedPermanently( 69 request.current_route_path(name=project.normalized_name), 70 ) 71 72 # Get the latest serial number for this project. 73 request.response.headers["X-PyPI-Last-Serial"] = str(project.last_serial) 74 75 # Get all of the files for this project. 76 files = ( 77 request.db.query(File) 78 .options(joinedload(File.release)) 79 .filter( 80 File.name == project.name, 81 File.version.in_( 82 request.db.query(Release) 83 .filter(Release.project == project) 84 .with_entities(Release.version) 85 ) 86 ) 87 .order_by(File.filename) 88 .all() 89 ) 90 91 return {"project": project, "files": files} 92 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/warehouse/legacy/api/simple.py b/warehouse/legacy/api/simple.py --- a/warehouse/legacy/api/simple.py +++ b/warehouse/legacy/api/simple.py @@ -10,6 +10,8 @@ # See the License for the specific language governing permissions and # limitations under the License. + +from packaging.version import parse from pyramid.httpexceptions import HTTPMovedPermanently from pyramid.view import view_config from sqlalchemy import func @@ -73,7 +75,7 @@ request.response.headers["X-PyPI-Last-Serial"] = str(project.last_serial) # Get all of the files for this project. - files = ( + files = sorted( request.db.query(File) .options(joinedload(File.release)) .filter( @@ -84,8 +86,8 @@ .with_entities(Release.version) ) ) - .order_by(File.filename) - .all() + .all(), + key=lambda f: (parse(f.version), f.packagetype) ) return {"project": project, "files": files}
{"golden_diff": "diff --git a/warehouse/legacy/api/simple.py b/warehouse/legacy/api/simple.py\n--- a/warehouse/legacy/api/simple.py\n+++ b/warehouse/legacy/api/simple.py\n@@ -10,6 +10,8 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n+\n+from packaging.version import parse\n from pyramid.httpexceptions import HTTPMovedPermanently\n from pyramid.view import view_config\n from sqlalchemy import func\n@@ -73,7 +75,7 @@\n request.response.headers[\"X-PyPI-Last-Serial\"] = str(project.last_serial)\n \n # Get all of the files for this project.\n- files = (\n+ files = sorted(\n request.db.query(File)\n .options(joinedload(File.release))\n .filter(\n@@ -84,8 +86,8 @@\n .with_entities(Release.version)\n )\n )\n- .order_by(File.filename)\n- .all()\n+ .all(),\n+ key=lambda f: (parse(f.version), f.packagetype)\n )\n \n return {\"project\": project, \"files\": files}\n", "issue": "Improve sorting on simple page\nI'd like to submit a patch for this but I have a few questions :)\r\n\r\nFirst I'll describe what I'd like to do...\r\n\r\n## sort by version number\r\n\r\nSee https://pypi.org/simple/pre-commit/\r\n\r\nYou'll notice that `0.10.0` erroneously sorts *before* `0.2.0` (I'd like to fix this)\r\n\r\n## investigation\r\n\r\nI've found the code which does this sorting [here](https://github.com/pypa/warehouse/blob/3bdfe5a89cc9a922ee97304c98384c24822a09ee/warehouse/legacy/api/simple.py#L76-L89)\r\n\r\nThis seems to just sort by filename, but by inspecting and viewing [this page](https://pypi.org/simple/pre-commit-mirror-maker/) I notice it seems to ignore `_` vs. `-` (which is good, that's what I want to continue to happen but I'm just not seeing it from the code!)\r\n\r\n## other questions\r\n\r\nThe `File` objects which come back from the database contain a `.version` attribute that I'd like to use to participate in sorting, my main question is: **Can I depend on this version to be a valid [PEP440](https://www.python.org/dev/peps/pep-0440/) version and use something like `pkg_resources.parse_version`?**\r\n\r\nI'd basically like to replicate something close to the sorting which @chriskuehl's [dumb-pypi](https://github.com/chriskuehl/dumb-pypi) does [here](https://github.com/chriskuehl/dumb-pypi/blob/fd0f93fc2e82cbd9bae41b3c60c5f006b2319c60/dumb_pypi/main.py#L77-L91).\r\n\r\nThanks in advance :)\r\n\r\n---\r\n\r\n**Good First Issue**: This issue is good for first time contributors. If there is not a corresponding pull request for this issue, it is up for grabs. For directions for getting set up, see our [Getting Started Guide](https://warehouse.pypa.io/development/getting-started/). 
If you are working on this issue and have questions, please feel free to ask them here, [`#pypa-dev` on Freenode](https://webchat.freenode.net/?channels=%23pypa-dev), or the [pypa-dev mailing list](https://groups.google.com/forum/#!forum/pypa-dev).\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom pyramid.httpexceptions import HTTPMovedPermanently\nfrom pyramid.view import view_config\nfrom sqlalchemy import func\nfrom sqlalchemy.orm import joinedload\n\nfrom warehouse.cache.http import cache_control\nfrom warehouse.cache.origin import origin_cache\nfrom warehouse.packaging.models import JournalEntry, File, Project, Release\n\n\n@view_config(\n route_name=\"legacy.api.simple.index\",\n renderer=\"legacy/api/simple/index.html\",\n decorator=[\n cache_control(10 * 60), # 10 minutes\n origin_cache(\n 1 * 24 * 60 * 60, # 1 day\n stale_while_revalidate=5 * 60, # 5 minutes\n stale_if_error=1 * 24 * 60 * 60, # 1 day\n ),\n ],\n)\ndef simple_index(request):\n # Get the latest serial number\n serial = request.db.query(func.max(JournalEntry.id)).scalar() or 0\n request.response.headers[\"X-PyPI-Last-Serial\"] = str(serial)\n\n # Fetch the name and normalized name for all of our projects\n projects = (\n request.db.query(Project.name, Project.normalized_name)\n .order_by(Project.normalized_name)\n .all()\n )\n\n return {\"projects\": projects}\n\n\n@view_config(\n route_name=\"legacy.api.simple.detail\",\n renderer=\"legacy/api/simple/detail.html\",\n decorator=[\n cache_control(10 * 60), # 10 minutes\n origin_cache(\n 1 * 24 * 60 * 60, # 1 day\n stale_while_revalidate=5 * 60, # 5 minutes\n stale_if_error=1 * 24 * 60 * 60, # 1 day\n ),\n ],\n)\ndef simple_detail(project, request):\n # TODO: Handle files which are not hosted on PyPI\n\n # Make sure that we're using the normalized version of the URL.\n if (project.normalized_name !=\n request.matchdict.get(\"name\", project.normalized_name)):\n return HTTPMovedPermanently(\n request.current_route_path(name=project.normalized_name),\n )\n\n # Get the latest serial number for this project.\n request.response.headers[\"X-PyPI-Last-Serial\"] = str(project.last_serial)\n\n # Get all of the files for this project.\n files = (\n request.db.query(File)\n .options(joinedload(File.release))\n .filter(\n File.name == project.name,\n File.version.in_(\n request.db.query(Release)\n .filter(Release.project == project)\n .with_entities(Release.version)\n )\n )\n .order_by(File.filename)\n .all()\n )\n\n return {\"project\": project, \"files\": files}\n", "path": "warehouse/legacy/api/simple.py"}], "after_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF 
ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom packaging.version import parse\nfrom pyramid.httpexceptions import HTTPMovedPermanently\nfrom pyramid.view import view_config\nfrom sqlalchemy import func\nfrom sqlalchemy.orm import joinedload\n\nfrom warehouse.cache.http import cache_control\nfrom warehouse.cache.origin import origin_cache\nfrom warehouse.packaging.models import JournalEntry, File, Project, Release\n\n\n@view_config(\n route_name=\"legacy.api.simple.index\",\n renderer=\"legacy/api/simple/index.html\",\n decorator=[\n cache_control(10 * 60), # 10 minutes\n origin_cache(\n 1 * 24 * 60 * 60, # 1 day\n stale_while_revalidate=5 * 60, # 5 minutes\n stale_if_error=1 * 24 * 60 * 60, # 1 day\n ),\n ],\n)\ndef simple_index(request):\n # Get the latest serial number\n serial = request.db.query(func.max(JournalEntry.id)).scalar() or 0\n request.response.headers[\"X-PyPI-Last-Serial\"] = str(serial)\n\n # Fetch the name and normalized name for all of our projects\n projects = (\n request.db.query(Project.name, Project.normalized_name)\n .order_by(Project.normalized_name)\n .all()\n )\n\n return {\"projects\": projects}\n\n\n@view_config(\n route_name=\"legacy.api.simple.detail\",\n renderer=\"legacy/api/simple/detail.html\",\n decorator=[\n cache_control(10 * 60), # 10 minutes\n origin_cache(\n 1 * 24 * 60 * 60, # 1 day\n stale_while_revalidate=5 * 60, # 5 minutes\n stale_if_error=1 * 24 * 60 * 60, # 1 day\n ),\n ],\n)\ndef simple_detail(project, request):\n # TODO: Handle files which are not hosted on PyPI\n\n # Make sure that we're using the normalized version of the URL.\n if (project.normalized_name !=\n request.matchdict.get(\"name\", project.normalized_name)):\n return HTTPMovedPermanently(\n request.current_route_path(name=project.normalized_name),\n )\n\n # Get the latest serial number for this project.\n request.response.headers[\"X-PyPI-Last-Serial\"] = str(project.last_serial)\n\n # Get all of the files for this project.\n files = sorted(\n request.db.query(File)\n .options(joinedload(File.release))\n .filter(\n File.name == project.name,\n File.version.in_(\n request.db.query(Release)\n .filter(Release.project == project)\n .with_entities(Release.version)\n )\n )\n .all(),\n key=lambda f: (parse(f.version), f.packagetype)\n )\n\n return {\"project\": project, \"files\": files}\n", "path": "warehouse/legacy/api/simple.py"}]}
1,731
249
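The change in the record above replaces `order_by(File.filename)` with an in-Python sort keyed on `packaging.version.parse`, because lexicographic ordering treats `0.10.0` as smaller than `0.2.0`, while PEP 440 parsing compares release segments numerically. A short sketch of the difference, assuming only the `packaging` library:

```python
from packaging.version import parse

versions = ["0.10.0", "0.2.0", "0.9.1"]

print(sorted(versions))             # ['0.10.0', '0.2.0', '0.9.1']  (lexicographic, wrong)
print(sorted(versions, key=parse))  # ['0.2.0', '0.9.1', '0.10.0']  (PEP 440 order)
```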
gh_patches_debug_9708
rasdani/github-patches
git_diff
praw-dev__praw-1810
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Failed to upload a video. **Describe the bug** Failed to upload a video. **To Reproduce** Steps to reproduce the behavior: submit any video **Code/Logs** ``` >>> s = sbrdt.submit_video ('video', 'myvideo.mp4') Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/home/gaspar/.local/lib/python3.9/site-packages/praw/models/reddit/subreddit.py", line 1383, in submit_video video_poster_url=self._upload_media(thumbnail_path)[0], File "/home/gaspar/.local/lib/python3.9/site-packages/praw/models/reddit/subreddit.py", line 695, in _upload_media with open(media_path, "rb") as media: FileNotFoundError: [Errno 2] No such file or directory: '/home/gaspar/.local/lib/python3.9/site-packages/praw/images/PRAW logo.png' ``` **System Info** - OS: Arch Linux - Python: 3.9.5 - PRAW Version: 7.4.0 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 """praw setup.py""" 2 3 import re 4 from codecs import open 5 from os import path 6 7 from setuptools import find_packages, setup 8 9 PACKAGE_NAME = "praw" 10 HERE = path.abspath(path.dirname(__file__)) 11 with open(path.join(HERE, "README.rst"), encoding="utf-8") as fp: 12 README = fp.read() 13 with open(path.join(HERE, PACKAGE_NAME, "const.py"), encoding="utf-8") as fp: 14 VERSION = re.search('__version__ = "([^"]+)"', fp.read()).group(1) 15 16 extras = { 17 "ci": ["coveralls"], 18 "dev": ["packaging"], 19 "lint": [ 20 "pre-commit", 21 "sphinx", 22 "sphinx_rtd_theme", 23 ], 24 "readthedocs": ["sphinx", "sphinx_rtd_theme"], 25 "test": [ 26 "betamax >=0.8, <0.9", 27 "betamax-matchers >=0.3.0, <0.5", 28 "pytest >=2.7.3", 29 ], 30 } 31 extras["dev"] += extras["lint"] + extras["test"] 32 33 setup( 34 name=PACKAGE_NAME, 35 author="Bryce Boe", 36 author_email="[email protected]", 37 python_requires="~=3.6", 38 classifiers=[ 39 "Development Status :: 5 - Production/Stable", 40 "Environment :: Console", 41 "Intended Audience :: Developers", 42 "License :: OSI Approved :: BSD License", 43 "Natural Language :: English", 44 "Operating System :: OS Independent", 45 "Programming Language :: Python", 46 "Programming Language :: Python :: 3", 47 "Programming Language :: Python :: 3.6", 48 "Programming Language :: Python :: 3.7", 49 "Programming Language :: Python :: 3.8", 50 "Programming Language :: Python :: 3.9", 51 "Programming Language :: Python :: 3.10", 52 "Topic :: Utilities", 53 ], 54 description=( 55 "PRAW, an acronym for `Python Reddit API Wrapper`, is a python package that" 56 " allows for simple access to reddit's API." 
57 ), 58 extras_require=extras, 59 install_requires=[ 60 "prawcore >=2.1, <3", 61 "update_checker >=0.18", 62 "websocket-client >=0.54.0", 63 ], 64 keywords="reddit api wrapper", 65 license="Simplified BSD License", 66 long_description=README, 67 package_data={"": ["LICENSE.txt"], PACKAGE_NAME: ["*.ini", "images/*.jpg"]}, 68 packages=find_packages(exclude=["tests", "tests.*", "tools", "tools.*"]), 69 project_urls={ 70 "Change Log": "https://praw.readthedocs.io/en/latest/package_info/change_log.html", 71 "Documentation": "https://praw.readthedocs.io/", 72 "Issue Tracker": "https://github.com/praw-dev/praw/issues", 73 "Source Code": "https://github.com/praw-dev/praw", 74 }, 75 version=VERSION, 76 ) 77 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -64,7 +64,7 @@ keywords="reddit api wrapper", license="Simplified BSD License", long_description=README, - package_data={"": ["LICENSE.txt"], PACKAGE_NAME: ["*.ini", "images/*.jpg"]}, + package_data={"": ["LICENSE.txt"], PACKAGE_NAME: ["*.ini", "images/*.png"]}, packages=find_packages(exclude=["tests", "tests.*", "tools", "tools.*"]), project_urls={ "Change Log": "https://praw.readthedocs.io/en/latest/package_info/change_log.html",
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -64,7 +64,7 @@\n keywords=\"reddit api wrapper\",\n license=\"Simplified BSD License\",\n long_description=README,\n- package_data={\"\": [\"LICENSE.txt\"], PACKAGE_NAME: [\"*.ini\", \"images/*.jpg\"]},\n+ package_data={\"\": [\"LICENSE.txt\"], PACKAGE_NAME: [\"*.ini\", \"images/*.png\"]},\n packages=find_packages(exclude=[\"tests\", \"tests.*\", \"tools\", \"tools.*\"]),\n project_urls={\n \"Change Log\": \"https://praw.readthedocs.io/en/latest/package_info/change_log.html\",\n", "issue": "Failed to upload a video.\n**Describe the bug**\r\nFailed to upload a video.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\nsubmit any video\r\n\r\n**Code/Logs**\r\n```\r\n>>> s = sbrdt.submit_video ('video', 'myvideo.mp4')\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/home/gaspar/.local/lib/python3.9/site-packages/praw/models/reddit/subreddit.py\", line 1383, in submit_video\r\n video_poster_url=self._upload_media(thumbnail_path)[0],\r\n File \"/home/gaspar/.local/lib/python3.9/site-packages/praw/models/reddit/subreddit.py\", line 695, in _upload_media\r\n with open(media_path, \"rb\") as media:\r\nFileNotFoundError: [Errno 2] No such file or directory: '/home/gaspar/.local/lib/python3.9/site-packages/praw/images/PRAW logo.png'\r\n```\r\n\r\n**System Info**\r\n - OS: Arch Linux\r\n - Python: 3.9.5\r\n - PRAW Version: 7.4.0\r\n\n", "before_files": [{"content": "\"\"\"praw setup.py\"\"\"\n\nimport re\nfrom codecs import open\nfrom os import path\n\nfrom setuptools import find_packages, setup\n\nPACKAGE_NAME = \"praw\"\nHERE = path.abspath(path.dirname(__file__))\nwith open(path.join(HERE, \"README.rst\"), encoding=\"utf-8\") as fp:\n README = fp.read()\nwith open(path.join(HERE, PACKAGE_NAME, \"const.py\"), encoding=\"utf-8\") as fp:\n VERSION = re.search('__version__ = \"([^\"]+)\"', fp.read()).group(1)\n\nextras = {\n \"ci\": [\"coveralls\"],\n \"dev\": [\"packaging\"],\n \"lint\": [\n \"pre-commit\",\n \"sphinx\",\n \"sphinx_rtd_theme\",\n ],\n \"readthedocs\": [\"sphinx\", \"sphinx_rtd_theme\"],\n \"test\": [\n \"betamax >=0.8, <0.9\",\n \"betamax-matchers >=0.3.0, <0.5\",\n \"pytest >=2.7.3\",\n ],\n}\nextras[\"dev\"] += extras[\"lint\"] + extras[\"test\"]\n\nsetup(\n name=PACKAGE_NAME,\n author=\"Bryce Boe\",\n author_email=\"[email protected]\",\n python_requires=\"~=3.6\",\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Natural Language :: English\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Topic :: Utilities\",\n ],\n description=(\n \"PRAW, an acronym for `Python Reddit API Wrapper`, is a python package that\"\n \" allows for simple access to reddit's API.\"\n ),\n extras_require=extras,\n install_requires=[\n \"prawcore >=2.1, <3\",\n \"update_checker >=0.18\",\n \"websocket-client >=0.54.0\",\n ],\n keywords=\"reddit api wrapper\",\n license=\"Simplified BSD License\",\n long_description=README,\n package_data={\"\": [\"LICENSE.txt\"], PACKAGE_NAME: [\"*.ini\", \"images/*.jpg\"]},\n 
packages=find_packages(exclude=[\"tests\", \"tests.*\", \"tools\", \"tools.*\"]),\n project_urls={\n \"Change Log\": \"https://praw.readthedocs.io/en/latest/package_info/change_log.html\",\n \"Documentation\": \"https://praw.readthedocs.io/\",\n \"Issue Tracker\": \"https://github.com/praw-dev/praw/issues\",\n \"Source Code\": \"https://github.com/praw-dev/praw\",\n },\n version=VERSION,\n)\n", "path": "setup.py"}], "after_files": [{"content": "\"\"\"praw setup.py\"\"\"\n\nimport re\nfrom codecs import open\nfrom os import path\n\nfrom setuptools import find_packages, setup\n\nPACKAGE_NAME = \"praw\"\nHERE = path.abspath(path.dirname(__file__))\nwith open(path.join(HERE, \"README.rst\"), encoding=\"utf-8\") as fp:\n README = fp.read()\nwith open(path.join(HERE, PACKAGE_NAME, \"const.py\"), encoding=\"utf-8\") as fp:\n VERSION = re.search('__version__ = \"([^\"]+)\"', fp.read()).group(1)\n\nextras = {\n \"ci\": [\"coveralls\"],\n \"dev\": [\"packaging\"],\n \"lint\": [\n \"pre-commit\",\n \"sphinx\",\n \"sphinx_rtd_theme\",\n ],\n \"readthedocs\": [\"sphinx\", \"sphinx_rtd_theme\"],\n \"test\": [\n \"betamax >=0.8, <0.9\",\n \"betamax-matchers >=0.3.0, <0.5\",\n \"pytest >=2.7.3\",\n ],\n}\nextras[\"dev\"] += extras[\"lint\"] + extras[\"test\"]\n\nsetup(\n name=PACKAGE_NAME,\n author=\"Bryce Boe\",\n author_email=\"[email protected]\",\n python_requires=\"~=3.6\",\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Natural Language :: English\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Topic :: Utilities\",\n ],\n description=(\n \"PRAW, an acronym for `Python Reddit API Wrapper`, is a python package that\"\n \" allows for simple access to reddit's API.\"\n ),\n extras_require=extras,\n install_requires=[\n \"prawcore >=2.1, <3\",\n \"update_checker >=0.18\",\n \"websocket-client >=0.54.0\",\n ],\n keywords=\"reddit api wrapper\",\n license=\"Simplified BSD License\",\n long_description=README,\n package_data={\"\": [\"LICENSE.txt\"], PACKAGE_NAME: [\"*.ini\", \"images/*.png\"]},\n packages=find_packages(exclude=[\"tests\", \"tests.*\", \"tools\", \"tools.*\"]),\n project_urls={\n \"Change Log\": \"https://praw.readthedocs.io/en/latest/package_info/change_log.html\",\n \"Documentation\": \"https://praw.readthedocs.io/\",\n \"Issue Tracker\": \"https://github.com/praw-dev/praw/issues\",\n \"Source Code\": \"https://github.com/praw-dev/praw\",\n },\n version=VERSION,\n)\n", "path": "setup.py"}]}
1,298
145
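The `FileNotFoundError` in the record above is a packaging problem rather than an upload bug: `package_data` only shipped `images/*.jpg`, so the bundled `PRAW logo.png` that `submit_video` falls back to for the thumbnail was never installed, and the fix simply switches the glob to `images/*.png`. A rough stdlib sketch of why the original pattern missed the file; `fnmatch` is used here only as a stand-in for the glob matching setuptools performs:

```python
from fnmatch import fnmatch

filename = "images/PRAW logo.png"

print(fnmatch(filename, "images/*.jpg"))  # False -> the logo is left out of the built package
print(fnmatch(filename, "images/*.png"))  # True  -> matched after the fix, so open() can find it
```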
gh_patches_debug_35228
rasdani/github-patches
git_diff
mirumee__ariadne-529
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- OpenTracing plugin performs deepcopy of resolver's args, which fails when file upload for larger file is used. OpenTracing performs deep copy of arguments passed to the resolver function when args filtering is used (eg. to hide passwords), but this apparently fails there's larger uploaded file in the args. Potential fix would be default filter that replaces uploaded files with cheap str representation (eg. `<UploadedFile(name="test.jpg", type="image/jpeg", size=44100)>`) before custom filtering logic is ran next. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `ariadne/contrib/tracing/opentracing.py` Content: ``` 1 from copy import deepcopy 2 from functools import partial 3 from inspect import isawaitable 4 from typing import Any, Callable, Dict, Optional 5 6 from graphql import GraphQLResolveInfo 7 from opentracing import Scope, Tracer, global_tracer 8 from opentracing.ext import tags 9 10 from ...types import ContextValue, Extension, Resolver 11 from .utils import format_path, should_trace 12 13 ArgFilter = Callable[[Dict[str, Any], GraphQLResolveInfo], Dict[str, Any]] 14 15 16 class OpenTracingExtension(Extension): 17 _arg_filter: Optional[ArgFilter] 18 _root_scope: Scope 19 _tracer: Tracer 20 21 def __init__(self, *, arg_filter: Optional[ArgFilter] = None): 22 self._arg_filter = arg_filter 23 self._tracer = global_tracer() 24 self._root_scope = None 25 26 def request_started(self, context: ContextValue): 27 self._root_scope = self._tracer.start_active_span("GraphQL Query") 28 self._root_scope.span.set_tag(tags.COMPONENT, "graphql") 29 30 def request_finished(self, context: ContextValue): 31 self._root_scope.close() 32 33 async def resolve( 34 self, next_: Resolver, parent: Any, info: GraphQLResolveInfo, **kwargs 35 ): 36 if not should_trace(info): 37 result = next_(parent, info, **kwargs) 38 if isawaitable(result): 39 result = await result 40 return result 41 42 with self._tracer.start_active_span(info.field_name) as scope: 43 span = scope.span 44 span.set_tag(tags.COMPONENT, "graphql") 45 span.set_tag("graphql.parentType", info.parent_type.name) 46 47 graphql_path = ".".join( 48 map(str, format_path(info.path)) # pylint: disable=bad-builtin 49 ) 50 span.set_tag("graphql.path", graphql_path) 51 52 if kwargs: 53 filtered_kwargs = self.filter_resolver_args(kwargs, info) 54 for kwarg, value in filtered_kwargs.items(): 55 span.set_tag(f"graphql.param.{kwarg}", value) 56 57 result = next_(parent, info, **kwargs) 58 if isawaitable(result): 59 result = await result 60 return result 61 62 def filter_resolver_args( 63 self, args: Dict[str, Any], info: GraphQLResolveInfo 64 ) -> Dict[str, Any]: 65 if not self._arg_filter: 66 return args 67 68 return self._arg_filter(deepcopy(args), info) 69 70 71 class OpenTracingExtensionSync(OpenTracingExtension): 72 def resolve( 73 self, next_: Resolver, parent: Any, info: GraphQLResolveInfo, **kwargs 74 ): # pylint: disable=invalid-overridden-method 75 if not should_trace(info): 76 result = next_(parent, info, **kwargs) 77 return result 78 79 with self._tracer.start_active_span(info.field_name) as scope: 80 span = scope.span 81 span.set_tag(tags.COMPONENT, "graphql") 82 span.set_tag("graphql.parentType", info.parent_type.name) 83 84 graphql_path = ".".join( 85 map(str, format_path(info.path)) # pylint: disable=bad-builtin 86 ) 87 
span.set_tag("graphql.path", graphql_path) 88 89 if kwargs: 90 filtered_kwargs = self.filter_resolver_args(kwargs, info) 91 for kwarg, value in filtered_kwargs.items(): 92 span.set_tag(f"graphql.param.{kwarg}", value) 93 94 result = next_(parent, info, **kwargs) 95 return result 96 97 98 def opentracing_extension(*, arg_filter: Optional[ArgFilter] = None): 99 return partial(OpenTracingExtension, arg_filter=arg_filter) 100 101 102 def opentracing_extension_sync(*, arg_filter: Optional[ArgFilter] = None): 103 return partial(OpenTracingExtensionSync, arg_filter=arg_filter) 104 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/ariadne/contrib/tracing/opentracing.py b/ariadne/contrib/tracing/opentracing.py --- a/ariadne/contrib/tracing/opentracing.py +++ b/ariadne/contrib/tracing/opentracing.py @@ -1,11 +1,13 @@ -from copy import deepcopy +import cgi +import os from functools import partial from inspect import isawaitable -from typing import Any, Callable, Dict, Optional +from typing import Any, Callable, Dict, Optional, Union from graphql import GraphQLResolveInfo from opentracing import Scope, Tracer, global_tracer from opentracing.ext import tags +from starlette.datastructures import UploadFile from ...types import ContextValue, Extension, Resolver from .utils import format_path, should_trace @@ -62,10 +64,12 @@ def filter_resolver_args( self, args: Dict[str, Any], info: GraphQLResolveInfo ) -> Dict[str, Any]: + args_to_trace = copy_args_for_tracing(args) + if not self._arg_filter: - return args + return args_to_trace - return self._arg_filter(deepcopy(args), info) + return self._arg_filter(args_to_trace, info) class OpenTracingExtensionSync(OpenTracingExtension): @@ -101,3 +105,34 @@ def opentracing_extension_sync(*, arg_filter: Optional[ArgFilter] = None): return partial(OpenTracingExtensionSync, arg_filter=arg_filter) + + +def copy_args_for_tracing(value: Any) -> Any: + if isinstance(value, dict): + return {k: copy_args_for_tracing(v) for k, v in value.items()} + if isinstance(value, list): + return [copy_args_for_tracing(v) for v in value] + if isinstance(value, (UploadFile, cgi.FieldStorage)): + return repr_upload_file(value) + return value + + +def repr_upload_file(upload_file: Union[UploadFile, cgi.FieldStorage]) -> str: + filename = upload_file.filename + + if isinstance(upload_file, cgi.FieldStorage): + mime_type = upload_file.type + else: + mime_type = upload_file.content_type + + if upload_file.file is None and isinstance(upload_file, cgi.FieldStorage): + size = len(upload_file.value) if upload_file.value is not None else 0 + else: + file_ = upload_file.file + file_.seek(0, os.SEEK_END) + size = file_.tell() + file_.seek(0) + + return ( + f"{type(upload_file)}(mime_type={mime_type}, size={size}, filename={filename})" + )
{"golden_diff": "diff --git a/ariadne/contrib/tracing/opentracing.py b/ariadne/contrib/tracing/opentracing.py\n--- a/ariadne/contrib/tracing/opentracing.py\n+++ b/ariadne/contrib/tracing/opentracing.py\n@@ -1,11 +1,13 @@\n-from copy import deepcopy\n+import cgi\n+import os\n from functools import partial\n from inspect import isawaitable\n-from typing import Any, Callable, Dict, Optional\n+from typing import Any, Callable, Dict, Optional, Union\n \n from graphql import GraphQLResolveInfo\n from opentracing import Scope, Tracer, global_tracer\n from opentracing.ext import tags\n+from starlette.datastructures import UploadFile\n \n from ...types import ContextValue, Extension, Resolver\n from .utils import format_path, should_trace\n@@ -62,10 +64,12 @@\n def filter_resolver_args(\n self, args: Dict[str, Any], info: GraphQLResolveInfo\n ) -> Dict[str, Any]:\n+ args_to_trace = copy_args_for_tracing(args)\n+\n if not self._arg_filter:\n- return args\n+ return args_to_trace\n \n- return self._arg_filter(deepcopy(args), info)\n+ return self._arg_filter(args_to_trace, info)\n \n \n class OpenTracingExtensionSync(OpenTracingExtension):\n@@ -101,3 +105,34 @@\n \n def opentracing_extension_sync(*, arg_filter: Optional[ArgFilter] = None):\n return partial(OpenTracingExtensionSync, arg_filter=arg_filter)\n+\n+\n+def copy_args_for_tracing(value: Any) -> Any:\n+ if isinstance(value, dict):\n+ return {k: copy_args_for_tracing(v) for k, v in value.items()}\n+ if isinstance(value, list):\n+ return [copy_args_for_tracing(v) for v in value]\n+ if isinstance(value, (UploadFile, cgi.FieldStorage)):\n+ return repr_upload_file(value)\n+ return value\n+\n+\n+def repr_upload_file(upload_file: Union[UploadFile, cgi.FieldStorage]) -> str:\n+ filename = upload_file.filename\n+\n+ if isinstance(upload_file, cgi.FieldStorage):\n+ mime_type = upload_file.type\n+ else:\n+ mime_type = upload_file.content_type\n+\n+ if upload_file.file is None and isinstance(upload_file, cgi.FieldStorage):\n+ size = len(upload_file.value) if upload_file.value is not None else 0\n+ else:\n+ file_ = upload_file.file\n+ file_.seek(0, os.SEEK_END)\n+ size = file_.tell()\n+ file_.seek(0)\n+\n+ return (\n+ f\"{type(upload_file)}(mime_type={mime_type}, size={size}, filename={filename})\"\n+ )\n", "issue": "OpenTracing plugin performs deepcopy of resolver's args, which fails when file upload for larger file is used.\nOpenTracing performs deep copy of arguments passed to the resolver function when args filtering is used (eg. to hide passwords), but this apparently fails there's larger uploaded file in the args.\r\n\r\nPotential fix would be default filter that replaces uploaded files with cheap str representation (eg. 
`<UploadedFile(name=\"test.jpg\", type=\"image/jpeg\", size=44100)>`) before custom filtering logic is ran next.\n", "before_files": [{"content": "from copy import deepcopy\nfrom functools import partial\nfrom inspect import isawaitable\nfrom typing import Any, Callable, Dict, Optional\n\nfrom graphql import GraphQLResolveInfo\nfrom opentracing import Scope, Tracer, global_tracer\nfrom opentracing.ext import tags\n\nfrom ...types import ContextValue, Extension, Resolver\nfrom .utils import format_path, should_trace\n\nArgFilter = Callable[[Dict[str, Any], GraphQLResolveInfo], Dict[str, Any]]\n\n\nclass OpenTracingExtension(Extension):\n _arg_filter: Optional[ArgFilter]\n _root_scope: Scope\n _tracer: Tracer\n\n def __init__(self, *, arg_filter: Optional[ArgFilter] = None):\n self._arg_filter = arg_filter\n self._tracer = global_tracer()\n self._root_scope = None\n\n def request_started(self, context: ContextValue):\n self._root_scope = self._tracer.start_active_span(\"GraphQL Query\")\n self._root_scope.span.set_tag(tags.COMPONENT, \"graphql\")\n\n def request_finished(self, context: ContextValue):\n self._root_scope.close()\n\n async def resolve(\n self, next_: Resolver, parent: Any, info: GraphQLResolveInfo, **kwargs\n ):\n if not should_trace(info):\n result = next_(parent, info, **kwargs)\n if isawaitable(result):\n result = await result\n return result\n\n with self._tracer.start_active_span(info.field_name) as scope:\n span = scope.span\n span.set_tag(tags.COMPONENT, \"graphql\")\n span.set_tag(\"graphql.parentType\", info.parent_type.name)\n\n graphql_path = \".\".join(\n map(str, format_path(info.path)) # pylint: disable=bad-builtin\n )\n span.set_tag(\"graphql.path\", graphql_path)\n\n if kwargs:\n filtered_kwargs = self.filter_resolver_args(kwargs, info)\n for kwarg, value in filtered_kwargs.items():\n span.set_tag(f\"graphql.param.{kwarg}\", value)\n\n result = next_(parent, info, **kwargs)\n if isawaitable(result):\n result = await result\n return result\n\n def filter_resolver_args(\n self, args: Dict[str, Any], info: GraphQLResolveInfo\n ) -> Dict[str, Any]:\n if not self._arg_filter:\n return args\n\n return self._arg_filter(deepcopy(args), info)\n\n\nclass OpenTracingExtensionSync(OpenTracingExtension):\n def resolve(\n self, next_: Resolver, parent: Any, info: GraphQLResolveInfo, **kwargs\n ): # pylint: disable=invalid-overridden-method\n if not should_trace(info):\n result = next_(parent, info, **kwargs)\n return result\n\n with self._tracer.start_active_span(info.field_name) as scope:\n span = scope.span\n span.set_tag(tags.COMPONENT, \"graphql\")\n span.set_tag(\"graphql.parentType\", info.parent_type.name)\n\n graphql_path = \".\".join(\n map(str, format_path(info.path)) # pylint: disable=bad-builtin\n )\n span.set_tag(\"graphql.path\", graphql_path)\n\n if kwargs:\n filtered_kwargs = self.filter_resolver_args(kwargs, info)\n for kwarg, value in filtered_kwargs.items():\n span.set_tag(f\"graphql.param.{kwarg}\", value)\n\n result = next_(parent, info, **kwargs)\n return result\n\n\ndef opentracing_extension(*, arg_filter: Optional[ArgFilter] = None):\n return partial(OpenTracingExtension, arg_filter=arg_filter)\n\n\ndef opentracing_extension_sync(*, arg_filter: Optional[ArgFilter] = None):\n return partial(OpenTracingExtensionSync, arg_filter=arg_filter)\n", "path": "ariadne/contrib/tracing/opentracing.py"}], "after_files": [{"content": "import cgi\nimport os\nfrom functools import partial\nfrom inspect import isawaitable\nfrom typing import Any, Callable, Dict, 
Optional, Union\n\nfrom graphql import GraphQLResolveInfo\nfrom opentracing import Scope, Tracer, global_tracer\nfrom opentracing.ext import tags\nfrom starlette.datastructures import UploadFile\n\nfrom ...types import ContextValue, Extension, Resolver\nfrom .utils import format_path, should_trace\n\nArgFilter = Callable[[Dict[str, Any], GraphQLResolveInfo], Dict[str, Any]]\n\n\nclass OpenTracingExtension(Extension):\n _arg_filter: Optional[ArgFilter]\n _root_scope: Scope\n _tracer: Tracer\n\n def __init__(self, *, arg_filter: Optional[ArgFilter] = None):\n self._arg_filter = arg_filter\n self._tracer = global_tracer()\n self._root_scope = None\n\n def request_started(self, context: ContextValue):\n self._root_scope = self._tracer.start_active_span(\"GraphQL Query\")\n self._root_scope.span.set_tag(tags.COMPONENT, \"graphql\")\n\n def request_finished(self, context: ContextValue):\n self._root_scope.close()\n\n async def resolve(\n self, next_: Resolver, parent: Any, info: GraphQLResolveInfo, **kwargs\n ):\n if not should_trace(info):\n result = next_(parent, info, **kwargs)\n if isawaitable(result):\n result = await result\n return result\n\n with self._tracer.start_active_span(info.field_name) as scope:\n span = scope.span\n span.set_tag(tags.COMPONENT, \"graphql\")\n span.set_tag(\"graphql.parentType\", info.parent_type.name)\n\n graphql_path = \".\".join(\n map(str, format_path(info.path)) # pylint: disable=bad-builtin\n )\n span.set_tag(\"graphql.path\", graphql_path)\n\n if kwargs:\n filtered_kwargs = self.filter_resolver_args(kwargs, info)\n for kwarg, value in filtered_kwargs.items():\n span.set_tag(f\"graphql.param.{kwarg}\", value)\n\n result = next_(parent, info, **kwargs)\n if isawaitable(result):\n result = await result\n return result\n\n def filter_resolver_args(\n self, args: Dict[str, Any], info: GraphQLResolveInfo\n ) -> Dict[str, Any]:\n args_to_trace = copy_args_for_tracing(args)\n\n if not self._arg_filter:\n return args_to_trace\n\n return self._arg_filter(args_to_trace, info)\n\n\nclass OpenTracingExtensionSync(OpenTracingExtension):\n def resolve(\n self, next_: Resolver, parent: Any, info: GraphQLResolveInfo, **kwargs\n ): # pylint: disable=invalid-overridden-method\n if not should_trace(info):\n result = next_(parent, info, **kwargs)\n return result\n\n with self._tracer.start_active_span(info.field_name) as scope:\n span = scope.span\n span.set_tag(tags.COMPONENT, \"graphql\")\n span.set_tag(\"graphql.parentType\", info.parent_type.name)\n\n graphql_path = \".\".join(\n map(str, format_path(info.path)) # pylint: disable=bad-builtin\n )\n span.set_tag(\"graphql.path\", graphql_path)\n\n if kwargs:\n filtered_kwargs = self.filter_resolver_args(kwargs, info)\n for kwarg, value in filtered_kwargs.items():\n span.set_tag(f\"graphql.param.{kwarg}\", value)\n\n result = next_(parent, info, **kwargs)\n return result\n\n\ndef opentracing_extension(*, arg_filter: Optional[ArgFilter] = None):\n return partial(OpenTracingExtension, arg_filter=arg_filter)\n\n\ndef opentracing_extension_sync(*, arg_filter: Optional[ArgFilter] = None):\n return partial(OpenTracingExtensionSync, arg_filter=arg_filter)\n\n\ndef copy_args_for_tracing(value: Any) -> Any:\n if isinstance(value, dict):\n return {k: copy_args_for_tracing(v) for k, v in value.items()}\n if isinstance(value, list):\n return [copy_args_for_tracing(v) for v in value]\n if isinstance(value, (UploadFile, cgi.FieldStorage)):\n return repr_upload_file(value)\n return value\n\n\ndef repr_upload_file(upload_file: 
Union[UploadFile, cgi.FieldStorage]) -> str:\n filename = upload_file.filename\n\n if isinstance(upload_file, cgi.FieldStorage):\n mime_type = upload_file.type\n else:\n mime_type = upload_file.content_type\n\n if upload_file.file is None and isinstance(upload_file, cgi.FieldStorage):\n size = len(upload_file.value) if upload_file.value is not None else 0\n else:\n file_ = upload_file.file\n file_.seek(0, os.SEEK_END)\n size = file_.tell()\n file_.seek(0)\n\n return (\n f\"{type(upload_file)}(mime_type={mime_type}, size={size}, filename={filename})\"\n )\n", "path": "ariadne/contrib/tracing/opentracing.py"}]}
1392
623
gh_patches_debug_37501
rasdani/github-patches
git_diff
pytorch__vision-1005
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Mobilenet v2 width multiplier incorrect There seems to be a small issue with the width multiplier in mobilenet v2. The official implementation rounds filter channels to a multiple of 8. For example, mobilenet v2 width 1.4. The first conv layer has 44 channels as opposed to 48 in the official implementation: model = torchvision.models.mobilenet_v2(width_mult=1.4) for module in model.modules(): if isinstance(module, nn.Conv2d): print(module.weight.shape) torch.Size([44, 3, 3, 3]) torch.Size([44, 1, 3, 3]) torch.Size([22, 44, 1, 1]) torch.Size([132, 22, 1, 1]) torch.Size([132, 1, 3, 3]) torch.Size([33, 132, 1, 1]) Corresponding tensorflow 2.0 keras code: model = tf.keras.applications.MobileNetV2( weights="imagenet", input_shape=(224, 224, 3), alpha=1.4) model.summary() __________________________________________________________________________________________________ Layer (type) Output Shape Param # Connected to ================================================================================================== input_1 (InputLayer) [(None, 224, 224, 3) 0 __________________________________________________________________________________________________ Conv1_pad (ZeroPadding2D) (None, 225, 225, 3) 0 input_1[0][0] __________________________________________________________________________________________________ Conv1 (Conv2D) (None, 112, 112, 48) 1296 Conv1_pad[0][0] __________________________________________________________________________________________________ bn_Conv1 (BatchNormalization) (None, 112, 112, 48) 192 Conv1[0][0] __________________________________________________________________________________________________ Conv1_relu (ReLU) (None, 112, 112, 48) 0 bn_Conv1[0][0] __________________________________________________________________________________________________ expanded_conv_depthwise (Depthw (None, 112, 112, 48) 432 Conv1_relu[0][0] __________________________________________________________________________________________________ expanded_conv_depthwise_BN (Bat (None, 112, 112, 48) 192 expanded_conv_depthwise[0][0] __________________________________________________________________________________________________ expanded_conv_depthwise_relu (R (None, 112, 112, 48) 0 expanded_conv_depthwise_BN[0][0] __________________________________________________________________________________________________ expanded_conv_project (Conv2D) (None, 112, 112, 24) 1152 expanded_conv_depthwise_relu[0][0 __________________________________________________________________________________________________ expanded_conv_project_BN (Batch (None, 112, 112, 24) 96 expanded_conv_project[0][0] __________________________________________________________________________________________________ block_1_expand (Conv2D) (None, 112, 112, 144 3456 expanded_conv_project_BN[0][0] __________________________________________________________________________________________________ block_1_expand_BN (BatchNormali (None, 112, 112, 144 576 block_1_expand[0][0] __________________________________________________________________________________________________ block_1_expand_relu (ReLU) (None, 112, 112, 144 0 block_1_expand_BN[0][0] __________________________________________________________________________________________________ block_1_pad (ZeroPadding2D) (None, 113, 113, 144 0 block_1_expand_relu[0][0] __________________________________________________________________________________________________ 
block_1_depthwise (DepthwiseCon (None, 56, 56, 144) 1296 block_1_pad[0][0] __________________________________________________________________________________________________ block_1_depthwise_BN (BatchNorm (None, 56, 56, 144) 576 block_1_depthwise[0][0] __________________________________________________________________________________________________ block_1_depthwise_relu (ReLU) (None, 56, 56, 144) 0 block_1_depthwise_BN[0][0] __________________________________________________________________________________________________ block_1_project (Conv2D) (None, 56, 56, 32) 4608 block_1_depthwise_relu[0][0] __________________________________________________________________________________________________ I've implemented a fix here: https://github.com/yaysummeriscoming/vision/blob/master/torchvision/models/mobilenet.py Can I merge it in? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `torchvision/models/mobilenet.py` Content: ``` 1 from torch import nn 2 from .utils import load_state_dict_from_url 3 4 5 __all__ = ['MobileNetV2', 'mobilenet_v2'] 6 7 8 model_urls = { 9 'mobilenet_v2': 'https://download.pytorch.org/models/mobilenet_v2-b0353104.pth', 10 } 11 12 13 class ConvBNReLU(nn.Sequential): 14 def __init__(self, in_planes, out_planes, kernel_size=3, stride=1, groups=1): 15 padding = (kernel_size - 1) // 2 16 super(ConvBNReLU, self).__init__( 17 nn.Conv2d(in_planes, out_planes, kernel_size, stride, padding, groups=groups, bias=False), 18 nn.BatchNorm2d(out_planes), 19 nn.ReLU6(inplace=True) 20 ) 21 22 23 class InvertedResidual(nn.Module): 24 def __init__(self, inp, oup, stride, expand_ratio): 25 super(InvertedResidual, self).__init__() 26 self.stride = stride 27 assert stride in [1, 2] 28 29 hidden_dim = int(round(inp * expand_ratio)) 30 self.use_res_connect = self.stride == 1 and inp == oup 31 32 layers = [] 33 if expand_ratio != 1: 34 # pw 35 layers.append(ConvBNReLU(inp, hidden_dim, kernel_size=1)) 36 layers.extend([ 37 # dw 38 ConvBNReLU(hidden_dim, hidden_dim, stride=stride, groups=hidden_dim), 39 # pw-linear 40 nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False), 41 nn.BatchNorm2d(oup), 42 ]) 43 self.conv = nn.Sequential(*layers) 44 45 def forward(self, x): 46 if self.use_res_connect: 47 return x + self.conv(x) 48 else: 49 return self.conv(x) 50 51 52 class MobileNetV2(nn.Module): 53 def __init__(self, num_classes=1000, width_mult=1.0, inverted_residual_setting=None): 54 super(MobileNetV2, self).__init__() 55 block = InvertedResidual 56 input_channel = 32 57 last_channel = 1280 58 59 if inverted_residual_setting is None: 60 inverted_residual_setting = [ 61 # t, c, n, s 62 [1, 16, 1, 1], 63 [6, 24, 2, 2], 64 [6, 32, 3, 2], 65 [6, 64, 4, 2], 66 [6, 96, 3, 1], 67 [6, 160, 3, 2], 68 [6, 320, 1, 1], 69 ] 70 71 # only check the first element, assuming user knows t,c,n,s are required 72 if len(inverted_residual_setting) == 0 or len(inverted_residual_setting[0]) != 4: 73 raise ValueError("inverted_residual_setting should be non-empty " 74 "or a 4-element list, got {}".format(inverted_residual_setting)) 75 76 # building first layer 77 input_channel = int(input_channel * width_mult) 78 self.last_channel = int(last_channel * max(1.0, width_mult)) 79 features = [ConvBNReLU(3, input_channel, stride=2)] 80 # building inverted residual blocks 81 for t, c, n, s in inverted_residual_setting: 82 output_channel = int(c * width_mult) 83 for i in range(n): 84 stride = s if i == 0 else 1 85 
features.append(block(input_channel, output_channel, stride, expand_ratio=t)) 86 input_channel = output_channel 87 # building last several layers 88 features.append(ConvBNReLU(input_channel, self.last_channel, kernel_size=1)) 89 # make it nn.Sequential 90 self.features = nn.Sequential(*features) 91 92 # building classifier 93 self.classifier = nn.Sequential( 94 nn.Dropout(0.2), 95 nn.Linear(self.last_channel, num_classes), 96 ) 97 98 # weight initialization 99 for m in self.modules(): 100 if isinstance(m, nn.Conv2d): 101 nn.init.kaiming_normal_(m.weight, mode='fan_out') 102 if m.bias is not None: 103 nn.init.zeros_(m.bias) 104 elif isinstance(m, nn.BatchNorm2d): 105 nn.init.ones_(m.weight) 106 nn.init.zeros_(m.bias) 107 elif isinstance(m, nn.Linear): 108 nn.init.normal_(m.weight, 0, 0.01) 109 nn.init.zeros_(m.bias) 110 111 def forward(self, x): 112 x = self.features(x) 113 x = x.mean([2, 3]) 114 x = self.classifier(x) 115 return x 116 117 118 def mobilenet_v2(pretrained=False, progress=True, **kwargs): 119 """ 120 Constructs a MobileNetV2 architecture from 121 `"MobileNetV2: Inverted Residuals and Linear Bottlenecks" <https://arxiv.org/abs/1801.04381>`_. 122 123 Args: 124 pretrained (bool): If True, returns a model pre-trained on ImageNet 125 progress (bool): If True, displays a progress bar of the download to stderr 126 """ 127 model = MobileNetV2(**kwargs) 128 if pretrained: 129 state_dict = load_state_dict_from_url(model_urls['mobilenet_v2'], 130 progress=progress) 131 model.load_state_dict(state_dict) 132 return model 133 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/torchvision/models/mobilenet.py b/torchvision/models/mobilenet.py --- a/torchvision/models/mobilenet.py +++ b/torchvision/models/mobilenet.py @@ -10,6 +10,26 @@ } +def _make_divisible(v, divisor, min_value=None): + """ + This function is taken from the original tf repo. + It ensures that all layers have a channel number that is divisible by 8 + It can be seen here: + https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py + :param v: + :param divisor: + :param min_value: + :return: + """ + if min_value is None: + min_value = divisor + new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) + # Make sure that round down does not go down by more than 10%. + if new_v < 0.9 * v: + new_v += divisor + return new_v + + class ConvBNReLU(nn.Sequential): def __init__(self, in_planes, out_planes, kernel_size=3, stride=1, groups=1): padding = (kernel_size - 1) // 2 @@ -50,7 +70,17 @@ class MobileNetV2(nn.Module): - def __init__(self, num_classes=1000, width_mult=1.0, inverted_residual_setting=None): + def __init__(self, num_classes=1000, width_mult=1.0, inverted_residual_setting=None, round_nearest=8): + """ + MobileNet V2 main class + + Args: + num_classes (int): Number of classes + width_mult (float): Width multiplier - adjusts number of channels in each layer by this amount + inverted_residual_setting: Network structure + round_nearest (int): Round the number of channels in each layer to be a multiple of this number + Set to 1 to turn off rounding + """ super(MobileNetV2, self).__init__() block = InvertedResidual input_channel = 32 @@ -74,12 +104,12 @@ "or a 4-element list, got {}".format(inverted_residual_setting)) # building first layer - input_channel = int(input_channel * width_mult) - self.last_channel = int(last_channel * max(1.0, width_mult)) + input_channel = _make_divisible(input_channel * width_mult, round_nearest) + self.last_channel = _make_divisible(last_channel * max(1.0, width_mult), round_nearest) features = [ConvBNReLU(3, input_channel, stride=2)] # building inverted residual blocks for t, c, n, s in inverted_residual_setting: - output_channel = int(c * width_mult) + output_channel = _make_divisible(c * width_mult, round_nearest) for i in range(n): stride = s if i == 0 else 1 features.append(block(input_channel, output_channel, stride, expand_ratio=t))
{"golden_diff": "diff --git a/torchvision/models/mobilenet.py b/torchvision/models/mobilenet.py\n--- a/torchvision/models/mobilenet.py\n+++ b/torchvision/models/mobilenet.py\n@@ -10,6 +10,26 @@\n }\n \n \n+def _make_divisible(v, divisor, min_value=None):\n+ \"\"\"\n+ This function is taken from the original tf repo.\n+ It ensures that all layers have a channel number that is divisible by 8\n+ It can be seen here:\n+ https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py\n+ :param v:\n+ :param divisor:\n+ :param min_value:\n+ :return:\n+ \"\"\"\n+ if min_value is None:\n+ min_value = divisor\n+ new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)\n+ # Make sure that round down does not go down by more than 10%.\n+ if new_v < 0.9 * v:\n+ new_v += divisor\n+ return new_v\n+\n+\n class ConvBNReLU(nn.Sequential):\n def __init__(self, in_planes, out_planes, kernel_size=3, stride=1, groups=1):\n padding = (kernel_size - 1) // 2\n@@ -50,7 +70,17 @@\n \n \n class MobileNetV2(nn.Module):\n- def __init__(self, num_classes=1000, width_mult=1.0, inverted_residual_setting=None):\n+ def __init__(self, num_classes=1000, width_mult=1.0, inverted_residual_setting=None, round_nearest=8):\n+ \"\"\"\n+ MobileNet V2 main class\n+\n+ Args:\n+ num_classes (int): Number of classes\n+ width_mult (float): Width multiplier - adjusts number of channels in each layer by this amount\n+ inverted_residual_setting: Network structure\n+ round_nearest (int): Round the number of channels in each layer to be a multiple of this number\n+ Set to 1 to turn off rounding\n+ \"\"\"\n super(MobileNetV2, self).__init__()\n block = InvertedResidual\n input_channel = 32\n@@ -74,12 +104,12 @@\n \"or a 4-element list, got {}\".format(inverted_residual_setting))\n \n # building first layer\n- input_channel = int(input_channel * width_mult)\n- self.last_channel = int(last_channel * max(1.0, width_mult))\n+ input_channel = _make_divisible(input_channel * width_mult, round_nearest)\n+ self.last_channel = _make_divisible(last_channel * max(1.0, width_mult), round_nearest)\n features = [ConvBNReLU(3, input_channel, stride=2)]\n # building inverted residual blocks\n for t, c, n, s in inverted_residual_setting:\n- output_channel = int(c * width_mult)\n+ output_channel = _make_divisible(c * width_mult, round_nearest)\n for i in range(n):\n stride = s if i == 0 else 1\n features.append(block(input_channel, output_channel, stride, expand_ratio=t))\n", "issue": "Mobilenet v2 width multiplier incorrect\nThere seems to be a small issue with the width multiplier in mobilenet v2. The official implementation rounds filter channels to a multiple of 8. \r\n\r\nFor example, mobilenet v2 width 1.4. 
The first conv layer has 44 channels as opposed to 48 in the official implementation:\r\nmodel = torchvision.models.mobilenet_v2(width_mult=1.4)\r\n\r\nfor module in model.modules():\r\n if isinstance(module, nn.Conv2d):\r\n print(module.weight.shape)\r\n\r\ntorch.Size([44, 3, 3, 3])\r\ntorch.Size([44, 1, 3, 3])\r\ntorch.Size([22, 44, 1, 1])\r\ntorch.Size([132, 22, 1, 1])\r\ntorch.Size([132, 1, 3, 3])\r\ntorch.Size([33, 132, 1, 1])\r\n\r\n\r\nCorresponding tensorflow 2.0 keras code:\r\nmodel = tf.keras.applications.MobileNetV2(\r\n weights=\"imagenet\", input_shape=(224, 224, 3), alpha=1.4)\r\nmodel.summary()\r\n\r\n__________________________________________________________________________________________________\r\nLayer (type) Output Shape Param # Connected to \r\n==================================================================================================\r\ninput_1 (InputLayer) [(None, 224, 224, 3) 0 \r\n__________________________________________________________________________________________________\r\nConv1_pad (ZeroPadding2D) (None, 225, 225, 3) 0 input_1[0][0] \r\n__________________________________________________________________________________________________\r\nConv1 (Conv2D) (None, 112, 112, 48) 1296 Conv1_pad[0][0] \r\n__________________________________________________________________________________________________\r\nbn_Conv1 (BatchNormalization) (None, 112, 112, 48) 192 Conv1[0][0] \r\n__________________________________________________________________________________________________\r\nConv1_relu (ReLU) (None, 112, 112, 48) 0 bn_Conv1[0][0] \r\n__________________________________________________________________________________________________\r\nexpanded_conv_depthwise (Depthw (None, 112, 112, 48) 432 Conv1_relu[0][0] \r\n__________________________________________________________________________________________________\r\nexpanded_conv_depthwise_BN (Bat (None, 112, 112, 48) 192 expanded_conv_depthwise[0][0] \r\n__________________________________________________________________________________________________\r\nexpanded_conv_depthwise_relu (R (None, 112, 112, 48) 0 expanded_conv_depthwise_BN[0][0] \r\n__________________________________________________________________________________________________\r\nexpanded_conv_project (Conv2D) (None, 112, 112, 24) 1152 expanded_conv_depthwise_relu[0][0\r\n__________________________________________________________________________________________________\r\nexpanded_conv_project_BN (Batch (None, 112, 112, 24) 96 expanded_conv_project[0][0] \r\n__________________________________________________________________________________________________\r\nblock_1_expand (Conv2D) (None, 112, 112, 144 3456 expanded_conv_project_BN[0][0] \r\n__________________________________________________________________________________________________\r\nblock_1_expand_BN (BatchNormali (None, 112, 112, 144 576 block_1_expand[0][0] \r\n__________________________________________________________________________________________________\r\nblock_1_expand_relu (ReLU) (None, 112, 112, 144 0 block_1_expand_BN[0][0] \r\n__________________________________________________________________________________________________\r\nblock_1_pad (ZeroPadding2D) (None, 113, 113, 144 0 block_1_expand_relu[0][0] \r\n__________________________________________________________________________________________________\r\nblock_1_depthwise (DepthwiseCon (None, 56, 56, 144) 1296 block_1_pad[0][0] 
\r\n__________________________________________________________________________________________________\r\nblock_1_depthwise_BN (BatchNorm (None, 56, 56, 144) 576 block_1_depthwise[0][0] \r\n__________________________________________________________________________________________________\r\nblock_1_depthwise_relu (ReLU) (None, 56, 56, 144) 0 block_1_depthwise_BN[0][0] \r\n__________________________________________________________________________________________________\r\nblock_1_project (Conv2D) (None, 56, 56, 32) 4608 block_1_depthwise_relu[0][0] \r\n__________________________________________________________________________________________________\r\n\r\n\r\nI've implemented a fix here:\r\nhttps://github.com/yaysummeriscoming/vision/blob/master/torchvision/models/mobilenet.py\r\n\r\nCan I merge it in?\n", "before_files": [{"content": "from torch import nn\nfrom .utils import load_state_dict_from_url\n\n\n__all__ = ['MobileNetV2', 'mobilenet_v2']\n\n\nmodel_urls = {\n 'mobilenet_v2': 'https://download.pytorch.org/models/mobilenet_v2-b0353104.pth',\n}\n\n\nclass ConvBNReLU(nn.Sequential):\n def __init__(self, in_planes, out_planes, kernel_size=3, stride=1, groups=1):\n padding = (kernel_size - 1) // 2\n super(ConvBNReLU, self).__init__(\n nn.Conv2d(in_planes, out_planes, kernel_size, stride, padding, groups=groups, bias=False),\n nn.BatchNorm2d(out_planes),\n nn.ReLU6(inplace=True)\n )\n\n\nclass InvertedResidual(nn.Module):\n def __init__(self, inp, oup, stride, expand_ratio):\n super(InvertedResidual, self).__init__()\n self.stride = stride\n assert stride in [1, 2]\n\n hidden_dim = int(round(inp * expand_ratio))\n self.use_res_connect = self.stride == 1 and inp == oup\n\n layers = []\n if expand_ratio != 1:\n # pw\n layers.append(ConvBNReLU(inp, hidden_dim, kernel_size=1))\n layers.extend([\n # dw\n ConvBNReLU(hidden_dim, hidden_dim, stride=stride, groups=hidden_dim),\n # pw-linear\n nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),\n nn.BatchNorm2d(oup),\n ])\n self.conv = nn.Sequential(*layers)\n\n def forward(self, x):\n if self.use_res_connect:\n return x + self.conv(x)\n else:\n return self.conv(x)\n\n\nclass MobileNetV2(nn.Module):\n def __init__(self, num_classes=1000, width_mult=1.0, inverted_residual_setting=None):\n super(MobileNetV2, self).__init__()\n block = InvertedResidual\n input_channel = 32\n last_channel = 1280\n\n if inverted_residual_setting is None:\n inverted_residual_setting = [\n # t, c, n, s\n [1, 16, 1, 1],\n [6, 24, 2, 2],\n [6, 32, 3, 2],\n [6, 64, 4, 2],\n [6, 96, 3, 1],\n [6, 160, 3, 2],\n [6, 320, 1, 1],\n ]\n\n # only check the first element, assuming user knows t,c,n,s are required\n if len(inverted_residual_setting) == 0 or len(inverted_residual_setting[0]) != 4:\n raise ValueError(\"inverted_residual_setting should be non-empty \"\n \"or a 4-element list, got {}\".format(inverted_residual_setting))\n\n # building first layer\n input_channel = int(input_channel * width_mult)\n self.last_channel = int(last_channel * max(1.0, width_mult))\n features = [ConvBNReLU(3, input_channel, stride=2)]\n # building inverted residual blocks\n for t, c, n, s in inverted_residual_setting:\n output_channel = int(c * width_mult)\n for i in range(n):\n stride = s if i == 0 else 1\n features.append(block(input_channel, output_channel, stride, expand_ratio=t))\n input_channel = output_channel\n # building last several layers\n features.append(ConvBNReLU(input_channel, self.last_channel, kernel_size=1))\n # make it nn.Sequential\n self.features = nn.Sequential(*features)\n\n 
# building classifier\n self.classifier = nn.Sequential(\n nn.Dropout(0.2),\n nn.Linear(self.last_channel, num_classes),\n )\n\n # weight initialization\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out')\n if m.bias is not None:\n nn.init.zeros_(m.bias)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.ones_(m.weight)\n nn.init.zeros_(m.bias)\n elif isinstance(m, nn.Linear):\n nn.init.normal_(m.weight, 0, 0.01)\n nn.init.zeros_(m.bias)\n\n def forward(self, x):\n x = self.features(x)\n x = x.mean([2, 3])\n x = self.classifier(x)\n return x\n\n\ndef mobilenet_v2(pretrained=False, progress=True, **kwargs):\n \"\"\"\n Constructs a MobileNetV2 architecture from\n `\"MobileNetV2: Inverted Residuals and Linear Bottlenecks\" <https://arxiv.org/abs/1801.04381>`_.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n model = MobileNetV2(**kwargs)\n if pretrained:\n state_dict = load_state_dict_from_url(model_urls['mobilenet_v2'],\n progress=progress)\n model.load_state_dict(state_dict)\n return model\n", "path": "torchvision/models/mobilenet.py"}], "after_files": [{"content": "from torch import nn\nfrom .utils import load_state_dict_from_url\n\n\n__all__ = ['MobileNetV2', 'mobilenet_v2']\n\n\nmodel_urls = {\n 'mobilenet_v2': 'https://download.pytorch.org/models/mobilenet_v2-b0353104.pth',\n}\n\n\ndef _make_divisible(v, divisor, min_value=None):\n \"\"\"\n This function is taken from the original tf repo.\n It ensures that all layers have a channel number that is divisible by 8\n It can be seen here:\n https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py\n :param v:\n :param divisor:\n :param min_value:\n :return:\n \"\"\"\n if min_value is None:\n min_value = divisor\n new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)\n # Make sure that round down does not go down by more than 10%.\n if new_v < 0.9 * v:\n new_v += divisor\n return new_v\n\n\nclass ConvBNReLU(nn.Sequential):\n def __init__(self, in_planes, out_planes, kernel_size=3, stride=1, groups=1):\n padding = (kernel_size - 1) // 2\n super(ConvBNReLU, self).__init__(\n nn.Conv2d(in_planes, out_planes, kernel_size, stride, padding, groups=groups, bias=False),\n nn.BatchNorm2d(out_planes),\n nn.ReLU6(inplace=True)\n )\n\n\nclass InvertedResidual(nn.Module):\n def __init__(self, inp, oup, stride, expand_ratio):\n super(InvertedResidual, self).__init__()\n self.stride = stride\n assert stride in [1, 2]\n\n hidden_dim = int(round(inp * expand_ratio))\n self.use_res_connect = self.stride == 1 and inp == oup\n\n layers = []\n if expand_ratio != 1:\n # pw\n layers.append(ConvBNReLU(inp, hidden_dim, kernel_size=1))\n layers.extend([\n # dw\n ConvBNReLU(hidden_dim, hidden_dim, stride=stride, groups=hidden_dim),\n # pw-linear\n nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),\n nn.BatchNorm2d(oup),\n ])\n self.conv = nn.Sequential(*layers)\n\n def forward(self, x):\n if self.use_res_connect:\n return x + self.conv(x)\n else:\n return self.conv(x)\n\n\nclass MobileNetV2(nn.Module):\n def __init__(self, num_classes=1000, width_mult=1.0, inverted_residual_setting=None, round_nearest=8):\n \"\"\"\n MobileNet V2 main class\n\n Args:\n num_classes (int): Number of classes\n width_mult (float): Width multiplier - adjusts number of channels in each layer by this amount\n inverted_residual_setting: Network structure\n round_nearest (int): 
Round the number of channels in each layer to be a multiple of this number\n Set to 1 to turn off rounding\n \"\"\"\n super(MobileNetV2, self).__init__()\n block = InvertedResidual\n input_channel = 32\n last_channel = 1280\n\n if inverted_residual_setting is None:\n inverted_residual_setting = [\n # t, c, n, s\n [1, 16, 1, 1],\n [6, 24, 2, 2],\n [6, 32, 3, 2],\n [6, 64, 4, 2],\n [6, 96, 3, 1],\n [6, 160, 3, 2],\n [6, 320, 1, 1],\n ]\n\n # only check the first element, assuming user knows t,c,n,s are required\n if len(inverted_residual_setting) == 0 or len(inverted_residual_setting[0]) != 4:\n raise ValueError(\"inverted_residual_setting should be non-empty \"\n \"or a 4-element list, got {}\".format(inverted_residual_setting))\n\n # building first layer\n input_channel = _make_divisible(input_channel * width_mult, round_nearest)\n self.last_channel = _make_divisible(last_channel * max(1.0, width_mult), round_nearest)\n features = [ConvBNReLU(3, input_channel, stride=2)]\n # building inverted residual blocks\n for t, c, n, s in inverted_residual_setting:\n output_channel = _make_divisible(c * width_mult, round_nearest)\n for i in range(n):\n stride = s if i == 0 else 1\n features.append(block(input_channel, output_channel, stride, expand_ratio=t))\n input_channel = output_channel\n # building last several layers\n features.append(ConvBNReLU(input_channel, self.last_channel, kernel_size=1))\n # make it nn.Sequential\n self.features = nn.Sequential(*features)\n\n # building classifier\n self.classifier = nn.Sequential(\n nn.Dropout(0.2),\n nn.Linear(self.last_channel, num_classes),\n )\n\n # weight initialization\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out')\n if m.bias is not None:\n nn.init.zeros_(m.bias)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.ones_(m.weight)\n nn.init.zeros_(m.bias)\n elif isinstance(m, nn.Linear):\n nn.init.normal_(m.weight, 0, 0.01)\n nn.init.zeros_(m.bias)\n\n def forward(self, x):\n x = self.features(x)\n x = x.mean([2, 3])\n x = self.classifier(x)\n return x\n\n\ndef mobilenet_v2(pretrained=False, progress=True, **kwargs):\n \"\"\"\n Constructs a MobileNetV2 architecture from\n `\"MobileNetV2: Inverted Residuals and Linear Bottlenecks\" <https://arxiv.org/abs/1801.04381>`_.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n model = MobileNetV2(**kwargs)\n if pretrained:\n state_dict = load_state_dict_from_url(model_urls['mobilenet_v2'],\n progress=progress)\n model.load_state_dict(state_dict)\n return model\n", "path": "torchvision/models/mobilenet.py"}]}
2840
731
gh_patches_debug_8344
rasdani/github-patches
git_diff
spack__spack-42976
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- package error linux-pam: fatal error: rpc/rpc.h: No such file or directory I'm trying to install flux-security, and this looks to be a dependency "linux-pam" ```console ==> Installing linux-pam-1.5.2-offodszhf3suwkcqq5z2c4anlyfzwykm [69/79] ==> No binary for linux-pam-1.5.2-offodszhf3suwkcqq5z2c4anlyfzwykm found: installing from source ==> Using cached archive: /opt/spack-environment/spack/var/spack/cache/_source-cache/archive/e4/e4ec7131a91da44512574268f493c6d8ca105c87091691b8e9b56ca685d4f94d.tar.xz ==> No patches needed for linux-pam ==> linux-pam: Executing phase: 'autoreconf' ==> linux-pam: Executing phase: 'configure' ==> linux-pam: Executing phase: 'build' ==> Error: ProcessError: Command exited with status 2: 'make' '-j16' 'V=1' 5 errors found in build log: 964 mv -f .deps/unix_chkpwd-unix_chkpwd.Tpo .deps/unix_chkpwd-unix_chkpwd.Po 965 libtool: compile: /opt/spack-environment/spack/lib/spack/env/gcc/gcc -DHAVE_CONFIG_H -I. -I../.. -I../../libpam/include -I../../libpamc/include -DCHKPWD_HELPER=\"/opt/soft ware/linux-ubuntu22.04-neoverse_v1/gcc-11.4.0/linux-pam-1.5.2-offodszhf3suwkcqq5z2c4anlyfzwykm/sbin/unix_chkpwd\" -DUPDATE_HELPER=\"/opt/software/linux-ubuntu22.04-neoverse _v1/gcc-11.4.0/linux-pam-1.5.2-offodszhf3suwkcqq5z2c4anlyfzwykm/sbin/unix_update\" -W -Wall -Wbad-function-cast -Wcast-align -Wcast-align=strict -Wcast-qual -Wdeprecated -W inline -Wmain -Wmissing-declarations -Wmissing-format-attribute -Wmissing-prototypes -Wpointer-arith -Wreturn-type -Wshadow -Wstrict-prototypes -Wuninitialized -Wwrite-stri ngs -g -O2 -MT bigcrypt.lo -MD -MP -MF .deps/bigcrypt.Tpo -c bigcrypt.c -fPIC -DPIC -o .libs/bigcrypt.o 966 /bin/bash ../../libtool --tag=CC --mode=compile /opt/spack-environment/spack/lib/spack/env/gcc/gcc -DHAVE_CONFIG_H -I. -I../.. -I../../libpam/include -I../../libpamc/ include -DCHKPWD_HELPER=\"/opt/software/linux-ubuntu22.04-neoverse_v1/gcc-11.4.0/linux-pam-1.5.2-offodszhf3suwkcqq5z2c4anlyfzwykm/sbin/unix_chkpwd\" -DUPDATE_HELPER=\"/opt/ software/linux-ubuntu22.04-neoverse_v1/gcc-11.4.0/linux-pam-1.5.2-offodszhf3suwkcqq5z2c4anlyfzwykm/sbin/unix_update\" -W -Wall -Wbad-function-cast -Wcast-align -Wcast-align =strict -Wcast-qual -Wdeprecated -Winline -Wmain -Wmissing-declarations -Wmissing-format-attribute -Wmissing-prototypes -Wpointer-arith -Wreturn-type -Wshadow -Wstrict-prot otypes -Wuninitialized -Wwrite-strings -g -O2 -MT md5_good.lo -MD -MP -MF .deps/md5_good.Tpo -c -o md5_good.lo md5_good.c 967 mv -f .deps/unix_update-unix_update.Tpo .deps/unix_update-unix_update.Po 968 /bin/bash ../../libtool --tag=CC --mode=compile /opt/spack-environment/spack/lib/spack/env/gcc/gcc -DHAVE_CONFIG_H -I. -I../.. 
-I../../libpam/include -I../../libpamc/ include -DCHKPWD_HELPER=\"/opt/software/linux-ubuntu22.04-neoverse_v1/gcc-11.4.0/linux-pam-1.5.2-offodszhf3suwkcqq5z2c4anlyfzwykm/sbin/unix_chkpwd\" -DUPDATE_HELPER=\"/opt/ software/linux-ubuntu22.04-neoverse_v1/gcc-11.4.0/linux-pam-1.5.2-offodszhf3suwkcqq5z2c4anlyfzwykm/sbin/unix_update\" -W -Wall -Wbad-function-cast -Wcast-align -Wcast-align =strict -Wcast-qual -Wdeprecated -Winline -Wmain -Wmissing-declarations -Wmissing-format-attribute -Wmissing-prototypes -Wpointer-arith -Wreturn-type -Wshadow -Wstrict-prot otypes -Wuninitialized -Wwrite-strings -g -O2 -MT md5_broken.lo -MD -MP -MF .deps/md5_broken.Tpo -c -o md5_broken.lo md5_broken.c 969 libtool: compile: /opt/spack-environment/spack/lib/spack/env/gcc/gcc -DHAVE_CONFIG_H -I. -I../.. -I../../libpam/include -I../../libpamc/include -DCHKPWD_HELPER=\"/opt/soft ware/linux-ubuntu22.04-neoverse_v1/gcc-11.4.0/linux-pam-1.5.2-offodszhf3suwkcqq5z2c4anlyfzwykm/sbin/unix_chkpwd\" -DUPDATE_HELPER=\"/opt/software/linux-ubuntu22.04-neoverse _v1/gcc-11.4.0/linux-pam-1.5.2-offodszhf3suwkcqq5z2c4anlyfzwykm/sbin/unix_update\" -W -Wall -Wbad-function-cast -Wcast-align -Wcast-align=strict -Wcast-qual -Wdeprecated -W inline -Wmain -Wmissing-declarations -Wmissing-format-attribute -Wmissing-prototypes -Wpointer-arith -Wreturn-type -Wshadow -Wstrict-prototypes -Wuninitialized -Wwrite-stri ngs -g -O2 -MT pam_unix_sess.lo -MD -MP -MF .deps/pam_unix_sess.Tpo -c pam_unix_sess.c -fPIC -DPIC -o .libs/pam_unix_sess.o >> 970 pam_unix_passwd.c:80:11: fatal error: rpc/rpc.h: No such file or directory 971 80 | # include <rpc/rpc.h> 972 | ^~~~~~~~~~~ 973 compilation terminated. ``` I tried installing rpc.h on my host, but to no avail - it likely needs to be defined with the package here. Thanks for the help! --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `var/spack/repos/builtin/packages/linux-pam/package.py` Content: ``` 1 # Copyright 2013-2024 Lawrence Livermore National Security, LLC and other 2 # Spack Project Developers. See the top-level COPYRIGHT file for details. 3 # 4 # SPDX-License-Identifier: (Apache-2.0 OR MIT) 5 6 from spack.package import * 7 8 9 class LinuxPam(AutotoolsPackage): 10 """Linux PAM (Pluggable Authentication Modules for Linux) project.""" 11 12 homepage = "http://www.linux-pam.org/" 13 url = "https://github.com/linux-pam/linux-pam/releases/download/v1.5.2/Linux-PAM-1.5.2.tar.xz" 14 15 license("BSD-3-Clause") 16 17 version("1.5.1", sha256="201d40730b1135b1b3cdea09f2c28ac634d73181ccd0172ceddee3649c5792fc") 18 version("1.5.2", sha256="e4ec7131a91da44512574268f493c6d8ca105c87091691b8e9b56ca685d4f94d") 19 version("1.5.0", sha256="02d39854b508fae9dc713f7733bbcdadbe17b50de965aedddd65bcb6cc7852c8") 20 version("1.4.0", sha256="cd6d928c51e64139be3bdb38692c68183a509b83d4f2c221024ccd4bcddfd034") 21 version("1.3.1", sha256="eff47a4ecd833fbf18de9686632a70ee8d0794b79aecb217ebd0ce11db4cd0db") 22 23 depends_on("m4", type="build") 24 depends_on("autoconf", type="build") 25 depends_on("automake", type="build") 26 depends_on("libtool", type="build") 27 28 def configure_args(self): 29 config_args = ["--includedir=" + self.prefix.include.security] 30 return config_args 31 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/var/spack/repos/builtin/packages/linux-pam/package.py b/var/spack/repos/builtin/packages/linux-pam/package.py --- a/var/spack/repos/builtin/packages/linux-pam/package.py +++ b/var/spack/repos/builtin/packages/linux-pam/package.py @@ -20,6 +20,8 @@ version("1.4.0", sha256="cd6d928c51e64139be3bdb38692c68183a509b83d4f2c221024ccd4bcddfd034") version("1.3.1", sha256="eff47a4ecd833fbf18de9686632a70ee8d0794b79aecb217ebd0ce11db4cd0db") + depends_on("libtirpc") + depends_on("m4", type="build") depends_on("autoconf", type="build") depends_on("automake", type="build")
{"golden_diff": "diff --git a/var/spack/repos/builtin/packages/linux-pam/package.py b/var/spack/repos/builtin/packages/linux-pam/package.py\n--- a/var/spack/repos/builtin/packages/linux-pam/package.py\n+++ b/var/spack/repos/builtin/packages/linux-pam/package.py\n@@ -20,6 +20,8 @@\n version(\"1.4.0\", sha256=\"cd6d928c51e64139be3bdb38692c68183a509b83d4f2c221024ccd4bcddfd034\")\n version(\"1.3.1\", sha256=\"eff47a4ecd833fbf18de9686632a70ee8d0794b79aecb217ebd0ce11db4cd0db\")\n \n+ depends_on(\"libtirpc\")\n+\n depends_on(\"m4\", type=\"build\")\n depends_on(\"autoconf\", type=\"build\")\n depends_on(\"automake\", type=\"build\")\n", "issue": "package error linux-pam: fatal error: rpc/rpc.h: No such file or directory\nI'm trying to install flux-security, and this looks to be a dependency \"linux-pam\"\r\n\r\n```console\r\n==> Installing linux-pam-1.5.2-offodszhf3suwkcqq5z2c4anlyfzwykm [69/79]\r\n==> No binary for linux-pam-1.5.2-offodszhf3suwkcqq5z2c4anlyfzwykm found: installing from source\r\n==> Using cached archive: /opt/spack-environment/spack/var/spack/cache/_source-cache/archive/e4/e4ec7131a91da44512574268f493c6d8ca105c87091691b8e9b56ca685d4f94d.tar.xz\r\n==> No patches needed for linux-pam\r\n==> linux-pam: Executing phase: 'autoreconf'\r\n==> linux-pam: Executing phase: 'configure'\r\n==> linux-pam: Executing phase: 'build'\r\n==> Error: ProcessError: Command exited with status 2:\r\n 'make' '-j16' 'V=1'\r\n\r\n5 errors found in build log:\r\n 964 mv -f .deps/unix_chkpwd-unix_chkpwd.Tpo .deps/unix_chkpwd-unix_chkpwd.Po\r\n 965 libtool: compile: /opt/spack-environment/spack/lib/spack/env/gcc/gcc -DHAVE_CONFIG_H -I. -I../.. -I../../libpam/include -I../../libpamc/include -DCHKPWD_HELPER=\\\"/opt/soft\r\n ware/linux-ubuntu22.04-neoverse_v1/gcc-11.4.0/linux-pam-1.5.2-offodszhf3suwkcqq5z2c4anlyfzwykm/sbin/unix_chkpwd\\\" -DUPDATE_HELPER=\\\"/opt/software/linux-ubuntu22.04-neoverse\r\n _v1/gcc-11.4.0/linux-pam-1.5.2-offodszhf3suwkcqq5z2c4anlyfzwykm/sbin/unix_update\\\" -W -Wall -Wbad-function-cast -Wcast-align -Wcast-align=strict -Wcast-qual -Wdeprecated -W\r\n inline -Wmain -Wmissing-declarations -Wmissing-format-attribute -Wmissing-prototypes -Wpointer-arith -Wreturn-type -Wshadow -Wstrict-prototypes -Wuninitialized -Wwrite-stri\r\n ngs -g -O2 -MT bigcrypt.lo -MD -MP -MF .deps/bigcrypt.Tpo -c bigcrypt.c -fPIC -DPIC -o .libs/bigcrypt.o\r\n 966 /bin/bash ../../libtool --tag=CC --mode=compile /opt/spack-environment/spack/lib/spack/env/gcc/gcc -DHAVE_CONFIG_H -I. -I../.. -I../../libpam/include -I../../libpamc/\r\n include -DCHKPWD_HELPER=\\\"/opt/software/linux-ubuntu22.04-neoverse_v1/gcc-11.4.0/linux-pam-1.5.2-offodszhf3suwkcqq5z2c4anlyfzwykm/sbin/unix_chkpwd\\\" -DUPDATE_HELPER=\\\"/opt/\r\n software/linux-ubuntu22.04-neoverse_v1/gcc-11.4.0/linux-pam-1.5.2-offodszhf3suwkcqq5z2c4anlyfzwykm/sbin/unix_update\\\" -W -Wall -Wbad-function-cast -Wcast-align -Wcast-align\r\n =strict -Wcast-qual -Wdeprecated -Winline -Wmain -Wmissing-declarations -Wmissing-format-attribute -Wmissing-prototypes -Wpointer-arith -Wreturn-type -Wshadow -Wstrict-prot\r\n otypes -Wuninitialized -Wwrite-strings -g -O2 -MT md5_good.lo -MD -MP -MF .deps/md5_good.Tpo -c -o md5_good.lo md5_good.c\r\n 967 mv -f .deps/unix_update-unix_update.Tpo .deps/unix_update-unix_update.Po\r\n 968 /bin/bash ../../libtool --tag=CC --mode=compile /opt/spack-environment/spack/lib/spack/env/gcc/gcc -DHAVE_CONFIG_H -I. -I../.. 
-I../../libpam/include -I../../libpamc/\r\n include -DCHKPWD_HELPER=\\\"/opt/software/linux-ubuntu22.04-neoverse_v1/gcc-11.4.0/linux-pam-1.5.2-offodszhf3suwkcqq5z2c4anlyfzwykm/sbin/unix_chkpwd\\\" -DUPDATE_HELPER=\\\"/opt/\r\n software/linux-ubuntu22.04-neoverse_v1/gcc-11.4.0/linux-pam-1.5.2-offodszhf3suwkcqq5z2c4anlyfzwykm/sbin/unix_update\\\" -W -Wall -Wbad-function-cast -Wcast-align -Wcast-align\r\n =strict -Wcast-qual -Wdeprecated -Winline -Wmain -Wmissing-declarations -Wmissing-format-attribute -Wmissing-prototypes -Wpointer-arith -Wreturn-type -Wshadow -Wstrict-prot\r\n otypes -Wuninitialized -Wwrite-strings -g -O2 -MT md5_broken.lo -MD -MP -MF .deps/md5_broken.Tpo -c -o md5_broken.lo md5_broken.c\r\n 969 libtool: compile: /opt/spack-environment/spack/lib/spack/env/gcc/gcc -DHAVE_CONFIG_H -I. -I../.. -I../../libpam/include -I../../libpamc/include -DCHKPWD_HELPER=\\\"/opt/soft\r\n ware/linux-ubuntu22.04-neoverse_v1/gcc-11.4.0/linux-pam-1.5.2-offodszhf3suwkcqq5z2c4anlyfzwykm/sbin/unix_chkpwd\\\" -DUPDATE_HELPER=\\\"/opt/software/linux-ubuntu22.04-neoverse\r\n _v1/gcc-11.4.0/linux-pam-1.5.2-offodszhf3suwkcqq5z2c4anlyfzwykm/sbin/unix_update\\\" -W -Wall -Wbad-function-cast -Wcast-align -Wcast-align=strict -Wcast-qual -Wdeprecated -W\r\n inline -Wmain -Wmissing-declarations -Wmissing-format-attribute -Wmissing-prototypes -Wpointer-arith -Wreturn-type -Wshadow -Wstrict-prototypes -Wuninitialized -Wwrite-stri\r\n ngs -g -O2 -MT pam_unix_sess.lo -MD -MP -MF .deps/pam_unix_sess.Tpo -c pam_unix_sess.c -fPIC -DPIC -o .libs/pam_unix_sess.o\r\n >> 970 pam_unix_passwd.c:80:11: fatal error: rpc/rpc.h: No such file or directory\r\n 971 80 | # include <rpc/rpc.h>\r\n 972 | ^~~~~~~~~~~\r\n 973 compilation terminated.\r\n```\r\n\r\nI tried installing rpc.h on my host, but to no avail - it likely needs to be defined with the package here. Thanks for the help!\n", "before_files": [{"content": "# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\nfrom spack.package import *\n\n\nclass LinuxPam(AutotoolsPackage):\n \"\"\"Linux PAM (Pluggable Authentication Modules for Linux) project.\"\"\"\n\n homepage = \"http://www.linux-pam.org/\"\n url = \"https://github.com/linux-pam/linux-pam/releases/download/v1.5.2/Linux-PAM-1.5.2.tar.xz\"\n\n license(\"BSD-3-Clause\")\n\n version(\"1.5.1\", sha256=\"201d40730b1135b1b3cdea09f2c28ac634d73181ccd0172ceddee3649c5792fc\")\n version(\"1.5.2\", sha256=\"e4ec7131a91da44512574268f493c6d8ca105c87091691b8e9b56ca685d4f94d\")\n version(\"1.5.0\", sha256=\"02d39854b508fae9dc713f7733bbcdadbe17b50de965aedddd65bcb6cc7852c8\")\n version(\"1.4.0\", sha256=\"cd6d928c51e64139be3bdb38692c68183a509b83d4f2c221024ccd4bcddfd034\")\n version(\"1.3.1\", sha256=\"eff47a4ecd833fbf18de9686632a70ee8d0794b79aecb217ebd0ce11db4cd0db\")\n\n depends_on(\"m4\", type=\"build\")\n depends_on(\"autoconf\", type=\"build\")\n depends_on(\"automake\", type=\"build\")\n depends_on(\"libtool\", type=\"build\")\n\n def configure_args(self):\n config_args = [\"--includedir=\" + self.prefix.include.security]\n return config_args\n", "path": "var/spack/repos/builtin/packages/linux-pam/package.py"}], "after_files": [{"content": "# Copyright 2013-2024 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. 
See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\nfrom spack.package import *\n\n\nclass LinuxPam(AutotoolsPackage):\n \"\"\"Linux PAM (Pluggable Authentication Modules for Linux) project.\"\"\"\n\n homepage = \"http://www.linux-pam.org/\"\n url = \"https://github.com/linux-pam/linux-pam/releases/download/v1.5.2/Linux-PAM-1.5.2.tar.xz\"\n\n license(\"BSD-3-Clause\")\n\n version(\"1.5.1\", sha256=\"201d40730b1135b1b3cdea09f2c28ac634d73181ccd0172ceddee3649c5792fc\")\n version(\"1.5.2\", sha256=\"e4ec7131a91da44512574268f493c6d8ca105c87091691b8e9b56ca685d4f94d\")\n version(\"1.5.0\", sha256=\"02d39854b508fae9dc713f7733bbcdadbe17b50de965aedddd65bcb6cc7852c8\")\n version(\"1.4.0\", sha256=\"cd6d928c51e64139be3bdb38692c68183a509b83d4f2c221024ccd4bcddfd034\")\n version(\"1.3.1\", sha256=\"eff47a4ecd833fbf18de9686632a70ee8d0794b79aecb217ebd0ce11db4cd0db\")\n\n depends_on(\"libtirpc\")\n\n depends_on(\"m4\", type=\"build\")\n depends_on(\"autoconf\", type=\"build\")\n depends_on(\"automake\", type=\"build\")\n depends_on(\"libtool\", type=\"build\")\n\n def configure_args(self):\n config_args = [\"--includedir=\" + self.prefix.include.security]\n return config_args\n", "path": "var/spack/repos/builtin/packages/linux-pam/package.py"}]}
2,599
252
gh_patches_debug_20046
rasdani/github-patches
git_diff
pytorch__vision-4649
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- WIDERFace download fails with zip error Originally from https://github.com/pytorch/vision/pull/4614#issuecomment-943468223 ```py In [4]: torchvision.datasets.WIDERFace(root='/tmp/lol', split='train', download=True) ``` ``` 91473it [00:00, 9168293.30it/s] --------------------------------------------------------------------------- BadZipFile Traceback (most recent call last) <ipython-input-4-61c4acdeef4e> in <module> ----> 1 torchvision.datasets.WIDERFace(root='/tmp/lol', split='train', download=True) ~/dev/vision/torchvision/datasets/widerface.py in __init__(self, root, split, transform, target_transform, download) 70 71 if download: ---> 72 self.download() 73 74 if not self._check_integrity(): ~/dev/vision/torchvision/datasets/widerface.py in download(self) 191 download_file_from_google_drive(file_id, self.root, filename, md5) 192 filepath = os.path.join(self.root, filename) --> 193 extract_archive(filepath) 194 195 # download and extract annotation files ~/dev/vision/torchvision/datasets/utils.py in extract_archive(from_path, to_path, remove_finished) 407 extractor = _ARCHIVE_EXTRACTORS[archive_type] 408 --> 409 extractor(from_path, to_path, compression) 410 411 return to_path ~/dev/vision/torchvision/datasets/utils.py in _extract_zip(from_path, to_path, compression) 281 282 def _extract_zip(from_path: str, to_path: str, compression: Optional[str]) -> None: --> 283 with zipfile.ZipFile( 284 from_path, "r", compression=_ZIP_COMPRESSION_MAP[compression] if compression else zipfile.ZIP_STORED 285 ) as zip: ~/opt/miniconda3/envs/pt/lib/python3.8/zipfile.py in __init__(self, file, mode, compression, allowZip64, compresslevel, strict_timestamps) 1267 try: 1268 if mode == 'r': -> 1269 self._RealGetContents() 1270 elif mode in ('w', 'x'): 1271 # set the modified flag so central directory gets written ~/opt/miniconda3/envs/pt/lib/python3.8/zipfile.py in _RealGetContents(self) 1334 raise BadZipFile("File is not a zip file") 1335 if not endrec: -> 1336 raise BadZipFile("File is not a zip file") 1337 if self.debug > 1: 1338 print(endrec) BadZipFile: File is not a zip file ``` cc @pmeier --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `torchvision/datasets/widerface.py` Content: ``` 1 import os 2 from os.path import abspath, expanduser 3 from typing import Any, Callable, List, Dict, Optional, Tuple, Union 4 5 import torch 6 from PIL import Image 7 8 from .utils import ( 9 download_file_from_google_drive, 10 download_and_extract_archive, 11 extract_archive, 12 verify_str_arg, 13 ) 14 from .vision import VisionDataset 15 16 17 class WIDERFace(VisionDataset): 18 """`WIDERFace <http://shuoyang1213.me/WIDERFACE/>`_ Dataset. 19 20 Args: 21 root (string): Root directory where images and annotations are downloaded to. 22 Expects the following folder structure if download=False: 23 24 .. code:: 25 26 <root> 27 └── widerface 28 ├── wider_face_split ('wider_face_split.zip' if compressed) 29 ├── WIDER_train ('WIDER_train.zip' if compressed) 30 ├── WIDER_val ('WIDER_val.zip' if compressed) 31 └── WIDER_test ('WIDER_test.zip' if compressed) 32 split (string): The dataset split to use. One of {``train``, ``val``, ``test``}. 33 Defaults to ``train``. 34 transform (callable, optional): A function/transform that takes in a PIL image 35 and returns a transformed version. 
E.g, ``transforms.RandomCrop`` 36 target_transform (callable, optional): A function/transform that takes in the 37 target and transforms it. 38 download (bool, optional): If true, downloads the dataset from the internet and 39 puts it in root directory. If dataset is already downloaded, it is not 40 downloaded again. 41 42 """ 43 44 BASE_FOLDER = "widerface" 45 FILE_LIST = [ 46 # File ID MD5 Hash Filename 47 ("0B6eKvaijfFUDQUUwd21EckhUbWs", "3fedf70df600953d25982bcd13d91ba2", "WIDER_train.zip"), 48 ("0B6eKvaijfFUDd3dIRmpvSk8tLUk", "dfa7d7e790efa35df3788964cf0bbaea", "WIDER_val.zip"), 49 ("0B6eKvaijfFUDbW4tdGpaYjgzZkU", "e5d8f4248ed24c334bbd12f49c29dd40", "WIDER_test.zip"), 50 ] 51 ANNOTATIONS_FILE = ( 52 "http://mmlab.ie.cuhk.edu.hk/projects/WIDERFace/support/bbx_annotation/wider_face_split.zip", 53 "0e3767bcf0e326556d407bf5bff5d27c", 54 "wider_face_split.zip", 55 ) 56 57 def __init__( 58 self, 59 root: str, 60 split: str = "train", 61 transform: Optional[Callable] = None, 62 target_transform: Optional[Callable] = None, 63 download: bool = False, 64 ) -> None: 65 super(WIDERFace, self).__init__( 66 root=os.path.join(root, self.BASE_FOLDER), transform=transform, target_transform=target_transform 67 ) 68 # check arguments 69 self.split = verify_str_arg(split, "split", ("train", "val", "test")) 70 71 if download: 72 self.download() 73 74 if not self._check_integrity(): 75 raise RuntimeError( 76 "Dataset not found or corrupted. " + "You can use download=True to download and prepare it" 77 ) 78 79 self.img_info: List[Dict[str, Union[str, Dict[str, torch.Tensor]]]] = [] 80 if self.split in ("train", "val"): 81 self.parse_train_val_annotations_file() 82 else: 83 self.parse_test_annotations_file() 84 85 def __getitem__(self, index: int) -> Tuple[Any, Any]: 86 """ 87 Args: 88 index (int): Index 89 90 Returns: 91 tuple: (image, target) where target is a dict of annotations for all faces in the image. 92 target=None for the test split. 
93 """ 94 95 # stay consistent with other datasets and return a PIL Image 96 img = Image.open(self.img_info[index]["img_path"]) 97 98 if self.transform is not None: 99 img = self.transform(img) 100 101 target = None if self.split == "test" else self.img_info[index]["annotations"] 102 if self.target_transform is not None: 103 target = self.target_transform(target) 104 105 return img, target 106 107 def __len__(self) -> int: 108 return len(self.img_info) 109 110 def extra_repr(self) -> str: 111 lines = ["Split: {split}"] 112 return "\n".join(lines).format(**self.__dict__) 113 114 def parse_train_val_annotations_file(self) -> None: 115 filename = "wider_face_train_bbx_gt.txt" if self.split == "train" else "wider_face_val_bbx_gt.txt" 116 filepath = os.path.join(self.root, "wider_face_split", filename) 117 118 with open(filepath, "r") as f: 119 lines = f.readlines() 120 file_name_line, num_boxes_line, box_annotation_line = True, False, False 121 num_boxes, box_counter = 0, 0 122 labels = [] 123 for line in lines: 124 line = line.rstrip() 125 if file_name_line: 126 img_path = os.path.join(self.root, "WIDER_" + self.split, "images", line) 127 img_path = abspath(expanduser(img_path)) 128 file_name_line = False 129 num_boxes_line = True 130 elif num_boxes_line: 131 num_boxes = int(line) 132 num_boxes_line = False 133 box_annotation_line = True 134 elif box_annotation_line: 135 box_counter += 1 136 line_split = line.split(" ") 137 line_values = [int(x) for x in line_split] 138 labels.append(line_values) 139 if box_counter >= num_boxes: 140 box_annotation_line = False 141 file_name_line = True 142 labels_tensor = torch.tensor(labels) 143 self.img_info.append( 144 { 145 "img_path": img_path, 146 "annotations": { 147 "bbox": labels_tensor[:, 0:4], # x, y, width, height 148 "blur": labels_tensor[:, 4], 149 "expression": labels_tensor[:, 5], 150 "illumination": labels_tensor[:, 6], 151 "occlusion": labels_tensor[:, 7], 152 "pose": labels_tensor[:, 8], 153 "invalid": labels_tensor[:, 9], 154 }, 155 } 156 ) 157 box_counter = 0 158 labels.clear() 159 else: 160 raise RuntimeError("Error parsing annotation file {}".format(filepath)) 161 162 def parse_test_annotations_file(self) -> None: 163 filepath = os.path.join(self.root, "wider_face_split", "wider_face_test_filelist.txt") 164 filepath = abspath(expanduser(filepath)) 165 with open(filepath, "r") as f: 166 lines = f.readlines() 167 for line in lines: 168 line = line.rstrip() 169 img_path = os.path.join(self.root, "WIDER_test", "images", line) 170 img_path = abspath(expanduser(img_path)) 171 self.img_info.append({"img_path": img_path}) 172 173 def _check_integrity(self) -> bool: 174 # Allow original archive to be deleted (zip). 
Only need the extracted images 175 all_files = self.FILE_LIST.copy() 176 all_files.append(self.ANNOTATIONS_FILE) 177 for (_, md5, filename) in all_files: 178 file, ext = os.path.splitext(filename) 179 extracted_dir = os.path.join(self.root, file) 180 if not os.path.exists(extracted_dir): 181 return False 182 return True 183 184 def download(self) -> None: 185 if self._check_integrity(): 186 print("Files already downloaded and verified") 187 return 188 189 # download and extract image data 190 for (file_id, md5, filename) in self.FILE_LIST: 191 download_file_from_google_drive(file_id, self.root, filename, md5) 192 filepath = os.path.join(self.root, filename) 193 extract_archive(filepath) 194 195 # download and extract annotation files 196 download_and_extract_archive( 197 url=self.ANNOTATIONS_FILE[0], download_root=self.root, md5=self.ANNOTATIONS_FILE[1] 198 ) 199 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/torchvision/datasets/widerface.py b/torchvision/datasets/widerface.py --- a/torchvision/datasets/widerface.py +++ b/torchvision/datasets/widerface.py @@ -43,13 +43,13 @@ BASE_FOLDER = "widerface" FILE_LIST = [ - # File ID MD5 Hash Filename - ("0B6eKvaijfFUDQUUwd21EckhUbWs", "3fedf70df600953d25982bcd13d91ba2", "WIDER_train.zip"), - ("0B6eKvaijfFUDd3dIRmpvSk8tLUk", "dfa7d7e790efa35df3788964cf0bbaea", "WIDER_val.zip"), - ("0B6eKvaijfFUDbW4tdGpaYjgzZkU", "e5d8f4248ed24c334bbd12f49c29dd40", "WIDER_test.zip"), + # File ID MD5 Hash Filename + ("15hGDLhsx8bLgLcIRD5DhYt5iBxnjNF1M", "3fedf70df600953d25982bcd13d91ba2", "WIDER_train.zip"), + ("1GUCogbp16PMGa39thoMMeWxp7Rp5oM8Q", "dfa7d7e790efa35df3788964cf0bbaea", "WIDER_val.zip"), + ("1HIfDbVEWKmsYKJZm4lchTBDLW5N7dY5T", "e5d8f4248ed24c334bbd12f49c29dd40", "WIDER_test.zip"), ] ANNOTATIONS_FILE = ( - "http://mmlab.ie.cuhk.edu.hk/projects/WIDERFace/support/bbx_annotation/wider_face_split.zip", + "http://shuoyang1213.me/WIDERFACE/support/bbx_annotation/wider_face_split.zip", "0e3767bcf0e326556d407bf5bff5d27c", "wider_face_split.zip", )
{"golden_diff": "diff --git a/torchvision/datasets/widerface.py b/torchvision/datasets/widerface.py\n--- a/torchvision/datasets/widerface.py\n+++ b/torchvision/datasets/widerface.py\n@@ -43,13 +43,13 @@\n \n BASE_FOLDER = \"widerface\"\n FILE_LIST = [\n- # File ID MD5 Hash Filename\n- (\"0B6eKvaijfFUDQUUwd21EckhUbWs\", \"3fedf70df600953d25982bcd13d91ba2\", \"WIDER_train.zip\"),\n- (\"0B6eKvaijfFUDd3dIRmpvSk8tLUk\", \"dfa7d7e790efa35df3788964cf0bbaea\", \"WIDER_val.zip\"),\n- (\"0B6eKvaijfFUDbW4tdGpaYjgzZkU\", \"e5d8f4248ed24c334bbd12f49c29dd40\", \"WIDER_test.zip\"),\n+ # File ID MD5 Hash Filename\n+ (\"15hGDLhsx8bLgLcIRD5DhYt5iBxnjNF1M\", \"3fedf70df600953d25982bcd13d91ba2\", \"WIDER_train.zip\"),\n+ (\"1GUCogbp16PMGa39thoMMeWxp7Rp5oM8Q\", \"dfa7d7e790efa35df3788964cf0bbaea\", \"WIDER_val.zip\"),\n+ (\"1HIfDbVEWKmsYKJZm4lchTBDLW5N7dY5T\", \"e5d8f4248ed24c334bbd12f49c29dd40\", \"WIDER_test.zip\"),\n ]\n ANNOTATIONS_FILE = (\n- \"http://mmlab.ie.cuhk.edu.hk/projects/WIDERFace/support/bbx_annotation/wider_face_split.zip\",\n+ \"http://shuoyang1213.me/WIDERFACE/support/bbx_annotation/wider_face_split.zip\",\n \"0e3767bcf0e326556d407bf5bff5d27c\",\n \"wider_face_split.zip\",\n )\n", "issue": "WIDERFace download fails with zip error\nOriginally from https://github.com/pytorch/vision/pull/4614#issuecomment-943468223\r\n\r\n```py\r\nIn [4]: torchvision.datasets.WIDERFace(root='/tmp/lol', split='train', download=True)\r\n```\r\n\r\n```\r\n91473it [00:00, 9168293.30it/s]\r\n---------------------------------------------------------------------------\r\nBadZipFile Traceback (most recent call last)\r\n<ipython-input-4-61c4acdeef4e> in <module>\r\n----> 1 torchvision.datasets.WIDERFace(root='/tmp/lol', split='train', download=True)\r\n\r\n~/dev/vision/torchvision/datasets/widerface.py in __init__(self, root, split, transform, target_transform, download)\r\n 70\r\n 71 if download:\r\n---> 72 self.download()\r\n 73\r\n 74 if not self._check_integrity():\r\n\r\n~/dev/vision/torchvision/datasets/widerface.py in download(self)\r\n 191 download_file_from_google_drive(file_id, self.root, filename, md5)\r\n 192 filepath = os.path.join(self.root, filename)\r\n--> 193 extract_archive(filepath)\r\n 194\r\n 195 # download and extract annotation files\r\n\r\n~/dev/vision/torchvision/datasets/utils.py in extract_archive(from_path, to_path, remove_finished)\r\n 407 extractor = _ARCHIVE_EXTRACTORS[archive_type]\r\n 408\r\n--> 409 extractor(from_path, to_path, compression)\r\n 410\r\n 411 return to_path\r\n\r\n~/dev/vision/torchvision/datasets/utils.py in _extract_zip(from_path, to_path, compression)\r\n 281\r\n 282 def _extract_zip(from_path: str, to_path: str, compression: Optional[str]) -> None:\r\n--> 283 with zipfile.ZipFile(\r\n 284 from_path, \"r\", compression=_ZIP_COMPRESSION_MAP[compression] if compression else zipfile.ZIP_STORED\r\n 285 ) as zip:\r\n\r\n~/opt/miniconda3/envs/pt/lib/python3.8/zipfile.py in __init__(self, file, mode, compression, allowZip64, compresslevel, strict_timestamps)\r\n 1267 try:\r\n 1268 if mode == 'r':\r\n-> 1269 self._RealGetContents()\r\n 1270 elif mode in ('w', 'x'):\r\n 1271 # set the modified flag so central directory gets written\r\n\r\n~/opt/miniconda3/envs/pt/lib/python3.8/zipfile.py in _RealGetContents(self)\r\n 1334 raise BadZipFile(\"File is not a zip file\")\r\n 1335 if not endrec:\r\n-> 1336 raise BadZipFile(\"File is not a zip file\")\r\n 1337 if self.debug > 1:\r\n 1338 print(endrec)\r\n\r\nBadZipFile: File is not a zip file\r\n```\n\ncc @pmeier\n", "before_files": 
[{"content": "import os\nfrom os.path import abspath, expanduser\nfrom typing import Any, Callable, List, Dict, Optional, Tuple, Union\n\nimport torch\nfrom PIL import Image\n\nfrom .utils import (\n download_file_from_google_drive,\n download_and_extract_archive,\n extract_archive,\n verify_str_arg,\n)\nfrom .vision import VisionDataset\n\n\nclass WIDERFace(VisionDataset):\n \"\"\"`WIDERFace <http://shuoyang1213.me/WIDERFACE/>`_ Dataset.\n\n Args:\n root (string): Root directory where images and annotations are downloaded to.\n Expects the following folder structure if download=False:\n\n .. code::\n\n <root>\n \u2514\u2500\u2500 widerface\n \u251c\u2500\u2500 wider_face_split ('wider_face_split.zip' if compressed)\n \u251c\u2500\u2500 WIDER_train ('WIDER_train.zip' if compressed)\n \u251c\u2500\u2500 WIDER_val ('WIDER_val.zip' if compressed)\n \u2514\u2500\u2500 WIDER_test ('WIDER_test.zip' if compressed)\n split (string): The dataset split to use. One of {``train``, ``val``, ``test``}.\n Defaults to ``train``.\n transform (callable, optional): A function/transform that takes in a PIL image\n and returns a transformed version. E.g, ``transforms.RandomCrop``\n target_transform (callable, optional): A function/transform that takes in the\n target and transforms it.\n download (bool, optional): If true, downloads the dataset from the internet and\n puts it in root directory. If dataset is already downloaded, it is not\n downloaded again.\n\n \"\"\"\n\n BASE_FOLDER = \"widerface\"\n FILE_LIST = [\n # File ID MD5 Hash Filename\n (\"0B6eKvaijfFUDQUUwd21EckhUbWs\", \"3fedf70df600953d25982bcd13d91ba2\", \"WIDER_train.zip\"),\n (\"0B6eKvaijfFUDd3dIRmpvSk8tLUk\", \"dfa7d7e790efa35df3788964cf0bbaea\", \"WIDER_val.zip\"),\n (\"0B6eKvaijfFUDbW4tdGpaYjgzZkU\", \"e5d8f4248ed24c334bbd12f49c29dd40\", \"WIDER_test.zip\"),\n ]\n ANNOTATIONS_FILE = (\n \"http://mmlab.ie.cuhk.edu.hk/projects/WIDERFace/support/bbx_annotation/wider_face_split.zip\",\n \"0e3767bcf0e326556d407bf5bff5d27c\",\n \"wider_face_split.zip\",\n )\n\n def __init__(\n self,\n root: str,\n split: str = \"train\",\n transform: Optional[Callable] = None,\n target_transform: Optional[Callable] = None,\n download: bool = False,\n ) -> None:\n super(WIDERFace, self).__init__(\n root=os.path.join(root, self.BASE_FOLDER), transform=transform, target_transform=target_transform\n )\n # check arguments\n self.split = verify_str_arg(split, \"split\", (\"train\", \"val\", \"test\"))\n\n if download:\n self.download()\n\n if not self._check_integrity():\n raise RuntimeError(\n \"Dataset not found or corrupted. 
\" + \"You can use download=True to download and prepare it\"\n )\n\n self.img_info: List[Dict[str, Union[str, Dict[str, torch.Tensor]]]] = []\n if self.split in (\"train\", \"val\"):\n self.parse_train_val_annotations_file()\n else:\n self.parse_test_annotations_file()\n\n def __getitem__(self, index: int) -> Tuple[Any, Any]:\n \"\"\"\n Args:\n index (int): Index\n\n Returns:\n tuple: (image, target) where target is a dict of annotations for all faces in the image.\n target=None for the test split.\n \"\"\"\n\n # stay consistent with other datasets and return a PIL Image\n img = Image.open(self.img_info[index][\"img_path\"])\n\n if self.transform is not None:\n img = self.transform(img)\n\n target = None if self.split == \"test\" else self.img_info[index][\"annotations\"]\n if self.target_transform is not None:\n target = self.target_transform(target)\n\n return img, target\n\n def __len__(self) -> int:\n return len(self.img_info)\n\n def extra_repr(self) -> str:\n lines = [\"Split: {split}\"]\n return \"\\n\".join(lines).format(**self.__dict__)\n\n def parse_train_val_annotations_file(self) -> None:\n filename = \"wider_face_train_bbx_gt.txt\" if self.split == \"train\" else \"wider_face_val_bbx_gt.txt\"\n filepath = os.path.join(self.root, \"wider_face_split\", filename)\n\n with open(filepath, \"r\") as f:\n lines = f.readlines()\n file_name_line, num_boxes_line, box_annotation_line = True, False, False\n num_boxes, box_counter = 0, 0\n labels = []\n for line in lines:\n line = line.rstrip()\n if file_name_line:\n img_path = os.path.join(self.root, \"WIDER_\" + self.split, \"images\", line)\n img_path = abspath(expanduser(img_path))\n file_name_line = False\n num_boxes_line = True\n elif num_boxes_line:\n num_boxes = int(line)\n num_boxes_line = False\n box_annotation_line = True\n elif box_annotation_line:\n box_counter += 1\n line_split = line.split(\" \")\n line_values = [int(x) for x in line_split]\n labels.append(line_values)\n if box_counter >= num_boxes:\n box_annotation_line = False\n file_name_line = True\n labels_tensor = torch.tensor(labels)\n self.img_info.append(\n {\n \"img_path\": img_path,\n \"annotations\": {\n \"bbox\": labels_tensor[:, 0:4], # x, y, width, height\n \"blur\": labels_tensor[:, 4],\n \"expression\": labels_tensor[:, 5],\n \"illumination\": labels_tensor[:, 6],\n \"occlusion\": labels_tensor[:, 7],\n \"pose\": labels_tensor[:, 8],\n \"invalid\": labels_tensor[:, 9],\n },\n }\n )\n box_counter = 0\n labels.clear()\n else:\n raise RuntimeError(\"Error parsing annotation file {}\".format(filepath))\n\n def parse_test_annotations_file(self) -> None:\n filepath = os.path.join(self.root, \"wider_face_split\", \"wider_face_test_filelist.txt\")\n filepath = abspath(expanduser(filepath))\n with open(filepath, \"r\") as f:\n lines = f.readlines()\n for line in lines:\n line = line.rstrip()\n img_path = os.path.join(self.root, \"WIDER_test\", \"images\", line)\n img_path = abspath(expanduser(img_path))\n self.img_info.append({\"img_path\": img_path})\n\n def _check_integrity(self) -> bool:\n # Allow original archive to be deleted (zip). 
Only need the extracted images\n all_files = self.FILE_LIST.copy()\n all_files.append(self.ANNOTATIONS_FILE)\n for (_, md5, filename) in all_files:\n file, ext = os.path.splitext(filename)\n extracted_dir = os.path.join(self.root, file)\n if not os.path.exists(extracted_dir):\n return False\n return True\n\n def download(self) -> None:\n if self._check_integrity():\n print(\"Files already downloaded and verified\")\n return\n\n # download and extract image data\n for (file_id, md5, filename) in self.FILE_LIST:\n download_file_from_google_drive(file_id, self.root, filename, md5)\n filepath = os.path.join(self.root, filename)\n extract_archive(filepath)\n\n # download and extract annotation files\n download_and_extract_archive(\n url=self.ANNOTATIONS_FILE[0], download_root=self.root, md5=self.ANNOTATIONS_FILE[1]\n )\n", "path": "torchvision/datasets/widerface.py"}], "after_files": [{"content": "import os\nfrom os.path import abspath, expanduser\nfrom typing import Any, Callable, List, Dict, Optional, Tuple, Union\n\nimport torch\nfrom PIL import Image\n\nfrom .utils import (\n download_file_from_google_drive,\n download_and_extract_archive,\n extract_archive,\n verify_str_arg,\n)\nfrom .vision import VisionDataset\n\n\nclass WIDERFace(VisionDataset):\n \"\"\"`WIDERFace <http://shuoyang1213.me/WIDERFACE/>`_ Dataset.\n\n Args:\n root (string): Root directory where images and annotations are downloaded to.\n Expects the following folder structure if download=False:\n\n .. code::\n\n <root>\n \u2514\u2500\u2500 widerface\n \u251c\u2500\u2500 wider_face_split ('wider_face_split.zip' if compressed)\n \u251c\u2500\u2500 WIDER_train ('WIDER_train.zip' if compressed)\n \u251c\u2500\u2500 WIDER_val ('WIDER_val.zip' if compressed)\n \u2514\u2500\u2500 WIDER_test ('WIDER_test.zip' if compressed)\n split (string): The dataset split to use. One of {``train``, ``val``, ``test``}.\n Defaults to ``train``.\n transform (callable, optional): A function/transform that takes in a PIL image\n and returns a transformed version. E.g, ``transforms.RandomCrop``\n target_transform (callable, optional): A function/transform that takes in the\n target and transforms it.\n download (bool, optional): If true, downloads the dataset from the internet and\n puts it in root directory. If dataset is already downloaded, it is not\n downloaded again.\n\n \"\"\"\n\n BASE_FOLDER = \"widerface\"\n FILE_LIST = [\n # File ID MD5 Hash Filename\n (\"15hGDLhsx8bLgLcIRD5DhYt5iBxnjNF1M\", \"3fedf70df600953d25982bcd13d91ba2\", \"WIDER_train.zip\"),\n (\"1GUCogbp16PMGa39thoMMeWxp7Rp5oM8Q\", \"dfa7d7e790efa35df3788964cf0bbaea\", \"WIDER_val.zip\"),\n (\"1HIfDbVEWKmsYKJZm4lchTBDLW5N7dY5T\", \"e5d8f4248ed24c334bbd12f49c29dd40\", \"WIDER_test.zip\"),\n ]\n ANNOTATIONS_FILE = (\n \"http://shuoyang1213.me/WIDERFACE/support/bbx_annotation/wider_face_split.zip\",\n \"0e3767bcf0e326556d407bf5bff5d27c\",\n \"wider_face_split.zip\",\n )\n\n def __init__(\n self,\n root: str,\n split: str = \"train\",\n transform: Optional[Callable] = None,\n target_transform: Optional[Callable] = None,\n download: bool = False,\n ) -> None:\n super(WIDERFace, self).__init__(\n root=os.path.join(root, self.BASE_FOLDER), transform=transform, target_transform=target_transform\n )\n # check arguments\n self.split = verify_str_arg(split, \"split\", (\"train\", \"val\", \"test\"))\n\n if download:\n self.download()\n\n if not self._check_integrity():\n raise RuntimeError(\n \"Dataset not found or corrupted. 
\" + \"You can use download=True to download and prepare it\"\n )\n\n self.img_info: List[Dict[str, Union[str, Dict[str, torch.Tensor]]]] = []\n if self.split in (\"train\", \"val\"):\n self.parse_train_val_annotations_file()\n else:\n self.parse_test_annotations_file()\n\n def __getitem__(self, index: int) -> Tuple[Any, Any]:\n \"\"\"\n Args:\n index (int): Index\n\n Returns:\n tuple: (image, target) where target is a dict of annotations for all faces in the image.\n target=None for the test split.\n \"\"\"\n\n # stay consistent with other datasets and return a PIL Image\n img = Image.open(self.img_info[index][\"img_path\"])\n\n if self.transform is not None:\n img = self.transform(img)\n\n target = None if self.split == \"test\" else self.img_info[index][\"annotations\"]\n if self.target_transform is not None:\n target = self.target_transform(target)\n\n return img, target\n\n def __len__(self) -> int:\n return len(self.img_info)\n\n def extra_repr(self) -> str:\n lines = [\"Split: {split}\"]\n return \"\\n\".join(lines).format(**self.__dict__)\n\n def parse_train_val_annotations_file(self) -> None:\n filename = \"wider_face_train_bbx_gt.txt\" if self.split == \"train\" else \"wider_face_val_bbx_gt.txt\"\n filepath = os.path.join(self.root, \"wider_face_split\", filename)\n\n with open(filepath, \"r\") as f:\n lines = f.readlines()\n file_name_line, num_boxes_line, box_annotation_line = True, False, False\n num_boxes, box_counter = 0, 0\n labels = []\n for line in lines:\n line = line.rstrip()\n if file_name_line:\n img_path = os.path.join(self.root, \"WIDER_\" + self.split, \"images\", line)\n img_path = abspath(expanduser(img_path))\n file_name_line = False\n num_boxes_line = True\n elif num_boxes_line:\n num_boxes = int(line)\n num_boxes_line = False\n box_annotation_line = True\n elif box_annotation_line:\n box_counter += 1\n line_split = line.split(\" \")\n line_values = [int(x) for x in line_split]\n labels.append(line_values)\n if box_counter >= num_boxes:\n box_annotation_line = False\n file_name_line = True\n labels_tensor = torch.tensor(labels)\n self.img_info.append(\n {\n \"img_path\": img_path,\n \"annotations\": {\n \"bbox\": labels_tensor[:, 0:4], # x, y, width, height\n \"blur\": labels_tensor[:, 4],\n \"expression\": labels_tensor[:, 5],\n \"illumination\": labels_tensor[:, 6],\n \"occlusion\": labels_tensor[:, 7],\n \"pose\": labels_tensor[:, 8],\n \"invalid\": labels_tensor[:, 9],\n },\n }\n )\n box_counter = 0\n labels.clear()\n else:\n raise RuntimeError(\"Error parsing annotation file {}\".format(filepath))\n\n def parse_test_annotations_file(self) -> None:\n filepath = os.path.join(self.root, \"wider_face_split\", \"wider_face_test_filelist.txt\")\n filepath = abspath(expanduser(filepath))\n with open(filepath, \"r\") as f:\n lines = f.readlines()\n for line in lines:\n line = line.rstrip()\n img_path = os.path.join(self.root, \"WIDER_test\", \"images\", line)\n img_path = abspath(expanduser(img_path))\n self.img_info.append({\"img_path\": img_path})\n\n def _check_integrity(self) -> bool:\n # Allow original archive to be deleted (zip). 
Only need the extracted images\n all_files = self.FILE_LIST.copy()\n all_files.append(self.ANNOTATIONS_FILE)\n for (_, md5, filename) in all_files:\n file, ext = os.path.splitext(filename)\n extracted_dir = os.path.join(self.root, file)\n if not os.path.exists(extracted_dir):\n return False\n return True\n\n def download(self) -> None:\n if self._check_integrity():\n print(\"Files already downloaded and verified\")\n return\n\n # download and extract image data\n for (file_id, md5, filename) in self.FILE_LIST:\n download_file_from_google_drive(file_id, self.root, filename, md5)\n filepath = os.path.join(self.root, filename)\n extract_archive(filepath)\n\n # download and extract annotation files\n download_and_extract_archive(\n url=self.ANNOTATIONS_FILE[0], download_root=self.root, md5=self.ANNOTATIONS_FILE[1]\n )\n", "path": "torchvision/datasets/widerface.py"}]}
3,287
576
gh_patches_debug_24400
rasdani/github-patches
git_diff
joke2k__faker-1453
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Raises RecursionError under certain conditions * Faker version: 8.4.0 * OS: Arch Linux with kernel 5.12.6 Certain conditions can trigger a `RecursionError: maximum recursion depth exceeded`. I encountered this error in a django project, see below the steps to reproduce this error. ### Steps to reproduce 1. `pip install django faker` 1. `django-admin startproject mysite` 1. In the app mysite create a file called _test_example.py_, the source code is listed below 1. And in the app mysite create a file called _my_faker.py_, the source code is listed below ```python # test_example.py from django.test import TestCase from faker import Faker from .my_faker import MyFaker class ExampleTest(TestCase): @classmethod def setUpTestData(cls): cls.faker_example = MyFaker() def setUp(self): self.fake = Faker() self.example = self.faker_example.example() def test_example(self): self.assertEqual(1, 1) ``` ```python # my_faker.py from faker import Faker class MyFaker: def __init__(self): self.fake = Faker() def example(self): pass ``` ### Expected behavior No RecursionError, it worked before. Last time about 2 month ago with `faker==5.6.5`, however this version does not work on updated systems as of the `AttributeError: 'PosixPath' object has no attribute 'startswith'`error but which was fixed recently. ### Actual behavior ``` $ python manage.py test Creating test database for alias 'default'... System check identified no issues (0 silenced). E ====================================================================== ERROR: test_example (mysite.test_example.ExampleTest) ---------------------------------------------------------------------- Traceback (most recent call last): File "/home/username/Downloads/mysite/mysite/test_example.py", line 15, in setUp self.example = self.faker_example.example() File "/home/username/.virtualenvs/faker-test/lib/python3.9/site-packages/django/test/testcases.py", line 1124, in __get__ data = deepcopy(self.data, memo) File "/usr/lib/python3.9/copy.py", line 172, in deepcopy y = _reconstruct(x, memo, *rv) File "/usr/lib/python3.9/copy.py", line 270, in _reconstruct state = deepcopy(state, memo) File "/usr/lib/python3.9/copy.py", line 146, in deepcopy y = copier(x, memo) File "/usr/lib/python3.9/copy.py", line 230, in _deepcopy_dict y[deepcopy(key, memo)] = deepcopy(value, memo) File "/usr/lib/python3.9/copy.py", line 172, in deepcopy y = _reconstruct(x, memo, *rv) File "/usr/lib/python3.9/copy.py", line 270, in _reconstruct state = deepcopy(state, memo) File "/usr/lib/python3.9/copy.py", line 146, in deepcopy y = copier(x, memo) File "/usr/lib/python3.9/copy.py", line 230, in _deepcopy_dict y[deepcopy(key, memo)] = deepcopy(value, memo) File "/usr/lib/python3.9/copy.py", line 172, in deepcopy y = _reconstruct(x, memo, *rv) File "/usr/lib/python3.9/copy.py", line 271, in _reconstruct if hasattr(y, '__setstate__'): File "/home/username/.virtualenvs/faker-test/lib/python3.9/site-packages/faker/proxy.py", line 263, in __getattr__ obj = getattr(self._proxy, name) File "/home/username/.virtualenvs/faker-test/lib/python3.9/site-packages/faker/proxy.py", line 263, in __getattr__ obj = getattr(self._proxy, name) File "/home/username/.virtualenvs/faker-test/lib/python3.9/site-packages/faker/proxy.py", line 263, in __getattr__ obj = getattr(self._proxy, name) [Previous line repeated 964 more times] RecursionError: maximum recursion depth exceeded ``` --- END ISSUE --- Below are 
some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `faker/proxy.py` Content: ``` 1 import functools 2 import random 3 import re 4 5 from collections import OrderedDict 6 7 from faker.config import DEFAULT_LOCALE 8 from faker.exceptions import UniquenessException 9 from faker.factory import Factory 10 from faker.generator import Generator 11 from faker.utils.distribution import choices_distribution 12 13 _UNIQUE_ATTEMPTS = 1000 14 15 16 class Faker: 17 """Proxy class capable of supporting multiple locales""" 18 19 cache_pattern = re.compile(r'^_cached_\w*_mapping$') 20 generator_attrs = [ 21 attr for attr in dir(Generator) 22 if not attr.startswith('__') 23 and attr not in ['seed', 'seed_instance', 'random'] 24 ] 25 26 def __init__(self, locale=None, providers=None, 27 generator=None, includes=None, 28 use_weighting=True, **config): 29 self._factory_map = OrderedDict() 30 self._weights = None 31 self._unique_proxy = UniqueProxy(self) 32 33 if isinstance(locale, str): 34 locales = [locale.replace('-', '_')] 35 36 # This guarantees a FIFO ordering of elements in `locales` based on the final 37 # locale string while discarding duplicates after processing 38 elif isinstance(locale, (list, tuple, set)): 39 locales = [] 40 for code in locale: 41 if not isinstance(code, str): 42 raise TypeError('The locale "%s" must be a string.' % str(code)) 43 final_locale = code.replace('-', '_') 44 if final_locale not in locales: 45 locales.append(final_locale) 46 47 elif isinstance(locale, OrderedDict): 48 assert all(isinstance(v, (int, float)) for v in locale.values()) 49 odict = OrderedDict() 50 for k, v in locale.items(): 51 key = k.replace('-', '_') 52 odict[key] = v 53 locales = list(odict.keys()) 54 self._weights = list(odict.values()) 55 56 else: 57 locales = [DEFAULT_LOCALE] 58 59 for locale in locales: 60 self._factory_map[locale] = Factory.create(locale, providers, generator, includes, 61 use_weighting=use_weighting, 62 **config) 63 64 self._locales = locales 65 self._factories = list(self._factory_map.values()) 66 67 def __dir__(self): 68 attributes = set(super(Faker, self).__dir__()) 69 for factory in self.factories: 70 attributes |= { 71 attr for attr in dir(factory) if not attr.startswith('_') 72 } 73 return sorted(attributes) 74 75 def __getitem__(self, locale): 76 return self._factory_map[locale.replace('-', '_')] 77 78 def __getattribute__(self, attr): 79 """ 80 Handles the "attribute resolution" behavior for declared members of this proxy class 81 82 The class method `seed` cannot be called from an instance. 83 84 :param attr: attribute name 85 :return: the appropriate attribute 86 """ 87 if attr == 'seed': 88 msg = ( 89 'Calling `.seed()` on instances is deprecated. ' 90 'Use the class method `Faker.seed()` instead.' 91 ) 92 raise TypeError(msg) 93 else: 94 return super().__getattribute__(attr) 95 96 def __getattr__(self, attr): 97 """ 98 Handles cache access and proxying behavior 99 100 :param attr: attribute name 101 :return: the appropriate attribute 102 """ 103 104 if len(self._factories) == 1: 105 return getattr(self._factories[0], attr) 106 elif attr in self.generator_attrs: 107 msg = 'Proxying calls to `%s` is not implemented in multiple locale mode.' 
% attr 108 raise NotImplementedError(msg) 109 elif self.cache_pattern.match(attr): 110 msg = 'Cached attribute `%s` does not exist' % attr 111 raise AttributeError(msg) 112 else: 113 factory = self._select_factory(attr) 114 return getattr(factory, attr) 115 116 @property 117 def unique(self): 118 return self._unique_proxy 119 120 def _select_factory(self, method_name): 121 """ 122 Returns a random factory that supports the provider method 123 124 :param method_name: Name of provider method 125 :return: A factory that supports the provider method 126 """ 127 128 factories, weights = self._map_provider_method(method_name) 129 if len(factories) == 0: 130 msg = f'No generator object has attribute {method_name!r}' 131 raise AttributeError(msg) 132 elif len(factories) == 1: 133 return factories[0] 134 135 if weights: 136 factory = choices_distribution(factories, weights, length=1)[0] 137 else: 138 factory = random.choice(factories) 139 return factory 140 141 def _map_provider_method(self, method_name): 142 """ 143 Creates a 2-tuple of factories and weights for the given provider method name 144 145 The first element of the tuple contains a list of compatible factories. 146 The second element of the tuple contains a list of distribution weights. 147 148 :param method_name: Name of provider method 149 :return: 2-tuple (factories, weights) 150 """ 151 152 # Return cached mapping if it exists for given method 153 attr = f'_cached_{method_name}_mapping' 154 if hasattr(self, attr): 155 return getattr(self, attr) 156 157 # Create mapping if it does not exist 158 if self._weights: 159 value = [ 160 (factory, weight) 161 for factory, weight in zip(self.factories, self._weights) 162 if hasattr(factory, method_name) 163 ] 164 factories, weights = zip(*value) 165 mapping = list(factories), list(weights) 166 else: 167 value = [ 168 factory 169 for factory in self.factories 170 if hasattr(factory, method_name) 171 ] 172 mapping = value, None 173 174 # Then cache and return results 175 setattr(self, attr, mapping) 176 return mapping 177 178 @classmethod 179 def seed(cls, seed=None): 180 """ 181 Seeds the shared `random.Random` object across all factories 182 183 :param seed: seed value 184 """ 185 Generator.seed(seed) 186 187 def seed_instance(self, seed=None): 188 """ 189 Creates and seeds a new `random.Random` object for each factory 190 191 :param seed: seed value 192 """ 193 for factory in self._factories: 194 factory.seed_instance(seed) 195 196 def seed_locale(self, locale, seed=None): 197 """ 198 Creates and seeds a new `random.Random` object for the factory of the specified locale 199 200 :param locale: locale string 201 :param seed: seed value 202 """ 203 self._factory_map[locale.replace('-', '_')].seed_instance(seed) 204 205 @property 206 def random(self): 207 """ 208 Proxies `random` getter calls 209 210 In single locale mode, this will be proxied to the `random` getter 211 of the only internal `Generator` object. Subclasses will have to 212 implement desired behavior in multiple locale mode. 213 """ 214 215 if len(self._factories) == 1: 216 return self._factories[0].random 217 else: 218 msg = 'Proxying `random` getter calls is not implemented in multiple locale mode.' 219 raise NotImplementedError(msg) 220 221 @random.setter 222 def random(self, value): 223 """ 224 Proxies `random` setter calls 225 226 In single locale mode, this will be proxied to the `random` setter 227 of the only internal `Generator` object. Subclasses will have to 228 implement desired behavior in multiple locale mode. 
229 """ 230 231 if len(self._factories) == 1: 232 self._factories[0].random = value 233 else: 234 msg = 'Proxying `random` setter calls is not implemented in multiple locale mode.' 235 raise NotImplementedError(msg) 236 237 @property 238 def locales(self): 239 return list(self._locales) 240 241 @property 242 def weights(self): 243 return self._weights 244 245 @property 246 def factories(self): 247 return self._factories 248 249 def items(self): 250 return self._factory_map.items() 251 252 253 class UniqueProxy: 254 def __init__(self, proxy): 255 self._proxy = proxy 256 self._seen = {} 257 self._sentinel = object() 258 259 def clear(self): 260 self._seen = {} 261 262 def __getattr__(self, name: str): 263 obj = getattr(self._proxy, name) 264 if callable(obj): 265 return self._wrap(name, obj) 266 else: 267 raise TypeError("Accessing non-functions through .unique is not supported.") 268 269 def _wrap(self, name, function): 270 @functools.wraps(function) 271 def wrapper(*args, **kwargs): 272 key = (name, args, tuple(sorted(kwargs.items()))) 273 274 generated = self._seen.setdefault(key, {self._sentinel}) 275 276 # With use of a sentinel value rather than None, we leave 277 # None open as a valid return value. 278 retval = self._sentinel 279 280 for i in range(_UNIQUE_ATTEMPTS): 281 if retval not in generated: 282 break 283 retval = function(*args, **kwargs) 284 else: 285 raise UniquenessException(f'Got duplicated values after {_UNIQUE_ATTEMPTS:,} iterations.') 286 287 generated.add(retval) 288 289 return retval 290 291 return wrapper 292 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/faker/proxy.py b/faker/proxy.py --- a/faker/proxy.py +++ b/faker/proxy.py @@ -1,3 +1,4 @@ +import copy import functools import random import re @@ -100,7 +101,6 @@ :param attr: attribute name :return: the appropriate attribute """ - if len(self._factories) == 1: return getattr(self._factories[0], attr) elif attr in self.generator_attrs: @@ -113,6 +113,20 @@ factory = self._select_factory(attr) return getattr(factory, attr) + def __deepcopy__(self, memodict={}): + cls = self.__class__ + result = cls.__new__(cls) + result._locales = copy.deepcopy(self._locales) + result._factories = copy.deepcopy(self._factories) + result._factory_map = copy.deepcopy(self._factory_map) + result._weights = copy.deepcopy(self._weights) + result._unique_proxy = UniqueProxy(self) + result._unique_proxy._seen = { + k: {result._unique_proxy._sentinel} + for k in self._unique_proxy._seen.keys() + } + return result + @property def unique(self): return self._unique_proxy
{"golden_diff": "diff --git a/faker/proxy.py b/faker/proxy.py\n--- a/faker/proxy.py\n+++ b/faker/proxy.py\n@@ -1,3 +1,4 @@\n+import copy\n import functools\n import random\n import re\n@@ -100,7 +101,6 @@\n :param attr: attribute name\n :return: the appropriate attribute\n \"\"\"\n-\n if len(self._factories) == 1:\n return getattr(self._factories[0], attr)\n elif attr in self.generator_attrs:\n@@ -113,6 +113,20 @@\n factory = self._select_factory(attr)\n return getattr(factory, attr)\n \n+ def __deepcopy__(self, memodict={}):\n+ cls = self.__class__\n+ result = cls.__new__(cls)\n+ result._locales = copy.deepcopy(self._locales)\n+ result._factories = copy.deepcopy(self._factories)\n+ result._factory_map = copy.deepcopy(self._factory_map)\n+ result._weights = copy.deepcopy(self._weights)\n+ result._unique_proxy = UniqueProxy(self)\n+ result._unique_proxy._seen = {\n+ k: {result._unique_proxy._sentinel}\n+ for k in self._unique_proxy._seen.keys()\n+ }\n+ return result\n+\n @property\n def unique(self):\n return self._unique_proxy\n", "issue": "Raises RecursionError under certain conditions\n* Faker version: 8.4.0\r\n* OS: Arch Linux with kernel 5.12.6\r\n\r\nCertain conditions can trigger a `RecursionError: maximum recursion depth exceeded`. I encountered this error in a django project, see below the steps to reproduce this error.\r\n\r\n### Steps to reproduce\r\n\r\n1. `pip install django faker`\r\n1. `django-admin startproject mysite`\r\n1. In the app mysite create a file called _test_example.py_, the source code is listed below\r\n1. And in the app mysite create a file called _my_faker.py_, the source code is listed below\r\n\r\n```python\r\n# test_example.py\r\nfrom django.test import TestCase\r\nfrom faker import Faker\r\n\r\nfrom .my_faker import MyFaker\r\n\r\n\r\nclass ExampleTest(TestCase):\r\n @classmethod\r\n def setUpTestData(cls):\r\n cls.faker_example = MyFaker()\r\n\r\n def setUp(self):\r\n self.fake = Faker()\r\n\r\n self.example = self.faker_example.example()\r\n\r\n def test_example(self):\r\n self.assertEqual(1, 1)\r\n```\r\n\r\n```python\r\n# my_faker.py\r\nfrom faker import Faker\r\n\r\n\r\nclass MyFaker:\r\n def __init__(self):\r\n self.fake = Faker()\r\n\r\n def example(self):\r\n pass\r\n```\r\n\r\n\r\n### Expected behavior\r\n\r\nNo RecursionError, it worked before. 
Last time about 2 month ago with `faker==5.6.5`, however this version does not work on updated systems as of the `AttributeError: 'PosixPath' object has no attribute 'startswith'`error but which was fixed recently.\r\n\r\n### Actual behavior\r\n```\r\n$ python manage.py test\r\nCreating test database for alias 'default'...\r\nSystem check identified no issues (0 silenced).\r\nE\r\n======================================================================\r\nERROR: test_example (mysite.test_example.ExampleTest)\r\n----------------------------------------------------------------------\r\nTraceback (most recent call last):\r\n File \"/home/username/Downloads/mysite/mysite/test_example.py\", line 15, in setUp\r\n self.example = self.faker_example.example()\r\n File \"/home/username/.virtualenvs/faker-test/lib/python3.9/site-packages/django/test/testcases.py\", line 1124, in __get__\r\n data = deepcopy(self.data, memo)\r\n File \"/usr/lib/python3.9/copy.py\", line 172, in deepcopy\r\n y = _reconstruct(x, memo, *rv)\r\n File \"/usr/lib/python3.9/copy.py\", line 270, in _reconstruct\r\n state = deepcopy(state, memo)\r\n File \"/usr/lib/python3.9/copy.py\", line 146, in deepcopy\r\n y = copier(x, memo)\r\n File \"/usr/lib/python3.9/copy.py\", line 230, in _deepcopy_dict\r\n y[deepcopy(key, memo)] = deepcopy(value, memo)\r\n File \"/usr/lib/python3.9/copy.py\", line 172, in deepcopy\r\n y = _reconstruct(x, memo, *rv)\r\n File \"/usr/lib/python3.9/copy.py\", line 270, in _reconstruct\r\n state = deepcopy(state, memo)\r\n File \"/usr/lib/python3.9/copy.py\", line 146, in deepcopy\r\n y = copier(x, memo)\r\n File \"/usr/lib/python3.9/copy.py\", line 230, in _deepcopy_dict\r\n y[deepcopy(key, memo)] = deepcopy(value, memo)\r\n File \"/usr/lib/python3.9/copy.py\", line 172, in deepcopy\r\n y = _reconstruct(x, memo, *rv)\r\n File \"/usr/lib/python3.9/copy.py\", line 271, in _reconstruct\r\n if hasattr(y, '__setstate__'):\r\n File \"/home/username/.virtualenvs/faker-test/lib/python3.9/site-packages/faker/proxy.py\", line 263, in __getattr__\r\n obj = getattr(self._proxy, name)\r\n File \"/home/username/.virtualenvs/faker-test/lib/python3.9/site-packages/faker/proxy.py\", line 263, in __getattr__\r\n obj = getattr(self._proxy, name)\r\n File \"/home/username/.virtualenvs/faker-test/lib/python3.9/site-packages/faker/proxy.py\", line 263, in __getattr__\r\n obj = getattr(self._proxy, name)\r\n [Previous line repeated 964 more times]\r\nRecursionError: maximum recursion depth exceeded\r\n```\r\n\n", "before_files": [{"content": "import functools\nimport random\nimport re\n\nfrom collections import OrderedDict\n\nfrom faker.config import DEFAULT_LOCALE\nfrom faker.exceptions import UniquenessException\nfrom faker.factory import Factory\nfrom faker.generator import Generator\nfrom faker.utils.distribution import choices_distribution\n\n_UNIQUE_ATTEMPTS = 1000\n\n\nclass Faker:\n \"\"\"Proxy class capable of supporting multiple locales\"\"\"\n\n cache_pattern = re.compile(r'^_cached_\\w*_mapping$')\n generator_attrs = [\n attr for attr in dir(Generator)\n if not attr.startswith('__')\n and attr not in ['seed', 'seed_instance', 'random']\n ]\n\n def __init__(self, locale=None, providers=None,\n generator=None, includes=None,\n use_weighting=True, **config):\n self._factory_map = OrderedDict()\n self._weights = None\n self._unique_proxy = UniqueProxy(self)\n\n if isinstance(locale, str):\n locales = [locale.replace('-', '_')]\n\n # This guarantees a FIFO ordering of elements in `locales` based on the final\n # locale 
string while discarding duplicates after processing\n elif isinstance(locale, (list, tuple, set)):\n locales = []\n for code in locale:\n if not isinstance(code, str):\n raise TypeError('The locale \"%s\" must be a string.' % str(code))\n final_locale = code.replace('-', '_')\n if final_locale not in locales:\n locales.append(final_locale)\n\n elif isinstance(locale, OrderedDict):\n assert all(isinstance(v, (int, float)) for v in locale.values())\n odict = OrderedDict()\n for k, v in locale.items():\n key = k.replace('-', '_')\n odict[key] = v\n locales = list(odict.keys())\n self._weights = list(odict.values())\n\n else:\n locales = [DEFAULT_LOCALE]\n\n for locale in locales:\n self._factory_map[locale] = Factory.create(locale, providers, generator, includes,\n use_weighting=use_weighting,\n **config)\n\n self._locales = locales\n self._factories = list(self._factory_map.values())\n\n def __dir__(self):\n attributes = set(super(Faker, self).__dir__())\n for factory in self.factories:\n attributes |= {\n attr for attr in dir(factory) if not attr.startswith('_')\n }\n return sorted(attributes)\n\n def __getitem__(self, locale):\n return self._factory_map[locale.replace('-', '_')]\n\n def __getattribute__(self, attr):\n \"\"\"\n Handles the \"attribute resolution\" behavior for declared members of this proxy class\n\n The class method `seed` cannot be called from an instance.\n\n :param attr: attribute name\n :return: the appropriate attribute\n \"\"\"\n if attr == 'seed':\n msg = (\n 'Calling `.seed()` on instances is deprecated. '\n 'Use the class method `Faker.seed()` instead.'\n )\n raise TypeError(msg)\n else:\n return super().__getattribute__(attr)\n\n def __getattr__(self, attr):\n \"\"\"\n Handles cache access and proxying behavior\n\n :param attr: attribute name\n :return: the appropriate attribute\n \"\"\"\n\n if len(self._factories) == 1:\n return getattr(self._factories[0], attr)\n elif attr in self.generator_attrs:\n msg = 'Proxying calls to `%s` is not implemented in multiple locale mode.' 
% attr\n raise NotImplementedError(msg)\n elif self.cache_pattern.match(attr):\n msg = 'Cached attribute `%s` does not exist' % attr\n raise AttributeError(msg)\n else:\n factory = self._select_factory(attr)\n return getattr(factory, attr)\n\n @property\n def unique(self):\n return self._unique_proxy\n\n def _select_factory(self, method_name):\n \"\"\"\n Returns a random factory that supports the provider method\n\n :param method_name: Name of provider method\n :return: A factory that supports the provider method\n \"\"\"\n\n factories, weights = self._map_provider_method(method_name)\n if len(factories) == 0:\n msg = f'No generator object has attribute {method_name!r}'\n raise AttributeError(msg)\n elif len(factories) == 1:\n return factories[0]\n\n if weights:\n factory = choices_distribution(factories, weights, length=1)[0]\n else:\n factory = random.choice(factories)\n return factory\n\n def _map_provider_method(self, method_name):\n \"\"\"\n Creates a 2-tuple of factories and weights for the given provider method name\n\n The first element of the tuple contains a list of compatible factories.\n The second element of the tuple contains a list of distribution weights.\n\n :param method_name: Name of provider method\n :return: 2-tuple (factories, weights)\n \"\"\"\n\n # Return cached mapping if it exists for given method\n attr = f'_cached_{method_name}_mapping'\n if hasattr(self, attr):\n return getattr(self, attr)\n\n # Create mapping if it does not exist\n if self._weights:\n value = [\n (factory, weight)\n for factory, weight in zip(self.factories, self._weights)\n if hasattr(factory, method_name)\n ]\n factories, weights = zip(*value)\n mapping = list(factories), list(weights)\n else:\n value = [\n factory\n for factory in self.factories\n if hasattr(factory, method_name)\n ]\n mapping = value, None\n\n # Then cache and return results\n setattr(self, attr, mapping)\n return mapping\n\n @classmethod\n def seed(cls, seed=None):\n \"\"\"\n Seeds the shared `random.Random` object across all factories\n\n :param seed: seed value\n \"\"\"\n Generator.seed(seed)\n\n def seed_instance(self, seed=None):\n \"\"\"\n Creates and seeds a new `random.Random` object for each factory\n\n :param seed: seed value\n \"\"\"\n for factory in self._factories:\n factory.seed_instance(seed)\n\n def seed_locale(self, locale, seed=None):\n \"\"\"\n Creates and seeds a new `random.Random` object for the factory of the specified locale\n\n :param locale: locale string\n :param seed: seed value\n \"\"\"\n self._factory_map[locale.replace('-', '_')].seed_instance(seed)\n\n @property\n def random(self):\n \"\"\"\n Proxies `random` getter calls\n\n In single locale mode, this will be proxied to the `random` getter\n of the only internal `Generator` object. Subclasses will have to\n implement desired behavior in multiple locale mode.\n \"\"\"\n\n if len(self._factories) == 1:\n return self._factories[0].random\n else:\n msg = 'Proxying `random` getter calls is not implemented in multiple locale mode.'\n raise NotImplementedError(msg)\n\n @random.setter\n def random(self, value):\n \"\"\"\n Proxies `random` setter calls\n\n In single locale mode, this will be proxied to the `random` setter\n of the only internal `Generator` object. 
Subclasses will have to\n implement desired behavior in multiple locale mode.\n \"\"\"\n\n if len(self._factories) == 1:\n self._factories[0].random = value\n else:\n msg = 'Proxying `random` setter calls is not implemented in multiple locale mode.'\n raise NotImplementedError(msg)\n\n @property\n def locales(self):\n return list(self._locales)\n\n @property\n def weights(self):\n return self._weights\n\n @property\n def factories(self):\n return self._factories\n\n def items(self):\n return self._factory_map.items()\n\n\nclass UniqueProxy:\n def __init__(self, proxy):\n self._proxy = proxy\n self._seen = {}\n self._sentinel = object()\n\n def clear(self):\n self._seen = {}\n\n def __getattr__(self, name: str):\n obj = getattr(self._proxy, name)\n if callable(obj):\n return self._wrap(name, obj)\n else:\n raise TypeError(\"Accessing non-functions through .unique is not supported.\")\n\n def _wrap(self, name, function):\n @functools.wraps(function)\n def wrapper(*args, **kwargs):\n key = (name, args, tuple(sorted(kwargs.items())))\n\n generated = self._seen.setdefault(key, {self._sentinel})\n\n # With use of a sentinel value rather than None, we leave\n # None open as a valid return value.\n retval = self._sentinel\n\n for i in range(_UNIQUE_ATTEMPTS):\n if retval not in generated:\n break\n retval = function(*args, **kwargs)\n else:\n raise UniquenessException(f'Got duplicated values after {_UNIQUE_ATTEMPTS:,} iterations.')\n\n generated.add(retval)\n\n return retval\n\n return wrapper\n", "path": "faker/proxy.py"}], "after_files": [{"content": "import copy\nimport functools\nimport random\nimport re\n\nfrom collections import OrderedDict\n\nfrom faker.config import DEFAULT_LOCALE\nfrom faker.exceptions import UniquenessException\nfrom faker.factory import Factory\nfrom faker.generator import Generator\nfrom faker.utils.distribution import choices_distribution\n\n_UNIQUE_ATTEMPTS = 1000\n\n\nclass Faker:\n \"\"\"Proxy class capable of supporting multiple locales\"\"\"\n\n cache_pattern = re.compile(r'^_cached_\\w*_mapping$')\n generator_attrs = [\n attr for attr in dir(Generator)\n if not attr.startswith('__')\n and attr not in ['seed', 'seed_instance', 'random']\n ]\n\n def __init__(self, locale=None, providers=None,\n generator=None, includes=None,\n use_weighting=True, **config):\n self._factory_map = OrderedDict()\n self._weights = None\n self._unique_proxy = UniqueProxy(self)\n\n if isinstance(locale, str):\n locales = [locale.replace('-', '_')]\n\n # This guarantees a FIFO ordering of elements in `locales` based on the final\n # locale string while discarding duplicates after processing\n elif isinstance(locale, (list, tuple, set)):\n locales = []\n for code in locale:\n if not isinstance(code, str):\n raise TypeError('The locale \"%s\" must be a string.' 
% str(code))\n final_locale = code.replace('-', '_')\n if final_locale not in locales:\n locales.append(final_locale)\n\n elif isinstance(locale, OrderedDict):\n assert all(isinstance(v, (int, float)) for v in locale.values())\n odict = OrderedDict()\n for k, v in locale.items():\n key = k.replace('-', '_')\n odict[key] = v\n locales = list(odict.keys())\n self._weights = list(odict.values())\n\n else:\n locales = [DEFAULT_LOCALE]\n\n for locale in locales:\n self._factory_map[locale] = Factory.create(locale, providers, generator, includes,\n use_weighting=use_weighting,\n **config)\n\n self._locales = locales\n self._factories = list(self._factory_map.values())\n\n def __dir__(self):\n attributes = set(super(Faker, self).__dir__())\n for factory in self.factories:\n attributes |= {\n attr for attr in dir(factory) if not attr.startswith('_')\n }\n return sorted(attributes)\n\n def __getitem__(self, locale):\n return self._factory_map[locale.replace('-', '_')]\n\n def __getattribute__(self, attr):\n \"\"\"\n Handles the \"attribute resolution\" behavior for declared members of this proxy class\n\n The class method `seed` cannot be called from an instance.\n\n :param attr: attribute name\n :return: the appropriate attribute\n \"\"\"\n if attr == 'seed':\n msg = (\n 'Calling `.seed()` on instances is deprecated. '\n 'Use the class method `Faker.seed()` instead.'\n )\n raise TypeError(msg)\n else:\n return super().__getattribute__(attr)\n\n def __getattr__(self, attr):\n \"\"\"\n Handles cache access and proxying behavior\n\n :param attr: attribute name\n :return: the appropriate attribute\n \"\"\"\n if len(self._factories) == 1:\n return getattr(self._factories[0], attr)\n elif attr in self.generator_attrs:\n msg = 'Proxying calls to `%s` is not implemented in multiple locale mode.' 
% attr\n raise NotImplementedError(msg)\n elif self.cache_pattern.match(attr):\n msg = 'Cached attribute `%s` does not exist' % attr\n raise AttributeError(msg)\n else:\n factory = self._select_factory(attr)\n return getattr(factory, attr)\n\n def __deepcopy__(self, memodict={}):\n cls = self.__class__\n result = cls.__new__(cls)\n result._locales = copy.deepcopy(self._locales)\n result._factories = copy.deepcopy(self._factories)\n result._factory_map = copy.deepcopy(self._factory_map)\n result._weights = copy.deepcopy(self._weights)\n result._unique_proxy = UniqueProxy(self)\n result._unique_proxy._seen = {\n k: {result._unique_proxy._sentinel}\n for k in self._unique_proxy._seen.keys()\n }\n return result\n\n @property\n def unique(self):\n return self._unique_proxy\n\n def _select_factory(self, method_name):\n \"\"\"\n Returns a random factory that supports the provider method\n\n :param method_name: Name of provider method\n :return: A factory that supports the provider method\n \"\"\"\n\n factories, weights = self._map_provider_method(method_name)\n if len(factories) == 0:\n msg = f'No generator object has attribute {method_name!r}'\n raise AttributeError(msg)\n elif len(factories) == 1:\n return factories[0]\n\n if weights:\n factory = choices_distribution(factories, weights, length=1)[0]\n else:\n factory = random.choice(factories)\n return factory\n\n def _map_provider_method(self, method_name):\n \"\"\"\n Creates a 2-tuple of factories and weights for the given provider method name\n\n The first element of the tuple contains a list of compatible factories.\n The second element of the tuple contains a list of distribution weights.\n\n :param method_name: Name of provider method\n :return: 2-tuple (factories, weights)\n \"\"\"\n\n # Return cached mapping if it exists for given method\n attr = f'_cached_{method_name}_mapping'\n if hasattr(self, attr):\n return getattr(self, attr)\n\n # Create mapping if it does not exist\n if self._weights:\n value = [\n (factory, weight)\n for factory, weight in zip(self.factories, self._weights)\n if hasattr(factory, method_name)\n ]\n factories, weights = zip(*value)\n mapping = list(factories), list(weights)\n else:\n value = [\n factory\n for factory in self.factories\n if hasattr(factory, method_name)\n ]\n mapping = value, None\n\n # Then cache and return results\n setattr(self, attr, mapping)\n return mapping\n\n @classmethod\n def seed(cls, seed=None):\n \"\"\"\n Seeds the shared `random.Random` object across all factories\n\n :param seed: seed value\n \"\"\"\n Generator.seed(seed)\n\n def seed_instance(self, seed=None):\n \"\"\"\n Creates and seeds a new `random.Random` object for each factory\n\n :param seed: seed value\n \"\"\"\n for factory in self._factories:\n factory.seed_instance(seed)\n\n def seed_locale(self, locale, seed=None):\n \"\"\"\n Creates and seeds a new `random.Random` object for the factory of the specified locale\n\n :param locale: locale string\n :param seed: seed value\n \"\"\"\n self._factory_map[locale.replace('-', '_')].seed_instance(seed)\n\n @property\n def random(self):\n \"\"\"\n Proxies `random` getter calls\n\n In single locale mode, this will be proxied to the `random` getter\n of the only internal `Generator` object. 
Subclasses will have to\n implement desired behavior in multiple locale mode.\n \"\"\"\n\n if len(self._factories) == 1:\n return self._factories[0].random\n else:\n msg = 'Proxying `random` getter calls is not implemented in multiple locale mode.'\n raise NotImplementedError(msg)\n\n @random.setter\n def random(self, value):\n \"\"\"\n Proxies `random` setter calls\n\n In single locale mode, this will be proxied to the `random` setter\n of the only internal `Generator` object. Subclasses will have to\n implement desired behavior in multiple locale mode.\n \"\"\"\n\n if len(self._factories) == 1:\n self._factories[0].random = value\n else:\n msg = 'Proxying `random` setter calls is not implemented in multiple locale mode.'\n raise NotImplementedError(msg)\n\n @property\n def locales(self):\n return list(self._locales)\n\n @property\n def weights(self):\n return self._weights\n\n @property\n def factories(self):\n return self._factories\n\n def items(self):\n return self._factory_map.items()\n\n\nclass UniqueProxy:\n def __init__(self, proxy):\n self._proxy = proxy\n self._seen = {}\n self._sentinel = object()\n\n def clear(self):\n self._seen = {}\n\n def __getattr__(self, name: str):\n obj = getattr(self._proxy, name)\n if callable(obj):\n return self._wrap(name, obj)\n else:\n raise TypeError(\"Accessing non-functions through .unique is not supported.\")\n\n def _wrap(self, name, function):\n @functools.wraps(function)\n def wrapper(*args, **kwargs):\n key = (name, args, tuple(sorted(kwargs.items())))\n\n generated = self._seen.setdefault(key, {self._sentinel})\n\n # With use of a sentinel value rather than None, we leave\n # None open as a valid return value.\n retval = self._sentinel\n\n for i in range(_UNIQUE_ATTEMPTS):\n if retval not in generated:\n break\n retval = function(*args, **kwargs)\n else:\n raise UniquenessException(f'Got duplicated values after {_UNIQUE_ATTEMPTS:,} iterations.')\n\n generated.add(retval)\n\n return retval\n\n return wrapper\n", "path": "faker/proxy.py"}]}
3,967
308
gh_patches_debug_2417
rasdani/github-patches
git_diff
bookwyrm-social__bookwyrm-1864
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Invalid table limit error **Describe the bug** When running a fresh dev instance I get an `Invalid table limit` error, coming from `initdb.py`. Not sure if something is broken in the latest main branch, or I need to update my configuration. **To Reproduce** Steps to reproduce the behavior: 1. fetch latest `main` branch 2. `./bw-dev resetdb` 3. Get error (see below) **Expected behavior** BookWyrm resets database and new install works without errors. **Screenshots** ``` Applying sessions.0001_initial... OK + execweb python manage.py initdb + docker-compose exec web python manage.py initdb Traceback (most recent call last): File "/app/manage.py", line 18, in <module> execute_from_command_line(sys.argv) File "/usr/local/lib/python3.9/site-packages/django/core/management/__init__.py", line 419, in execute_from_command_line utility.execute() File "/usr/local/lib/python3.9/site-packages/django/core/management/__init__.py", line 413, in execute self.fetch_command(subcommand).run_from_argv(self.argv) File "/usr/local/lib/python3.9/site-packages/django/core/management/base.py", line 354, in run_from_argv self.execute(*args, **cmd_options) File "/usr/local/lib/python3.9/site-packages/django/core/management/base.py", line 398, in execute output = self.handle(*args, **options) File "/app/bookwyrm/management/commands/initdb.py", line 168, in handle raise Exception("Invalid table limit:", limit) Exception: ('Invalid table limit:', None) ``` **Instance** local development, current `main` branch. **Additional context** I initially started getting this error on a branch I was working on, but it's occuring on the latest `main` branch without any changes. --- **Desktop (please complete the following information):** - OS: MacOS --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
--- BEGIN FILES --- Path: `bookwyrm/management/commands/initdb.py` Content: ``` 1 """ What you need in the database to make it work """ 2 from django.core.management.base import BaseCommand 3 from django.contrib.auth.models import Group, Permission 4 from django.contrib.contenttypes.models import ContentType 5 6 from bookwyrm import models 7 8 9 def init_groups(): 10 """permission levels""" 11 groups = ["admin", "moderator", "editor"] 12 for group in groups: 13 Group.objects.create(name=group) 14 15 16 def init_permissions(): 17 """permission types""" 18 permissions = [ 19 { 20 "codename": "edit_instance_settings", 21 "name": "change the instance info", 22 "groups": [ 23 "admin", 24 ], 25 }, 26 { 27 "codename": "set_user_group", 28 "name": "change what group a user is in", 29 "groups": ["admin", "moderator"], 30 }, 31 { 32 "codename": "control_federation", 33 "name": "control who to federate with", 34 "groups": ["admin", "moderator"], 35 }, 36 { 37 "codename": "create_invites", 38 "name": "issue invitations to join", 39 "groups": ["admin", "moderator"], 40 }, 41 { 42 "codename": "moderate_user", 43 "name": "deactivate or silence a user", 44 "groups": ["admin", "moderator"], 45 }, 46 { 47 "codename": "moderate_post", 48 "name": "delete other users' posts", 49 "groups": ["admin", "moderator"], 50 }, 51 { 52 "codename": "edit_book", 53 "name": "edit book info", 54 "groups": ["admin", "moderator", "editor"], 55 }, 56 ] 57 58 content_type = models.ContentType.objects.get_for_model(User) 59 for permission in permissions: 60 permission_obj = Permission.objects.create( 61 codename=permission["codename"], 62 name=permission["name"], 63 content_type=content_type, 64 ) 65 # add the permission to the appropriate groups 66 for group_name in permission["groups"]: 67 Group.objects.get(name=group_name).permissions.add(permission_obj) 68 69 # while the groups and permissions shouldn't be changed because the code 70 # depends on them, what permissions go with what groups should be editable 71 72 73 def init_connectors(): 74 """access book data sources""" 75 models.Connector.objects.create( 76 identifier="bookwyrm.social", 77 name="BookWyrm dot Social", 78 connector_file="bookwyrm_connector", 79 base_url="https://bookwyrm.social", 80 books_url="https://bookwyrm.social/book", 81 covers_url="https://bookwyrm.social/images/", 82 search_url="https://bookwyrm.social/search?q=", 83 isbn_search_url="https://bookwyrm.social/isbn/", 84 priority=2, 85 ) 86 87 models.Connector.objects.create( 88 identifier="inventaire.io", 89 name="Inventaire", 90 connector_file="inventaire", 91 base_url="https://inventaire.io", 92 books_url="https://inventaire.io/api/entities", 93 covers_url="https://inventaire.io", 94 search_url="https://inventaire.io/api/search?types=works&types=works&search=", 95 isbn_search_url="https://inventaire.io/api/entities?action=by-uris&uris=isbn%3A", 96 priority=3, 97 ) 98 99 models.Connector.objects.create( 100 identifier="openlibrary.org", 101 name="OpenLibrary", 102 connector_file="openlibrary", 103 base_url="https://openlibrary.org", 104 books_url="https://openlibrary.org", 105 covers_url="https://covers.openlibrary.org", 106 search_url="https://openlibrary.org/search?q=", 107 isbn_search_url="https://openlibrary.org/api/books?jscmd=data&format=json&bibkeys=ISBN:", 108 priority=3, 109 ) 110 111 112 def init_federated_servers(): 113 """big no to nazis""" 114 built_in_blocks = ["gab.ai", "gab.com"] 115 for server in built_in_blocks: 116 models.FederatedServer.objects.create( 117 server_name=server, 118 
status="blocked", 119 ) 120 121 122 def init_settings(): 123 """info about the instance""" 124 models.SiteSettings.objects.create( 125 support_link="https://www.patreon.com/bookwyrm", 126 support_title="Patreon", 127 ) 128 129 130 def init_link_domains(*_): 131 """safe book links""" 132 domains = [ 133 ("standardebooks.org", "Standard EBooks"), 134 ("www.gutenberg.org", "Project Gutenberg"), 135 ("archive.org", "Internet Archive"), 136 ("openlibrary.org", "Open Library"), 137 ("theanarchistlibrary.org", "The Anarchist Library"), 138 ] 139 for domain, name in domains: 140 models.LinkDomain.objects.create( 141 domain=domain, 142 name=name, 143 status="approved", 144 ) 145 146 147 class Command(BaseCommand): 148 help = "Initializes the database with starter data" 149 150 def add_arguments(self, parser): 151 parser.add_argument( 152 "--limit", 153 default=None, 154 help="Limit init to specific table", 155 ) 156 157 def handle(self, *args, **options): 158 limit = options.get("limit") 159 tables = [ 160 "group", 161 "permission", 162 "connector", 163 "federatedserver", 164 "settings", 165 "linkdomain", 166 ] 167 if limit not in tables: 168 raise Exception("Invalid table limit:", limit) 169 170 if not limit or limit == "group": 171 init_groups() 172 if not limit or limit == "permission": 173 init_permissions() 174 if not limit or limit == "connector": 175 init_connectors() 176 if not limit or limit == "federatedserver": 177 init_federated_servers() 178 if not limit or limit == "settings": 179 init_settings() 180 if not limit or limit == "linkdomain": 181 init_link_domains() 182 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/bookwyrm/management/commands/initdb.py b/bookwyrm/management/commands/initdb.py --- a/bookwyrm/management/commands/initdb.py +++ b/bookwyrm/management/commands/initdb.py @@ -164,7 +164,7 @@ "settings", "linkdomain", ] - if limit not in tables: + if limit and limit not in tables: raise Exception("Invalid table limit:", limit) if not limit or limit == "group":
{"golden_diff": "diff --git a/bookwyrm/management/commands/initdb.py b/bookwyrm/management/commands/initdb.py\n--- a/bookwyrm/management/commands/initdb.py\n+++ b/bookwyrm/management/commands/initdb.py\n@@ -164,7 +164,7 @@\n \"settings\",\n \"linkdomain\",\n ]\n- if limit not in tables:\n+ if limit and limit not in tables:\n raise Exception(\"Invalid table limit:\", limit)\n \n if not limit or limit == \"group\":\n", "issue": "Invalid table limit error\n**Describe the bug**\r\nWhen running a fresh dev instance I get an `Invalid table limit` error, coming from `initdb.py`. Not sure if something is broken in the latest main branch, or I need to update my configuration.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. fetch latest `main` branch\r\n2. `./bw-dev resetdb`\r\n3. Get error (see below)\r\n\r\n**Expected behavior**\r\nBookWyrm resets database and new install works without errors.\r\n\r\n**Screenshots**\r\n```\r\n Applying sessions.0001_initial... OK\r\n+ execweb python manage.py initdb\r\n+ docker-compose exec web python manage.py initdb\r\nTraceback (most recent call last):\r\n File \"/app/manage.py\", line 18, in <module>\r\n execute_from_command_line(sys.argv)\r\n File \"/usr/local/lib/python3.9/site-packages/django/core/management/__init__.py\", line 419, in execute_from_command_line\r\n utility.execute()\r\n File \"/usr/local/lib/python3.9/site-packages/django/core/management/__init__.py\", line 413, in execute\r\n self.fetch_command(subcommand).run_from_argv(self.argv)\r\n File \"/usr/local/lib/python3.9/site-packages/django/core/management/base.py\", line 354, in run_from_argv\r\n self.execute(*args, **cmd_options)\r\n File \"/usr/local/lib/python3.9/site-packages/django/core/management/base.py\", line 398, in execute\r\n output = self.handle(*args, **options)\r\n File \"/app/bookwyrm/management/commands/initdb.py\", line 168, in handle\r\n raise Exception(\"Invalid table limit:\", limit)\r\nException: ('Invalid table limit:', None)\r\n```\r\n\r\n**Instance**\r\nlocal development, current `main` branch.\r\n\r\n**Additional context**\r\nI initially started getting this error on a branch I was working on, but it's occuring on the latest `main` branch without any changes.\r\n\r\n---\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: MacOS\r\n\n", "before_files": [{"content": "\"\"\" What you need in the database to make it work \"\"\"\nfrom django.core.management.base import BaseCommand\nfrom django.contrib.auth.models import Group, Permission\nfrom django.contrib.contenttypes.models import ContentType\n\nfrom bookwyrm import models\n\n\ndef init_groups():\n \"\"\"permission levels\"\"\"\n groups = [\"admin\", \"moderator\", \"editor\"]\n for group in groups:\n Group.objects.create(name=group)\n\n\ndef init_permissions():\n \"\"\"permission types\"\"\"\n permissions = [\n {\n \"codename\": \"edit_instance_settings\",\n \"name\": \"change the instance info\",\n \"groups\": [\n \"admin\",\n ],\n },\n {\n \"codename\": \"set_user_group\",\n \"name\": \"change what group a user is in\",\n \"groups\": [\"admin\", \"moderator\"],\n },\n {\n \"codename\": \"control_federation\",\n \"name\": \"control who to federate with\",\n \"groups\": [\"admin\", \"moderator\"],\n },\n {\n \"codename\": \"create_invites\",\n \"name\": \"issue invitations to join\",\n \"groups\": [\"admin\", \"moderator\"],\n },\n {\n \"codename\": \"moderate_user\",\n \"name\": \"deactivate or silence a user\",\n \"groups\": [\"admin\", \"moderator\"],\n },\n {\n \"codename\": 
\"moderate_post\",\n \"name\": \"delete other users' posts\",\n \"groups\": [\"admin\", \"moderator\"],\n },\n {\n \"codename\": \"edit_book\",\n \"name\": \"edit book info\",\n \"groups\": [\"admin\", \"moderator\", \"editor\"],\n },\n ]\n\n content_type = models.ContentType.objects.get_for_model(User)\n for permission in permissions:\n permission_obj = Permission.objects.create(\n codename=permission[\"codename\"],\n name=permission[\"name\"],\n content_type=content_type,\n )\n # add the permission to the appropriate groups\n for group_name in permission[\"groups\"]:\n Group.objects.get(name=group_name).permissions.add(permission_obj)\n\n # while the groups and permissions shouldn't be changed because the code\n # depends on them, what permissions go with what groups should be editable\n\n\ndef init_connectors():\n \"\"\"access book data sources\"\"\"\n models.Connector.objects.create(\n identifier=\"bookwyrm.social\",\n name=\"BookWyrm dot Social\",\n connector_file=\"bookwyrm_connector\",\n base_url=\"https://bookwyrm.social\",\n books_url=\"https://bookwyrm.social/book\",\n covers_url=\"https://bookwyrm.social/images/\",\n search_url=\"https://bookwyrm.social/search?q=\",\n isbn_search_url=\"https://bookwyrm.social/isbn/\",\n priority=2,\n )\n\n models.Connector.objects.create(\n identifier=\"inventaire.io\",\n name=\"Inventaire\",\n connector_file=\"inventaire\",\n base_url=\"https://inventaire.io\",\n books_url=\"https://inventaire.io/api/entities\",\n covers_url=\"https://inventaire.io\",\n search_url=\"https://inventaire.io/api/search?types=works&types=works&search=\",\n isbn_search_url=\"https://inventaire.io/api/entities?action=by-uris&uris=isbn%3A\",\n priority=3,\n )\n\n models.Connector.objects.create(\n identifier=\"openlibrary.org\",\n name=\"OpenLibrary\",\n connector_file=\"openlibrary\",\n base_url=\"https://openlibrary.org\",\n books_url=\"https://openlibrary.org\",\n covers_url=\"https://covers.openlibrary.org\",\n search_url=\"https://openlibrary.org/search?q=\",\n isbn_search_url=\"https://openlibrary.org/api/books?jscmd=data&format=json&bibkeys=ISBN:\",\n priority=3,\n )\n\n\ndef init_federated_servers():\n \"\"\"big no to nazis\"\"\"\n built_in_blocks = [\"gab.ai\", \"gab.com\"]\n for server in built_in_blocks:\n models.FederatedServer.objects.create(\n server_name=server,\n status=\"blocked\",\n )\n\n\ndef init_settings():\n \"\"\"info about the instance\"\"\"\n models.SiteSettings.objects.create(\n support_link=\"https://www.patreon.com/bookwyrm\",\n support_title=\"Patreon\",\n )\n\n\ndef init_link_domains(*_):\n \"\"\"safe book links\"\"\"\n domains = [\n (\"standardebooks.org\", \"Standard EBooks\"),\n (\"www.gutenberg.org\", \"Project Gutenberg\"),\n (\"archive.org\", \"Internet Archive\"),\n (\"openlibrary.org\", \"Open Library\"),\n (\"theanarchistlibrary.org\", \"The Anarchist Library\"),\n ]\n for domain, name in domains:\n models.LinkDomain.objects.create(\n domain=domain,\n name=name,\n status=\"approved\",\n )\n\n\nclass Command(BaseCommand):\n help = \"Initializes the database with starter data\"\n\n def add_arguments(self, parser):\n parser.add_argument(\n \"--limit\",\n default=None,\n help=\"Limit init to specific table\",\n )\n\n def handle(self, *args, **options):\n limit = options.get(\"limit\")\n tables = [\n \"group\",\n \"permission\",\n \"connector\",\n \"federatedserver\",\n \"settings\",\n \"linkdomain\",\n ]\n if limit not in tables:\n raise Exception(\"Invalid table limit:\", limit)\n\n if not limit or limit == \"group\":\n 
init_groups()\n if not limit or limit == \"permission\":\n init_permissions()\n if not limit or limit == \"connector\":\n init_connectors()\n if not limit or limit == \"federatedserver\":\n init_federated_servers()\n if not limit or limit == \"settings\":\n init_settings()\n if not limit or limit == \"linkdomain\":\n init_link_domains()\n", "path": "bookwyrm/management/commands/initdb.py"}], "after_files": [{"content": "\"\"\" What you need in the database to make it work \"\"\"\nfrom django.core.management.base import BaseCommand\nfrom django.contrib.auth.models import Group, Permission\nfrom django.contrib.contenttypes.models import ContentType\n\nfrom bookwyrm import models\n\n\ndef init_groups():\n \"\"\"permission levels\"\"\"\n groups = [\"admin\", \"moderator\", \"editor\"]\n for group in groups:\n Group.objects.create(name=group)\n\n\ndef init_permissions():\n \"\"\"permission types\"\"\"\n permissions = [\n {\n \"codename\": \"edit_instance_settings\",\n \"name\": \"change the instance info\",\n \"groups\": [\n \"admin\",\n ],\n },\n {\n \"codename\": \"set_user_group\",\n \"name\": \"change what group a user is in\",\n \"groups\": [\"admin\", \"moderator\"],\n },\n {\n \"codename\": \"control_federation\",\n \"name\": \"control who to federate with\",\n \"groups\": [\"admin\", \"moderator\"],\n },\n {\n \"codename\": \"create_invites\",\n \"name\": \"issue invitations to join\",\n \"groups\": [\"admin\", \"moderator\"],\n },\n {\n \"codename\": \"moderate_user\",\n \"name\": \"deactivate or silence a user\",\n \"groups\": [\"admin\", \"moderator\"],\n },\n {\n \"codename\": \"moderate_post\",\n \"name\": \"delete other users' posts\",\n \"groups\": [\"admin\", \"moderator\"],\n },\n {\n \"codename\": \"edit_book\",\n \"name\": \"edit book info\",\n \"groups\": [\"admin\", \"moderator\", \"editor\"],\n },\n ]\n\n content_type = models.ContentType.objects.get_for_model(User)\n for permission in permissions:\n permission_obj = Permission.objects.create(\n codename=permission[\"codename\"],\n name=permission[\"name\"],\n content_type=content_type,\n )\n # add the permission to the appropriate groups\n for group_name in permission[\"groups\"]:\n Group.objects.get(name=group_name).permissions.add(permission_obj)\n\n # while the groups and permissions shouldn't be changed because the code\n # depends on them, what permissions go with what groups should be editable\n\n\ndef init_connectors():\n \"\"\"access book data sources\"\"\"\n models.Connector.objects.create(\n identifier=\"bookwyrm.social\",\n name=\"BookWyrm dot Social\",\n connector_file=\"bookwyrm_connector\",\n base_url=\"https://bookwyrm.social\",\n books_url=\"https://bookwyrm.social/book\",\n covers_url=\"https://bookwyrm.social/images/\",\n search_url=\"https://bookwyrm.social/search?q=\",\n isbn_search_url=\"https://bookwyrm.social/isbn/\",\n priority=2,\n )\n\n models.Connector.objects.create(\n identifier=\"inventaire.io\",\n name=\"Inventaire\",\n connector_file=\"inventaire\",\n base_url=\"https://inventaire.io\",\n books_url=\"https://inventaire.io/api/entities\",\n covers_url=\"https://inventaire.io\",\n search_url=\"https://inventaire.io/api/search?types=works&types=works&search=\",\n isbn_search_url=\"https://inventaire.io/api/entities?action=by-uris&uris=isbn%3A\",\n priority=3,\n )\n\n models.Connector.objects.create(\n identifier=\"openlibrary.org\",\n name=\"OpenLibrary\",\n connector_file=\"openlibrary\",\n base_url=\"https://openlibrary.org\",\n books_url=\"https://openlibrary.org\",\n 
covers_url=\"https://covers.openlibrary.org\",\n search_url=\"https://openlibrary.org/search?q=\",\n isbn_search_url=\"https://openlibrary.org/api/books?jscmd=data&format=json&bibkeys=ISBN:\",\n priority=3,\n )\n\n\ndef init_federated_servers():\n \"\"\"big no to nazis\"\"\"\n built_in_blocks = [\"gab.ai\", \"gab.com\"]\n for server in built_in_blocks:\n models.FederatedServer.objects.create(\n server_name=server,\n status=\"blocked\",\n )\n\n\ndef init_settings():\n \"\"\"info about the instance\"\"\"\n models.SiteSettings.objects.create(\n support_link=\"https://www.patreon.com/bookwyrm\",\n support_title=\"Patreon\",\n )\n\n\ndef init_link_domains(*_):\n \"\"\"safe book links\"\"\"\n domains = [\n (\"standardebooks.org\", \"Standard EBooks\"),\n (\"www.gutenberg.org\", \"Project Gutenberg\"),\n (\"archive.org\", \"Internet Archive\"),\n (\"openlibrary.org\", \"Open Library\"),\n (\"theanarchistlibrary.org\", \"The Anarchist Library\"),\n ]\n for domain, name in domains:\n models.LinkDomain.objects.create(\n domain=domain,\n name=name,\n status=\"approved\",\n )\n\n\nclass Command(BaseCommand):\n help = \"Initializes the database with starter data\"\n\n def add_arguments(self, parser):\n parser.add_argument(\n \"--limit\",\n default=None,\n help=\"Limit init to specific table\",\n )\n\n def handle(self, *args, **options):\n limit = options.get(\"limit\")\n tables = [\n \"group\",\n \"permission\",\n \"connector\",\n \"federatedserver\",\n \"settings\",\n \"linkdomain\",\n ]\n if limit and limit not in tables:\n raise Exception(\"Invalid table limit:\", limit)\n\n if not limit or limit == \"group\":\n init_groups()\n if not limit or limit == \"permission\":\n init_permissions()\n if not limit or limit == \"connector\":\n init_connectors()\n if not limit or limit == \"federatedserver\":\n init_federated_servers()\n if not limit or limit == \"settings\":\n init_settings()\n if not limit or limit == \"linkdomain\":\n init_link_domains()\n", "path": "bookwyrm/management/commands/initdb.py"}]}
2,405
116
gh_patches_debug_41741
rasdani/github-patches
git_diff
python-telegram-bot__python-telegram-bot-3854
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [FEATURE] Add args present argument to command filter ### What kind of feature are you missing? Where do you notice a shortcoming of PTB? There is currently no filter defined by us which simply checks if a command message has args, e.g. `start payload` ### Describe the solution you'd like Add one argument to the command filter called `has_args` which checks if something comes after the command in the message. ### Describe alternatives you've considered One can do this filter by themselves, but providing one would be nice ### Additional context We could also start discussing if we want to provide some way to check the content of args, like a pattern match. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `telegram/ext/_commandhandler.py` Content: ``` 1 #!/usr/bin/env python 2 # 3 # A library that provides a Python interface to the Telegram Bot API 4 # Copyright (C) 2015-2023 5 # Leandro Toledo de Souza <[email protected]> 6 # 7 # This program is free software: you can redistribute it and/or modify 8 # it under the terms of the GNU Lesser Public License as published by 9 # the Free Software Foundation, either version 3 of the License, or 10 # (at your option) any later version. 11 # 12 # This program is distributed in the hope that it will be useful, 13 # but WITHOUT ANY WARRANTY; without even the implied warranty of 14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 # GNU Lesser Public License for more details. 16 # 17 # You should have received a copy of the GNU Lesser Public License 18 # along with this program. If not, see [http://www.gnu.org/licenses/]. 19 """This module contains the CommandHandler class.""" 20 import re 21 from typing import TYPE_CHECKING, Any, FrozenSet, List, Optional, Tuple, TypeVar, Union 22 23 from telegram import MessageEntity, Update 24 from telegram._utils.defaultvalue import DEFAULT_TRUE 25 from telegram._utils.types import SCT, DVType 26 from telegram.ext import filters as filters_module 27 from telegram.ext._basehandler import BaseHandler 28 from telegram.ext._utils.types import CCT, FilterDataDict, HandlerCallback 29 30 if TYPE_CHECKING: 31 from telegram.ext import Application 32 33 RT = TypeVar("RT") 34 35 36 class CommandHandler(BaseHandler[Update, CCT]): 37 """BaseHandler class to handle Telegram commands. 38 39 Commands are Telegram messages that start with ``/``, optionally followed by an ``@`` and the 40 bot's name and/or some additional text. The handler will add a :obj:`list` to the 41 :class:`CallbackContext` named :attr:`CallbackContext.args`. It will contain a list of strings, 42 which is the text following the command split on single or consecutive whitespace characters. 43 44 By default, the handler listens to messages as well as edited messages. To change this behavior 45 use :attr:`~filters.UpdateType.EDITED_MESSAGE <telegram.ext.filters.UpdateType.EDITED_MESSAGE>` 46 in the filter argument. 47 48 Note: 49 :class:`CommandHandler` does *not* handle (edited) channel posts and does *not* handle 50 commands that are part of a caption. Please use :class:`~telegram.ext.MessageHandler` 51 with a suitable combination of filters (e.g. 52 :attr:`telegram.ext.filters.UpdateType.CHANNEL_POSTS`, 53 :attr:`telegram.ext.filters.CAPTION` and :class:`telegram.ext.filters.Regex`) to handle 54 those messages. 
55 56 Warning: 57 When setting :paramref:`block` to :obj:`False`, you cannot rely on adding custom 58 attributes to :class:`telegram.ext.CallbackContext`. See its docs for more info. 59 60 Examples: 61 * :any:`Timer Bot <examples.timerbot>` 62 * :any:`Error Handler Bot <examples.errorhandlerbot>` 63 64 .. versionchanged:: 20.0 65 66 * Renamed the attribute ``command`` to :attr:`commands`, which now is always a 67 :class:`frozenset` 68 * Updating the commands this handler listens to is no longer possible. 69 70 Args: 71 command (:obj:`str` | Collection[:obj:`str`]): 72 The command or list of commands this handler should listen for. Case-insensitive. 73 Limitations are the same as for :attr:`telegram.BotCommand.command`. 74 callback (:term:`coroutine function`): The callback function for this handler. Will be 75 called when :meth:`check_update` has determined that an update should be processed by 76 this handler. Callback signature:: 77 78 async def callback(update: Update, context: CallbackContext) 79 80 The return value of the callback is usually ignored except for the special case of 81 :class:`telegram.ext.ConversationHandler`. 82 filters (:class:`telegram.ext.filters.BaseFilter`, optional): A filter inheriting from 83 :class:`telegram.ext.filters.BaseFilter`. Standard filters can be found in 84 :mod:`telegram.ext.filters`. Filters can be combined using bitwise 85 operators (``&`` for :keyword:`and`, ``|`` for :keyword:`or`, ``~`` for :keyword:`not`) 86 block (:obj:`bool`, optional): Determines whether the return value of the callback should 87 be awaited before processing the next handler in 88 :meth:`telegram.ext.Application.process_update`. Defaults to :obj:`True`. 89 90 .. seealso:: :wiki:`Concurrency` 91 92 Raises: 93 :exc:`ValueError`: When the command is too long or has illegal chars. 94 95 Attributes: 96 commands (FrozenSet[:obj:`str`]): The set of commands this handler should listen for. 97 callback (:term:`coroutine function`): The callback function for this handler. 98 filters (:class:`telegram.ext.filters.BaseFilter`): Optional. Only allow updates with these 99 Filters. 100 block (:obj:`bool`): Determines whether the return value of the callback should be 101 awaited before processing the next handler in 102 :meth:`telegram.ext.Application.process_update`. 103 """ 104 105 __slots__ = ("commands", "filters") 106 107 def __init__( 108 self, 109 command: SCT[str], 110 callback: HandlerCallback[Update, CCT, RT], 111 filters: Optional[filters_module.BaseFilter] = None, 112 block: DVType[bool] = DEFAULT_TRUE, 113 ): 114 super().__init__(callback, block=block) 115 116 if isinstance(command, str): 117 commands = frozenset({command.lower()}) 118 else: 119 commands = frozenset(x.lower() for x in command) 120 for comm in commands: 121 if not re.match(r"^[\da-z_]{1,32}$", comm): 122 raise ValueError(f"Command `{comm}` is not a valid bot command") 123 self.commands: FrozenSet[str] = commands 124 125 self.filters: filters_module.BaseFilter = ( 126 filters if filters is not None else filters_module.UpdateType.MESSAGES 127 ) 128 129 def check_update( 130 self, update: object 131 ) -> Optional[Union[bool, Tuple[List[str], Optional[Union[bool, FilterDataDict]]]]]: 132 """Determines whether an update should be passed to this handler's :attr:`callback`. 133 134 Args: 135 update (:class:`telegram.Update` | :obj:`object`): Incoming update. 136 137 Returns: 138 :obj:`list`: The list of args for the handler. 
139 140 """ 141 if isinstance(update, Update) and update.effective_message: 142 message = update.effective_message 143 144 if ( 145 message.entities 146 and message.entities[0].type == MessageEntity.BOT_COMMAND 147 and message.entities[0].offset == 0 148 and message.text 149 and message.get_bot() 150 ): 151 command = message.text[1 : message.entities[0].length] 152 args = message.text.split()[1:] 153 command_parts = command.split("@") 154 command_parts.append(message.get_bot().username) 155 156 if not ( 157 command_parts[0].lower() in self.commands 158 and command_parts[1].lower() == message.get_bot().username.lower() 159 ): 160 return None 161 162 filter_result = self.filters.check_update(update) 163 if filter_result: 164 return args, filter_result 165 return False 166 return None 167 168 def collect_additional_context( 169 self, 170 context: CCT, 171 update: Update, # skipcq: BAN-B301 172 application: "Application[Any, CCT, Any, Any, Any, Any]", # skipcq: BAN-B301 173 check_result: Optional[Union[bool, Tuple[List[str], Optional[bool]]]], 174 ) -> None: 175 """Add text after the command to :attr:`CallbackContext.args` as list, split on single 176 whitespaces and add output of data filters to :attr:`CallbackContext` as well. 177 """ 178 if isinstance(check_result, tuple): 179 context.args = check_result[0] 180 if isinstance(check_result[1], dict): 181 context.update(check_result[1]) 182 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/telegram/ext/_commandhandler.py b/telegram/ext/_commandhandler.py --- a/telegram/ext/_commandhandler.py +++ b/telegram/ext/_commandhandler.py @@ -88,6 +88,14 @@ :meth:`telegram.ext.Application.process_update`. Defaults to :obj:`True`. .. seealso:: :wiki:`Concurrency` + has_args (:obj:`bool` | :obj:`int`, optional): + Determines whether the command handler should process the update or not. + If :obj:`True`, the handler will process any non-zero number of args. + If :obj:`False`, the handler will only process if there are no args. + if :obj:`int`, the handler will only process if there are exactly that many args. + Defaults to :obj:`None`, which means the handler will process any or no args. + + .. versionadded:: NEXT.VERSION Raises: :exc:`ValueError`: When the command is too long or has illegal chars. @@ -100,9 +108,14 @@ block (:obj:`bool`): Determines whether the return value of the callback should be awaited before processing the next handler in :meth:`telegram.ext.Application.process_update`. + has_args (:obj:`bool` | :obj:`int` | None): + Optional argument, otherwise all implementations of :class:`CommandHandler` will break. + Defaults to :obj:`None`, which means the handler will process any args or no args. + + .. versionadded:: NEXT.VERSION """ - __slots__ = ("commands", "filters") + __slots__ = ("commands", "filters", "has_args") def __init__( self, @@ -110,6 +123,7 @@ callback: HandlerCallback[Update, CCT, RT], filters: Optional[filters_module.BaseFilter] = None, block: DVType[bool] = DEFAULT_TRUE, + has_args: Optional[Union[bool, int]] = None, ): super().__init__(callback, block=block) @@ -126,6 +140,28 @@ filters if filters is not None else filters_module.UpdateType.MESSAGES ) + self.has_args: Optional[Union[bool, int]] = has_args + + if (isinstance(self.has_args, int)) and (self.has_args < 0): + raise ValueError("CommandHandler argument has_args cannot be a negative integer") + + def _check_correct_args(self, args: List[str]) -> Optional[bool]: + """Determines whether the args are correct for this handler. Implemented in check_update(). + Args: + args (:obj:`list`): The args for the handler. + Returns: + :obj:`bool`: Whether the args are valid for this handler. + """ + # pylint: disable=too-many-boolean-expressions + if ( + (self.has_args is None) + or (self.has_args is True and args) + or (self.has_args is False and not args) + or (isinstance(self.has_args, int) and len(args) == self.has_args) + ): + return True + return False + def check_update( self, update: object ) -> Optional[Union[bool, Tuple[List[str], Optional[Union[bool, FilterDataDict]]]]]: @@ -159,6 +195,9 @@ ): return None + if not self._check_correct_args(args): + return None + filter_result = self.filters.check_update(update) if filter_result: return args, filter_result
{"golden_diff": "diff --git a/telegram/ext/_commandhandler.py b/telegram/ext/_commandhandler.py\n--- a/telegram/ext/_commandhandler.py\n+++ b/telegram/ext/_commandhandler.py\n@@ -88,6 +88,14 @@\n :meth:`telegram.ext.Application.process_update`. Defaults to :obj:`True`.\n \n .. seealso:: :wiki:`Concurrency`\n+ has_args (:obj:`bool` | :obj:`int`, optional):\n+ Determines whether the command handler should process the update or not.\n+ If :obj:`True`, the handler will process any non-zero number of args.\n+ If :obj:`False`, the handler will only process if there are no args.\n+ if :obj:`int`, the handler will only process if there are exactly that many args.\n+ Defaults to :obj:`None`, which means the handler will process any or no args.\n+\n+ .. versionadded:: NEXT.VERSION\n \n Raises:\n :exc:`ValueError`: When the command is too long or has illegal chars.\n@@ -100,9 +108,14 @@\n block (:obj:`bool`): Determines whether the return value of the callback should be\n awaited before processing the next handler in\n :meth:`telegram.ext.Application.process_update`.\n+ has_args (:obj:`bool` | :obj:`int` | None):\n+ Optional argument, otherwise all implementations of :class:`CommandHandler` will break.\n+ Defaults to :obj:`None`, which means the handler will process any args or no args.\n+\n+ .. versionadded:: NEXT.VERSION\n \"\"\"\n \n- __slots__ = (\"commands\", \"filters\")\n+ __slots__ = (\"commands\", \"filters\", \"has_args\")\n \n def __init__(\n self,\n@@ -110,6 +123,7 @@\n callback: HandlerCallback[Update, CCT, RT],\n filters: Optional[filters_module.BaseFilter] = None,\n block: DVType[bool] = DEFAULT_TRUE,\n+ has_args: Optional[Union[bool, int]] = None,\n ):\n super().__init__(callback, block=block)\n \n@@ -126,6 +140,28 @@\n filters if filters is not None else filters_module.UpdateType.MESSAGES\n )\n \n+ self.has_args: Optional[Union[bool, int]] = has_args\n+\n+ if (isinstance(self.has_args, int)) and (self.has_args < 0):\n+ raise ValueError(\"CommandHandler argument has_args cannot be a negative integer\")\n+\n+ def _check_correct_args(self, args: List[str]) -> Optional[bool]:\n+ \"\"\"Determines whether the args are correct for this handler. Implemented in check_update().\n+ Args:\n+ args (:obj:`list`): The args for the handler.\n+ Returns:\n+ :obj:`bool`: Whether the args are valid for this handler.\n+ \"\"\"\n+ # pylint: disable=too-many-boolean-expressions\n+ if (\n+ (self.has_args is None)\n+ or (self.has_args is True and args)\n+ or (self.has_args is False and not args)\n+ or (isinstance(self.has_args, int) and len(args) == self.has_args)\n+ ):\n+ return True\n+ return False\n+\n def check_update(\n self, update: object\n ) -> Optional[Union[bool, Tuple[List[str], Optional[Union[bool, FilterDataDict]]]]]:\n@@ -159,6 +195,9 @@\n ):\n return None\n \n+ if not self._check_correct_args(args):\n+ return None\n+\n filter_result = self.filters.check_update(update)\n if filter_result:\n return args, filter_result\n", "issue": "[FEATURE] Add args present argument to command filter \n### What kind of feature are you missing? Where do you notice a shortcoming of PTB?\n\nThere is currently no filter defined by us which simply checks if a command message has args, e.g. 
`start payload`\n\n### Describe the solution you'd like\n\nAdd one argument to the command filter called `has_args` which checks if something comes after the command in the message.\n\n### Describe alternatives you've considered\n\nOne can do this filter by themselves, but providing one would be nice\n\n### Additional context\n\nWe could also start discussing if we want to provide some way to check the content of args, like a pattern match.\n", "before_files": [{"content": "#!/usr/bin/env python\n#\n# A library that provides a Python interface to the Telegram Bot API\n# Copyright (C) 2015-2023\n# Leandro Toledo de Souza <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser Public License for more details.\n#\n# You should have received a copy of the GNU Lesser Public License\n# along with this program. If not, see [http://www.gnu.org/licenses/].\n\"\"\"This module contains the CommandHandler class.\"\"\"\nimport re\nfrom typing import TYPE_CHECKING, Any, FrozenSet, List, Optional, Tuple, TypeVar, Union\n\nfrom telegram import MessageEntity, Update\nfrom telegram._utils.defaultvalue import DEFAULT_TRUE\nfrom telegram._utils.types import SCT, DVType\nfrom telegram.ext import filters as filters_module\nfrom telegram.ext._basehandler import BaseHandler\nfrom telegram.ext._utils.types import CCT, FilterDataDict, HandlerCallback\n\nif TYPE_CHECKING:\n from telegram.ext import Application\n\nRT = TypeVar(\"RT\")\n\n\nclass CommandHandler(BaseHandler[Update, CCT]):\n \"\"\"BaseHandler class to handle Telegram commands.\n\n Commands are Telegram messages that start with ``/``, optionally followed by an ``@`` and the\n bot's name and/or some additional text. The handler will add a :obj:`list` to the\n :class:`CallbackContext` named :attr:`CallbackContext.args`. It will contain a list of strings,\n which is the text following the command split on single or consecutive whitespace characters.\n\n By default, the handler listens to messages as well as edited messages. To change this behavior\n use :attr:`~filters.UpdateType.EDITED_MESSAGE <telegram.ext.filters.UpdateType.EDITED_MESSAGE>`\n in the filter argument.\n\n Note:\n :class:`CommandHandler` does *not* handle (edited) channel posts and does *not* handle\n commands that are part of a caption. Please use :class:`~telegram.ext.MessageHandler`\n with a suitable combination of filters (e.g.\n :attr:`telegram.ext.filters.UpdateType.CHANNEL_POSTS`,\n :attr:`telegram.ext.filters.CAPTION` and :class:`telegram.ext.filters.Regex`) to handle\n those messages.\n\n Warning:\n When setting :paramref:`block` to :obj:`False`, you cannot rely on adding custom\n attributes to :class:`telegram.ext.CallbackContext`. See its docs for more info.\n\n Examples:\n * :any:`Timer Bot <examples.timerbot>`\n * :any:`Error Handler Bot <examples.errorhandlerbot>`\n\n .. 
versionchanged:: 20.0\n\n * Renamed the attribute ``command`` to :attr:`commands`, which now is always a\n :class:`frozenset`\n * Updating the commands this handler listens to is no longer possible.\n\n Args:\n command (:obj:`str` | Collection[:obj:`str`]):\n The command or list of commands this handler should listen for. Case-insensitive.\n Limitations are the same as for :attr:`telegram.BotCommand.command`.\n callback (:term:`coroutine function`): The callback function for this handler. Will be\n called when :meth:`check_update` has determined that an update should be processed by\n this handler. Callback signature::\n\n async def callback(update: Update, context: CallbackContext)\n\n The return value of the callback is usually ignored except for the special case of\n :class:`telegram.ext.ConversationHandler`.\n filters (:class:`telegram.ext.filters.BaseFilter`, optional): A filter inheriting from\n :class:`telegram.ext.filters.BaseFilter`. Standard filters can be found in\n :mod:`telegram.ext.filters`. Filters can be combined using bitwise\n operators (``&`` for :keyword:`and`, ``|`` for :keyword:`or`, ``~`` for :keyword:`not`)\n block (:obj:`bool`, optional): Determines whether the return value of the callback should\n be awaited before processing the next handler in\n :meth:`telegram.ext.Application.process_update`. Defaults to :obj:`True`.\n\n .. seealso:: :wiki:`Concurrency`\n\n Raises:\n :exc:`ValueError`: When the command is too long or has illegal chars.\n\n Attributes:\n commands (FrozenSet[:obj:`str`]): The set of commands this handler should listen for.\n callback (:term:`coroutine function`): The callback function for this handler.\n filters (:class:`telegram.ext.filters.BaseFilter`): Optional. Only allow updates with these\n Filters.\n block (:obj:`bool`): Determines whether the return value of the callback should be\n awaited before processing the next handler in\n :meth:`telegram.ext.Application.process_update`.\n \"\"\"\n\n __slots__ = (\"commands\", \"filters\")\n\n def __init__(\n self,\n command: SCT[str],\n callback: HandlerCallback[Update, CCT, RT],\n filters: Optional[filters_module.BaseFilter] = None,\n block: DVType[bool] = DEFAULT_TRUE,\n ):\n super().__init__(callback, block=block)\n\n if isinstance(command, str):\n commands = frozenset({command.lower()})\n else:\n commands = frozenset(x.lower() for x in command)\n for comm in commands:\n if not re.match(r\"^[\\da-z_]{1,32}$\", comm):\n raise ValueError(f\"Command `{comm}` is not a valid bot command\")\n self.commands: FrozenSet[str] = commands\n\n self.filters: filters_module.BaseFilter = (\n filters if filters is not None else filters_module.UpdateType.MESSAGES\n )\n\n def check_update(\n self, update: object\n ) -> Optional[Union[bool, Tuple[List[str], Optional[Union[bool, FilterDataDict]]]]]:\n \"\"\"Determines whether an update should be passed to this handler's :attr:`callback`.\n\n Args:\n update (:class:`telegram.Update` | :obj:`object`): Incoming update.\n\n Returns:\n :obj:`list`: The list of args for the handler.\n\n \"\"\"\n if isinstance(update, Update) and update.effective_message:\n message = update.effective_message\n\n if (\n message.entities\n and message.entities[0].type == MessageEntity.BOT_COMMAND\n and message.entities[0].offset == 0\n and message.text\n and message.get_bot()\n ):\n command = message.text[1 : message.entities[0].length]\n args = message.text.split()[1:]\n command_parts = command.split(\"@\")\n command_parts.append(message.get_bot().username)\n\n if not (\n 
command_parts[0].lower() in self.commands\n and command_parts[1].lower() == message.get_bot().username.lower()\n ):\n return None\n\n filter_result = self.filters.check_update(update)\n if filter_result:\n return args, filter_result\n return False\n return None\n\n def collect_additional_context(\n self,\n context: CCT,\n update: Update, # skipcq: BAN-B301\n application: \"Application[Any, CCT, Any, Any, Any, Any]\", # skipcq: BAN-B301\n check_result: Optional[Union[bool, Tuple[List[str], Optional[bool]]]],\n ) -> None:\n \"\"\"Add text after the command to :attr:`CallbackContext.args` as list, split on single\n whitespaces and add output of data filters to :attr:`CallbackContext` as well.\n \"\"\"\n if isinstance(check_result, tuple):\n context.args = check_result[0]\n if isinstance(check_result[1], dict):\n context.update(check_result[1])\n", "path": "telegram/ext/_commandhandler.py"}], "after_files": [{"content": "#!/usr/bin/env python\n#\n# A library that provides a Python interface to the Telegram Bot API\n# Copyright (C) 2015-2023\n# Leandro Toledo de Souza <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser Public License for more details.\n#\n# You should have received a copy of the GNU Lesser Public License\n# along with this program. If not, see [http://www.gnu.org/licenses/].\n\"\"\"This module contains the CommandHandler class.\"\"\"\nimport re\nfrom typing import TYPE_CHECKING, Any, FrozenSet, List, Optional, Tuple, TypeVar, Union\n\nfrom telegram import MessageEntity, Update\nfrom telegram._utils.defaultvalue import DEFAULT_TRUE\nfrom telegram._utils.types import SCT, DVType\nfrom telegram.ext import filters as filters_module\nfrom telegram.ext._basehandler import BaseHandler\nfrom telegram.ext._utils.types import CCT, FilterDataDict, HandlerCallback\n\nif TYPE_CHECKING:\n from telegram.ext import Application\n\nRT = TypeVar(\"RT\")\n\n\nclass CommandHandler(BaseHandler[Update, CCT]):\n \"\"\"BaseHandler class to handle Telegram commands.\n\n Commands are Telegram messages that start with ``/``, optionally followed by an ``@`` and the\n bot's name and/or some additional text. The handler will add a :obj:`list` to the\n :class:`CallbackContext` named :attr:`CallbackContext.args`. It will contain a list of strings,\n which is the text following the command split on single or consecutive whitespace characters.\n\n By default, the handler listens to messages as well as edited messages. To change this behavior\n use :attr:`~filters.UpdateType.EDITED_MESSAGE <telegram.ext.filters.UpdateType.EDITED_MESSAGE>`\n in the filter argument.\n\n Note:\n :class:`CommandHandler` does *not* handle (edited) channel posts and does *not* handle\n commands that are part of a caption. 
Please use :class:`~telegram.ext.MessageHandler`\n with a suitable combination of filters (e.g.\n :attr:`telegram.ext.filters.UpdateType.CHANNEL_POSTS`,\n :attr:`telegram.ext.filters.CAPTION` and :class:`telegram.ext.filters.Regex`) to handle\n those messages.\n\n Warning:\n When setting :paramref:`block` to :obj:`False`, you cannot rely on adding custom\n attributes to :class:`telegram.ext.CallbackContext`. See its docs for more info.\n\n Examples:\n * :any:`Timer Bot <examples.timerbot>`\n * :any:`Error Handler Bot <examples.errorhandlerbot>`\n\n .. versionchanged:: 20.0\n\n * Renamed the attribute ``command`` to :attr:`commands`, which now is always a\n :class:`frozenset`\n * Updating the commands this handler listens to is no longer possible.\n\n Args:\n command (:obj:`str` | Collection[:obj:`str`]):\n The command or list of commands this handler should listen for. Case-insensitive.\n Limitations are the same as for :attr:`telegram.BotCommand.command`.\n callback (:term:`coroutine function`): The callback function for this handler. Will be\n called when :meth:`check_update` has determined that an update should be processed by\n this handler. Callback signature::\n\n async def callback(update: Update, context: CallbackContext)\n\n The return value of the callback is usually ignored except for the special case of\n :class:`telegram.ext.ConversationHandler`.\n filters (:class:`telegram.ext.filters.BaseFilter`, optional): A filter inheriting from\n :class:`telegram.ext.filters.BaseFilter`. Standard filters can be found in\n :mod:`telegram.ext.filters`. Filters can be combined using bitwise\n operators (``&`` for :keyword:`and`, ``|`` for :keyword:`or`, ``~`` for :keyword:`not`)\n block (:obj:`bool`, optional): Determines whether the return value of the callback should\n be awaited before processing the next handler in\n :meth:`telegram.ext.Application.process_update`. Defaults to :obj:`True`.\n\n .. seealso:: :wiki:`Concurrency`\n has_args (:obj:`bool` | :obj:`int`, optional):\n Determines whether the command handler should process the update or not.\n If :obj:`True`, the handler will process any non-zero number of args.\n If :obj:`False`, the handler will only process if there are no args.\n if :obj:`int`, the handler will only process if there are exactly that many args.\n Defaults to :obj:`None`, which means the handler will process any or no args.\n\n .. versionadded:: NEXT.VERSION\n\n Raises:\n :exc:`ValueError`: When the command is too long or has illegal chars.\n\n Attributes:\n commands (FrozenSet[:obj:`str`]): The set of commands this handler should listen for.\n callback (:term:`coroutine function`): The callback function for this handler.\n filters (:class:`telegram.ext.filters.BaseFilter`): Optional. Only allow updates with these\n Filters.\n block (:obj:`bool`): Determines whether the return value of the callback should be\n awaited before processing the next handler in\n :meth:`telegram.ext.Application.process_update`.\n has_args (:obj:`bool` | :obj:`int` | None):\n Optional argument, otherwise all implementations of :class:`CommandHandler` will break.\n Defaults to :obj:`None`, which means the handler will process any args or no args.\n\n .. 
versionadded:: NEXT.VERSION\n \"\"\"\n\n __slots__ = (\"commands\", \"filters\", \"has_args\")\n\n def __init__(\n self,\n command: SCT[str],\n callback: HandlerCallback[Update, CCT, RT],\n filters: Optional[filters_module.BaseFilter] = None,\n block: DVType[bool] = DEFAULT_TRUE,\n has_args: Optional[Union[bool, int]] = None,\n ):\n super().__init__(callback, block=block)\n\n if isinstance(command, str):\n commands = frozenset({command.lower()})\n else:\n commands = frozenset(x.lower() for x in command)\n for comm in commands:\n if not re.match(r\"^[\\da-z_]{1,32}$\", comm):\n raise ValueError(f\"Command `{comm}` is not a valid bot command\")\n self.commands: FrozenSet[str] = commands\n\n self.filters: filters_module.BaseFilter = (\n filters if filters is not None else filters_module.UpdateType.MESSAGES\n )\n\n self.has_args: Optional[Union[bool, int]] = has_args\n\n if (isinstance(self.has_args, int)) and (self.has_args < 0):\n raise ValueError(\"CommandHandler argument has_args cannot be a negative integer\")\n\n def _check_correct_args(self, args: List[str]) -> Optional[bool]:\n \"\"\"Determines whether the args are correct for this handler. Implemented in check_update().\n Args:\n args (:obj:`list`): The args for the handler.\n Returns:\n :obj:`bool`: Whether the args are valid for this handler.\n \"\"\"\n # pylint: disable=too-many-boolean-expressions\n if (\n (self.has_args is None)\n or (self.has_args is True and args)\n or (self.has_args is False and not args)\n or (isinstance(self.has_args, int) and len(args) == self.has_args)\n ):\n return True\n return False\n\n def check_update(\n self, update: object\n ) -> Optional[Union[bool, Tuple[List[str], Optional[Union[bool, FilterDataDict]]]]]:\n \"\"\"Determines whether an update should be passed to this handler's :attr:`callback`.\n\n Args:\n update (:class:`telegram.Update` | :obj:`object`): Incoming update.\n\n Returns:\n :obj:`list`: The list of args for the handler.\n\n \"\"\"\n if isinstance(update, Update) and update.effective_message:\n message = update.effective_message\n\n if (\n message.entities\n and message.entities[0].type == MessageEntity.BOT_COMMAND\n and message.entities[0].offset == 0\n and message.text\n and message.get_bot()\n ):\n command = message.text[1 : message.entities[0].length]\n args = message.text.split()[1:]\n command_parts = command.split(\"@\")\n command_parts.append(message.get_bot().username)\n\n if not (\n command_parts[0].lower() in self.commands\n and command_parts[1].lower() == message.get_bot().username.lower()\n ):\n return None\n\n if not self._check_correct_args(args):\n return None\n\n filter_result = self.filters.check_update(update)\n if filter_result:\n return args, filter_result\n return False\n return None\n\n def collect_additional_context(\n self,\n context: CCT,\n update: Update, # skipcq: BAN-B301\n application: \"Application[Any, CCT, Any, Any, Any, Any]\", # skipcq: BAN-B301\n check_result: Optional[Union[bool, Tuple[List[str], Optional[bool]]]],\n ) -> None:\n \"\"\"Add text after the command to :attr:`CallbackContext.args` as list, split on single\n whitespaces and add output of data filters to :attr:`CallbackContext` as well.\n \"\"\"\n if isinstance(check_result, tuple):\n context.args = check_result[0]\n if isinstance(check_result[1], dict):\n context.update(check_result[1])\n", "path": "telegram/ext/_commandhandler.py"}]}
num_tokens: 2,583
num_tokens_diff: 816
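A minimal standalone sketch of the `has_args` gate described in the record above. The semantics (None accepts any argument count, True requires at least one, False requires none, an int requires exactly that many) follow the `_check_correct_args` implementation shown in the verification info; the helper name `check_args` and the sample command are hypothetical:

```python
from typing import List, Optional, Union

def check_args(has_args: Optional[Union[bool, int]], args: List[str]) -> bool:
    # None: accept any number of arguments, including zero.
    if has_args is None:
        return True
    # True: require at least one argument; False: require exactly zero.
    if isinstance(has_args, bool):
        return bool(args) is has_args
    # int: require exactly that many arguments.
    return len(args) == has_args

# For a message like "/start foo bar" the handler would see args == ["foo", "bar"].
assert check_args(None, [])
assert check_args(True, ["foo", "bar"])
assert not check_args(2, ["foo"])
```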
gh_patches_debug_25384
rasdani/github-patches
git_diff
getsentry__sentry-python-168
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Support for custom CA bundles With the Raven client it is possible to [specify a custom CA bundle by appending the `ca_cert` parameter to the DSN](https://docs.sentry.io/clients/python/transports/). This is important for use of the client with on-premise installations of Sentry that use certificates signed by a custom CA. Sadly, [looking at `sentry_sdk.transport._make_pool`](https://github.com/getsentry/sentry-python/blob/30f339db3e76384e23fc951627c689197cb0e7d5/sentry_sdk/transport.py#L26), it seems this value is now hard-coded to `certifi.where()`. In result, users that previously used the `ca_cert` parameter are forced to stay on the Raven client. Thus, it would be great if you could (re-)add this feature. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `sentry_sdk/transport.py` Content: ``` 1 from __future__ import print_function 2 3 import json 4 import io 5 import urllib3 6 import certifi 7 import gzip 8 9 from datetime import datetime, timedelta 10 11 from sentry_sdk.consts import VERSION 12 from sentry_sdk.utils import Dsn, logger, capture_internal_exceptions 13 from sentry_sdk.worker import BackgroundWorker 14 15 try: 16 from urllib.request import getproxies 17 except ImportError: 18 from urllib import getproxies 19 20 21 def _make_pool(parsed_dsn, http_proxy, https_proxy): 22 proxy = https_proxy if parsed_dsn == "https" else http_proxy 23 if not proxy: 24 proxy = getproxies().get(parsed_dsn.scheme) 25 26 opts = {"num_pools": 2, "cert_reqs": "CERT_REQUIRED", "ca_certs": certifi.where()} 27 28 if proxy: 29 return urllib3.ProxyManager(proxy, **opts) 30 else: 31 return urllib3.PoolManager(**opts) 32 33 34 class Transport(object): 35 """Baseclass for all transports. 36 37 A transport is used to send an event to sentry. 38 """ 39 40 def __init__(self, options=None): 41 self.options = options 42 if options and options["dsn"]: 43 self.parsed_dsn = Dsn(options["dsn"]) 44 else: 45 self.parsed_dsn = None 46 47 def capture_event(self, event): 48 """This gets invoked with the event dictionary when an event should 49 be sent to sentry. 50 """ 51 raise NotImplementedError() 52 53 def shutdown(self, timeout, callback=None): 54 """Initiates a controlled shutdown that should flush out pending 55 events. The callback must be invoked with the number of pending 56 events and the timeout if the shutting down would take some period 57 of time (eg: not instant). 58 """ 59 self.kill() 60 61 def kill(self): 62 """Forcefully kills the transport.""" 63 pass 64 65 def copy(self): 66 """Copy the transport. 67 68 The returned transport should behave completely independent from the 69 previous one. It still may share HTTP connection pools, but not share 70 any state such as internal queues. 
71 """ 72 return self 73 74 def __del__(self): 75 try: 76 self.kill() 77 except Exception: 78 pass 79 80 81 class HttpTransport(Transport): 82 """The default HTTP transport.""" 83 84 def __init__(self, options): 85 Transport.__init__(self, options) 86 self._worker = BackgroundWorker() 87 self._auth = self.parsed_dsn.to_auth("sentry-python/%s" % VERSION) 88 self._pool = _make_pool( 89 self.parsed_dsn, 90 http_proxy=options["http_proxy"], 91 https_proxy=options["https_proxy"], 92 ) 93 self._disabled_until = None 94 self._retry = urllib3.util.Retry() 95 self.options = options 96 97 from sentry_sdk import Hub 98 99 self.hub_cls = Hub 100 101 def _send_event(self, event): 102 if self._disabled_until is not None: 103 if datetime.utcnow() < self._disabled_until: 104 return 105 self._disabled_until = None 106 107 body = io.BytesIO() 108 with gzip.GzipFile(fileobj=body, mode="w") as f: 109 f.write(json.dumps(event).encode("utf-8")) 110 111 logger.debug( 112 "Sending %s event [%s] to %s project:%s" 113 % ( 114 event.get("level") or "error", 115 event["event_id"], 116 self.parsed_dsn.host, 117 self.parsed_dsn.project_id, 118 ) 119 ) 120 response = self._pool.request( 121 "POST", 122 str(self._auth.store_api_url), 123 body=body.getvalue(), 124 headers={ 125 "X-Sentry-Auth": str(self._auth.to_header()), 126 "Content-Type": "application/json", 127 "Content-Encoding": "gzip", 128 }, 129 ) 130 131 try: 132 if response.status == 429: 133 self._disabled_until = datetime.utcnow() + timedelta( 134 seconds=self._retry.get_retry_after(response) 135 ) 136 return 137 138 elif response.status >= 300 or response.status < 200: 139 raise ValueError("Unexpected status code: %s" % response.status) 140 finally: 141 response.close() 142 143 self._disabled_until = None 144 145 def capture_event(self, event): 146 hub = self.hub_cls.current 147 148 def send_event_wrapper(): 149 with hub: 150 with capture_internal_exceptions(): 151 self._send_event(event) 152 153 self._worker.submit(send_event_wrapper) 154 155 def shutdown(self, timeout, callback=None): 156 logger.debug("Shutting down HTTP transport orderly") 157 if timeout <= 0: 158 self._worker.kill() 159 else: 160 self._worker.shutdown(timeout, callback) 161 162 def kill(self): 163 logger.debug("Killing HTTP transport") 164 self._worker.kill() 165 166 def copy(self): 167 transport = type(self)(self.options) 168 transport._pool = self._pool 169 return transport 170 171 172 class _FunctionTransport(Transport): 173 def __init__(self, func): 174 Transport.__init__(self) 175 self._func = func 176 177 def capture_event(self, event): 178 self._func(event) 179 180 181 def make_transport(options): 182 ref_transport = options["transport"] 183 184 # If no transport is given, we use the http transport class 185 if ref_transport is None: 186 transport_cls = HttpTransport 187 else: 188 try: 189 issubclass(ref_transport, type) 190 except TypeError: 191 # if we are not a class but we are a callable, assume a 192 # function that acts as capture_event 193 if callable(ref_transport): 194 return _FunctionTransport(ref_transport) 195 # otherwise assume an object fulfilling the transport contract 196 return ref_transport 197 transport_cls = ref_transport 198 199 # if a transport class is given only instanciate it if the dsn is not 200 # empty or None 201 if options["dsn"]: 202 return transport_cls(options) 203 ``` Path: `sentry_sdk/consts.py` Content: ``` 1 import socket 2 3 4 VERSION = "0.5.4" 5 DEFAULT_SERVER_NAME = socket.gethostname() if hasattr(socket, "gethostname") else None 6 
DEFAULT_OPTIONS = { 7 "dsn": None, 8 "with_locals": True, 9 "max_breadcrumbs": 100, 10 "release": None, 11 "environment": None, 12 "server_name": DEFAULT_SERVER_NAME, 13 "shutdown_timeout": 2.0, 14 "integrations": [], 15 "in_app_include": [], 16 "in_app_exclude": [], 17 "default_integrations": True, 18 "dist": None, 19 "transport": None, 20 "sample_rate": 1.0, 21 "send_default_pii": False, 22 "http_proxy": None, 23 "https_proxy": None, 24 "ignore_errors": [], 25 "request_bodies": "medium", 26 "before_send": None, 27 "before_breadcrumb": None, 28 "debug": False, 29 "attach_stacktrace": False, 30 } 31 32 33 SDK_INFO = { 34 "name": "sentry.python", 35 "version": VERSION, 36 "packages": [{"name": "pypi:sentry-sdk", "version": VERSION}], 37 } 38 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/sentry_sdk/consts.py b/sentry_sdk/consts.py --- a/sentry_sdk/consts.py +++ b/sentry_sdk/consts.py @@ -27,6 +27,7 @@ "before_breadcrumb": None, "debug": False, "attach_stacktrace": False, + "ca_certs": None, } diff --git a/sentry_sdk/transport.py b/sentry_sdk/transport.py --- a/sentry_sdk/transport.py +++ b/sentry_sdk/transport.py @@ -18,12 +18,16 @@ from urllib import getproxies -def _make_pool(parsed_dsn, http_proxy, https_proxy): +def _make_pool(parsed_dsn, http_proxy, https_proxy, ca_certs): proxy = https_proxy if parsed_dsn == "https" else http_proxy if not proxy: proxy = getproxies().get(parsed_dsn.scheme) - opts = {"num_pools": 2, "cert_reqs": "CERT_REQUIRED", "ca_certs": certifi.where()} + opts = { + "num_pools": 2, + "cert_reqs": "CERT_REQUIRED", + "ca_certs": ca_certs or certifi.where(), + } if proxy: return urllib3.ProxyManager(proxy, **opts) @@ -89,6 +93,7 @@ self.parsed_dsn, http_proxy=options["http_proxy"], https_proxy=options["https_proxy"], + ca_certs=options["ca_certs"], ) self._disabled_until = None self._retry = urllib3.util.Retry()
{"golden_diff": "diff --git a/sentry_sdk/consts.py b/sentry_sdk/consts.py\n--- a/sentry_sdk/consts.py\n+++ b/sentry_sdk/consts.py\n@@ -27,6 +27,7 @@\n \"before_breadcrumb\": None,\n \"debug\": False,\n \"attach_stacktrace\": False,\n+ \"ca_certs\": None,\n }\n \n \ndiff --git a/sentry_sdk/transport.py b/sentry_sdk/transport.py\n--- a/sentry_sdk/transport.py\n+++ b/sentry_sdk/transport.py\n@@ -18,12 +18,16 @@\n from urllib import getproxies\n \n \n-def _make_pool(parsed_dsn, http_proxy, https_proxy):\n+def _make_pool(parsed_dsn, http_proxy, https_proxy, ca_certs):\n proxy = https_proxy if parsed_dsn == \"https\" else http_proxy\n if not proxy:\n proxy = getproxies().get(parsed_dsn.scheme)\n \n- opts = {\"num_pools\": 2, \"cert_reqs\": \"CERT_REQUIRED\", \"ca_certs\": certifi.where()}\n+ opts = {\n+ \"num_pools\": 2,\n+ \"cert_reqs\": \"CERT_REQUIRED\",\n+ \"ca_certs\": ca_certs or certifi.where(),\n+ }\n \n if proxy:\n return urllib3.ProxyManager(proxy, **opts)\n@@ -89,6 +93,7 @@\n self.parsed_dsn,\n http_proxy=options[\"http_proxy\"],\n https_proxy=options[\"https_proxy\"],\n+ ca_certs=options[\"ca_certs\"],\n )\n self._disabled_until = None\n self._retry = urllib3.util.Retry()\n", "issue": "Support for custom CA bundles\nWith the Raven client it is possible to [specify a custom CA bundle by appending the `ca_cert` parameter to the DSN](https://docs.sentry.io/clients/python/transports/). This is important for use of the client with on-premise installations of Sentry that use certificates signed by a custom CA. Sadly, [looking at `sentry_sdk.transport._make_pool`](https://github.com/getsentry/sentry-python/blob/30f339db3e76384e23fc951627c689197cb0e7d5/sentry_sdk/transport.py#L26), it seems this value is now hard-coded to `certifi.where()`. In result, users that previously used the `ca_cert` parameter are forced to stay on the Raven client. Thus, it would be great if you could (re-)add this feature.\n", "before_files": [{"content": "from __future__ import print_function\n\nimport json\nimport io\nimport urllib3\nimport certifi\nimport gzip\n\nfrom datetime import datetime, timedelta\n\nfrom sentry_sdk.consts import VERSION\nfrom sentry_sdk.utils import Dsn, logger, capture_internal_exceptions\nfrom sentry_sdk.worker import BackgroundWorker\n\ntry:\n from urllib.request import getproxies\nexcept ImportError:\n from urllib import getproxies\n\n\ndef _make_pool(parsed_dsn, http_proxy, https_proxy):\n proxy = https_proxy if parsed_dsn == \"https\" else http_proxy\n if not proxy:\n proxy = getproxies().get(parsed_dsn.scheme)\n\n opts = {\"num_pools\": 2, \"cert_reqs\": \"CERT_REQUIRED\", \"ca_certs\": certifi.where()}\n\n if proxy:\n return urllib3.ProxyManager(proxy, **opts)\n else:\n return urllib3.PoolManager(**opts)\n\n\nclass Transport(object):\n \"\"\"Baseclass for all transports.\n\n A transport is used to send an event to sentry.\n \"\"\"\n\n def __init__(self, options=None):\n self.options = options\n if options and options[\"dsn\"]:\n self.parsed_dsn = Dsn(options[\"dsn\"])\n else:\n self.parsed_dsn = None\n\n def capture_event(self, event):\n \"\"\"This gets invoked with the event dictionary when an event should\n be sent to sentry.\n \"\"\"\n raise NotImplementedError()\n\n def shutdown(self, timeout, callback=None):\n \"\"\"Initiates a controlled shutdown that should flush out pending\n events. 
The callback must be invoked with the number of pending\n events and the timeout if the shutting down would take some period\n of time (eg: not instant).\n \"\"\"\n self.kill()\n\n def kill(self):\n \"\"\"Forcefully kills the transport.\"\"\"\n pass\n\n def copy(self):\n \"\"\"Copy the transport.\n\n The returned transport should behave completely independent from the\n previous one. It still may share HTTP connection pools, but not share\n any state such as internal queues.\n \"\"\"\n return self\n\n def __del__(self):\n try:\n self.kill()\n except Exception:\n pass\n\n\nclass HttpTransport(Transport):\n \"\"\"The default HTTP transport.\"\"\"\n\n def __init__(self, options):\n Transport.__init__(self, options)\n self._worker = BackgroundWorker()\n self._auth = self.parsed_dsn.to_auth(\"sentry-python/%s\" % VERSION)\n self._pool = _make_pool(\n self.parsed_dsn,\n http_proxy=options[\"http_proxy\"],\n https_proxy=options[\"https_proxy\"],\n )\n self._disabled_until = None\n self._retry = urllib3.util.Retry()\n self.options = options\n\n from sentry_sdk import Hub\n\n self.hub_cls = Hub\n\n def _send_event(self, event):\n if self._disabled_until is not None:\n if datetime.utcnow() < self._disabled_until:\n return\n self._disabled_until = None\n\n body = io.BytesIO()\n with gzip.GzipFile(fileobj=body, mode=\"w\") as f:\n f.write(json.dumps(event).encode(\"utf-8\"))\n\n logger.debug(\n \"Sending %s event [%s] to %s project:%s\"\n % (\n event.get(\"level\") or \"error\",\n event[\"event_id\"],\n self.parsed_dsn.host,\n self.parsed_dsn.project_id,\n )\n )\n response = self._pool.request(\n \"POST\",\n str(self._auth.store_api_url),\n body=body.getvalue(),\n headers={\n \"X-Sentry-Auth\": str(self._auth.to_header()),\n \"Content-Type\": \"application/json\",\n \"Content-Encoding\": \"gzip\",\n },\n )\n\n try:\n if response.status == 429:\n self._disabled_until = datetime.utcnow() + timedelta(\n seconds=self._retry.get_retry_after(response)\n )\n return\n\n elif response.status >= 300 or response.status < 200:\n raise ValueError(\"Unexpected status code: %s\" % response.status)\n finally:\n response.close()\n\n self._disabled_until = None\n\n def capture_event(self, event):\n hub = self.hub_cls.current\n\n def send_event_wrapper():\n with hub:\n with capture_internal_exceptions():\n self._send_event(event)\n\n self._worker.submit(send_event_wrapper)\n\n def shutdown(self, timeout, callback=None):\n logger.debug(\"Shutting down HTTP transport orderly\")\n if timeout <= 0:\n self._worker.kill()\n else:\n self._worker.shutdown(timeout, callback)\n\n def kill(self):\n logger.debug(\"Killing HTTP transport\")\n self._worker.kill()\n\n def copy(self):\n transport = type(self)(self.options)\n transport._pool = self._pool\n return transport\n\n\nclass _FunctionTransport(Transport):\n def __init__(self, func):\n Transport.__init__(self)\n self._func = func\n\n def capture_event(self, event):\n self._func(event)\n\n\ndef make_transport(options):\n ref_transport = options[\"transport\"]\n\n # If no transport is given, we use the http transport class\n if ref_transport is None:\n transport_cls = HttpTransport\n else:\n try:\n issubclass(ref_transport, type)\n except TypeError:\n # if we are not a class but we are a callable, assume a\n # function that acts as capture_event\n if callable(ref_transport):\n return _FunctionTransport(ref_transport)\n # otherwise assume an object fulfilling the transport contract\n return ref_transport\n transport_cls = ref_transport\n\n # if a transport class is given only 
instanciate it if the dsn is not\n # empty or None\n if options[\"dsn\"]:\n return transport_cls(options)\n", "path": "sentry_sdk/transport.py"}, {"content": "import socket\n\n\nVERSION = \"0.5.4\"\nDEFAULT_SERVER_NAME = socket.gethostname() if hasattr(socket, \"gethostname\") else None\nDEFAULT_OPTIONS = {\n \"dsn\": None,\n \"with_locals\": True,\n \"max_breadcrumbs\": 100,\n \"release\": None,\n \"environment\": None,\n \"server_name\": DEFAULT_SERVER_NAME,\n \"shutdown_timeout\": 2.0,\n \"integrations\": [],\n \"in_app_include\": [],\n \"in_app_exclude\": [],\n \"default_integrations\": True,\n \"dist\": None,\n \"transport\": None,\n \"sample_rate\": 1.0,\n \"send_default_pii\": False,\n \"http_proxy\": None,\n \"https_proxy\": None,\n \"ignore_errors\": [],\n \"request_bodies\": \"medium\",\n \"before_send\": None,\n \"before_breadcrumb\": None,\n \"debug\": False,\n \"attach_stacktrace\": False,\n}\n\n\nSDK_INFO = {\n \"name\": \"sentry.python\",\n \"version\": VERSION,\n \"packages\": [{\"name\": \"pypi:sentry-sdk\", \"version\": VERSION}],\n}\n", "path": "sentry_sdk/consts.py"}], "after_files": [{"content": "from __future__ import print_function\n\nimport json\nimport io\nimport urllib3\nimport certifi\nimport gzip\n\nfrom datetime import datetime, timedelta\n\nfrom sentry_sdk.consts import VERSION\nfrom sentry_sdk.utils import Dsn, logger, capture_internal_exceptions\nfrom sentry_sdk.worker import BackgroundWorker\n\ntry:\n from urllib.request import getproxies\nexcept ImportError:\n from urllib import getproxies\n\n\ndef _make_pool(parsed_dsn, http_proxy, https_proxy, ca_certs):\n proxy = https_proxy if parsed_dsn == \"https\" else http_proxy\n if not proxy:\n proxy = getproxies().get(parsed_dsn.scheme)\n\n opts = {\n \"num_pools\": 2,\n \"cert_reqs\": \"CERT_REQUIRED\",\n \"ca_certs\": ca_certs or certifi.where(),\n }\n\n if proxy:\n return urllib3.ProxyManager(proxy, **opts)\n else:\n return urllib3.PoolManager(**opts)\n\n\nclass Transport(object):\n \"\"\"Baseclass for all transports.\n\n A transport is used to send an event to sentry.\n \"\"\"\n\n def __init__(self, options=None):\n self.options = options\n if options and options[\"dsn\"]:\n self.parsed_dsn = Dsn(options[\"dsn\"])\n else:\n self.parsed_dsn = None\n\n def capture_event(self, event):\n \"\"\"This gets invoked with the event dictionary when an event should\n be sent to sentry.\n \"\"\"\n raise NotImplementedError()\n\n def shutdown(self, timeout, callback=None):\n \"\"\"Initiates a controlled shutdown that should flush out pending\n events. The callback must be invoked with the number of pending\n events and the timeout if the shutting down would take some period\n of time (eg: not instant).\n \"\"\"\n self.kill()\n\n def kill(self):\n \"\"\"Forcefully kills the transport.\"\"\"\n pass\n\n def copy(self):\n \"\"\"Copy the transport.\n\n The returned transport should behave completely independent from the\n previous one. 
It still may share HTTP connection pools, but not share\n any state such as internal queues.\n \"\"\"\n return self\n\n def __del__(self):\n try:\n self.kill()\n except Exception:\n pass\n\n\nclass HttpTransport(Transport):\n \"\"\"The default HTTP transport.\"\"\"\n\n def __init__(self, options):\n Transport.__init__(self, options)\n self._worker = BackgroundWorker()\n self._auth = self.parsed_dsn.to_auth(\"sentry-python/%s\" % VERSION)\n self._pool = _make_pool(\n self.parsed_dsn,\n http_proxy=options[\"http_proxy\"],\n https_proxy=options[\"https_proxy\"],\n ca_certs=options[\"ca_certs\"],\n )\n self._disabled_until = None\n self._retry = urllib3.util.Retry()\n self.options = options\n\n from sentry_sdk import Hub\n\n self.hub_cls = Hub\n\n def _send_event(self, event):\n if self._disabled_until is not None:\n if datetime.utcnow() < self._disabled_until:\n return\n self._disabled_until = None\n\n body = io.BytesIO()\n with gzip.GzipFile(fileobj=body, mode=\"w\") as f:\n f.write(json.dumps(event).encode(\"utf-8\"))\n\n logger.debug(\n \"Sending %s event [%s] to %s project:%s\"\n % (\n event.get(\"level\") or \"error\",\n event[\"event_id\"],\n self.parsed_dsn.host,\n self.parsed_dsn.project_id,\n )\n )\n response = self._pool.request(\n \"POST\",\n str(self._auth.store_api_url),\n body=body.getvalue(),\n headers={\n \"X-Sentry-Auth\": str(self._auth.to_header()),\n \"Content-Type\": \"application/json\",\n \"Content-Encoding\": \"gzip\",\n },\n )\n\n try:\n if response.status == 429:\n self._disabled_until = datetime.utcnow() + timedelta(\n seconds=self._retry.get_retry_after(response)\n )\n return\n\n elif response.status >= 300 or response.status < 200:\n raise ValueError(\"Unexpected status code: %s\" % response.status)\n finally:\n response.close()\n\n self._disabled_until = None\n\n def capture_event(self, event):\n hub = self.hub_cls.current\n\n def send_event_wrapper():\n with hub:\n with capture_internal_exceptions():\n self._send_event(event)\n\n self._worker.submit(send_event_wrapper)\n\n def shutdown(self, timeout, callback=None):\n logger.debug(\"Shutting down HTTP transport orderly\")\n if timeout <= 0:\n self._worker.kill()\n else:\n self._worker.shutdown(timeout, callback)\n\n def kill(self):\n logger.debug(\"Killing HTTP transport\")\n self._worker.kill()\n\n def copy(self):\n transport = type(self)(self.options)\n transport._pool = self._pool\n return transport\n\n\nclass _FunctionTransport(Transport):\n def __init__(self, func):\n Transport.__init__(self)\n self._func = func\n\n def capture_event(self, event):\n self._func(event)\n\n\ndef make_transport(options):\n ref_transport = options[\"transport\"]\n\n # If no transport is given, we use the http transport class\n if ref_transport is None:\n transport_cls = HttpTransport\n else:\n try:\n issubclass(ref_transport, type)\n except TypeError:\n # if we are not a class but we are a callable, assume a\n # function that acts as capture_event\n if callable(ref_transport):\n return _FunctionTransport(ref_transport)\n # otherwise assume an object fulfilling the transport contract\n return ref_transport\n transport_cls = ref_transport\n\n # if a transport class is given only instanciate it if the dsn is not\n # empty or None\n if options[\"dsn\"]:\n return transport_cls(options)\n", "path": "sentry_sdk/transport.py"}, {"content": "import socket\n\n\nVERSION = \"0.5.4\"\nDEFAULT_SERVER_NAME = socket.gethostname() if hasattr(socket, \"gethostname\") else None\nDEFAULT_OPTIONS = {\n \"dsn\": None,\n \"with_locals\": True,\n 
\"max_breadcrumbs\": 100,\n \"release\": None,\n \"environment\": None,\n \"server_name\": DEFAULT_SERVER_NAME,\n \"shutdown_timeout\": 2.0,\n \"integrations\": [],\n \"in_app_include\": [],\n \"in_app_exclude\": [],\n \"default_integrations\": True,\n \"dist\": None,\n \"transport\": None,\n \"sample_rate\": 1.0,\n \"send_default_pii\": False,\n \"http_proxy\": None,\n \"https_proxy\": None,\n \"ignore_errors\": [],\n \"request_bodies\": \"medium\",\n \"before_send\": None,\n \"before_breadcrumb\": None,\n \"debug\": False,\n \"attach_stacktrace\": False,\n \"ca_certs\": None,\n}\n\n\nSDK_INFO = {\n \"name\": \"sentry.python\",\n \"version\": VERSION,\n \"packages\": [{\"name\": \"pypi:sentry-sdk\", \"version\": VERSION}],\n}\n", "path": "sentry_sdk/consts.py"}]}
num_tokens: 2,573
num_tokens_diff: 366
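A minimal sketch of the approach the golden diff above takes: wire an optional `ca_certs` setting through to urllib3 and fall back to certifi's bundle when it is unset. The helper name `make_pool` and the `/path/to/internal-ca.pem` path are placeholders for illustration, not part of the sentry-sdk API:

```python
import certifi
import urllib3

def make_pool(ca_certs=None, proxy=None):
    # Use the custom CA bundle when one is configured, otherwise fall back to
    # certifi, mirroring the `ca_certs or certifi.where()` expression in the diff.
    opts = {
        "num_pools": 2,
        "cert_reqs": "CERT_REQUIRED",
        "ca_certs": ca_certs or certifi.where(),
    }
    if proxy:
        return urllib3.ProxyManager(proxy, **opts)
    return urllib3.PoolManager(**opts)

# On-premise Sentry behind a private CA (the path is hypothetical):
pool = make_pool(ca_certs="/path/to/internal-ca.pem")
```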
gh_patches_debug_4983
rasdani/github-patches
git_diff
ocf__ocfweb-162
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Misleading error message when trying to register an account and you already have one "This CalNet account has already submitted a request for approval. If you believe this is an error, please contact us with your CalNet UID: xxx.” --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `ocfweb/account/register.py` Content: ``` 1 import ocflib.account.search as search 2 import ocflib.account.validators as validators 3 import ocflib.misc.validators 4 import ocflib.ucb.directory as directory 5 from Crypto.PublicKey import RSA 6 from django import forms 7 from django.conf import settings 8 from django.core.urlresolvers import reverse 9 from django.forms.forms import NON_FIELD_ERRORS 10 from django.http import HttpResponseRedirect 11 from django.shortcuts import render 12 from ocflib.account.creation import encrypt_password 13 from ocflib.account.creation import NewAccountRequest 14 from ocflib.account.search import user_attrs_ucb 15 from ocflib.account.submission import NewAccountResponse 16 from ocflib.constants import CREATE_PUBLIC_KEY 17 18 from ocfweb.account.constants import TESTER_CALNET_UIDS 19 from ocfweb.auth import calnet_required 20 from ocfweb.component.celery import celery_app 21 from ocfweb.component.celery import validate_then_create_account 22 from ocfweb.component.forms import Form 23 from ocfweb.component.forms import wrap_validator 24 25 26 @calnet_required 27 def request_account(request): 28 calnet_uid = request.session['calnet_uid'] 29 status = 'new_request' 30 31 existing_accounts = search.users_by_calnet_uid(calnet_uid) 32 33 if existing_accounts and calnet_uid not in TESTER_CALNET_UIDS: 34 return render( 35 request, 36 'account/register/already-has-account.html', 37 { 38 'calnet_uid': calnet_uid, 39 'calnet_url': settings.LOGOUT_URL, 40 'title': 'You already have an account', 41 }, 42 ) 43 44 # ensure we can even find them in university LDAP 45 # (alumni etc. 
might not be readable in LDAP but can still auth via CalNet) 46 if not user_attrs_ucb(calnet_uid): 47 return render( 48 request, 49 'account/register/cant-find-in-ldap.html', 50 { 51 'calnet_uid': calnet_uid, 52 'calnet_url': settings.LOGOUT_URL, 53 'title': 'Unable to read account information', 54 }, 55 ) 56 57 real_name = directory.name_by_calnet_uid(calnet_uid) 58 59 if request.method == 'POST': 60 form = ApproveForm(request.POST) 61 if form.is_valid(): 62 req = NewAccountRequest( 63 user_name=form.cleaned_data['ocf_login_name'], 64 real_name=real_name, 65 is_group=False, 66 calnet_uid=calnet_uid, 67 callink_oid=None, 68 email=form.cleaned_data['contact_email'], 69 encrypted_password=encrypt_password( 70 form.cleaned_data['password'], 71 RSA.importKey(CREATE_PUBLIC_KEY), 72 ), 73 handle_warnings=NewAccountRequest.WARNINGS_WARN, 74 ) 75 if 'warnings-submit' in request.POST: 76 req = req._replace( 77 handle_warnings=NewAccountRequest.WARNINGS_SUBMIT, 78 ) 79 80 task = validate_then_create_account.delay(req) 81 task.wait(timeout=5) 82 83 if isinstance(task.result, NewAccountResponse): 84 if task.result.status == NewAccountResponse.REJECTED: 85 status = 'has_errors' 86 form._errors[NON_FIELD_ERRORS] = form.error_class(task.result.errors) 87 elif task.result.status == NewAccountResponse.FLAGGED: 88 status = 'has_warnings' 89 form._errors[NON_FIELD_ERRORS] = form.error_class(task.result.errors) 90 elif task.result.status == NewAccountResponse.PENDING: 91 return HttpResponseRedirect(reverse('account_pending')) 92 else: 93 raise AssertionError('Unexpected state reached') 94 else: 95 # validation was successful, the account is being created now 96 request.session['approve_task_id'] = task.result 97 return HttpResponseRedirect(reverse('wait_for_account')) 98 else: 99 form = ApproveForm() 100 101 return render( 102 request, 103 'account/register/index.html', 104 { 105 'form': form, 106 'real_name': real_name, 107 'status': status, 108 'title': 'Request an OCF account', 109 }, 110 ) 111 112 113 def wait_for_account(request): 114 if 'approve_task_id' not in request.session: 115 return render( 116 request, 117 'account/register/wait/error-no-task-id.html', 118 {'title': 'Account request error'}, 119 ) 120 121 task = celery_app.AsyncResult(request.session['approve_task_id']) 122 if not task.ready(): 123 meta = task.info 124 status = ['Starting creation'] 125 if isinstance(meta, dict) and 'status' in meta: 126 status.extend(meta['status']) 127 return render( 128 request, 129 'account/register/wait/wait.html', 130 { 131 'title': 'Creating account...', 132 'status': status, 133 }, 134 ) 135 elif isinstance(task.result, NewAccountResponse): 136 if task.result.status == NewAccountResponse.CREATED: 137 return HttpResponseRedirect(reverse('account_created')) 138 elif isinstance(task.result, Exception): 139 raise task.result 140 141 return render(request, 'account/register/wait/error-probably-not-created.html', {}) 142 143 144 def account_pending(request): 145 return render(request, 'account/register/pending.html', {'title': 'Account request pending'}) 146 147 148 def account_created(request): 149 return render(request, 'account/register/success.html', {'title': 'Account request successful'}) 150 151 152 class ApproveForm(Form): 153 154 ocf_login_name = forms.CharField( 155 label='OCF account name', 156 widget=forms.TextInput(attrs={'placeholder': 'jsmith'}), 157 validators=[wrap_validator(validators.validate_username)], 158 min_length=3, 159 max_length=16, 160 ) 161 162 # password is validated in clean since 
we need the username as part of the 163 # password validation (to compare similarity) 164 password = forms.CharField( 165 widget=forms.PasswordInput(render_value=True), 166 label='Password', 167 min_length=8, 168 max_length=256, 169 ) 170 171 verify_password = forms.CharField( 172 widget=forms.PasswordInput(render_value=True), 173 label='Confirm password', 174 min_length=8, 175 max_length=64, 176 ) 177 178 contact_email = forms.EmailField( 179 label='Contact e-mail', 180 validators=[wrap_validator(ocflib.misc.validators.valid_email)], 181 widget=forms.EmailInput(attrs={'placeholder': '[email protected]'}), 182 ) 183 184 verify_contact_email = forms.EmailField( 185 label='Confirm contact e-mail', 186 widget=forms.EmailInput(attrs={'placeholder': '[email protected]'}), 187 ) 188 189 disclaimer_agreement = forms.BooleanField( 190 label='I agree with the above statement.', 191 error_messages={ 192 'required': 'You must agree to our policies.' 193 }) 194 195 def clean_verify_password(self): 196 password = self.cleaned_data.get('password') 197 verify_password = self.cleaned_data.get('verify_password') 198 199 if password and verify_password: 200 if password != verify_password: 201 raise forms.ValidationError("Your passwords don't match.") 202 return verify_password 203 204 def clean_verify_contact_email(self): 205 email = self.cleaned_data.get('contact_email') 206 verify_contact_email = self.cleaned_data.get('verify_contact_email') 207 208 if email and verify_contact_email: 209 if email != verify_contact_email: 210 raise forms.ValidationError("Your emails don't match.") 211 return verify_contact_email 212 213 def clean(self): 214 cleaned_data = super(ApproveForm, self).clean() 215 216 # validate password (requires username to check similarity) 217 username = cleaned_data.get('username') 218 password = cleaned_data.get('password') 219 220 if username and password: 221 wrap_validator(validators.validate_password)(username, password) 222 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/ocfweb/account/register.py b/ocfweb/account/register.py --- a/ocfweb/account/register.py +++ b/ocfweb/account/register.py @@ -35,6 +35,7 @@ request, 'account/register/already-has-account.html', { + 'account': ', '.join(existing_accounts), 'calnet_uid': calnet_uid, 'calnet_url': settings.LOGOUT_URL, 'title': 'You already have an account',
{"golden_diff": "diff --git a/ocfweb/account/register.py b/ocfweb/account/register.py\n--- a/ocfweb/account/register.py\n+++ b/ocfweb/account/register.py\n@@ -35,6 +35,7 @@\n request,\n 'account/register/already-has-account.html',\n {\n+ 'account': ', '.join(existing_accounts),\n 'calnet_uid': calnet_uid,\n 'calnet_url': settings.LOGOUT_URL,\n 'title': 'You already have an account',\n", "issue": "Misleading error message when trying to register an account and you already have one\n\"This CalNet account has already submitted a request for approval. If you believe this is an error, please contact us with your CalNet UID: xxx.\u201d\n\n", "before_files": [{"content": "import ocflib.account.search as search\nimport ocflib.account.validators as validators\nimport ocflib.misc.validators\nimport ocflib.ucb.directory as directory\nfrom Crypto.PublicKey import RSA\nfrom django import forms\nfrom django.conf import settings\nfrom django.core.urlresolvers import reverse\nfrom django.forms.forms import NON_FIELD_ERRORS\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import render\nfrom ocflib.account.creation import encrypt_password\nfrom ocflib.account.creation import NewAccountRequest\nfrom ocflib.account.search import user_attrs_ucb\nfrom ocflib.account.submission import NewAccountResponse\nfrom ocflib.constants import CREATE_PUBLIC_KEY\n\nfrom ocfweb.account.constants import TESTER_CALNET_UIDS\nfrom ocfweb.auth import calnet_required\nfrom ocfweb.component.celery import celery_app\nfrom ocfweb.component.celery import validate_then_create_account\nfrom ocfweb.component.forms import Form\nfrom ocfweb.component.forms import wrap_validator\n\n\n@calnet_required\ndef request_account(request):\n calnet_uid = request.session['calnet_uid']\n status = 'new_request'\n\n existing_accounts = search.users_by_calnet_uid(calnet_uid)\n\n if existing_accounts and calnet_uid not in TESTER_CALNET_UIDS:\n return render(\n request,\n 'account/register/already-has-account.html',\n {\n 'calnet_uid': calnet_uid,\n 'calnet_url': settings.LOGOUT_URL,\n 'title': 'You already have an account',\n },\n )\n\n # ensure we can even find them in university LDAP\n # (alumni etc. 
might not be readable in LDAP but can still auth via CalNet)\n if not user_attrs_ucb(calnet_uid):\n return render(\n request,\n 'account/register/cant-find-in-ldap.html',\n {\n 'calnet_uid': calnet_uid,\n 'calnet_url': settings.LOGOUT_URL,\n 'title': 'Unable to read account information',\n },\n )\n\n real_name = directory.name_by_calnet_uid(calnet_uid)\n\n if request.method == 'POST':\n form = ApproveForm(request.POST)\n if form.is_valid():\n req = NewAccountRequest(\n user_name=form.cleaned_data['ocf_login_name'],\n real_name=real_name,\n is_group=False,\n calnet_uid=calnet_uid,\n callink_oid=None,\n email=form.cleaned_data['contact_email'],\n encrypted_password=encrypt_password(\n form.cleaned_data['password'],\n RSA.importKey(CREATE_PUBLIC_KEY),\n ),\n handle_warnings=NewAccountRequest.WARNINGS_WARN,\n )\n if 'warnings-submit' in request.POST:\n req = req._replace(\n handle_warnings=NewAccountRequest.WARNINGS_SUBMIT,\n )\n\n task = validate_then_create_account.delay(req)\n task.wait(timeout=5)\n\n if isinstance(task.result, NewAccountResponse):\n if task.result.status == NewAccountResponse.REJECTED:\n status = 'has_errors'\n form._errors[NON_FIELD_ERRORS] = form.error_class(task.result.errors)\n elif task.result.status == NewAccountResponse.FLAGGED:\n status = 'has_warnings'\n form._errors[NON_FIELD_ERRORS] = form.error_class(task.result.errors)\n elif task.result.status == NewAccountResponse.PENDING:\n return HttpResponseRedirect(reverse('account_pending'))\n else:\n raise AssertionError('Unexpected state reached')\n else:\n # validation was successful, the account is being created now\n request.session['approve_task_id'] = task.result\n return HttpResponseRedirect(reverse('wait_for_account'))\n else:\n form = ApproveForm()\n\n return render(\n request,\n 'account/register/index.html',\n {\n 'form': form,\n 'real_name': real_name,\n 'status': status,\n 'title': 'Request an OCF account',\n },\n )\n\n\ndef wait_for_account(request):\n if 'approve_task_id' not in request.session:\n return render(\n request,\n 'account/register/wait/error-no-task-id.html',\n {'title': 'Account request error'},\n )\n\n task = celery_app.AsyncResult(request.session['approve_task_id'])\n if not task.ready():\n meta = task.info\n status = ['Starting creation']\n if isinstance(meta, dict) and 'status' in meta:\n status.extend(meta['status'])\n return render(\n request,\n 'account/register/wait/wait.html',\n {\n 'title': 'Creating account...',\n 'status': status,\n },\n )\n elif isinstance(task.result, NewAccountResponse):\n if task.result.status == NewAccountResponse.CREATED:\n return HttpResponseRedirect(reverse('account_created'))\n elif isinstance(task.result, Exception):\n raise task.result\n\n return render(request, 'account/register/wait/error-probably-not-created.html', {})\n\n\ndef account_pending(request):\n return render(request, 'account/register/pending.html', {'title': 'Account request pending'})\n\n\ndef account_created(request):\n return render(request, 'account/register/success.html', {'title': 'Account request successful'})\n\n\nclass ApproveForm(Form):\n\n ocf_login_name = forms.CharField(\n label='OCF account name',\n widget=forms.TextInput(attrs={'placeholder': 'jsmith'}),\n validators=[wrap_validator(validators.validate_username)],\n min_length=3,\n max_length=16,\n )\n\n # password is validated in clean since we need the username as part of the\n # password validation (to compare similarity)\n password = forms.CharField(\n widget=forms.PasswordInput(render_value=True),\n 
label='Password',\n min_length=8,\n max_length=256,\n )\n\n verify_password = forms.CharField(\n widget=forms.PasswordInput(render_value=True),\n label='Confirm password',\n min_length=8,\n max_length=64,\n )\n\n contact_email = forms.EmailField(\n label='Contact e-mail',\n validators=[wrap_validator(ocflib.misc.validators.valid_email)],\n widget=forms.EmailInput(attrs={'placeholder': '[email protected]'}),\n )\n\n verify_contact_email = forms.EmailField(\n label='Confirm contact e-mail',\n widget=forms.EmailInput(attrs={'placeholder': '[email protected]'}),\n )\n\n disclaimer_agreement = forms.BooleanField(\n label='I agree with the above statement.',\n error_messages={\n 'required': 'You must agree to our policies.'\n })\n\n def clean_verify_password(self):\n password = self.cleaned_data.get('password')\n verify_password = self.cleaned_data.get('verify_password')\n\n if password and verify_password:\n if password != verify_password:\n raise forms.ValidationError(\"Your passwords don't match.\")\n return verify_password\n\n def clean_verify_contact_email(self):\n email = self.cleaned_data.get('contact_email')\n verify_contact_email = self.cleaned_data.get('verify_contact_email')\n\n if email and verify_contact_email:\n if email != verify_contact_email:\n raise forms.ValidationError(\"Your emails don't match.\")\n return verify_contact_email\n\n def clean(self):\n cleaned_data = super(ApproveForm, self).clean()\n\n # validate password (requires username to check similarity)\n username = cleaned_data.get('username')\n password = cleaned_data.get('password')\n\n if username and password:\n wrap_validator(validators.validate_password)(username, password)\n", "path": "ocfweb/account/register.py"}], "after_files": [{"content": "import ocflib.account.search as search\nimport ocflib.account.validators as validators\nimport ocflib.misc.validators\nimport ocflib.ucb.directory as directory\nfrom Crypto.PublicKey import RSA\nfrom django import forms\nfrom django.conf import settings\nfrom django.core.urlresolvers import reverse\nfrom django.forms.forms import NON_FIELD_ERRORS\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import render\nfrom ocflib.account.creation import encrypt_password\nfrom ocflib.account.creation import NewAccountRequest\nfrom ocflib.account.search import user_attrs_ucb\nfrom ocflib.account.submission import NewAccountResponse\nfrom ocflib.constants import CREATE_PUBLIC_KEY\n\nfrom ocfweb.account.constants import TESTER_CALNET_UIDS\nfrom ocfweb.auth import calnet_required\nfrom ocfweb.component.celery import celery_app\nfrom ocfweb.component.celery import validate_then_create_account\nfrom ocfweb.component.forms import Form\nfrom ocfweb.component.forms import wrap_validator\n\n\n@calnet_required\ndef request_account(request):\n calnet_uid = request.session['calnet_uid']\n status = 'new_request'\n\n existing_accounts = search.users_by_calnet_uid(calnet_uid)\n\n if existing_accounts and calnet_uid not in TESTER_CALNET_UIDS:\n return render(\n request,\n 'account/register/already-has-account.html',\n {\n 'account': ', '.join(existing_accounts),\n 'calnet_uid': calnet_uid,\n 'calnet_url': settings.LOGOUT_URL,\n 'title': 'You already have an account',\n },\n )\n\n # ensure we can even find them in university LDAP\n # (alumni etc. 
might not be readable in LDAP but can still auth via CalNet)\n if not user_attrs_ucb(calnet_uid):\n return render(\n request,\n 'account/register/cant-find-in-ldap.html',\n {\n 'calnet_uid': calnet_uid,\n 'calnet_url': settings.LOGOUT_URL,\n 'title': 'Unable to read account information',\n },\n )\n\n real_name = directory.name_by_calnet_uid(calnet_uid)\n\n if request.method == 'POST':\n form = ApproveForm(request.POST)\n if form.is_valid():\n req = NewAccountRequest(\n user_name=form.cleaned_data['ocf_login_name'],\n real_name=real_name,\n is_group=False,\n calnet_uid=calnet_uid,\n callink_oid=None,\n email=form.cleaned_data['contact_email'],\n encrypted_password=encrypt_password(\n form.cleaned_data['password'],\n RSA.importKey(CREATE_PUBLIC_KEY),\n ),\n handle_warnings=NewAccountRequest.WARNINGS_WARN,\n )\n if 'warnings-submit' in request.POST:\n req = req._replace(\n handle_warnings=NewAccountRequest.WARNINGS_SUBMIT,\n )\n\n task = validate_then_create_account.delay(req)\n task.wait(timeout=5)\n\n if isinstance(task.result, NewAccountResponse):\n if task.result.status == NewAccountResponse.REJECTED:\n status = 'has_errors'\n form._errors[NON_FIELD_ERRORS] = form.error_class(task.result.errors)\n elif task.result.status == NewAccountResponse.FLAGGED:\n status = 'has_warnings'\n form._errors[NON_FIELD_ERRORS] = form.error_class(task.result.errors)\n elif task.result.status == NewAccountResponse.PENDING:\n return HttpResponseRedirect(reverse('account_pending'))\n else:\n raise AssertionError('Unexpected state reached')\n else:\n # validation was successful, the account is being created now\n request.session['approve_task_id'] = task.result\n return HttpResponseRedirect(reverse('wait_for_account'))\n else:\n form = ApproveForm()\n\n return render(\n request,\n 'account/register/index.html',\n {\n 'form': form,\n 'real_name': real_name,\n 'status': status,\n 'title': 'Request an OCF account',\n },\n )\n\n\ndef wait_for_account(request):\n if 'approve_task_id' not in request.session:\n return render(\n request,\n 'account/register/wait/error-no-task-id.html',\n {'title': 'Account request error'},\n )\n\n task = celery_app.AsyncResult(request.session['approve_task_id'])\n if not task.ready():\n meta = task.info\n status = ['Starting creation']\n if isinstance(meta, dict) and 'status' in meta:\n status.extend(meta['status'])\n return render(\n request,\n 'account/register/wait/wait.html',\n {\n 'title': 'Creating account...',\n 'status': status,\n },\n )\n elif isinstance(task.result, NewAccountResponse):\n if task.result.status == NewAccountResponse.CREATED:\n return HttpResponseRedirect(reverse('account_created'))\n elif isinstance(task.result, Exception):\n raise task.result\n\n return render(request, 'account/register/wait/error-probably-not-created.html', {})\n\n\ndef account_pending(request):\n return render(request, 'account/register/pending.html', {'title': 'Account request pending'})\n\n\ndef account_created(request):\n return render(request, 'account/register/success.html', {'title': 'Account request successful'})\n\n\nclass ApproveForm(Form):\n\n ocf_login_name = forms.CharField(\n label='OCF account name',\n widget=forms.TextInput(attrs={'placeholder': 'jsmith'}),\n validators=[wrap_validator(validators.validate_username)],\n min_length=3,\n max_length=16,\n )\n\n # password is validated in clean since we need the username as part of the\n # password validation (to compare similarity)\n password = forms.CharField(\n widget=forms.PasswordInput(render_value=True),\n 
label='Password',\n min_length=8,\n max_length=256,\n )\n\n verify_password = forms.CharField(\n widget=forms.PasswordInput(render_value=True),\n label='Confirm password',\n min_length=8,\n max_length=64,\n )\n\n contact_email = forms.EmailField(\n label='Contact e-mail',\n validators=[wrap_validator(ocflib.misc.validators.valid_email)],\n widget=forms.EmailInput(attrs={'placeholder': '[email protected]'}),\n )\n\n verify_contact_email = forms.EmailField(\n label='Confirm contact e-mail',\n widget=forms.EmailInput(attrs={'placeholder': '[email protected]'}),\n )\n\n disclaimer_agreement = forms.BooleanField(\n label='I agree with the above statement.',\n error_messages={\n 'required': 'You must agree to our policies.'\n })\n\n def clean_verify_password(self):\n password = self.cleaned_data.get('password')\n verify_password = self.cleaned_data.get('verify_password')\n\n if password and verify_password:\n if password != verify_password:\n raise forms.ValidationError(\"Your passwords don't match.\")\n return verify_password\n\n def clean_verify_contact_email(self):\n email = self.cleaned_data.get('contact_email')\n verify_contact_email = self.cleaned_data.get('verify_contact_email')\n\n if email and verify_contact_email:\n if email != verify_contact_email:\n raise forms.ValidationError(\"Your emails don't match.\")\n return verify_contact_email\n\n def clean(self):\n cleaned_data = super(ApproveForm, self).clean()\n\n # validate password (requires username to check similarity)\n username = cleaned_data.get('username')\n password = cleaned_data.get('password')\n\n if username and password:\n wrap_validator(validators.validate_password)(username, password)\n", "path": "ocfweb/account/register.py"}]}
num_tokens: 2,429
num_tokens_diff: 110
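A small sketch of what the golden diff above changes: the view now puts the existing account name(s) into the template context, so the "already has an account" page can name the account instead of only echoing the CalNet UID. The sample values and the template fragment are assumptions for illustration; `already-has-account.html` itself is not part of the record:

```python
# Hypothetical result of search.users_by_calnet_uid(calnet_uid) for someone
# who already registered an OCF account.
existing_accounts = ['jsmith']
calnet_uid = 123456  # hypothetical CalNet UID

context = {
    'account': ', '.join(existing_accounts),   # the key added by the diff
    'calnet_uid': calnet_uid,
    'title': 'You already have an account',
}

# An assumed template line that would use the new key:
#   You already have the OCF account "{{ account }}" for this CalNet ID.
```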
gh_patches_debug_13894
rasdani/github-patches
git_diff
mars-project__mars-1699
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [BUG] Run the example code hangs in distributed mode <!-- Thank you for your contribution! Please review https://github.com/mars-project/mars/blob/master/CONTRIBUTING.rst before opening an issue. --> **Describe the bug** Create a Mars cluster and run the code in readme: ``` Python import mars.tensor as mt N = 200_000_000 a = mt.random.uniform(-1, 1, size=(N, 2)) print(((mt.linalg.norm(a, axis=1) < 1) .sum() * 4 / N).execute()) ``` it hangs and error be found in server client: ``` 2020-11-09 21:30:22,053 mars.scheduler.operands.common 97 ERROR Attempt 1: Unexpected error KeyError occurred in executing operand 230bef1901408a5f9134f34444918898 in 11.238.146.2:35131 Traceback (most recent call last): File "/home/admin/work/public-mars-0.5.4.zip/mars/promise.py", line 378, in _wrapped return func(*args, **kwargs) File "/home/admin/work/public-mars-0.5.4.zip/mars/utils.py", line 365, in _wrapped return func(*args, **kwargs) File "/home/admin/work/public-mars-0.5.4.zip/mars/worker/execution.py", line 564, in execute_graph quota_request = self._prepare_quota_request(session_id, graph_key) File "/home/admin/work/public-mars-0.5.4.zip/mars/worker/execution.py", line 249, in _prepare_quota_request memory_estimations = self._estimate_calc_memory(session_id, graph_key) File "/home/admin/work/public-mars-0.5.4.zip/mars/worker/execution.py", line 213, in _estimate_calc_memory res = executor.execute_graph(graph_record.graph, graph_record.chunk_targets, mock=True) File "/home/admin/work/public-mars-0.5.4.zip/mars/executor.py", line 690, in execute_graph res = graph_execution.execute(retval) File "/home/admin/work/public-mars-0.5.4.zip/mars/executor.py", line 571, in execute future.result() File "/home/admin/work/public-mars-0.5.4.zip/mars/executor.py", line 186, in result raise self._exc_info[1] from None File "/home/admin/work/public-mars-0.5.4.zip/mars/executor.py", line 198, in submit return self._MockResult(fn(*args, **kwargs)) File "/home/admin/work/public-mars-0.5.4.zip/mars/utils.py", line 439, in _inner return func(*args, **kwargs) File "/home/admin/work/public-mars-0.5.4.zip/mars/executor.py", line 443, in _execute_operand Executor.handle(first_op, results, self._mock) File "/home/admin/work/public-mars-0.5.4.zip/mars/executor.py", line 641, in handle return runner(results, op) File "/home/admin/work/public-mars-0.5.4.zip/mars/tensor/fuse/ne.py", line 75, in estimate_size estimate_fuse_size(ctx, op) File "/home/admin/work/public-mars-0.5.4.zip/mars/tensor/fuse/core.py", line 49, in estimate_fuse_size results = executor.execute_graph(dag, output_keys, mock=True, no_intermediate=True) File "/home/admin/work/public-mars-0.5.4.zip/mars/executor.py", line 690, in execute_graph res = graph_execution.execute(retval) File "/home/admin/work/public-mars-0.5.4.zip/mars/executor.py", line 571, in execute future.result() File "/opt/conda/lib/python3.7/concurrent/futures/_base.py", line 428, in result return self.__get_result() File "/opt/conda/lib/python3.7/concurrent/futures/_base.py", line 384, in __get_result raise self._exception File "/opt/conda/lib/python3.7/concurrent/futures/thread.py", line 57, in run result = self.fn(*self.args, **self.kwargs) File "/home/admin/work/public-mars-0.5.4.zip/mars/utils.py", line 439, in _inner return func(*args, **kwargs) File "/home/admin/work/public-mars-0.5.4.zip/mars/executor.py", line 486, in _execute_operand del results[dep_key] KeyError: 
'94e11781368129674925eb2d4ae093bf' ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `mars/tensor/fuse/core.py` Content: ``` 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 # Copyright 1999-2020 Alibaba Group Holding Ltd. 4 # 5 # Licensed under the Apache License, Version 2.0 (the "License"); 6 # you may not use this file except in compliance with the License. 7 # You may obtain a copy of the License at 8 # 9 # http://www.apache.org/licenses/LICENSE-2.0 10 # 11 # Unless required by applicable law or agreed to in writing, software 12 # distributed under the License is distributed on an "AS IS" BASIS, 13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 # See the License for the specific language governing permissions and 15 # limitations under the License. 16 17 from ...operands import FuseChunkMixin 18 from ..operands import TensorFuse, TensorOperandMixin 19 20 21 class TensorFuseChunkMixin(FuseChunkMixin, TensorOperandMixin): 22 __slots__ = () 23 24 25 class TensorFuseChunk(TensorFuse, TensorFuseChunkMixin): 26 def __init__(self, dtype=None, **kw): 27 super().__init__(_dtype=dtype, **kw) 28 29 30 def estimate_fuse_size(ctx, op): 31 from ...graph import DAG 32 from ...executor import Executor 33 34 chunk = op.outputs[0] 35 dag = DAG() 36 size_ctx = dict() 37 keys = set(c.key for c in chunk.composed) 38 for c in chunk.composed: 39 dag.add_node(c) 40 for inp in c.inputs: 41 if inp.key not in keys: 42 size_ctx[inp.key] = ctx[inp.key] 43 if inp not in dag: 44 dag.add_node(inp) 45 dag.add_edge(inp, c) 46 47 executor = Executor(storage=size_ctx) 48 output_keys = [o.key for o in op.outputs] 49 results = executor.execute_graph(dag, output_keys, mock=True, no_intermediate=True) 50 ctx.update(zip(output_keys, results)) 51 52 # update with the maximal memory cost during the whole execution 53 total_mem = sum(ctx[key][1] for key in output_keys) 54 if total_mem: 55 for key in output_keys: 56 r = ctx[key] 57 ctx[key] = (r[0], max(r[1], r[1] * executor.mock_max_memory // total_mem)) 58 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/mars/tensor/fuse/core.py b/mars/tensor/fuse/core.py --- a/mars/tensor/fuse/core.py +++ b/mars/tensor/fuse/core.py @@ -30,6 +30,7 @@ def estimate_fuse_size(ctx, op): from ...graph import DAG from ...executor import Executor + from ...utils import build_fetch_chunk chunk = op.outputs[0] dag = DAG() @@ -40,6 +41,7 @@ for inp in c.inputs: if inp.key not in keys: size_ctx[inp.key] = ctx[inp.key] + inp = build_fetch_chunk(inp).data if inp not in dag: dag.add_node(inp) dag.add_edge(inp, c)
{"golden_diff": "diff --git a/mars/tensor/fuse/core.py b/mars/tensor/fuse/core.py\n--- a/mars/tensor/fuse/core.py\n+++ b/mars/tensor/fuse/core.py\n@@ -30,6 +30,7 @@\n def estimate_fuse_size(ctx, op):\n from ...graph import DAG\n from ...executor import Executor\n+ from ...utils import build_fetch_chunk\n \n chunk = op.outputs[0]\n dag = DAG()\n@@ -40,6 +41,7 @@\n for inp in c.inputs:\n if inp.key not in keys:\n size_ctx[inp.key] = ctx[inp.key]\n+ inp = build_fetch_chunk(inp).data\n if inp not in dag:\n dag.add_node(inp)\n dag.add_edge(inp, c)\n", "issue": "[BUG] Run the example code hangs in distributed mode\n<!--\r\nThank you for your contribution!\r\n\r\nPlease review https://github.com/mars-project/mars/blob/master/CONTRIBUTING.rst before opening an issue.\r\n-->\r\n\r\n**Describe the bug**\r\nCreate a Mars cluster and run the code in readme:\r\n``` Python\r\nimport mars.tensor as mt\r\nN = 200_000_000\r\na = mt.random.uniform(-1, 1, size=(N, 2))\r\nprint(((mt.linalg.norm(a, axis=1) < 1)\r\n .sum() * 4 / N).execute())\r\n```\r\n\r\nit hangs and error be found in server client:\r\n```\r\n2020-11-09 21:30:22,053 mars.scheduler.operands.common 97 ERROR Attempt 1: Unexpected error KeyError occurred in executing operand 230bef1901408a5f9134f34444918898 in 11.238.146.2:35131\r\nTraceback (most recent call last):\r\n File \"/home/admin/work/public-mars-0.5.4.zip/mars/promise.py\", line 378, in _wrapped\r\n return func(*args, **kwargs)\r\n File \"/home/admin/work/public-mars-0.5.4.zip/mars/utils.py\", line 365, in _wrapped\r\n return func(*args, **kwargs)\r\n File \"/home/admin/work/public-mars-0.5.4.zip/mars/worker/execution.py\", line 564, in execute_graph\r\n quota_request = self._prepare_quota_request(session_id, graph_key)\r\n File \"/home/admin/work/public-mars-0.5.4.zip/mars/worker/execution.py\", line 249, in _prepare_quota_request\r\n memory_estimations = self._estimate_calc_memory(session_id, graph_key)\r\n File \"/home/admin/work/public-mars-0.5.4.zip/mars/worker/execution.py\", line 213, in _estimate_calc_memory\r\n res = executor.execute_graph(graph_record.graph, graph_record.chunk_targets, mock=True)\r\n File \"/home/admin/work/public-mars-0.5.4.zip/mars/executor.py\", line 690, in execute_graph\r\n res = graph_execution.execute(retval)\r\n File \"/home/admin/work/public-mars-0.5.4.zip/mars/executor.py\", line 571, in execute\r\n future.result()\r\n File \"/home/admin/work/public-mars-0.5.4.zip/mars/executor.py\", line 186, in result\r\n raise self._exc_info[1] from None\r\n File \"/home/admin/work/public-mars-0.5.4.zip/mars/executor.py\", line 198, in submit\r\n return self._MockResult(fn(*args, **kwargs))\r\n File \"/home/admin/work/public-mars-0.5.4.zip/mars/utils.py\", line 439, in _inner\r\n return func(*args, **kwargs)\r\n File \"/home/admin/work/public-mars-0.5.4.zip/mars/executor.py\", line 443, in _execute_operand\r\n Executor.handle(first_op, results, self._mock)\r\n File \"/home/admin/work/public-mars-0.5.4.zip/mars/executor.py\", line 641, in handle\r\n return runner(results, op)\r\n File \"/home/admin/work/public-mars-0.5.4.zip/mars/tensor/fuse/ne.py\", line 75, in estimate_size\r\n estimate_fuse_size(ctx, op)\r\n File \"/home/admin/work/public-mars-0.5.4.zip/mars/tensor/fuse/core.py\", line 49, in estimate_fuse_size\r\n results = executor.execute_graph(dag, output_keys, mock=True, no_intermediate=True)\r\n File \"/home/admin/work/public-mars-0.5.4.zip/mars/executor.py\", line 690, in execute_graph\r\n res = graph_execution.execute(retval)\r\n File 
\"/home/admin/work/public-mars-0.5.4.zip/mars/executor.py\", line 571, in execute\r\n future.result()\r\n File \"/opt/conda/lib/python3.7/concurrent/futures/_base.py\", line 428, in result\r\n return self.__get_result()\r\n File \"/opt/conda/lib/python3.7/concurrent/futures/_base.py\", line 384, in __get_result\r\n raise self._exception\r\n File \"/opt/conda/lib/python3.7/concurrent/futures/thread.py\", line 57, in run\r\n result = self.fn(*self.args, **self.kwargs)\r\n File \"/home/admin/work/public-mars-0.5.4.zip/mars/utils.py\", line 439, in _inner\r\n return func(*args, **kwargs)\r\n File \"/home/admin/work/public-mars-0.5.4.zip/mars/executor.py\", line 486, in _execute_operand\r\n del results[dep_key]\r\nKeyError: '94e11781368129674925eb2d4ae093bf'\r\n```\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright 1999-2020 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom ...operands import FuseChunkMixin\nfrom ..operands import TensorFuse, TensorOperandMixin\n\n\nclass TensorFuseChunkMixin(FuseChunkMixin, TensorOperandMixin):\n __slots__ = ()\n\n\nclass TensorFuseChunk(TensorFuse, TensorFuseChunkMixin):\n def __init__(self, dtype=None, **kw):\n super().__init__(_dtype=dtype, **kw)\n\n\ndef estimate_fuse_size(ctx, op):\n from ...graph import DAG\n from ...executor import Executor\n\n chunk = op.outputs[0]\n dag = DAG()\n size_ctx = dict()\n keys = set(c.key for c in chunk.composed)\n for c in chunk.composed:\n dag.add_node(c)\n for inp in c.inputs:\n if inp.key not in keys:\n size_ctx[inp.key] = ctx[inp.key]\n if inp not in dag:\n dag.add_node(inp)\n dag.add_edge(inp, c)\n\n executor = Executor(storage=size_ctx)\n output_keys = [o.key for o in op.outputs]\n results = executor.execute_graph(dag, output_keys, mock=True, no_intermediate=True)\n ctx.update(zip(output_keys, results))\n\n # update with the maximal memory cost during the whole execution\n total_mem = sum(ctx[key][1] for key in output_keys)\n if total_mem:\n for key in output_keys:\n r = ctx[key]\n ctx[key] = (r[0], max(r[1], r[1] * executor.mock_max_memory // total_mem))\n", "path": "mars/tensor/fuse/core.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright 1999-2020 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom ...operands import FuseChunkMixin\nfrom ..operands import TensorFuse, TensorOperandMixin\n\n\nclass TensorFuseChunkMixin(FuseChunkMixin, TensorOperandMixin):\n 
__slots__ = ()\n\n\nclass TensorFuseChunk(TensorFuse, TensorFuseChunkMixin):\n def __init__(self, dtype=None, **kw):\n super().__init__(_dtype=dtype, **kw)\n\n\ndef estimate_fuse_size(ctx, op):\n from ...graph import DAG\n from ...executor import Executor\n from ...utils import build_fetch_chunk\n\n chunk = op.outputs[0]\n dag = DAG()\n size_ctx = dict()\n keys = set(c.key for c in chunk.composed)\n for c in chunk.composed:\n dag.add_node(c)\n for inp in c.inputs:\n if inp.key not in keys:\n size_ctx[inp.key] = ctx[inp.key]\n inp = build_fetch_chunk(inp).data\n if inp not in dag:\n dag.add_node(inp)\n dag.add_edge(inp, c)\n\n executor = Executor(storage=size_ctx)\n output_keys = [o.key for o in op.outputs]\n results = executor.execute_graph(dag, output_keys, mock=True, no_intermediate=True)\n ctx.update(zip(output_keys, results))\n\n # update with the maximal memory cost during the whole execution\n total_mem = sum(ctx[key][1] for key in output_keys)\n if total_mem:\n for key in output_keys:\n r = ctx[key]\n ctx[key] = (r[0], max(r[1], r[1] * executor.mock_max_memory // total_mem))\n", "path": "mars/tensor/fuse/core.py"}]}
2,024
174
gh_patches_debug_24958
rasdani/github-patches
git_diff
zulip__zulip-26231
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Sentry integration "Test Plugin" failure Hello, I'm setting up Sentry.io integration and got this error when I tried "Test Plugin" (meaning "Test Outgoing Webhook to Zulip"); I'm assuming it's the response payload that came back from Zulip Cloud: >"There was an internal error with the Plugin, {\"result\":\"error\",\"msg\":\"The 'Raven SDK' event isn't currently supported by the Sentry webhook\",\"webhook_name\":\"Sentry\",\"event_type\":\"Raven SDK\",\"code\":\"UNSUPPORTED_WEBHOOK_EVENT_TYPE\"}\n" I'm not sure if there are any events that do work because I'm new to Sentry and not sure how to trigger test events other than the Test Plugin event. **Zulip Server and web app version:** - [x] Zulip Cloud (`*.zulipchat.com`) - [ ] Zulip Server 7.0+ - [ ] Zulip Server 6.0+ - [ ] Zulip Server 5.0 or older - [ ] Other or not sure --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `zerver/webhooks/sentry/view.py` Content: ``` 1 import logging 2 from datetime import datetime, timezone 3 from typing import Any, Dict, List, Optional, Tuple 4 from urllib.parse import urljoin 5 6 from django.http import HttpRequest, HttpResponse 7 8 from zerver.decorator import webhook_view 9 from zerver.lib.exceptions import UnsupportedWebhookEventTypeError 10 from zerver.lib.request import REQ, has_request_variables 11 from zerver.lib.response import json_success 12 from zerver.lib.webhooks.common import check_send_webhook_message 13 from zerver.models import UserProfile 14 15 DEPRECATED_EXCEPTION_MESSAGE_TEMPLATE = """ 16 New [issue]({url}) (level: {level}): 17 18 ``` quote 19 {message} 20 ``` 21 """ 22 23 MESSAGE_EVENT_TEMPLATE = """ 24 **New message event:** [{title}]({web_link}) 25 ```quote 26 **level:** {level} 27 **timestamp:** {datetime} 28 ``` 29 """ 30 31 EXCEPTION_EVENT_TEMPLATE = """ 32 **New exception:** [{title}]({web_link}) 33 ```quote 34 **level:** {level} 35 **timestamp:** {datetime} 36 **filename:** {filename} 37 ``` 38 """ 39 40 EXCEPTION_EVENT_TEMPLATE_WITH_TRACEBACK = ( 41 EXCEPTION_EVENT_TEMPLATE 42 + """ 43 Traceback: 44 ```{syntax_highlight_as} 45 {pre_context}---> {context_line}{post_context}\ 46 ``` 47 """ 48 ) 49 # Because of the \n added at the end of each context element, 50 # this will actually look better in the traceback. 51 52 ISSUE_CREATED_MESSAGE_TEMPLATE = """ 53 **New issue created:** {title} 54 ```quote 55 **level:** {level} 56 **timestamp:** {datetime} 57 **assignee:** {assignee} 58 ``` 59 """ 60 61 ISSUE_ASSIGNED_MESSAGE_TEMPLATE = """ 62 Issue **{title}** has now been assigned to **{assignee}** by **{actor}**. 63 """ 64 65 ISSUE_RESOLVED_MESSAGE_TEMPLATE = """ 66 Issue **{title}** was marked as resolved by **{actor}**. 67 """ 68 69 ISSUE_IGNORED_MESSAGE_TEMPLATE = """ 70 Issue **{title}** was ignored by **{actor}**. 
71 """ 72 73 # Maps "platform" name provided by Sentry to the Pygments lexer name 74 syntax_highlight_as_map = { 75 "go": "go", 76 "java": "java", 77 "javascript": "javascript", 78 "node": "javascript", 79 "python": "python3", 80 "ruby": "ruby", 81 } 82 83 84 def convert_lines_to_traceback_string(lines: Optional[List[str]]) -> str: 85 traceback = "" 86 if lines is not None: 87 for line in lines: 88 if line == "": 89 traceback += "\n" 90 else: 91 traceback += f" {line}\n" 92 return traceback 93 94 95 def handle_event_payload(event: Dict[str, Any]) -> Tuple[str, str]: 96 """Handle either an exception type event or a message type event payload.""" 97 98 topic = event["title"] 99 platform_name = event["platform"] 100 syntax_highlight_as = syntax_highlight_as_map.get(platform_name, "") 101 if syntax_highlight_as == "": # nocoverage 102 logging.info("Unknown Sentry platform: %s", platform_name) 103 104 # We shouldn't support the officially deprecated Raven series of 105 # Python SDKs. 106 if platform_name == "python" and int(event["version"]) < 7: 107 # The sample event is still an old "version" -- accept it even 108 # though we don't accept events from the old Python SDK. 109 tags = event.get("tags", []) 110 if ["sample_event", "yes"] not in tags: 111 raise UnsupportedWebhookEventTypeError("Raven SDK") 112 context = { 113 "title": topic, 114 "level": event["level"], 115 "web_link": event["web_url"], 116 "datetime": event["datetime"].split(".")[0].replace("T", " "), 117 } 118 119 if "exception" in event: 120 # The event was triggered by a sentry.capture_exception() call 121 # (in the Python Sentry SDK) or something similar. 122 123 filename = event["metadata"].get("filename", None) 124 125 stacktrace = None 126 for value in reversed(event["exception"]["values"]): 127 if "stacktrace" in value: 128 stacktrace = value["stacktrace"] 129 break 130 131 if stacktrace and filename: 132 exception_frame = None 133 for frame in reversed(stacktrace["frames"]): 134 if frame.get("filename", None) == filename: 135 exception_frame = frame 136 break 137 138 if ( 139 exception_frame 140 and "context_line" in exception_frame 141 and exception_frame["context_line"] is not None 142 ): 143 pre_context = convert_lines_to_traceback_string( 144 exception_frame.get("pre_context", None) 145 ) 146 context_line = exception_frame["context_line"] + "\n" 147 post_context = convert_lines_to_traceback_string( 148 exception_frame.get("post_context", None) 149 ) 150 151 context.update( 152 syntax_highlight_as=syntax_highlight_as, 153 filename=filename, 154 pre_context=pre_context, 155 context_line=context_line, 156 post_context=post_context, 157 ) 158 159 body = EXCEPTION_EVENT_TEMPLATE_WITH_TRACEBACK.format(**context) 160 return (topic, body) 161 162 context.update(filename=filename) # nocoverage 163 body = EXCEPTION_EVENT_TEMPLATE.format(**context) # nocoverage 164 return (topic, body) # nocoverage 165 166 elif "logentry" in event: 167 # The event was triggered by a sentry.capture_message() call 168 # (in the Python Sentry SDK) or something similar. 
169 body = MESSAGE_EVENT_TEMPLATE.format(**context) 170 171 else: 172 raise UnsupportedWebhookEventTypeError("unknown-event type") 173 174 return (topic, body) 175 176 177 def handle_issue_payload( 178 action: str, issue: Dict[str, Any], actor: Dict[str, Any] 179 ) -> Tuple[str, str]: 180 """Handle either an issue type event.""" 181 topic = issue["title"] 182 datetime = issue["lastSeen"].split(".")[0].replace("T", " ") 183 184 if issue["assignedTo"]: 185 if issue["assignedTo"]["type"] == "team": 186 assignee = "team {}".format(issue["assignedTo"]["name"]) 187 else: 188 assignee = issue["assignedTo"]["name"] 189 else: 190 assignee = "No one" 191 192 if action == "created": 193 context = { 194 "title": topic, 195 "level": issue["level"], 196 "datetime": datetime, 197 "assignee": assignee, 198 } 199 body = ISSUE_CREATED_MESSAGE_TEMPLATE.format(**context) 200 201 elif action == "resolved": 202 context = { 203 "title": topic, 204 "actor": actor["name"], 205 } 206 body = ISSUE_RESOLVED_MESSAGE_TEMPLATE.format(**context) 207 208 elif action == "assigned": 209 context = { 210 "title": topic, 211 "assignee": assignee, 212 "actor": actor["name"], 213 } 214 body = ISSUE_ASSIGNED_MESSAGE_TEMPLATE.format(**context) 215 216 elif action == "ignored": 217 context = { 218 "title": topic, 219 "actor": actor["name"], 220 } 221 body = ISSUE_IGNORED_MESSAGE_TEMPLATE.format(**context) 222 223 else: 224 raise UnsupportedWebhookEventTypeError("unknown-issue-action type") 225 226 return (topic, body) 227 228 229 def handle_deprecated_payload(payload: Dict[str, Any]) -> Tuple[str, str]: 230 topic = "{}".format(payload.get("project_name")) 231 body = DEPRECATED_EXCEPTION_MESSAGE_TEMPLATE.format( 232 level=payload["level"].upper(), 233 url=payload.get("url"), 234 message=payload.get("message"), 235 ) 236 return (topic, body) 237 238 239 def transform_webhook_payload(payload: Dict[str, Any]) -> Optional[Dict[str, Any]]: 240 """Attempt to use webhook payload for the notification. 241 242 When the integration is configured as a webhook, instead of being added as 243 an internal integration, the payload is slightly different, but has all the 244 required information for sending a notification. We transform this payload to 245 look like the payload from a "properly configured" integration. 246 """ 247 event = payload.get("event", {}) 248 # deprecated payloads don't have event_id 249 event_id = event.get("event_id") 250 if not event_id: 251 return None 252 253 event_path = f"events/{event_id}/" 254 event["web_url"] = urljoin(payload["url"], event_path) 255 timestamp = event.get("timestamp", event["received"]) 256 event["datetime"] = datetime.fromtimestamp(timestamp, timezone.utc).isoformat( 257 timespec="microseconds" 258 ) 259 return payload 260 261 262 @webhook_view("Sentry") 263 @has_request_variables 264 def api_sentry_webhook( 265 request: HttpRequest, 266 user_profile: UserProfile, 267 payload: Dict[str, Any] = REQ(argument_type="body"), 268 ) -> HttpResponse: 269 data = payload.get("data", None) 270 271 if data is None: 272 data = transform_webhook_payload(payload) 273 274 # We currently support two types of payloads: events and issues. 
275 if data: 276 if "event" in data: 277 topic, body = handle_event_payload(data["event"]) 278 elif "issue" in data: 279 topic, body = handle_issue_payload(payload["action"], data["issue"], payload["actor"]) 280 else: 281 raise UnsupportedWebhookEventTypeError(str(list(data.keys()))) 282 else: 283 topic, body = handle_deprecated_payload(payload) 284 285 check_send_webhook_message(request, user_profile, topic, body) 286 return json_success(request) 287 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/zerver/webhooks/sentry/view.py b/zerver/webhooks/sentry/view.py --- a/zerver/webhooks/sentry/view.py +++ b/zerver/webhooks/sentry/view.py @@ -81,6 +81,15 @@ } +def is_sample_event(event: Dict[str, Any]) -> bool: + # This is just a heuristic to detect the sample event, this should + # not be used for making important behavior decisions. + title = event.get("title", "") + if title == "This is an example Python exception": + return True + return False + + def convert_lines_to_traceback_string(lines: Optional[List[str]]) -> str: traceback = "" if lines is not None: @@ -103,12 +112,10 @@ # We shouldn't support the officially deprecated Raven series of # Python SDKs. - if platform_name == "python" and int(event["version"]) < 7: + if platform_name == "python" and int(event["version"]) < 7 and not is_sample_event(event): # The sample event is still an old "version" -- accept it even # though we don't accept events from the old Python SDK. - tags = event.get("tags", []) - if ["sample_event", "yes"] not in tags: - raise UnsupportedWebhookEventTypeError("Raven SDK") + raise UnsupportedWebhookEventTypeError("Raven SDK") context = { "title": subject, "level": event["level"],
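For a quick sanity check of the heuristic introduced above, the throwaway snippet below replays the version gate on two hand-written payloads; both dictionaries are invented, and only the sample-event title string comes from the patch.

```python
# Standalone check of the gate added in the patch; the two payloads are made up.
def is_sample_event(event):
    return event.get("title", "") == "This is an example Python exception"

sample = {"title": "This is an example Python exception", "platform": "python", "version": "5"}
legacy = {"title": "ZeroDivisionError: division by zero", "platform": "python", "version": "5"}

for event in (sample, legacy):
    rejected = (
        event["platform"] == "python"
        and int(event["version"]) < 7
        and not is_sample_event(event)
    )
    print(event["title"], "->", "rejected as Raven SDK" if rejected else "accepted")
```

Under the old tag-based check, the Test Plugin payload fell through to the Raven-SDK rejection, presumably because it does not carry the `["sample_event", "yes"]` tag the previous code looked for; the title heuristic accepts it while still rejecting genuine old-SDK events.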
{"golden_diff": "diff --git a/zerver/webhooks/sentry/view.py b/zerver/webhooks/sentry/view.py\n--- a/zerver/webhooks/sentry/view.py\n+++ b/zerver/webhooks/sentry/view.py\n@@ -81,6 +81,15 @@\n }\n \n \n+def is_sample_event(event: Dict[str, Any]) -> bool:\n+ # This is just a heuristic to detect the sample event, this should\n+ # not be used for making important behavior decisions.\n+ title = event.get(\"title\", \"\")\n+ if title == \"This is an example Python exception\":\n+ return True\n+ return False\n+\n+\n def convert_lines_to_traceback_string(lines: Optional[List[str]]) -> str:\n traceback = \"\"\n if lines is not None:\n@@ -103,12 +112,10 @@\n \n # We shouldn't support the officially deprecated Raven series of\n # Python SDKs.\n- if platform_name == \"python\" and int(event[\"version\"]) < 7:\n+ if platform_name == \"python\" and int(event[\"version\"]) < 7 and not is_sample_event(event):\n # The sample event is still an old \"version\" -- accept it even\n # though we don't accept events from the old Python SDK.\n- tags = event.get(\"tags\", [])\n- if [\"sample_event\", \"yes\"] not in tags:\n- raise UnsupportedWebhookEventTypeError(\"Raven SDK\")\n+ raise UnsupportedWebhookEventTypeError(\"Raven SDK\")\n context = {\n \"title\": subject,\n \"level\": event[\"level\"],\n", "issue": "Sentry integration \"Test Plugin\" failure\nHello, I'm setting up Sentry.io integration and got this error when I tried \"Test Plugin\" (meaning \"Test Outgoing Webhook to Zulip\"); I'm assuming it's the response payload that came back from Zulip Cloud:\r\n\r\n>\"There was an internal error with the Plugin, {\\\"result\\\":\\\"error\\\",\\\"msg\\\":\\\"The 'Raven SDK' event isn't currently supported by the Sentry webhook\\\",\\\"webhook_name\\\":\\\"Sentry\\\",\\\"event_type\\\":\\\"Raven SDK\\\",\\\"code\\\":\\\"UNSUPPORTED_WEBHOOK_EVENT_TYPE\\\"}\\n\"\r\n\r\nI'm not sure if there are any events that do work because I'm new to Sentry and not sure how to trigger test events other than the Test Plugin event.\r\n\r\n**Zulip Server and web app version:**\r\n\r\n- [x] Zulip Cloud (`*.zulipchat.com`)\r\n- [ ] Zulip Server 7.0+\r\n- [ ] Zulip Server 6.0+\r\n- [ ] Zulip Server 5.0 or older\r\n- [ ] Other or not sure\r\n\n", "before_files": [{"content": "import logging\nfrom datetime import datetime, timezone\nfrom typing import Any, Dict, List, Optional, Tuple\nfrom urllib.parse import urljoin\n\nfrom django.http import HttpRequest, HttpResponse\n\nfrom zerver.decorator import webhook_view\nfrom zerver.lib.exceptions import UnsupportedWebhookEventTypeError\nfrom zerver.lib.request import REQ, has_request_variables\nfrom zerver.lib.response import json_success\nfrom zerver.lib.webhooks.common import check_send_webhook_message\nfrom zerver.models import UserProfile\n\nDEPRECATED_EXCEPTION_MESSAGE_TEMPLATE = \"\"\"\nNew [issue]({url}) (level: {level}):\n\n``` quote\n{message}\n```\n\"\"\"\n\nMESSAGE_EVENT_TEMPLATE = \"\"\"\n**New message event:** [{title}]({web_link})\n```quote\n**level:** {level}\n**timestamp:** {datetime}\n```\n\"\"\"\n\nEXCEPTION_EVENT_TEMPLATE = \"\"\"\n**New exception:** [{title}]({web_link})\n```quote\n**level:** {level}\n**timestamp:** {datetime}\n**filename:** {filename}\n```\n\"\"\"\n\nEXCEPTION_EVENT_TEMPLATE_WITH_TRACEBACK = (\n EXCEPTION_EVENT_TEMPLATE\n + \"\"\"\nTraceback:\n```{syntax_highlight_as}\n{pre_context}---> {context_line}{post_context}\\\n```\n\"\"\"\n)\n# Because of the \\n added at the end of each context element,\n# this will actually look better in the 
traceback.\n\nISSUE_CREATED_MESSAGE_TEMPLATE = \"\"\"\n**New issue created:** {title}\n```quote\n**level:** {level}\n**timestamp:** {datetime}\n**assignee:** {assignee}\n```\n\"\"\"\n\nISSUE_ASSIGNED_MESSAGE_TEMPLATE = \"\"\"\nIssue **{title}** has now been assigned to **{assignee}** by **{actor}**.\n\"\"\"\n\nISSUE_RESOLVED_MESSAGE_TEMPLATE = \"\"\"\nIssue **{title}** was marked as resolved by **{actor}**.\n\"\"\"\n\nISSUE_IGNORED_MESSAGE_TEMPLATE = \"\"\"\nIssue **{title}** was ignored by **{actor}**.\n\"\"\"\n\n# Maps \"platform\" name provided by Sentry to the Pygments lexer name\nsyntax_highlight_as_map = {\n \"go\": \"go\",\n \"java\": \"java\",\n \"javascript\": \"javascript\",\n \"node\": \"javascript\",\n \"python\": \"python3\",\n \"ruby\": \"ruby\",\n}\n\n\ndef convert_lines_to_traceback_string(lines: Optional[List[str]]) -> str:\n traceback = \"\"\n if lines is not None:\n for line in lines:\n if line == \"\":\n traceback += \"\\n\"\n else:\n traceback += f\" {line}\\n\"\n return traceback\n\n\ndef handle_event_payload(event: Dict[str, Any]) -> Tuple[str, str]:\n \"\"\"Handle either an exception type event or a message type event payload.\"\"\"\n\n topic = event[\"title\"]\n platform_name = event[\"platform\"]\n syntax_highlight_as = syntax_highlight_as_map.get(platform_name, \"\")\n if syntax_highlight_as == \"\": # nocoverage\n logging.info(\"Unknown Sentry platform: %s\", platform_name)\n\n # We shouldn't support the officially deprecated Raven series of\n # Python SDKs.\n if platform_name == \"python\" and int(event[\"version\"]) < 7:\n # The sample event is still an old \"version\" -- accept it even\n # though we don't accept events from the old Python SDK.\n tags = event.get(\"tags\", [])\n if [\"sample_event\", \"yes\"] not in tags:\n raise UnsupportedWebhookEventTypeError(\"Raven SDK\")\n context = {\n \"title\": topic,\n \"level\": event[\"level\"],\n \"web_link\": event[\"web_url\"],\n \"datetime\": event[\"datetime\"].split(\".\")[0].replace(\"T\", \" \"),\n }\n\n if \"exception\" in event:\n # The event was triggered by a sentry.capture_exception() call\n # (in the Python Sentry SDK) or something similar.\n\n filename = event[\"metadata\"].get(\"filename\", None)\n\n stacktrace = None\n for value in reversed(event[\"exception\"][\"values\"]):\n if \"stacktrace\" in value:\n stacktrace = value[\"stacktrace\"]\n break\n\n if stacktrace and filename:\n exception_frame = None\n for frame in reversed(stacktrace[\"frames\"]):\n if frame.get(\"filename\", None) == filename:\n exception_frame = frame\n break\n\n if (\n exception_frame\n and \"context_line\" in exception_frame\n and exception_frame[\"context_line\"] is not None\n ):\n pre_context = convert_lines_to_traceback_string(\n exception_frame.get(\"pre_context\", None)\n )\n context_line = exception_frame[\"context_line\"] + \"\\n\"\n post_context = convert_lines_to_traceback_string(\n exception_frame.get(\"post_context\", None)\n )\n\n context.update(\n syntax_highlight_as=syntax_highlight_as,\n filename=filename,\n pre_context=pre_context,\n context_line=context_line,\n post_context=post_context,\n )\n\n body = EXCEPTION_EVENT_TEMPLATE_WITH_TRACEBACK.format(**context)\n return (topic, body)\n\n context.update(filename=filename) # nocoverage\n body = EXCEPTION_EVENT_TEMPLATE.format(**context) # nocoverage\n return (topic, body) # nocoverage\n\n elif \"logentry\" in event:\n # The event was triggered by a sentry.capture_message() call\n # (in the Python Sentry SDK) or something similar.\n body = 
MESSAGE_EVENT_TEMPLATE.format(**context)\n\n else:\n raise UnsupportedWebhookEventTypeError(\"unknown-event type\")\n\n return (topic, body)\n\n\ndef handle_issue_payload(\n action: str, issue: Dict[str, Any], actor: Dict[str, Any]\n) -> Tuple[str, str]:\n \"\"\"Handle either an issue type event.\"\"\"\n topic = issue[\"title\"]\n datetime = issue[\"lastSeen\"].split(\".\")[0].replace(\"T\", \" \")\n\n if issue[\"assignedTo\"]:\n if issue[\"assignedTo\"][\"type\"] == \"team\":\n assignee = \"team {}\".format(issue[\"assignedTo\"][\"name\"])\n else:\n assignee = issue[\"assignedTo\"][\"name\"]\n else:\n assignee = \"No one\"\n\n if action == \"created\":\n context = {\n \"title\": topic,\n \"level\": issue[\"level\"],\n \"datetime\": datetime,\n \"assignee\": assignee,\n }\n body = ISSUE_CREATED_MESSAGE_TEMPLATE.format(**context)\n\n elif action == \"resolved\":\n context = {\n \"title\": topic,\n \"actor\": actor[\"name\"],\n }\n body = ISSUE_RESOLVED_MESSAGE_TEMPLATE.format(**context)\n\n elif action == \"assigned\":\n context = {\n \"title\": topic,\n \"assignee\": assignee,\n \"actor\": actor[\"name\"],\n }\n body = ISSUE_ASSIGNED_MESSAGE_TEMPLATE.format(**context)\n\n elif action == \"ignored\":\n context = {\n \"title\": topic,\n \"actor\": actor[\"name\"],\n }\n body = ISSUE_IGNORED_MESSAGE_TEMPLATE.format(**context)\n\n else:\n raise UnsupportedWebhookEventTypeError(\"unknown-issue-action type\")\n\n return (topic, body)\n\n\ndef handle_deprecated_payload(payload: Dict[str, Any]) -> Tuple[str, str]:\n topic = \"{}\".format(payload.get(\"project_name\"))\n body = DEPRECATED_EXCEPTION_MESSAGE_TEMPLATE.format(\n level=payload[\"level\"].upper(),\n url=payload.get(\"url\"),\n message=payload.get(\"message\"),\n )\n return (topic, body)\n\n\ndef transform_webhook_payload(payload: Dict[str, Any]) -> Optional[Dict[str, Any]]:\n \"\"\"Attempt to use webhook payload for the notification.\n\n When the integration is configured as a webhook, instead of being added as\n an internal integration, the payload is slightly different, but has all the\n required information for sending a notification. 
We transform this payload to\n look like the payload from a \"properly configured\" integration.\n \"\"\"\n event = payload.get(\"event\", {})\n # deprecated payloads don't have event_id\n event_id = event.get(\"event_id\")\n if not event_id:\n return None\n\n event_path = f\"events/{event_id}/\"\n event[\"web_url\"] = urljoin(payload[\"url\"], event_path)\n timestamp = event.get(\"timestamp\", event[\"received\"])\n event[\"datetime\"] = datetime.fromtimestamp(timestamp, timezone.utc).isoformat(\n timespec=\"microseconds\"\n )\n return payload\n\n\n@webhook_view(\"Sentry\")\n@has_request_variables\ndef api_sentry_webhook(\n request: HttpRequest,\n user_profile: UserProfile,\n payload: Dict[str, Any] = REQ(argument_type=\"body\"),\n) -> HttpResponse:\n data = payload.get(\"data\", None)\n\n if data is None:\n data = transform_webhook_payload(payload)\n\n # We currently support two types of payloads: events and issues.\n if data:\n if \"event\" in data:\n topic, body = handle_event_payload(data[\"event\"])\n elif \"issue\" in data:\n topic, body = handle_issue_payload(payload[\"action\"], data[\"issue\"], payload[\"actor\"])\n else:\n raise UnsupportedWebhookEventTypeError(str(list(data.keys())))\n else:\n topic, body = handle_deprecated_payload(payload)\n\n check_send_webhook_message(request, user_profile, topic, body)\n return json_success(request)\n", "path": "zerver/webhooks/sentry/view.py"}], "after_files": [{"content": "import logging\nfrom datetime import datetime, timezone\nfrom typing import Any, Dict, List, Optional, Tuple\nfrom urllib.parse import urljoin\n\nfrom django.http import HttpRequest, HttpResponse\n\nfrom zerver.decorator import webhook_view\nfrom zerver.lib.exceptions import UnsupportedWebhookEventTypeError\nfrom zerver.lib.request import REQ, has_request_variables\nfrom zerver.lib.response import json_success\nfrom zerver.lib.webhooks.common import check_send_webhook_message\nfrom zerver.models import UserProfile\n\nDEPRECATED_EXCEPTION_MESSAGE_TEMPLATE = \"\"\"\nNew [issue]({url}) (level: {level}):\n\n``` quote\n{message}\n```\n\"\"\"\n\nMESSAGE_EVENT_TEMPLATE = \"\"\"\n**New message event:** [{title}]({web_link})\n```quote\n**level:** {level}\n**timestamp:** {datetime}\n```\n\"\"\"\n\nEXCEPTION_EVENT_TEMPLATE = \"\"\"\n**New exception:** [{title}]({web_link})\n```quote\n**level:** {level}\n**timestamp:** {datetime}\n**filename:** {filename}\n```\n\"\"\"\n\nEXCEPTION_EVENT_TEMPLATE_WITH_TRACEBACK = (\n EXCEPTION_EVENT_TEMPLATE\n + \"\"\"\nTraceback:\n```{syntax_highlight_as}\n{pre_context}---> {context_line}{post_context}\\\n```\n\"\"\"\n)\n# Because of the \\n added at the end of each context element,\n# this will actually look better in the traceback.\n\nISSUE_CREATED_MESSAGE_TEMPLATE = \"\"\"\n**New issue created:** {title}\n```quote\n**level:** {level}\n**timestamp:** {datetime}\n**assignee:** {assignee}\n```\n\"\"\"\n\nISSUE_ASSIGNED_MESSAGE_TEMPLATE = \"\"\"\nIssue **{title}** has now been assigned to **{assignee}** by **{actor}**.\n\"\"\"\n\nISSUE_RESOLVED_MESSAGE_TEMPLATE = \"\"\"\nIssue **{title}** was marked as resolved by **{actor}**.\n\"\"\"\n\nISSUE_IGNORED_MESSAGE_TEMPLATE = \"\"\"\nIssue **{title}** was ignored by **{actor}**.\n\"\"\"\n\n# Maps \"platform\" name provided by Sentry to the Pygments lexer name\nsyntax_highlight_as_map = {\n \"go\": \"go\",\n \"java\": \"java\",\n \"javascript\": \"javascript\",\n \"node\": \"javascript\",\n \"python\": \"python3\",\n \"ruby\": \"ruby\",\n}\n\n\ndef is_sample_event(event: Dict[str, Any]) -> bool:\n # This 
is just a heuristic to detect the sample event, this should\n # not be used for making important behavior decisions.\n title = event.get(\"title\", \"\")\n if title == \"This is an example Python exception\":\n return True\n return False\n\n\ndef convert_lines_to_traceback_string(lines: Optional[List[str]]) -> str:\n traceback = \"\"\n if lines is not None:\n for line in lines:\n if line == \"\":\n traceback += \"\\n\"\n else:\n traceback += f\" {line}\\n\"\n return traceback\n\n\ndef handle_event_payload(event: Dict[str, Any]) -> Tuple[str, str]:\n \"\"\"Handle either an exception type event or a message type event payload.\"\"\"\n\n subject = event[\"title\"]\n platform_name = event[\"platform\"]\n syntax_highlight_as = syntax_highlight_as_map.get(platform_name, \"\")\n if syntax_highlight_as == \"\": # nocoverage\n logging.info(\"Unknown Sentry platform: %s\", platform_name)\n\n # We shouldn't support the officially deprecated Raven series of\n # Python SDKs.\n if platform_name == \"python\" and int(event[\"version\"]) < 7 and not is_sample_event(event):\n # The sample event is still an old \"version\" -- accept it even\n # though we don't accept events from the old Python SDK.\n raise UnsupportedWebhookEventTypeError(\"Raven SDK\")\n context = {\n \"title\": subject,\n \"level\": event[\"level\"],\n \"web_link\": event[\"web_url\"],\n \"datetime\": event[\"datetime\"].split(\".\")[0].replace(\"T\", \" \"),\n }\n\n if \"exception\" in event:\n # The event was triggered by a sentry.capture_exception() call\n # (in the Python Sentry SDK) or something similar.\n\n filename = event[\"metadata\"].get(\"filename\", None)\n\n stacktrace = None\n for value in reversed(event[\"exception\"][\"values\"]):\n if \"stacktrace\" in value:\n stacktrace = value[\"stacktrace\"]\n break\n\n if stacktrace and filename:\n exception_frame = None\n for frame in reversed(stacktrace[\"frames\"]):\n if frame.get(\"filename\", None) == filename:\n exception_frame = frame\n break\n\n if (\n exception_frame\n and \"context_line\" in exception_frame\n and exception_frame[\"context_line\"] is not None\n ):\n pre_context = convert_lines_to_traceback_string(\n exception_frame.get(\"pre_context\", None)\n )\n context_line = exception_frame[\"context_line\"] + \"\\n\"\n post_context = convert_lines_to_traceback_string(\n exception_frame.get(\"post_context\", None)\n )\n\n context.update(\n syntax_highlight_as=syntax_highlight_as,\n filename=filename,\n pre_context=pre_context,\n context_line=context_line,\n post_context=post_context,\n )\n\n body = EXCEPTION_EVENT_TEMPLATE_WITH_TRACEBACK.format(**context)\n return (subject, body)\n\n context.update(filename=filename) # nocoverage\n body = EXCEPTION_EVENT_TEMPLATE.format(**context) # nocoverage\n return (subject, body) # nocoverage\n\n elif \"logentry\" in event:\n # The event was triggered by a sentry.capture_message() call\n # (in the Python Sentry SDK) or something similar.\n body = MESSAGE_EVENT_TEMPLATE.format(**context)\n\n else:\n raise UnsupportedWebhookEventTypeError(\"unknown-event type\")\n\n return (subject, body)\n\n\ndef handle_issue_payload(\n action: str, issue: Dict[str, Any], actor: Dict[str, Any]\n) -> Tuple[str, str]:\n \"\"\"Handle either an issue type event.\"\"\"\n subject = issue[\"title\"]\n datetime = issue[\"lastSeen\"].split(\".\")[0].replace(\"T\", \" \")\n\n if issue[\"assignedTo\"]:\n if issue[\"assignedTo\"][\"type\"] == \"team\":\n assignee = \"team {}\".format(issue[\"assignedTo\"][\"name\"])\n else:\n assignee = 
issue[\"assignedTo\"][\"name\"]\n else:\n assignee = \"No one\"\n\n if action == \"created\":\n context = {\n \"title\": subject,\n \"level\": issue[\"level\"],\n \"datetime\": datetime,\n \"assignee\": assignee,\n }\n body = ISSUE_CREATED_MESSAGE_TEMPLATE.format(**context)\n\n elif action == \"resolved\":\n context = {\n \"title\": subject,\n \"actor\": actor[\"name\"],\n }\n body = ISSUE_RESOLVED_MESSAGE_TEMPLATE.format(**context)\n\n elif action == \"assigned\":\n context = {\n \"title\": subject,\n \"assignee\": assignee,\n \"actor\": actor[\"name\"],\n }\n body = ISSUE_ASSIGNED_MESSAGE_TEMPLATE.format(**context)\n\n elif action == \"ignored\":\n context = {\n \"title\": subject,\n \"actor\": actor[\"name\"],\n }\n body = ISSUE_IGNORED_MESSAGE_TEMPLATE.format(**context)\n\n else:\n raise UnsupportedWebhookEventTypeError(\"unknown-issue-action type\")\n\n return (subject, body)\n\n\ndef handle_deprecated_payload(payload: Dict[str, Any]) -> Tuple[str, str]:\n subject = \"{}\".format(payload.get(\"project_name\"))\n body = DEPRECATED_EXCEPTION_MESSAGE_TEMPLATE.format(\n level=payload[\"level\"].upper(),\n url=payload.get(\"url\"),\n message=payload.get(\"message\"),\n )\n return (subject, body)\n\n\ndef transform_webhook_payload(payload: Dict[str, Any]) -> Optional[Dict[str, Any]]:\n \"\"\"Attempt to use webhook payload for the notification.\n\n When the integration is configured as a webhook, instead of being added as\n an internal integration, the payload is slightly different, but has all the\n required information for sending a notification. We transform this payload to\n look like the payload from a \"properly configured\" integration.\n \"\"\"\n event = payload.get(\"event\", {})\n # deprecated payloads don't have event_id\n event_id = event.get(\"event_id\")\n if not event_id:\n return None\n\n event_path = f\"events/{event_id}/\"\n event[\"web_url\"] = urljoin(payload[\"url\"], event_path)\n timestamp = event.get(\"timestamp\", event[\"received\"])\n event[\"datetime\"] = datetime.fromtimestamp(timestamp, timezone.utc).isoformat(\n timespec=\"microseconds\"\n )\n return payload\n\n\n@webhook_view(\"Sentry\")\n@has_request_variables\ndef api_sentry_webhook(\n request: HttpRequest,\n user_profile: UserProfile,\n payload: Dict[str, Any] = REQ(argument_type=\"body\"),\n) -> HttpResponse:\n data = payload.get(\"data\", None)\n\n if data is None:\n data = transform_webhook_payload(payload)\n\n # We currently support two types of payloads: events and issues.\n if data:\n if \"event\" in data:\n subject, body = handle_event_payload(data[\"event\"])\n elif \"issue\" in data:\n subject, body = handle_issue_payload(payload[\"action\"], data[\"issue\"], payload[\"actor\"])\n else:\n raise UnsupportedWebhookEventTypeError(str(list(data.keys())))\n else:\n subject, body = handle_deprecated_payload(payload)\n\n check_send_webhook_message(request, user_profile, subject, body)\n return json_success(request)\n", "path": "zerver/webhooks/sentry/view.py"}]}
3,238
344
gh_patches_debug_9307
rasdani/github-patches
git_diff
streamlink__streamlink-4210
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- plugins.tviplayer: unable to handle CNN Portugal ### Checklist - [X] This is a plugin issue and not a different kind of issue - [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink) - [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22) - [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master) ### Streamlink version Latest stable release ### Description - issue: - the new `tviplayer` plugin is unable to handle https://tviplayer.iol.pt/direto/CNN - of note, the previous TVI 24 became CNN Portugal after #4199. - to reproduce: ```sh streamlink https://tviplayer.iol.pt/direto/CNN ``` ```sh [cli][info] Found matching plugin tviplayer for URL https://tviplayer.iol.pt/direto/CNN error: Unable to validate response text: Unable to parse HTML: Unicode strings with encoding declaration are not supported. Please use bytes input or XML fragments without declaration. ('<?xml version=\'1.0\' encoding=\'U ...) ``` ### Debug log ```text streamlink --loglevel debug https://tviplayer.iol.pt/direto/CNN [cli][debug] OS: Linux-5.10.0-9-amd64-x86_64-with-glibc2.31 [cli][debug] Python: 3.9.2 [cli][debug] Streamlink: 3.0.2 [cli][debug] Requests(2.26.0), Socks(1.7.1), Websocket(1.2.1) [cli][debug] Arguments: [cli][debug] url=https://tviplayer.iol.pt/direto/CNN [cli][debug] --loglevel=debug [cli][info] Found matching plugin tviplayer for URL https://tviplayer.iol.pt/direto/CNN error: Unable to validate response text: Unable to parse HTML: Unicode strings with encoding declaration are not supported. Please use bytes input or XML fragments without declaration. ('<?xml version=\'1.0\' encoding=\'U ...) ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/streamlink/utils/parse.py` Content: ``` 1 import json 2 import re 3 from urllib.parse import parse_qsl 4 5 from lxml.etree import HTML, XML 6 7 from streamlink.plugin import PluginError 8 9 10 def _parse(parser, data, name, exception, schema, *args, **kwargs): 11 try: 12 parsed = parser(data, *args, **kwargs) 13 except Exception as err: 14 snippet = repr(data) 15 if len(snippet) > 35: 16 snippet = f"{snippet[:35]} ..." 17 18 raise exception(f"Unable to parse {name}: {err} ({snippet})") 19 20 if schema: 21 parsed = schema.validate(parsed, name=name, exception=exception) 22 23 return parsed 24 25 26 def parse_json( 27 data, 28 name="JSON", 29 exception=PluginError, 30 schema=None, 31 *args, **kwargs 32 ): 33 """Wrapper around json.loads. 34 35 Provides these extra features: 36 - Wraps errors in custom exception with a snippet of the data in the message 37 """ 38 return _parse(json.loads, data, name, exception, schema, *args, **kwargs) 39 40 41 def parse_html( 42 data, 43 name="HTML", 44 exception=PluginError, 45 schema=None, 46 *args, **kwargs 47 ): 48 """Wrapper around lxml.etree.HTML with some extras. 
49 50 Provides these extra features: 51 - Wraps errors in custom exception with a snippet of the data in the message 52 """ 53 return _parse(HTML, data, name, exception, schema, *args, **kwargs) 54 55 56 def parse_xml( 57 data, 58 ignore_ns=False, 59 invalid_char_entities=False, 60 name="XML", 61 exception=PluginError, 62 schema=None, 63 *args, **kwargs 64 ): 65 """Wrapper around lxml.etree.XML with some extras. 66 67 Provides these extra features: 68 - Handles incorrectly encoded XML 69 - Allows stripping namespace information 70 - Wraps errors in custom exception with a snippet of the data in the message 71 """ 72 if isinstance(data, str): 73 data = bytes(data, "utf8") 74 if ignore_ns: 75 data = re.sub(br"\s+xmlns=\"(.+?)\"", b"", data) 76 if invalid_char_entities: 77 data = re.sub(br"&(?!(?:#(?:[0-9]+|[Xx][0-9A-Fa-f]+)|[A-Za-z0-9]+);)", b"&amp;", data) 78 79 return _parse(XML, data, name, exception, schema, *args, **kwargs) 80 81 82 def parse_qsd( 83 data, 84 name="query string", 85 exception=PluginError, 86 schema=None, 87 *args, **kwargs 88 ): 89 """Parses a query string into a dict. 90 91 Provides these extra features: 92 - Unlike parse_qs and parse_qsl, duplicate keys are not preserved in favor of a simpler return value 93 - Wraps errors in custom exception with a snippet of the data in the message 94 """ 95 return _parse(lambda d: dict(parse_qsl(d, *args, **kwargs)), data, name, exception, schema) 96 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/streamlink/utils/parse.py b/src/streamlink/utils/parse.py --- a/src/streamlink/utils/parse.py +++ b/src/streamlink/utils/parse.py @@ -48,8 +48,12 @@ """Wrapper around lxml.etree.HTML with some extras. Provides these extra features: + - Removes XML declarations of invalid XHTML5 documents - Wraps errors in custom exception with a snippet of the data in the message """ + if isinstance(data, str) and data.lstrip().startswith("<?xml"): + data = re.sub(r"^\s*<\?xml.+?\?>", "", data) + return _parse(HTML, data, name, exception, schema, *args, **kwargs)
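The patch works because lxml refuses to parse a unicode string that still carries an XML encoding declaration, which is what the CNN Portugal page ships. A minimal reproduction is below (it assumes lxml is installed; the markup string is invented, and the regex is the one from the patch):

```python
# Minimal reproduction of the failure mode and of the fix applied to parse_html().
import re
from lxml.etree import HTML

page = "<?xml version='1.0' encoding='UTF-8'?>\n<html><body><p>ok</p></body></html>"

try:
    HTML(page)
except ValueError as err:
    print("unpatched:", err)   # "Unicode strings with encoding declaration are not supported..."

cleaned = re.sub(r"^\s*<\?xml.+?\?>", "", page)
print("patched:", HTML(cleaned).findtext(".//p"))
```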
{"golden_diff": "diff --git a/src/streamlink/utils/parse.py b/src/streamlink/utils/parse.py\n--- a/src/streamlink/utils/parse.py\n+++ b/src/streamlink/utils/parse.py\n@@ -48,8 +48,12 @@\n \"\"\"Wrapper around lxml.etree.HTML with some extras.\n \n Provides these extra features:\n+ - Removes XML declarations of invalid XHTML5 documents\n - Wraps errors in custom exception with a snippet of the data in the message\n \"\"\"\n+ if isinstance(data, str) and data.lstrip().startswith(\"<?xml\"):\n+ data = re.sub(r\"^\\s*<\\?xml.+?\\?>\", \"\", data)\n+\n return _parse(HTML, data, name, exception, schema, *args, **kwargs)\n", "issue": "plugins.tviplayer: unable to handle CNN Portugal\n### Checklist\n\n- [X] This is a plugin issue and not a different kind of issue\n- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)\n- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)\n- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)\n\n### Streamlink version\n\nLatest stable release\n\n### Description\n\n- issue:\r\n - the new `tviplayer` plugin is unable to handle https://tviplayer.iol.pt/direto/CNN \r\n - of note, the previous TVI 24 became CNN Portugal after #4199.\r\n\r\n- to reproduce:\r\n ```sh\r\n streamlink https://tviplayer.iol.pt/direto/CNN\r\n ```\r\n ```sh\r\n [cli][info] Found matching plugin tviplayer for URL https://tviplayer.iol.pt/direto/CNN\r\n error: Unable to validate response text: Unable to parse HTML: Unicode strings with encoding declaration are not supported. Please use bytes input or XML fragments without declaration. ('<?xml version=\\'1.0\\' encoding=\\'U ...)\r\n ```\r\n\r\n\r\n\n\n### Debug log\n\n```text\nstreamlink --loglevel debug https://tviplayer.iol.pt/direto/CNN\r\n[cli][debug] OS: Linux-5.10.0-9-amd64-x86_64-with-glibc2.31\r\n[cli][debug] Python: 3.9.2\r\n[cli][debug] Streamlink: 3.0.2\r\n[cli][debug] Requests(2.26.0), Socks(1.7.1), Websocket(1.2.1)\r\n[cli][debug] Arguments:\r\n[cli][debug] url=https://tviplayer.iol.pt/direto/CNN\r\n[cli][debug] --loglevel=debug\r\n[cli][info] Found matching plugin tviplayer for URL https://tviplayer.iol.pt/direto/CNN\r\nerror: Unable to validate response text: Unable to parse HTML: Unicode strings with encoding declaration are not supported. Please use bytes input or XML fragments without declaration. 
('<?xml version=\\'1.0\\' encoding=\\'U ...)\n```\n\n", "before_files": [{"content": "import json\nimport re\nfrom urllib.parse import parse_qsl\n\nfrom lxml.etree import HTML, XML\n\nfrom streamlink.plugin import PluginError\n\n\ndef _parse(parser, data, name, exception, schema, *args, **kwargs):\n try:\n parsed = parser(data, *args, **kwargs)\n except Exception as err:\n snippet = repr(data)\n if len(snippet) > 35:\n snippet = f\"{snippet[:35]} ...\"\n\n raise exception(f\"Unable to parse {name}: {err} ({snippet})\")\n\n if schema:\n parsed = schema.validate(parsed, name=name, exception=exception)\n\n return parsed\n\n\ndef parse_json(\n data,\n name=\"JSON\",\n exception=PluginError,\n schema=None,\n *args, **kwargs\n):\n \"\"\"Wrapper around json.loads.\n\n Provides these extra features:\n - Wraps errors in custom exception with a snippet of the data in the message\n \"\"\"\n return _parse(json.loads, data, name, exception, schema, *args, **kwargs)\n\n\ndef parse_html(\n data,\n name=\"HTML\",\n exception=PluginError,\n schema=None,\n *args, **kwargs\n):\n \"\"\"Wrapper around lxml.etree.HTML with some extras.\n\n Provides these extra features:\n - Wraps errors in custom exception with a snippet of the data in the message\n \"\"\"\n return _parse(HTML, data, name, exception, schema, *args, **kwargs)\n\n\ndef parse_xml(\n data,\n ignore_ns=False,\n invalid_char_entities=False,\n name=\"XML\",\n exception=PluginError,\n schema=None,\n *args, **kwargs\n):\n \"\"\"Wrapper around lxml.etree.XML with some extras.\n\n Provides these extra features:\n - Handles incorrectly encoded XML\n - Allows stripping namespace information\n - Wraps errors in custom exception with a snippet of the data in the message\n \"\"\"\n if isinstance(data, str):\n data = bytes(data, \"utf8\")\n if ignore_ns:\n data = re.sub(br\"\\s+xmlns=\\\"(.+?)\\\"\", b\"\", data)\n if invalid_char_entities:\n data = re.sub(br\"&(?!(?:#(?:[0-9]+|[Xx][0-9A-Fa-f]+)|[A-Za-z0-9]+);)\", b\"&amp;\", data)\n\n return _parse(XML, data, name, exception, schema, *args, **kwargs)\n\n\ndef parse_qsd(\n data,\n name=\"query string\",\n exception=PluginError,\n schema=None,\n *args, **kwargs\n):\n \"\"\"Parses a query string into a dict.\n\n Provides these extra features:\n - Unlike parse_qs and parse_qsl, duplicate keys are not preserved in favor of a simpler return value\n - Wraps errors in custom exception with a snippet of the data in the message\n \"\"\"\n return _parse(lambda d: dict(parse_qsl(d, *args, **kwargs)), data, name, exception, schema)\n", "path": "src/streamlink/utils/parse.py"}], "after_files": [{"content": "import json\nimport re\nfrom urllib.parse import parse_qsl\n\nfrom lxml.etree import HTML, XML\n\nfrom streamlink.plugin import PluginError\n\n\ndef _parse(parser, data, name, exception, schema, *args, **kwargs):\n try:\n parsed = parser(data, *args, **kwargs)\n except Exception as err:\n snippet = repr(data)\n if len(snippet) > 35:\n snippet = f\"{snippet[:35]} ...\"\n\n raise exception(f\"Unable to parse {name}: {err} ({snippet})\")\n\n if schema:\n parsed = schema.validate(parsed, name=name, exception=exception)\n\n return parsed\n\n\ndef parse_json(\n data,\n name=\"JSON\",\n exception=PluginError,\n schema=None,\n *args, **kwargs\n):\n \"\"\"Wrapper around json.loads.\n\n Provides these extra features:\n - Wraps errors in custom exception with a snippet of the data in the message\n \"\"\"\n return _parse(json.loads, data, name, exception, schema, *args, **kwargs)\n\n\ndef parse_html(\n data,\n name=\"HTML\",\n 
exception=PluginError,\n schema=None,\n *args, **kwargs\n):\n \"\"\"Wrapper around lxml.etree.HTML with some extras.\n\n Provides these extra features:\n - Removes XML declarations of invalid XHTML5 documents\n - Wraps errors in custom exception with a snippet of the data in the message\n \"\"\"\n if isinstance(data, str) and data.lstrip().startswith(\"<?xml\"):\n data = re.sub(r\"^\\s*<\\?xml.+?\\?>\", \"\", data)\n\n return _parse(HTML, data, name, exception, schema, *args, **kwargs)\n\n\ndef parse_xml(\n data,\n ignore_ns=False,\n invalid_char_entities=False,\n name=\"XML\",\n exception=PluginError,\n schema=None,\n *args, **kwargs\n):\n \"\"\"Wrapper around lxml.etree.XML with some extras.\n\n Provides these extra features:\n - Handles incorrectly encoded XML\n - Allows stripping namespace information\n - Wraps errors in custom exception with a snippet of the data in the message\n \"\"\"\n if isinstance(data, str):\n data = bytes(data, \"utf8\")\n if ignore_ns:\n data = re.sub(br\"\\s+xmlns=\\\"(.+?)\\\"\", b\"\", data)\n if invalid_char_entities:\n data = re.sub(br\"&(?!(?:#(?:[0-9]+|[Xx][0-9A-Fa-f]+)|[A-Za-z0-9]+);)\", b\"&amp;\", data)\n\n return _parse(XML, data, name, exception, schema, *args, **kwargs)\n\n\ndef parse_qsd(\n data,\n name=\"query string\",\n exception=PluginError,\n schema=None,\n *args, **kwargs\n):\n \"\"\"Parses a query string into a dict.\n\n Provides these extra features:\n - Unlike parse_qs and parse_qsl, duplicate keys are not preserved in favor of a simpler return value\n - Wraps errors in custom exception with a snippet of the data in the message\n \"\"\"\n return _parse(lambda d: dict(parse_qsl(d, *args, **kwargs)), data, name, exception, schema)\n", "path": "src/streamlink/utils/parse.py"}]}
1,661
165
gh_patches_debug_17922
rasdani/github-patches
git_diff
Qiskit__qiskit-2836
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [Requires Aer 0.2] BackendSchema list coupling_map as optional <!-- ⚠️ If you do not respect this template, your issue will be closed --> <!-- ⚠️ Make sure to browse the opened and closed issues --> ### Information - **Qiskit Terra version**: - **Python version**: - **Operating system**: ### What is the current behavior? The schemas in `qiskit.providers.model` list `coupling_map` as an optional field in the `backend.configuration` when it is in fact required. ### Steps to reproduce the problem ### What is the expected behavior? ### Suggested solutions --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `qiskit/providers/models/backendconfiguration.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 3 # This code is part of Qiskit. 4 # 5 # (C) Copyright IBM 2017, 2018. 6 # 7 # This code is licensed under the Apache License, Version 2.0. You may 8 # obtain a copy of this license in the LICENSE.txt file in the root directory 9 # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. 10 # 11 # Any modifications or derivative works of this code must retain this 12 # copyright notice, and modified files need to carry a notice indicating 13 # that they have been altered from the originals. 14 15 """Model and schema for backend configuration.""" 16 17 from marshmallow.validate import Length, OneOf, Range, Regexp 18 19 from qiskit.validation import BaseModel, BaseSchema, bind_schema 20 from qiskit.validation.fields import (Boolean, DateTime, Integer, List, Nested, String, 21 Complex, Float, Dict, InstructionParameter) 22 from qiskit.validation.validate import PatternProperties 23 24 25 class GateConfigSchema(BaseSchema): 26 """Schema for GateConfig.""" 27 28 # Required properties. 29 name = String(required=True) 30 parameters = List(String(), required=True) 31 qasm_def = String(required=True) 32 33 # Optional properties. 34 coupling_map = List(List(Integer(), 35 validate=Length(min=1)), 36 validate=Length(min=1)) 37 latency_map = List(List(Integer(validate=OneOf([0, 1])), 38 validate=Length(min=1)), 39 validate=Length(min=1)) 40 conditional = Boolean() 41 description = String() 42 43 44 class UchannelLOSchema(BaseSchema): 45 """Schema for uchannel LO.""" 46 47 # Required properties. 48 q = Integer(required=True, validate=Range(min=0)) 49 scale = Complex(required=True) 50 51 # Optional properties. 52 53 54 class PulseHamiltonianSchema(BaseSchema): 55 """Schema for PulseHamiltonian.""" 56 57 # Required properties. 58 h_str = List(String(), validate=Length(min=1), required=True) 59 dim_osc = List(Integer(validate=Range(min=1)), required=True) 60 dim_qub = List(Integer(validate=Range(min=2)), required=True) 61 vars = Dict(validate=PatternProperties({ 62 Regexp('^([a-z0-9])+$'): InstructionParameter() 63 }), required=True) 64 65 # Optional properties. 66 67 68 class BackendConfigurationSchema(BaseSchema): 69 """Schema for BackendConfiguration.""" 70 # Required properties. 
71 backend_name = String(required=True) 72 backend_version = String(required=True, 73 validate=Regexp("[0-9]+.[0-9]+.[0-9]+$")) 74 n_qubits = Integer(required=True, validate=Range(min=1)) 75 basis_gates = List(String(), required=True) 76 gates = Nested(GateConfigSchema, required=True, many=True) 77 local = Boolean(required=True) 78 simulator = Boolean(required=True) 79 conditional = Boolean(required=True) 80 open_pulse = Boolean(required=True) 81 memory = Boolean(required=True) 82 max_shots = Integer(required=True, validate=Range(min=1)) 83 84 # Optional properties. 85 max_experiments = Integer(validate=Range(min=1)) 86 sample_name = String() 87 coupling_map = List(List(Integer(), 88 validate=Length(min=1)), 89 validate=Length(min=1), allow_none=True) 90 n_registers = Integer(validate=Range(min=1)) 91 register_map = List(List(Integer(validate=OneOf([0, 1])), 92 validate=Length(min=1)), 93 validate=Length(min=1)) 94 configurable = Boolean() 95 credits_required = Boolean() 96 online_date = DateTime() 97 display_name = String() 98 description = String() 99 tags = List(String()) 100 101 102 class QasmBackendConfigurationSchema(BackendConfigurationSchema): 103 """Schema for Qasm backend.""" 104 open_pulse = Boolean(required=True, validate=OneOf([False])) 105 106 107 class PulseBackendConfigurationSchema(QasmBackendConfigurationSchema): 108 """Schema for pulse backend""" 109 # Required properties. 110 open_pulse = Boolean(required=True, validate=OneOf([True])) 111 n_uchannels = Integer(required=True, validate=Range(min=0)) 112 u_channel_lo = List(Nested(UchannelLOSchema, validate=Length(min=1), 113 required=True, many=True)) 114 meas_levels = List(Integer(), validate=Length(min=1), required=True) 115 qubit_lo_range = List(List(Float(validate=Range(min=0)), 116 validate=Length(equal=2)), required=True) 117 meas_lo_range = List(List(Float(validate=Range(min=0)), 118 validate=Length(equal=2)), required=True) 119 dt = Float(required=True, validate=Range(min=0)) # pylint: disable=invalid-name 120 dtm = Float(required=True, validate=Range(min=0)) 121 rep_times = List(Integer(validate=Range(min=0)), required=True) 122 meas_kernels = List(String(), required=True) 123 discriminators = List(String(), required=True) 124 125 # Optional properties. 126 meas_map = List(List(Integer(), validate=Length(min=1))) 127 channel_bandwidth = List(List(Float(), validate=Length(equal=2))) 128 acquisition_latency = List(List(Integer())) 129 conditional_latency = List(List(Integer())) 130 hamiltonian = PulseHamiltonianSchema() 131 132 133 @bind_schema(GateConfigSchema) 134 class GateConfig(BaseModel): 135 """Model for GateConfig. 136 137 Please note that this class only describes the required fields. For the 138 full description of the model, please check ``GateConfigSchema``. 139 140 Attributes: 141 name (str): the gate name as it will be referred to in Qasm. 142 parameters (list[str]): variable names for the gate parameters (if any). 143 qasm_def (str): definition of this gate in terms of Qasm primitives U 144 and CX. 145 """ 146 147 def __init__(self, name, parameters, qasm_def, **kwargs): 148 self.name = name 149 self.parameters = parameters 150 self.qasm_def = qasm_def 151 152 super().__init__(**kwargs) 153 154 155 @bind_schema(UchannelLOSchema) 156 class UchannelLO(BaseModel): 157 """Model for GateConfig. 158 159 Please note that this class only describes the required fields. For the 160 full description of the model, please check ``GateConfigSchema``. 
161 162 Attributes: 163 q (int): Qubit that scale corresponds too. 164 scale (complex): Scale factor for qubit frequency. 165 """ 166 def __init__(self, q, scale, **kwargs): 167 168 self.q = q 169 self.scale = scale 170 171 super().__init__(q=q, scale=scale, **kwargs) 172 173 174 @bind_schema(BackendConfigurationSchema) 175 class BackendConfiguration(BaseModel): 176 """Model for BackendConfiguration. 177 178 Please note that this class only describes the required fields. For the 179 full description of the model, please check ``BackendConfigurationSchema``. 180 Attributes: 181 backend_name (str): backend name. 182 backend_version (str): backend version in the form X.Y.Z. 183 n_qubits (int): number of qubits. 184 basis_gates (list[str]): list of basis gates names on the backend. 185 gates (GateConfig): list of basis gates on the backend. 186 local (bool): backend is local or remote. 187 simulator (bool): backend is a simulator. 188 conditional (bool): backend supports conditional operations. 189 open_pulse (bool): backend supports open pulse. 190 memory (bool): backend supports memory. 191 max_shots (int): maximum number of shots supported. 192 """ 193 194 def __init__(self, backend_name, backend_version, n_qubits, basis_gates, 195 gates, local, simulator, conditional, open_pulse, memory, 196 max_shots, **kwargs): 197 198 self.backend_name = backend_name 199 self.backend_version = backend_version 200 self.n_qubits = n_qubits 201 self.basis_gates = basis_gates 202 self.gates = gates 203 self.local = local 204 self.simulator = simulator 205 self.conditional = conditional 206 self.open_pulse = open_pulse 207 self.memory = memory 208 self.max_shots = max_shots 209 210 super().__init__(**kwargs) 211 212 213 @bind_schema(QasmBackendConfigurationSchema) 214 class QasmBackendConfiguration(BackendConfiguration): 215 """Model for QasmBackendConfiguration. 216 217 Please note that this class only describes the required fields. For the 218 full description of the model, please check ``QasmBackendConfigurationSchema``. 219 Attributes: 220 backend_name (str): backend name. 221 backend_version (str): backend version in the form X.Y.Z. 222 n_qubits (int): number of qubits. 223 basis_gates (list[str]): list of basis gates names on the backend. 224 gates (GateConfig): list of basis gates on the backend. 225 local (bool): backend is local or remote. 226 simulator (bool): backend is a simulator. 227 conditional (bool): backend supports conditional operations. 228 open_pulse (bool): backend supports open pulse. 229 memory (bool): backend supports memory. 230 max_shots (int): maximum number of shots supported. 231 **kwargs: Optional fields. 232 """ 233 234 def __init__(self, backend_name, backend_version, n_qubits, basis_gates, 235 gates, local, simulator, conditional, open_pulse, memory, 236 max_shots, **kwargs): 237 238 super().__init__(backend_name=backend_name, backend_version=backend_version, 239 n_qubits=n_qubits, basis_gates=basis_gates, gates=gates, 240 local=local, simulator=simulator, conditional=conditional, 241 open_pulse=open_pulse, memory=memory, max_shots=max_shots, 242 **kwargs) 243 244 245 @bind_schema(PulseBackendConfigurationSchema) 246 class PulseBackendConfiguration(BackendConfiguration): 247 """Model for PulseBackendConfiguration. 248 249 Please note that this class only describes the required fields. For the 250 full description of the model, please check ``PulseBackendConfigurationSchema``. 251 Attributes: 252 backend_name (str): backend name. 
253 backend_version (str): backend version in the form X.Y.Z. 254 n_qubits (int): number of qubits. 255 basis_gates (list[str]): list of basis gates names on the backend. 256 gates (GateConfig): list of basis gates on the backend. 257 local (bool): backend is local or remote. 258 simulator (bool): backend is a simulator. 259 conditional (bool): backend supports conditional operations. 260 open_pulse (bool): backend supports open pulse. 261 memory (bool): backend supports memory. 262 max_shots (int): maximum number of shots supported. 263 n_uchannels (int): Number of u-channels. 264 u_channel_lo (list[UchannelLO]): U-channel relationship on device los. 265 meas_levels (list[int]): Supported measurement levels. 266 qubit_lo_range (list[list[float]]): Qubit lo ranges for each qubit 267 with form (min, max) in GHz. 268 meas_lo_range (list[list[float]]): Measurement lo ranges for each qubit 269 with form (min, max) in GHz. 270 dt (float): Qubit drive channel timestep in nanoseconds. 271 dtm (float): Measurement drive channel timestep in nanoseconds. 272 rep_times (list[float]): Supported repetition times for device in microseconds. 273 meas_kernels (list[str]): Supported measurement kernels. 274 discriminators: Supported discriminators. 275 **kwargs: Optional fields. 276 """ 277 278 def __init__(self, backend_name, backend_version, n_qubits, basis_gates, 279 gates, local, simulator, conditional, open_pulse, memory, 280 max_shots, n_uchannels, u_channel_lo, meas_levels, 281 qubit_lo_range, meas_lo_range, dt, dtm, rep_times, meas_kernels, 282 discriminators, **kwargs): 283 284 self.n_uchannels = n_uchannels 285 self.u_channel_lo = u_channel_lo 286 self.meas_levels = meas_levels 287 self.qubit_lo_range = qubit_lo_range 288 self.meas_lo_range = meas_lo_range 289 self.dt = dt # pylint: disable=invalid-name 290 self.dtm = dtm 291 self.rep_times = rep_times 292 self.meas_kernels = meas_kernels 293 self.discriminators = discriminators 294 295 super().__init__(backend_name=backend_name, backend_version=backend_version, 296 n_qubits=n_qubits, basis_gates=basis_gates, gates=gates, 297 local=local, simulator=simulator, conditional=conditional, 298 open_pulse=open_pulse, memory=memory, max_shots=max_shots, 299 n_uchannels=n_uchannels, u_channel_lo=u_channel_lo, 300 meas_levels=meas_levels, qubit_lo_range=qubit_lo_range, 301 meas_lo_range=meas_lo_range, dt=dt, dtm=dtm, 302 rep_times=rep_times, meas_kernels=meas_kernels, 303 discriminators=discriminators, **kwargs) 304 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/qiskit/providers/models/backendconfiguration.py b/qiskit/providers/models/backendconfiguration.py --- a/qiskit/providers/models/backendconfiguration.py +++ b/qiskit/providers/models/backendconfiguration.py @@ -80,13 +80,12 @@ open_pulse = Boolean(required=True) memory = Boolean(required=True) max_shots = Integer(required=True, validate=Range(min=1)) + coupling_map = List(List(Integer(), validate=Length(min=1)), + validate=Length(min=1), allow_none=True, required=True) # Optional properties. max_experiments = Integer(validate=Range(min=1)) sample_name = String() - coupling_map = List(List(Integer(), - validate=Length(min=1)), - validate=Length(min=1), allow_none=True) n_registers = Integer(validate=Range(min=1)) register_map = List(List(Integer(validate=OneOf([0, 1])), validate=Length(min=1)),
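The reference patch above moves `coupling_map` out of the optional block and marks it `required=True` while keeping `allow_none=True`, so a backend configuration must always report the key but may legitimately report it as null (e.g. for simulators). A minimal sketch of that validation behaviour, written against plain marshmallow rather than Qiskit's own field wrappers (the schema name is illustrative):

```python
# Sketch only: plain marshmallow, not Qiskit's BackendConfigurationSchema.
from marshmallow import Schema, fields, validate

class ConfigSketch(Schema):
    coupling_map = fields.List(
        fields.List(fields.Integer(), validate=validate.Length(min=1)),
        validate=validate.Length(min=1),
        allow_none=True,   # a null coupling map is still acceptable
        required=True,     # but the key itself can no longer be omitted
    )

schema = ConfigSketch()
print(schema.validate({"coupling_map": [[0, 1], [1, 2]]}))  # {} -> passes
print(schema.validate({"coupling_map": None}))              # {} -> passes, None is allowed
print(schema.validate({}))                                  # missing-field error is reported
```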
{"golden_diff": "diff --git a/qiskit/providers/models/backendconfiguration.py b/qiskit/providers/models/backendconfiguration.py\n--- a/qiskit/providers/models/backendconfiguration.py\n+++ b/qiskit/providers/models/backendconfiguration.py\n@@ -80,13 +80,12 @@\n open_pulse = Boolean(required=True)\n memory = Boolean(required=True)\n max_shots = Integer(required=True, validate=Range(min=1))\n+ coupling_map = List(List(Integer(), validate=Length(min=1)),\n+ validate=Length(min=1), allow_none=True, required=True)\n \n # Optional properties.\n max_experiments = Integer(validate=Range(min=1))\n sample_name = String()\n- coupling_map = List(List(Integer(),\n- validate=Length(min=1)),\n- validate=Length(min=1), allow_none=True)\n n_registers = Integer(validate=Range(min=1))\n register_map = List(List(Integer(validate=OneOf([0, 1])),\n validate=Length(min=1)),\n", "issue": "[Requires Aer 0.2] BackendSchema list coupling_map as optional\n<!-- \u26a0\ufe0f If you do not respect this template, your issue will be closed -->\r\n<!-- \u26a0\ufe0f Make sure to browse the opened and closed issues -->\r\n\r\n### Information\r\n\r\n- **Qiskit Terra version**:\r\n- **Python version**:\r\n- **Operating system**:\r\n\r\n### What is the current behavior?\r\nThe schemas in `qiskit.providers.model` list `coupling_map` as an optional field in the `backend.configuration` when it is in fact required.\r\n\r\n\r\n### Steps to reproduce the problem\r\n\r\n\r\n\r\n### What is the expected behavior?\r\n\r\n\r\n\r\n### Suggested solutions\r\n\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017, 2018.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"Model and schema for backend configuration.\"\"\"\n\nfrom marshmallow.validate import Length, OneOf, Range, Regexp\n\nfrom qiskit.validation import BaseModel, BaseSchema, bind_schema\nfrom qiskit.validation.fields import (Boolean, DateTime, Integer, List, Nested, String,\n Complex, Float, Dict, InstructionParameter)\nfrom qiskit.validation.validate import PatternProperties\n\n\nclass GateConfigSchema(BaseSchema):\n \"\"\"Schema for GateConfig.\"\"\"\n\n # Required properties.\n name = String(required=True)\n parameters = List(String(), required=True)\n qasm_def = String(required=True)\n\n # Optional properties.\n coupling_map = List(List(Integer(),\n validate=Length(min=1)),\n validate=Length(min=1))\n latency_map = List(List(Integer(validate=OneOf([0, 1])),\n validate=Length(min=1)),\n validate=Length(min=1))\n conditional = Boolean()\n description = String()\n\n\nclass UchannelLOSchema(BaseSchema):\n \"\"\"Schema for uchannel LO.\"\"\"\n\n # Required properties.\n q = Integer(required=True, validate=Range(min=0))\n scale = Complex(required=True)\n\n # Optional properties.\n\n\nclass PulseHamiltonianSchema(BaseSchema):\n \"\"\"Schema for PulseHamiltonian.\"\"\"\n\n # Required properties.\n h_str = List(String(), validate=Length(min=1), required=True)\n dim_osc = List(Integer(validate=Range(min=1)), required=True)\n dim_qub = List(Integer(validate=Range(min=2)), required=True)\n vars = Dict(validate=PatternProperties({\n 
Regexp('^([a-z0-9])+$'): InstructionParameter()\n }), required=True)\n\n # Optional properties.\n\n\nclass BackendConfigurationSchema(BaseSchema):\n \"\"\"Schema for BackendConfiguration.\"\"\"\n # Required properties.\n backend_name = String(required=True)\n backend_version = String(required=True,\n validate=Regexp(\"[0-9]+.[0-9]+.[0-9]+$\"))\n n_qubits = Integer(required=True, validate=Range(min=1))\n basis_gates = List(String(), required=True)\n gates = Nested(GateConfigSchema, required=True, many=True)\n local = Boolean(required=True)\n simulator = Boolean(required=True)\n conditional = Boolean(required=True)\n open_pulse = Boolean(required=True)\n memory = Boolean(required=True)\n max_shots = Integer(required=True, validate=Range(min=1))\n\n # Optional properties.\n max_experiments = Integer(validate=Range(min=1))\n sample_name = String()\n coupling_map = List(List(Integer(),\n validate=Length(min=1)),\n validate=Length(min=1), allow_none=True)\n n_registers = Integer(validate=Range(min=1))\n register_map = List(List(Integer(validate=OneOf([0, 1])),\n validate=Length(min=1)),\n validate=Length(min=1))\n configurable = Boolean()\n credits_required = Boolean()\n online_date = DateTime()\n display_name = String()\n description = String()\n tags = List(String())\n\n\nclass QasmBackendConfigurationSchema(BackendConfigurationSchema):\n \"\"\"Schema for Qasm backend.\"\"\"\n open_pulse = Boolean(required=True, validate=OneOf([False]))\n\n\nclass PulseBackendConfigurationSchema(QasmBackendConfigurationSchema):\n \"\"\"Schema for pulse backend\"\"\"\n # Required properties.\n open_pulse = Boolean(required=True, validate=OneOf([True]))\n n_uchannels = Integer(required=True, validate=Range(min=0))\n u_channel_lo = List(Nested(UchannelLOSchema, validate=Length(min=1),\n required=True, many=True))\n meas_levels = List(Integer(), validate=Length(min=1), required=True)\n qubit_lo_range = List(List(Float(validate=Range(min=0)),\n validate=Length(equal=2)), required=True)\n meas_lo_range = List(List(Float(validate=Range(min=0)),\n validate=Length(equal=2)), required=True)\n dt = Float(required=True, validate=Range(min=0)) # pylint: disable=invalid-name\n dtm = Float(required=True, validate=Range(min=0))\n rep_times = List(Integer(validate=Range(min=0)), required=True)\n meas_kernels = List(String(), required=True)\n discriminators = List(String(), required=True)\n\n # Optional properties.\n meas_map = List(List(Integer(), validate=Length(min=1)))\n channel_bandwidth = List(List(Float(), validate=Length(equal=2)))\n acquisition_latency = List(List(Integer()))\n conditional_latency = List(List(Integer()))\n hamiltonian = PulseHamiltonianSchema()\n\n\n@bind_schema(GateConfigSchema)\nclass GateConfig(BaseModel):\n \"\"\"Model for GateConfig.\n\n Please note that this class only describes the required fields. For the\n full description of the model, please check ``GateConfigSchema``.\n\n Attributes:\n name (str): the gate name as it will be referred to in Qasm.\n parameters (list[str]): variable names for the gate parameters (if any).\n qasm_def (str): definition of this gate in terms of Qasm primitives U\n and CX.\n \"\"\"\n\n def __init__(self, name, parameters, qasm_def, **kwargs):\n self.name = name\n self.parameters = parameters\n self.qasm_def = qasm_def\n\n super().__init__(**kwargs)\n\n\n@bind_schema(UchannelLOSchema)\nclass UchannelLO(BaseModel):\n \"\"\"Model for GateConfig.\n\n Please note that this class only describes the required fields. 
For the\n full description of the model, please check ``GateConfigSchema``.\n\n Attributes:\n q (int): Qubit that scale corresponds too.\n scale (complex): Scale factor for qubit frequency.\n \"\"\"\n def __init__(self, q, scale, **kwargs):\n\n self.q = q\n self.scale = scale\n\n super().__init__(q=q, scale=scale, **kwargs)\n\n\n@bind_schema(BackendConfigurationSchema)\nclass BackendConfiguration(BaseModel):\n \"\"\"Model for BackendConfiguration.\n\n Please note that this class only describes the required fields. For the\n full description of the model, please check ``BackendConfigurationSchema``.\n Attributes:\n backend_name (str): backend name.\n backend_version (str): backend version in the form X.Y.Z.\n n_qubits (int): number of qubits.\n basis_gates (list[str]): list of basis gates names on the backend.\n gates (GateConfig): list of basis gates on the backend.\n local (bool): backend is local or remote.\n simulator (bool): backend is a simulator.\n conditional (bool): backend supports conditional operations.\n open_pulse (bool): backend supports open pulse.\n memory (bool): backend supports memory.\n max_shots (int): maximum number of shots supported.\n \"\"\"\n\n def __init__(self, backend_name, backend_version, n_qubits, basis_gates,\n gates, local, simulator, conditional, open_pulse, memory,\n max_shots, **kwargs):\n\n self.backend_name = backend_name\n self.backend_version = backend_version\n self.n_qubits = n_qubits\n self.basis_gates = basis_gates\n self.gates = gates\n self.local = local\n self.simulator = simulator\n self.conditional = conditional\n self.open_pulse = open_pulse\n self.memory = memory\n self.max_shots = max_shots\n\n super().__init__(**kwargs)\n\n\n@bind_schema(QasmBackendConfigurationSchema)\nclass QasmBackendConfiguration(BackendConfiguration):\n \"\"\"Model for QasmBackendConfiguration.\n\n Please note that this class only describes the required fields. For the\n full description of the model, please check ``QasmBackendConfigurationSchema``.\n Attributes:\n backend_name (str): backend name.\n backend_version (str): backend version in the form X.Y.Z.\n n_qubits (int): number of qubits.\n basis_gates (list[str]): list of basis gates names on the backend.\n gates (GateConfig): list of basis gates on the backend.\n local (bool): backend is local or remote.\n simulator (bool): backend is a simulator.\n conditional (bool): backend supports conditional operations.\n open_pulse (bool): backend supports open pulse.\n memory (bool): backend supports memory.\n max_shots (int): maximum number of shots supported.\n **kwargs: Optional fields.\n \"\"\"\n\n def __init__(self, backend_name, backend_version, n_qubits, basis_gates,\n gates, local, simulator, conditional, open_pulse, memory,\n max_shots, **kwargs):\n\n super().__init__(backend_name=backend_name, backend_version=backend_version,\n n_qubits=n_qubits, basis_gates=basis_gates, gates=gates,\n local=local, simulator=simulator, conditional=conditional,\n open_pulse=open_pulse, memory=memory, max_shots=max_shots,\n **kwargs)\n\n\n@bind_schema(PulseBackendConfigurationSchema)\nclass PulseBackendConfiguration(BackendConfiguration):\n \"\"\"Model for PulseBackendConfiguration.\n\n Please note that this class only describes the required fields. 
For the\n full description of the model, please check ``PulseBackendConfigurationSchema``.\n Attributes:\n backend_name (str): backend name.\n backend_version (str): backend version in the form X.Y.Z.\n n_qubits (int): number of qubits.\n basis_gates (list[str]): list of basis gates names on the backend.\n gates (GateConfig): list of basis gates on the backend.\n local (bool): backend is local or remote.\n simulator (bool): backend is a simulator.\n conditional (bool): backend supports conditional operations.\n open_pulse (bool): backend supports open pulse.\n memory (bool): backend supports memory.\n max_shots (int): maximum number of shots supported.\n n_uchannels (int): Number of u-channels.\n u_channel_lo (list[UchannelLO]): U-channel relationship on device los.\n meas_levels (list[int]): Supported measurement levels.\n qubit_lo_range (list[list[float]]): Qubit lo ranges for each qubit\n with form (min, max) in GHz.\n meas_lo_range (list[list[float]]): Measurement lo ranges for each qubit\n with form (min, max) in GHz.\n dt (float): Qubit drive channel timestep in nanoseconds.\n dtm (float): Measurement drive channel timestep in nanoseconds.\n rep_times (list[float]): Supported repetition times for device in microseconds.\n meas_kernels (list[str]): Supported measurement kernels.\n discriminators: Supported discriminators.\n **kwargs: Optional fields.\n \"\"\"\n\n def __init__(self, backend_name, backend_version, n_qubits, basis_gates,\n gates, local, simulator, conditional, open_pulse, memory,\n max_shots, n_uchannels, u_channel_lo, meas_levels,\n qubit_lo_range, meas_lo_range, dt, dtm, rep_times, meas_kernels,\n discriminators, **kwargs):\n\n self.n_uchannels = n_uchannels\n self.u_channel_lo = u_channel_lo\n self.meas_levels = meas_levels\n self.qubit_lo_range = qubit_lo_range\n self.meas_lo_range = meas_lo_range\n self.dt = dt # pylint: disable=invalid-name\n self.dtm = dtm\n self.rep_times = rep_times\n self.meas_kernels = meas_kernels\n self.discriminators = discriminators\n\n super().__init__(backend_name=backend_name, backend_version=backend_version,\n n_qubits=n_qubits, basis_gates=basis_gates, gates=gates,\n local=local, simulator=simulator, conditional=conditional,\n open_pulse=open_pulse, memory=memory, max_shots=max_shots,\n n_uchannels=n_uchannels, u_channel_lo=u_channel_lo,\n meas_levels=meas_levels, qubit_lo_range=qubit_lo_range,\n meas_lo_range=meas_lo_range, dt=dt, dtm=dtm,\n rep_times=rep_times, meas_kernels=meas_kernels,\n discriminators=discriminators, **kwargs)\n", "path": "qiskit/providers/models/backendconfiguration.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017, 2018.\n#\n# This code is licensed under the Apache License, Version 2.0. 
You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"Model and schema for backend configuration.\"\"\"\n\nfrom marshmallow.validate import Length, OneOf, Range, Regexp\n\nfrom qiskit.validation import BaseModel, BaseSchema, bind_schema\nfrom qiskit.validation.fields import (Boolean, DateTime, Integer, List, Nested, String,\n Complex, Float, Dict, InstructionParameter)\nfrom qiskit.validation.validate import PatternProperties\n\n\nclass GateConfigSchema(BaseSchema):\n \"\"\"Schema for GateConfig.\"\"\"\n\n # Required properties.\n name = String(required=True)\n parameters = List(String(), required=True)\n qasm_def = String(required=True)\n\n # Optional properties.\n coupling_map = List(List(Integer(),\n validate=Length(min=1)),\n validate=Length(min=1))\n latency_map = List(List(Integer(validate=OneOf([0, 1])),\n validate=Length(min=1)),\n validate=Length(min=1))\n conditional = Boolean()\n description = String()\n\n\nclass UchannelLOSchema(BaseSchema):\n \"\"\"Schema for uchannel LO.\"\"\"\n\n # Required properties.\n q = Integer(required=True, validate=Range(min=0))\n scale = Complex(required=True)\n\n # Optional properties.\n\n\nclass PulseHamiltonianSchema(BaseSchema):\n \"\"\"Schema for PulseHamiltonian.\"\"\"\n\n # Required properties.\n h_str = List(String(), validate=Length(min=1), required=True)\n dim_osc = List(Integer(validate=Range(min=1)), required=True)\n dim_qub = List(Integer(validate=Range(min=2)), required=True)\n vars = Dict(validate=PatternProperties({\n Regexp('^([a-z0-9])+$'): InstructionParameter()\n }), required=True)\n\n # Optional properties.\n\n\nclass BackendConfigurationSchema(BaseSchema):\n \"\"\"Schema for BackendConfiguration.\"\"\"\n # Required properties.\n backend_name = String(required=True)\n backend_version = String(required=True,\n validate=Regexp(\"[0-9]+.[0-9]+.[0-9]+$\"))\n n_qubits = Integer(required=True, validate=Range(min=1))\n basis_gates = List(String(), required=True)\n gates = Nested(GateConfigSchema, required=True, many=True)\n local = Boolean(required=True)\n simulator = Boolean(required=True)\n conditional = Boolean(required=True)\n open_pulse = Boolean(required=True)\n memory = Boolean(required=True)\n max_shots = Integer(required=True, validate=Range(min=1))\n coupling_map = List(List(Integer(), validate=Length(min=1)),\n validate=Length(min=1), allow_none=True, required=True)\n\n # Optional properties.\n max_experiments = Integer(validate=Range(min=1))\n sample_name = String()\n n_registers = Integer(validate=Range(min=1))\n register_map = List(List(Integer(validate=OneOf([0, 1])),\n validate=Length(min=1)),\n validate=Length(min=1))\n configurable = Boolean()\n credits_required = Boolean()\n online_date = DateTime()\n display_name = String()\n description = String()\n tags = List(String())\n\n\nclass QasmBackendConfigurationSchema(BackendConfigurationSchema):\n \"\"\"Schema for Qasm backend.\"\"\"\n open_pulse = Boolean(required=True, validate=OneOf([False]))\n\n\nclass PulseBackendConfigurationSchema(QasmBackendConfigurationSchema):\n \"\"\"Schema for pulse backend\"\"\"\n # Required properties.\n open_pulse = Boolean(required=True, validate=OneOf([True]))\n n_uchannels = Integer(required=True, validate=Range(min=0))\n 
u_channel_lo = List(Nested(UchannelLOSchema, validate=Length(min=1),\n required=True, many=True))\n meas_levels = List(Integer(), validate=Length(min=1), required=True)\n qubit_lo_range = List(List(Float(validate=Range(min=0)),\n validate=Length(equal=2)), required=True)\n meas_lo_range = List(List(Float(validate=Range(min=0)),\n validate=Length(equal=2)), required=True)\n dt = Float(required=True, validate=Range(min=0)) # pylint: disable=invalid-name\n dtm = Float(required=True, validate=Range(min=0))\n rep_times = List(Integer(validate=Range(min=0)), required=True)\n meas_kernels = List(String(), required=True)\n discriminators = List(String(), required=True)\n\n # Optional properties.\n meas_map = List(List(Integer(), validate=Length(min=1)))\n channel_bandwidth = List(List(Float(), validate=Length(equal=2)))\n acquisition_latency = List(List(Integer()))\n conditional_latency = List(List(Integer()))\n hamiltonian = PulseHamiltonianSchema()\n\n\n@bind_schema(GateConfigSchema)\nclass GateConfig(BaseModel):\n \"\"\"Model for GateConfig.\n\n Please note that this class only describes the required fields. For the\n full description of the model, please check ``GateConfigSchema``.\n\n Attributes:\n name (str): the gate name as it will be referred to in Qasm.\n parameters (list[str]): variable names for the gate parameters (if any).\n qasm_def (str): definition of this gate in terms of Qasm primitives U\n and CX.\n \"\"\"\n\n def __init__(self, name, parameters, qasm_def, **kwargs):\n self.name = name\n self.parameters = parameters\n self.qasm_def = qasm_def\n\n super().__init__(**kwargs)\n\n\n@bind_schema(UchannelLOSchema)\nclass UchannelLO(BaseModel):\n \"\"\"Model for GateConfig.\n\n Please note that this class only describes the required fields. For the\n full description of the model, please check ``GateConfigSchema``.\n\n Attributes:\n q (int): Qubit that scale corresponds too.\n scale (complex): Scale factor for qubit frequency.\n \"\"\"\n def __init__(self, q, scale, **kwargs):\n\n self.q = q\n self.scale = scale\n\n super().__init__(q=q, scale=scale, **kwargs)\n\n\n@bind_schema(BackendConfigurationSchema)\nclass BackendConfiguration(BaseModel):\n \"\"\"Model for BackendConfiguration.\n\n Please note that this class only describes the required fields. 
For the\n full description of the model, please check ``BackendConfigurationSchema``.\n Attributes:\n backend_name (str): backend name.\n backend_version (str): backend version in the form X.Y.Z.\n n_qubits (int): number of qubits.\n basis_gates (list[str]): list of basis gates names on the backend.\n gates (GateConfig): list of basis gates on the backend.\n local (bool): backend is local or remote.\n simulator (bool): backend is a simulator.\n conditional (bool): backend supports conditional operations.\n open_pulse (bool): backend supports open pulse.\n memory (bool): backend supports memory.\n max_shots (int): maximum number of shots supported.\n \"\"\"\n\n def __init__(self, backend_name, backend_version, n_qubits, basis_gates,\n gates, local, simulator, conditional, open_pulse, memory,\n max_shots, **kwargs):\n\n self.backend_name = backend_name\n self.backend_version = backend_version\n self.n_qubits = n_qubits\n self.basis_gates = basis_gates\n self.gates = gates\n self.local = local\n self.simulator = simulator\n self.conditional = conditional\n self.open_pulse = open_pulse\n self.memory = memory\n self.max_shots = max_shots\n\n super().__init__(**kwargs)\n\n\n@bind_schema(QasmBackendConfigurationSchema)\nclass QasmBackendConfiguration(BackendConfiguration):\n \"\"\"Model for QasmBackendConfiguration.\n\n Please note that this class only describes the required fields. For the\n full description of the model, please check ``QasmBackendConfigurationSchema``.\n Attributes:\n backend_name (str): backend name.\n backend_version (str): backend version in the form X.Y.Z.\n n_qubits (int): number of qubits.\n basis_gates (list[str]): list of basis gates names on the backend.\n gates (GateConfig): list of basis gates on the backend.\n local (bool): backend is local or remote.\n simulator (bool): backend is a simulator.\n conditional (bool): backend supports conditional operations.\n open_pulse (bool): backend supports open pulse.\n memory (bool): backend supports memory.\n max_shots (int): maximum number of shots supported.\n **kwargs: Optional fields.\n \"\"\"\n\n def __init__(self, backend_name, backend_version, n_qubits, basis_gates,\n gates, local, simulator, conditional, open_pulse, memory,\n max_shots, **kwargs):\n\n super().__init__(backend_name=backend_name, backend_version=backend_version,\n n_qubits=n_qubits, basis_gates=basis_gates, gates=gates,\n local=local, simulator=simulator, conditional=conditional,\n open_pulse=open_pulse, memory=memory, max_shots=max_shots,\n **kwargs)\n\n\n@bind_schema(PulseBackendConfigurationSchema)\nclass PulseBackendConfiguration(BackendConfiguration):\n \"\"\"Model for PulseBackendConfiguration.\n\n Please note that this class only describes the required fields. 
For the\n full description of the model, please check ``PulseBackendConfigurationSchema``.\n Attributes:\n backend_name (str): backend name.\n backend_version (str): backend version in the form X.Y.Z.\n n_qubits (int): number of qubits.\n basis_gates (list[str]): list of basis gates names on the backend.\n gates (GateConfig): list of basis gates on the backend.\n local (bool): backend is local or remote.\n simulator (bool): backend is a simulator.\n conditional (bool): backend supports conditional operations.\n open_pulse (bool): backend supports open pulse.\n memory (bool): backend supports memory.\n max_shots (int): maximum number of shots supported.\n n_uchannels (int): Number of u-channels.\n u_channel_lo (list[UchannelLO]): U-channel relationship on device los.\n meas_levels (list[int]): Supported measurement levels.\n qubit_lo_range (list[list[float]]): Qubit lo ranges for each qubit\n with form (min, max) in GHz.\n meas_lo_range (list[list[float]]): Measurement lo ranges for each qubit\n with form (min, max) in GHz.\n dt (float): Qubit drive channel timestep in nanoseconds.\n dtm (float): Measurement drive channel timestep in nanoseconds.\n rep_times (list[float]): Supported repetition times for device in microseconds.\n meas_kernels (list[str]): Supported measurement kernels.\n discriminators: Supported discriminators.\n **kwargs: Optional fields.\n \"\"\"\n\n def __init__(self, backend_name, backend_version, n_qubits, basis_gates,\n gates, local, simulator, conditional, open_pulse, memory,\n max_shots, n_uchannels, u_channel_lo, meas_levels,\n qubit_lo_range, meas_lo_range, dt, dtm, rep_times, meas_kernels,\n discriminators, **kwargs):\n\n self.n_uchannels = n_uchannels\n self.u_channel_lo = u_channel_lo\n self.meas_levels = meas_levels\n self.qubit_lo_range = qubit_lo_range\n self.meas_lo_range = meas_lo_range\n self.dt = dt # pylint: disable=invalid-name\n self.dtm = dtm\n self.rep_times = rep_times\n self.meas_kernels = meas_kernels\n self.discriminators = discriminators\n\n super().__init__(backend_name=backend_name, backend_version=backend_version,\n n_qubits=n_qubits, basis_gates=basis_gates, gates=gates,\n local=local, simulator=simulator, conditional=conditional,\n open_pulse=open_pulse, memory=memory, max_shots=max_shots,\n n_uchannels=n_uchannels, u_channel_lo=u_channel_lo,\n meas_levels=meas_levels, qubit_lo_range=qubit_lo_range,\n meas_lo_range=meas_lo_range, dt=dt, dtm=dtm,\n rep_times=rep_times, meas_kernels=meas_kernels,\n discriminators=discriminators, **kwargs)\n", "path": "qiskit/providers/models/backendconfiguration.py"}]}
3,963
214
gh_patches_debug_7253
rasdani/github-patches
git_diff
mlcommons__GaNDLF-250
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Default mapping to GPU device during inference **Describe the bug** When running on CPU, this issue occurs: RuntimeError: Attempting to deserialize object on a CUDA device but torch.cuda.is_available() is False. If you are running on a CPU-only machine, please use torch.load with map_location=torch.device('cpu') to map your storages to the CPU. Hence adding `map_location` to torch.load (Ref: https://pytorch.org/docs/stable/generated/torch.load.html). Please review. **To Reproduce** Steps to reproduce the behavior: After data preprocess, run: python gandlf_run -c <config> -i <val_dataset> -m <model_dir> -t False -d cpu **Expected behavior** Model should be mapped to the CPU device. Although there is a `send_model_to_device` method implemented, this error occurs before that, during `torch.load` **Screenshots** If applicable, add screenshots to help explain your problem. **GaNDLF Version** Version: 0.0.13 **Desktop (please complete the following information):** - OS: Linux CentOS 7.6 **Additional context** Can provided if needed --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `GANDLF/compute/inference_loop.py` Content: ``` 1 from .forward_pass import validate_network 2 import os 3 4 # hides torchio citation request, see https://github.com/fepegar/torchio/issues/235 5 os.environ["TORCHIO_HIDE_CITATION_PROMPT"] = "1" 6 7 import pickle, argparse, torch 8 import numpy as np 9 import pandas as pd 10 from torch.utils.data import DataLoader 11 from skimage.io import imsave 12 from tqdm import tqdm 13 from torch.cuda.amp import autocast 14 from GANDLF.data.ImagesFromDataFrame import ImagesFromDataFrame 15 from GANDLF.utils import populate_channel_keys_in_params, send_model_to_device 16 from GANDLF.models import global_models_dict 17 18 19 def inference_loop(inferenceDataFromPickle, device, parameters, outputDir): 20 """ 21 The main training loop. 22 23 Args: 24 inferenceDataFromPickle (pandas.DataFrame): The data to use for inference. 25 device (str): The device to perform computations on. 26 parameters (dict): The parameters dictionary. 27 outputDir (str): The output directory. 
28 """ 29 # Defining our model here according to parameters mentioned in the configuration file 30 print("Number of dims : ", parameters["model"]["dimension"]) 31 if "num_channels" in parameters["model"]: 32 print("Number of channels : ", parameters["model"]["num_channels"]) 33 print("Number of classes : ", len(parameters["model"]["class_list"])) 34 35 # Fetch the model according to params mentioned in the configuration file 36 model = global_models_dict[parameters["model"]["architecture"]]( 37 parameters=parameters 38 ) 39 40 # Setting up the inference loader 41 inferenceDataForTorch = ImagesFromDataFrame( 42 inferenceDataFromPickle, parameters, train=False 43 ) 44 inference_loader = DataLoader(inferenceDataForTorch, batch_size=1) 45 46 # Loading the weights into the model 47 main_dict = outputDir 48 if os.path.isdir(outputDir): 49 file_to_check = os.path.join( 50 outputDir, str(parameters["model"]["architecture"]) + "_best.pth.tar" 51 ) 52 if not os.path.isfile(file_to_check): 53 raise ValueError("The model specified model was not found:", file_to_check) 54 main_dict = torch.load(file_to_check) 55 model.load_state_dict(main_dict["model_state_dict"]) 56 57 if not (os.environ.get("HOSTNAME") is None): 58 print("\nHostname :" + str(os.environ.get("HOSTNAME")), flush=True) 59 60 # get the channel keys for concatenation later (exclude non numeric channel keys) 61 parameters = populate_channel_keys_in_params(inference_loader, parameters) 62 parameters["save_output"] = True 63 64 print("Data Samples: ", len(inference_loader.dataset), flush=True) 65 model, parameters["model"]["amp"], parameters["device"] = send_model_to_device( 66 model, parameters["model"]["amp"], device, optimizer=None 67 ) 68 69 print("Using device:", parameters["device"], flush=True) 70 71 # radiology inference 72 if parameters["modality"] == "rad": 73 average_epoch_valid_loss, average_epoch_valid_metric = validate_network( 74 model, inference_loader, None, parameters, mode="inference" 75 ) 76 print(average_epoch_valid_loss, average_epoch_valid_metric) 77 elif (parameters["modality"] == "path") or (parameters["modality"] == "histo"): 78 # histology inference 79 if os.name != "nt": 80 """ 81 path inference is Linux-only because openslide for Windows works only for Python-3.8 whereas pickle5 works only for 3.6 and 3.7 82 """ 83 from GANDLF.data.inference_dataloader_histopath import InferTumorSegDataset 84 from openslide import OpenSlide 85 86 # actual computation 87 for _, row in inferenceDataForTorch.iterrows(): 88 subject_name = row[parameters["headers"]["subjectIDHeader"]] 89 print( 90 "Patient Slide : ", 91 row[parameters["headers"]["subjectIDHeader"]], 92 ) 93 print( 94 "Patient Location : ", 95 row[parameters["headers"]["channelHeaders"]], 96 ) 97 print(row[parameters["headers"]["channelHeaders"]].values[0]) 98 os_image = OpenSlide( 99 row[parameters["headers"]["channelHeaders"]].values[0] 100 ) 101 level_width, level_height = os_image.level_dimensions[ 102 int(parameters["slide_level"]) 103 ] 104 subject_dest_dir = os.path.join(outputDir, subject_name) 105 os.makedirs(subject_dest_dir, exist_ok=True) 106 107 probs_map = np.zeros((level_height, level_width), dtype=np.float16) 108 count_map = np.zeros((level_height, level_width), dtype=np.uint8) 109 110 patient_dataset_obj = InferTumorSegDataset( 111 row[parameters["headers"]["channelHeaders"]].values[0], 112 patch_size=patch_size, 113 stride_size=parameters["stride_size"], 114 selected_level=parameters["slide_level"], 115 mask_level=4, 116 ) 117 118 dataloader = 
DataLoader( 119 patient_dataset_obj, 120 batch_size=int(parameters["batch_size"]), 121 shuffle=False, 122 num_workers=parameters["q_num_workers"], 123 ) 124 for image_patches, (x_coords, y_coords) in tqdm(dataloader): 125 x_coords, y_coords = y_coords.numpy(), x_coords.numpy() 126 if parameters["model"]["amp"]: 127 with autocast(): 128 output = model( 129 image_patches.float().to(parameters["device"]) 130 ) 131 else: 132 output = model(image_patches.float().to(parameters["device"])) 133 output = output.detach().cpu().numpy() 134 for i in range(int(output.shape[0])): 135 count_map[ 136 x_coords[i] : x_coords[i] + patch_size[0], 137 y_coords[i] : y_coords[i] + patch_size[1], 138 ] += 1 139 probs_map[ 140 x_coords[i] : x_coords[i] + patch_size[0], 141 y_coords[i] : y_coords[i] + patch_size[1], 142 ] += output[i][0] 143 probs_map = probs_map / count_map 144 count_map = count_map / count_map.max() 145 out = count_map * probs_map 146 count_map = np.array(count_map * 255, dtype=np.uint16) 147 out_thresh = np.array((out > 0.5) * 255, dtype=np.uint16) 148 imsave( 149 os.path.join( 150 subject_dest_dir, 151 row[parameters["headers"]["subjectIDHeader"]] + "_prob.png", 152 ), 153 out, 154 ) 155 imsave( 156 os.path.join( 157 subject_dest_dir, 158 row[parameters["headers"]["subjectIDHeader"]] + "_seg.png", 159 ), 160 out_thresh, 161 ) 162 imsave( 163 os.path.join( 164 subject_dest_dir, 165 row[parameters["headers"]["subjectIDHeader"]] + "_count.png", 166 ), 167 count_map, 168 ) 169 else: 170 print( 171 "ERROR: histo/path inference is Linux-only because openslide for Windows works only for Python-3.8, whereas pickle5 works only for 3.6 and 3.7" 172 ) 173 174 175 if __name__ == "__main__": 176 177 # parse the cli arguments here 178 parser = argparse.ArgumentParser(description="Inference Loop of GANDLF") 179 parser.add_argument( 180 "-inference_loader_pickle", 181 type=str, 182 help="Inference loader pickle", 183 required=True, 184 ) 185 parser.add_argument( 186 "-parameter_pickle", type=str, help="Parameters pickle", required=True 187 ) 188 parser.add_argument( 189 "-headers_pickle", type=str, help="Header pickle", required=True 190 ) 191 parser.add_argument("-outputDir", type=str, help="Output directory", required=True) 192 parser.add_argument("-device", type=str, help="Device to train on", required=True) 193 194 args = parser.parse_args() 195 196 # # write parameters to pickle - this should not change for the different folds, so keeping is independent 197 patch_size = pickle.load(open(args.patch_size_pickle, "rb")) 198 headers = pickle.load(open(args.headers_pickle, "rb")) 199 label_header = pickle.load(open(args.label_header_pickle, "rb")) 200 parameters = pickle.load(open(args.parameter_pickle, "rb")) 201 inferenceDataFromPickle = pd.read_pickle(args.inference_loader_pickle) 202 203 inference_loop( 204 inferenceDataFromPickle=inferenceDataFromPickle, 205 parameters=parameters, 206 outputDir=args.outputDir, 207 device=args.device, 208 ) 209 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/GANDLF/compute/inference_loop.py b/GANDLF/compute/inference_loop.py --- a/GANDLF/compute/inference_loop.py +++ b/GANDLF/compute/inference_loop.py @@ -51,7 +51,8 @@ ) if not os.path.isfile(file_to_check): raise ValueError("The model specified model was not found:", file_to_check) - main_dict = torch.load(file_to_check) + + main_dict = torch.load(file_to_check, map_location=torch.device(device)) model.load_state_dict(main_dict["model_state_dict"]) if not (os.environ.get("HOSTNAME") is None):
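The patch above threads the user-requested `device` into `torch.load` via `map_location`, which is the standard remedy for the `RuntimeError` quoted in the issue when a GPU-saved checkpoint is opened on a CPU-only machine. A standalone sketch of the same idea (the file name and the commented usage lines are illustrative, not GaNDLF's actual layout):

```python
# Device-agnostic checkpoint loading sketch.
import torch

def load_checkpoint(path, device="cpu"):
    # map_location remaps tensors serialized on a CUDA device onto `device`,
    # so CPU-only machines can deserialize GPU-trained checkpoints.
    return torch.load(path, map_location=torch.device(device))

# usage, mirroring the patched call site:
# main_dict = load_checkpoint("resunet_best.pth.tar", device="cpu")
# model.load_state_dict(main_dict["model_state_dict"])
```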
{"golden_diff": "diff --git a/GANDLF/compute/inference_loop.py b/GANDLF/compute/inference_loop.py\n--- a/GANDLF/compute/inference_loop.py\n+++ b/GANDLF/compute/inference_loop.py\n@@ -51,7 +51,8 @@\n )\n if not os.path.isfile(file_to_check):\n raise ValueError(\"The model specified model was not found:\", file_to_check)\n- main_dict = torch.load(file_to_check)\n+\n+ main_dict = torch.load(file_to_check, map_location=torch.device(device))\n model.load_state_dict(main_dict[\"model_state_dict\"])\n \n if not (os.environ.get(\"HOSTNAME\") is None):\n", "issue": "Default mapping to GPU device during inference\n**Describe the bug**\r\nWhen running on CPU, this issue occurs: RuntimeError: Attempting to deserialize object on a CUDA device but torch.cuda.is_available() is False. If you are running on a CPU-only machine, please use torch.load with map_location=torch.device('cpu') to map your storages to the CPU. Hence adding `map_location` to torch.load (Ref: https://pytorch.org/docs/stable/generated/torch.load.html).\r\nPlease review.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\nAfter data preprocess, run:\r\npython gandlf_run -c <config> -i <val_dataset> -m <model_dir> -t False -d cpu\r\n\r\n**Expected behavior**\r\nModel should be mapped to the CPU device. Although there is a `send_model_to_device` method implemented, this error occurs before that, during `torch.load`\r\n\r\n**Screenshots**\r\nIf applicable, add screenshots to help explain your problem.\r\n\r\n**GaNDLF Version**\r\nVersion: 0.0.13\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: Linux CentOS 7.6\r\n\r\n**Additional context**\r\nCan provided if needed\r\n\n", "before_files": [{"content": "from .forward_pass import validate_network\nimport os\n\n# hides torchio citation request, see https://github.com/fepegar/torchio/issues/235\nos.environ[\"TORCHIO_HIDE_CITATION_PROMPT\"] = \"1\"\n\nimport pickle, argparse, torch\nimport numpy as np\nimport pandas as pd\nfrom torch.utils.data import DataLoader\nfrom skimage.io import imsave\nfrom tqdm import tqdm\nfrom torch.cuda.amp import autocast\nfrom GANDLF.data.ImagesFromDataFrame import ImagesFromDataFrame\nfrom GANDLF.utils import populate_channel_keys_in_params, send_model_to_device\nfrom GANDLF.models import global_models_dict\n\n\ndef inference_loop(inferenceDataFromPickle, device, parameters, outputDir):\n \"\"\"\n The main training loop.\n\n Args:\n inferenceDataFromPickle (pandas.DataFrame): The data to use for inference.\n device (str): The device to perform computations on.\n parameters (dict): The parameters dictionary.\n outputDir (str): The output directory.\n \"\"\"\n # Defining our model here according to parameters mentioned in the configuration file\n print(\"Number of dims : \", parameters[\"model\"][\"dimension\"])\n if \"num_channels\" in parameters[\"model\"]:\n print(\"Number of channels : \", parameters[\"model\"][\"num_channels\"])\n print(\"Number of classes : \", len(parameters[\"model\"][\"class_list\"]))\n\n # Fetch the model according to params mentioned in the configuration file\n model = global_models_dict[parameters[\"model\"][\"architecture\"]](\n parameters=parameters\n )\n\n # Setting up the inference loader\n inferenceDataForTorch = ImagesFromDataFrame(\n inferenceDataFromPickle, parameters, train=False\n )\n inference_loader = DataLoader(inferenceDataForTorch, batch_size=1)\n\n # Loading the weights into the model\n main_dict = outputDir\n if os.path.isdir(outputDir):\n file_to_check = os.path.join(\n outputDir, 
str(parameters[\"model\"][\"architecture\"]) + \"_best.pth.tar\"\n )\n if not os.path.isfile(file_to_check):\n raise ValueError(\"The model specified model was not found:\", file_to_check)\n main_dict = torch.load(file_to_check)\n model.load_state_dict(main_dict[\"model_state_dict\"])\n\n if not (os.environ.get(\"HOSTNAME\") is None):\n print(\"\\nHostname :\" + str(os.environ.get(\"HOSTNAME\")), flush=True)\n\n # get the channel keys for concatenation later (exclude non numeric channel keys)\n parameters = populate_channel_keys_in_params(inference_loader, parameters)\n parameters[\"save_output\"] = True\n\n print(\"Data Samples: \", len(inference_loader.dataset), flush=True)\n model, parameters[\"model\"][\"amp\"], parameters[\"device\"] = send_model_to_device(\n model, parameters[\"model\"][\"amp\"], device, optimizer=None\n )\n\n print(\"Using device:\", parameters[\"device\"], flush=True)\n\n # radiology inference\n if parameters[\"modality\"] == \"rad\":\n average_epoch_valid_loss, average_epoch_valid_metric = validate_network(\n model, inference_loader, None, parameters, mode=\"inference\"\n )\n print(average_epoch_valid_loss, average_epoch_valid_metric)\n elif (parameters[\"modality\"] == \"path\") or (parameters[\"modality\"] == \"histo\"):\n # histology inference\n if os.name != \"nt\":\n \"\"\"\n path inference is Linux-only because openslide for Windows works only for Python-3.8 whereas pickle5 works only for 3.6 and 3.7\n \"\"\"\n from GANDLF.data.inference_dataloader_histopath import InferTumorSegDataset\n from openslide import OpenSlide\n\n # actual computation\n for _, row in inferenceDataForTorch.iterrows():\n subject_name = row[parameters[\"headers\"][\"subjectIDHeader\"]]\n print(\n \"Patient Slide : \",\n row[parameters[\"headers\"][\"subjectIDHeader\"]],\n )\n print(\n \"Patient Location : \",\n row[parameters[\"headers\"][\"channelHeaders\"]],\n )\n print(row[parameters[\"headers\"][\"channelHeaders\"]].values[0])\n os_image = OpenSlide(\n row[parameters[\"headers\"][\"channelHeaders\"]].values[0]\n )\n level_width, level_height = os_image.level_dimensions[\n int(parameters[\"slide_level\"])\n ]\n subject_dest_dir = os.path.join(outputDir, subject_name)\n os.makedirs(subject_dest_dir, exist_ok=True)\n\n probs_map = np.zeros((level_height, level_width), dtype=np.float16)\n count_map = np.zeros((level_height, level_width), dtype=np.uint8)\n\n patient_dataset_obj = InferTumorSegDataset(\n row[parameters[\"headers\"][\"channelHeaders\"]].values[0],\n patch_size=patch_size,\n stride_size=parameters[\"stride_size\"],\n selected_level=parameters[\"slide_level\"],\n mask_level=4,\n )\n\n dataloader = DataLoader(\n patient_dataset_obj,\n batch_size=int(parameters[\"batch_size\"]),\n shuffle=False,\n num_workers=parameters[\"q_num_workers\"],\n )\n for image_patches, (x_coords, y_coords) in tqdm(dataloader):\n x_coords, y_coords = y_coords.numpy(), x_coords.numpy()\n if parameters[\"model\"][\"amp\"]:\n with autocast():\n output = model(\n image_patches.float().to(parameters[\"device\"])\n )\n else:\n output = model(image_patches.float().to(parameters[\"device\"]))\n output = output.detach().cpu().numpy()\n for i in range(int(output.shape[0])):\n count_map[\n x_coords[i] : x_coords[i] + patch_size[0],\n y_coords[i] : y_coords[i] + patch_size[1],\n ] += 1\n probs_map[\n x_coords[i] : x_coords[i] + patch_size[0],\n y_coords[i] : y_coords[i] + patch_size[1],\n ] += output[i][0]\n probs_map = probs_map / count_map\n count_map = count_map / count_map.max()\n out = count_map * 
probs_map\n count_map = np.array(count_map * 255, dtype=np.uint16)\n out_thresh = np.array((out > 0.5) * 255, dtype=np.uint16)\n imsave(\n os.path.join(\n subject_dest_dir,\n row[parameters[\"headers\"][\"subjectIDHeader\"]] + \"_prob.png\",\n ),\n out,\n )\n imsave(\n os.path.join(\n subject_dest_dir,\n row[parameters[\"headers\"][\"subjectIDHeader\"]] + \"_seg.png\",\n ),\n out_thresh,\n )\n imsave(\n os.path.join(\n subject_dest_dir,\n row[parameters[\"headers\"][\"subjectIDHeader\"]] + \"_count.png\",\n ),\n count_map,\n )\n else:\n print(\n \"ERROR: histo/path inference is Linux-only because openslide for Windows works only for Python-3.8, whereas pickle5 works only for 3.6 and 3.7\"\n )\n\n\nif __name__ == \"__main__\":\n\n # parse the cli arguments here\n parser = argparse.ArgumentParser(description=\"Inference Loop of GANDLF\")\n parser.add_argument(\n \"-inference_loader_pickle\",\n type=str,\n help=\"Inference loader pickle\",\n required=True,\n )\n parser.add_argument(\n \"-parameter_pickle\", type=str, help=\"Parameters pickle\", required=True\n )\n parser.add_argument(\n \"-headers_pickle\", type=str, help=\"Header pickle\", required=True\n )\n parser.add_argument(\"-outputDir\", type=str, help=\"Output directory\", required=True)\n parser.add_argument(\"-device\", type=str, help=\"Device to train on\", required=True)\n\n args = parser.parse_args()\n\n # # write parameters to pickle - this should not change for the different folds, so keeping is independent\n patch_size = pickle.load(open(args.patch_size_pickle, \"rb\"))\n headers = pickle.load(open(args.headers_pickle, \"rb\"))\n label_header = pickle.load(open(args.label_header_pickle, \"rb\"))\n parameters = pickle.load(open(args.parameter_pickle, \"rb\"))\n inferenceDataFromPickle = pd.read_pickle(args.inference_loader_pickle)\n\n inference_loop(\n inferenceDataFromPickle=inferenceDataFromPickle,\n parameters=parameters,\n outputDir=args.outputDir,\n device=args.device,\n )\n", "path": "GANDLF/compute/inference_loop.py"}], "after_files": [{"content": "from .forward_pass import validate_network\nimport os\n\n# hides torchio citation request, see https://github.com/fepegar/torchio/issues/235\nos.environ[\"TORCHIO_HIDE_CITATION_PROMPT\"] = \"1\"\n\nimport pickle, argparse, torch\nimport numpy as np\nimport pandas as pd\nfrom torch.utils.data import DataLoader\nfrom skimage.io import imsave\nfrom tqdm import tqdm\nfrom torch.cuda.amp import autocast\nfrom GANDLF.data.ImagesFromDataFrame import ImagesFromDataFrame\nfrom GANDLF.utils import populate_channel_keys_in_params, send_model_to_device\nfrom GANDLF.models import global_models_dict\n\n\ndef inference_loop(inferenceDataFromPickle, device, parameters, outputDir):\n \"\"\"\n The main training loop.\n\n Args:\n inferenceDataFromPickle (pandas.DataFrame): The data to use for inference.\n device (str): The device to perform computations on.\n parameters (dict): The parameters dictionary.\n outputDir (str): The output directory.\n \"\"\"\n # Defining our model here according to parameters mentioned in the configuration file\n print(\"Number of dims : \", parameters[\"model\"][\"dimension\"])\n if \"num_channels\" in parameters[\"model\"]:\n print(\"Number of channels : \", parameters[\"model\"][\"num_channels\"])\n print(\"Number of classes : \", len(parameters[\"model\"][\"class_list\"]))\n\n # Fetch the model according to params mentioned in the configuration file\n model = global_models_dict[parameters[\"model\"][\"architecture\"]](\n parameters=parameters\n )\n\n # Setting 
up the inference loader\n inferenceDataForTorch = ImagesFromDataFrame(\n inferenceDataFromPickle, parameters, train=False\n )\n inference_loader = DataLoader(inferenceDataForTorch, batch_size=1)\n\n # Loading the weights into the model\n main_dict = outputDir\n if os.path.isdir(outputDir):\n file_to_check = os.path.join(\n outputDir, str(parameters[\"model\"][\"architecture\"]) + \"_best.pth.tar\"\n )\n if not os.path.isfile(file_to_check):\n raise ValueError(\"The model specified model was not found:\", file_to_check)\n\n main_dict = torch.load(file_to_check, map_location=torch.device(device))\n model.load_state_dict(main_dict[\"model_state_dict\"])\n\n if not (os.environ.get(\"HOSTNAME\") is None):\n print(\"\\nHostname :\" + str(os.environ.get(\"HOSTNAME\")), flush=True)\n\n # get the channel keys for concatenation later (exclude non numeric channel keys)\n parameters = populate_channel_keys_in_params(inference_loader, parameters)\n parameters[\"save_output\"] = True\n\n print(\"Data Samples: \", len(inference_loader.dataset), flush=True)\n model, parameters[\"model\"][\"amp\"], parameters[\"device\"] = send_model_to_device(\n model, parameters[\"model\"][\"amp\"], device, optimizer=None\n )\n\n print(\"Using device:\", parameters[\"device\"], flush=True)\n\n # radiology inference\n if parameters[\"modality\"] == \"rad\":\n average_epoch_valid_loss, average_epoch_valid_metric = validate_network(\n model, inference_loader, None, parameters, mode=\"inference\"\n )\n print(average_epoch_valid_loss, average_epoch_valid_metric)\n elif (parameters[\"modality\"] == \"path\") or (parameters[\"modality\"] == \"histo\"):\n # histology inference\n if os.name != \"nt\":\n \"\"\"\n path inference is Linux-only because openslide for Windows works only for Python-3.8 whereas pickle5 works only for 3.6 and 3.7\n \"\"\"\n from GANDLF.data.inference_dataloader_histopath import InferTumorSegDataset\n from openslide import OpenSlide\n\n # actual computation\n for _, row in inferenceDataForTorch.iterrows():\n subject_name = row[parameters[\"headers\"][\"subjectIDHeader\"]]\n print(\n \"Patient Slide : \",\n row[parameters[\"headers\"][\"subjectIDHeader\"]],\n )\n print(\n \"Patient Location : \",\n row[parameters[\"headers\"][\"channelHeaders\"]],\n )\n print(row[parameters[\"headers\"][\"channelHeaders\"]].values[0])\n os_image = OpenSlide(\n row[parameters[\"headers\"][\"channelHeaders\"]].values[0]\n )\n level_width, level_height = os_image.level_dimensions[\n int(parameters[\"slide_level\"])\n ]\n subject_dest_dir = os.path.join(outputDir, subject_name)\n os.makedirs(subject_dest_dir, exist_ok=True)\n\n probs_map = np.zeros((level_height, level_width), dtype=np.float16)\n count_map = np.zeros((level_height, level_width), dtype=np.uint8)\n\n patient_dataset_obj = InferTumorSegDataset(\n row[parameters[\"headers\"][\"channelHeaders\"]].values[0],\n patch_size=patch_size,\n stride_size=parameters[\"stride_size\"],\n selected_level=parameters[\"slide_level\"],\n mask_level=4,\n )\n\n dataloader = DataLoader(\n patient_dataset_obj,\n batch_size=int(parameters[\"batch_size\"]),\n shuffle=False,\n num_workers=parameters[\"q_num_workers\"],\n )\n for image_patches, (x_coords, y_coords) in tqdm(dataloader):\n x_coords, y_coords = y_coords.numpy(), x_coords.numpy()\n if parameters[\"model\"][\"amp\"]:\n with autocast():\n output = model(\n image_patches.float().to(parameters[\"device\"])\n )\n else:\n output = model(image_patches.float().to(parameters[\"device\"]))\n output = output.detach().cpu().numpy()\n for i 
in range(int(output.shape[0])):\n count_map[\n x_coords[i] : x_coords[i] + patch_size[0],\n y_coords[i] : y_coords[i] + patch_size[1],\n ] += 1\n probs_map[\n x_coords[i] : x_coords[i] + patch_size[0],\n y_coords[i] : y_coords[i] + patch_size[1],\n ] += output[i][0]\n probs_map = probs_map / count_map\n count_map = count_map / count_map.max()\n out = count_map * probs_map\n count_map = np.array(count_map * 255, dtype=np.uint16)\n out_thresh = np.array((out > 0.5) * 255, dtype=np.uint16)\n imsave(\n os.path.join(\n subject_dest_dir,\n row[parameters[\"headers\"][\"subjectIDHeader\"]] + \"_prob.png\",\n ),\n out,\n )\n imsave(\n os.path.join(\n subject_dest_dir,\n row[parameters[\"headers\"][\"subjectIDHeader\"]] + \"_seg.png\",\n ),\n out_thresh,\n )\n imsave(\n os.path.join(\n subject_dest_dir,\n row[parameters[\"headers\"][\"subjectIDHeader\"]] + \"_count.png\",\n ),\n count_map,\n )\n else:\n print(\n \"ERROR: histo/path inference is Linux-only because openslide for Windows works only for Python-3.8, whereas pickle5 works only for 3.6 and 3.7\"\n )\n\n\nif __name__ == \"__main__\":\n\n # parse the cli arguments here\n parser = argparse.ArgumentParser(description=\"Inference Loop of GANDLF\")\n parser.add_argument(\n \"-inference_loader_pickle\",\n type=str,\n help=\"Inference loader pickle\",\n required=True,\n )\n parser.add_argument(\n \"-parameter_pickle\", type=str, help=\"Parameters pickle\", required=True\n )\n parser.add_argument(\n \"-headers_pickle\", type=str, help=\"Header pickle\", required=True\n )\n parser.add_argument(\"-outputDir\", type=str, help=\"Output directory\", required=True)\n parser.add_argument(\"-device\", type=str, help=\"Device to train on\", required=True)\n\n args = parser.parse_args()\n\n # # write parameters to pickle - this should not change for the different folds, so keeping is independent\n patch_size = pickle.load(open(args.patch_size_pickle, \"rb\"))\n headers = pickle.load(open(args.headers_pickle, \"rb\"))\n label_header = pickle.load(open(args.label_header_pickle, \"rb\"))\n parameters = pickle.load(open(args.parameter_pickle, \"rb\"))\n inferenceDataFromPickle = pd.read_pickle(args.inference_loader_pickle)\n\n inference_loop(\n inferenceDataFromPickle=inferenceDataFromPickle,\n parameters=parameters,\n outputDir=args.outputDir,\n device=args.device,\n )\n", "path": "GANDLF/compute/inference_loop.py"}]}
2,832
144
gh_patches_debug_15079
rasdani/github-patches
git_diff
python-telegram-bot__python-telegram-bot-1488
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Typo in PicklePersistence ctor arguments https://github.com/python-telegram-bot/python-telegram-bot/blob/2c92c356b8e3b07f20dcffa5b10fecc62b67e906/telegram/ext/picklepersistence.py#L59 `singe_file` should be `single_file`. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `telegram/ext/picklepersistence.py` Content: ``` 1 #!/usr/bin/env python 2 # 3 # A library that provides a Python interface to the Telegram Bot API 4 # Copyright (C) 2015-2018 5 # Leandro Toledo de Souza <[email protected]> 6 # 7 # This program is free software: you can redistribute it and/or modify 8 # it under the terms of the GNU Lesser Public License as published by 9 # the Free Software Foundation, either version 3 of the License, or 10 # (at your option) any later version. 11 # 12 # This program is distributed in the hope that it will be useful, 13 # but WITHOUT ANY WARRANTY; without even the implied warranty of 14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 # GNU Lesser Public License for more details. 16 # 17 # You should have received a copy of the GNU Lesser Public License 18 # along with this program. If not, see [http://www.gnu.org/licenses/]. 19 """This module contains the PicklePersistence class.""" 20 import pickle 21 from collections import defaultdict 22 from copy import deepcopy 23 24 from telegram.ext import BasePersistence 25 26 27 class PicklePersistence(BasePersistence): 28 """Using python's builtin pickle for making you bot persistent. 29 30 Attributes: 31 filename (:obj:`str`): The filename for storing the pickle files. When :attr:`single_file` 32 is false this will be used as a prefix. 33 store_user_data (:obj:`bool`): Optional. Whether user_data should be saved by this 34 persistence class. 35 store_chat_data (:obj:`bool`): Optional. Whether user_data should be saved by this 36 persistence class. 37 single_file (:obj:`bool`): Optional. When ``False`` will store 3 sperate files of 38 `filename_user_data`, `filename_chat_data` and `filename_conversations`. Default is 39 ``True``. 40 on_flush (:obj:`bool`, optional): When ``True`` will only save to file when :meth:`flush` 41 is called and keep data in memory until that happens. When ``False`` will store data 42 on any transaction *and* on call fo :meth:`flush`. Default is ``False``. 43 44 Args: 45 filename (:obj:`str`): The filename for storing the pickle files. When :attr:`single_file` 46 is false this will be used as a prefix. 47 store_user_data (:obj:`bool`, optional): Whether user_data should be saved by this 48 persistence class. Default is ``True``. 49 store_chat_data (:obj:`bool`, optional): Whether user_data should be saved by this 50 persistence class. Default is ``True``. 51 single_file (:obj:`bool`, optional): When ``False`` will store 3 sperate files of 52 `filename_user_data`, `filename_chat_data` and `filename_conversations`. Default is 53 ``True``. 54 on_flush (:obj:`bool`, optional): When ``True`` will only save to file when :meth:`flush` 55 is called and keep data in memory until that happens. When ``False`` will store data 56 on any transaction *and* on call fo :meth:`flush`. Default is ``False``. 
57 """ 58 59 def __init__(self, filename, store_user_data=True, store_chat_data=True, singe_file=True, 60 on_flush=False): 61 self.filename = filename 62 self.store_user_data = store_user_data 63 self.store_chat_data = store_chat_data 64 self.single_file = singe_file 65 self.on_flush = on_flush 66 self.user_data = None 67 self.chat_data = None 68 self.conversations = None 69 70 def load_singlefile(self): 71 try: 72 filename = self.filename 73 with open(self.filename, "rb") as f: 74 all = pickle.load(f) 75 self.user_data = defaultdict(dict, all['user_data']) 76 self.chat_data = defaultdict(dict, all['chat_data']) 77 self.conversations = all['conversations'] 78 except IOError: 79 self.conversations = {} 80 self.user_data = defaultdict(dict) 81 self.chat_data = defaultdict(dict) 82 except pickle.UnpicklingError: 83 raise TypeError("File {} does not contain valid pickle data".format(filename)) 84 except Exception: 85 raise TypeError("Something went wrong unpickling {}".format(filename)) 86 87 def load_file(self, filename): 88 try: 89 with open(filename, "rb") as f: 90 return pickle.load(f) 91 except IOError: 92 return None 93 except pickle.UnpicklingError: 94 raise TypeError("File {} does not contain valid pickle data".format(filename)) 95 except Exception: 96 raise TypeError("Something went wrong unpickling {}".format(filename)) 97 98 def dump_singlefile(self): 99 with open(self.filename, "wb") as f: 100 all = {'conversations': self.conversations, 'user_data': self.user_data, 101 'chat_data': self.chat_data} 102 pickle.dump(all, f) 103 104 def dump_file(self, filename, data): 105 with open(filename, "wb") as f: 106 pickle.dump(data, f) 107 108 def get_user_data(self): 109 """Returns the user_data from the pickle file if it exsists or an empty defaultdict. 110 111 Returns: 112 :obj:`defaultdict`: The restored user data. 113 """ 114 if self.user_data: 115 pass 116 elif not self.single_file: 117 filename = "{}_user_data".format(self.filename) 118 data = self.load_file(filename) 119 if not data: 120 data = defaultdict(dict) 121 else: 122 data = defaultdict(dict, data) 123 self.user_data = data 124 else: 125 self.load_singlefile() 126 return deepcopy(self.user_data) 127 128 def get_chat_data(self): 129 """Returns the chat_data from the pickle file if it exsists or an empty defaultdict. 130 131 Returns: 132 :obj:`defaultdict`: The restored chat data. 133 """ 134 if self.chat_data: 135 pass 136 elif not self.single_file: 137 filename = "{}_chat_data".format(self.filename) 138 data = self.load_file(filename) 139 if not data: 140 data = defaultdict(dict) 141 else: 142 data = defaultdict(dict, data) 143 self.chat_data = data 144 else: 145 self.load_singlefile() 146 return deepcopy(self.chat_data) 147 148 def get_conversations(self, name): 149 """Returns the conversations from the pickle file if it exsists or an empty defaultdict. 150 151 Args: 152 name (:obj:`str`): The handlers name. 153 154 Returns: 155 :obj:`dict`: The restored conversations for the handler. 156 """ 157 if self.conversations: 158 pass 159 elif not self.single_file: 160 filename = "{}_conversations".format(self.filename) 161 data = self.load_file(filename) 162 if not data: 163 data = {name: {}} 164 self.conversations = data 165 else: 166 self.load_singlefile() 167 return self.conversations.get(name, {}).copy() 168 169 def update_conversation(self, name, key, new_state): 170 """Will update the conversations for the given handler and depending on :attr:`on_flush` 171 save the pickle file. 
172 173 Args: 174 name (:obj:`str`): The handlers name. 175 key (:obj:`tuple`): The key the state is changed for. 176 new_state (:obj:`tuple` | :obj:`any`): The new state for the given key. 177 """ 178 if self.conversations.setdefault(name, {}).get(key) == new_state: 179 return 180 self.conversations[name][key] = new_state 181 if not self.on_flush: 182 if not self.single_file: 183 filename = "{}_conversations".format(self.filename) 184 self.dump_file(filename, self.conversations) 185 else: 186 self.dump_singlefile() 187 188 def update_user_data(self, user_id, data): 189 """Will update the user_data (if changed) and depending on :attr:`on_flush` save the 190 pickle file. 191 192 Args: 193 user_id (:obj:`int`): The user the data might have been changed for. 194 data (:obj:`dict`): The :attr:`telegram.ext.dispatcher.user_data` [user_id]. 195 """ 196 if self.user_data.get(user_id) == data: 197 return 198 self.user_data[user_id] = data 199 if not self.on_flush: 200 if not self.single_file: 201 filename = "{}_user_data".format(self.filename) 202 self.dump_file(filename, self.user_data) 203 else: 204 self.dump_singlefile() 205 206 def update_chat_data(self, chat_id, data): 207 """Will update the chat_data (if changed) and depending on :attr:`on_flush` save the 208 pickle file. 209 210 Args: 211 chat_id (:obj:`int`): The chat the data might have been changed for. 212 data (:obj:`dict`): The :attr:`telegram.ext.dispatcher.chat_data` [chat_id]. 213 """ 214 if self.chat_data.get(chat_id) == data: 215 return 216 self.chat_data[chat_id] = data 217 if not self.on_flush: 218 if not self.single_file: 219 filename = "{}_chat_data".format(self.filename) 220 self.dump_file(filename, self.chat_data) 221 else: 222 self.dump_singlefile() 223 224 def flush(self): 225 """ Will save all data in memory to pickle file(s). 226 """ 227 if self.single_file: 228 if self.user_data or self.chat_data or self.conversations: 229 self.dump_singlefile() 230 else: 231 if self.user_data: 232 self.dump_file("{}_user_data".format(self.filename), self.user_data) 233 if self.chat_data: 234 self.dump_file("{}_chat_data".format(self.filename), self.chat_data) 235 if self.conversations: 236 self.dump_file("{}_conversations".format(self.filename), self.conversations) 237 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/telegram/ext/picklepersistence.py b/telegram/ext/picklepersistence.py --- a/telegram/ext/picklepersistence.py +++ b/telegram/ext/picklepersistence.py @@ -56,12 +56,12 @@ on any transaction *and* on call fo :meth:`flush`. Default is ``False``. """ - def __init__(self, filename, store_user_data=True, store_chat_data=True, singe_file=True, + def __init__(self, filename, store_user_data=True, store_chat_data=True, single_file=True, on_flush=False): self.filename = filename self.store_user_data = store_user_data self.store_chat_data = store_chat_data - self.single_file = singe_file + self.single_file = single_file self.on_flush = on_flush self.user_data = None self.chat_data = None
{"golden_diff": "diff --git a/telegram/ext/picklepersistence.py b/telegram/ext/picklepersistence.py\n--- a/telegram/ext/picklepersistence.py\n+++ b/telegram/ext/picklepersistence.py\n@@ -56,12 +56,12 @@\n on any transaction *and* on call fo :meth:`flush`. Default is ``False``.\n \"\"\"\n \n- def __init__(self, filename, store_user_data=True, store_chat_data=True, singe_file=True,\n+ def __init__(self, filename, store_user_data=True, store_chat_data=True, single_file=True,\n on_flush=False):\n self.filename = filename\n self.store_user_data = store_user_data\n self.store_chat_data = store_chat_data\n- self.single_file = singe_file\n+ self.single_file = single_file\n self.on_flush = on_flush\n self.user_data = None\n self.chat_data = None\n", "issue": "Typo in PicklePersistence ctor arguments\nhttps://github.com/python-telegram-bot/python-telegram-bot/blob/2c92c356b8e3b07f20dcffa5b10fecc62b67e906/telegram/ext/picklepersistence.py#L59\r\n`singe_file` should be `single_file`.\n", "before_files": [{"content": "#!/usr/bin/env python\n#\n# A library that provides a Python interface to the Telegram Bot API\n# Copyright (C) 2015-2018\n# Leandro Toledo de Souza <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser Public License for more details.\n#\n# You should have received a copy of the GNU Lesser Public License\n# along with this program. If not, see [http://www.gnu.org/licenses/].\n\"\"\"This module contains the PicklePersistence class.\"\"\"\nimport pickle\nfrom collections import defaultdict\nfrom copy import deepcopy\n\nfrom telegram.ext import BasePersistence\n\n\nclass PicklePersistence(BasePersistence):\n \"\"\"Using python's builtin pickle for making you bot persistent.\n\n Attributes:\n filename (:obj:`str`): The filename for storing the pickle files. When :attr:`single_file`\n is false this will be used as a prefix.\n store_user_data (:obj:`bool`): Optional. Whether user_data should be saved by this\n persistence class.\n store_chat_data (:obj:`bool`): Optional. Whether user_data should be saved by this\n persistence class.\n single_file (:obj:`bool`): Optional. When ``False`` will store 3 sperate files of\n `filename_user_data`, `filename_chat_data` and `filename_conversations`. Default is\n ``True``.\n on_flush (:obj:`bool`, optional): When ``True`` will only save to file when :meth:`flush`\n is called and keep data in memory until that happens. When ``False`` will store data\n on any transaction *and* on call fo :meth:`flush`. Default is ``False``.\n\n Args:\n filename (:obj:`str`): The filename for storing the pickle files. When :attr:`single_file`\n is false this will be used as a prefix.\n store_user_data (:obj:`bool`, optional): Whether user_data should be saved by this\n persistence class. Default is ``True``.\n store_chat_data (:obj:`bool`, optional): Whether user_data should be saved by this\n persistence class. Default is ``True``.\n single_file (:obj:`bool`, optional): When ``False`` will store 3 sperate files of\n `filename_user_data`, `filename_chat_data` and `filename_conversations`. 
Default is\n ``True``.\n on_flush (:obj:`bool`, optional): When ``True`` will only save to file when :meth:`flush`\n is called and keep data in memory until that happens. When ``False`` will store data\n on any transaction *and* on call fo :meth:`flush`. Default is ``False``.\n \"\"\"\n\n def __init__(self, filename, store_user_data=True, store_chat_data=True, singe_file=True,\n on_flush=False):\n self.filename = filename\n self.store_user_data = store_user_data\n self.store_chat_data = store_chat_data\n self.single_file = singe_file\n self.on_flush = on_flush\n self.user_data = None\n self.chat_data = None\n self.conversations = None\n\n def load_singlefile(self):\n try:\n filename = self.filename\n with open(self.filename, \"rb\") as f:\n all = pickle.load(f)\n self.user_data = defaultdict(dict, all['user_data'])\n self.chat_data = defaultdict(dict, all['chat_data'])\n self.conversations = all['conversations']\n except IOError:\n self.conversations = {}\n self.user_data = defaultdict(dict)\n self.chat_data = defaultdict(dict)\n except pickle.UnpicklingError:\n raise TypeError(\"File {} does not contain valid pickle data\".format(filename))\n except Exception:\n raise TypeError(\"Something went wrong unpickling {}\".format(filename))\n\n def load_file(self, filename):\n try:\n with open(filename, \"rb\") as f:\n return pickle.load(f)\n except IOError:\n return None\n except pickle.UnpicklingError:\n raise TypeError(\"File {} does not contain valid pickle data\".format(filename))\n except Exception:\n raise TypeError(\"Something went wrong unpickling {}\".format(filename))\n\n def dump_singlefile(self):\n with open(self.filename, \"wb\") as f:\n all = {'conversations': self.conversations, 'user_data': self.user_data,\n 'chat_data': self.chat_data}\n pickle.dump(all, f)\n\n def dump_file(self, filename, data):\n with open(filename, \"wb\") as f:\n pickle.dump(data, f)\n\n def get_user_data(self):\n \"\"\"Returns the user_data from the pickle file if it exsists or an empty defaultdict.\n\n Returns:\n :obj:`defaultdict`: The restored user data.\n \"\"\"\n if self.user_data:\n pass\n elif not self.single_file:\n filename = \"{}_user_data\".format(self.filename)\n data = self.load_file(filename)\n if not data:\n data = defaultdict(dict)\n else:\n data = defaultdict(dict, data)\n self.user_data = data\n else:\n self.load_singlefile()\n return deepcopy(self.user_data)\n\n def get_chat_data(self):\n \"\"\"Returns the chat_data from the pickle file if it exsists or an empty defaultdict.\n\n Returns:\n :obj:`defaultdict`: The restored chat data.\n \"\"\"\n if self.chat_data:\n pass\n elif not self.single_file:\n filename = \"{}_chat_data\".format(self.filename)\n data = self.load_file(filename)\n if not data:\n data = defaultdict(dict)\n else:\n data = defaultdict(dict, data)\n self.chat_data = data\n else:\n self.load_singlefile()\n return deepcopy(self.chat_data)\n\n def get_conversations(self, name):\n \"\"\"Returns the conversations from the pickle file if it exsists or an empty defaultdict.\n\n Args:\n name (:obj:`str`): The handlers name.\n\n Returns:\n :obj:`dict`: The restored conversations for the handler.\n \"\"\"\n if self.conversations:\n pass\n elif not self.single_file:\n filename = \"{}_conversations\".format(self.filename)\n data = self.load_file(filename)\n if not data:\n data = {name: {}}\n self.conversations = data\n else:\n self.load_singlefile()\n return self.conversations.get(name, {}).copy()\n\n def update_conversation(self, name, key, new_state):\n \"\"\"Will update the 
conversations for the given handler and depending on :attr:`on_flush`\n save the pickle file.\n\n Args:\n name (:obj:`str`): The handlers name.\n key (:obj:`tuple`): The key the state is changed for.\n new_state (:obj:`tuple` | :obj:`any`): The new state for the given key.\n \"\"\"\n if self.conversations.setdefault(name, {}).get(key) == new_state:\n return\n self.conversations[name][key] = new_state\n if not self.on_flush:\n if not self.single_file:\n filename = \"{}_conversations\".format(self.filename)\n self.dump_file(filename, self.conversations)\n else:\n self.dump_singlefile()\n\n def update_user_data(self, user_id, data):\n \"\"\"Will update the user_data (if changed) and depending on :attr:`on_flush` save the\n pickle file.\n\n Args:\n user_id (:obj:`int`): The user the data might have been changed for.\n data (:obj:`dict`): The :attr:`telegram.ext.dispatcher.user_data` [user_id].\n \"\"\"\n if self.user_data.get(user_id) == data:\n return\n self.user_data[user_id] = data\n if not self.on_flush:\n if not self.single_file:\n filename = \"{}_user_data\".format(self.filename)\n self.dump_file(filename, self.user_data)\n else:\n self.dump_singlefile()\n\n def update_chat_data(self, chat_id, data):\n \"\"\"Will update the chat_data (if changed) and depending on :attr:`on_flush` save the\n pickle file.\n\n Args:\n chat_id (:obj:`int`): The chat the data might have been changed for.\n data (:obj:`dict`): The :attr:`telegram.ext.dispatcher.chat_data` [chat_id].\n \"\"\"\n if self.chat_data.get(chat_id) == data:\n return\n self.chat_data[chat_id] = data\n if not self.on_flush:\n if not self.single_file:\n filename = \"{}_chat_data\".format(self.filename)\n self.dump_file(filename, self.chat_data)\n else:\n self.dump_singlefile()\n\n def flush(self):\n \"\"\" Will save all data in memory to pickle file(s).\n \"\"\"\n if self.single_file:\n if self.user_data or self.chat_data or self.conversations:\n self.dump_singlefile()\n else:\n if self.user_data:\n self.dump_file(\"{}_user_data\".format(self.filename), self.user_data)\n if self.chat_data:\n self.dump_file(\"{}_chat_data\".format(self.filename), self.chat_data)\n if self.conversations:\n self.dump_file(\"{}_conversations\".format(self.filename), self.conversations)\n", "path": "telegram/ext/picklepersistence.py"}], "after_files": [{"content": "#!/usr/bin/env python\n#\n# A library that provides a Python interface to the Telegram Bot API\n# Copyright (C) 2015-2018\n# Leandro Toledo de Souza <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser Public License for more details.\n#\n# You should have received a copy of the GNU Lesser Public License\n# along with this program. If not, see [http://www.gnu.org/licenses/].\n\"\"\"This module contains the PicklePersistence class.\"\"\"\nimport pickle\nfrom collections import defaultdict\nfrom copy import deepcopy\n\nfrom telegram.ext import BasePersistence\n\n\nclass PicklePersistence(BasePersistence):\n \"\"\"Using python's builtin pickle for making you bot persistent.\n\n Attributes:\n filename (:obj:`str`): The filename for storing the pickle files. 
When :attr:`single_file`\n is false this will be used as a prefix.\n store_user_data (:obj:`bool`): Optional. Whether user_data should be saved by this\n persistence class.\n store_chat_data (:obj:`bool`): Optional. Whether user_data should be saved by this\n persistence class.\n single_file (:obj:`bool`): Optional. When ``False`` will store 3 sperate files of\n `filename_user_data`, `filename_chat_data` and `filename_conversations`. Default is\n ``True``.\n on_flush (:obj:`bool`, optional): When ``True`` will only save to file when :meth:`flush`\n is called and keep data in memory until that happens. When ``False`` will store data\n on any transaction *and* on call fo :meth:`flush`. Default is ``False``.\n\n Args:\n filename (:obj:`str`): The filename for storing the pickle files. When :attr:`single_file`\n is false this will be used as a prefix.\n store_user_data (:obj:`bool`, optional): Whether user_data should be saved by this\n persistence class. Default is ``True``.\n store_chat_data (:obj:`bool`, optional): Whether user_data should be saved by this\n persistence class. Default is ``True``.\n single_file (:obj:`bool`, optional): When ``False`` will store 3 sperate files of\n `filename_user_data`, `filename_chat_data` and `filename_conversations`. Default is\n ``True``.\n on_flush (:obj:`bool`, optional): When ``True`` will only save to file when :meth:`flush`\n is called and keep data in memory until that happens. When ``False`` will store data\n on any transaction *and* on call fo :meth:`flush`. Default is ``False``.\n \"\"\"\n\n def __init__(self, filename, store_user_data=True, store_chat_data=True, single_file=True,\n on_flush=False):\n self.filename = filename\n self.store_user_data = store_user_data\n self.store_chat_data = store_chat_data\n self.single_file = single_file\n self.on_flush = on_flush\n self.user_data = None\n self.chat_data = None\n self.conversations = None\n\n def load_singlefile(self):\n try:\n filename = self.filename\n with open(self.filename, \"rb\") as f:\n all = pickle.load(f)\n self.user_data = defaultdict(dict, all['user_data'])\n self.chat_data = defaultdict(dict, all['chat_data'])\n self.conversations = all['conversations']\n except IOError:\n self.conversations = {}\n self.user_data = defaultdict(dict)\n self.chat_data = defaultdict(dict)\n except pickle.UnpicklingError:\n raise TypeError(\"File {} does not contain valid pickle data\".format(filename))\n except Exception:\n raise TypeError(\"Something went wrong unpickling {}\".format(filename))\n\n def load_file(self, filename):\n try:\n with open(filename, \"rb\") as f:\n return pickle.load(f)\n except IOError:\n return None\n except pickle.UnpicklingError:\n raise TypeError(\"File {} does not contain valid pickle data\".format(filename))\n except Exception:\n raise TypeError(\"Something went wrong unpickling {}\".format(filename))\n\n def dump_singlefile(self):\n with open(self.filename, \"wb\") as f:\n all = {'conversations': self.conversations, 'user_data': self.user_data,\n 'chat_data': self.chat_data}\n pickle.dump(all, f)\n\n def dump_file(self, filename, data):\n with open(filename, \"wb\") as f:\n pickle.dump(data, f)\n\n def get_user_data(self):\n \"\"\"Returns the user_data from the pickle file if it exsists or an empty defaultdict.\n\n Returns:\n :obj:`defaultdict`: The restored user data.\n \"\"\"\n if self.user_data:\n pass\n elif not self.single_file:\n filename = \"{}_user_data\".format(self.filename)\n data = self.load_file(filename)\n if not data:\n data = defaultdict(dict)\n 
else:\n data = defaultdict(dict, data)\n self.user_data = data\n else:\n self.load_singlefile()\n return deepcopy(self.user_data)\n\n def get_chat_data(self):\n \"\"\"Returns the chat_data from the pickle file if it exsists or an empty defaultdict.\n\n Returns:\n :obj:`defaultdict`: The restored chat data.\n \"\"\"\n if self.chat_data:\n pass\n elif not self.single_file:\n filename = \"{}_chat_data\".format(self.filename)\n data = self.load_file(filename)\n if not data:\n data = defaultdict(dict)\n else:\n data = defaultdict(dict, data)\n self.chat_data = data\n else:\n self.load_singlefile()\n return deepcopy(self.chat_data)\n\n def get_conversations(self, name):\n \"\"\"Returns the conversations from the pickle file if it exsists or an empty defaultdict.\n\n Args:\n name (:obj:`str`): The handlers name.\n\n Returns:\n :obj:`dict`: The restored conversations for the handler.\n \"\"\"\n if self.conversations:\n pass\n elif not self.single_file:\n filename = \"{}_conversations\".format(self.filename)\n data = self.load_file(filename)\n if not data:\n data = {name: {}}\n self.conversations = data\n else:\n self.load_singlefile()\n return self.conversations.get(name, {}).copy()\n\n def update_conversation(self, name, key, new_state):\n \"\"\"Will update the conversations for the given handler and depending on :attr:`on_flush`\n save the pickle file.\n\n Args:\n name (:obj:`str`): The handlers name.\n key (:obj:`tuple`): The key the state is changed for.\n new_state (:obj:`tuple` | :obj:`any`): The new state for the given key.\n \"\"\"\n if self.conversations.setdefault(name, {}).get(key) == new_state:\n return\n self.conversations[name][key] = new_state\n if not self.on_flush:\n if not self.single_file:\n filename = \"{}_conversations\".format(self.filename)\n self.dump_file(filename, self.conversations)\n else:\n self.dump_singlefile()\n\n def update_user_data(self, user_id, data):\n \"\"\"Will update the user_data (if changed) and depending on :attr:`on_flush` save the\n pickle file.\n\n Args:\n user_id (:obj:`int`): The user the data might have been changed for.\n data (:obj:`dict`): The :attr:`telegram.ext.dispatcher.user_data` [user_id].\n \"\"\"\n if self.user_data.get(user_id) == data:\n return\n self.user_data[user_id] = data\n if not self.on_flush:\n if not self.single_file:\n filename = \"{}_user_data\".format(self.filename)\n self.dump_file(filename, self.user_data)\n else:\n self.dump_singlefile()\n\n def update_chat_data(self, chat_id, data):\n \"\"\"Will update the chat_data (if changed) and depending on :attr:`on_flush` save the\n pickle file.\n\n Args:\n chat_id (:obj:`int`): The chat the data might have been changed for.\n data (:obj:`dict`): The :attr:`telegram.ext.dispatcher.chat_data` [chat_id].\n \"\"\"\n if self.chat_data.get(chat_id) == data:\n return\n self.chat_data[chat_id] = data\n if not self.on_flush:\n if not self.single_file:\n filename = \"{}_chat_data\".format(self.filename)\n self.dump_file(filename, self.chat_data)\n else:\n self.dump_singlefile()\n\n def flush(self):\n \"\"\" Will save all data in memory to pickle file(s).\n \"\"\"\n if self.single_file:\n if self.user_data or self.chat_data or self.conversations:\n self.dump_singlefile()\n else:\n if self.user_data:\n self.dump_file(\"{}_user_data\".format(self.filename), self.user_data)\n if self.chat_data:\n self.dump_file(\"{}_chat_data\".format(self.filename), self.chat_data)\n if self.conversations:\n self.dump_file(\"{}_conversations\".format(self.filename), self.conversations)\n", "path": 
"telegram/ext/picklepersistence.py"}]}
num_tokens: 3,010
num_tokens_diff: 203
problem_id: gh_patches_debug_24490
source: rasdani/github-patches
task_type: git_diff
in_source_id: ansible__ansible-lint-3437
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- no-handler: should not react on when-conditions containing "and" or "or" <!--- Verify first that your issue is not already reported on GitHub --> <!--- Also test if the latest release and master branch are affected too --> ##### Summary Right now the rule `Tasks that run when changed should likely be handlers` (which BTW, i am a big fan of) would produce findings for all of this lines: `when: mytask.changed` `when: mytask is changed` ... `when: mytask is changed and wartherIsNice|bool` While i totally agree that the first two examples are bad practices and should produce a linter warning, i would not agree, that the last example should. ##### Proposed solution As mentioned in #419 i could imagine of splitting up E503 into two rules, one of which reacts to single conditions and one for more complex conditions involving `and` or `or` - that way both could be skipped/disabled seperately. As @ssbarnea pointed out, it might also be a solution to disable the check completeley for complex conditons. ##### Issue Type - Bug Report - ansible installation method: OS package - ansible-lint installation method: pip --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/ansiblelint/rules/no_handler.py` Content: ``` 1 # Copyright (c) 2016 Will Thames <[email protected]> 2 # 3 # Permission is hereby granted, free of charge, to any person obtaining a copy 4 # of this software and associated documentation files (the "Software"), to deal 5 # in the Software without restriction, including without limitation the rights 6 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 # copies of the Software, and to permit persons to whom the Software is 8 # furnished to do so, subject to the following conditions: 9 # 10 # The above copyright notice and this permission notice shall be included in 11 # all copies or substantial portions of the Software. 12 # 13 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 19 # THE SOFTWARE. 20 21 """UseHandlerRatherThanWhenChangedRule used with ansible-lint.""" 22 from __future__ import annotations 23 24 import sys 25 from typing import TYPE_CHECKING 26 27 from ansiblelint.rules import AnsibleLintRule 28 29 if TYPE_CHECKING: 30 from ansiblelint.file_utils import Lintable 31 from ansiblelint.utils import Task 32 33 34 def _changed_in_when(item: str) -> bool: 35 if not isinstance(item, str): 36 return False 37 item_list = item.split() 38 39 if {"and", "not"} & set(item_list): 40 return False 41 return any( 42 changed in item 43 for changed in [ 44 ".changed", 45 "|changed", 46 '["changed"]', 47 "['changed']", 48 "is changed", 49 ] 50 ) 51 52 53 class UseHandlerRatherThanWhenChangedRule(AnsibleLintRule): 54 """Tasks that run when changed should likely be handlers.""" 55 56 id = "no-handler" 57 description = ( 58 "If a task has a ``when: result.changed`` setting, it is effectively " 59 "acting as a handler. 
You could use ``notify`` and move that task to " 60 "``handlers``." 61 ) 62 link = "https://docs.ansible.com/ansible/latest/playbook_guide/playbooks_handlers.html#handlers" 63 severity = "MEDIUM" 64 tags = ["idiom"] 65 version_added = "historic" 66 67 def matchtask( 68 self, 69 task: Task, 70 file: Lintable | None = None, 71 ) -> bool | str: 72 if task["__ansible_action_type__"] != "task": 73 return False 74 75 when = task.get("when") 76 77 if isinstance(when, list): 78 for item in when: 79 if _changed_in_when(item): 80 return True 81 if isinstance(when, str): 82 return _changed_in_when(when) 83 return False 84 85 86 if "pytest" in sys.modules: 87 import pytest 88 89 from ansiblelint.rules import RulesCollection # pylint: disable=ungrouped-imports 90 from ansiblelint.runner import Runner # pylint: disable=ungrouped-imports 91 92 @pytest.mark.parametrize( 93 ("test_file", "failures"), 94 ( 95 pytest.param("examples/playbooks/no_handler_fail.yml", 7, id="fail"), 96 pytest.param("examples/playbooks/no_handler_pass.yml", 0, id="pass"), 97 ), 98 ) 99 def test_no_handler( 100 default_rules_collection: RulesCollection, 101 test_file: str, 102 failures: int, 103 ) -> None: 104 """Test rule matches.""" 105 results = Runner(test_file, rules=default_rules_collection).run() 106 assert len(results) == failures 107 for result in results: 108 assert result.tag == "no-handler" 109 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/ansiblelint/rules/no_handler.py b/src/ansiblelint/rules/no_handler.py --- a/src/ansiblelint/rules/no_handler.py +++ b/src/ansiblelint/rules/no_handler.py @@ -36,7 +36,7 @@ return False item_list = item.split() - if {"and", "not"} & set(item_list): + if {"and", "or", "not"} & set(item_list): return False return any( changed in item @@ -75,9 +75,9 @@ when = task.get("when") if isinstance(when, list): - for item in when: - if _changed_in_when(item): - return True + if len(when) > 1: + return False + return _changed_in_when(when[0]) if isinstance(when, str): return _changed_in_when(when) return False @@ -92,7 +92,7 @@ @pytest.mark.parametrize( ("test_file", "failures"), ( - pytest.param("examples/playbooks/no_handler_fail.yml", 7, id="fail"), + pytest.param("examples/playbooks/no_handler_fail.yml", 5, id="fail"), pytest.param("examples/playbooks/no_handler_pass.yml", 0, id="pass"), ), )
{"golden_diff": "diff --git a/src/ansiblelint/rules/no_handler.py b/src/ansiblelint/rules/no_handler.py\n--- a/src/ansiblelint/rules/no_handler.py\n+++ b/src/ansiblelint/rules/no_handler.py\n@@ -36,7 +36,7 @@\n return False\n item_list = item.split()\n \n- if {\"and\", \"not\"} & set(item_list):\n+ if {\"and\", \"or\", \"not\"} & set(item_list):\n return False\n return any(\n changed in item\n@@ -75,9 +75,9 @@\n when = task.get(\"when\")\n \n if isinstance(when, list):\n- for item in when:\n- if _changed_in_when(item):\n- return True\n+ if len(when) > 1:\n+ return False\n+ return _changed_in_when(when[0])\n if isinstance(when, str):\n return _changed_in_when(when)\n return False\n@@ -92,7 +92,7 @@\n @pytest.mark.parametrize(\n (\"test_file\", \"failures\"),\n (\n- pytest.param(\"examples/playbooks/no_handler_fail.yml\", 7, id=\"fail\"),\n+ pytest.param(\"examples/playbooks/no_handler_fail.yml\", 5, id=\"fail\"),\n pytest.param(\"examples/playbooks/no_handler_pass.yml\", 0, id=\"pass\"),\n ),\n )\n", "issue": "no-handler: should not react on when-conditions containing \"and\" or \"or\"\n<!--- Verify first that your issue is not already reported on GitHub -->\r\n<!--- Also test if the latest release and master branch are affected too -->\r\n\r\n##### Summary\r\nRight now the rule `Tasks that run when changed should likely be handlers` (which BTW, i am a big fan of) would produce findings for all of this lines:\r\n\r\n`when: mytask.changed`\r\n`when: mytask is changed`\r\n...\r\n`when: mytask is changed and wartherIsNice|bool`\r\n\r\nWhile i totally agree that the first two examples are bad practices and should produce a linter warning, i would not agree, that the last example should.\r\n\r\n##### Proposed solution\r\n\r\nAs mentioned in #419 i could imagine of splitting up E503 into two rules, one of which reacts to single conditions and one for more complex conditions involving `and` or `or` - that way both could be skipped/disabled seperately.\r\n\r\nAs @ssbarnea pointed out, it might also be a solution to disable the check completeley for complex conditons.\r\n\r\n##### Issue Type\r\n\r\n- Bug Report\r\n\r\n\r\n- ansible installation method: OS package\r\n- ansible-lint installation method: pip\r\n\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "# Copyright (c) 2016 Will Thames <[email protected]>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n\"\"\"UseHandlerRatherThanWhenChangedRule used with ansible-lint.\"\"\"\nfrom __future__ import annotations\n\nimport sys\nfrom typing import TYPE_CHECKING\n\nfrom ansiblelint.rules import AnsibleLintRule\n\nif TYPE_CHECKING:\n from ansiblelint.file_utils import Lintable\n from ansiblelint.utils import Task\n\n\ndef _changed_in_when(item: str) -> bool:\n if not isinstance(item, str):\n return False\n item_list = item.split()\n\n if {\"and\", \"not\"} & set(item_list):\n return False\n return any(\n changed in item\n for changed in [\n \".changed\",\n \"|changed\",\n '[\"changed\"]',\n \"['changed']\",\n \"is changed\",\n ]\n )\n\n\nclass UseHandlerRatherThanWhenChangedRule(AnsibleLintRule):\n \"\"\"Tasks that run when changed should likely be handlers.\"\"\"\n\n id = \"no-handler\"\n description = (\n \"If a task has a ``when: result.changed`` setting, it is effectively \"\n \"acting as a handler. You could use ``notify`` and move that task to \"\n \"``handlers``.\"\n )\n link = \"https://docs.ansible.com/ansible/latest/playbook_guide/playbooks_handlers.html#handlers\"\n severity = \"MEDIUM\"\n tags = [\"idiom\"]\n version_added = \"historic\"\n\n def matchtask(\n self,\n task: Task,\n file: Lintable | None = None,\n ) -> bool | str:\n if task[\"__ansible_action_type__\"] != \"task\":\n return False\n\n when = task.get(\"when\")\n\n if isinstance(when, list):\n for item in when:\n if _changed_in_when(item):\n return True\n if isinstance(when, str):\n return _changed_in_when(when)\n return False\n\n\nif \"pytest\" in sys.modules:\n import pytest\n\n from ansiblelint.rules import RulesCollection # pylint: disable=ungrouped-imports\n from ansiblelint.runner import Runner # pylint: disable=ungrouped-imports\n\n @pytest.mark.parametrize(\n (\"test_file\", \"failures\"),\n (\n pytest.param(\"examples/playbooks/no_handler_fail.yml\", 7, id=\"fail\"),\n pytest.param(\"examples/playbooks/no_handler_pass.yml\", 0, id=\"pass\"),\n ),\n )\n def test_no_handler(\n default_rules_collection: RulesCollection,\n test_file: str,\n failures: int,\n ) -> None:\n \"\"\"Test rule matches.\"\"\"\n results = Runner(test_file, rules=default_rules_collection).run()\n assert len(results) == failures\n for result in results:\n assert result.tag == \"no-handler\"\n", "path": "src/ansiblelint/rules/no_handler.py"}], "after_files": [{"content": "# Copyright (c) 2016 Will Thames <[email protected]>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n\"\"\"UseHandlerRatherThanWhenChangedRule used with ansible-lint.\"\"\"\nfrom __future__ import annotations\n\nimport sys\nfrom typing import TYPE_CHECKING\n\nfrom ansiblelint.rules import AnsibleLintRule\n\nif TYPE_CHECKING:\n from ansiblelint.file_utils import Lintable\n from ansiblelint.utils import Task\n\n\ndef _changed_in_when(item: str) -> bool:\n if not isinstance(item, str):\n return False\n item_list = item.split()\n\n if {\"and\", \"or\", \"not\"} & set(item_list):\n return False\n return any(\n changed in item\n for changed in [\n \".changed\",\n \"|changed\",\n '[\"changed\"]',\n \"['changed']\",\n \"is changed\",\n ]\n )\n\n\nclass UseHandlerRatherThanWhenChangedRule(AnsibleLintRule):\n \"\"\"Tasks that run when changed should likely be handlers.\"\"\"\n\n id = \"no-handler\"\n description = (\n \"If a task has a ``when: result.changed`` setting, it is effectively \"\n \"acting as a handler. You could use ``notify`` and move that task to \"\n \"``handlers``.\"\n )\n link = \"https://docs.ansible.com/ansible/latest/playbook_guide/playbooks_handlers.html#handlers\"\n severity = \"MEDIUM\"\n tags = [\"idiom\"]\n version_added = \"historic\"\n\n def matchtask(\n self,\n task: Task,\n file: Lintable | None = None,\n ) -> bool | str:\n if task[\"__ansible_action_type__\"] != \"task\":\n return False\n\n when = task.get(\"when\")\n\n if isinstance(when, list):\n if len(when) > 1:\n return False\n return _changed_in_when(when[0])\n if isinstance(when, str):\n return _changed_in_when(when)\n return False\n\n\nif \"pytest\" in sys.modules:\n import pytest\n\n from ansiblelint.rules import RulesCollection # pylint: disable=ungrouped-imports\n from ansiblelint.runner import Runner # pylint: disable=ungrouped-imports\n\n @pytest.mark.parametrize(\n (\"test_file\", \"failures\"),\n (\n pytest.param(\"examples/playbooks/no_handler_fail.yml\", 5, id=\"fail\"),\n pytest.param(\"examples/playbooks/no_handler_pass.yml\", 0, id=\"pass\"),\n ),\n )\n def test_no_handler(\n default_rules_collection: RulesCollection,\n test_file: str,\n failures: int,\n ) -> None:\n \"\"\"Test rule matches.\"\"\"\n results = Runner(test_file, rules=default_rules_collection).run()\n assert len(results) == failures\n for result in results:\n assert result.tag == \"no-handler\"\n", "path": "src/ansiblelint/rules/no_handler.py"}]}
num_tokens: 1,568
num_tokens_diff: 303
problem_id: gh_patches_debug_6754
source: rasdani/github-patches
task_type: git_diff
in_source_id: strawberry-graphql__strawberry-2455
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- ContrainedFloatValue from pydantic needs support <!-- Provide a general summary of the bug in the title above. --> <!--- This template is entirely optional and can be removed, but is here to help both you and us. --> <!--- Anything on lines wrapped in comments like these will not show up in the final text. --> ## Describe the Bug <!-- A clear and concise description of what the bug is. --> I am trying to import the below into a strawberry type ``` class coordinates(BaseModel): latitude: float= Field(...,gt=-90,lt=90) longitude: float= Field(...,gt=-180,lt=180) accuracy: int | None = Field(None, gt=50, lt=100) ``` However, I run into this error: TypeError: Coordinates fields cannot be resolved. Unexpected type '<class 'schema.ConstrainedFloatValue'>' If, I change `latitude: float= Field(...,gt=-90,lt=90)` into `latitude: int= Field(...,gt=-90,lt=90)` Then importing using the below works: ``` @strawberry.experimental.pydantic.type(model=coordinates) class Coordinates: """ Class that takes in coordinates from GeoLocation Provider in front-end """ latitude: strawberry.auto longitude: strawberry.auto accuracy: strawberry.auto timestamp: Date ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `strawberry/experimental/pydantic/fields.py` Content: ``` 1 import builtins 2 from decimal import Decimal 3 from typing import Any, List, Optional, Type 4 from uuid import UUID 5 6 import pydantic 7 from pydantic import BaseModel 8 from pydantic.typing import get_args, get_origin, is_new_type, new_type_supertype 9 from pydantic.utils import lenient_issubclass 10 11 from strawberry.experimental.pydantic.exceptions import ( 12 UnregisteredTypeException, 13 UnsupportedTypeError, 14 ) 15 from strawberry.types.types import TypeDefinition 16 17 try: 18 from typing import GenericAlias as TypingGenericAlias # type: ignore 19 except ImportError: 20 # python < 3.9 does not have GenericAlias (list[int], tuple[str, ...] 
and so on) 21 TypingGenericAlias = () 22 23 24 ATTR_TO_TYPE_MAP = { 25 "NoneStr": Optional[str], 26 "NoneBytes": Optional[bytes], 27 "StrBytes": None, 28 "NoneStrBytes": None, 29 "StrictStr": str, 30 "ConstrainedBytes": bytes, 31 "conbytes": bytes, 32 "ConstrainedStr": str, 33 "constr": str, 34 "EmailStr": str, 35 "PyObject": None, 36 "ConstrainedInt": int, 37 "conint": int, 38 "PositiveInt": int, 39 "NegativeInt": int, 40 "ConstrainedFloat": float, 41 "confloat": float, 42 "PositiveFloat": float, 43 "NegativeFloat": float, 44 "ConstrainedDecimal": Decimal, 45 "condecimal": Decimal, 46 "UUID1": UUID, 47 "UUID3": UUID, 48 "UUID4": UUID, 49 "UUID5": UUID, 50 "FilePath": None, 51 "DirectoryPath": None, 52 "Json": None, 53 "JsonWrapper": None, 54 "SecretStr": str, 55 "SecretBytes": bytes, 56 "StrictBool": bool, 57 "StrictInt": int, 58 "StrictFloat": float, 59 "PaymentCardNumber": None, 60 "ByteSize": None, 61 "AnyUrl": str, 62 "AnyHttpUrl": str, 63 "HttpUrl": str, 64 "PostgresDsn": str, 65 "RedisDsn": str, 66 } 67 68 69 FIELDS_MAP = { 70 getattr(pydantic, field_name): type 71 for field_name, type in ATTR_TO_TYPE_MAP.items() 72 if hasattr(pydantic, field_name) 73 } 74 75 76 def get_basic_type(type_) -> Type[Any]: 77 if lenient_issubclass(type_, pydantic.ConstrainedInt): 78 return int 79 if lenient_issubclass(type_, pydantic.ConstrainedStr): 80 return str 81 if lenient_issubclass(type_, pydantic.ConstrainedList): 82 return List[get_basic_type(type_.item_type)] # type: ignore 83 84 if type_ in FIELDS_MAP: 85 type_ = FIELDS_MAP.get(type_) 86 87 if type_ is None: 88 raise UnsupportedTypeError() 89 90 if is_new_type(type_): 91 return new_type_supertype(type_) 92 93 return type_ 94 95 96 def replace_pydantic_types(type_: Any, is_input: bool): 97 if lenient_issubclass(type_, BaseModel): 98 attr = "_strawberry_input_type" if is_input else "_strawberry_type" 99 if hasattr(type_, attr): 100 return getattr(type_, attr) 101 else: 102 raise UnregisteredTypeException(type_) 103 return type_ 104 105 106 def replace_types_recursively(type_: Any, is_input: bool) -> Any: 107 """Runs the conversions recursively into the arguments of generic types if any""" 108 basic_type = get_basic_type(type_) 109 replaced_type = replace_pydantic_types(basic_type, is_input) 110 111 origin = get_origin(type_) 112 if not origin or not hasattr(type_, "__args__"): 113 return replaced_type 114 115 converted = tuple( 116 replace_types_recursively(t, is_input=is_input) for t in get_args(replaced_type) 117 ) 118 119 if isinstance(replaced_type, TypingGenericAlias): 120 return TypingGenericAlias(origin, converted) 121 122 replaced_type = replaced_type.copy_with(converted) 123 124 if isinstance(replaced_type, TypeDefinition): 125 # TODO: Not sure if this is necessary. No coverage in tests 126 # TODO: Unnecessary with StrawberryObject 127 replaced_type = builtins.type( 128 replaced_type.name, 129 (), 130 {"_type_definition": replaced_type}, 131 ) 132 133 return replaced_type 134 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/strawberry/experimental/pydantic/fields.py b/strawberry/experimental/pydantic/fields.py --- a/strawberry/experimental/pydantic/fields.py +++ b/strawberry/experimental/pydantic/fields.py @@ -76,6 +76,8 @@ def get_basic_type(type_) -> Type[Any]: if lenient_issubclass(type_, pydantic.ConstrainedInt): return int + if lenient_issubclass(type_, pydantic.ConstrainedFloat): + return float if lenient_issubclass(type_, pydantic.ConstrainedStr): return str if lenient_issubclass(type_, pydantic.ConstrainedList):
{"golden_diff": "diff --git a/strawberry/experimental/pydantic/fields.py b/strawberry/experimental/pydantic/fields.py\n--- a/strawberry/experimental/pydantic/fields.py\n+++ b/strawberry/experimental/pydantic/fields.py\n@@ -76,6 +76,8 @@\n def get_basic_type(type_) -> Type[Any]:\n if lenient_issubclass(type_, pydantic.ConstrainedInt):\n return int\n+ if lenient_issubclass(type_, pydantic.ConstrainedFloat):\n+ return float\n if lenient_issubclass(type_, pydantic.ConstrainedStr):\n return str\n if lenient_issubclass(type_, pydantic.ConstrainedList):\n", "issue": "ContrainedFloatValue from pydantic needs support\n<!-- Provide a general summary of the bug in the title above. -->\r\n\r\n<!--- This template is entirely optional and can be removed, but is here to help both you and us. -->\r\n<!--- Anything on lines wrapped in comments like these will not show up in the final text. -->\r\n\r\n## Describe the Bug\r\n\r\n<!-- A clear and concise description of what the bug is. -->\r\n\r\nI am trying to import the below into a strawberry type\r\n\r\n```\r\nclass coordinates(BaseModel):\r\n latitude: float= Field(...,gt=-90,lt=90)\r\n longitude: float= Field(...,gt=-180,lt=180)\r\n accuracy: int | None = Field(None, gt=50, lt=100)\r\n```\r\n\r\nHowever, I run into this error:\r\n\r\nTypeError: Coordinates fields cannot be resolved. Unexpected type '<class 'schema.ConstrainedFloatValue'>'\r\n\r\nIf, I change `latitude: float= Field(...,gt=-90,lt=90)` into `latitude: int= Field(...,gt=-90,lt=90)`\r\n\r\nThen importing using the below works:\r\n\r\n```\r\[email protected](model=coordinates)\r\nclass Coordinates:\r\n \"\"\" \r\n Class that takes in coordinates from GeoLocation Provider in front-end\r\n \"\"\" \r\n latitude: strawberry.auto\r\n longitude: strawberry.auto\r\n accuracy: strawberry.auto\r\n timestamp: Date\r\n```\r\n\r\n\n", "before_files": [{"content": "import builtins\nfrom decimal import Decimal\nfrom typing import Any, List, Optional, Type\nfrom uuid import UUID\n\nimport pydantic\nfrom pydantic import BaseModel\nfrom pydantic.typing import get_args, get_origin, is_new_type, new_type_supertype\nfrom pydantic.utils import lenient_issubclass\n\nfrom strawberry.experimental.pydantic.exceptions import (\n UnregisteredTypeException,\n UnsupportedTypeError,\n)\nfrom strawberry.types.types import TypeDefinition\n\ntry:\n from typing import GenericAlias as TypingGenericAlias # type: ignore\nexcept ImportError:\n # python < 3.9 does not have GenericAlias (list[int], tuple[str, ...] 
and so on)\n TypingGenericAlias = ()\n\n\nATTR_TO_TYPE_MAP = {\n \"NoneStr\": Optional[str],\n \"NoneBytes\": Optional[bytes],\n \"StrBytes\": None,\n \"NoneStrBytes\": None,\n \"StrictStr\": str,\n \"ConstrainedBytes\": bytes,\n \"conbytes\": bytes,\n \"ConstrainedStr\": str,\n \"constr\": str,\n \"EmailStr\": str,\n \"PyObject\": None,\n \"ConstrainedInt\": int,\n \"conint\": int,\n \"PositiveInt\": int,\n \"NegativeInt\": int,\n \"ConstrainedFloat\": float,\n \"confloat\": float,\n \"PositiveFloat\": float,\n \"NegativeFloat\": float,\n \"ConstrainedDecimal\": Decimal,\n \"condecimal\": Decimal,\n \"UUID1\": UUID,\n \"UUID3\": UUID,\n \"UUID4\": UUID,\n \"UUID5\": UUID,\n \"FilePath\": None,\n \"DirectoryPath\": None,\n \"Json\": None,\n \"JsonWrapper\": None,\n \"SecretStr\": str,\n \"SecretBytes\": bytes,\n \"StrictBool\": bool,\n \"StrictInt\": int,\n \"StrictFloat\": float,\n \"PaymentCardNumber\": None,\n \"ByteSize\": None,\n \"AnyUrl\": str,\n \"AnyHttpUrl\": str,\n \"HttpUrl\": str,\n \"PostgresDsn\": str,\n \"RedisDsn\": str,\n}\n\n\nFIELDS_MAP = {\n getattr(pydantic, field_name): type\n for field_name, type in ATTR_TO_TYPE_MAP.items()\n if hasattr(pydantic, field_name)\n}\n\n\ndef get_basic_type(type_) -> Type[Any]:\n if lenient_issubclass(type_, pydantic.ConstrainedInt):\n return int\n if lenient_issubclass(type_, pydantic.ConstrainedStr):\n return str\n if lenient_issubclass(type_, pydantic.ConstrainedList):\n return List[get_basic_type(type_.item_type)] # type: ignore\n\n if type_ in FIELDS_MAP:\n type_ = FIELDS_MAP.get(type_)\n\n if type_ is None:\n raise UnsupportedTypeError()\n\n if is_new_type(type_):\n return new_type_supertype(type_)\n\n return type_\n\n\ndef replace_pydantic_types(type_: Any, is_input: bool):\n if lenient_issubclass(type_, BaseModel):\n attr = \"_strawberry_input_type\" if is_input else \"_strawberry_type\"\n if hasattr(type_, attr):\n return getattr(type_, attr)\n else:\n raise UnregisteredTypeException(type_)\n return type_\n\n\ndef replace_types_recursively(type_: Any, is_input: bool) -> Any:\n \"\"\"Runs the conversions recursively into the arguments of generic types if any\"\"\"\n basic_type = get_basic_type(type_)\n replaced_type = replace_pydantic_types(basic_type, is_input)\n\n origin = get_origin(type_)\n if not origin or not hasattr(type_, \"__args__\"):\n return replaced_type\n\n converted = tuple(\n replace_types_recursively(t, is_input=is_input) for t in get_args(replaced_type)\n )\n\n if isinstance(replaced_type, TypingGenericAlias):\n return TypingGenericAlias(origin, converted)\n\n replaced_type = replaced_type.copy_with(converted)\n\n if isinstance(replaced_type, TypeDefinition):\n # TODO: Not sure if this is necessary. 
No coverage in tests\n # TODO: Unnecessary with StrawberryObject\n replaced_type = builtins.type(\n replaced_type.name,\n (),\n {\"_type_definition\": replaced_type},\n )\n\n return replaced_type\n", "path": "strawberry/experimental/pydantic/fields.py"}], "after_files": [{"content": "import builtins\nfrom decimal import Decimal\nfrom typing import Any, List, Optional, Type\nfrom uuid import UUID\n\nimport pydantic\nfrom pydantic import BaseModel\nfrom pydantic.typing import get_args, get_origin, is_new_type, new_type_supertype\nfrom pydantic.utils import lenient_issubclass\n\nfrom strawberry.experimental.pydantic.exceptions import (\n UnregisteredTypeException,\n UnsupportedTypeError,\n)\nfrom strawberry.types.types import TypeDefinition\n\ntry:\n from typing import GenericAlias as TypingGenericAlias # type: ignore\nexcept ImportError:\n # python < 3.9 does not have GenericAlias (list[int], tuple[str, ...] and so on)\n TypingGenericAlias = ()\n\n\nATTR_TO_TYPE_MAP = {\n \"NoneStr\": Optional[str],\n \"NoneBytes\": Optional[bytes],\n \"StrBytes\": None,\n \"NoneStrBytes\": None,\n \"StrictStr\": str,\n \"ConstrainedBytes\": bytes,\n \"conbytes\": bytes,\n \"ConstrainedStr\": str,\n \"constr\": str,\n \"EmailStr\": str,\n \"PyObject\": None,\n \"ConstrainedInt\": int,\n \"conint\": int,\n \"PositiveInt\": int,\n \"NegativeInt\": int,\n \"ConstrainedFloat\": float,\n \"confloat\": float,\n \"PositiveFloat\": float,\n \"NegativeFloat\": float,\n \"ConstrainedDecimal\": Decimal,\n \"condecimal\": Decimal,\n \"UUID1\": UUID,\n \"UUID3\": UUID,\n \"UUID4\": UUID,\n \"UUID5\": UUID,\n \"FilePath\": None,\n \"DirectoryPath\": None,\n \"Json\": None,\n \"JsonWrapper\": None,\n \"SecretStr\": str,\n \"SecretBytes\": bytes,\n \"StrictBool\": bool,\n \"StrictInt\": int,\n \"StrictFloat\": float,\n \"PaymentCardNumber\": None,\n \"ByteSize\": None,\n \"AnyUrl\": str,\n \"AnyHttpUrl\": str,\n \"HttpUrl\": str,\n \"PostgresDsn\": str,\n \"RedisDsn\": str,\n}\n\n\nFIELDS_MAP = {\n getattr(pydantic, field_name): type\n for field_name, type in ATTR_TO_TYPE_MAP.items()\n if hasattr(pydantic, field_name)\n}\n\n\ndef get_basic_type(type_) -> Type[Any]:\n if lenient_issubclass(type_, pydantic.ConstrainedInt):\n return int\n if lenient_issubclass(type_, pydantic.ConstrainedFloat):\n return float\n if lenient_issubclass(type_, pydantic.ConstrainedStr):\n return str\n if lenient_issubclass(type_, pydantic.ConstrainedList):\n return List[get_basic_type(type_.item_type)] # type: ignore\n\n if type_ in FIELDS_MAP:\n type_ = FIELDS_MAP.get(type_)\n\n if type_ is None:\n raise UnsupportedTypeError()\n\n if is_new_type(type_):\n return new_type_supertype(type_)\n\n return type_\n\n\ndef replace_pydantic_types(type_: Any, is_input: bool):\n if lenient_issubclass(type_, BaseModel):\n attr = \"_strawberry_input_type\" if is_input else \"_strawberry_type\"\n if hasattr(type_, attr):\n return getattr(type_, attr)\n else:\n raise UnregisteredTypeException(type_)\n return type_\n\n\ndef replace_types_recursively(type_: Any, is_input: bool) -> Any:\n \"\"\"Runs the conversions recursively into the arguments of generic types if any\"\"\"\n basic_type = get_basic_type(type_)\n replaced_type = replace_pydantic_types(basic_type, is_input)\n\n origin = get_origin(type_)\n if not origin or not hasattr(type_, \"__args__\"):\n return replaced_type\n\n converted = tuple(\n replace_types_recursively(t, is_input=is_input) for t in get_args(replaced_type)\n )\n\n if isinstance(replaced_type, TypingGenericAlias):\n return 
TypingGenericAlias(origin, converted)\n\n replaced_type = replaced_type.copy_with(converted)\n\n if isinstance(replaced_type, TypeDefinition):\n # TODO: Not sure if this is necessary. No coverage in tests\n # TODO: Unnecessary with StrawberryObject\n replaced_type = builtins.type(\n replaced_type.name,\n (),\n {\"_type_definition\": replaced_type},\n )\n\n return replaced_type\n", "path": "strawberry/experimental/pydantic/fields.py"}]}
num_tokens: 1,815
num_tokens_diff: 164
problem_id: gh_patches_debug_7454
source: rasdani/github-patches
task_type: git_diff
in_source_id: alltheplaces__alltheplaces-3333
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Spider target_au is broken During the global build at 2021-05-26-14-42-23, spider **target_au** failed with **0 features** and **16 errors**. Here's [the log](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/logs/target_au.log) and [the output](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/target_au.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/target_au.geojson)) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `locations/spiders/target_au.py` Content: ``` 1 import scrapy 2 3 from locations.hours import OpeningHours 4 from locations.items import GeojsonPointItem 5 6 7 class TargetAUSpider(scrapy.Spider): 8 name = "target_au" 9 item_attributes = { 'brand': "Target", 'brand_wikidata': "Q7685854" } 10 allowed_domains = ["target.com.au"] 11 states = ["nsw","vic","qld","nt", "act", "sa", "tas", "wa"] 12 headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:81.0) Gecko/20100101 Firefox/81.0", 13 "Referer": "https://www.target.com.au/store-finder"} 14 15 custom_settings = {'DOWNLOAD_DELAY' : 0.5,} 16 17 def start_requests(self): 18 url = "https://www.target.com.au/store-finder/state/{}" 19 for state in self.states: 20 yield scrapy.Request(url.format(state),headers=self.headers, callback=self.parse) 21 22 23 def parse(self, response): 24 store_links = response.xpath('//a[@class="table-tap-canonical"]/@href').getall() 25 for link in store_links: 26 yield scrapy.Request(response.urljoin(link), callback=self.parse_store, headers=self.headers) 27 28 def _parse_hour_str(self, hour_string): 29 time_, am_pm = tuple(hour_string.split(" ")) 30 hour, min = tuple(time_.split(":")) 31 hour = int(hour) 32 if am_pm == "PM": 33 hour += 12 34 return f"{hour}:{min}" 35 36 def parse_hours(self, hours_node): 37 opening_hours = OpeningHours() 38 days = hours_node.xpath(".//dt/text()").getall() 39 hours = hours_node.xpath(".//dd/text()").getall() 40 for idx, day in enumerate(days): 41 store_hours = hours[idx] 42 if "–" not in store_hours or ":" not in store_hours: 43 continue 44 parts = store_hours.strip().split(" – ") 45 open_time = self._parse_hour_str(parts[0]) 46 close_time = self._parse_hour_str(parts[1]) 47 opening_hours.add_range(day[0:2], open_time, close_time) 48 49 return opening_hours.as_opening_hours() 50 51 52 53 def parse_store(self, response): 54 store_name = response.xpath("//h4/text()").get().replace("Target – ","") 55 address_header = response.xpath("//span[@itemprop='streetAddress']/strong/text()").get() 56 address = " ".join(response.xpath("//span[@itemprop='streetAddress']/text()").getall()).strip() 57 if address_header: 58 address = address_header + " " + address 59 locality = response.xpath("//span[@itemprop='addressLocality']/text()").get() 60 region = response.xpath("//span[@itemprop='addressRegion']/text()").get() 61 post_code = response.xpath("//span[@itemprop='postalCode']/text()").get() 62 phone_number = response.xpath("//span[@itemprop='telephone']/text()").get() 63 hours_section = response.xpath("(//dl)[1]")[0] 64 opening_hours = self.parse_hours(hours_section) 65 lat = response.xpath("//div[@data-embedded-json='store-content-data']//@data-lat").get() 66 lon = response.xpath("//div[@data-embedded-json='store-content-data']//@data-lng").get() 67 68 yield 
GeojsonPointItem(lat=lat, 69 lon=lon, 70 name=store_name, 71 addr_full=address, 72 city=locality, 73 state=region, 74 postcode=post_code, 75 country="AU", 76 phone=phone_number, 77 website=response.url, 78 opening_hours=opening_hours, 79 ref=response.url.split("/")[-1]) 80 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/locations/spiders/target_au.py b/locations/spiders/target_au.py --- a/locations/spiders/target_au.py +++ b/locations/spiders/target_au.py @@ -26,6 +26,8 @@ yield scrapy.Request(response.urljoin(link), callback=self.parse_store, headers=self.headers) def _parse_hour_str(self, hour_string): + if hour_string == "Midnight": + return self._parse_hour_str("12:00 AM") time_, am_pm = tuple(hour_string.split(" ")) hour, min = tuple(time_.split(":")) hour = int(hour)
{"golden_diff": "diff --git a/locations/spiders/target_au.py b/locations/spiders/target_au.py\n--- a/locations/spiders/target_au.py\n+++ b/locations/spiders/target_au.py\n@@ -26,6 +26,8 @@\n yield scrapy.Request(response.urljoin(link), callback=self.parse_store, headers=self.headers)\n \n def _parse_hour_str(self, hour_string):\n+ if hour_string == \"Midnight\":\n+ return self._parse_hour_str(\"12:00 AM\")\n time_, am_pm = tuple(hour_string.split(\" \"))\n hour, min = tuple(time_.split(\":\"))\n hour = int(hour)\n", "issue": "Spider target_au is broken\nDuring the global build at 2021-05-26-14-42-23, spider **target_au** failed with **0 features** and **16 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/logs/target_au.log) and [the output](https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/target_au.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-05-26-14-42-23/output/target_au.geojson))\n", "before_files": [{"content": "import scrapy\n\nfrom locations.hours import OpeningHours\nfrom locations.items import GeojsonPointItem\n\n\nclass TargetAUSpider(scrapy.Spider):\n name = \"target_au\"\n item_attributes = { 'brand': \"Target\", 'brand_wikidata': \"Q7685854\" }\n allowed_domains = [\"target.com.au\"]\n states = [\"nsw\",\"vic\",\"qld\",\"nt\", \"act\", \"sa\", \"tas\", \"wa\"]\n headers = {\"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:81.0) Gecko/20100101 Firefox/81.0\",\n \"Referer\": \"https://www.target.com.au/store-finder\"}\n\n custom_settings = {'DOWNLOAD_DELAY' : 0.5,}\n\n def start_requests(self):\n url = \"https://www.target.com.au/store-finder/state/{}\"\n for state in self.states:\n yield scrapy.Request(url.format(state),headers=self.headers, callback=self.parse)\n\n\n def parse(self, response):\n store_links = response.xpath('//a[@class=\"table-tap-canonical\"]/@href').getall()\n for link in store_links:\n yield scrapy.Request(response.urljoin(link), callback=self.parse_store, headers=self.headers)\n\n def _parse_hour_str(self, hour_string):\n time_, am_pm = tuple(hour_string.split(\" \"))\n hour, min = tuple(time_.split(\":\"))\n hour = int(hour)\n if am_pm == \"PM\":\n hour += 12\n return f\"{hour}:{min}\"\n\n def parse_hours(self, hours_node):\n opening_hours = OpeningHours()\n days = hours_node.xpath(\".//dt/text()\").getall()\n hours = hours_node.xpath(\".//dd/text()\").getall()\n for idx, day in enumerate(days):\n store_hours = hours[idx]\n if \"\u2013\" not in store_hours or \":\" not in store_hours:\n continue\n parts = store_hours.strip().split(\" \u2013 \")\n open_time = self._parse_hour_str(parts[0])\n close_time = self._parse_hour_str(parts[1])\n opening_hours.add_range(day[0:2], open_time, close_time)\n \n return opening_hours.as_opening_hours()\n\n\n\n def parse_store(self, response):\n store_name = response.xpath(\"//h4/text()\").get().replace(\"Target \u2013 \",\"\")\n address_header = response.xpath(\"//span[@itemprop='streetAddress']/strong/text()\").get()\n address = \" \".join(response.xpath(\"//span[@itemprop='streetAddress']/text()\").getall()).strip()\n if address_header:\n address = address_header + \" \" + address\n locality = response.xpath(\"//span[@itemprop='addressLocality']/text()\").get()\n region = response.xpath(\"//span[@itemprop='addressRegion']/text()\").get()\n post_code = response.xpath(\"//span[@itemprop='postalCode']/text()\").get()\n phone_number = response.xpath(\"//span[@itemprop='telephone']/text()\").get()\n 
hours_section = response.xpath(\"(//dl)[1]\")[0]\n opening_hours = self.parse_hours(hours_section)\n lat = response.xpath(\"//div[@data-embedded-json='store-content-data']//@data-lat\").get()\n lon = response.xpath(\"//div[@data-embedded-json='store-content-data']//@data-lng\").get()\n\n yield GeojsonPointItem(lat=lat,\n lon=lon,\n name=store_name,\n addr_full=address,\n city=locality,\n state=region,\n postcode=post_code,\n country=\"AU\",\n phone=phone_number,\n website=response.url,\n opening_hours=opening_hours,\n ref=response.url.split(\"/\")[-1]) \n", "path": "locations/spiders/target_au.py"}], "after_files": [{"content": "import scrapy\n\nfrom locations.hours import OpeningHours\nfrom locations.items import GeojsonPointItem\n\n\nclass TargetAUSpider(scrapy.Spider):\n name = \"target_au\"\n item_attributes = { 'brand': \"Target\", 'brand_wikidata': \"Q7685854\" }\n allowed_domains = [\"target.com.au\"]\n states = [\"nsw\",\"vic\",\"qld\",\"nt\", \"act\", \"sa\", \"tas\", \"wa\"]\n headers = {\"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:81.0) Gecko/20100101 Firefox/81.0\",\n \"Referer\": \"https://www.target.com.au/store-finder\"}\n\n custom_settings = {'DOWNLOAD_DELAY' : 0.5,}\n\n def start_requests(self):\n url = \"https://www.target.com.au/store-finder/state/{}\"\n for state in self.states:\n yield scrapy.Request(url.format(state),headers=self.headers, callback=self.parse)\n\n\n def parse(self, response):\n store_links = response.xpath('//a[@class=\"table-tap-canonical\"]/@href').getall()\n for link in store_links:\n yield scrapy.Request(response.urljoin(link), callback=self.parse_store, headers=self.headers)\n\n def _parse_hour_str(self, hour_string):\n if hour_string == \"Midnight\":\n return self._parse_hour_str(\"12:00 AM\")\n time_, am_pm = tuple(hour_string.split(\" \"))\n hour, min = tuple(time_.split(\":\"))\n hour = int(hour)\n if am_pm == \"PM\":\n hour += 12\n return f\"{hour}:{min}\"\n\n def parse_hours(self, hours_node):\n opening_hours = OpeningHours()\n days = hours_node.xpath(\".//dt/text()\").getall()\n hours = hours_node.xpath(\".//dd/text()\").getall()\n for idx, day in enumerate(days):\n store_hours = hours[idx]\n if \"\u2013\" not in store_hours or \":\" not in store_hours:\n continue\n parts = store_hours.strip().split(\" \u2013 \")\n open_time = self._parse_hour_str(parts[0])\n close_time = self._parse_hour_str(parts[1])\n opening_hours.add_range(day[0:2], open_time, close_time)\n \n return opening_hours.as_opening_hours()\n\n\n\n def parse_store(self, response):\n store_name = response.xpath(\"//h4/text()\").get().replace(\"Target \u2013 \",\"\")\n address_header = response.xpath(\"//span[@itemprop='streetAddress']/strong/text()\").get()\n address = \" \".join(response.xpath(\"//span[@itemprop='streetAddress']/text()\").getall()).strip()\n if address_header:\n address = address_header + \" \" + address\n locality = response.xpath(\"//span[@itemprop='addressLocality']/text()\").get()\n region = response.xpath(\"//span[@itemprop='addressRegion']/text()\").get()\n post_code = response.xpath(\"//span[@itemprop='postalCode']/text()\").get()\n phone_number = response.xpath(\"//span[@itemprop='telephone']/text()\").get()\n hours_section = response.xpath(\"(//dl)[1]\")[0]\n opening_hours = self.parse_hours(hours_section)\n lat = response.xpath(\"//div[@data-embedded-json='store-content-data']//@data-lat\").get()\n lon = response.xpath(\"//div[@data-embedded-json='store-content-data']//@data-lng\").get()\n\n yield GeojsonPointItem(lat=lat,\n 
lon=lon,\n name=store_name,\n addr_full=address,\n city=locality,\n state=region,\n postcode=post_code,\n country=\"AU\",\n phone=phone_number,\n website=response.url,\n opening_hours=opening_hours,\n ref=response.url.split(\"/\")[-1]) \n", "path": "locations/spiders/target_au.py"}]}
1,422
141
gh_patches_debug_22562
rasdani/github-patches
git_diff
liqd__a4-meinberlin-2372
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- text commenting results show module detail when only text commenting module used for project, module detail also shown in results tab --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `meinberlin/apps/documents/views.py` Content: ``` 1 from django.http import Http404 2 from django.http.response import HttpResponseRedirect 3 from django.urls import reverse 4 from django.utils.translation import ugettext_lazy as _ 5 from django.views import generic 6 7 from adhocracy4.dashboard import mixins as dashboard_mixins 8 from adhocracy4.projects.mixins import ProjectMixin 9 from adhocracy4.rules import mixins as rules_mixins 10 from meinberlin.apps.contrib import mixins as contrib_mixins 11 from meinberlin.apps.exports.views import DashboardExportView 12 13 from . import models 14 15 16 class DocumentDashboardView(ProjectMixin, 17 dashboard_mixins.DashboardBaseMixin, 18 dashboard_mixins.DashboardComponentMixin, 19 generic.TemplateView): 20 template_name = 'meinberlin_documents/document_dashboard.html' 21 permission_required = 'a4projects.change_project' 22 23 def get_permission_object(self): 24 return self.project 25 26 27 class ChapterDetailView(ProjectMixin, 28 rules_mixins.PermissionRequiredMixin, 29 generic.DetailView, 30 contrib_mixins.DisplayProjectOrModuleMixin): 31 model = models.Chapter 32 permission_required = 'meinberlin_documents.view_chapter' 33 get_context_from_object = True 34 35 def dispatch(self, request, *args, **kwargs): 36 # Redirect first chapter view to the project detail page 37 res = super().dispatch(request, *args, **kwargs) 38 chapter = self.get_object() 39 if self.request.path == chapter.get_absolute_url() \ 40 and chapter == self.chapter_list.first(): 41 return HttpResponseRedirect(self.project.get_absolute_url()) 42 else: 43 return res 44 45 def get_context_data(self, **kwargs): 46 context = super(ChapterDetailView, self).get_context_data(**kwargs) 47 context['chapter_list'] = self.chapter_list 48 return context 49 50 @property 51 def chapter_list(self): 52 return models.Chapter.objects.filter(module=self.module) 53 54 55 class DocumentDetailView(ChapterDetailView): 56 get_context_from_object = False 57 58 def get_object(self): 59 first_chapter = models.Chapter.objects \ 60 .filter(module=self.module) \ 61 .first() 62 63 if not first_chapter: 64 raise Http404(_('Document has no chapters defined.')) 65 return first_chapter 66 67 68 class ParagraphDetailView(ProjectMixin, 69 rules_mixins.PermissionRequiredMixin, 70 generic.DetailView): 71 model = models.Paragraph 72 permission_required = 'meinberlin_documents.view_paragraph' 73 74 75 class DocumentDashboardExportView(DashboardExportView): 76 template_name = 'meinberlin_exports/export_dashboard.html' 77 78 def get_context_data(self, **kwargs): 79 context = super().get_context_data(**kwargs) 80 context['comment_export'] = reverse( 81 'a4dashboard:document-comment-export', 82 kwargs={'module_slug': self.module.slug}) 83 return context 84 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/meinberlin/apps/documents/views.py b/meinberlin/apps/documents/views.py --- a/meinberlin/apps/documents/views.py +++ b/meinberlin/apps/documents/views.py @@ -1,5 +1,4 @@ from django.http import Http404 -from django.http.response import HttpResponseRedirect from django.urls import reverse from django.utils.translation import ugettext_lazy as _ from django.views import generic @@ -32,16 +31,6 @@ permission_required = 'meinberlin_documents.view_chapter' get_context_from_object = True - def dispatch(self, request, *args, **kwargs): - # Redirect first chapter view to the project detail page - res = super().dispatch(request, *args, **kwargs) - chapter = self.get_object() - if self.request.path == chapter.get_absolute_url() \ - and chapter == self.chapter_list.first(): - return HttpResponseRedirect(self.project.get_absolute_url()) - else: - return res - def get_context_data(self, **kwargs): context = super(ChapterDetailView, self).get_context_data(**kwargs) context['chapter_list'] = self.chapter_list
{"golden_diff": "diff --git a/meinberlin/apps/documents/views.py b/meinberlin/apps/documents/views.py\n--- a/meinberlin/apps/documents/views.py\n+++ b/meinberlin/apps/documents/views.py\n@@ -1,5 +1,4 @@\n from django.http import Http404\n-from django.http.response import HttpResponseRedirect\n from django.urls import reverse\n from django.utils.translation import ugettext_lazy as _\n from django.views import generic\n@@ -32,16 +31,6 @@\n permission_required = 'meinberlin_documents.view_chapter'\n get_context_from_object = True\n \n- def dispatch(self, request, *args, **kwargs):\n- # Redirect first chapter view to the project detail page\n- res = super().dispatch(request, *args, **kwargs)\n- chapter = self.get_object()\n- if self.request.path == chapter.get_absolute_url() \\\n- and chapter == self.chapter_list.first():\n- return HttpResponseRedirect(self.project.get_absolute_url())\n- else:\n- return res\n-\n def get_context_data(self, **kwargs):\n context = super(ChapterDetailView, self).get_context_data(**kwargs)\n context['chapter_list'] = self.chapter_list\n", "issue": "text commenting results show module detail \nwhen only text commenting module used for project, module detail also shown in results tab\n", "before_files": [{"content": "from django.http import Http404\nfrom django.http.response import HttpResponseRedirect\nfrom django.urls import reverse\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views import generic\n\nfrom adhocracy4.dashboard import mixins as dashboard_mixins\nfrom adhocracy4.projects.mixins import ProjectMixin\nfrom adhocracy4.rules import mixins as rules_mixins\nfrom meinberlin.apps.contrib import mixins as contrib_mixins\nfrom meinberlin.apps.exports.views import DashboardExportView\n\nfrom . import models\n\n\nclass DocumentDashboardView(ProjectMixin,\n dashboard_mixins.DashboardBaseMixin,\n dashboard_mixins.DashboardComponentMixin,\n generic.TemplateView):\n template_name = 'meinberlin_documents/document_dashboard.html'\n permission_required = 'a4projects.change_project'\n\n def get_permission_object(self):\n return self.project\n\n\nclass ChapterDetailView(ProjectMixin,\n rules_mixins.PermissionRequiredMixin,\n generic.DetailView,\n contrib_mixins.DisplayProjectOrModuleMixin):\n model = models.Chapter\n permission_required = 'meinberlin_documents.view_chapter'\n get_context_from_object = True\n\n def dispatch(self, request, *args, **kwargs):\n # Redirect first chapter view to the project detail page\n res = super().dispatch(request, *args, **kwargs)\n chapter = self.get_object()\n if self.request.path == chapter.get_absolute_url() \\\n and chapter == self.chapter_list.first():\n return HttpResponseRedirect(self.project.get_absolute_url())\n else:\n return res\n\n def get_context_data(self, **kwargs):\n context = super(ChapterDetailView, self).get_context_data(**kwargs)\n context['chapter_list'] = self.chapter_list\n return context\n\n @property\n def chapter_list(self):\n return models.Chapter.objects.filter(module=self.module)\n\n\nclass DocumentDetailView(ChapterDetailView):\n get_context_from_object = False\n\n def get_object(self):\n first_chapter = models.Chapter.objects \\\n .filter(module=self.module) \\\n .first()\n\n if not first_chapter:\n raise Http404(_('Document has no chapters defined.'))\n return first_chapter\n\n\nclass ParagraphDetailView(ProjectMixin,\n rules_mixins.PermissionRequiredMixin,\n generic.DetailView):\n model = models.Paragraph\n permission_required = 'meinberlin_documents.view_paragraph'\n\n\nclass 
DocumentDashboardExportView(DashboardExportView):\n template_name = 'meinberlin_exports/export_dashboard.html'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['comment_export'] = reverse(\n 'a4dashboard:document-comment-export',\n kwargs={'module_slug': self.module.slug})\n return context\n", "path": "meinberlin/apps/documents/views.py"}], "after_files": [{"content": "from django.http import Http404\nfrom django.urls import reverse\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views import generic\n\nfrom adhocracy4.dashboard import mixins as dashboard_mixins\nfrom adhocracy4.projects.mixins import ProjectMixin\nfrom adhocracy4.rules import mixins as rules_mixins\nfrom meinberlin.apps.contrib import mixins as contrib_mixins\nfrom meinberlin.apps.exports.views import DashboardExportView\n\nfrom . import models\n\n\nclass DocumentDashboardView(ProjectMixin,\n dashboard_mixins.DashboardBaseMixin,\n dashboard_mixins.DashboardComponentMixin,\n generic.TemplateView):\n template_name = 'meinberlin_documents/document_dashboard.html'\n permission_required = 'a4projects.change_project'\n\n def get_permission_object(self):\n return self.project\n\n\nclass ChapterDetailView(ProjectMixin,\n rules_mixins.PermissionRequiredMixin,\n generic.DetailView,\n contrib_mixins.DisplayProjectOrModuleMixin):\n model = models.Chapter\n permission_required = 'meinberlin_documents.view_chapter'\n get_context_from_object = True\n\n def get_context_data(self, **kwargs):\n context = super(ChapterDetailView, self).get_context_data(**kwargs)\n context['chapter_list'] = self.chapter_list\n return context\n\n @property\n def chapter_list(self):\n return models.Chapter.objects.filter(module=self.module)\n\n\nclass DocumentDetailView(ChapterDetailView):\n get_context_from_object = False\n\n def get_object(self):\n first_chapter = models.Chapter.objects \\\n .filter(module=self.module) \\\n .first()\n\n if not first_chapter:\n raise Http404(_('Document has no chapters defined.'))\n return first_chapter\n\n\nclass ParagraphDetailView(ProjectMixin,\n rules_mixins.PermissionRequiredMixin,\n generic.DetailView):\n model = models.Paragraph\n permission_required = 'meinberlin_documents.view_paragraph'\n\n\nclass DocumentDashboardExportView(DashboardExportView):\n template_name = 'meinberlin_exports/export_dashboard.html'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['comment_export'] = reverse(\n 'a4dashboard:document-comment-export',\n kwargs={'module_slug': self.module.slug})\n return context\n", "path": "meinberlin/apps/documents/views.py"}]}
1,025
257
gh_patches_debug_26732
rasdani/github-patches
git_diff
pre-commit__pre-commit-1359
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Running pre-commit with Python installed from Windows Store raises UnicodeDecodeError I think it's a special use case and maybe related to the [known issues of this kind of installation](https://docs.python.org/3.7/using/windows.html#known-issues), but still interesting to track it in issues isn't? And the kind of error surprised me: `UnicodeDecodeError`. **Reproduce** 1. Install Python through Windows Store 2. Create a virtualenv 3. Install pre-commit and run the hooks **Environment** - Windows 10 64 bits - Python 3.7.6 installed from Windows Store (see: https://docs.python.org/3.7/using/windows.html#windows-store) **Trace** ```python [WARNING] Unstaged files detected. [INFO] Stashing unstaged files to C:\Users\username/.cache\pre-commit\patch1583836330. [INFO] Initializing environment for https://github.com/pre-commit/pre-commit-hooks. [INFO] Initializing environment for https://github.com/python/black. [INFO] Installing environment for https://github.com/pre-commit/pre-commit-hooks. [INFO] Once installed this environment will be reused. [INFO] This may take a few minutes... [INFO] Restored changes from C:\Users\username/.cache\pre-commit\patch1583836330. Traceback (most recent call last): File "d:\username\doculents\git\test-project\.venv\lib\site-packages\pre_commit\error_handler.py", line 54, in error_handler yield File "d:\username\doculents\git\test-project\.venv\lib\site-packages\pre_commit\main.py", line 371, in main return run(args.config, store, args) File "d:\username\doculents\git\test-project\.venv\lib\site-packages\pre_commit\commands\run.py", line 337, in run install_hook_envs(hooks, store) File "d:\username\doculents\git\test-project\.venv\lib\site-packages\pre_commit\repository.py", line 200, in install_hook_envs _hook_install(hook) File "d:\username\doculents\git\test-project\.venv\lib\site-packages\pre_commit\repository.py", line 83, in _hook_install hook.prefix, hook.language_version, hook.additional_dependencies, File "d:\username\doculents\git\test-project\.venv\lib\site-packages\pre_commit\languages\python.py", line 192, in install_environment _make_venv(env_dir, python) File "d:\username\doculents\git\test-project\.venv\lib\site-packages\pre_commit\languages\python.py", line 204, in make_venv cmd_output_b(*cmd, env=env, cwd='/') File "d:\username\doculents\git\test-project\.venv\lib\site-packages\pre_commit\util.py", line 140, in cmd_output_b raise CalledProcessError(returncode, cmd, retcode, stdout_b, stderr_b) pre_commit.util.CalledProcessError: <exception str() failed> During handling of the above exception, another exception occurred: Traceback (most recent call last): File "C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.7_3.7.1776.0_x64__qbz5n2kfra8p0\lib\runpy.py", line 193, in _run_module_as_main "__main__", mod_spec) File "C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.7_3.7.1776.0_x64__qbz5n2kfra8p0\lib\runpy.py", line 85, in _run_code exec(code, run_globals) File "D:\username\doculents\git\test-project\.venv\Scripts\pre-commit.exe\__main__.py", line 7, in <module> File "d:\username\doculents\git\test-project\.venv\lib\site-packages\pre_commit\main.py", line 384, in main f'Command {args.command} failed to exit with a returncode', File "C:\Program Files\WindowsApps\PythonSoftwareFoundation.Python.3.7_3.7.1776.0_x64__qbz5n2kfra8p0\lib\contextlib.py", line 130, in __exit__ self.gen.throw(type, value, traceback) File 
"d:\username\doculents\git\test-project\.venv\lib\site-packages\pre_commit\error_handler.py", line 62, in error_handler _log_and_exit(msg, e, traceback.format_exc()) File "d:\username\doculents\git\test-project\.venv\lib\site-packages\pre_commit\error_handler.py", line 18, in _log_and_exit error_msg = f'{msg}: {type(exc).__name__}: {exc}' File "d:\username\doculents\git\test-project\.venv\lib\site-packages\pre_commit\util.py", line 115, in __str__ return self.__bytes__().decode() UnicodeDecodeError: 'utf-8' codec can't decode byte 0xe8 in position 341: invalid continuation byte ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pre_commit/error_handler.py` Content: ``` 1 import contextlib 2 import functools 3 import os.path 4 import sys 5 import traceback 6 from typing import Generator 7 8 import pre_commit.constants as C 9 from pre_commit import output 10 from pre_commit.store import Store 11 12 13 class FatalError(RuntimeError): 14 pass 15 16 17 def _log_and_exit(msg: str, exc: BaseException, formatted: str) -> None: 18 error_msg = f'{msg}: {type(exc).__name__}: {exc}' 19 output.write_line(error_msg) 20 log_path = os.path.join(Store().directory, 'pre-commit.log') 21 output.write_line(f'Check the log at {log_path}') 22 23 with open(log_path, 'wb') as log: 24 _log_line = functools.partial(output.write_line, stream=log) 25 26 _log_line('### version information') 27 _log_line() 28 _log_line('```') 29 _log_line(f'pre-commit version: {C.VERSION}') 30 _log_line('sys.version:') 31 for line in sys.version.splitlines(): 32 _log_line(f' {line}') 33 _log_line(f'sys.executable: {sys.executable}') 34 _log_line(f'os.name: {os.name}') 35 _log_line(f'sys.platform: {sys.platform}') 36 _log_line('```') 37 _log_line() 38 39 _log_line('### error information') 40 _log_line() 41 _log_line('```') 42 _log_line(error_msg) 43 _log_line('```') 44 _log_line() 45 _log_line('```') 46 _log_line(formatted) 47 _log_line('```') 48 raise SystemExit(1) 49 50 51 @contextlib.contextmanager 52 def error_handler() -> Generator[None, None, None]: 53 try: 54 yield 55 except (Exception, KeyboardInterrupt) as e: 56 if isinstance(e, FatalError): 57 msg = 'An error has occurred' 58 elif isinstance(e, KeyboardInterrupt): 59 msg = 'Interrupted (^C)' 60 else: 61 msg = 'An unexpected error has occurred' 62 _log_and_exit(msg, e, traceback.format_exc()) 63 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pre_commit/error_handler.py b/pre_commit/error_handler.py --- a/pre_commit/error_handler.py +++ b/pre_commit/error_handler.py @@ -14,14 +14,24 @@ pass +def _exception_to_bytes(exc: BaseException) -> bytes: + with contextlib.suppress(TypeError): + return bytes(exc) # type: ignore + with contextlib.suppress(Exception): + return str(exc).encode() + return f'<unprintable {type(exc).__name__} object>'.encode() + + def _log_and_exit(msg: str, exc: BaseException, formatted: str) -> None: - error_msg = f'{msg}: {type(exc).__name__}: {exc}' - output.write_line(error_msg) + error_msg = f'{msg}: {type(exc).__name__}: '.encode() + error_msg += _exception_to_bytes(exc) + output.write_line_b(error_msg) log_path = os.path.join(Store().directory, 'pre-commit.log') output.write_line(f'Check the log at {log_path}') with open(log_path, 'wb') as log: _log_line = functools.partial(output.write_line, stream=log) + _log_line_b = functools.partial(output.write_line_b, stream=log) _log_line('### version information') _log_line() @@ -39,7 +49,7 @@ _log_line('### error information') _log_line() _log_line('```') - _log_line(error_msg) + _log_line_b(error_msg) _log_line('```') _log_line() _log_line('```')
{"golden_diff": "diff --git a/pre_commit/error_handler.py b/pre_commit/error_handler.py\n--- a/pre_commit/error_handler.py\n+++ b/pre_commit/error_handler.py\n@@ -14,14 +14,24 @@\n pass\n \n \n+def _exception_to_bytes(exc: BaseException) -> bytes:\n+ with contextlib.suppress(TypeError):\n+ return bytes(exc) # type: ignore\n+ with contextlib.suppress(Exception):\n+ return str(exc).encode()\n+ return f'<unprintable {type(exc).__name__} object>'.encode()\n+\n+\n def _log_and_exit(msg: str, exc: BaseException, formatted: str) -> None:\n- error_msg = f'{msg}: {type(exc).__name__}: {exc}'\n- output.write_line(error_msg)\n+ error_msg = f'{msg}: {type(exc).__name__}: '.encode()\n+ error_msg += _exception_to_bytes(exc)\n+ output.write_line_b(error_msg)\n log_path = os.path.join(Store().directory, 'pre-commit.log')\n output.write_line(f'Check the log at {log_path}')\n \n with open(log_path, 'wb') as log:\n _log_line = functools.partial(output.write_line, stream=log)\n+ _log_line_b = functools.partial(output.write_line_b, stream=log)\n \n _log_line('### version information')\n _log_line()\n@@ -39,7 +49,7 @@\n _log_line('### error information')\n _log_line()\n _log_line('```')\n- _log_line(error_msg)\n+ _log_line_b(error_msg)\n _log_line('```')\n _log_line()\n _log_line('```')\n", "issue": "Running pre-commit with Python installed from Windows Store raises UnicodeDecodeError\nI think it's a special use case and maybe related to the [known issues of this kind of installation](https://docs.python.org/3.7/using/windows.html#known-issues), but still interesting to track it in issues isn't?\r\n\r\nAnd the kind of error surprised me: `UnicodeDecodeError`.\r\n\r\n**Reproduce**\r\n\r\n1. Install Python through Windows Store\r\n2. Create a virtualenv\r\n3. Install pre-commit and run the hooks\r\n\r\n**Environment**\r\n\r\n- Windows 10 64 bits\r\n- Python 3.7.6 installed from Windows Store (see: https://docs.python.org/3.7/using/windows.html#windows-store)\r\n\r\n**Trace**\r\n\r\n```python\r\n[WARNING] Unstaged files detected.\r\n[INFO] Stashing unstaged files to C:\\Users\\username/.cache\\pre-commit\\patch1583836330.\r\n[INFO] Initializing environment for https://github.com/pre-commit/pre-commit-hooks.\r\n[INFO] Initializing environment for https://github.com/python/black.\r\n[INFO] Installing environment for https://github.com/pre-commit/pre-commit-hooks.\r\n[INFO] Once installed this environment will be reused.\r\n[INFO] This may take a few minutes...\r\n[INFO] Restored changes from C:\\Users\\username/.cache\\pre-commit\\patch1583836330.\r\nTraceback (most recent call last):\r\n File \"d:\\username\\doculents\\git\\test-project\\.venv\\lib\\site-packages\\pre_commit\\error_handler.py\", line 54, in error_handler\r\n yield\r\n File \"d:\\username\\doculents\\git\\test-project\\.venv\\lib\\site-packages\\pre_commit\\main.py\", line 371, in main\r\n return run(args.config, store, args)\r\n File \"d:\\username\\doculents\\git\\test-project\\.venv\\lib\\site-packages\\pre_commit\\commands\\run.py\", line 337, in run\r\n install_hook_envs(hooks, store)\r\n File \"d:\\username\\doculents\\git\\test-project\\.venv\\lib\\site-packages\\pre_commit\\repository.py\", line 200, in install_hook_envs\r\n _hook_install(hook)\r\n File \"d:\\username\\doculents\\git\\test-project\\.venv\\lib\\site-packages\\pre_commit\\repository.py\", line 83, in _hook_install\r\n hook.prefix, hook.language_version, hook.additional_dependencies,\r\n File 
\"d:\\username\\doculents\\git\\test-project\\.venv\\lib\\site-packages\\pre_commit\\languages\\python.py\", line 192, in install_environment\r\n _make_venv(env_dir, python)\r\n File \"d:\\username\\doculents\\git\\test-project\\.venv\\lib\\site-packages\\pre_commit\\languages\\python.py\", line 204, in make_venv\r\n cmd_output_b(*cmd, env=env, cwd='/')\r\n File \"d:\\username\\doculents\\git\\test-project\\.venv\\lib\\site-packages\\pre_commit\\util.py\", line 140, in cmd_output_b\r\n raise CalledProcessError(returncode, cmd, retcode, stdout_b, stderr_b)\r\npre_commit.util.CalledProcessError: <exception str() failed>\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"C:\\Program Files\\WindowsApps\\PythonSoftwareFoundation.Python.3.7_3.7.1776.0_x64__qbz5n2kfra8p0\\lib\\runpy.py\", line 193, in _run_module_as_main\r\n \"__main__\", mod_spec)\r\n File \"C:\\Program Files\\WindowsApps\\PythonSoftwareFoundation.Python.3.7_3.7.1776.0_x64__qbz5n2kfra8p0\\lib\\runpy.py\", line 85, in _run_code\r\n exec(code, run_globals)\r\n File \"D:\\username\\doculents\\git\\test-project\\.venv\\Scripts\\pre-commit.exe\\__main__.py\", line 7, in <module>\r\n File \"d:\\username\\doculents\\git\\test-project\\.venv\\lib\\site-packages\\pre_commit\\main.py\", line 384, in main\r\n f'Command {args.command} failed to exit with a returncode',\r\n File \"C:\\Program Files\\WindowsApps\\PythonSoftwareFoundation.Python.3.7_3.7.1776.0_x64__qbz5n2kfra8p0\\lib\\contextlib.py\", line 130, in __exit__\r\n self.gen.throw(type, value, traceback)\r\n File \"d:\\username\\doculents\\git\\test-project\\.venv\\lib\\site-packages\\pre_commit\\error_handler.py\", line 62, in error_handler\r\n _log_and_exit(msg, e, traceback.format_exc())\r\n File \"d:\\username\\doculents\\git\\test-project\\.venv\\lib\\site-packages\\pre_commit\\error_handler.py\", line 18, in _log_and_exit\r\n error_msg = f'{msg}: {type(exc).__name__}: {exc}'\r\n File \"d:\\username\\doculents\\git\\test-project\\.venv\\lib\\site-packages\\pre_commit\\util.py\", line 115, in __str__\r\n return self.__bytes__().decode()\r\nUnicodeDecodeError: 'utf-8' codec can't decode byte 0xe8 in position 341: invalid continuation byte\r\n```\n", "before_files": [{"content": "import contextlib\nimport functools\nimport os.path\nimport sys\nimport traceback\nfrom typing import Generator\n\nimport pre_commit.constants as C\nfrom pre_commit import output\nfrom pre_commit.store import Store\n\n\nclass FatalError(RuntimeError):\n pass\n\n\ndef _log_and_exit(msg: str, exc: BaseException, formatted: str) -> None:\n error_msg = f'{msg}: {type(exc).__name__}: {exc}'\n output.write_line(error_msg)\n log_path = os.path.join(Store().directory, 'pre-commit.log')\n output.write_line(f'Check the log at {log_path}')\n\n with open(log_path, 'wb') as log:\n _log_line = functools.partial(output.write_line, stream=log)\n\n _log_line('### version information')\n _log_line()\n _log_line('```')\n _log_line(f'pre-commit version: {C.VERSION}')\n _log_line('sys.version:')\n for line in sys.version.splitlines():\n _log_line(f' {line}')\n _log_line(f'sys.executable: {sys.executable}')\n _log_line(f'os.name: {os.name}')\n _log_line(f'sys.platform: {sys.platform}')\n _log_line('```')\n _log_line()\n\n _log_line('### error information')\n _log_line()\n _log_line('```')\n _log_line(error_msg)\n _log_line('```')\n _log_line()\n _log_line('```')\n _log_line(formatted)\n _log_line('```')\n raise SystemExit(1)\n\n\[email protected]\ndef 
error_handler() -> Generator[None, None, None]:\n try:\n yield\n except (Exception, KeyboardInterrupt) as e:\n if isinstance(e, FatalError):\n msg = 'An error has occurred'\n elif isinstance(e, KeyboardInterrupt):\n msg = 'Interrupted (^C)'\n else:\n msg = 'An unexpected error has occurred'\n _log_and_exit(msg, e, traceback.format_exc())\n", "path": "pre_commit/error_handler.py"}], "after_files": [{"content": "import contextlib\nimport functools\nimport os.path\nimport sys\nimport traceback\nfrom typing import Generator\n\nimport pre_commit.constants as C\nfrom pre_commit import output\nfrom pre_commit.store import Store\n\n\nclass FatalError(RuntimeError):\n pass\n\n\ndef _exception_to_bytes(exc: BaseException) -> bytes:\n with contextlib.suppress(TypeError):\n return bytes(exc) # type: ignore\n with contextlib.suppress(Exception):\n return str(exc).encode()\n return f'<unprintable {type(exc).__name__} object>'.encode()\n\n\ndef _log_and_exit(msg: str, exc: BaseException, formatted: str) -> None:\n error_msg = f'{msg}: {type(exc).__name__}: '.encode()\n error_msg += _exception_to_bytes(exc)\n output.write_line_b(error_msg)\n log_path = os.path.join(Store().directory, 'pre-commit.log')\n output.write_line(f'Check the log at {log_path}')\n\n with open(log_path, 'wb') as log:\n _log_line = functools.partial(output.write_line, stream=log)\n _log_line_b = functools.partial(output.write_line_b, stream=log)\n\n _log_line('### version information')\n _log_line()\n _log_line('```')\n _log_line(f'pre-commit version: {C.VERSION}')\n _log_line('sys.version:')\n for line in sys.version.splitlines():\n _log_line(f' {line}')\n _log_line(f'sys.executable: {sys.executable}')\n _log_line(f'os.name: {os.name}')\n _log_line(f'sys.platform: {sys.platform}')\n _log_line('```')\n _log_line()\n\n _log_line('### error information')\n _log_line()\n _log_line('```')\n _log_line_b(error_msg)\n _log_line('```')\n _log_line()\n _log_line('```')\n _log_line(formatted)\n _log_line('```')\n raise SystemExit(1)\n\n\[email protected]\ndef error_handler() -> Generator[None, None, None]:\n try:\n yield\n except (Exception, KeyboardInterrupt) as e:\n if isinstance(e, FatalError):\n msg = 'An error has occurred'\n elif isinstance(e, KeyboardInterrupt):\n msg = 'Interrupted (^C)'\n else:\n msg = 'An unexpected error has occurred'\n _log_and_exit(msg, e, traceback.format_exc())\n", "path": "pre_commit/error_handler.py"}]}
2,074
369
gh_patches_debug_1924
rasdani/github-patches
git_diff
cobbler__cobbler-1265
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- build_reporting fails if empty string in ignorelist The default configuration in the ubuntu 12.04 cobbler 2.6.5 package has the following in `/etc/settings`: ``` build_reporting_ignorelist = [""] ``` The code that reads this value is in `install_post_report.py`, and the condition that determines whether to send a build report email is: ``` for prefix in settings.build_reporting_ignorelist: if name.lower().startswith(prefix) == True: sendmail = False ``` With the default configuration, this check always succeeds, and **mail is not sent**. Fix the issue by modifying the condition to: ``` if prefix != '' and name.lower().startswith(prefix): ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `cobbler/modules/install_post_report.py` Content: ``` 1 # (c) 2008-2009 2 # Jeff Schroeder <[email protected]> 3 # Michael DeHaan <michael.dehaan AT gmail> 4 # 5 # License: GPLv2+ 6 7 # Post install trigger for cobbler to 8 # send out a pretty email report that 9 # contains target information. 10 11 import distutils.sysconfig 12 import sys 13 import os 14 import traceback 15 16 plib = distutils.sysconfig.get_python_lib() 17 mod_path="%s/cobbler" % plib 18 sys.path.insert(0, mod_path) 19 20 from utils import _ 21 import smtplib 22 import sys 23 import cobbler.templar as templar 24 from cobbler.cexceptions import CX 25 import utils 26 27 def register(): 28 # this pure python trigger acts as if it were a legacy shell-trigger, but is much faster. 29 # the return of this method indicates the trigger type 30 return "/var/lib/cobbler/triggers/install/post/*" 31 32 def run(api, args, logger): 33 # FIXME: make everything use the logger 34 35 settings = api.settings() 36 37 # go no further if this feature is turned off 38 if not str(settings.build_reporting_enabled).lower() in [ "1", "yes", "y", "true"]: 39 return 0 40 41 objtype = args[0] # "target" or "profile" 42 name = args[1] # name of target or profile 43 boot_ip = args[2] # ip or "?" 
44 45 if objtype == "system": 46 target = api.find_system(name) 47 else: 48 target = api.find_profile(name) 49 50 # collapse the object down to a rendered datastructure 51 target = utils.blender(api, False, target) 52 53 if target == {}: 54 raise CX("failure looking up target") 55 56 to_addr = settings.build_reporting_email 57 if to_addr == "": 58 return 0 59 60 # add the ability to specify an MTA for servers that don't run their own 61 smtp_server = settings.build_reporting_smtp_server 62 if smtp_server == "": 63 smtp_server = "localhost" 64 65 # use a custom from address or fall back to a reasonable default 66 from_addr = settings.build_reporting_sender 67 if from_addr == "": 68 from_addr = "cobbler@%s" % settings.server 69 70 subject = settings.build_reporting_subject 71 if subject == "": 72 subject = '[Cobbler] install complete ' 73 74 to_addr = ",".join(to_addr) 75 metadata = { 76 "from_addr" : from_addr, 77 "to_addr" : to_addr, 78 "subject" : subject, 79 "boot_ip" : boot_ip 80 } 81 metadata.update(target) 82 83 input_template = open("/etc/cobbler/reporting/build_report_email.template") 84 input_data = input_template.read() 85 input_template.close() 86 87 message = templar.Templar(api._config).render(input_data, metadata, None) 88 89 # for debug, call 90 # print message 91 92 sendmail = True 93 for prefix in settings.build_reporting_ignorelist: 94 if name.lower().startswith(prefix) == True: 95 sendmail = False 96 97 if sendmail == True: 98 # Send the mail 99 # FIXME: on error, return non-zero 100 server_handle = smtplib.SMTP(smtp_server) 101 server_handle.sendmail(from_addr, to_addr.split(','), message) 102 server_handle.quit() 103 104 return 0 105 106 107 108 109 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/cobbler/modules/install_post_report.py b/cobbler/modules/install_post_report.py --- a/cobbler/modules/install_post_report.py +++ b/cobbler/modules/install_post_report.py @@ -91,7 +91,7 @@ sendmail = True for prefix in settings.build_reporting_ignorelist: - if name.lower().startswith(prefix) == True: + if prefix != '' and name.lower().startswith(prefix): sendmail = False if sendmail == True:
{"golden_diff": "diff --git a/cobbler/modules/install_post_report.py b/cobbler/modules/install_post_report.py\n--- a/cobbler/modules/install_post_report.py\n+++ b/cobbler/modules/install_post_report.py\n@@ -91,7 +91,7 @@\n \n sendmail = True\n for prefix in settings.build_reporting_ignorelist:\n- if name.lower().startswith(prefix) == True:\n+ if prefix != '' and name.lower().startswith(prefix):\n sendmail = False\n \n if sendmail == True:\n", "issue": "build_reporting fails if empty string in ignorelist\nThe default configuration in the ubuntu 12.04 cobbler 2.6.5 package has the following in `/etc/settings`:\n\n```\nbuild_reporting_ignorelist = [\"\"]\n```\n\nThe code that reads this value is in `install_post_report.py`, and the condition that determines whether to send a build report email is:\n\n```\nfor prefix in settings.build_reporting_ignorelist:\n if name.lower().startswith(prefix) == True:\n sendmail = False\n```\n\nWith the default configuration, this check always succeeds, and **mail is not sent**.\n\nFix the issue by modifying the condition to:\n\n```\n if prefix != '' and name.lower().startswith(prefix):\n```\n\n", "before_files": [{"content": "# (c) 2008-2009\n# Jeff Schroeder <[email protected]>\n# Michael DeHaan <michael.dehaan AT gmail>\n#\n# License: GPLv2+\n\n# Post install trigger for cobbler to\n# send out a pretty email report that\n# contains target information.\n\nimport distutils.sysconfig\nimport sys\nimport os\nimport traceback\n\nplib = distutils.sysconfig.get_python_lib()\nmod_path=\"%s/cobbler\" % plib\nsys.path.insert(0, mod_path)\n\nfrom utils import _\nimport smtplib\nimport sys\nimport cobbler.templar as templar\nfrom cobbler.cexceptions import CX\nimport utils\n\ndef register():\n # this pure python trigger acts as if it were a legacy shell-trigger, but is much faster.\n # the return of this method indicates the trigger type\n return \"/var/lib/cobbler/triggers/install/post/*\"\n\ndef run(api, args, logger):\n # FIXME: make everything use the logger\n\n settings = api.settings()\n\n # go no further if this feature is turned off\n if not str(settings.build_reporting_enabled).lower() in [ \"1\", \"yes\", \"y\", \"true\"]:\n return 0\n\n objtype = args[0] # \"target\" or \"profile\"\n name = args[1] # name of target or profile\n boot_ip = args[2] # ip or \"?\"\n\n if objtype == \"system\":\n target = api.find_system(name)\n else:\n target = api.find_profile(name)\n\n # collapse the object down to a rendered datastructure\n target = utils.blender(api, False, target)\n\n if target == {}:\n raise CX(\"failure looking up target\")\n\n to_addr = settings.build_reporting_email\n if to_addr == \"\":\n return 0\n\n # add the ability to specify an MTA for servers that don't run their own\n smtp_server = settings.build_reporting_smtp_server\n if smtp_server == \"\":\n smtp_server = \"localhost\"\n\n # use a custom from address or fall back to a reasonable default\n from_addr = settings.build_reporting_sender\n if from_addr == \"\":\n from_addr = \"cobbler@%s\" % settings.server\n\n subject = settings.build_reporting_subject\n if subject == \"\":\n subject = '[Cobbler] install complete '\n\n to_addr = \",\".join(to_addr)\n metadata = {\n \"from_addr\" : from_addr,\n \"to_addr\" : to_addr,\n \"subject\" : subject,\n \"boot_ip\" : boot_ip\n }\n metadata.update(target)\n\n input_template = open(\"/etc/cobbler/reporting/build_report_email.template\")\n input_data = input_template.read()\n input_template.close()\n\n message = templar.Templar(api._config).render(input_data, 
metadata, None)\n \n # for debug, call\n # print message\n\n sendmail = True\n for prefix in settings.build_reporting_ignorelist:\n if name.lower().startswith(prefix) == True:\n sendmail = False\n\n if sendmail == True:\n # Send the mail\n # FIXME: on error, return non-zero\n server_handle = smtplib.SMTP(smtp_server)\n server_handle.sendmail(from_addr, to_addr.split(','), message)\n server_handle.quit()\n\n return 0\n\n\n\n\n", "path": "cobbler/modules/install_post_report.py"}], "after_files": [{"content": "# (c) 2008-2009\n# Jeff Schroeder <[email protected]>\n# Michael DeHaan <michael.dehaan AT gmail>\n#\n# License: GPLv2+\n\n# Post install trigger for cobbler to\n# send out a pretty email report that\n# contains target information.\n\nimport distutils.sysconfig\nimport sys\nimport os\nimport traceback\n\nplib = distutils.sysconfig.get_python_lib()\nmod_path=\"%s/cobbler\" % plib\nsys.path.insert(0, mod_path)\n\nfrom utils import _\nimport smtplib\nimport sys\nimport cobbler.templar as templar\nfrom cobbler.cexceptions import CX\nimport utils\n\ndef register():\n # this pure python trigger acts as if it were a legacy shell-trigger, but is much faster.\n # the return of this method indicates the trigger type\n return \"/var/lib/cobbler/triggers/install/post/*\"\n\ndef run(api, args, logger):\n # FIXME: make everything use the logger\n\n settings = api.settings()\n\n # go no further if this feature is turned off\n if not str(settings.build_reporting_enabled).lower() in [ \"1\", \"yes\", \"y\", \"true\"]:\n return 0\n\n objtype = args[0] # \"target\" or \"profile\"\n name = args[1] # name of target or profile\n boot_ip = args[2] # ip or \"?\"\n\n if objtype == \"system\":\n target = api.find_system(name)\n else:\n target = api.find_profile(name)\n\n # collapse the object down to a rendered datastructure\n target = utils.blender(api, False, target)\n\n if target == {}:\n raise CX(\"failure looking up target\")\n\n to_addr = settings.build_reporting_email\n if to_addr == \"\":\n return 0\n\n # add the ability to specify an MTA for servers that don't run their own\n smtp_server = settings.build_reporting_smtp_server\n if smtp_server == \"\":\n smtp_server = \"localhost\"\n\n # use a custom from address or fall back to a reasonable default\n from_addr = settings.build_reporting_sender\n if from_addr == \"\":\n from_addr = \"cobbler@%s\" % settings.server\n\n subject = settings.build_reporting_subject\n if subject == \"\":\n subject = '[Cobbler] install complete '\n\n to_addr = \",\".join(to_addr)\n metadata = {\n \"from_addr\" : from_addr,\n \"to_addr\" : to_addr,\n \"subject\" : subject,\n \"boot_ip\" : boot_ip\n }\n metadata.update(target)\n\n input_template = open(\"/etc/cobbler/reporting/build_report_email.template\")\n input_data = input_template.read()\n input_template.close()\n\n message = templar.Templar(api._config).render(input_data, metadata, None)\n \n # for debug, call\n # print message\n\n sendmail = True\n for prefix in settings.build_reporting_ignorelist:\n if prefix != '' and name.lower().startswith(prefix):\n sendmail = False\n\n if sendmail == True:\n # Send the mail\n # FIXME: on error, return non-zero\n server_handle = smtplib.SMTP(smtp_server)\n server_handle.sendmail(from_addr, to_addr.split(','), message)\n server_handle.quit()\n\n return 0\n\n\n\n\n", "path": "cobbler/modules/install_post_report.py"}]}
1,390
111
gh_patches_debug_6862
rasdani/github-patches
git_diff
doccano__doccano-1654
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- I can't add members in the Django admin page. I can't add members in the Django admin page. steps - Add a member in the admin page (click a SAVE button). - <img width="1273" alt="スクリーンショット 2022-01-27 9 52 17" src="https://user-images.githubusercontent.com/20487308/151271702-bf60ae7e-f131-45fe-8314-e7726e90f90c.png"> - However, I get a 500 error. - <img width="1085" alt="スクリーンショット 2022-01-27 9 53 08" src="https://user-images.githubusercontent.com/20487308/151271872-c3fa75e8-c491-4aff-b88e-c9d970406ede.png"> - The endpoints of the POST request are different between admin page and member page. - `POST /admin/members/member/add/` - `POST /v1/projects/1/members` Environment --------- doccano v1.5.5 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `backend/members/models.py` Content: ``` 1 from django.conf import settings 2 from django.contrib.auth.models import User 3 from django.core.exceptions import ValidationError 4 from django.db import models 5 6 from django.db.models import Manager 7 8 from api.models import Project 9 from roles.models import Role 10 11 12 class MemberManager(Manager): 13 14 def can_update(self, project: int, member_id: int, new_role: str) -> bool: 15 """The project needs at least 1 admin. 16 17 Args: 18 project: The project id. 19 member_id: The member id. 20 new_role: The new role name. 21 22 Returns: 23 Whether the mapping can be updated or not. 24 """ 25 queryset = self.filter( 26 project=project, role__name=settings.ROLE_PROJECT_ADMIN 27 ) 28 if queryset.count() > 1: 29 return True 30 else: 31 admin = queryset.first() 32 # we can change the role except for the only admin. 33 return admin.id != member_id or new_role == settings.ROLE_PROJECT_ADMIN 34 35 def has_role(self, project_id: int, user: User, role_name: str): 36 return self.filter(project=project_id, user=user, role__name=role_name).exists() 37 38 39 class Member(models.Model): 40 user = models.ForeignKey( 41 to=User, 42 on_delete=models.CASCADE, 43 related_name='role_mappings' 44 ) 45 project = models.ForeignKey( 46 to=Project, 47 on_delete=models.CASCADE, 48 related_name='role_mappings' 49 ) 50 role = models.ForeignKey( 51 to=Role, 52 on_delete=models.CASCADE 53 ) 54 created_at = models.DateTimeField(auto_now_add=True) 55 updated_at = models.DateTimeField(auto_now=True) 56 objects = MemberManager() 57 58 def clean(self): 59 members = self.objects.exclude(id=self.id) 60 if members.filter(user=self.user, project=self.project).exists(): 61 message = 'This user is already assigned to a role in this project.' 62 raise ValidationError(message) 63 64 @property 65 def username(self): 66 return self.user.username 67 68 class Meta: 69 unique_together = ('user', 'project') 70 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/backend/members/models.py b/backend/members/models.py --- a/backend/members/models.py +++ b/backend/members/models.py @@ -56,7 +56,7 @@ objects = MemberManager() def clean(self): - members = self.objects.exclude(id=self.id) + members = self.__class__.objects.exclude(id=self.id) if members.filter(user=self.user, project=self.project).exists(): message = 'This user is already assigned to a role in this project.' raise ValidationError(message)
{"golden_diff": "diff --git a/backend/members/models.py b/backend/members/models.py\n--- a/backend/members/models.py\n+++ b/backend/members/models.py\n@@ -56,7 +56,7 @@\n objects = MemberManager()\n \n def clean(self):\n- members = self.objects.exclude(id=self.id)\n+ members = self.__class__.objects.exclude(id=self.id)\n if members.filter(user=self.user, project=self.project).exists():\n message = 'This user is already assigned to a role in this project.'\n raise ValidationError(message)\n", "issue": "I can't add members in the Django admin page.\nI can't add members in the Django admin page.\r\n\r\nsteps\r\n- Add a member in the admin page (click a SAVE button).\r\n - <img width=\"1273\" alt=\"\u30b9\u30af\u30ea\u30fc\u30f3\u30b7\u30e7\u30c3\u30c8 2022-01-27 9 52 17\" src=\"https://user-images.githubusercontent.com/20487308/151271702-bf60ae7e-f131-45fe-8314-e7726e90f90c.png\">\r\n- However, I get a 500 error.\r\n - <img width=\"1085\" alt=\"\u30b9\u30af\u30ea\u30fc\u30f3\u30b7\u30e7\u30c3\u30c8 2022-01-27 9 53 08\" src=\"https://user-images.githubusercontent.com/20487308/151271872-c3fa75e8-c491-4aff-b88e-c9d970406ede.png\">\r\n- The endpoints of the POST request are different between admin page and member page.\r\n - `POST /admin/members/member/add/`\r\n - `POST /v1/projects/1/members`\r\n\r\nEnvironment\r\n---------\r\ndoccano v1.5.5\r\n\n", "before_files": [{"content": "from django.conf import settings\nfrom django.contrib.auth.models import User\nfrom django.core.exceptions import ValidationError\nfrom django.db import models\n\nfrom django.db.models import Manager\n\nfrom api.models import Project\nfrom roles.models import Role\n\n\nclass MemberManager(Manager):\n\n def can_update(self, project: int, member_id: int, new_role: str) -> bool:\n \"\"\"The project needs at least 1 admin.\n\n Args:\n project: The project id.\n member_id: The member id.\n new_role: The new role name.\n\n Returns:\n Whether the mapping can be updated or not.\n \"\"\"\n queryset = self.filter(\n project=project, role__name=settings.ROLE_PROJECT_ADMIN\n )\n if queryset.count() > 1:\n return True\n else:\n admin = queryset.first()\n # we can change the role except for the only admin.\n return admin.id != member_id or new_role == settings.ROLE_PROJECT_ADMIN\n\n def has_role(self, project_id: int, user: User, role_name: str):\n return self.filter(project=project_id, user=user, role__name=role_name).exists()\n\n\nclass Member(models.Model):\n user = models.ForeignKey(\n to=User,\n on_delete=models.CASCADE,\n related_name='role_mappings'\n )\n project = models.ForeignKey(\n to=Project,\n on_delete=models.CASCADE,\n related_name='role_mappings'\n )\n role = models.ForeignKey(\n to=Role,\n on_delete=models.CASCADE\n )\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n objects = MemberManager()\n\n def clean(self):\n members = self.objects.exclude(id=self.id)\n if members.filter(user=self.user, project=self.project).exists():\n message = 'This user is already assigned to a role in this project.'\n raise ValidationError(message)\n\n @property\n def username(self):\n return self.user.username\n\n class Meta:\n unique_together = ('user', 'project')\n", "path": "backend/members/models.py"}], "after_files": [{"content": "from django.conf import settings\nfrom django.contrib.auth.models import User\nfrom django.core.exceptions import ValidationError\nfrom django.db import models\n\nfrom django.db.models import Manager\n\nfrom api.models import Project\nfrom 
roles.models import Role\n\n\nclass MemberManager(Manager):\n\n def can_update(self, project: int, member_id: int, new_role: str) -> bool:\n \"\"\"The project needs at least 1 admin.\n\n Args:\n project: The project id.\n member_id: The member id.\n new_role: The new role name.\n\n Returns:\n Whether the mapping can be updated or not.\n \"\"\"\n queryset = self.filter(\n project=project, role__name=settings.ROLE_PROJECT_ADMIN\n )\n if queryset.count() > 1:\n return True\n else:\n admin = queryset.first()\n # we can change the role except for the only admin.\n return admin.id != member_id or new_role == settings.ROLE_PROJECT_ADMIN\n\n def has_role(self, project_id: int, user: User, role_name: str):\n return self.filter(project=project_id, user=user, role__name=role_name).exists()\n\n\nclass Member(models.Model):\n user = models.ForeignKey(\n to=User,\n on_delete=models.CASCADE,\n related_name='role_mappings'\n )\n project = models.ForeignKey(\n to=Project,\n on_delete=models.CASCADE,\n related_name='role_mappings'\n )\n role = models.ForeignKey(\n to=Role,\n on_delete=models.CASCADE\n )\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n objects = MemberManager()\n\n def clean(self):\n members = self.__class__.objects.exclude(id=self.id)\n if members.filter(user=self.user, project=self.project).exists():\n message = 'This user is already assigned to a role in this project.'\n raise ValidationError(message)\n\n @property\n def username(self):\n return self.user.username\n\n class Meta:\n unique_together = ('user', 'project')\n", "path": "backend/members/models.py"}]}
1,128
116
gh_patches_debug_29562
rasdani/github-patches
git_diff
open-telemetry__opentelemetry-python-contrib-469
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- sqlalchemy: TypeError: cannot create weak reference to 'pyodbc.Cursor' object **Describe your environment** See the repo provided below for a simple reproduction environment. I was using Python 3.8.8 when running it locally. **Steps to reproduce** Provided a reproduction repo: https://github.com/jomasti/opentelemetry-instrumentation-sqlalchemy-pyodbc-bug **What is the expected behavior?** I expected the query to work successfully when the engine is instrumented. The code in the repo above works fine with 0.18b1. **What is the actual behavior?** Ran into `TypeError: cannot create weak reference to 'pyodbc.Cursor' object`. **Additional context** #315 appears to be the culprit. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `instrumentation/opentelemetry-instrumentation-sqlalchemy/src/opentelemetry/instrumentation/sqlalchemy/engine.py` Content: ``` 1 # Copyright The OpenTelemetry Authors 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 from threading import local 16 from weakref import WeakKeyDictionary 17 18 from sqlalchemy.event import listen # pylint: disable=no-name-in-module 19 20 from opentelemetry import trace 21 from opentelemetry.instrumentation.sqlalchemy.version import __version__ 22 from opentelemetry.semconv.trace import SpanAttributes 23 from opentelemetry.trace.status import Status, StatusCode 24 25 26 def _normalize_vendor(vendor): 27 """Return a canonical name for a type of database.""" 28 if not vendor: 29 return "db" # should this ever happen? 30 31 if "sqlite" in vendor: 32 return "sqlite" 33 34 if "postgres" in vendor or vendor == "psycopg2": 35 return "postgresql" 36 37 return vendor 38 39 40 def _get_tracer(engine, tracer_provider=None): 41 return trace.get_tracer( 42 _normalize_vendor(engine.name), 43 __version__, 44 tracer_provider=tracer_provider, 45 ) 46 47 48 # pylint: disable=unused-argument 49 def _wrap_create_engine(func, module, args, kwargs): 50 """Trace the SQLAlchemy engine, creating an `EngineTracer` 51 object that will listen to SQLAlchemy events. 
52 """ 53 engine = func(*args, **kwargs) 54 EngineTracer(_get_tracer(engine), engine) 55 return engine 56 57 58 class EngineTracer: 59 def __init__(self, tracer, engine): 60 self.tracer = tracer 61 self.engine = engine 62 self.vendor = _normalize_vendor(engine.name) 63 self.cursor_mapping = WeakKeyDictionary() 64 self.local = local() 65 66 listen(engine, "before_cursor_execute", self._before_cur_exec) 67 listen(engine, "after_cursor_execute", self._after_cur_exec) 68 listen(engine, "handle_error", self._handle_error) 69 70 @property 71 def current_thread_span(self): 72 return getattr(self.local, "current_span", None) 73 74 @current_thread_span.setter 75 def current_thread_span(self, span): 76 setattr(self.local, "current_span", span) 77 78 def _operation_name(self, db_name, statement): 79 parts = [] 80 if isinstance(statement, str): 81 # otel spec recommends against parsing SQL queries. We are not trying to parse SQL 82 # but simply truncating the statement to the first word. This covers probably >95% 83 # use cases and uses the SQL statement in span name correctly as per the spec. 84 # For some very special cases it might not record the correct statement if the SQL 85 # dialect is too weird but in any case it shouldn't break anything. 86 parts.append(statement.split()[0]) 87 if db_name: 88 parts.append(db_name) 89 if not parts: 90 return self.vendor 91 return " ".join(parts) 92 93 # pylint: disable=unused-argument 94 def _before_cur_exec(self, conn, cursor, statement, *args): 95 attrs, found = _get_attributes_from_url(conn.engine.url) 96 if not found: 97 attrs = _get_attributes_from_cursor(self.vendor, cursor, attrs) 98 99 db_name = attrs.get(SpanAttributes.DB_NAME, "") 100 span = self.tracer.start_span( 101 self._operation_name(db_name, statement), 102 kind=trace.SpanKind.CLIENT, 103 ) 104 self.current_thread_span = self.cursor_mapping[cursor] = span 105 with trace.use_span(span, end_on_exit=False): 106 if span.is_recording(): 107 span.set_attribute(SpanAttributes.DB_STATEMENT, statement) 108 span.set_attribute(SpanAttributes.DB_SYSTEM, self.vendor) 109 for key, value in attrs.items(): 110 span.set_attribute(key, value) 111 112 # pylint: disable=unused-argument 113 def _after_cur_exec(self, conn, cursor, statement, *args): 114 span = self.cursor_mapping.get(cursor, None) 115 if span is None: 116 return 117 118 span.end() 119 120 def _handle_error(self, context): 121 span = self.current_thread_span 122 if span is None: 123 return 124 125 try: 126 if span.is_recording(): 127 span.set_status( 128 Status(StatusCode.ERROR, str(context.original_exception),) 129 ) 130 finally: 131 span.end() 132 133 134 def _get_attributes_from_url(url): 135 """Set connection tags from the url. 
return true if successful.""" 136 attrs = {} 137 if url.host: 138 attrs[SpanAttributes.NET_PEER_NAME] = url.host 139 if url.port: 140 attrs[SpanAttributes.NET_PEER_PORT] = url.port 141 if url.database: 142 attrs[SpanAttributes.DB_NAME] = url.database 143 if url.username: 144 attrs[SpanAttributes.DB_USER] = url.username 145 return attrs, bool(url.host) 146 147 148 def _get_attributes_from_cursor(vendor, cursor, attrs): 149 """Attempt to set db connection attributes by introspecting the cursor.""" 150 if vendor == "postgresql": 151 # pylint: disable=import-outside-toplevel 152 from psycopg2.extensions import parse_dsn 153 154 if hasattr(cursor, "connection") and hasattr(cursor.connection, "dsn"): 155 dsn = getattr(cursor.connection, "dsn", None) 156 if dsn: 157 data = parse_dsn(dsn) 158 attrs[SpanAttributes.DB_NAME] = data.get("dbname") 159 attrs[SpanAttributes.NET_PEER_NAME] = data.get("host") 160 attrs[SpanAttributes.NET_PEER_PORT] = int(data.get("port")) 161 return attrs 162 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/instrumentation/opentelemetry-instrumentation-sqlalchemy/src/opentelemetry/instrumentation/sqlalchemy/engine.py b/instrumentation/opentelemetry-instrumentation-sqlalchemy/src/opentelemetry/instrumentation/sqlalchemy/engine.py --- a/instrumentation/opentelemetry-instrumentation-sqlalchemy/src/opentelemetry/instrumentation/sqlalchemy/engine.py +++ b/instrumentation/opentelemetry-instrumentation-sqlalchemy/src/opentelemetry/instrumentation/sqlalchemy/engine.py @@ -13,7 +13,6 @@ # limitations under the License. from threading import local -from weakref import WeakKeyDictionary from sqlalchemy.event import listen # pylint: disable=no-name-in-module @@ -60,7 +59,7 @@ self.tracer = tracer self.engine = engine self.vendor = _normalize_vendor(engine.name) - self.cursor_mapping = WeakKeyDictionary() + self.cursor_mapping = {} self.local = local() listen(engine, "before_cursor_execute", self._before_cur_exec) @@ -116,6 +115,7 @@ return span.end() + self._cleanup(cursor) def _handle_error(self, context): span = self.current_thread_span @@ -129,6 +129,13 @@ ) finally: span.end() + self._cleanup(context.cursor) + + def _cleanup(self, cursor): + try: + del self.cursor_mapping[cursor] + except KeyError: + pass def _get_attributes_from_url(url):
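The patch above replaces the `WeakKeyDictionary` keyed on DB-API cursors with a plain dict plus explicit cleanup in `_after_cur_exec` and `_handle_error`. A `WeakKeyDictionary` must take a weak reference to every key, and pyodbc's `Cursor` is a C extension type with no weak-reference support, so the first `self.cursor_mapping[cursor] = span` assignment in `_before_cur_exec` raised the reported `TypeError`. The failure can be reproduced without pyodbc; `SlotsCursor` below is a hypothetical stand-in, since a `__slots__` class likewise lacks a `__weakref__` slot:

```python
import weakref


class SlotsCursor:
    """Stand-in for pyodbc.Cursor: no __dict__ and no __weakref__ slot."""
    __slots__ = ('name',)


cursor = SlotsCursor()

mapping = weakref.WeakKeyDictionary()
try:
    mapping[cursor] = 'span'      # what storing the span keyed on the cursor amounted to
except TypeError as exc:
    print(exc)                    # cannot create weak reference to 'SlotsCursor' object

cursor_mapping = {}               # the patched approach: an ordinary dict ...
cursor_mapping[cursor] = 'span'
cursor_mapping.pop(cursor, None)  # ... cleaned up explicitly once the span has ended
```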
{"golden_diff": "diff --git a/instrumentation/opentelemetry-instrumentation-sqlalchemy/src/opentelemetry/instrumentation/sqlalchemy/engine.py b/instrumentation/opentelemetry-instrumentation-sqlalchemy/src/opentelemetry/instrumentation/sqlalchemy/engine.py\n--- a/instrumentation/opentelemetry-instrumentation-sqlalchemy/src/opentelemetry/instrumentation/sqlalchemy/engine.py\n+++ b/instrumentation/opentelemetry-instrumentation-sqlalchemy/src/opentelemetry/instrumentation/sqlalchemy/engine.py\n@@ -13,7 +13,6 @@\n # limitations under the License.\n \n from threading import local\n-from weakref import WeakKeyDictionary\n \n from sqlalchemy.event import listen # pylint: disable=no-name-in-module\n \n@@ -60,7 +59,7 @@\n self.tracer = tracer\n self.engine = engine\n self.vendor = _normalize_vendor(engine.name)\n- self.cursor_mapping = WeakKeyDictionary()\n+ self.cursor_mapping = {}\n self.local = local()\n \n listen(engine, \"before_cursor_execute\", self._before_cur_exec)\n@@ -116,6 +115,7 @@\n return\n \n span.end()\n+ self._cleanup(cursor)\n \n def _handle_error(self, context):\n span = self.current_thread_span\n@@ -129,6 +129,13 @@\n )\n finally:\n span.end()\n+ self._cleanup(context.cursor)\n+\n+ def _cleanup(self, cursor):\n+ try:\n+ del self.cursor_mapping[cursor]\n+ except KeyError:\n+ pass\n \n \n def _get_attributes_from_url(url):\n", "issue": "sqlalchemy: TypeError: cannot create weak reference to 'pyodbc.Cursor' object\n**Describe your environment** \r\nSee the repo provided below for a simple reproduction environment. I was using Python 3.8.8 when running it locally.\r\n\r\n**Steps to reproduce**\r\nProvided a reproduction repo: https://github.com/jomasti/opentelemetry-instrumentation-sqlalchemy-pyodbc-bug\r\n\r\n**What is the expected behavior?**\r\nI expected the query to work successfully when the engine is instrumented. 
The code in the repo above works fine with 0.18b1.\r\n\r\n**What is the actual behavior?**\r\nRan into `TypeError: cannot create weak reference to 'pyodbc.Cursor' object`.\r\n\r\n**Additional context**\r\n#315 appears to be the culprit.\r\n\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom threading import local\nfrom weakref import WeakKeyDictionary\n\nfrom sqlalchemy.event import listen # pylint: disable=no-name-in-module\n\nfrom opentelemetry import trace\nfrom opentelemetry.instrumentation.sqlalchemy.version import __version__\nfrom opentelemetry.semconv.trace import SpanAttributes\nfrom opentelemetry.trace.status import Status, StatusCode\n\n\ndef _normalize_vendor(vendor):\n \"\"\"Return a canonical name for a type of database.\"\"\"\n if not vendor:\n return \"db\" # should this ever happen?\n\n if \"sqlite\" in vendor:\n return \"sqlite\"\n\n if \"postgres\" in vendor or vendor == \"psycopg2\":\n return \"postgresql\"\n\n return vendor\n\n\ndef _get_tracer(engine, tracer_provider=None):\n return trace.get_tracer(\n _normalize_vendor(engine.name),\n __version__,\n tracer_provider=tracer_provider,\n )\n\n\n# pylint: disable=unused-argument\ndef _wrap_create_engine(func, module, args, kwargs):\n \"\"\"Trace the SQLAlchemy engine, creating an `EngineTracer`\n object that will listen to SQLAlchemy events.\n \"\"\"\n engine = func(*args, **kwargs)\n EngineTracer(_get_tracer(engine), engine)\n return engine\n\n\nclass EngineTracer:\n def __init__(self, tracer, engine):\n self.tracer = tracer\n self.engine = engine\n self.vendor = _normalize_vendor(engine.name)\n self.cursor_mapping = WeakKeyDictionary()\n self.local = local()\n\n listen(engine, \"before_cursor_execute\", self._before_cur_exec)\n listen(engine, \"after_cursor_execute\", self._after_cur_exec)\n listen(engine, \"handle_error\", self._handle_error)\n\n @property\n def current_thread_span(self):\n return getattr(self.local, \"current_span\", None)\n\n @current_thread_span.setter\n def current_thread_span(self, span):\n setattr(self.local, \"current_span\", span)\n\n def _operation_name(self, db_name, statement):\n parts = []\n if isinstance(statement, str):\n # otel spec recommends against parsing SQL queries. We are not trying to parse SQL\n # but simply truncating the statement to the first word. 
This covers probably >95%\n # use cases and uses the SQL statement in span name correctly as per the spec.\n # For some very special cases it might not record the correct statement if the SQL\n # dialect is too weird but in any case it shouldn't break anything.\n parts.append(statement.split()[0])\n if db_name:\n parts.append(db_name)\n if not parts:\n return self.vendor\n return \" \".join(parts)\n\n # pylint: disable=unused-argument\n def _before_cur_exec(self, conn, cursor, statement, *args):\n attrs, found = _get_attributes_from_url(conn.engine.url)\n if not found:\n attrs = _get_attributes_from_cursor(self.vendor, cursor, attrs)\n\n db_name = attrs.get(SpanAttributes.DB_NAME, \"\")\n span = self.tracer.start_span(\n self._operation_name(db_name, statement),\n kind=trace.SpanKind.CLIENT,\n )\n self.current_thread_span = self.cursor_mapping[cursor] = span\n with trace.use_span(span, end_on_exit=False):\n if span.is_recording():\n span.set_attribute(SpanAttributes.DB_STATEMENT, statement)\n span.set_attribute(SpanAttributes.DB_SYSTEM, self.vendor)\n for key, value in attrs.items():\n span.set_attribute(key, value)\n\n # pylint: disable=unused-argument\n def _after_cur_exec(self, conn, cursor, statement, *args):\n span = self.cursor_mapping.get(cursor, None)\n if span is None:\n return\n\n span.end()\n\n def _handle_error(self, context):\n span = self.current_thread_span\n if span is None:\n return\n\n try:\n if span.is_recording():\n span.set_status(\n Status(StatusCode.ERROR, str(context.original_exception),)\n )\n finally:\n span.end()\n\n\ndef _get_attributes_from_url(url):\n \"\"\"Set connection tags from the url. return true if successful.\"\"\"\n attrs = {}\n if url.host:\n attrs[SpanAttributes.NET_PEER_NAME] = url.host\n if url.port:\n attrs[SpanAttributes.NET_PEER_PORT] = url.port\n if url.database:\n attrs[SpanAttributes.DB_NAME] = url.database\n if url.username:\n attrs[SpanAttributes.DB_USER] = url.username\n return attrs, bool(url.host)\n\n\ndef _get_attributes_from_cursor(vendor, cursor, attrs):\n \"\"\"Attempt to set db connection attributes by introspecting the cursor.\"\"\"\n if vendor == \"postgresql\":\n # pylint: disable=import-outside-toplevel\n from psycopg2.extensions import parse_dsn\n\n if hasattr(cursor, \"connection\") and hasattr(cursor.connection, \"dsn\"):\n dsn = getattr(cursor.connection, \"dsn\", None)\n if dsn:\n data = parse_dsn(dsn)\n attrs[SpanAttributes.DB_NAME] = data.get(\"dbname\")\n attrs[SpanAttributes.NET_PEER_NAME] = data.get(\"host\")\n attrs[SpanAttributes.NET_PEER_PORT] = int(data.get(\"port\"))\n return attrs\n", "path": "instrumentation/opentelemetry-instrumentation-sqlalchemy/src/opentelemetry/instrumentation/sqlalchemy/engine.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom threading import local\n\nfrom sqlalchemy.event import listen # pylint: disable=no-name-in-module\n\nfrom opentelemetry import trace\nfrom opentelemetry.instrumentation.sqlalchemy.version 
import __version__\nfrom opentelemetry.semconv.trace import SpanAttributes\nfrom opentelemetry.trace.status import Status, StatusCode\n\n\ndef _normalize_vendor(vendor):\n \"\"\"Return a canonical name for a type of database.\"\"\"\n if not vendor:\n return \"db\" # should this ever happen?\n\n if \"sqlite\" in vendor:\n return \"sqlite\"\n\n if \"postgres\" in vendor or vendor == \"psycopg2\":\n return \"postgresql\"\n\n return vendor\n\n\ndef _get_tracer(engine, tracer_provider=None):\n return trace.get_tracer(\n _normalize_vendor(engine.name),\n __version__,\n tracer_provider=tracer_provider,\n )\n\n\n# pylint: disable=unused-argument\ndef _wrap_create_engine(func, module, args, kwargs):\n \"\"\"Trace the SQLAlchemy engine, creating an `EngineTracer`\n object that will listen to SQLAlchemy events.\n \"\"\"\n engine = func(*args, **kwargs)\n EngineTracer(_get_tracer(engine), engine)\n return engine\n\n\nclass EngineTracer:\n def __init__(self, tracer, engine):\n self.tracer = tracer\n self.engine = engine\n self.vendor = _normalize_vendor(engine.name)\n self.cursor_mapping = {}\n self.local = local()\n\n listen(engine, \"before_cursor_execute\", self._before_cur_exec)\n listen(engine, \"after_cursor_execute\", self._after_cur_exec)\n listen(engine, \"handle_error\", self._handle_error)\n\n @property\n def current_thread_span(self):\n return getattr(self.local, \"current_span\", None)\n\n @current_thread_span.setter\n def current_thread_span(self, span):\n setattr(self.local, \"current_span\", span)\n\n def _operation_name(self, db_name, statement):\n parts = []\n if isinstance(statement, str):\n # otel spec recommends against parsing SQL queries. We are not trying to parse SQL\n # but simply truncating the statement to the first word. This covers probably >95%\n # use cases and uses the SQL statement in span name correctly as per the spec.\n # For some very special cases it might not record the correct statement if the SQL\n # dialect is too weird but in any case it shouldn't break anything.\n parts.append(statement.split()[0])\n if db_name:\n parts.append(db_name)\n if not parts:\n return self.vendor\n return \" \".join(parts)\n\n # pylint: disable=unused-argument\n def _before_cur_exec(self, conn, cursor, statement, *args):\n attrs, found = _get_attributes_from_url(conn.engine.url)\n if not found:\n attrs = _get_attributes_from_cursor(self.vendor, cursor, attrs)\n\n db_name = attrs.get(SpanAttributes.DB_NAME, \"\")\n span = self.tracer.start_span(\n self._operation_name(db_name, statement),\n kind=trace.SpanKind.CLIENT,\n )\n self.current_thread_span = self.cursor_mapping[cursor] = span\n with trace.use_span(span, end_on_exit=False):\n if span.is_recording():\n span.set_attribute(SpanAttributes.DB_STATEMENT, statement)\n span.set_attribute(SpanAttributes.DB_SYSTEM, self.vendor)\n for key, value in attrs.items():\n span.set_attribute(key, value)\n\n # pylint: disable=unused-argument\n def _after_cur_exec(self, conn, cursor, statement, *args):\n span = self.cursor_mapping.get(cursor, None)\n if span is None:\n return\n\n span.end()\n self._cleanup(cursor)\n\n def _handle_error(self, context):\n span = self.current_thread_span\n if span is None:\n return\n\n try:\n if span.is_recording():\n span.set_status(\n Status(StatusCode.ERROR, str(context.original_exception),)\n )\n finally:\n span.end()\n self._cleanup(context.cursor)\n\n def _cleanup(self, cursor):\n try:\n del self.cursor_mapping[cursor]\n except KeyError:\n pass\n\n\ndef _get_attributes_from_url(url):\n \"\"\"Set connection 
tags from the url. return true if successful.\"\"\"\n attrs = {}\n if url.host:\n attrs[SpanAttributes.NET_PEER_NAME] = url.host\n if url.port:\n attrs[SpanAttributes.NET_PEER_PORT] = url.port\n if url.database:\n attrs[SpanAttributes.DB_NAME] = url.database\n if url.username:\n attrs[SpanAttributes.DB_USER] = url.username\n return attrs, bool(url.host)\n\n\ndef _get_attributes_from_cursor(vendor, cursor, attrs):\n \"\"\"Attempt to set db connection attributes by introspecting the cursor.\"\"\"\n if vendor == \"postgresql\":\n # pylint: disable=import-outside-toplevel\n from psycopg2.extensions import parse_dsn\n\n if hasattr(cursor, \"connection\") and hasattr(cursor.connection, \"dsn\"):\n dsn = getattr(cursor.connection, \"dsn\", None)\n if dsn:\n data = parse_dsn(dsn)\n attrs[SpanAttributes.DB_NAME] = data.get(\"dbname\")\n attrs[SpanAttributes.NET_PEER_NAME] = data.get(\"host\")\n attrs[SpanAttributes.NET_PEER_PORT] = int(data.get(\"port\"))\n return attrs\n", "path": "instrumentation/opentelemetry-instrumentation-sqlalchemy/src/opentelemetry/instrumentation/sqlalchemy/engine.py"}]}
2,074
344
gh_patches_debug_31363
rasdani/github-patches
git_diff
nextcloud__appstore-655
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- add "Featured" category In order to find "Featured" labeled apps better, there should be a separate category, making all official apps accessible on one screen. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `nextcloudappstore/core/views.py` Content: ``` 1 from urllib.parse import urlencode 2 3 from django.conf import settings 4 from django.contrib.auth.mixins import LoginRequiredMixin 5 from django.contrib.auth.models import User 6 from django.core.exceptions import ObjectDoesNotExist 7 from django.db.models import Q 8 from django.http import HttpResponse 9 from django.shortcuts import get_object_or_404, redirect 10 from django.utils.functional import cached_property 11 from django.utils.translation import get_language, get_language_info 12 from django.views.decorators.http import etag 13 from django.views.generic.base import TemplateView 14 from django.views.generic.detail import DetailView 15 from django.views.generic.list import ListView 16 from rest_framework.generics import ListAPIView 17 from semantic_version import Version 18 19 from nextcloudappstore.core.caching import app_etag 20 from nextcloudappstore.core.facades import flatmap 21 from nextcloudappstore.core.forms import AppRatingForm, AppReleaseUploadForm, \ 22 AppRegisterForm 23 from nextcloudappstore.core.models import App, Category, AppRating 24 from nextcloudappstore.core.serializers import AppRatingSerializer 25 from nextcloudappstore.core.versioning import pad_min_version 26 27 28 @etag(app_etag) 29 def app_description(request, id): 30 app = get_object_or_404(App, id=id) 31 return HttpResponse(app.description, content_type='text/plain') 32 33 34 class AppRatingApi(ListAPIView): 35 serializer_class = AppRatingSerializer 36 37 def get_queryset(self): 38 id = self.kwargs.get('id') 39 lang = self.request.GET.get('lang', self.request.LANGUAGE_CODE) 40 app = get_object_or_404(App, id=id) 41 queryset = AppRating.objects.language(lang).filter(app=app) 42 43 current_user = self.request.GET.get('current_user', 'false') 44 if current_user == 'true': 45 return queryset.filter(user=self.request.user) 46 else: 47 return queryset 48 49 50 class AppDetailView(DetailView): 51 queryset = App.objects.prefetch_related( 52 'releases', 53 'screenshots', 54 'co_maintainers', 55 'translations', 56 ).select_related('owner') 57 template_name = 'app/detail.html' 58 slug_field = 'id' 59 slug_url_kwarg = 'id' 60 61 def post(self, request, id): 62 form = AppRatingForm(request.POST, id=id, user=request.user) 63 # there is no way that a rating can be invalid by default 64 if form.is_valid() and request.user.is_authenticated: 65 form.save() 66 return redirect('app-detail', id=id) 67 68 def get_context_data(self, **kwargs): 69 context = super().get_context_data(**kwargs) 70 context['DISCOURSE_URL'] = settings.DISCOURSE_URL.rstrip('/') 71 context['rating_form'] = AppRatingForm( 72 initial={'language_code': get_language()}) 73 74 ratings = AppRating.objects.filter(app=context['app']) 75 rating_languages = flatmap( 76 lambda r: r.get_available_languages(), ratings) 77 78 # make sure current session language is in the list even if there are 79 # no comments. 
80 rating_languages = list(rating_languages) 81 if get_language() not in rating_languages: 82 rating_languages.append(get_language()) 83 84 context['languages'] = set(sorted(rating_languages)) 85 context['fallbackLang'] = 'en' if 'en' in context['languages'] else '' 86 context['user_has_rated_app'] = False 87 if self.request.user.is_authenticated: 88 try: 89 app_rating = AppRating.objects.get(user=self.request.user, 90 app=context['app']) 91 92 # if parler fallsback to a fallback language 93 # it doesn't set the language as current language 94 # and we can't select the correct language in the 95 # frontend. So we try and find a languge that is 96 # available 97 language_code = app_rating.get_current_language() 98 if not app_rating.has_translation(language_code): 99 for fallback in app_rating.get_fallback_languages(): 100 if app_rating.has_translation(fallback): 101 app_rating.set_current_language(fallback) 102 103 # when accessing an empty comment django-parler tries to 104 # fall back to the default language. However for comments 105 # the default (English) does not always exist. Unfortunately 106 # it throws the same exception as non existing models, 107 # so we need to access it beforehand 108 try: 109 comment = app_rating.comment 110 except AppRating.DoesNotExist: 111 comment = '' 112 113 context['rating_form'] = AppRatingForm({ 114 'rating': app_rating.rating, 115 'comment': comment, 116 'language_code': app_rating.get_current_language(), 117 }) 118 context['user_has_rated_app'] = True 119 except AppRating.DoesNotExist: 120 pass 121 context['categories'] = Category.objects.prefetch_related( 122 'translations').all() 123 context['latest_releases_by_platform_v'] = \ 124 self.object.latest_releases_by_platform_v() 125 return context 126 127 128 class AppReleasesView(DetailView): 129 queryset = App.objects.prefetch_related( 130 'translations', 131 'releases__translations', 132 'releases__phpextensiondependencies__php_extension', 133 'releases__databasedependencies__database', 134 'releases__shell_commands', 135 'releases__licenses', 136 ) 137 template_name = 'app/releases.html' 138 slug_field = 'id' 139 slug_url_kwarg = 'id' 140 141 def get_context_data(self, **kwargs): 142 context = super().get_context_data(**kwargs) 143 context['categories'] = Category.objects.prefetch_related( 144 'translations').all() 145 146 releases = self.object.releases_by_platform_v() 147 unstables = self.object.unstable_releases_by_platform_v() 148 versions = set(list(releases.keys()) + list(unstables.keys())) 149 all_releases = list(map( 150 lambda v: (v, releases.get(v, []) + unstables.get(v, [])), 151 versions)) 152 context['releases_by_platform_v'] = \ 153 self._sort_by_platform_v(all_releases) 154 return context 155 156 def _sort_by_platform_v(self, releases_by_platform, reverse=True): 157 """Sorts a list of tuples like (<platform version>, [releases]) by 158 platform version. 159 160 :param releases_by_platform: A list of tuples. 161 :param reverse: Descending order if True, ascending otherwise. 162 :return sorted list of tuples. 
163 """ 164 165 return sorted(releases_by_platform, reverse=reverse, 166 key=lambda v: Version(pad_min_version(v[0]))) 167 168 169 class CategoryAppListView(ListView): 170 model = App 171 template_name = 'app/list.html' 172 allow_empty = True 173 174 def get_queryset(self): 175 order_by = self.request.GET.get('order_by', 'rating_overall') 176 ordering = self.request.GET.get('ordering', 'desc') 177 is_featured = self.request.GET.get('is_featured', False) 178 maintainer = self.request.GET.get('maintainer', False) 179 sort_columns = [] 180 181 allowed_order_by = {'name', 'last_release', 'rating_overall', 182 'rating_recent'} 183 if order_by in allowed_order_by: 184 if order_by == 'name': 185 order_by = 'translations__name' 186 if ordering == 'desc': 187 sort_columns.append('-' + order_by) 188 else: 189 sort_columns.append(order_by) 190 191 lang = get_language_info(get_language())['code'] 192 category_id = self.kwargs['id'] 193 queryset = App.objects.search(self.search_terms, lang).order_by( 194 *sort_columns).filter(releases__gt=0) 195 if maintainer: 196 try: 197 user = User.objects.get_by_natural_key(maintainer) 198 queryset = queryset.filter(Q(owner=user) | 199 Q(co_maintainers=user)) 200 except ObjectDoesNotExist: 201 return queryset.none() 202 if category_id: 203 queryset = queryset.filter(categories__id=category_id) 204 if is_featured == "true": 205 queryset = queryset.filter(is_featured=True) 206 return queryset.prefetch_related('screenshots', 'translations') 207 208 def get_context_data(self, **kwargs): 209 context = super().get_context_data(**kwargs) 210 context['categories'] = Category.objects.prefetch_related( 211 'translations').all() 212 category_id = self.kwargs['id'] 213 if category_id: 214 context['current_category'] = Category.objects.get(id=category_id) 215 if self.search_terms: 216 context['search_query'] = ' '.join(self.search_terms) 217 context['url_params'] = self.url_params 218 return context 219 220 @cached_property 221 def url_params(self): 222 """URL encoded strings with the GET params of the last request. 223 224 Intended for preserving GET params upon clicking a link by including 225 one (and only one) of these strings in the "href" attribute. 226 227 The parameters are divided into three groups: search, filters and 228 ordering. In addition to these three, the returned dict also contains 229 some combinations of them, as specified by the dict keys. 230 231 No leading "?" or "&". 232 233 :return dict with URL encoded strings. 
234 """ 235 236 search = self._url_params_str('search') 237 filters = self._url_params_str('is_featured', 'maintainer') 238 ordering = self._url_params_str('order_by', 'ordering') 239 240 return { 241 'search': search, 242 'filters': filters, 243 'ordering': ordering, 244 'search_filters': self._join_url_params_strs(search, filters), 245 'filters_ordering': self._join_url_params_strs(filters, ordering), 246 } 247 248 def _url_params_str(self, *params): 249 args = map(lambda param: (param, self.request.GET.get(param, '')), 250 params) 251 present_args = filter(lambda a: a[1], args) 252 return urlencode(dict(present_args)) 253 254 def _join_url_params_strs(self, *strings): 255 return '&'.join(filter(None, strings)) 256 257 @cached_property 258 def search_terms(self): 259 return self.request.GET.get('search', '').strip().split() 260 261 262 class AppUploadView(LoginRequiredMixin, TemplateView): 263 template_name = 'app/upload.html' 264 265 def get_context_data(self, **kwargs): 266 context = super().get_context_data(**kwargs) 267 context['form'] = AppReleaseUploadForm() 268 return context 269 270 271 class AppRegisterView(LoginRequiredMixin, TemplateView): 272 template_name = 'app/register.html' 273 274 def get_context_data(self, **kwargs): 275 context = super().get_context_data(**kwargs) 276 context['form'] = AppRegisterForm() 277 return context 278 ``` Path: `nextcloudappstore/urls.py` Content: ``` 1 from allauth.account.views import signup 2 from allauth.socialaccount.views import signup as social_signup 3 from csp.decorators import csp_update 4 from django.conf import settings 5 from django.conf.urls import url, include 6 from django.conf.urls.i18n import i18n_patterns 7 from django.contrib import admin 8 from django.contrib.auth.decorators import login_required 9 from django.views.decorators.http import etag 10 11 from nextcloudappstore.core.caching import app_rating_etag 12 from nextcloudappstore.core.feeds import AppReleaseAtomFeed, AppReleaseRssFeed 13 from nextcloudappstore.core.views import CategoryAppListView, AppDetailView, \ 14 app_description, AppReleasesView, AppUploadView, AppRatingApi, \ 15 AppRegisterView 16 from nextcloudappstore.scaffolding.views import AppScaffoldingView 17 18 admin.site.login = login_required(admin.site.login) 19 20 urlpatterns = [ 21 url(r'^$', CategoryAppListView.as_view(), {'id': None}, name='home'), 22 url(r"^signup/$", csp_update(**settings.CSP_SIGNUP)(signup), 23 name="account_signup"), 24 url(r"^social/signup/$", csp_update(**settings.CSP_SIGNUP)(social_signup), 25 name="socialaccount_signup"), 26 url(r'^', include('allauth.urls')), 27 url(r'^categories/(?P<id>[\w]*)/?$', CategoryAppListView.as_view(), 28 name='category-app-list'), 29 url(r'^developer/apps/generate/?$', AppScaffoldingView.as_view(), 30 name='app-scaffold'), 31 url(r'^developer/apps/releases/new/?$', AppUploadView.as_view(), 32 name='app-upload'), 33 url(r'^developer/apps/new/?$', AppRegisterView.as_view(), 34 name='app-register'), 35 url(r'^apps/(?P<id>[\w_]+)/?$', AppDetailView.as_view(), 36 name='app-detail'), 37 url(r'^apps/(?P<id>[\w_]+)/releases/?$', AppReleasesView.as_view(), 38 name='app-releases'), 39 url(r'^apps/(?P<id>[\w_]+)/description/?$', app_description, 40 name='app-description'), 41 url(r'^apps/(?P<id>[\w_]+)/ratings.json$', 42 etag(app_rating_etag)(AppRatingApi.as_view()), name='app-ratings'), 43 url(r'^api/', include('nextcloudappstore.api.urls', 44 namespace='api')), 45 url(r'^account/', 46 include('nextcloudappstore.user.urls', namespace='user')), 47 
url(r'^admin/', admin.site.urls), 48 url(r'^i18n/', include('django.conf.urls.i18n')), 49 ] 50 51 urlpatterns += i18n_patterns( 52 url(r'feeds/releases.rss', AppReleaseRssFeed(), name='feeds-releases-rss'), 53 url(r'feeds/releases.atom', AppReleaseAtomFeed(), 54 name='feeds-releases-atom'), 55 ) 56 57 if settings.DEBUG: 58 try: 59 import debug_toolbar 60 61 urlpatterns += [url(r'^__debug__/', include(debug_toolbar.urls)), ] 62 except ImportError: 63 pass 64 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/nextcloudappstore/core/views.py b/nextcloudappstore/core/views.py --- a/nextcloudappstore/core/views.py +++ b/nextcloudappstore/core/views.py @@ -174,7 +174,8 @@ def get_queryset(self): order_by = self.request.GET.get('order_by', 'rating_overall') ordering = self.request.GET.get('ordering', 'desc') - is_featured = self.request.GET.get('is_featured', False) + is_featured = self.request.GET.get('is_featured', self.kwargs + .get('is_featured_category', False)) maintainer = self.request.GET.get('maintainer', False) sort_columns = [] @@ -210,6 +211,8 @@ context['categories'] = Category.objects.prefetch_related( 'translations').all() category_id = self.kwargs['id'] + context['is_featured_category'] = self.kwargs\ + .get('is_featured_category', False) if category_id: context['current_category'] = Category.objects.get(id=category_id) if self.search_terms: diff --git a/nextcloudappstore/urls.py b/nextcloudappstore/urls.py --- a/nextcloudappstore/urls.py +++ b/nextcloudappstore/urls.py @@ -19,6 +19,10 @@ urlpatterns = [ url(r'^$', CategoryAppListView.as_view(), {'id': None}, name='home'), + url(r"^featured$", CategoryAppListView.as_view(), {'id': None, + 'is_featured_category': + True}, + name='featured'), url(r"^signup/$", csp_update(**settings.CSP_SIGNUP)(signup), name="account_signup"), url(r"^social/signup/$", csp_update(**settings.CSP_SIGNUP)(social_signup),
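The change above adds the new page through URL kwargs rather than a new database category: the extra dict passed to `url()` is handed to the view and lands in `self.kwargs`, which `CategoryAppListView` now reads both as a fallback default for the `is_featured` GET parameter and as an `is_featured_category` flag for the template context. A minimal sketch of that extra-kwargs pattern, using the same `url()` helper the project uses (`Thing` and `ThingListView` are illustrative names, not part of the app store code):

```python
from django.conf.urls import url
from django.views.generic.list import ListView

from myapp.models import Thing  # hypothetical model with an is_featured BooleanField


class ThingListView(ListView):
    model = Thing

    def get_queryset(self):
        # A GET parameter, if present, overrides the flag baked into the URL conf.
        featured = self.request.GET.get(
            'is_featured', self.kwargs.get('is_featured_category', False))
        queryset = super().get_queryset()
        if featured is True or featured == 'true':
            queryset = queryset.filter(is_featured=True)
        return queryset


urlpatterns = [
    url(r'^$', ThingListView.as_view(), {'is_featured_category': False}, name='home'),
    url(r'^featured$', ThingListView.as_view(), {'is_featured_category': True},
        name='featured'),
]
```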
{"golden_diff": "diff --git a/nextcloudappstore/core/views.py b/nextcloudappstore/core/views.py\n--- a/nextcloudappstore/core/views.py\n+++ b/nextcloudappstore/core/views.py\n@@ -174,7 +174,8 @@\n def get_queryset(self):\n order_by = self.request.GET.get('order_by', 'rating_overall')\n ordering = self.request.GET.get('ordering', 'desc')\n- is_featured = self.request.GET.get('is_featured', False)\n+ is_featured = self.request.GET.get('is_featured', self.kwargs\n+ .get('is_featured_category', False))\n maintainer = self.request.GET.get('maintainer', False)\n sort_columns = []\n \n@@ -210,6 +211,8 @@\n context['categories'] = Category.objects.prefetch_related(\n 'translations').all()\n category_id = self.kwargs['id']\n+ context['is_featured_category'] = self.kwargs\\\n+ .get('is_featured_category', False)\n if category_id:\n context['current_category'] = Category.objects.get(id=category_id)\n if self.search_terms:\ndiff --git a/nextcloudappstore/urls.py b/nextcloudappstore/urls.py\n--- a/nextcloudappstore/urls.py\n+++ b/nextcloudappstore/urls.py\n@@ -19,6 +19,10 @@\n \n urlpatterns = [\n url(r'^$', CategoryAppListView.as_view(), {'id': None}, name='home'),\n+ url(r\"^featured$\", CategoryAppListView.as_view(), {'id': None,\n+ 'is_featured_category':\n+ True},\n+ name='featured'),\n url(r\"^signup/$\", csp_update(**settings.CSP_SIGNUP)(signup),\n name=\"account_signup\"),\n url(r\"^social/signup/$\", csp_update(**settings.CSP_SIGNUP)(social_signup),\n", "issue": "add \"Featured\" category\nIn order to find \"Featured\" labeled apps better, there should be a separate category, making all official apps accessible on one screen.\n", "before_files": [{"content": "from urllib.parse import urlencode\n\nfrom django.conf import settings\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib.auth.models import User\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.db.models import Q\nfrom django.http import HttpResponse\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.utils.functional import cached_property\nfrom django.utils.translation import get_language, get_language_info\nfrom django.views.decorators.http import etag\nfrom django.views.generic.base import TemplateView\nfrom django.views.generic.detail import DetailView\nfrom django.views.generic.list import ListView\nfrom rest_framework.generics import ListAPIView\nfrom semantic_version import Version\n\nfrom nextcloudappstore.core.caching import app_etag\nfrom nextcloudappstore.core.facades import flatmap\nfrom nextcloudappstore.core.forms import AppRatingForm, AppReleaseUploadForm, \\\n AppRegisterForm\nfrom nextcloudappstore.core.models import App, Category, AppRating\nfrom nextcloudappstore.core.serializers import AppRatingSerializer\nfrom nextcloudappstore.core.versioning import pad_min_version\n\n\n@etag(app_etag)\ndef app_description(request, id):\n app = get_object_or_404(App, id=id)\n return HttpResponse(app.description, content_type='text/plain')\n\n\nclass AppRatingApi(ListAPIView):\n serializer_class = AppRatingSerializer\n\n def get_queryset(self):\n id = self.kwargs.get('id')\n lang = self.request.GET.get('lang', self.request.LANGUAGE_CODE)\n app = get_object_or_404(App, id=id)\n queryset = AppRating.objects.language(lang).filter(app=app)\n\n current_user = self.request.GET.get('current_user', 'false')\n if current_user == 'true':\n return queryset.filter(user=self.request.user)\n else:\n return queryset\n\n\nclass AppDetailView(DetailView):\n queryset = 
App.objects.prefetch_related(\n 'releases',\n 'screenshots',\n 'co_maintainers',\n 'translations',\n ).select_related('owner')\n template_name = 'app/detail.html'\n slug_field = 'id'\n slug_url_kwarg = 'id'\n\n def post(self, request, id):\n form = AppRatingForm(request.POST, id=id, user=request.user)\n # there is no way that a rating can be invalid by default\n if form.is_valid() and request.user.is_authenticated:\n form.save()\n return redirect('app-detail', id=id)\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['DISCOURSE_URL'] = settings.DISCOURSE_URL.rstrip('/')\n context['rating_form'] = AppRatingForm(\n initial={'language_code': get_language()})\n\n ratings = AppRating.objects.filter(app=context['app'])\n rating_languages = flatmap(\n lambda r: r.get_available_languages(), ratings)\n\n # make sure current session language is in the list even if there are\n # no comments.\n rating_languages = list(rating_languages)\n if get_language() not in rating_languages:\n rating_languages.append(get_language())\n\n context['languages'] = set(sorted(rating_languages))\n context['fallbackLang'] = 'en' if 'en' in context['languages'] else ''\n context['user_has_rated_app'] = False\n if self.request.user.is_authenticated:\n try:\n app_rating = AppRating.objects.get(user=self.request.user,\n app=context['app'])\n\n # if parler fallsback to a fallback language\n # it doesn't set the language as current language\n # and we can't select the correct language in the\n # frontend. So we try and find a languge that is\n # available\n language_code = app_rating.get_current_language()\n if not app_rating.has_translation(language_code):\n for fallback in app_rating.get_fallback_languages():\n if app_rating.has_translation(fallback):\n app_rating.set_current_language(fallback)\n\n # when accessing an empty comment django-parler tries to\n # fall back to the default language. However for comments\n # the default (English) does not always exist. 
Unfortunately\n # it throws the same exception as non existing models,\n # so we need to access it beforehand\n try:\n comment = app_rating.comment\n except AppRating.DoesNotExist:\n comment = ''\n\n context['rating_form'] = AppRatingForm({\n 'rating': app_rating.rating,\n 'comment': comment,\n 'language_code': app_rating.get_current_language(),\n })\n context['user_has_rated_app'] = True\n except AppRating.DoesNotExist:\n pass\n context['categories'] = Category.objects.prefetch_related(\n 'translations').all()\n context['latest_releases_by_platform_v'] = \\\n self.object.latest_releases_by_platform_v()\n return context\n\n\nclass AppReleasesView(DetailView):\n queryset = App.objects.prefetch_related(\n 'translations',\n 'releases__translations',\n 'releases__phpextensiondependencies__php_extension',\n 'releases__databasedependencies__database',\n 'releases__shell_commands',\n 'releases__licenses',\n )\n template_name = 'app/releases.html'\n slug_field = 'id'\n slug_url_kwarg = 'id'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['categories'] = Category.objects.prefetch_related(\n 'translations').all()\n\n releases = self.object.releases_by_platform_v()\n unstables = self.object.unstable_releases_by_platform_v()\n versions = set(list(releases.keys()) + list(unstables.keys()))\n all_releases = list(map(\n lambda v: (v, releases.get(v, []) + unstables.get(v, [])),\n versions))\n context['releases_by_platform_v'] = \\\n self._sort_by_platform_v(all_releases)\n return context\n\n def _sort_by_platform_v(self, releases_by_platform, reverse=True):\n \"\"\"Sorts a list of tuples like (<platform version>, [releases]) by\n platform version.\n\n :param releases_by_platform: A list of tuples.\n :param reverse: Descending order if True, ascending otherwise.\n :return sorted list of tuples.\n \"\"\"\n\n return sorted(releases_by_platform, reverse=reverse,\n key=lambda v: Version(pad_min_version(v[0])))\n\n\nclass CategoryAppListView(ListView):\n model = App\n template_name = 'app/list.html'\n allow_empty = True\n\n def get_queryset(self):\n order_by = self.request.GET.get('order_by', 'rating_overall')\n ordering = self.request.GET.get('ordering', 'desc')\n is_featured = self.request.GET.get('is_featured', False)\n maintainer = self.request.GET.get('maintainer', False)\n sort_columns = []\n\n allowed_order_by = {'name', 'last_release', 'rating_overall',\n 'rating_recent'}\n if order_by in allowed_order_by:\n if order_by == 'name':\n order_by = 'translations__name'\n if ordering == 'desc':\n sort_columns.append('-' + order_by)\n else:\n sort_columns.append(order_by)\n\n lang = get_language_info(get_language())['code']\n category_id = self.kwargs['id']\n queryset = App.objects.search(self.search_terms, lang).order_by(\n *sort_columns).filter(releases__gt=0)\n if maintainer:\n try:\n user = User.objects.get_by_natural_key(maintainer)\n queryset = queryset.filter(Q(owner=user) |\n Q(co_maintainers=user))\n except ObjectDoesNotExist:\n return queryset.none()\n if category_id:\n queryset = queryset.filter(categories__id=category_id)\n if is_featured == \"true\":\n queryset = queryset.filter(is_featured=True)\n return queryset.prefetch_related('screenshots', 'translations')\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['categories'] = Category.objects.prefetch_related(\n 'translations').all()\n category_id = self.kwargs['id']\n if category_id:\n context['current_category'] = 
Category.objects.get(id=category_id)\n if self.search_terms:\n context['search_query'] = ' '.join(self.search_terms)\n context['url_params'] = self.url_params\n return context\n\n @cached_property\n def url_params(self):\n \"\"\"URL encoded strings with the GET params of the last request.\n\n Intended for preserving GET params upon clicking a link by including\n one (and only one) of these strings in the \"href\" attribute.\n\n The parameters are divided into three groups: search, filters and\n ordering. In addition to these three, the returned dict also contains\n some combinations of them, as specified by the dict keys.\n\n No leading \"?\" or \"&\".\n\n :return dict with URL encoded strings.\n \"\"\"\n\n search = self._url_params_str('search')\n filters = self._url_params_str('is_featured', 'maintainer')\n ordering = self._url_params_str('order_by', 'ordering')\n\n return {\n 'search': search,\n 'filters': filters,\n 'ordering': ordering,\n 'search_filters': self._join_url_params_strs(search, filters),\n 'filters_ordering': self._join_url_params_strs(filters, ordering),\n }\n\n def _url_params_str(self, *params):\n args = map(lambda param: (param, self.request.GET.get(param, '')),\n params)\n present_args = filter(lambda a: a[1], args)\n return urlencode(dict(present_args))\n\n def _join_url_params_strs(self, *strings):\n return '&'.join(filter(None, strings))\n\n @cached_property\n def search_terms(self):\n return self.request.GET.get('search', '').strip().split()\n\n\nclass AppUploadView(LoginRequiredMixin, TemplateView):\n template_name = 'app/upload.html'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['form'] = AppReleaseUploadForm()\n return context\n\n\nclass AppRegisterView(LoginRequiredMixin, TemplateView):\n template_name = 'app/register.html'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['form'] = AppRegisterForm()\n return context\n", "path": "nextcloudappstore/core/views.py"}, {"content": "from allauth.account.views import signup\nfrom allauth.socialaccount.views import signup as social_signup\nfrom csp.decorators import csp_update\nfrom django.conf import settings\nfrom django.conf.urls import url, include\nfrom django.conf.urls.i18n import i18n_patterns\nfrom django.contrib import admin\nfrom django.contrib.auth.decorators import login_required\nfrom django.views.decorators.http import etag\n\nfrom nextcloudappstore.core.caching import app_rating_etag\nfrom nextcloudappstore.core.feeds import AppReleaseAtomFeed, AppReleaseRssFeed\nfrom nextcloudappstore.core.views import CategoryAppListView, AppDetailView, \\\n app_description, AppReleasesView, AppUploadView, AppRatingApi, \\\n AppRegisterView\nfrom nextcloudappstore.scaffolding.views import AppScaffoldingView\n\nadmin.site.login = login_required(admin.site.login)\n\nurlpatterns = [\n url(r'^$', CategoryAppListView.as_view(), {'id': None}, name='home'),\n url(r\"^signup/$\", csp_update(**settings.CSP_SIGNUP)(signup),\n name=\"account_signup\"),\n url(r\"^social/signup/$\", csp_update(**settings.CSP_SIGNUP)(social_signup),\n name=\"socialaccount_signup\"),\n url(r'^', include('allauth.urls')),\n url(r'^categories/(?P<id>[\\w]*)/?$', CategoryAppListView.as_view(),\n name='category-app-list'),\n url(r'^developer/apps/generate/?$', AppScaffoldingView.as_view(),\n name='app-scaffold'),\n url(r'^developer/apps/releases/new/?$', AppUploadView.as_view(),\n name='app-upload'),\n url(r'^developer/apps/new/?$', 
AppRegisterView.as_view(),\n name='app-register'),\n url(r'^apps/(?P<id>[\\w_]+)/?$', AppDetailView.as_view(),\n name='app-detail'),\n url(r'^apps/(?P<id>[\\w_]+)/releases/?$', AppReleasesView.as_view(),\n name='app-releases'),\n url(r'^apps/(?P<id>[\\w_]+)/description/?$', app_description,\n name='app-description'),\n url(r'^apps/(?P<id>[\\w_]+)/ratings.json$',\n etag(app_rating_etag)(AppRatingApi.as_view()), name='app-ratings'),\n url(r'^api/', include('nextcloudappstore.api.urls',\n namespace='api')),\n url(r'^account/',\n include('nextcloudappstore.user.urls', namespace='user')),\n url(r'^admin/', admin.site.urls),\n url(r'^i18n/', include('django.conf.urls.i18n')),\n]\n\nurlpatterns += i18n_patterns(\n url(r'feeds/releases.rss', AppReleaseRssFeed(), name='feeds-releases-rss'),\n url(r'feeds/releases.atom', AppReleaseAtomFeed(),\n name='feeds-releases-atom'),\n)\n\nif settings.DEBUG:\n try:\n import debug_toolbar\n\n urlpatterns += [url(r'^__debug__/', include(debug_toolbar.urls)), ]\n except ImportError:\n pass\n", "path": "nextcloudappstore/urls.py"}], "after_files": [{"content": "from urllib.parse import urlencode\n\nfrom django.conf import settings\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib.auth.models import User\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.db.models import Q\nfrom django.http import HttpResponse\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.utils.functional import cached_property\nfrom django.utils.translation import get_language, get_language_info\nfrom django.views.decorators.http import etag\nfrom django.views.generic.base import TemplateView\nfrom django.views.generic.detail import DetailView\nfrom django.views.generic.list import ListView\nfrom rest_framework.generics import ListAPIView\nfrom semantic_version import Version\n\nfrom nextcloudappstore.core.caching import app_etag\nfrom nextcloudappstore.core.facades import flatmap\nfrom nextcloudappstore.core.forms import AppRatingForm, AppReleaseUploadForm, \\\n AppRegisterForm\nfrom nextcloudappstore.core.models import App, Category, AppRating\nfrom nextcloudappstore.core.serializers import AppRatingSerializer\nfrom nextcloudappstore.core.versioning import pad_min_version\n\n\n@etag(app_etag)\ndef app_description(request, id):\n app = get_object_or_404(App, id=id)\n return HttpResponse(app.description, content_type='text/plain')\n\n\nclass AppRatingApi(ListAPIView):\n serializer_class = AppRatingSerializer\n\n def get_queryset(self):\n id = self.kwargs.get('id')\n lang = self.request.GET.get('lang', self.request.LANGUAGE_CODE)\n app = get_object_or_404(App, id=id)\n queryset = AppRating.objects.language(lang).filter(app=app)\n\n current_user = self.request.GET.get('current_user', 'false')\n if current_user == 'true':\n return queryset.filter(user=self.request.user)\n else:\n return queryset\n\n\nclass AppDetailView(DetailView):\n queryset = App.objects.prefetch_related(\n 'releases',\n 'screenshots',\n 'co_maintainers',\n 'translations',\n ).select_related('owner')\n template_name = 'app/detail.html'\n slug_field = 'id'\n slug_url_kwarg = 'id'\n\n def post(self, request, id):\n form = AppRatingForm(request.POST, id=id, user=request.user)\n # there is no way that a rating can be invalid by default\n if form.is_valid() and request.user.is_authenticated:\n form.save()\n return redirect('app-detail', id=id)\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['DISCOURSE_URL'] = 
settings.DISCOURSE_URL.rstrip('/')\n context['rating_form'] = AppRatingForm(\n initial={'language_code': get_language()})\n\n ratings = AppRating.objects.filter(app=context['app'])\n rating_languages = flatmap(\n lambda r: r.get_available_languages(), ratings)\n\n # make sure current session language is in the list even if there are\n # no comments.\n rating_languages = list(rating_languages)\n if get_language() not in rating_languages:\n rating_languages.append(get_language())\n\n context['languages'] = set(sorted(rating_languages))\n context['fallbackLang'] = 'en' if 'en' in context['languages'] else ''\n context['user_has_rated_app'] = False\n if self.request.user.is_authenticated:\n try:\n app_rating = AppRating.objects.get(user=self.request.user,\n app=context['app'])\n\n # if parler fallsback to a fallback language\n # it doesn't set the language as current language\n # and we can't select the correct language in the\n # frontend. So we try and find a languge that is\n # available\n language_code = app_rating.get_current_language()\n if not app_rating.has_translation(language_code):\n for fallback in app_rating.get_fallback_languages():\n if app_rating.has_translation(fallback):\n app_rating.set_current_language(fallback)\n\n # when accessing an empty comment django-parler tries to\n # fall back to the default language. However for comments\n # the default (English) does not always exist. Unfortunately\n # it throws the same exception as non existing models,\n # so we need to access it beforehand\n try:\n comment = app_rating.comment\n except AppRating.DoesNotExist:\n comment = ''\n\n context['rating_form'] = AppRatingForm({\n 'rating': app_rating.rating,\n 'comment': comment,\n 'language_code': app_rating.get_current_language(),\n })\n context['user_has_rated_app'] = True\n except AppRating.DoesNotExist:\n pass\n context['categories'] = Category.objects.prefetch_related(\n 'translations').all()\n context['latest_releases_by_platform_v'] = \\\n self.object.latest_releases_by_platform_v()\n return context\n\n\nclass AppReleasesView(DetailView):\n queryset = App.objects.prefetch_related(\n 'translations',\n 'releases__translations',\n 'releases__phpextensiondependencies__php_extension',\n 'releases__databasedependencies__database',\n 'releases__shell_commands',\n 'releases__licenses',\n )\n template_name = 'app/releases.html'\n slug_field = 'id'\n slug_url_kwarg = 'id'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['categories'] = Category.objects.prefetch_related(\n 'translations').all()\n\n releases = self.object.releases_by_platform_v()\n unstables = self.object.unstable_releases_by_platform_v()\n versions = set(list(releases.keys()) + list(unstables.keys()))\n all_releases = list(map(\n lambda v: (v, releases.get(v, []) + unstables.get(v, [])),\n versions))\n context['releases_by_platform_v'] = \\\n self._sort_by_platform_v(all_releases)\n return context\n\n def _sort_by_platform_v(self, releases_by_platform, reverse=True):\n \"\"\"Sorts a list of tuples like (<platform version>, [releases]) by\n platform version.\n\n :param releases_by_platform: A list of tuples.\n :param reverse: Descending order if True, ascending otherwise.\n :return sorted list of tuples.\n \"\"\"\n\n return sorted(releases_by_platform, reverse=reverse,\n key=lambda v: Version(pad_min_version(v[0])))\n\n\nclass CategoryAppListView(ListView):\n model = App\n template_name = 'app/list.html'\n allow_empty = True\n\n def get_queryset(self):\n order_by = 
self.request.GET.get('order_by', 'rating_overall')\n ordering = self.request.GET.get('ordering', 'desc')\n is_featured = self.request.GET.get('is_featured', self.kwargs\n .get('is_featured_category', False))\n maintainer = self.request.GET.get('maintainer', False)\n sort_columns = []\n\n allowed_order_by = {'name', 'last_release', 'rating_overall',\n 'rating_recent'}\n if order_by in allowed_order_by:\n if order_by == 'name':\n order_by = 'translations__name'\n if ordering == 'desc':\n sort_columns.append('-' + order_by)\n else:\n sort_columns.append(order_by)\n\n lang = get_language_info(get_language())['code']\n category_id = self.kwargs['id']\n queryset = App.objects.search(self.search_terms, lang).order_by(\n *sort_columns).filter(releases__gt=0)\n if maintainer:\n try:\n user = User.objects.get_by_natural_key(maintainer)\n queryset = queryset.filter(Q(owner=user) |\n Q(co_maintainers=user))\n except ObjectDoesNotExist:\n return queryset.none()\n if category_id:\n queryset = queryset.filter(categories__id=category_id)\n if is_featured == \"true\":\n queryset = queryset.filter(is_featured=True)\n return queryset.prefetch_related('screenshots', 'translations')\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['categories'] = Category.objects.prefetch_related(\n 'translations').all()\n category_id = self.kwargs['id']\n context['is_featured_category'] = self.kwargs\\\n .get('is_featured_category', False)\n if category_id:\n context['current_category'] = Category.objects.get(id=category_id)\n if self.search_terms:\n context['search_query'] = ' '.join(self.search_terms)\n context['url_params'] = self.url_params\n return context\n\n @cached_property\n def url_params(self):\n \"\"\"URL encoded strings with the GET params of the last request.\n\n Intended for preserving GET params upon clicking a link by including\n one (and only one) of these strings in the \"href\" attribute.\n\n The parameters are divided into three groups: search, filters and\n ordering. 
In addition to these three, the returned dict also contains\n some combinations of them, as specified by the dict keys.\n\n No leading \"?\" or \"&\".\n\n :return dict with URL encoded strings.\n \"\"\"\n\n search = self._url_params_str('search')\n filters = self._url_params_str('is_featured', 'maintainer')\n ordering = self._url_params_str('order_by', 'ordering')\n\n return {\n 'search': search,\n 'filters': filters,\n 'ordering': ordering,\n 'search_filters': self._join_url_params_strs(search, filters),\n 'filters_ordering': self._join_url_params_strs(filters, ordering),\n }\n\n def _url_params_str(self, *params):\n args = map(lambda param: (param, self.request.GET.get(param, '')),\n params)\n present_args = filter(lambda a: a[1], args)\n return urlencode(dict(present_args))\n\n def _join_url_params_strs(self, *strings):\n return '&'.join(filter(None, strings))\n\n @cached_property\n def search_terms(self):\n return self.request.GET.get('search', '').strip().split()\n\n\nclass AppUploadView(LoginRequiredMixin, TemplateView):\n template_name = 'app/upload.html'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['form'] = AppReleaseUploadForm()\n return context\n\n\nclass AppRegisterView(LoginRequiredMixin, TemplateView):\n template_name = 'app/register.html'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['form'] = AppRegisterForm()\n return context\n", "path": "nextcloudappstore/core/views.py"}, {"content": "from allauth.account.views import signup\nfrom allauth.socialaccount.views import signup as social_signup\nfrom csp.decorators import csp_update\nfrom django.conf import settings\nfrom django.conf.urls import url, include\nfrom django.conf.urls.i18n import i18n_patterns\nfrom django.contrib import admin\nfrom django.contrib.auth.decorators import login_required\nfrom django.views.decorators.http import etag\n\nfrom nextcloudappstore.core.caching import app_rating_etag\nfrom nextcloudappstore.core.feeds import AppReleaseAtomFeed, AppReleaseRssFeed\nfrom nextcloudappstore.core.views import CategoryAppListView, AppDetailView, \\\n app_description, AppReleasesView, AppUploadView, AppRatingApi, \\\n AppRegisterView\nfrom nextcloudappstore.scaffolding.views import AppScaffoldingView\n\nadmin.site.login = login_required(admin.site.login)\n\nurlpatterns = [\n url(r'^$', CategoryAppListView.as_view(), {'id': None}, name='home'),\n url(r\"^featured$\", CategoryAppListView.as_view(), {'id': None,\n 'is_featured_category':\n True},\n name='featured'),\n url(r\"^signup/$\", csp_update(**settings.CSP_SIGNUP)(signup),\n name=\"account_signup\"),\n url(r\"^social/signup/$\", csp_update(**settings.CSP_SIGNUP)(social_signup),\n name=\"socialaccount_signup\"),\n url(r'^', include('allauth.urls')),\n url(r'^categories/(?P<id>[\\w]*)/?$', CategoryAppListView.as_view(),\n name='category-app-list'),\n url(r'^developer/apps/generate/?$', AppScaffoldingView.as_view(),\n name='app-scaffold'),\n url(r'^developer/apps/releases/new/?$', AppUploadView.as_view(),\n name='app-upload'),\n url(r'^developer/apps/new/?$', AppRegisterView.as_view(),\n name='app-register'),\n url(r'^apps/(?P<id>[\\w_]+)/?$', AppDetailView.as_view(),\n name='app-detail'),\n url(r'^apps/(?P<id>[\\w_]+)/releases/?$', AppReleasesView.as_view(),\n name='app-releases'),\n url(r'^apps/(?P<id>[\\w_]+)/description/?$', app_description,\n name='app-description'),\n url(r'^apps/(?P<id>[\\w_]+)/ratings.json$',\n 
etag(app_rating_etag)(AppRatingApi.as_view()), name='app-ratings'),\n url(r'^api/', include('nextcloudappstore.api.urls',\n namespace='api')),\n url(r'^account/',\n include('nextcloudappstore.user.urls', namespace='user')),\n url(r'^admin/', admin.site.urls),\n url(r'^i18n/', include('django.conf.urls.i18n')),\n]\n\nurlpatterns += i18n_patterns(\n url(r'feeds/releases.rss', AppReleaseRssFeed(), name='feeds-releases-rss'),\n url(r'feeds/releases.atom', AppReleaseAtomFeed(),\n name='feeds-releases-atom'),\n)\n\nif settings.DEBUG:\n try:\n import debug_toolbar\n\n urlpatterns += [url(r'^__debug__/', include(debug_toolbar.urls)), ]\n except ImportError:\n pass\n", "path": "nextcloudappstore/urls.py"}]}
4,048
407
gh_patches_debug_33008
rasdani/github-patches
git_diff
interlegis__sapl-1588
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Alimentação da tabela TipoAutor automática Preencher a tabela Tipor autor automaticamente para que autores que possuem models relacionadas já estejam disponíveis no sistema. Apenas a criação de autores categorizados como "Outros" serão adicionados pelo usuário do sistema. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `sapl/base/models.py` Content: ``` 1 import reversion 2 from django.contrib.contenttypes.fields import GenericForeignKey 3 from django.contrib.contenttypes.models import ContentType 4 from django.db import models 5 from django.utils.translation import ugettext_lazy as _ 6 7 from sapl.utils import UF, YES_NO_CHOICES, get_settings_auth_user_model 8 9 TIPO_DOCUMENTO_ADMINISTRATIVO = (('O', _('Ostensivo')), 10 ('R', _('Restritivo'))) 11 12 SEQUENCIA_NUMERACAO = (('A', _('Sequencial por ano')), 13 ('L', _('Sequencial por legislatura')), 14 ('U', _('Sequencial único'))) 15 16 17 @reversion.register() 18 class CasaLegislativa(models.Model): 19 # TODO ajustar todos os max_length !!!! 20 # cod_casa => id (pk) 21 22 codigo = models.CharField(max_length=100, 23 blank=True, 24 verbose_name=_('Codigo')) 25 nome = models.CharField(max_length=100, verbose_name=_('Nome')) 26 sigla = models.CharField(max_length=100, verbose_name=_('Sigla')) 27 endereco = models.CharField(max_length=100, verbose_name=_('Endereço')) 28 cep = models.CharField(max_length=100, verbose_name=_('CEP')) 29 municipio = models.CharField(max_length=100, verbose_name=_('Município')) 30 uf = models.CharField(max_length=100, 31 choices=UF, 32 verbose_name=_('UF')) 33 telefone = models.CharField( 34 max_length=100, blank=True, verbose_name=_('Telefone')) 35 fax = models.CharField( 36 max_length=100, blank=True, verbose_name=_('Fax')) 37 logotipo = models.ImageField( 38 blank=True, 39 upload_to='sapl/casa/logotipo/', 40 verbose_name=_('Logotipo')) 41 endereco_web = models.URLField( 42 max_length=100, blank=True, verbose_name=_('HomePage')) 43 email = models.EmailField( 44 max_length=100, blank=True, verbose_name=_('E-mail')) 45 informacao_geral = models.TextField( 46 max_length=100, 47 blank=True, 48 verbose_name=_('Informação Geral')) 49 50 class Meta: 51 verbose_name = _('Casa Legislativa') 52 verbose_name_plural = _('Casa Legislativa') 53 54 def __str__(self): 55 return _('Casa Legislativa de %(municipio)s') % { 56 'municipio': self.municipio} 57 58 59 @reversion.register() 60 class ProblemaMigracao(models.Model): 61 content_type = models.ForeignKey(ContentType, 62 verbose_name=_('Tipo de Content')) 63 object_id = models.PositiveIntegerField(verbose_name=_('ID do Objeto')) 64 content_object = GenericForeignKey('content_type', 'object_id') 65 nome_campo = models.CharField(max_length=100, 66 blank=True, 67 verbose_name=_('Nome do(s) Campo(s)')) 68 problema = models.CharField(max_length=300, verbose_name=_('Problema')) 69 descricao = models.CharField(max_length=300, verbose_name=_('Descrição')) 70 eh_stub = models.BooleanField(verbose_name=_('É stub?')) 71 critico = models.BooleanField( 72 default=False, verbose_name=_('Crítico')) 73 74 class Meta: 75 verbose_name = _('Problema na Migração') 76 verbose_name_plural = _('Problemas na Migração') 77 78 79 @reversion.register() 80 class Constraint(models.Model): 81 nome_tabela = models.CharField( 82 max_length=50, verbose_name=_('Nome da tabela')) 83 nome_constraint = 
models.CharField( 84 max_length=100, verbose_name=_('Nome da constraint')) 85 nome_model = models.CharField( 86 max_length=50, verbose_name=_('Nome da model')) 87 tipo_constraint = models.CharField( 88 max_length=50, verbose_name=_('Tipo da constraint')) 89 90 class Meta: 91 verbose_name = _('Constraint removida') 92 verbose_name_plural = _('Constraints removidas') 93 94 95 @reversion.register() 96 class Argumento(models.Model): 97 constraint = models.ForeignKey(Constraint) 98 argumento = models.CharField( 99 max_length=50, verbose_name=_('Argumento')) 100 101 class Meta: 102 verbose_name = _('Argumento da constraint') 103 verbose_name_plural = _('Argumentos da constraint') 104 105 106 @reversion.register() 107 class AppConfig(models.Model): 108 109 POLITICA_PROTOCOLO_CHOICES = ( 110 ('O', _('Sempre Gerar Protocolo')), 111 ('C', _('Perguntar se é pra gerar protocolo ao incorporar')), 112 ('N', _('Nunca Protocolar ao incorporar uma proposição')), 113 ) 114 115 documentos_administrativos = models.CharField( 116 max_length=1, 117 verbose_name=_('Ostensivo/Restritivo'), 118 choices=TIPO_DOCUMENTO_ADMINISTRATIVO, default='O') 119 120 sequencia_numeracao = models.CharField( 121 max_length=1, 122 verbose_name=_('Sequência de numeração'), 123 choices=SEQUENCIA_NUMERACAO, default='A') 124 125 # TODO: a ser implementado na versão 3.2 126 # painel_aberto = models.BooleanField( 127 # verbose_name=_('Painel aberto para usuário anônimo'), 128 # choices=YES_NO_CHOICES, default=False) 129 130 texto_articulado_proposicao = models.BooleanField( 131 verbose_name=_('Usar Textos Articulados para Proposições'), 132 choices=YES_NO_CHOICES, default=False) 133 134 texto_articulado_materia = models.BooleanField( 135 verbose_name=_('Usar Textos Articulados para Matérias'), 136 choices=YES_NO_CHOICES, default=False) 137 138 texto_articulado_norma = models.BooleanField( 139 verbose_name=_('Usar Textos Articulados para Normas'), 140 choices=YES_NO_CHOICES, default=True) 141 142 proposicao_incorporacao_obrigatoria = models.CharField( 143 verbose_name=_('Regra de incorporação de proposições e protocolo'), 144 max_length=1, choices=POLITICA_PROTOCOLO_CHOICES, default='O') 145 146 cronometro_discurso = models.TimeField( 147 verbose_name=_('Cronômetro do Discurso'), 148 blank=True, 149 null=True) 150 151 cronometro_aparte = models.TimeField( 152 verbose_name=_('Cronômetro do Aparte'), 153 blank=True, 154 null=True) 155 156 cronometro_ordem = models.TimeField( 157 verbose_name=_('Cronômetro da Ordem'), 158 blank=True, 159 null=True) 160 161 mostrar_brasao_painel = models.BooleanField( 162 default=False, 163 verbose_name=_('Mostrar brasão da Casa no painel?')) 164 165 class Meta: 166 verbose_name = _('Configurações da Aplicação') 167 verbose_name_plural = _('Configurações da Aplicação') 168 permissions = ( 169 ('menu_sistemas', _('Renderizar Menu Sistemas')), 170 ('view_tabelas_auxiliares', _('Visualizar Tabelas Auxiliares')), 171 ) 172 173 @classmethod 174 def attr(cls, attr): 175 config = AppConfig.objects.first() 176 177 if not config: 178 return '' 179 180 return getattr(config, attr) 181 182 def __str__(self): 183 return _('Configurações da Aplicação - %(id)s') % { 184 'id': self.id} 185 186 187 @reversion.register() 188 class TipoAutor(models.Model): 189 descricao = models.CharField(max_length=50, verbose_name=_('Descrição')) 190 191 content_type = models.OneToOneField( 192 ContentType, 193 null=True, default=None, 194 verbose_name=_('Modelagem no SAPL')) 195 196 class Meta: 197 ordering = ['descricao'] 198 verbose_name 
= _('Tipo de Autor') 199 verbose_name_plural = _('Tipos de Autor') 200 201 def __str__(self): 202 return self.descricao 203 204 205 @reversion.register() 206 class Autor(models.Model): 207 208 user = models.OneToOneField(get_settings_auth_user_model(), 209 on_delete=models.SET_NULL, 210 null=True) 211 212 tipo = models.ForeignKey(TipoAutor, verbose_name=_('Tipo do Autor')) 213 214 content_type = models.ForeignKey( 215 ContentType, 216 blank=True, null=True, default=None) 217 object_id = models.PositiveIntegerField( 218 blank=True, null=True, default=None) 219 autor_related = GenericForeignKey('content_type', 'object_id') 220 221 nome = models.CharField( 222 max_length=60, blank=True, verbose_name=_('Nome do Autor')) 223 224 cargo = models.CharField(max_length=50, blank=True) 225 226 class Meta: 227 verbose_name = _('Autor') 228 verbose_name_plural = _('Autores') 229 unique_together = (('content_type', 'object_id'), ) 230 ordering = ('nome',) 231 232 def __str__(self): 233 234 if self.autor_related: 235 return str(self.autor_related) 236 else: 237 if str(self.cargo): 238 return _('%(nome)s - %(cargo)s') % { 239 'nome': self.nome, 'cargo': self.cargo} 240 else: 241 return str(self.nome) 242 """if str(self.tipo) == 'Parlamentar' and self.parlamentar: 243 return self.parlamentar.nome_parlamentar 244 elif str(self.tipo) == 'Comissao' and self.comissao: 245 return str(self.comissao) 246 elif str(self.tipo) == 'Partido' and self.partido: 247 return str(self.partido) 248 else: 249 """ 250 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/sapl/base/models.py b/sapl/base/models.py --- a/sapl/base/models.py +++ b/sapl/base/models.py @@ -2,9 +2,16 @@ from django.contrib.contenttypes.fields import GenericForeignKey from django.contrib.contenttypes.models import ContentType from django.db import models +from django.db.models.signals import post_migrate +from django.db.utils import DEFAULT_DB_ALIAS from django.utils.translation import ugettext_lazy as _ -from sapl.utils import UF, YES_NO_CHOICES, get_settings_auth_user_model +from sapl.utils import ( + UF, + YES_NO_CHOICES, + get_settings_auth_user_model, + models_with_gr_for_model + ) TIPO_DOCUMENTO_ADMINISTRATIVO = (('O', _('Ostensivo')), ('R', _('Restritivo'))) @@ -247,3 +254,39 @@ return str(self.partido) else: """ + + +def cria_models_tipo_autor(app_config, verbosity=2, interactive=True, + using=DEFAULT_DB_ALIAS, **kwargs): + + models = models_with_gr_for_model(Autor) + + print("\n\033[93m\033[1m{}\033[0m".format( + _('Atualizando registros TipoAutor do SAPL:'))) + for model in models: + content_type = ContentType.objects.get_for_model(model) + tipo_autor = TipoAutor.objects.filter( + content_type=content_type.id).exists() + + if tipo_autor: + msg1 = "Carga de {} não efetuada.".format( + TipoAutor._meta.verbose_name) + msg2 = " Já Existe um {} {} relacionado...".format( + TipoAutor._meta.verbose_name, + model._meta.verbose_name) + msg = " {}{}".format(msg1, msg2) + else: + novo_autor = TipoAutor() + novo_autor.content_type_id = content_type.id + novo_autor.descricao = model._meta.verbose_name + novo_autor.save() + msg1 = "Carga de {} efetuada.".format( + TipoAutor._meta.verbose_name) + msg2 = " {} {} criado...".format( + TipoAutor._meta.verbose_name, content_type.model) + msg = " {}{}".format(msg1, msg2) + print(msg) + # Disconecta função para evitar a chamada repetidas vezes. + post_migrate.disconnect(receiver=cria_models_tipo_autor) + +post_migrate.connect(receiver=cria_models_tipo_autor)
{"golden_diff": "diff --git a/sapl/base/models.py b/sapl/base/models.py\n--- a/sapl/base/models.py\n+++ b/sapl/base/models.py\n@@ -2,9 +2,16 @@\n from django.contrib.contenttypes.fields import GenericForeignKey\n from django.contrib.contenttypes.models import ContentType\n from django.db import models\n+from django.db.models.signals import post_migrate\n+from django.db.utils import DEFAULT_DB_ALIAS\n from django.utils.translation import ugettext_lazy as _\n \n-from sapl.utils import UF, YES_NO_CHOICES, get_settings_auth_user_model\n+from sapl.utils import (\n+ UF,\n+ YES_NO_CHOICES,\n+ get_settings_auth_user_model,\n+ models_with_gr_for_model\n+ )\n \n TIPO_DOCUMENTO_ADMINISTRATIVO = (('O', _('Ostensivo')),\n ('R', _('Restritivo')))\n@@ -247,3 +254,39 @@\n return str(self.partido)\n else:\n \"\"\"\n+\n+\n+def cria_models_tipo_autor(app_config, verbosity=2, interactive=True,\n+ using=DEFAULT_DB_ALIAS, **kwargs):\n+\n+ models = models_with_gr_for_model(Autor)\n+\n+ print(\"\\n\\033[93m\\033[1m{}\\033[0m\".format(\n+ _('Atualizando registros TipoAutor do SAPL:')))\n+ for model in models:\n+ content_type = ContentType.objects.get_for_model(model)\n+ tipo_autor = TipoAutor.objects.filter(\n+ content_type=content_type.id).exists()\n+\n+ if tipo_autor:\n+ msg1 = \"Carga de {} n\u00e3o efetuada.\".format(\n+ TipoAutor._meta.verbose_name)\n+ msg2 = \" J\u00e1 Existe um {} {} relacionado...\".format(\n+ TipoAutor._meta.verbose_name,\n+ model._meta.verbose_name)\n+ msg = \" {}{}\".format(msg1, msg2)\n+ else:\n+ novo_autor = TipoAutor()\n+ novo_autor.content_type_id = content_type.id\n+ novo_autor.descricao = model._meta.verbose_name\n+ novo_autor.save()\n+ msg1 = \"Carga de {} efetuada.\".format(\n+ TipoAutor._meta.verbose_name)\n+ msg2 = \" {} {} criado...\".format(\n+ TipoAutor._meta.verbose_name, content_type.model)\n+ msg = \" {}{}\".format(msg1, msg2)\n+ print(msg)\n+ # Disconecta fun\u00e7\u00e3o para evitar a chamada repetidas vezes.\n+ post_migrate.disconnect(receiver=cria_models_tipo_autor)\n+\n+post_migrate.connect(receiver=cria_models_tipo_autor)\n", "issue": "Alimenta\u00e7\u00e3o da tabela TipoAutor autom\u00e1tica\nPreencher a tabela Tipor autor automaticamente para que autores que possuem models relacionadas j\u00e1 estejam dispon\u00edveis no sistema.\r\nApenas a cria\u00e7\u00e3o de autores categorizados como \"Outros\" ser\u00e3o adicionados pelo usu\u00e1rio do sistema.\n", "before_files": [{"content": "import reversion\nfrom django.contrib.contenttypes.fields import GenericForeignKey\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom sapl.utils import UF, YES_NO_CHOICES, get_settings_auth_user_model\n\nTIPO_DOCUMENTO_ADMINISTRATIVO = (('O', _('Ostensivo')),\n ('R', _('Restritivo')))\n\nSEQUENCIA_NUMERACAO = (('A', _('Sequencial por ano')),\n ('L', _('Sequencial por legislatura')),\n ('U', _('Sequencial \u00fanico')))\n\n\[email protected]()\nclass CasaLegislativa(models.Model):\n # TODO ajustar todos os max_length !!!!\n # cod_casa => id (pk)\n\n codigo = models.CharField(max_length=100,\n blank=True,\n verbose_name=_('Codigo'))\n nome = models.CharField(max_length=100, verbose_name=_('Nome'))\n sigla = models.CharField(max_length=100, verbose_name=_('Sigla'))\n endereco = models.CharField(max_length=100, verbose_name=_('Endere\u00e7o'))\n cep = models.CharField(max_length=100, verbose_name=_('CEP'))\n municipio = models.CharField(max_length=100, 
verbose_name=_('Munic\u00edpio'))\n uf = models.CharField(max_length=100,\n choices=UF,\n verbose_name=_('UF'))\n telefone = models.CharField(\n max_length=100, blank=True, verbose_name=_('Telefone'))\n fax = models.CharField(\n max_length=100, blank=True, verbose_name=_('Fax'))\n logotipo = models.ImageField(\n blank=True,\n upload_to='sapl/casa/logotipo/',\n verbose_name=_('Logotipo'))\n endereco_web = models.URLField(\n max_length=100, blank=True, verbose_name=_('HomePage'))\n email = models.EmailField(\n max_length=100, blank=True, verbose_name=_('E-mail'))\n informacao_geral = models.TextField(\n max_length=100,\n blank=True,\n verbose_name=_('Informa\u00e7\u00e3o Geral'))\n\n class Meta:\n verbose_name = _('Casa Legislativa')\n verbose_name_plural = _('Casa Legislativa')\n\n def __str__(self):\n return _('Casa Legislativa de %(municipio)s') % {\n 'municipio': self.municipio}\n\n\[email protected]()\nclass ProblemaMigracao(models.Model):\n content_type = models.ForeignKey(ContentType,\n verbose_name=_('Tipo de Content'))\n object_id = models.PositiveIntegerField(verbose_name=_('ID do Objeto'))\n content_object = GenericForeignKey('content_type', 'object_id')\n nome_campo = models.CharField(max_length=100,\n blank=True,\n verbose_name=_('Nome do(s) Campo(s)'))\n problema = models.CharField(max_length=300, verbose_name=_('Problema'))\n descricao = models.CharField(max_length=300, verbose_name=_('Descri\u00e7\u00e3o'))\n eh_stub = models.BooleanField(verbose_name=_('\u00c9 stub?'))\n critico = models.BooleanField(\n default=False, verbose_name=_('Cr\u00edtico'))\n\n class Meta:\n verbose_name = _('Problema na Migra\u00e7\u00e3o')\n verbose_name_plural = _('Problemas na Migra\u00e7\u00e3o')\n\n\[email protected]()\nclass Constraint(models.Model):\n nome_tabela = models.CharField(\n max_length=50, verbose_name=_('Nome da tabela'))\n nome_constraint = models.CharField(\n max_length=100, verbose_name=_('Nome da constraint'))\n nome_model = models.CharField(\n max_length=50, verbose_name=_('Nome da model'))\n tipo_constraint = models.CharField(\n max_length=50, verbose_name=_('Tipo da constraint'))\n\n class Meta:\n verbose_name = _('Constraint removida')\n verbose_name_plural = _('Constraints removidas')\n\n\[email protected]()\nclass Argumento(models.Model):\n constraint = models.ForeignKey(Constraint)\n argumento = models.CharField(\n max_length=50, verbose_name=_('Argumento'))\n\n class Meta:\n verbose_name = _('Argumento da constraint')\n verbose_name_plural = _('Argumentos da constraint')\n\n\[email protected]()\nclass AppConfig(models.Model):\n\n POLITICA_PROTOCOLO_CHOICES = (\n ('O', _('Sempre Gerar Protocolo')),\n ('C', _('Perguntar se \u00e9 pra gerar protocolo ao incorporar')),\n ('N', _('Nunca Protocolar ao incorporar uma proposi\u00e7\u00e3o')),\n )\n\n documentos_administrativos = models.CharField(\n max_length=1,\n verbose_name=_('Ostensivo/Restritivo'),\n choices=TIPO_DOCUMENTO_ADMINISTRATIVO, default='O')\n\n sequencia_numeracao = models.CharField(\n max_length=1,\n verbose_name=_('Sequ\u00eancia de numera\u00e7\u00e3o'),\n choices=SEQUENCIA_NUMERACAO, default='A')\n\n # TODO: a ser implementado na vers\u00e3o 3.2\n # painel_aberto = models.BooleanField(\n # verbose_name=_('Painel aberto para usu\u00e1rio an\u00f4nimo'),\n # choices=YES_NO_CHOICES, default=False)\n\n texto_articulado_proposicao = models.BooleanField(\n verbose_name=_('Usar Textos Articulados para Proposi\u00e7\u00f5es'),\n choices=YES_NO_CHOICES, default=False)\n\n texto_articulado_materia = 
models.BooleanField(\n verbose_name=_('Usar Textos Articulados para Mat\u00e9rias'),\n choices=YES_NO_CHOICES, default=False)\n\n texto_articulado_norma = models.BooleanField(\n verbose_name=_('Usar Textos Articulados para Normas'),\n choices=YES_NO_CHOICES, default=True)\n\n proposicao_incorporacao_obrigatoria = models.CharField(\n verbose_name=_('Regra de incorpora\u00e7\u00e3o de proposi\u00e7\u00f5es e protocolo'),\n max_length=1, choices=POLITICA_PROTOCOLO_CHOICES, default='O')\n\n cronometro_discurso = models.TimeField(\n verbose_name=_('Cron\u00f4metro do Discurso'),\n blank=True,\n null=True)\n\n cronometro_aparte = models.TimeField(\n verbose_name=_('Cron\u00f4metro do Aparte'),\n blank=True,\n null=True)\n\n cronometro_ordem = models.TimeField(\n verbose_name=_('Cron\u00f4metro da Ordem'),\n blank=True,\n null=True)\n\n mostrar_brasao_painel = models.BooleanField(\n default=False,\n verbose_name=_('Mostrar bras\u00e3o da Casa no painel?'))\n\n class Meta:\n verbose_name = _('Configura\u00e7\u00f5es da Aplica\u00e7\u00e3o')\n verbose_name_plural = _('Configura\u00e7\u00f5es da Aplica\u00e7\u00e3o')\n permissions = (\n ('menu_sistemas', _('Renderizar Menu Sistemas')),\n ('view_tabelas_auxiliares', _('Visualizar Tabelas Auxiliares')),\n )\n\n @classmethod\n def attr(cls, attr):\n config = AppConfig.objects.first()\n\n if not config:\n return ''\n\n return getattr(config, attr)\n\n def __str__(self):\n return _('Configura\u00e7\u00f5es da Aplica\u00e7\u00e3o - %(id)s') % {\n 'id': self.id}\n\n\[email protected]()\nclass TipoAutor(models.Model):\n descricao = models.CharField(max_length=50, verbose_name=_('Descri\u00e7\u00e3o'))\n\n content_type = models.OneToOneField(\n ContentType,\n null=True, default=None,\n verbose_name=_('Modelagem no SAPL'))\n\n class Meta:\n ordering = ['descricao']\n verbose_name = _('Tipo de Autor')\n verbose_name_plural = _('Tipos de Autor')\n\n def __str__(self):\n return self.descricao\n\n\[email protected]()\nclass Autor(models.Model):\n\n user = models.OneToOneField(get_settings_auth_user_model(),\n on_delete=models.SET_NULL,\n null=True)\n\n tipo = models.ForeignKey(TipoAutor, verbose_name=_('Tipo do Autor'))\n\n content_type = models.ForeignKey(\n ContentType,\n blank=True, null=True, default=None)\n object_id = models.PositiveIntegerField(\n blank=True, null=True, default=None)\n autor_related = GenericForeignKey('content_type', 'object_id')\n\n nome = models.CharField(\n max_length=60, blank=True, verbose_name=_('Nome do Autor'))\n\n cargo = models.CharField(max_length=50, blank=True)\n\n class Meta:\n verbose_name = _('Autor')\n verbose_name_plural = _('Autores')\n unique_together = (('content_type', 'object_id'), )\n ordering = ('nome',)\n\n def __str__(self):\n\n if self.autor_related:\n return str(self.autor_related)\n else:\n if str(self.cargo):\n return _('%(nome)s - %(cargo)s') % {\n 'nome': self.nome, 'cargo': self.cargo}\n else:\n return str(self.nome)\n \"\"\"if str(self.tipo) == 'Parlamentar' and self.parlamentar:\n return self.parlamentar.nome_parlamentar\n elif str(self.tipo) == 'Comissao' and self.comissao:\n return str(self.comissao)\n elif str(self.tipo) == 'Partido' and self.partido:\n return str(self.partido)\n else:\n \"\"\"\n", "path": "sapl/base/models.py"}], "after_files": [{"content": "import reversion\nfrom django.contrib.contenttypes.fields import GenericForeignKey\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.db import models\nfrom django.db.models.signals import post_migrate\nfrom django.db.utils 
import DEFAULT_DB_ALIAS\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom sapl.utils import (\n UF,\n YES_NO_CHOICES,\n get_settings_auth_user_model,\n models_with_gr_for_model\n )\n\nTIPO_DOCUMENTO_ADMINISTRATIVO = (('O', _('Ostensivo')),\n ('R', _('Restritivo')))\n\nSEQUENCIA_NUMERACAO = (('A', _('Sequencial por ano')),\n ('L', _('Sequencial por legislatura')),\n ('U', _('Sequencial \u00fanico')))\n\n\[email protected]()\nclass CasaLegislativa(models.Model):\n # TODO ajustar todos os max_length !!!!\n # cod_casa => id (pk)\n\n codigo = models.CharField(max_length=100,\n blank=True,\n verbose_name=_('Codigo'))\n nome = models.CharField(max_length=100, verbose_name=_('Nome'))\n sigla = models.CharField(max_length=100, verbose_name=_('Sigla'))\n endereco = models.CharField(max_length=100, verbose_name=_('Endere\u00e7o'))\n cep = models.CharField(max_length=100, verbose_name=_('CEP'))\n municipio = models.CharField(max_length=100, verbose_name=_('Munic\u00edpio'))\n uf = models.CharField(max_length=100,\n choices=UF,\n verbose_name=_('UF'))\n telefone = models.CharField(\n max_length=100, blank=True, verbose_name=_('Telefone'))\n fax = models.CharField(\n max_length=100, blank=True, verbose_name=_('Fax'))\n logotipo = models.ImageField(\n blank=True,\n upload_to='sapl/casa/logotipo/',\n verbose_name=_('Logotipo'))\n endereco_web = models.URLField(\n max_length=100, blank=True, verbose_name=_('HomePage'))\n email = models.EmailField(\n max_length=100, blank=True, verbose_name=_('E-mail'))\n informacao_geral = models.TextField(\n max_length=100,\n blank=True,\n verbose_name=_('Informa\u00e7\u00e3o Geral'))\n\n class Meta:\n verbose_name = _('Casa Legislativa')\n verbose_name_plural = _('Casa Legislativa')\n\n def __str__(self):\n return _('Casa Legislativa de %(municipio)s') % {\n 'municipio': self.municipio}\n\n\[email protected]()\nclass ProblemaMigracao(models.Model):\n content_type = models.ForeignKey(ContentType,\n verbose_name=_('Tipo de Content'))\n object_id = models.PositiveIntegerField(verbose_name=_('ID do Objeto'))\n content_object = GenericForeignKey('content_type', 'object_id')\n nome_campo = models.CharField(max_length=100,\n blank=True,\n verbose_name=_('Nome do(s) Campo(s)'))\n problema = models.CharField(max_length=300, verbose_name=_('Problema'))\n descricao = models.CharField(max_length=300, verbose_name=_('Descri\u00e7\u00e3o'))\n eh_stub = models.BooleanField(verbose_name=_('\u00c9 stub?'))\n critico = models.BooleanField(\n default=False, verbose_name=_('Cr\u00edtico'))\n\n class Meta:\n verbose_name = _('Problema na Migra\u00e7\u00e3o')\n verbose_name_plural = _('Problemas na Migra\u00e7\u00e3o')\n\n\[email protected]()\nclass Constraint(models.Model):\n nome_tabela = models.CharField(\n max_length=50, verbose_name=_('Nome da tabela'))\n nome_constraint = models.CharField(\n max_length=100, verbose_name=_('Nome da constraint'))\n nome_model = models.CharField(\n max_length=50, verbose_name=_('Nome da model'))\n tipo_constraint = models.CharField(\n max_length=50, verbose_name=_('Tipo da constraint'))\n\n class Meta:\n verbose_name = _('Constraint removida')\n verbose_name_plural = _('Constraints removidas')\n\n\[email protected]()\nclass Argumento(models.Model):\n constraint = models.ForeignKey(Constraint)\n argumento = models.CharField(\n max_length=50, verbose_name=_('Argumento'))\n\n class Meta:\n verbose_name = _('Argumento da constraint')\n verbose_name_plural = _('Argumentos da constraint')\n\n\[email protected]()\nclass AppConfig(models.Model):\n\n 
POLITICA_PROTOCOLO_CHOICES = (\n ('O', _('Sempre Gerar Protocolo')),\n ('C', _('Perguntar se \u00e9 pra gerar protocolo ao incorporar')),\n ('N', _('Nunca Protocolar ao incorporar uma proposi\u00e7\u00e3o')),\n )\n\n documentos_administrativos = models.CharField(\n max_length=1,\n verbose_name=_('Ostensivo/Restritivo'),\n choices=TIPO_DOCUMENTO_ADMINISTRATIVO, default='O')\n\n sequencia_numeracao = models.CharField(\n max_length=1,\n verbose_name=_('Sequ\u00eancia de numera\u00e7\u00e3o'),\n choices=SEQUENCIA_NUMERACAO, default='A')\n\n # TODO: a ser implementado na vers\u00e3o 3.2\n # painel_aberto = models.BooleanField(\n # verbose_name=_('Painel aberto para usu\u00e1rio an\u00f4nimo'),\n # choices=YES_NO_CHOICES, default=False)\n\n texto_articulado_proposicao = models.BooleanField(\n verbose_name=_('Usar Textos Articulados para Proposi\u00e7\u00f5es'),\n choices=YES_NO_CHOICES, default=False)\n\n texto_articulado_materia = models.BooleanField(\n verbose_name=_('Usar Textos Articulados para Mat\u00e9rias'),\n choices=YES_NO_CHOICES, default=False)\n\n texto_articulado_norma = models.BooleanField(\n verbose_name=_('Usar Textos Articulados para Normas'),\n choices=YES_NO_CHOICES, default=True)\n\n proposicao_incorporacao_obrigatoria = models.CharField(\n verbose_name=_('Regra de incorpora\u00e7\u00e3o de proposi\u00e7\u00f5es e protocolo'),\n max_length=1, choices=POLITICA_PROTOCOLO_CHOICES, default='O')\n\n cronometro_discurso = models.TimeField(\n verbose_name=_('Cron\u00f4metro do Discurso'),\n blank=True,\n null=True)\n\n cronometro_aparte = models.TimeField(\n verbose_name=_('Cron\u00f4metro do Aparte'),\n blank=True,\n null=True)\n\n cronometro_ordem = models.TimeField(\n verbose_name=_('Cron\u00f4metro da Ordem'),\n blank=True,\n null=True)\n\n mostrar_brasao_painel = models.BooleanField(\n default=False,\n verbose_name=_('Mostrar bras\u00e3o da Casa no painel?'))\n\n class Meta:\n verbose_name = _('Configura\u00e7\u00f5es da Aplica\u00e7\u00e3o')\n verbose_name_plural = _('Configura\u00e7\u00f5es da Aplica\u00e7\u00e3o')\n permissions = (\n ('menu_sistemas', _('Renderizar Menu Sistemas')),\n ('view_tabelas_auxiliares', _('Visualizar Tabelas Auxiliares')),\n )\n\n @classmethod\n def attr(cls, attr):\n config = AppConfig.objects.first()\n\n if not config:\n return ''\n\n return getattr(config, attr)\n\n def __str__(self):\n return _('Configura\u00e7\u00f5es da Aplica\u00e7\u00e3o - %(id)s') % {\n 'id': self.id}\n\n\[email protected]()\nclass TipoAutor(models.Model):\n descricao = models.CharField(max_length=50, verbose_name=_('Descri\u00e7\u00e3o'))\n\n content_type = models.OneToOneField(\n ContentType,\n null=True, default=None,\n verbose_name=_('Modelagem no SAPL'))\n\n class Meta:\n ordering = ['descricao']\n verbose_name = _('Tipo de Autor')\n verbose_name_plural = _('Tipos de Autor')\n\n def __str__(self):\n return self.descricao\n\n\[email protected]()\nclass Autor(models.Model):\n\n user = models.OneToOneField(get_settings_auth_user_model(),\n on_delete=models.SET_NULL,\n null=True)\n\n tipo = models.ForeignKey(TipoAutor, verbose_name=_('Tipo do Autor'))\n\n content_type = models.ForeignKey(\n ContentType,\n blank=True, null=True, default=None)\n object_id = models.PositiveIntegerField(\n blank=True, null=True, default=None)\n autor_related = GenericForeignKey('content_type', 'object_id')\n\n nome = models.CharField(\n max_length=60, blank=True, verbose_name=_('Nome do Autor'))\n\n cargo = models.CharField(max_length=50, blank=True)\n\n class Meta:\n verbose_name = 
_('Autor')\n verbose_name_plural = _('Autores')\n unique_together = (('content_type', 'object_id'), )\n ordering = ('nome',)\n\n def __str__(self):\n\n if self.autor_related:\n return str(self.autor_related)\n else:\n if str(self.cargo):\n return _('%(nome)s - %(cargo)s') % {\n 'nome': self.nome, 'cargo': self.cargo}\n else:\n return str(self.nome)\n \"\"\"if str(self.tipo) == 'Parlamentar' and self.parlamentar:\n return self.parlamentar.nome_parlamentar\n elif str(self.tipo) == 'Comissao' and self.comissao:\n return str(self.comissao)\n elif str(self.tipo) == 'Partido' and self.partido:\n return str(self.partido)\n else:\n \"\"\"\n\n\ndef cria_models_tipo_autor(app_config, verbosity=2, interactive=True,\n using=DEFAULT_DB_ALIAS, **kwargs):\n\n models = models_with_gr_for_model(Autor)\n\n print(\"\\n\\033[93m\\033[1m{}\\033[0m\".format(\n _('Atualizando registros TipoAutor do SAPL:')))\n for model in models:\n content_type = ContentType.objects.get_for_model(model)\n tipo_autor = TipoAutor.objects.filter(\n content_type=content_type.id).exists()\n\n if tipo_autor:\n msg1 = \"Carga de {} n\u00e3o efetuada.\".format(\n TipoAutor._meta.verbose_name)\n msg2 = \" J\u00e1 Existe um {} {} relacionado...\".format(\n TipoAutor._meta.verbose_name,\n model._meta.verbose_name)\n msg = \" {}{}\".format(msg1, msg2)\n else:\n novo_autor = TipoAutor()\n novo_autor.content_type_id = content_type.id\n novo_autor.descricao = model._meta.verbose_name\n novo_autor.save()\n msg1 = \"Carga de {} efetuada.\".format(\n TipoAutor._meta.verbose_name)\n msg2 = \" {} {} criado...\".format(\n TipoAutor._meta.verbose_name, content_type.model)\n msg = \" {}{}\".format(msg1, msg2)\n print(msg)\n # Disconecta fun\u00e7\u00e3o para evitar a chamada repetidas vezes.\n post_migrate.disconnect(receiver=cria_models_tipo_autor)\n\npost_migrate.connect(receiver=cria_models_tipo_autor)\n", "path": "sapl/base/models.py"}]}
2,965
586
gh_patches_debug_1438
rasdani/github-patches
git_diff
matrix-org__synapse-7630
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Update SSO UIAuth login identifier to m.login.sso I'm not sure when exactly we do this, but [MSC2454](https://github.com/matrix-org/matrix-doc/pull/2454) was merged which identified `m.login.sso` as the identifier for SSO + UIAuth. Synapse is currently using `org.matrix.login.sso`. At some point we should switch to the standardized version. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `synapse/api/constants.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 # Copyright 2014-2016 OpenMarket Ltd 3 # Copyright 2017 Vector Creations Ltd 4 # Copyright 2018-2019 New Vector Ltd 5 # Copyright 2019 The Matrix.org Foundation C.I.C. 6 # 7 # Licensed under the Apache License, Version 2.0 (the "License"); 8 # you may not use this file except in compliance with the License. 9 # You may obtain a copy of the License at 10 # 11 # http://www.apache.org/licenses/LICENSE-2.0 12 # 13 # Unless required by applicable law or agreed to in writing, software 14 # distributed under the License is distributed on an "AS IS" BASIS, 15 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 16 # See the License for the specific language governing permissions and 17 # limitations under the License. 18 19 """Contains constants from the specification.""" 20 21 # the "depth" field on events is limited to 2**63 - 1 22 MAX_DEPTH = 2 ** 63 - 1 23 24 # the maximum length for a room alias is 255 characters 25 MAX_ALIAS_LENGTH = 255 26 27 # the maximum length for a user id is 255 characters 28 MAX_USERID_LENGTH = 255 29 30 31 class Membership(object): 32 33 """Represents the membership states of a user in a room.""" 34 35 INVITE = "invite" 36 JOIN = "join" 37 KNOCK = "knock" 38 LEAVE = "leave" 39 BAN = "ban" 40 LIST = (INVITE, JOIN, KNOCK, LEAVE, BAN) 41 42 43 class PresenceState(object): 44 """Represents the presence state of a user.""" 45 46 OFFLINE = "offline" 47 UNAVAILABLE = "unavailable" 48 ONLINE = "online" 49 50 51 class JoinRules(object): 52 PUBLIC = "public" 53 KNOCK = "knock" 54 INVITE = "invite" 55 PRIVATE = "private" 56 57 58 class LoginType(object): 59 PASSWORD = "m.login.password" 60 EMAIL_IDENTITY = "m.login.email.identity" 61 MSISDN = "m.login.msisdn" 62 RECAPTCHA = "m.login.recaptcha" 63 TERMS = "m.login.terms" 64 SSO = "org.matrix.login.sso" 65 DUMMY = "m.login.dummy" 66 67 # Only for C/S API v1 68 APPLICATION_SERVICE = "m.login.application_service" 69 SHARED_SECRET = "org.matrix.login.shared_secret" 70 71 72 class EventTypes(object): 73 Member = "m.room.member" 74 Create = "m.room.create" 75 Tombstone = "m.room.tombstone" 76 JoinRules = "m.room.join_rules" 77 PowerLevels = "m.room.power_levels" 78 Aliases = "m.room.aliases" 79 Redaction = "m.room.redaction" 80 ThirdPartyInvite = "m.room.third_party_invite" 81 RelatedGroups = "m.room.related_groups" 82 83 RoomHistoryVisibility = "m.room.history_visibility" 84 CanonicalAlias = "m.room.canonical_alias" 85 Encrypted = "m.room.encrypted" 86 RoomAvatar = "m.room.avatar" 87 RoomEncryption = "m.room.encryption" 88 GuestAccess = "m.room.guest_access" 89 90 # These are used for validation 91 Message = "m.room.message" 92 Topic = "m.room.topic" 93 Name = "m.room.name" 94 95 ServerACL = "m.room.server_acl" 96 Pinned = "m.room.pinned_events" 97 98 Retention = "m.room.retention" 99 100 Presence = "m.presence" 101 102 103 class RejectedReason(object): 
104 AUTH_ERROR = "auth_error" 105 106 107 class RoomCreationPreset(object): 108 PRIVATE_CHAT = "private_chat" 109 PUBLIC_CHAT = "public_chat" 110 TRUSTED_PRIVATE_CHAT = "trusted_private_chat" 111 112 113 class ThirdPartyEntityKind(object): 114 USER = "user" 115 LOCATION = "location" 116 117 118 ServerNoticeMsgType = "m.server_notice" 119 ServerNoticeLimitReached = "m.server_notice.usage_limit_reached" 120 121 122 class UserTypes(object): 123 """Allows for user type specific behaviour. With the benefit of hindsight 124 'admin' and 'guest' users should also be UserTypes. Normal users are type None 125 """ 126 127 SUPPORT = "support" 128 BOT = "bot" 129 ALL_USER_TYPES = (SUPPORT, BOT) 130 131 132 class RelationTypes(object): 133 """The types of relations known to this server. 134 """ 135 136 ANNOTATION = "m.annotation" 137 REPLACE = "m.replace" 138 REFERENCE = "m.reference" 139 140 141 class LimitBlockingTypes(object): 142 """Reasons that a server may be blocked""" 143 144 MONTHLY_ACTIVE_USER = "monthly_active_user" 145 HS_DISABLED = "hs_disabled" 146 147 148 class EventContentFields(object): 149 """Fields found in events' content, regardless of type.""" 150 151 # Labels for the event, cf https://github.com/matrix-org/matrix-doc/pull/2326 152 LABELS = "org.matrix.labels" 153 154 # Timestamp to delete the event after 155 # cf https://github.com/matrix-org/matrix-doc/pull/2228 156 SELF_DESTRUCT_AFTER = "org.matrix.self_destruct_after" 157 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/synapse/api/constants.py b/synapse/api/constants.py --- a/synapse/api/constants.py +++ b/synapse/api/constants.py @@ -61,7 +61,7 @@ MSISDN = "m.login.msisdn" RECAPTCHA = "m.login.recaptcha" TERMS = "m.login.terms" - SSO = "org.matrix.login.sso" + SSO = "m.login.sso" DUMMY = "m.login.dummy" # Only for C/S API v1
{"golden_diff": "diff --git a/synapse/api/constants.py b/synapse/api/constants.py\n--- a/synapse/api/constants.py\n+++ b/synapse/api/constants.py\n@@ -61,7 +61,7 @@\n MSISDN = \"m.login.msisdn\"\n RECAPTCHA = \"m.login.recaptcha\"\n TERMS = \"m.login.terms\"\n- SSO = \"org.matrix.login.sso\"\n+ SSO = \"m.login.sso\"\n DUMMY = \"m.login.dummy\"\n \n # Only for C/S API v1\n", "issue": "Update SSO UIAuth login identifier to m.login.sso\nI'm not sure when exactly we do this, but [MSC2454](https://github.com/matrix-org/matrix-doc/pull/2454) was merged which identified `m.login.sso` as the identifier for SSO + UIAuth. Synapse is currently using `org.matrix.login.sso`. At some point we should switch to the standardized version.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2014-2016 OpenMarket Ltd\n# Copyright 2017 Vector Creations Ltd\n# Copyright 2018-2019 New Vector Ltd\n# Copyright 2019 The Matrix.org Foundation C.I.C.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Contains constants from the specification.\"\"\"\n\n# the \"depth\" field on events is limited to 2**63 - 1\nMAX_DEPTH = 2 ** 63 - 1\n\n# the maximum length for a room alias is 255 characters\nMAX_ALIAS_LENGTH = 255\n\n# the maximum length for a user id is 255 characters\nMAX_USERID_LENGTH = 255\n\n\nclass Membership(object):\n\n \"\"\"Represents the membership states of a user in a room.\"\"\"\n\n INVITE = \"invite\"\n JOIN = \"join\"\n KNOCK = \"knock\"\n LEAVE = \"leave\"\n BAN = \"ban\"\n LIST = (INVITE, JOIN, KNOCK, LEAVE, BAN)\n\n\nclass PresenceState(object):\n \"\"\"Represents the presence state of a user.\"\"\"\n\n OFFLINE = \"offline\"\n UNAVAILABLE = \"unavailable\"\n ONLINE = \"online\"\n\n\nclass JoinRules(object):\n PUBLIC = \"public\"\n KNOCK = \"knock\"\n INVITE = \"invite\"\n PRIVATE = \"private\"\n\n\nclass LoginType(object):\n PASSWORD = \"m.login.password\"\n EMAIL_IDENTITY = \"m.login.email.identity\"\n MSISDN = \"m.login.msisdn\"\n RECAPTCHA = \"m.login.recaptcha\"\n TERMS = \"m.login.terms\"\n SSO = \"org.matrix.login.sso\"\n DUMMY = \"m.login.dummy\"\n\n # Only for C/S API v1\n APPLICATION_SERVICE = \"m.login.application_service\"\n SHARED_SECRET = \"org.matrix.login.shared_secret\"\n\n\nclass EventTypes(object):\n Member = \"m.room.member\"\n Create = \"m.room.create\"\n Tombstone = \"m.room.tombstone\"\n JoinRules = \"m.room.join_rules\"\n PowerLevels = \"m.room.power_levels\"\n Aliases = \"m.room.aliases\"\n Redaction = \"m.room.redaction\"\n ThirdPartyInvite = \"m.room.third_party_invite\"\n RelatedGroups = \"m.room.related_groups\"\n\n RoomHistoryVisibility = \"m.room.history_visibility\"\n CanonicalAlias = \"m.room.canonical_alias\"\n Encrypted = \"m.room.encrypted\"\n RoomAvatar = \"m.room.avatar\"\n RoomEncryption = \"m.room.encryption\"\n GuestAccess = \"m.room.guest_access\"\n\n # These are used for validation\n Message = \"m.room.message\"\n Topic = \"m.room.topic\"\n Name = \"m.room.name\"\n\n ServerACL = \"m.room.server_acl\"\n Pinned = 
\"m.room.pinned_events\"\n\n Retention = \"m.room.retention\"\n\n Presence = \"m.presence\"\n\n\nclass RejectedReason(object):\n AUTH_ERROR = \"auth_error\"\n\n\nclass RoomCreationPreset(object):\n PRIVATE_CHAT = \"private_chat\"\n PUBLIC_CHAT = \"public_chat\"\n TRUSTED_PRIVATE_CHAT = \"trusted_private_chat\"\n\n\nclass ThirdPartyEntityKind(object):\n USER = \"user\"\n LOCATION = \"location\"\n\n\nServerNoticeMsgType = \"m.server_notice\"\nServerNoticeLimitReached = \"m.server_notice.usage_limit_reached\"\n\n\nclass UserTypes(object):\n \"\"\"Allows for user type specific behaviour. With the benefit of hindsight\n 'admin' and 'guest' users should also be UserTypes. Normal users are type None\n \"\"\"\n\n SUPPORT = \"support\"\n BOT = \"bot\"\n ALL_USER_TYPES = (SUPPORT, BOT)\n\n\nclass RelationTypes(object):\n \"\"\"The types of relations known to this server.\n \"\"\"\n\n ANNOTATION = \"m.annotation\"\n REPLACE = \"m.replace\"\n REFERENCE = \"m.reference\"\n\n\nclass LimitBlockingTypes(object):\n \"\"\"Reasons that a server may be blocked\"\"\"\n\n MONTHLY_ACTIVE_USER = \"monthly_active_user\"\n HS_DISABLED = \"hs_disabled\"\n\n\nclass EventContentFields(object):\n \"\"\"Fields found in events' content, regardless of type.\"\"\"\n\n # Labels for the event, cf https://github.com/matrix-org/matrix-doc/pull/2326\n LABELS = \"org.matrix.labels\"\n\n # Timestamp to delete the event after\n # cf https://github.com/matrix-org/matrix-doc/pull/2228\n SELF_DESTRUCT_AFTER = \"org.matrix.self_destruct_after\"\n", "path": "synapse/api/constants.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2014-2016 OpenMarket Ltd\n# Copyright 2017 Vector Creations Ltd\n# Copyright 2018-2019 New Vector Ltd\n# Copyright 2019 The Matrix.org Foundation C.I.C.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Contains constants from the specification.\"\"\"\n\n# the \"depth\" field on events is limited to 2**63 - 1\nMAX_DEPTH = 2 ** 63 - 1\n\n# the maximum length for a room alias is 255 characters\nMAX_ALIAS_LENGTH = 255\n\n# the maximum length for a user id is 255 characters\nMAX_USERID_LENGTH = 255\n\n\nclass Membership(object):\n\n \"\"\"Represents the membership states of a user in a room.\"\"\"\n\n INVITE = \"invite\"\n JOIN = \"join\"\n KNOCK = \"knock\"\n LEAVE = \"leave\"\n BAN = \"ban\"\n LIST = (INVITE, JOIN, KNOCK, LEAVE, BAN)\n\n\nclass PresenceState(object):\n \"\"\"Represents the presence state of a user.\"\"\"\n\n OFFLINE = \"offline\"\n UNAVAILABLE = \"unavailable\"\n ONLINE = \"online\"\n\n\nclass JoinRules(object):\n PUBLIC = \"public\"\n KNOCK = \"knock\"\n INVITE = \"invite\"\n PRIVATE = \"private\"\n\n\nclass LoginType(object):\n PASSWORD = \"m.login.password\"\n EMAIL_IDENTITY = \"m.login.email.identity\"\n MSISDN = \"m.login.msisdn\"\n RECAPTCHA = \"m.login.recaptcha\"\n TERMS = \"m.login.terms\"\n SSO = \"m.login.sso\"\n DUMMY = \"m.login.dummy\"\n\n # Only for C/S API v1\n APPLICATION_SERVICE = \"m.login.application_service\"\n SHARED_SECRET = 
\"org.matrix.login.shared_secret\"\n\n\nclass EventTypes(object):\n Member = \"m.room.member\"\n Create = \"m.room.create\"\n Tombstone = \"m.room.tombstone\"\n JoinRules = \"m.room.join_rules\"\n PowerLevels = \"m.room.power_levels\"\n Aliases = \"m.room.aliases\"\n Redaction = \"m.room.redaction\"\n ThirdPartyInvite = \"m.room.third_party_invite\"\n RelatedGroups = \"m.room.related_groups\"\n\n RoomHistoryVisibility = \"m.room.history_visibility\"\n CanonicalAlias = \"m.room.canonical_alias\"\n Encrypted = \"m.room.encrypted\"\n RoomAvatar = \"m.room.avatar\"\n RoomEncryption = \"m.room.encryption\"\n GuestAccess = \"m.room.guest_access\"\n\n # These are used for validation\n Message = \"m.room.message\"\n Topic = \"m.room.topic\"\n Name = \"m.room.name\"\n\n ServerACL = \"m.room.server_acl\"\n Pinned = \"m.room.pinned_events\"\n\n Retention = \"m.room.retention\"\n\n Presence = \"m.presence\"\n\n\nclass RejectedReason(object):\n AUTH_ERROR = \"auth_error\"\n\n\nclass RoomCreationPreset(object):\n PRIVATE_CHAT = \"private_chat\"\n PUBLIC_CHAT = \"public_chat\"\n TRUSTED_PRIVATE_CHAT = \"trusted_private_chat\"\n\n\nclass ThirdPartyEntityKind(object):\n USER = \"user\"\n LOCATION = \"location\"\n\n\nServerNoticeMsgType = \"m.server_notice\"\nServerNoticeLimitReached = \"m.server_notice.usage_limit_reached\"\n\n\nclass UserTypes(object):\n \"\"\"Allows for user type specific behaviour. With the benefit of hindsight\n 'admin' and 'guest' users should also be UserTypes. Normal users are type None\n \"\"\"\n\n SUPPORT = \"support\"\n BOT = \"bot\"\n ALL_USER_TYPES = (SUPPORT, BOT)\n\n\nclass RelationTypes(object):\n \"\"\"The types of relations known to this server.\n \"\"\"\n\n ANNOTATION = \"m.annotation\"\n REPLACE = \"m.replace\"\n REFERENCE = \"m.reference\"\n\n\nclass LimitBlockingTypes(object):\n \"\"\"Reasons that a server may be blocked\"\"\"\n\n MONTHLY_ACTIVE_USER = \"monthly_active_user\"\n HS_DISABLED = \"hs_disabled\"\n\n\nclass EventContentFields(object):\n \"\"\"Fields found in events' content, regardless of type.\"\"\"\n\n # Labels for the event, cf https://github.com/matrix-org/matrix-doc/pull/2326\n LABELS = \"org.matrix.labels\"\n\n # Timestamp to delete the event after\n # cf https://github.com/matrix-org/matrix-doc/pull/2228\n SELF_DESTRUCT_AFTER = \"org.matrix.self_destruct_after\"\n", "path": "synapse/api/constants.py"}]}
1,848
124
gh_patches_debug_23177
rasdani/github-patches
git_diff
searxng__searxng-2006
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [Feature] Raise error when the engine doesn't exist Raise an error when an engine doesn't exist and there is for example an old parameter that disabled a now removed engine. See https://github.com/searxng/searxng/issues/1951#issuecomment-1324872496 for more info. ----- <!-- PLEASE FILL THESE FIELDS, IT REALLY HELPS THE MAINTAINERS OF SearXNG --> **Version of SearXNG, commit number if you are using on master branch and stipulate if you forked SearXNG** Commit https://github.com/searxng/searxng/commit/3a765113eefc7ac0b1ee94e9a0ac0db8069ec336 **How did you install SearXNG?** 1. Git clone 2. pip install -r requirements.txt 3. `python -m searx.webapp` **How To Reproduce** Run searxng with this configuration: ```yaml engines: - name: duckduckgo disabled: true general: debug: true server: secret_key: secret use_default_settings: true ``` And you will get this error: ``` Traceback (most recent call last): File "/opt/python/latest/lib/python3.10/runpy.py", line 196, in _run_module_as_main return _run_code(code, main_globals, None, File "/opt/python/latest/lib/python3.10/runpy.py", line 86, in _run_code exec(code, run_globals) File "/workspaces/searx-custom/searxng2/searx/webapp.py", line 1390, in <module> search_initialize(enable_checker=True, check_network=True, enable_metrics=settings['general']['enable_metrics']) File "/workspaces/searx-custom/searxng2/searx/search/__init__.py", line 30, in initialize load_engines(settings_engines) File "/workspaces/searx-custom/searxng2/searx/engines/__init__.py", line 298, in load_engines engine = load_engine(engine_data) File "/workspaces/searx-custom/searxng2/searx/engines/__init__.py", line 121, in load_engine engine_module = engine_data['engine'] KeyError: 'engine' ``` **Expected behavior** Searxng should work fine. **Screenshots & Logs** <!-- If applicable, add screenshots, logs to help explain your problem. --> **Additional context** <!-- Add any other context about the problem here. --> --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `searx/engines/__init__.py` Content: ``` 1 # SPDX-License-Identifier: AGPL-3.0-or-later 2 # lint: pylint 3 """This module implements the engine loader. 4 5 Load and initialize the ``engines``, see :py:func:`load_engines` and register 6 :py:obj:`engine_shortcuts`. 
7 8 usage:: 9 10 load_engines( settings['engines'] ) 11 12 """ 13 14 import sys 15 import copy 16 from typing import Dict, List, Optional 17 18 from os.path import realpath, dirname 19 from babel.localedata import locale_identifiers 20 from searx import logger, settings 21 from searx.data import ENGINES_LANGUAGES 22 from searx.network import get 23 from searx.utils import load_module, match_language, gen_useragent 24 25 26 logger = logger.getChild('engines') 27 ENGINE_DIR = dirname(realpath(__file__)) 28 BABEL_LANGS = [ 29 lang_parts[0] + '-' + lang_parts[-1] if len(lang_parts) > 1 else lang_parts[0] 30 for lang_parts in (lang_code.split('_') for lang_code in locale_identifiers()) 31 ] 32 ENGINE_DEFAULT_ARGS = { 33 "engine_type": "online", 34 "inactive": False, 35 "disabled": False, 36 "timeout": settings["outgoing"]["request_timeout"], 37 "shortcut": "-", 38 "categories": ["general"], 39 "supported_languages": [], 40 "language_aliases": {}, 41 "paging": False, 42 "safesearch": False, 43 "time_range_support": False, 44 "enable_http": False, 45 "using_tor_proxy": False, 46 "display_error_messages": True, 47 "send_accept_language_header": False, 48 "tokens": [], 49 "about": {}, 50 } 51 # set automatically when an engine does not have any tab category 52 OTHER_CATEGORY = 'other' 53 54 55 class Engine: # pylint: disable=too-few-public-methods 56 """This class is currently never initialized and only used for type hinting.""" 57 58 name: str 59 engine: str 60 shortcut: str 61 categories: List[str] 62 supported_languages: List[str] 63 about: dict 64 inactive: bool 65 disabled: bool 66 language_support: bool 67 paging: bool 68 safesearch: bool 69 time_range_support: bool 70 timeout: float 71 72 73 # Defaults for the namespace of an engine module, see :py:func:`load_engine` 74 75 categories = {'general': []} 76 engines: Dict[str, Engine] = {} 77 engine_shortcuts = {} 78 """Simple map of registered *shortcuts* to name of the engine (or ``None``). 79 80 :: 81 82 engine_shortcuts[engine.shortcut] = engine.name 83 84 :meta hide-value: 85 """ 86 87 88 def load_engine(engine_data: dict) -> Optional[Engine]: 89 """Load engine from ``engine_data``. 90 91 :param dict engine_data: Attributes from YAML ``settings:engines/<engine>`` 92 :return: initialized namespace of the ``<engine>``. 93 94 1. create a namespace and load module of the ``<engine>`` 95 2. update namespace with the defaults from :py:obj:`ENGINE_DEFAULT_ARGS` 96 3. update namespace with values from ``engine_data`` 97 98 If engine *is active*, return namespace of the engine, otherwise return 99 ``None``. 
100 101 This function also returns ``None`` if initialization of the namespace fails 102 for one of the following reasons: 103 104 - engine name contains underscore 105 - engine name is not lowercase 106 - required attribute is not set :py:func:`is_missing_required_attributes` 107 108 """ 109 110 engine_name = engine_data['name'] 111 if '_' in engine_name: 112 logger.error('Engine name contains underscore: "{}"'.format(engine_name)) 113 return None 114 115 if engine_name.lower() != engine_name: 116 logger.warn('Engine name is not lowercase: "{}", converting to lowercase'.format(engine_name)) 117 engine_name = engine_name.lower() 118 engine_data['name'] = engine_name 119 120 # load_module 121 engine_module = engine_data['engine'] 122 try: 123 engine = load_module(engine_module + '.py', ENGINE_DIR) 124 except (SyntaxError, KeyboardInterrupt, SystemExit, SystemError, ImportError, RuntimeError): 125 logger.exception('Fatal exception in engine "{}"'.format(engine_module)) 126 sys.exit(1) 127 except BaseException: 128 logger.exception('Cannot load engine "{}"'.format(engine_module)) 129 return None 130 131 update_engine_attributes(engine, engine_data) 132 set_language_attributes(engine) 133 update_attributes_for_tor(engine) 134 135 if not is_engine_active(engine): 136 return None 137 138 if is_missing_required_attributes(engine): 139 return None 140 141 set_loggers(engine, engine_name) 142 143 if not any(cat in settings['categories_as_tabs'] for cat in engine.categories): 144 engine.categories.append(OTHER_CATEGORY) 145 146 return engine 147 148 149 def set_loggers(engine, engine_name): 150 # set the logger for engine 151 engine.logger = logger.getChild(engine_name) 152 # the engine may have load some other engines 153 # may sure the logger is initialized 154 # use sys.modules.copy() to avoid "RuntimeError: dictionary changed size during iteration" 155 # see https://github.com/python/cpython/issues/89516 156 # and https://docs.python.org/3.10/library/sys.html#sys.modules 157 modules = sys.modules.copy() 158 for module_name, module in modules.items(): 159 if ( 160 module_name.startswith("searx.engines") 161 and module_name != "searx.engines.__init__" 162 and not hasattr(module, "logger") 163 ): 164 module_engine_name = module_name.split(".")[-1] 165 module.logger = logger.getChild(module_engine_name) 166 167 168 def update_engine_attributes(engine: Engine, engine_data): 169 # set engine attributes from engine_data 170 for param_name, param_value in engine_data.items(): 171 if param_name == 'categories': 172 if isinstance(param_value, str): 173 param_value = list(map(str.strip, param_value.split(','))) 174 engine.categories = param_value 175 elif hasattr(engine, 'about') and param_name == 'about': 176 engine.about = {**engine.about, **engine_data['about']} 177 else: 178 setattr(engine, param_name, param_value) 179 180 # set default attributes 181 for arg_name, arg_value in ENGINE_DEFAULT_ARGS.items(): 182 if not hasattr(engine, arg_name): 183 setattr(engine, arg_name, copy.deepcopy(arg_value)) 184 185 186 def set_language_attributes(engine: Engine): 187 # assign supported languages from json file 188 if engine.name in ENGINES_LANGUAGES: 189 engine.supported_languages = ENGINES_LANGUAGES[engine.name] 190 191 elif engine.engine in ENGINES_LANGUAGES: 192 # The key of the dictionary ENGINES_LANGUAGES is the *engine name* 193 # configured in settings.xml. 
When multiple engines are configured in 194 # settings.yml to use the same origin engine (python module) these 195 # additional engines can use the languages from the origin engine. 196 # For this use the configured ``engine: ...`` from settings.yml 197 engine.supported_languages = ENGINES_LANGUAGES[engine.engine] 198 199 if hasattr(engine, 'language'): 200 # For an engine, when there is `language: ...` in the YAML settings, the 201 # engine supports only one language, in this case 202 # engine.supported_languages should contains this value defined in 203 # settings.yml 204 if engine.language not in engine.supported_languages: 205 raise ValueError( 206 "settings.yml - engine: '%s' / language: '%s' not supported" % (engine.name, engine.language) 207 ) 208 209 if isinstance(engine.supported_languages, dict): 210 engine.supported_languages = {engine.language: engine.supported_languages[engine.language]} 211 else: 212 engine.supported_languages = [engine.language] 213 214 # find custom aliases for non standard language codes 215 for engine_lang in engine.supported_languages: 216 iso_lang = match_language(engine_lang, BABEL_LANGS, fallback=None) 217 if ( 218 iso_lang 219 and iso_lang != engine_lang 220 and not engine_lang.startswith(iso_lang) 221 and iso_lang not in engine.supported_languages 222 ): 223 engine.language_aliases[iso_lang] = engine_lang 224 225 # language_support 226 engine.language_support = len(engine.supported_languages) > 0 227 228 # assign language fetching method if auxiliary method exists 229 if hasattr(engine, '_fetch_supported_languages'): 230 headers = { 231 'User-Agent': gen_useragent(), 232 'Accept-Language': "en-US,en;q=0.5", # bing needs to set the English language 233 } 234 engine.fetch_supported_languages = ( 235 # pylint: disable=protected-access 236 lambda: engine._fetch_supported_languages(get(engine.supported_languages_url, headers=headers)) 237 ) 238 239 240 def update_attributes_for_tor(engine: Engine) -> bool: 241 if using_tor_proxy(engine) and hasattr(engine, 'onion_url'): 242 engine.search_url = engine.onion_url + getattr(engine, 'search_path', '') 243 engine.timeout += settings['outgoing'].get('extra_proxy_timeout', 0) 244 245 246 def is_missing_required_attributes(engine): 247 """An attribute is required when its name doesn't start with ``_`` (underline). 248 Required attributes must not be ``None``. 
249 250 """ 251 missing = False 252 for engine_attr in dir(engine): 253 if not engine_attr.startswith('_') and getattr(engine, engine_attr) is None: 254 logger.error('Missing engine config attribute: "{0}.{1}"'.format(engine.name, engine_attr)) 255 missing = True 256 return missing 257 258 259 def using_tor_proxy(engine: Engine): 260 """Return True if the engine configuration declares to use Tor.""" 261 return settings['outgoing'].get('using_tor_proxy') or getattr(engine, 'using_tor_proxy', False) 262 263 264 def is_engine_active(engine: Engine): 265 # check if engine is inactive 266 if engine.inactive is True: 267 return False 268 269 # exclude onion engines if not using tor 270 if 'onions' in engine.categories and not using_tor_proxy(engine): 271 return False 272 273 return True 274 275 276 def register_engine(engine: Engine): 277 if engine.name in engines: 278 logger.error('Engine config error: ambiguous name: {0}'.format(engine.name)) 279 sys.exit(1) 280 engines[engine.name] = engine 281 282 if engine.shortcut in engine_shortcuts: 283 logger.error('Engine config error: ambiguous shortcut: {0}'.format(engine.shortcut)) 284 sys.exit(1) 285 engine_shortcuts[engine.shortcut] = engine.name 286 287 for category_name in engine.categories: 288 categories.setdefault(category_name, []).append(engine) 289 290 291 def load_engines(engine_list): 292 """usage: ``engine_list = settings['engines']``""" 293 engines.clear() 294 engine_shortcuts.clear() 295 categories.clear() 296 categories['general'] = [] 297 for engine_data in engine_list: 298 engine = load_engine(engine_data) 299 if engine: 300 register_engine(engine) 301 return engines 302 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/searx/engines/__init__.py b/searx/engines/__init__.py --- a/searx/engines/__init__.py +++ b/searx/engines/__init__.py @@ -106,8 +106,12 @@ - required attribute is not set :py:func:`is_missing_required_attributes` """ + # pylint: disable=too-many-return-statements - engine_name = engine_data['name'] + engine_name = engine_data.get('name') + if engine_name is None: + logger.error('An engine does not have a "name" field') + return None if '_' in engine_name: logger.error('Engine name contains underscore: "{}"'.format(engine_name)) return None @@ -118,7 +122,10 @@ engine_data['name'] = engine_name # load_module - engine_module = engine_data['engine'] + engine_module = engine_data.get('engine') + if engine_module is None: + logger.error('The "engine" field is missing for the engine named "{}"'.format(engine_name)) + return None try: engine = load_module(engine_module + '.py', ENGINE_DIR) except (SyntaxError, KeyboardInterrupt, SystemExit, SystemError, ImportError, RuntimeError):
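Below is a minimal, self-contained sketch of the guard pattern this patch introduces: validate the per-engine settings dict before trying to import its module, and return `None` with a logged error instead of letting a `KeyError` crash startup. The `load_engine_settings` name and the plain `logging` setup are illustrative assumptions for this sketch, not part of the actual SearXNG code.

```python
# Illustrative sketch of the validation added by the patch (assumed names, not SearXNG's API).
import logging

logger = logging.getLogger("engines")


def load_engine_settings(engine_data: dict):
    """Return the validated (name, module) pair, or None if the entry is unusable."""
    engine_name = engine_data.get("name")
    if engine_name is None:
        # An entry with no name at all cannot be registered.
        logger.error('An engine does not have a "name" field')
        return None

    engine_module = engine_data.get("engine")
    if engine_module is None:
        # e.g. "- name: duckduckgo / disabled: true" overriding a default engine:
        # there is no module to import, so skip the entry instead of raising KeyError.
        logger.error('The "engine" field is missing for the engine named "%s"', engine_name)
        return None

    return engine_name, engine_module


if __name__ == "__main__":
    logging.basicConfig(level=logging.ERROR)
    print(load_engine_settings({"name": "duckduckgo", "disabled": True}))  # None, error logged
    print(load_engine_settings({"name": "wiki", "engine": "wikipedia"}))   # ('wiki', 'wikipedia')
```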
{"golden_diff": "diff --git a/searx/engines/__init__.py b/searx/engines/__init__.py\n--- a/searx/engines/__init__.py\n+++ b/searx/engines/__init__.py\n@@ -106,8 +106,12 @@\n - required attribute is not set :py:func:`is_missing_required_attributes`\n \n \"\"\"\n+ # pylint: disable=too-many-return-statements\n \n- engine_name = engine_data['name']\n+ engine_name = engine_data.get('name')\n+ if engine_name is None:\n+ logger.error('An engine does not have a \"name\" field')\n+ return None\n if '_' in engine_name:\n logger.error('Engine name contains underscore: \"{}\"'.format(engine_name))\n return None\n@@ -118,7 +122,10 @@\n engine_data['name'] = engine_name\n \n # load_module\n- engine_module = engine_data['engine']\n+ engine_module = engine_data.get('engine')\n+ if engine_module is None:\n+ logger.error('The \"engine\" field is missing for the engine named \"{}\"'.format(engine_name))\n+ return None\n try:\n engine = load_module(engine_module + '.py', ENGINE_DIR)\n except (SyntaxError, KeyboardInterrupt, SystemExit, SystemError, ImportError, RuntimeError):\n", "issue": "[Feature] Raise error when the engine doesn't exist\nRaise an error when an engine doesn't exist and there is for example an old parameter that disabled a now removed engine.\r\n\r\nSee https://github.com/searxng/searxng/issues/1951#issuecomment-1324872496 for more info.\r\n\r\n-----\r\n\r\n<!-- PLEASE FILL THESE FIELDS, IT REALLY HELPS THE MAINTAINERS OF SearXNG -->\r\n\r\n**Version of SearXNG, commit number if you are using on master branch and stipulate if you forked SearXNG**\r\n\r\nCommit https://github.com/searxng/searxng/commit/3a765113eefc7ac0b1ee94e9a0ac0db8069ec336\r\n\r\n**How did you install SearXNG?**\r\n1. Git clone\r\n2. pip install -r requirements.txt\r\n3. `python -m searx.webapp`\r\n\r\n**How To Reproduce**\r\nRun searxng with this configuration:\r\n```yaml\r\nengines:\r\n - name: duckduckgo\r\n disabled: true\r\ngeneral:\r\n debug: true\r\nserver:\r\n secret_key: secret\r\nuse_default_settings: true\r\n```\r\n\r\nAnd you will get this error:\r\n```\r\nTraceback (most recent call last):\r\n File \"/opt/python/latest/lib/python3.10/runpy.py\", line 196, in _run_module_as_main\r\n return _run_code(code, main_globals, None,\r\n File \"/opt/python/latest/lib/python3.10/runpy.py\", line 86, in _run_code\r\n exec(code, run_globals)\r\n File \"/workspaces/searx-custom/searxng2/searx/webapp.py\", line 1390, in <module>\r\n search_initialize(enable_checker=True, check_network=True, enable_metrics=settings['general']['enable_metrics'])\r\n File \"/workspaces/searx-custom/searxng2/searx/search/__init__.py\", line 30, in initialize\r\n load_engines(settings_engines)\r\n File \"/workspaces/searx-custom/searxng2/searx/engines/__init__.py\", line 298, in load_engines\r\n engine = load_engine(engine_data)\r\n File \"/workspaces/searx-custom/searxng2/searx/engines/__init__.py\", line 121, in load_engine\r\n engine_module = engine_data['engine']\r\nKeyError: 'engine'\r\n```\r\n\r\n**Expected behavior**\r\nSearxng should work fine.\r\n\r\n**Screenshots & Logs**\r\n<!-- If applicable, add screenshots, logs to help explain your problem. -->\r\n\r\n**Additional context**\r\n<!-- Add any other context about the problem here. 
-->\r\n\n", "before_files": [{"content": "# SPDX-License-Identifier: AGPL-3.0-or-later\n# lint: pylint\n\"\"\"This module implements the engine loader.\n\nLoad and initialize the ``engines``, see :py:func:`load_engines` and register\n:py:obj:`engine_shortcuts`.\n\nusage::\n\n load_engines( settings['engines'] )\n\n\"\"\"\n\nimport sys\nimport copy\nfrom typing import Dict, List, Optional\n\nfrom os.path import realpath, dirname\nfrom babel.localedata import locale_identifiers\nfrom searx import logger, settings\nfrom searx.data import ENGINES_LANGUAGES\nfrom searx.network import get\nfrom searx.utils import load_module, match_language, gen_useragent\n\n\nlogger = logger.getChild('engines')\nENGINE_DIR = dirname(realpath(__file__))\nBABEL_LANGS = [\n lang_parts[0] + '-' + lang_parts[-1] if len(lang_parts) > 1 else lang_parts[0]\n for lang_parts in (lang_code.split('_') for lang_code in locale_identifiers())\n]\nENGINE_DEFAULT_ARGS = {\n \"engine_type\": \"online\",\n \"inactive\": False,\n \"disabled\": False,\n \"timeout\": settings[\"outgoing\"][\"request_timeout\"],\n \"shortcut\": \"-\",\n \"categories\": [\"general\"],\n \"supported_languages\": [],\n \"language_aliases\": {},\n \"paging\": False,\n \"safesearch\": False,\n \"time_range_support\": False,\n \"enable_http\": False,\n \"using_tor_proxy\": False,\n \"display_error_messages\": True,\n \"send_accept_language_header\": False,\n \"tokens\": [],\n \"about\": {},\n}\n# set automatically when an engine does not have any tab category\nOTHER_CATEGORY = 'other'\n\n\nclass Engine: # pylint: disable=too-few-public-methods\n \"\"\"This class is currently never initialized and only used for type hinting.\"\"\"\n\n name: str\n engine: str\n shortcut: str\n categories: List[str]\n supported_languages: List[str]\n about: dict\n inactive: bool\n disabled: bool\n language_support: bool\n paging: bool\n safesearch: bool\n time_range_support: bool\n timeout: float\n\n\n# Defaults for the namespace of an engine module, see :py:func:`load_engine`\n\ncategories = {'general': []}\nengines: Dict[str, Engine] = {}\nengine_shortcuts = {}\n\"\"\"Simple map of registered *shortcuts* to name of the engine (or ``None``).\n\n::\n\n engine_shortcuts[engine.shortcut] = engine.name\n\n:meta hide-value:\n\"\"\"\n\n\ndef load_engine(engine_data: dict) -> Optional[Engine]:\n \"\"\"Load engine from ``engine_data``.\n\n :param dict engine_data: Attributes from YAML ``settings:engines/<engine>``\n :return: initialized namespace of the ``<engine>``.\n\n 1. create a namespace and load module of the ``<engine>``\n 2. update namespace with the defaults from :py:obj:`ENGINE_DEFAULT_ARGS`\n 3. 
update namespace with values from ``engine_data``\n\n If engine *is active*, return namespace of the engine, otherwise return\n ``None``.\n\n This function also returns ``None`` if initialization of the namespace fails\n for one of the following reasons:\n\n - engine name contains underscore\n - engine name is not lowercase\n - required attribute is not set :py:func:`is_missing_required_attributes`\n\n \"\"\"\n\n engine_name = engine_data['name']\n if '_' in engine_name:\n logger.error('Engine name contains underscore: \"{}\"'.format(engine_name))\n return None\n\n if engine_name.lower() != engine_name:\n logger.warn('Engine name is not lowercase: \"{}\", converting to lowercase'.format(engine_name))\n engine_name = engine_name.lower()\n engine_data['name'] = engine_name\n\n # load_module\n engine_module = engine_data['engine']\n try:\n engine = load_module(engine_module + '.py', ENGINE_DIR)\n except (SyntaxError, KeyboardInterrupt, SystemExit, SystemError, ImportError, RuntimeError):\n logger.exception('Fatal exception in engine \"{}\"'.format(engine_module))\n sys.exit(1)\n except BaseException:\n logger.exception('Cannot load engine \"{}\"'.format(engine_module))\n return None\n\n update_engine_attributes(engine, engine_data)\n set_language_attributes(engine)\n update_attributes_for_tor(engine)\n\n if not is_engine_active(engine):\n return None\n\n if is_missing_required_attributes(engine):\n return None\n\n set_loggers(engine, engine_name)\n\n if not any(cat in settings['categories_as_tabs'] for cat in engine.categories):\n engine.categories.append(OTHER_CATEGORY)\n\n return engine\n\n\ndef set_loggers(engine, engine_name):\n # set the logger for engine\n engine.logger = logger.getChild(engine_name)\n # the engine may have load some other engines\n # may sure the logger is initialized\n # use sys.modules.copy() to avoid \"RuntimeError: dictionary changed size during iteration\"\n # see https://github.com/python/cpython/issues/89516\n # and https://docs.python.org/3.10/library/sys.html#sys.modules\n modules = sys.modules.copy()\n for module_name, module in modules.items():\n if (\n module_name.startswith(\"searx.engines\")\n and module_name != \"searx.engines.__init__\"\n and not hasattr(module, \"logger\")\n ):\n module_engine_name = module_name.split(\".\")[-1]\n module.logger = logger.getChild(module_engine_name)\n\n\ndef update_engine_attributes(engine: Engine, engine_data):\n # set engine attributes from engine_data\n for param_name, param_value in engine_data.items():\n if param_name == 'categories':\n if isinstance(param_value, str):\n param_value = list(map(str.strip, param_value.split(',')))\n engine.categories = param_value\n elif hasattr(engine, 'about') and param_name == 'about':\n engine.about = {**engine.about, **engine_data['about']}\n else:\n setattr(engine, param_name, param_value)\n\n # set default attributes\n for arg_name, arg_value in ENGINE_DEFAULT_ARGS.items():\n if not hasattr(engine, arg_name):\n setattr(engine, arg_name, copy.deepcopy(arg_value))\n\n\ndef set_language_attributes(engine: Engine):\n # assign supported languages from json file\n if engine.name in ENGINES_LANGUAGES:\n engine.supported_languages = ENGINES_LANGUAGES[engine.name]\n\n elif engine.engine in ENGINES_LANGUAGES:\n # The key of the dictionary ENGINES_LANGUAGES is the *engine name*\n # configured in settings.xml. 
When multiple engines are configured in\n # settings.yml to use the same origin engine (python module) these\n # additional engines can use the languages from the origin engine.\n # For this use the configured ``engine: ...`` from settings.yml\n engine.supported_languages = ENGINES_LANGUAGES[engine.engine]\n\n if hasattr(engine, 'language'):\n # For an engine, when there is `language: ...` in the YAML settings, the\n # engine supports only one language, in this case\n # engine.supported_languages should contains this value defined in\n # settings.yml\n if engine.language not in engine.supported_languages:\n raise ValueError(\n \"settings.yml - engine: '%s' / language: '%s' not supported\" % (engine.name, engine.language)\n )\n\n if isinstance(engine.supported_languages, dict):\n engine.supported_languages = {engine.language: engine.supported_languages[engine.language]}\n else:\n engine.supported_languages = [engine.language]\n\n # find custom aliases for non standard language codes\n for engine_lang in engine.supported_languages:\n iso_lang = match_language(engine_lang, BABEL_LANGS, fallback=None)\n if (\n iso_lang\n and iso_lang != engine_lang\n and not engine_lang.startswith(iso_lang)\n and iso_lang not in engine.supported_languages\n ):\n engine.language_aliases[iso_lang] = engine_lang\n\n # language_support\n engine.language_support = len(engine.supported_languages) > 0\n\n # assign language fetching method if auxiliary method exists\n if hasattr(engine, '_fetch_supported_languages'):\n headers = {\n 'User-Agent': gen_useragent(),\n 'Accept-Language': \"en-US,en;q=0.5\", # bing needs to set the English language\n }\n engine.fetch_supported_languages = (\n # pylint: disable=protected-access\n lambda: engine._fetch_supported_languages(get(engine.supported_languages_url, headers=headers))\n )\n\n\ndef update_attributes_for_tor(engine: Engine) -> bool:\n if using_tor_proxy(engine) and hasattr(engine, 'onion_url'):\n engine.search_url = engine.onion_url + getattr(engine, 'search_path', '')\n engine.timeout += settings['outgoing'].get('extra_proxy_timeout', 0)\n\n\ndef is_missing_required_attributes(engine):\n \"\"\"An attribute is required when its name doesn't start with ``_`` (underline).\n Required attributes must not be ``None``.\n\n \"\"\"\n missing = False\n for engine_attr in dir(engine):\n if not engine_attr.startswith('_') and getattr(engine, engine_attr) is None:\n logger.error('Missing engine config attribute: \"{0}.{1}\"'.format(engine.name, engine_attr))\n missing = True\n return missing\n\n\ndef using_tor_proxy(engine: Engine):\n \"\"\"Return True if the engine configuration declares to use Tor.\"\"\"\n return settings['outgoing'].get('using_tor_proxy') or getattr(engine, 'using_tor_proxy', False)\n\n\ndef is_engine_active(engine: Engine):\n # check if engine is inactive\n if engine.inactive is True:\n return False\n\n # exclude onion engines if not using tor\n if 'onions' in engine.categories and not using_tor_proxy(engine):\n return False\n\n return True\n\n\ndef register_engine(engine: Engine):\n if engine.name in engines:\n logger.error('Engine config error: ambiguous name: {0}'.format(engine.name))\n sys.exit(1)\n engines[engine.name] = engine\n\n if engine.shortcut in engine_shortcuts:\n logger.error('Engine config error: ambiguous shortcut: {0}'.format(engine.shortcut))\n sys.exit(1)\n engine_shortcuts[engine.shortcut] = engine.name\n\n for category_name in engine.categories:\n categories.setdefault(category_name, []).append(engine)\n\n\ndef load_engines(engine_list):\n 
\"\"\"usage: ``engine_list = settings['engines']``\"\"\"\n engines.clear()\n engine_shortcuts.clear()\n categories.clear()\n categories['general'] = []\n for engine_data in engine_list:\n engine = load_engine(engine_data)\n if engine:\n register_engine(engine)\n return engines\n", "path": "searx/engines/__init__.py"}], "after_files": [{"content": "# SPDX-License-Identifier: AGPL-3.0-or-later\n# lint: pylint\n\"\"\"This module implements the engine loader.\n\nLoad and initialize the ``engines``, see :py:func:`load_engines` and register\n:py:obj:`engine_shortcuts`.\n\nusage::\n\n load_engines( settings['engines'] )\n\n\"\"\"\n\nimport sys\nimport copy\nfrom typing import Dict, List, Optional\n\nfrom os.path import realpath, dirname\nfrom babel.localedata import locale_identifiers\nfrom searx import logger, settings\nfrom searx.data import ENGINES_LANGUAGES\nfrom searx.network import get\nfrom searx.utils import load_module, match_language, gen_useragent\n\n\nlogger = logger.getChild('engines')\nENGINE_DIR = dirname(realpath(__file__))\nBABEL_LANGS = [\n lang_parts[0] + '-' + lang_parts[-1] if len(lang_parts) > 1 else lang_parts[0]\n for lang_parts in (lang_code.split('_') for lang_code in locale_identifiers())\n]\nENGINE_DEFAULT_ARGS = {\n \"engine_type\": \"online\",\n \"inactive\": False,\n \"disabled\": False,\n \"timeout\": settings[\"outgoing\"][\"request_timeout\"],\n \"shortcut\": \"-\",\n \"categories\": [\"general\"],\n \"supported_languages\": [],\n \"language_aliases\": {},\n \"paging\": False,\n \"safesearch\": False,\n \"time_range_support\": False,\n \"enable_http\": False,\n \"using_tor_proxy\": False,\n \"display_error_messages\": True,\n \"send_accept_language_header\": False,\n \"tokens\": [],\n \"about\": {},\n}\n# set automatically when an engine does not have any tab category\nOTHER_CATEGORY = 'other'\n\n\nclass Engine: # pylint: disable=too-few-public-methods\n \"\"\"This class is currently never initialized and only used for type hinting.\"\"\"\n\n name: str\n engine: str\n shortcut: str\n categories: List[str]\n supported_languages: List[str]\n about: dict\n inactive: bool\n disabled: bool\n language_support: bool\n paging: bool\n safesearch: bool\n time_range_support: bool\n timeout: float\n\n\n# Defaults for the namespace of an engine module, see :py:func:`load_engine`\n\ncategories = {'general': []}\nengines: Dict[str, Engine] = {}\nengine_shortcuts = {}\n\"\"\"Simple map of registered *shortcuts* to name of the engine (or ``None``).\n\n::\n\n engine_shortcuts[engine.shortcut] = engine.name\n\n:meta hide-value:\n\"\"\"\n\n\ndef load_engine(engine_data: dict) -> Optional[Engine]:\n \"\"\"Load engine from ``engine_data``.\n\n :param dict engine_data: Attributes from YAML ``settings:engines/<engine>``\n :return: initialized namespace of the ``<engine>``.\n\n 1. create a namespace and load module of the ``<engine>``\n 2. update namespace with the defaults from :py:obj:`ENGINE_DEFAULT_ARGS`\n 3. 
update namespace with values from ``engine_data``\n\n If engine *is active*, return namespace of the engine, otherwise return\n ``None``.\n\n This function also returns ``None`` if initialization of the namespace fails\n for one of the following reasons:\n\n - engine name contains underscore\n - engine name is not lowercase\n - required attribute is not set :py:func:`is_missing_required_attributes`\n\n \"\"\"\n # pylint: disable=too-many-return-statements\n\n engine_name = engine_data.get('name')\n if engine_name is None:\n logger.error('An engine does not have a \"name\" field')\n return None\n if '_' in engine_name:\n logger.error('Engine name contains underscore: \"{}\"'.format(engine_name))\n return None\n\n if engine_name.lower() != engine_name:\n logger.warn('Engine name is not lowercase: \"{}\", converting to lowercase'.format(engine_name))\n engine_name = engine_name.lower()\n engine_data['name'] = engine_name\n\n # load_module\n engine_module = engine_data.get('engine')\n if engine_module is None:\n logger.error('The \"engine\" field is missing for the engine named \"{}\"'.format(engine_name))\n return None\n try:\n engine = load_module(engine_module + '.py', ENGINE_DIR)\n except (SyntaxError, KeyboardInterrupt, SystemExit, SystemError, ImportError, RuntimeError):\n logger.exception('Fatal exception in engine \"{}\"'.format(engine_module))\n sys.exit(1)\n except BaseException:\n logger.exception('Cannot load engine \"{}\"'.format(engine_module))\n return None\n\n update_engine_attributes(engine, engine_data)\n set_language_attributes(engine)\n update_attributes_for_tor(engine)\n\n if not is_engine_active(engine):\n return None\n\n if is_missing_required_attributes(engine):\n return None\n\n set_loggers(engine, engine_name)\n\n if not any(cat in settings['categories_as_tabs'] for cat in engine.categories):\n engine.categories.append(OTHER_CATEGORY)\n\n return engine\n\n\ndef set_loggers(engine, engine_name):\n # set the logger for engine\n engine.logger = logger.getChild(engine_name)\n # the engine may have load some other engines\n # may sure the logger is initialized\n # use sys.modules.copy() to avoid \"RuntimeError: dictionary changed size during iteration\"\n # see https://github.com/python/cpython/issues/89516\n # and https://docs.python.org/3.10/library/sys.html#sys.modules\n modules = sys.modules.copy()\n for module_name, module in modules.items():\n if (\n module_name.startswith(\"searx.engines\")\n and module_name != \"searx.engines.__init__\"\n and not hasattr(module, \"logger\")\n ):\n module_engine_name = module_name.split(\".\")[-1]\n module.logger = logger.getChild(module_engine_name)\n\n\ndef update_engine_attributes(engine: Engine, engine_data):\n # set engine attributes from engine_data\n for param_name, param_value in engine_data.items():\n if param_name == 'categories':\n if isinstance(param_value, str):\n param_value = list(map(str.strip, param_value.split(',')))\n engine.categories = param_value\n elif hasattr(engine, 'about') and param_name == 'about':\n engine.about = {**engine.about, **engine_data['about']}\n else:\n setattr(engine, param_name, param_value)\n\n # set default attributes\n for arg_name, arg_value in ENGINE_DEFAULT_ARGS.items():\n if not hasattr(engine, arg_name):\n setattr(engine, arg_name, copy.deepcopy(arg_value))\n\n\ndef set_language_attributes(engine: Engine):\n # assign supported languages from json file\n if engine.name in ENGINES_LANGUAGES:\n engine.supported_languages = ENGINES_LANGUAGES[engine.name]\n\n elif engine.engine in 
ENGINES_LANGUAGES:\n # The key of the dictionary ENGINES_LANGUAGES is the *engine name*\n # configured in settings.xml. When multiple engines are configured in\n # settings.yml to use the same origin engine (python module) these\n # additional engines can use the languages from the origin engine.\n # For this use the configured ``engine: ...`` from settings.yml\n engine.supported_languages = ENGINES_LANGUAGES[engine.engine]\n\n if hasattr(engine, 'language'):\n # For an engine, when there is `language: ...` in the YAML settings, the\n # engine supports only one language, in this case\n # engine.supported_languages should contains this value defined in\n # settings.yml\n if engine.language not in engine.supported_languages:\n raise ValueError(\n \"settings.yml - engine: '%s' / language: '%s' not supported\" % (engine.name, engine.language)\n )\n\n if isinstance(engine.supported_languages, dict):\n engine.supported_languages = {engine.language: engine.supported_languages[engine.language]}\n else:\n engine.supported_languages = [engine.language]\n\n # find custom aliases for non standard language codes\n for engine_lang in engine.supported_languages:\n iso_lang = match_language(engine_lang, BABEL_LANGS, fallback=None)\n if (\n iso_lang\n and iso_lang != engine_lang\n and not engine_lang.startswith(iso_lang)\n and iso_lang not in engine.supported_languages\n ):\n engine.language_aliases[iso_lang] = engine_lang\n\n # language_support\n engine.language_support = len(engine.supported_languages) > 0\n\n # assign language fetching method if auxiliary method exists\n if hasattr(engine, '_fetch_supported_languages'):\n headers = {\n 'User-Agent': gen_useragent(),\n 'Accept-Language': \"en-US,en;q=0.5\", # bing needs to set the English language\n }\n engine.fetch_supported_languages = (\n # pylint: disable=protected-access\n lambda: engine._fetch_supported_languages(get(engine.supported_languages_url, headers=headers))\n )\n\n\ndef update_attributes_for_tor(engine: Engine) -> bool:\n if using_tor_proxy(engine) and hasattr(engine, 'onion_url'):\n engine.search_url = engine.onion_url + getattr(engine, 'search_path', '')\n engine.timeout += settings['outgoing'].get('extra_proxy_timeout', 0)\n\n\ndef is_missing_required_attributes(engine):\n \"\"\"An attribute is required when its name doesn't start with ``_`` (underline).\n Required attributes must not be ``None``.\n\n \"\"\"\n missing = False\n for engine_attr in dir(engine):\n if not engine_attr.startswith('_') and getattr(engine, engine_attr) is None:\n logger.error('Missing engine config attribute: \"{0}.{1}\"'.format(engine.name, engine_attr))\n missing = True\n return missing\n\n\ndef using_tor_proxy(engine: Engine):\n \"\"\"Return True if the engine configuration declares to use Tor.\"\"\"\n return settings['outgoing'].get('using_tor_proxy') or getattr(engine, 'using_tor_proxy', False)\n\n\ndef is_engine_active(engine: Engine):\n # check if engine is inactive\n if engine.inactive is True:\n return False\n\n # exclude onion engines if not using tor\n if 'onions' in engine.categories and not using_tor_proxy(engine):\n return False\n\n return True\n\n\ndef register_engine(engine: Engine):\n if engine.name in engines:\n logger.error('Engine config error: ambiguous name: {0}'.format(engine.name))\n sys.exit(1)\n engines[engine.name] = engine\n\n if engine.shortcut in engine_shortcuts:\n logger.error('Engine config error: ambiguous shortcut: {0}'.format(engine.shortcut))\n sys.exit(1)\n engine_shortcuts[engine.shortcut] = engine.name\n\n for 
category_name in engine.categories:\n categories.setdefault(category_name, []).append(engine)\n\n\ndef load_engines(engine_list):\n \"\"\"usage: ``engine_list = settings['engines']``\"\"\"\n engines.clear()\n engine_shortcuts.clear()\n categories.clear()\n categories['general'] = []\n for engine_data in engine_list:\n engine = load_engine(engine_data)\n if engine:\n register_engine(engine)\n return engines\n", "path": "searx/engines/__init__.py"}]}
4,027
299
gh_patches_debug_14335
rasdani/github-patches
git_diff
web2py__web2py-2099
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Extend RConn to be able to connect to different Redis servers from within the same web2py application Right now it's not possible to connect to different Redis servers from within the same web2py application. Taking a look at the [code of RConn class](https://github.com/web2py/web2py/blob/f06c60b963a373f661e3bb09d5af49d2098902ec/gluon/contrib/redis_utils.py#L39), you can see that the first stablished connection made to a Redis server is linked to the current web2py application. And subsequent calls to RConn from within that web2py application will return the first created connection, no matter what the connection parameters are. This is a problem if you need to connect to different Redis servers from within the same web2py application. Notice this is also a problem if some of the connection arguments change (host, port, password, etc). I'm not shure what's the reason for returning always the first stablished connection, but I think a couple of fixes could be done in order to avoid this issues. I'll prepare a pull request with a proposal. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `gluon/contrib/redis_utils.py` Content: ``` 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 """ 4 Developed by [email protected] 5 License MIT/BSD/GPL 6 7 Serves as base to implement Redis connection object and various utils 8 for redis_cache, redis_session and redis_scheduler in the future 9 Should-could be overriden in case redis doesn't keep up (e.g. cluster support) 10 to ensure compatibility with another - similar - library 11 """ 12 13 import logging 14 from threading import Lock 15 import time 16 from gluon import current 17 18 logger = logging.getLogger("web2py.redis_utils") 19 20 try: 21 import redis 22 from redis.exceptions import WatchError as RWatchError 23 from redis.exceptions import ConnectionError as RConnectionError 24 except ImportError: 25 logger.error("Needs redis library to work") 26 raise RuntimeError('Needs redis library to work') 27 28 29 locker = Lock() 30 31 32 def RConn(*args, **vars): 33 """ 34 Istantiates a StrictRedis connection with parameters, at the first time 35 only 36 """ 37 locker.acquire() 38 try: 39 instance_name = 'redis_conn_' + current.request.application 40 if not hasattr(RConn, instance_name): 41 setattr(RConn, instance_name, redis.StrictRedis(*args, **vars)) 42 return getattr(RConn, instance_name) 43 finally: 44 locker.release() 45 46 def acquire_lock(conn, lockname, identifier, ltime=10): 47 while True: 48 if conn.set(lockname, identifier, ex=ltime, nx=True): 49 return identifier 50 time.sleep(.01) 51 52 53 _LUA_RELEASE_LOCK = """ 54 if redis.call("get", KEYS[1]) == ARGV[1] 55 then 56 return redis.call("del", KEYS[1]) 57 else 58 return 0 59 end 60 """ 61 62 63 def release_lock(instance, lockname, identifier): 64 return instance._release_script( 65 keys=[lockname], args=[identifier]) 66 67 68 def register_release_lock(conn): 69 rtn = conn.register_script(_LUA_RELEASE_LOCK) 70 return rtn 71 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/gluon/contrib/redis_utils.py b/gluon/contrib/redis_utils.py --- a/gluon/contrib/redis_utils.py +++ b/gluon/contrib/redis_utils.py @@ -29,14 +29,16 @@ locker = Lock() -def RConn(*args, **vars): +def RConn(application=None, *args, **vars): """ Istantiates a StrictRedis connection with parameters, at the first time only """ locker.acquire() try: - instance_name = 'redis_conn_' + current.request.application + if application is None: + application = current.request.application + instance_name = 'redis_conn_' + application if not hasattr(RConn, instance_name): setattr(RConn, instance_name, redis.StrictRedis(*args, **vars)) return getattr(RConn, instance_name)
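The patched `RConn` keys the cached connection on an explicit `application` argument (falling back to `current.request.application`), so different applications, or different keys, no longer all reuse the first connection ever created. The standalone sketch below shows that keyed-singleton pattern in isolation; the `make_connection` callable, the module-level dict cache, and the `FakeRedis` stand-in are assumptions for illustration, whereas the real code stores instances as attributes on the `RConn` function and builds `redis.StrictRedis(*args, **vars)`.

```python
# Keyed-singleton sketch mirroring the patched RConn (illustrative only).
from threading import Lock

_locker = Lock()
_instances = {}  # cache key -> connection object


def get_connection(key, make_connection, *args, **kwargs):
    """Create the connection for ``key`` once, then always return the cached instance."""
    with _locker:
        if key not in _instances:
            _instances[key] = make_connection(*args, **kwargs)
        return _instances[key]


if __name__ == "__main__":
    # Stand-in for redis.StrictRedis so the sketch runs without a Redis server.
    class FakeRedis:
        def __init__(self, host="localhost", port=6379):
            self.address = (host, port)

    a = get_connection("app1", FakeRedis, host="10.0.0.1")
    b = get_connection("app2", FakeRedis, host="10.0.0.2")
    assert a is get_connection("app1", FakeRedis, host="ignored-after-first-call")
    assert a is not b
    print(a.address, b.address)  # ('10.0.0.1', 6379) ('10.0.0.2', 6379)
```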
{"golden_diff": "diff --git a/gluon/contrib/redis_utils.py b/gluon/contrib/redis_utils.py\n--- a/gluon/contrib/redis_utils.py\n+++ b/gluon/contrib/redis_utils.py\n@@ -29,14 +29,16 @@\n locker = Lock()\n \n \n-def RConn(*args, **vars):\n+def RConn(application=None, *args, **vars):\n \"\"\"\n Istantiates a StrictRedis connection with parameters, at the first time\n only\n \"\"\"\n locker.acquire()\n try:\n- instance_name = 'redis_conn_' + current.request.application\n+ if application is None:\n+ application = current.request.application\n+ instance_name = 'redis_conn_' + application\n if not hasattr(RConn, instance_name):\n setattr(RConn, instance_name, redis.StrictRedis(*args, **vars))\n return getattr(RConn, instance_name)\n", "issue": "Extend RConn to be able to connect to different Redis servers from within the same web2py application\nRight now it's not possible to connect to different Redis servers from within the same web2py application. Taking a look at the [code of RConn class](https://github.com/web2py/web2py/blob/f06c60b963a373f661e3bb09d5af49d2098902ec/gluon/contrib/redis_utils.py#L39), you can see that the first stablished connection made to a Redis server is linked to the current web2py application. And subsequent calls to RConn from within that web2py application will return the first created connection, no matter what the connection parameters are.\r\n\r\nThis is a problem if you need to connect to different Redis servers from within the same web2py application. Notice this is also a problem if some of the connection arguments change (host, port, password, etc). \r\n\r\nI'm not shure what's the reason for returning always the first stablished connection, but I think a couple of fixes could be done in order to avoid this issues. I'll prepare a pull request with a proposal. \r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nDeveloped by [email protected]\nLicense MIT/BSD/GPL\n\nServes as base to implement Redis connection object and various utils\nfor redis_cache, redis_session and redis_scheduler in the future\nShould-could be overriden in case redis doesn't keep up (e.g. 
cluster support)\nto ensure compatibility with another - similar - library\n\"\"\"\n\nimport logging\nfrom threading import Lock\nimport time\nfrom gluon import current\n\nlogger = logging.getLogger(\"web2py.redis_utils\")\n\ntry:\n import redis\n from redis.exceptions import WatchError as RWatchError\n from redis.exceptions import ConnectionError as RConnectionError\nexcept ImportError:\n logger.error(\"Needs redis library to work\")\n raise RuntimeError('Needs redis library to work')\n\n\nlocker = Lock()\n\n\ndef RConn(*args, **vars):\n \"\"\"\n Istantiates a StrictRedis connection with parameters, at the first time\n only\n \"\"\"\n locker.acquire()\n try:\n instance_name = 'redis_conn_' + current.request.application\n if not hasattr(RConn, instance_name):\n setattr(RConn, instance_name, redis.StrictRedis(*args, **vars))\n return getattr(RConn, instance_name)\n finally:\n locker.release()\n\ndef acquire_lock(conn, lockname, identifier, ltime=10):\n while True:\n if conn.set(lockname, identifier, ex=ltime, nx=True):\n return identifier\n time.sleep(.01)\n\n\n_LUA_RELEASE_LOCK = \"\"\"\nif redis.call(\"get\", KEYS[1]) == ARGV[1]\nthen\n return redis.call(\"del\", KEYS[1])\nelse\n return 0\nend\n\"\"\"\n\n\ndef release_lock(instance, lockname, identifier):\n return instance._release_script(\n keys=[lockname], args=[identifier])\n\n\ndef register_release_lock(conn):\n rtn = conn.register_script(_LUA_RELEASE_LOCK)\n return rtn\n", "path": "gluon/contrib/redis_utils.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nDeveloped by [email protected]\nLicense MIT/BSD/GPL\n\nServes as base to implement Redis connection object and various utils\nfor redis_cache, redis_session and redis_scheduler in the future\nShould-could be overriden in case redis doesn't keep up (e.g. cluster support)\nto ensure compatibility with another - similar - library\n\"\"\"\n\nimport logging\nfrom threading import Lock\nimport time\nfrom gluon import current\n\nlogger = logging.getLogger(\"web2py.redis_utils\")\n\ntry:\n import redis\n from redis.exceptions import WatchError as RWatchError\n from redis.exceptions import ConnectionError as RConnectionError\nexcept ImportError:\n logger.error(\"Needs redis library to work\")\n raise RuntimeError('Needs redis library to work')\n\n\nlocker = Lock()\n\n\ndef RConn(application=None, *args, **vars):\n \"\"\"\n Istantiates a StrictRedis connection with parameters, at the first time\n only\n \"\"\"\n locker.acquire()\n try:\n if application is None:\n application = current.request.application\n instance_name = 'redis_conn_' + application\n if not hasattr(RConn, instance_name):\n setattr(RConn, instance_name, redis.StrictRedis(*args, **vars))\n return getattr(RConn, instance_name)\n finally:\n locker.release()\n\ndef acquire_lock(conn, lockname, identifier, ltime=10):\n while True:\n if conn.set(lockname, identifier, ex=ltime, nx=True):\n return identifier\n time.sleep(.01)\n\n\n_LUA_RELEASE_LOCK = \"\"\"\nif redis.call(\"get\", KEYS[1]) == ARGV[1]\nthen\n return redis.call(\"del\", KEYS[1])\nelse\n return 0\nend\n\"\"\"\n\n\ndef release_lock(instance, lockname, identifier):\n return instance._release_script(\n keys=[lockname], args=[identifier])\n\n\ndef register_release_lock(conn):\n rtn = conn.register_script(_LUA_RELEASE_LOCK)\n return rtn\n", "path": "gluon/contrib/redis_utils.py"}]}
1,086
196
gh_patches_debug_11402
rasdani/github-patches
git_diff
xorbitsai__inference-777
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- XINFERENCE_HOME环境变量问题 hi , 我这边设置了XINFERENCE_HOME环境变量,但是去指定的目录下看到里面的模型都是软连接,这是什么原因,谢谢! ![image](https://github.com/xorbitsai/inference/assets/9452272/2dade7e0-b1b7-45e1-b1fa-a0ca52ef18e4) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `xinference/constants.py` Content: ``` 1 # Copyright 2022-2023 XProbe Inc. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 import os 16 from pathlib import Path 17 18 XINFERENCE_ENV_ENDPOINT = "XINFERENCE_ENDPOINT" 19 XINFERENCE_ENV_MODEL_SRC = "XINFERENCE_MODEL_SRC" 20 XINFERENCE_ENV_HOME_PATH = "XINFERENCE_HOME" 21 XINFERENCE_ENV_HEALTH_CHECK_ATTEMPTS = "XINFERENCE_HEALTH_CHECK_ATTEMPTS" 22 XINFERENCE_ENV_HEALTH_CHECK_INTERVAL = "XINFERENCE_HEALTH_CHECK_INTERVAL" 23 XINFERENCE_ENV_DISABLE_VLLM = "XINFERENCE_DISABLE_VLLM" 24 25 26 def get_xinference_home(): 27 return os.environ.get(XINFERENCE_ENV_HOME_PATH, str(Path.home() / ".xinference")) 28 29 30 XINFERENCE_HOME = get_xinference_home() 31 XINFERENCE_CACHE_DIR = os.path.join(XINFERENCE_HOME, "cache") 32 XINFERENCE_MODEL_DIR = os.path.join(XINFERENCE_HOME, "model") 33 XINFERENCE_LOG_DIR = os.path.join(XINFERENCE_HOME, "logs") 34 XINFERENCE_IMAGE_DIR = os.path.join(XINFERENCE_HOME, "image") 35 36 XINFERENCE_DEFAULT_LOCAL_HOST = "127.0.0.1" 37 XINFERENCE_DEFAULT_DISTRIBUTED_HOST = "0.0.0.0" 38 XINFERENCE_DEFAULT_ENDPOINT_PORT = 9997 39 XINFERENCE_DEFAULT_LOG_FILE_NAME = "xinference.log" 40 XINFERENCE_LOG_MAX_BYTES = 100 * 1024 * 1024 41 XINFERENCE_LOG_BACKUP_COUNT = 30 42 XINFERENCE_HEALTH_CHECK_ATTEMPTS = int( 43 os.environ.get(XINFERENCE_ENV_HEALTH_CHECK_ATTEMPTS, 3) 44 ) 45 XINFERENCE_HEALTH_CHECK_INTERVAL = int( 46 os.environ.get(XINFERENCE_ENV_HEALTH_CHECK_INTERVAL, 3) 47 ) 48 XINFERENCE_DISABLE_VLLM = bool(int(os.environ.get(XINFERENCE_ENV_DISABLE_VLLM, 0))) 49 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/xinference/constants.py b/xinference/constants.py --- a/xinference/constants.py +++ b/xinference/constants.py @@ -23,8 +23,15 @@ XINFERENCE_ENV_DISABLE_VLLM = "XINFERENCE_DISABLE_VLLM" -def get_xinference_home(): - return os.environ.get(XINFERENCE_ENV_HOME_PATH, str(Path.home() / ".xinference")) +def get_xinference_home() -> str: + home_path = os.environ.get(XINFERENCE_ENV_HOME_PATH) + if home_path is None: + home_path = str(Path.home() / ".xinference") + else: + # if user has already set `XINFERENCE_HOME` env, change huggingface and modelscope default download path + os.environ["HUGGINGFACE_HUB_CACHE"] = os.path.join(home_path, "huggingface") + os.environ["MODELSCOPE_CACHE"] = os.path.join(home_path, "modelscope") + return home_path XINFERENCE_HOME = get_xinference_home()
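The symlinks the reporter saw appear because the model weights are still downloaded into the default Hugging Face / ModelScope caches under the user's home directory, with only links placed inside `XINFERENCE_HOME`. The patch redirects those caches whenever `XINFERENCE_HOME` is set. A condensed, standalone version of the patched `get_xinference_home` logic (trimmed of the surrounding constants) looks like this:

```python
# Condensed sketch of the patched get_xinference_home() behaviour.
import os
from pathlib import Path


def get_xinference_home() -> str:
    home_path = os.environ.get("XINFERENCE_HOME")
    if home_path is None:
        home_path = str(Path.home() / ".xinference")
    else:
        # When the user pins XINFERENCE_HOME, also pin the hub caches so that
        # downloaded models are real files under that directory, not symlinks
        # pointing back into ~/.cache/huggingface or ~/.cache/modelscope.
        os.environ["HUGGINGFACE_HUB_CACHE"] = os.path.join(home_path, "huggingface")
        os.environ["MODELSCOPE_CACHE"] = os.path.join(home_path, "modelscope")
    return home_path


if __name__ == "__main__":
    os.environ["XINFERENCE_HOME"] = "/data/xinference"
    print(get_xinference_home())                # /data/xinference
    print(os.environ["HUGGINGFACE_HUB_CACHE"])  # /data/xinference/huggingface
```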
{"golden_diff": "diff --git a/xinference/constants.py b/xinference/constants.py\n--- a/xinference/constants.py\n+++ b/xinference/constants.py\n@@ -23,8 +23,15 @@\n XINFERENCE_ENV_DISABLE_VLLM = \"XINFERENCE_DISABLE_VLLM\"\n \n \n-def get_xinference_home():\n- return os.environ.get(XINFERENCE_ENV_HOME_PATH, str(Path.home() / \".xinference\"))\n+def get_xinference_home() -> str:\n+ home_path = os.environ.get(XINFERENCE_ENV_HOME_PATH)\n+ if home_path is None:\n+ home_path = str(Path.home() / \".xinference\")\n+ else:\n+ # if user has already set `XINFERENCE_HOME` env, change huggingface and modelscope default download path\n+ os.environ[\"HUGGINGFACE_HUB_CACHE\"] = os.path.join(home_path, \"huggingface\")\n+ os.environ[\"MODELSCOPE_CACHE\"] = os.path.join(home_path, \"modelscope\")\n+ return home_path\n \n \n XINFERENCE_HOME = get_xinference_home()\n", "issue": "XINFERENCE_HOME\u73af\u5883\u53d8\u91cf\u95ee\u9898\nhi , \u6211\u8fd9\u8fb9\u8bbe\u7f6e\u4e86XINFERENCE_HOME\u73af\u5883\u53d8\u91cf\uff0c\u4f46\u662f\u53bb\u6307\u5b9a\u7684\u76ee\u5f55\u4e0b\u770b\u5230\u91cc\u9762\u7684\u6a21\u578b\u90fd\u662f\u8f6f\u8fde\u63a5\uff0c\u8fd9\u662f\u4ec0\u4e48\u539f\u56e0\uff0c\u8c22\u8c22!\r\n\r\n![image](https://github.com/xorbitsai/inference/assets/9452272/2dade7e0-b1b7-45e1-b1fa-a0ca52ef18e4)\n", "before_files": [{"content": "# Copyright 2022-2023 XProbe Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nfrom pathlib import Path\n\nXINFERENCE_ENV_ENDPOINT = \"XINFERENCE_ENDPOINT\"\nXINFERENCE_ENV_MODEL_SRC = \"XINFERENCE_MODEL_SRC\"\nXINFERENCE_ENV_HOME_PATH = \"XINFERENCE_HOME\"\nXINFERENCE_ENV_HEALTH_CHECK_ATTEMPTS = \"XINFERENCE_HEALTH_CHECK_ATTEMPTS\"\nXINFERENCE_ENV_HEALTH_CHECK_INTERVAL = \"XINFERENCE_HEALTH_CHECK_INTERVAL\"\nXINFERENCE_ENV_DISABLE_VLLM = \"XINFERENCE_DISABLE_VLLM\"\n\n\ndef get_xinference_home():\n return os.environ.get(XINFERENCE_ENV_HOME_PATH, str(Path.home() / \".xinference\"))\n\n\nXINFERENCE_HOME = get_xinference_home()\nXINFERENCE_CACHE_DIR = os.path.join(XINFERENCE_HOME, \"cache\")\nXINFERENCE_MODEL_DIR = os.path.join(XINFERENCE_HOME, \"model\")\nXINFERENCE_LOG_DIR = os.path.join(XINFERENCE_HOME, \"logs\")\nXINFERENCE_IMAGE_DIR = os.path.join(XINFERENCE_HOME, \"image\")\n\nXINFERENCE_DEFAULT_LOCAL_HOST = \"127.0.0.1\"\nXINFERENCE_DEFAULT_DISTRIBUTED_HOST = \"0.0.0.0\"\nXINFERENCE_DEFAULT_ENDPOINT_PORT = 9997\nXINFERENCE_DEFAULT_LOG_FILE_NAME = \"xinference.log\"\nXINFERENCE_LOG_MAX_BYTES = 100 * 1024 * 1024\nXINFERENCE_LOG_BACKUP_COUNT = 30\nXINFERENCE_HEALTH_CHECK_ATTEMPTS = int(\n os.environ.get(XINFERENCE_ENV_HEALTH_CHECK_ATTEMPTS, 3)\n)\nXINFERENCE_HEALTH_CHECK_INTERVAL = int(\n os.environ.get(XINFERENCE_ENV_HEALTH_CHECK_INTERVAL, 3)\n)\nXINFERENCE_DISABLE_VLLM = bool(int(os.environ.get(XINFERENCE_ENV_DISABLE_VLLM, 0)))\n", "path": "xinference/constants.py"}], "after_files": [{"content": "# Copyright 2022-2023 XProbe Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with 
the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nfrom pathlib import Path\n\nXINFERENCE_ENV_ENDPOINT = \"XINFERENCE_ENDPOINT\"\nXINFERENCE_ENV_MODEL_SRC = \"XINFERENCE_MODEL_SRC\"\nXINFERENCE_ENV_HOME_PATH = \"XINFERENCE_HOME\"\nXINFERENCE_ENV_HEALTH_CHECK_ATTEMPTS = \"XINFERENCE_HEALTH_CHECK_ATTEMPTS\"\nXINFERENCE_ENV_HEALTH_CHECK_INTERVAL = \"XINFERENCE_HEALTH_CHECK_INTERVAL\"\nXINFERENCE_ENV_DISABLE_VLLM = \"XINFERENCE_DISABLE_VLLM\"\n\n\ndef get_xinference_home() -> str:\n home_path = os.environ.get(XINFERENCE_ENV_HOME_PATH)\n if home_path is None:\n home_path = str(Path.home() / \".xinference\")\n else:\n # if user has already set `XINFERENCE_HOME` env, change huggingface and modelscope default download path\n os.environ[\"HUGGINGFACE_HUB_CACHE\"] = os.path.join(home_path, \"huggingface\")\n os.environ[\"MODELSCOPE_CACHE\"] = os.path.join(home_path, \"modelscope\")\n return home_path\n\n\nXINFERENCE_HOME = get_xinference_home()\nXINFERENCE_CACHE_DIR = os.path.join(XINFERENCE_HOME, \"cache\")\nXINFERENCE_MODEL_DIR = os.path.join(XINFERENCE_HOME, \"model\")\nXINFERENCE_LOG_DIR = os.path.join(XINFERENCE_HOME, \"logs\")\nXINFERENCE_IMAGE_DIR = os.path.join(XINFERENCE_HOME, \"image\")\n\nXINFERENCE_DEFAULT_LOCAL_HOST = \"127.0.0.1\"\nXINFERENCE_DEFAULT_DISTRIBUTED_HOST = \"0.0.0.0\"\nXINFERENCE_DEFAULT_ENDPOINT_PORT = 9997\nXINFERENCE_DEFAULT_LOG_FILE_NAME = \"xinference.log\"\nXINFERENCE_LOG_MAX_BYTES = 100 * 1024 * 1024\nXINFERENCE_LOG_BACKUP_COUNT = 30\nXINFERENCE_HEALTH_CHECK_ATTEMPTS = int(\n os.environ.get(XINFERENCE_ENV_HEALTH_CHECK_ATTEMPTS, 3)\n)\nXINFERENCE_HEALTH_CHECK_INTERVAL = int(\n os.environ.get(XINFERENCE_ENV_HEALTH_CHECK_INTERVAL, 3)\n)\nXINFERENCE_DISABLE_VLLM = bool(int(os.environ.get(XINFERENCE_ENV_DISABLE_VLLM, 0)))\n", "path": "xinference/constants.py"}]}
967
236
gh_patches_debug_31616
rasdani/github-patches
git_diff
jupyterhub__jupyterhub-3077
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Include Default Config Files and Documented CLI Options in docs ### Proposed change Many people deploy this into containerized environments, and as such generating a config file to modify or running `jupyterhub --help-all` to get the options when starting the server is a time-consuming and non-trivial task depending on your environment. It would be great if the repo (or some referenced location) could host a default `jupyterhub_config.py` users could modify without having to create an environment in which to install, generate, and extract the file. Similarly, it'd be great if the docs for configuration would just list the options for starting the process rather than saying "run --help". ### Alternative options As mentioned above, in interactive environments this is easy to deal with and a non-issue, but when writing a dockerfile, working on a thin client before deploying to a cluster that costs you money to access, etc. these are inconvenient, time-consuming, and potentially have a cost associated. Since these aren't exactly tall asks, I think this is pretty reasonable. ### Who would use this feature? Anyone creating a configuration for their deployment as code who isn't setting it up interactively, which I have to imagine is most people. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `docs/source/conf.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 # 3 import os 4 import shlex 5 import sys 6 7 # Set paths 8 sys.path.insert(0, os.path.abspath('.')) 9 10 # -- General configuration ------------------------------------------------ 11 12 # Minimal Sphinx version 13 needs_sphinx = '1.4' 14 15 # Sphinx extension modules 16 extensions = [ 17 'sphinx.ext.autodoc', 18 'sphinx.ext.intersphinx', 19 'sphinx.ext.napoleon', 20 'autodoc_traits', 21 'sphinx_copybutton', 22 'sphinx-jsonschema', 23 'recommonmark', 24 ] 25 26 templates_path = ['_templates'] 27 28 # The master toctree document. 29 master_doc = 'index' 30 31 # General information about the project. 32 project = u'JupyterHub' 33 copyright = u'2016, Project Jupyter team' 34 author = u'Project Jupyter team' 35 36 # Autopopulate version 37 from os.path import dirname 38 39 docs = dirname(dirname(__file__)) 40 root = dirname(docs) 41 sys.path.insert(0, root) 42 43 import jupyterhub 44 45 # The short X.Y version. 46 version = '%i.%i' % jupyterhub.version_info[:2] 47 # The full version, including alpha/beta/rc tags. 48 release = jupyterhub.__version__ 49 50 language = None 51 exclude_patterns = [] 52 pygments_style = 'sphinx' 53 todo_include_todos = False 54 55 # Set the default role so we can use `foo` instead of ``foo`` 56 default_role = 'literal' 57 58 # -- Source ------------------------------------------------------------- 59 60 import recommonmark 61 from recommonmark.transform import AutoStructify 62 63 64 def setup(app): 65 app.add_config_value('recommonmark_config', {'enable_eval_rst': True}, True) 66 app.add_css_file('custom.css') 67 app.add_transform(AutoStructify) 68 69 70 source_suffix = ['.rst', '.md'] 71 # source_encoding = 'utf-8-sig' 72 73 # -- Options for HTML output ---------------------------------------------- 74 75 # The theme to use for HTML and HTML Help pages. 
76 html_theme = 'pydata_sphinx_theme' 77 78 html_logo = '_static/images/logo/logo.png' 79 html_favicon = '_static/images/logo/favicon.ico' 80 81 # Paths that contain custom static files (such as style sheets) 82 html_static_path = ['_static'] 83 84 htmlhelp_basename = 'JupyterHubdoc' 85 86 # -- Options for LaTeX output --------------------------------------------- 87 88 latex_elements = { 89 # 'papersize': 'letterpaper', 90 # 'pointsize': '10pt', 91 # 'preamble': '', 92 # 'figure_align': 'htbp', 93 } 94 95 # Grouping the document tree into LaTeX files. List of tuples 96 # (source start file, target name, title, 97 # author, documentclass [howto, manual, or own class]). 98 latex_documents = [ 99 ( 100 master_doc, 101 'JupyterHub.tex', 102 u'JupyterHub Documentation', 103 u'Project Jupyter team', 104 'manual', 105 ) 106 ] 107 108 # latex_logo = None 109 # latex_use_parts = False 110 # latex_show_pagerefs = False 111 # latex_show_urls = False 112 # latex_appendices = [] 113 # latex_domain_indices = True 114 115 116 # -- manual page output ------------------------------------------------- 117 118 # One entry per manual page. List of tuples 119 # (source start file, name, description, authors, manual section). 120 man_pages = [(master_doc, 'jupyterhub', u'JupyterHub Documentation', [author], 1)] 121 122 # man_show_urls = False 123 124 125 # -- Texinfo output ----------------------------------------------------- 126 127 # Grouping the document tree into Texinfo files. List of tuples 128 # (source start file, target name, title, author, 129 # dir menu entry, description, category) 130 texinfo_documents = [ 131 ( 132 master_doc, 133 'JupyterHub', 134 u'JupyterHub Documentation', 135 author, 136 'JupyterHub', 137 'One line description of project.', 138 'Miscellaneous', 139 ) 140 ] 141 142 # texinfo_appendices = [] 143 # texinfo_domain_indices = True 144 # texinfo_show_urls = 'footnote' 145 # texinfo_no_detailmenu = False 146 147 148 # -- Epub output -------------------------------------------------------- 149 150 # Bibliographic Dublin Core info. 151 epub_title = project 152 epub_author = author 153 epub_publisher = author 154 epub_copyright = copyright 155 156 # A list of files that should not be packed into the epub file. 157 epub_exclude_files = ['search.html'] 158 159 # -- Intersphinx ---------------------------------------------------------- 160 161 intersphinx_mapping = {'https://docs.python.org/3/': None} 162 163 # -- Read The Docs -------------------------------------------------------- 164 165 on_rtd = os.environ.get('READTHEDOCS', None) == 'True' 166 if on_rtd: 167 # readthedocs.org uses their theme by default, so no need to specify it 168 # build rest-api, since RTD doesn't run make 169 from subprocess import check_call as sh 170 171 sh(['make', 'rest-api'], cwd=docs) 172 173 # -- Spell checking ------------------------------------------------------- 174 175 try: 176 import sphinxcontrib.spelling 177 except ImportError: 178 pass 179 else: 180 extensions.append("sphinxcontrib.spelling") 181 182 spelling_word_list_filename = 'spelling_wordlist.txt' 183 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/docs/source/conf.py b/docs/source/conf.py --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -60,11 +60,65 @@ import recommonmark from recommonmark.transform import AutoStructify +# -- Config ------------------------------------------------------------- +from jupyterhub.app import JupyterHub +from docutils import nodes +from sphinx.directives.other import SphinxDirective +from contextlib import redirect_stdout +from io import StringIO + +# create a temp instance of JupyterHub just to get the output of the generate-config +# and help --all commands. +jupyterhub_app = JupyterHub() + + +class ConfigDirective(SphinxDirective): + """Generate the configuration file output for use in the documentation.""" + + has_content = False + required_arguments = 0 + optional_arguments = 0 + final_argument_whitespace = False + option_spec = {} + + def run(self): + # The generated configuration file for this version + generated_config = jupyterhub_app.generate_config_file() + # post-process output + home_dir = os.environ['HOME'] + generated_config = generated_config.replace(home_dir, '$HOME', 1) + par = nodes.literal_block(text=generated_config) + return [par] + + +class HelpAllDirective(SphinxDirective): + """Print the output of jupyterhub help --all for use in the documentation.""" + + has_content = False + required_arguments = 0 + optional_arguments = 0 + final_argument_whitespace = False + option_spec = {} + + def run(self): + # The output of the help command for this version + buffer = StringIO() + with redirect_stdout(buffer): + jupyterhub_app.print_help('--help-all') + all_help = buffer.getvalue() + # post-process output + home_dir = os.environ['HOME'] + all_help = all_help.replace(home_dir, '$HOME', 1) + par = nodes.literal_block(text=all_help) + return [par] + def setup(app): app.add_config_value('recommonmark_config', {'enable_eval_rst': True}, True) app.add_css_file('custom.css') app.add_transform(AutoStructify) + app.add_directive('jupyterhub-generate-config', ConfigDirective) + app.add_directive('jupyterhub-help-all', HelpAllDirective) source_suffix = ['.rst', '.md']
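Editorial note (not part of the record above): the patch registers two Sphinx directives whose bodies come from the same traitlets calls it makes on a throwaway `JupyterHub()` instance. A quick way to sanity-check that output outside a full Sphinx build is to call those APIs directly. This is only a sketch and assumes `jupyterhub` is importable in the current environment:

```python
# Sketch: reproduce the text that the new directives embed in the docs.
# Mirrors ConfigDirective / HelpAllDirective from the patch above.
from contextlib import redirect_stdout
from io import StringIO

from jupyterhub.app import JupyterHub

app = JupyterHub()

# Body of the `jupyterhub-generate-config` directive: the default config file text.
print(app.generate_config_file()[:500])

# Body of the `jupyterhub-help-all` directive: the full CLI/option help.
buffer = StringIO()
with redirect_stdout(buffer):
    app.print_help('--help-all')
print(buffer.getvalue()[:500])
```

In the documentation sources, the registered names `jupyterhub-generate-config` and `jupyterhub-help-all` are what a docs page would invoke to pull this output in.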
{"golden_diff": "diff --git a/docs/source/conf.py b/docs/source/conf.py\n--- a/docs/source/conf.py\n+++ b/docs/source/conf.py\n@@ -60,11 +60,65 @@\n import recommonmark\n from recommonmark.transform import AutoStructify\n \n+# -- Config -------------------------------------------------------------\n+from jupyterhub.app import JupyterHub\n+from docutils import nodes\n+from sphinx.directives.other import SphinxDirective\n+from contextlib import redirect_stdout\n+from io import StringIO\n+\n+# create a temp instance of JupyterHub just to get the output of the generate-config\n+# and help --all commands.\n+jupyterhub_app = JupyterHub()\n+\n+\n+class ConfigDirective(SphinxDirective):\n+ \"\"\"Generate the configuration file output for use in the documentation.\"\"\"\n+\n+ has_content = False\n+ required_arguments = 0\n+ optional_arguments = 0\n+ final_argument_whitespace = False\n+ option_spec = {}\n+\n+ def run(self):\n+ # The generated configuration file for this version\n+ generated_config = jupyterhub_app.generate_config_file()\n+ # post-process output\n+ home_dir = os.environ['HOME']\n+ generated_config = generated_config.replace(home_dir, '$HOME', 1)\n+ par = nodes.literal_block(text=generated_config)\n+ return [par]\n+\n+\n+class HelpAllDirective(SphinxDirective):\n+ \"\"\"Print the output of jupyterhub help --all for use in the documentation.\"\"\"\n+\n+ has_content = False\n+ required_arguments = 0\n+ optional_arguments = 0\n+ final_argument_whitespace = False\n+ option_spec = {}\n+\n+ def run(self):\n+ # The output of the help command for this version\n+ buffer = StringIO()\n+ with redirect_stdout(buffer):\n+ jupyterhub_app.print_help('--help-all')\n+ all_help = buffer.getvalue()\n+ # post-process output\n+ home_dir = os.environ['HOME']\n+ all_help = all_help.replace(home_dir, '$HOME', 1)\n+ par = nodes.literal_block(text=all_help)\n+ return [par]\n+\n \n def setup(app):\n app.add_config_value('recommonmark_config', {'enable_eval_rst': True}, True)\n app.add_css_file('custom.css')\n app.add_transform(AutoStructify)\n+ app.add_directive('jupyterhub-generate-config', ConfigDirective)\n+ app.add_directive('jupyterhub-help-all', HelpAllDirective)\n \n \n source_suffix = ['.rst', '.md']\n", "issue": "Include Default Config Files and Documented CLI Options in docs\n### Proposed change\r\nMany people deploy this into containerized environments, and as such generating a config file to modify or running `jupyterhub --help-all` to get the options when starting the server is a time-consuming and non-trivial task depending on your environment. It would be great if the repo (or some referenced location) could host a default `jupyterhub_config.py` users could modify without having to create an environment in which to install, generate, and extract the file. Similarly, it'd be great if the docs for configuration would just list the options for starting the process rather than saying \"run --help\".\r\n\r\n\r\n### Alternative options\r\nAs mentioned above, in interactive environments this is easy to deal with and a non-issue, but when writing a dockerfile, working on a thin client before deploying to a cluster that costs you money to access, etc. these are inconvenient, time-consuming, and potentially have a cost associated. 
Since these aren't exactly tall asks, I think this is pretty reasonable.\r\n\r\n\r\n### Who would use this feature?\r\nAnyone creating a configuration for their deployment as code who isn't setting it up interactively, which I have to imagine is most people.\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\nimport os\nimport shlex\nimport sys\n\n# Set paths\nsys.path.insert(0, os.path.abspath('.'))\n\n# -- General configuration ------------------------------------------------\n\n# Minimal Sphinx version\nneeds_sphinx = '1.4'\n\n# Sphinx extension modules\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.napoleon',\n 'autodoc_traits',\n 'sphinx_copybutton',\n 'sphinx-jsonschema',\n 'recommonmark',\n]\n\ntemplates_path = ['_templates']\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'JupyterHub'\ncopyright = u'2016, Project Jupyter team'\nauthor = u'Project Jupyter team'\n\n# Autopopulate version\nfrom os.path import dirname\n\ndocs = dirname(dirname(__file__))\nroot = dirname(docs)\nsys.path.insert(0, root)\n\nimport jupyterhub\n\n# The short X.Y version.\nversion = '%i.%i' % jupyterhub.version_info[:2]\n# The full version, including alpha/beta/rc tags.\nrelease = jupyterhub.__version__\n\nlanguage = None\nexclude_patterns = []\npygments_style = 'sphinx'\ntodo_include_todos = False\n\n# Set the default role so we can use `foo` instead of ``foo``\ndefault_role = 'literal'\n\n# -- Source -------------------------------------------------------------\n\nimport recommonmark\nfrom recommonmark.transform import AutoStructify\n\n\ndef setup(app):\n app.add_config_value('recommonmark_config', {'enable_eval_rst': True}, True)\n app.add_css_file('custom.css')\n app.add_transform(AutoStructify)\n\n\nsource_suffix = ['.rst', '.md']\n# source_encoding = 'utf-8-sig'\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages.\nhtml_theme = 'pydata_sphinx_theme'\n\nhtml_logo = '_static/images/logo/logo.png'\nhtml_favicon = '_static/images/logo/favicon.ico'\n\n# Paths that contain custom static files (such as style sheets)\nhtml_static_path = ['_static']\n\nhtmlhelp_basename = 'JupyterHubdoc'\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n # 'papersize': 'letterpaper',\n # 'pointsize': '10pt',\n # 'preamble': '',\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (\n master_doc,\n 'JupyterHub.tex',\n u'JupyterHub Documentation',\n u'Project Jupyter team',\n 'manual',\n )\n]\n\n# latex_logo = None\n# latex_use_parts = False\n# latex_show_pagerefs = False\n# latex_show_urls = False\n# latex_appendices = []\n# latex_domain_indices = True\n\n\n# -- manual page output -------------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [(master_doc, 'jupyterhub', u'JupyterHub Documentation', [author], 1)]\n\n# man_show_urls = False\n\n\n# -- Texinfo output -----------------------------------------------------\n\n# Grouping the document tree into Texinfo files. 
List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (\n master_doc,\n 'JupyterHub',\n u'JupyterHub Documentation',\n author,\n 'JupyterHub',\n 'One line description of project.',\n 'Miscellaneous',\n )\n]\n\n# texinfo_appendices = []\n# texinfo_domain_indices = True\n# texinfo_show_urls = 'footnote'\n# texinfo_no_detailmenu = False\n\n\n# -- Epub output --------------------------------------------------------\n\n# Bibliographic Dublin Core info.\nepub_title = project\nepub_author = author\nepub_publisher = author\nepub_copyright = copyright\n\n# A list of files that should not be packed into the epub file.\nepub_exclude_files = ['search.html']\n\n# -- Intersphinx ----------------------------------------------------------\n\nintersphinx_mapping = {'https://docs.python.org/3/': None}\n\n# -- Read The Docs --------------------------------------------------------\n\non_rtd = os.environ.get('READTHEDOCS', None) == 'True'\nif on_rtd:\n # readthedocs.org uses their theme by default, so no need to specify it\n # build rest-api, since RTD doesn't run make\n from subprocess import check_call as sh\n\n sh(['make', 'rest-api'], cwd=docs)\n\n# -- Spell checking -------------------------------------------------------\n\ntry:\n import sphinxcontrib.spelling\nexcept ImportError:\n pass\nelse:\n extensions.append(\"sphinxcontrib.spelling\")\n\nspelling_word_list_filename = 'spelling_wordlist.txt'\n", "path": "docs/source/conf.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\nimport os\nimport shlex\nimport sys\n\n# Set paths\nsys.path.insert(0, os.path.abspath('.'))\n\n# -- General configuration ------------------------------------------------\n\n# Minimal Sphinx version\nneeds_sphinx = '1.4'\n\n# Sphinx extension modules\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.napoleon',\n 'autodoc_traits',\n 'sphinx_copybutton',\n 'sphinx-jsonschema',\n 'recommonmark',\n]\n\ntemplates_path = ['_templates']\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'JupyterHub'\ncopyright = u'2016, Project Jupyter team'\nauthor = u'Project Jupyter team'\n\n# Autopopulate version\nfrom os.path import dirname\n\ndocs = dirname(dirname(__file__))\nroot = dirname(docs)\nsys.path.insert(0, root)\n\nimport jupyterhub\n\n# The short X.Y version.\nversion = '%i.%i' % jupyterhub.version_info[:2]\n# The full version, including alpha/beta/rc tags.\nrelease = jupyterhub.__version__\n\nlanguage = None\nexclude_patterns = []\npygments_style = 'sphinx'\ntodo_include_todos = False\n\n# Set the default role so we can use `foo` instead of ``foo``\ndefault_role = 'literal'\n\n# -- Source -------------------------------------------------------------\n\nimport recommonmark\nfrom recommonmark.transform import AutoStructify\n\n# -- Config -------------------------------------------------------------\nfrom jupyterhub.app import JupyterHub\nfrom docutils import nodes\nfrom sphinx.directives.other import SphinxDirective\nfrom contextlib import redirect_stdout\nfrom io import StringIO\n\n# create a temp instance of JupyterHub just to get the output of the generate-config\n# and help --all commands.\njupyterhub_app = JupyterHub()\n\n\nclass ConfigDirective(SphinxDirective):\n \"\"\"Generate the configuration file output for use in the documentation.\"\"\"\n\n has_content = False\n required_arguments = 0\n optional_arguments = 0\n final_argument_whitespace = 
False\n option_spec = {}\n\n def run(self):\n # The generated configuration file for this version\n generated_config = jupyterhub_app.generate_config_file()\n # post-process output\n home_dir = os.environ['HOME']\n generated_config = generated_config.replace(home_dir, '$HOME', 1)\n par = nodes.literal_block(text=generated_config)\n return [par]\n\n\nclass HelpAllDirective(SphinxDirective):\n \"\"\"Print the output of jupyterhub help --all for use in the documentation.\"\"\"\n\n has_content = False\n required_arguments = 0\n optional_arguments = 0\n final_argument_whitespace = False\n option_spec = {}\n\n def run(self):\n # The output of the help command for this version\n buffer = StringIO()\n with redirect_stdout(buffer):\n jupyterhub_app.print_help('--help-all')\n all_help = buffer.getvalue()\n # post-process output\n home_dir = os.environ['HOME']\n all_help = all_help.replace(home_dir, '$HOME', 1)\n par = nodes.literal_block(text=all_help)\n return [par]\n\n\ndef setup(app):\n app.add_config_value('recommonmark_config', {'enable_eval_rst': True}, True)\n app.add_css_file('custom.css')\n app.add_transform(AutoStructify)\n app.add_directive('jupyterhub-generate-config', ConfigDirective)\n app.add_directive('jupyterhub-help-all', HelpAllDirective)\n\n\nsource_suffix = ['.rst', '.md']\n# source_encoding = 'utf-8-sig'\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages.\nhtml_theme = 'pydata_sphinx_theme'\n\nhtml_logo = '_static/images/logo/logo.png'\nhtml_favicon = '_static/images/logo/favicon.ico'\n\n# Paths that contain custom static files (such as style sheets)\nhtml_static_path = ['_static']\n\nhtmlhelp_basename = 'JupyterHubdoc'\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n # 'papersize': 'letterpaper',\n # 'pointsize': '10pt',\n # 'preamble': '',\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (\n master_doc,\n 'JupyterHub.tex',\n u'JupyterHub Documentation',\n u'Project Jupyter team',\n 'manual',\n )\n]\n\n# latex_logo = None\n# latex_use_parts = False\n# latex_show_pagerefs = False\n# latex_show_urls = False\n# latex_appendices = []\n# latex_domain_indices = True\n\n\n# -- manual page output -------------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [(master_doc, 'jupyterhub', u'JupyterHub Documentation', [author], 1)]\n\n# man_show_urls = False\n\n\n# -- Texinfo output -----------------------------------------------------\n\n# Grouping the document tree into Texinfo files. 
List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (\n master_doc,\n 'JupyterHub',\n u'JupyterHub Documentation',\n author,\n 'JupyterHub',\n 'One line description of project.',\n 'Miscellaneous',\n )\n]\n\n# texinfo_appendices = []\n# texinfo_domain_indices = True\n# texinfo_show_urls = 'footnote'\n# texinfo_no_detailmenu = False\n\n\n# -- Epub output --------------------------------------------------------\n\n# Bibliographic Dublin Core info.\nepub_title = project\nepub_author = author\nepub_publisher = author\nepub_copyright = copyright\n\n# A list of files that should not be packed into the epub file.\nepub_exclude_files = ['search.html']\n\n# -- Intersphinx ----------------------------------------------------------\n\nintersphinx_mapping = {'https://docs.python.org/3/': None}\n\n# -- Read The Docs --------------------------------------------------------\n\non_rtd = os.environ.get('READTHEDOCS', None) == 'True'\nif on_rtd:\n # readthedocs.org uses their theme by default, so no need to specify it\n # build rest-api, since RTD doesn't run make\n from subprocess import check_call as sh\n\n sh(['make', 'rest-api'], cwd=docs)\n\n# -- Spell checking -------------------------------------------------------\n\ntry:\n import sphinxcontrib.spelling\nexcept ImportError:\n pass\nelse:\n extensions.append(\"sphinxcontrib.spelling\")\n\nspelling_word_list_filename = 'spelling_wordlist.txt'\n", "path": "docs/source/conf.py"}]}
num_tokens: 2,051
num_tokens_diff: 554
problem_id: gh_patches_debug_22760
source: rasdani/github-patches
task_type: git_diff
in_source_id: carpentries__amy-1065
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Bulk import workflow encounters IntegrityError when saving an organization Currently, we allow organizations with the domain that contains the `www` subdomain. For eg: Google can exist as `www.google.com` as well as `google.com`, leading to `IntegrityError` while saving the first while the second exists. Shouldn't we enforce one URL pattern and trim/add `www` to the `domain` field when saving an organization? Testcase: ``` py In [5]: Organization.objects.create(fullname='Google', domain='google.com') Out[5]: <Organization: google.com> In [6]: Organization.objects.create(fullname='Google', domain='www.google.com') --------------------------------------------------------------------------- IntegrityError Traceback (most recent call last) ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pydata/api.py` Content: ``` 1 from functools import lru_cache 2 from json import JSONDecodeError 3 from urllib.parse import urljoin, urlparse 4 5 import requests 6 from django.conf import settings 7 8 from workshops.models import ( 9 Person, 10 Role, 11 Organization, 12 Sponsorship, 13 Task, 14 ) 15 from workshops.util import create_username 16 17 18 class BaseAPIClient(requests.Session): 19 """ 20 An API client that abstracts away the work of dealing with URLs. 21 Usage: 22 > client = APIClient(event) 23 > list(client) -> returns a list of all objects returned by the API. 24 > client[23] -> returns the object with pk=23 25 """ 26 ROOT_ENDPOINT = 'api/' 27 28 @lru_cache(maxsize=None) 29 def __new__(cls, event): 30 """ 31 Returns an instance of APIClient. 32 Throws NotImplementedError if an API does not exist at the root URL. 
33 """ 34 try: 35 r = requests.get(urljoin(event.url, cls.ROOT_ENDPOINT)) 36 r.raise_for_status() 37 r.json() 38 except (requests.exceptions.HTTPError, JSONDecodeError): 39 raise NotImplementedError('Conference site does not support an API') 40 return super().__new__(cls) 41 42 def __init__(self, event): 43 '''Populate API endpoint and set up basic authentication''' 44 super().__init__() 45 self.event = event 46 self.endpoint = urljoin(event.url, self.ENDPOINT) 47 self.auth = ( 48 settings.PYDATA_USERNAME_SECRET, settings.PYDATA_PASSWORD_SECRET) 49 50 def __iter__(self): 51 try: 52 r = self.get(self.endpoint) 53 r.raise_for_status() 54 pydata_objs = r.json() 55 except (requests.exceptions.HTTPError, JSONDecodeError) as e: 56 raise IOError('Cannot fetch instances from API: {}'.format(str(e))) 57 for obj in pydata_objs: 58 yield self.parse(obj) 59 60 def __contains__(self, pk): 61 try: 62 self.get(self.endpoint + str(pk)).raise_for_status() 63 except requests.exceptions.HTTPError: 64 return False 65 else: 66 return True 67 68 def __getitem__(self, pk): 69 if pk not in self: 70 raise KeyError( 71 '{} does not exist'.format(self.model._meta.verbose_name) 72 ) 73 obj = self.get(self.endpoint + str(pk)).json() 74 return self.parse(obj) 75 76 77 class PersonAPIClient(BaseAPIClient): 78 ENDPOINT = 'api/speaker/' 79 model = Person 80 81 def parse(self, speaker): 82 speaker['name'] = speaker['name'].strip() 83 personal = speaker['name'].rsplit(' ', 1)[0] 84 family = speaker['name'].rsplit(' ', 1)[-1] 85 return Person( 86 username=speaker['username'], 87 personal=personal, 88 family=family, 89 email=speaker['email'], 90 url=speaker['absolute_url'], 91 ) 92 93 94 class TaskAPIClient(BaseAPIClient): 95 ENDPOINT = 'api/presentation/' 96 model = Task 97 98 def parse(self, presentation): 99 return Task( 100 event=self.event, 101 person=Person.objects.get_or_create( 102 email=presentation['speaker']['email'], 103 defaults={ 104 'username': create_username('', presentation['speaker']['username']), 105 'personal': presentation['speaker']['name'].rsplit(' ', 1)[0], 106 'family': presentation['speaker']['name'].rsplit(' ', 1)[-1], 107 'url': presentation['speaker']['absolute_url'], 108 } 109 )[0], 110 role=Role.objects.get(name='presenter'), 111 title=presentation['title'], 112 url=presentation['absolute_url'], 113 ) 114 115 116 class SponsorshipAPIClient(BaseAPIClient): 117 ENDPOINT = 'api/sponsor/' 118 model = Sponsorship 119 120 def parse(self, sponsor): 121 return Sponsorship( 122 organization=Organization.objects.get_or_create( 123 domain=urlparse(sponsor['external_url']).netloc, 124 defaults={ 125 'fullname': sponsor['name'], 126 'notes': sponsor['annotation'], 127 }, 128 )[0], 129 event=self.event, 130 amount=sponsor['level']['cost'], 131 contact=Person.objects.get_or_create( 132 email=sponsor['contact_email'], 133 defaults={ 134 'username': create_username('', sponsor['contact_name']), 135 'personal': sponsor['contact_name'].rsplit(' ', 1)[0], 136 'family': sponsor['contact_name'].rsplit(' ', 1)[-1], 137 }, 138 )[0], 139 ) 140 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pydata/api.py b/pydata/api.py --- a/pydata/api.py +++ b/pydata/api.py @@ -4,6 +4,7 @@ import requests from django.conf import settings +from django.db.models import Q from workshops.models import ( Person, @@ -118,14 +119,18 @@ model = Sponsorship def parse(self, sponsor): + domain = urlparse(sponsor['external_url']).netloc + organization = Organization.objects.filter( + Q(fullname=sponsor['name']) | Q(domain=domain) + ).first() + if not organization: + organization = Organization.objects.create( + fullname=sponsor['name'], + domain=domain, + notes=sponsor['annotation'], + ) return Sponsorship( - organization=Organization.objects.get_or_create( - domain=urlparse(sponsor['external_url']).netloc, - defaults={ - 'fullname': sponsor['name'], - 'notes': sponsor['annotation'], - }, - )[0], + organization=organization, event=self.event, amount=sponsor['level']['cost'], contact=Person.objects.get_or_create(
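Editorial note (not part of the record above): the merged fix avoids the `IntegrityError` by looking up an existing organization by either full name or domain before creating one. The issue text also floats normalizing the `www` prefix instead; a sketch of that alternative idea (illustrative only — `normalize_domain` is not a helper that exists in AMY) might look like:

```python
# Illustrative alternative from the issue text: collapse 'www.example.org' and
# 'example.org' to one canonical domain before get_or_create(). Not the merged fix.
from urllib.parse import urlparse

def normalize_domain(url):
    netloc = urlparse(url).netloc.lower()
    return netloc[4:] if netloc.startswith('www.') else netloc

assert normalize_domain('https://www.google.com/sponsors') == 'google.com'
assert normalize_domain('http://google.com') == 'google.com'
```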
{"golden_diff": "diff --git a/pydata/api.py b/pydata/api.py\n--- a/pydata/api.py\n+++ b/pydata/api.py\n@@ -4,6 +4,7 @@\n \n import requests\n from django.conf import settings\n+from django.db.models import Q\n \n from workshops.models import (\n Person,\n@@ -118,14 +119,18 @@\n model = Sponsorship\n \n def parse(self, sponsor):\n+ domain = urlparse(sponsor['external_url']).netloc\n+ organization = Organization.objects.filter(\n+ Q(fullname=sponsor['name']) | Q(domain=domain)\n+ ).first()\n+ if not organization:\n+ organization = Organization.objects.create(\n+ fullname=sponsor['name'],\n+ domain=domain,\n+ notes=sponsor['annotation'],\n+ )\n return Sponsorship(\n- organization=Organization.objects.get_or_create(\n- domain=urlparse(sponsor['external_url']).netloc,\n- defaults={\n- 'fullname': sponsor['name'],\n- 'notes': sponsor['annotation'],\n- },\n- )[0],\n+ organization=organization,\n event=self.event,\n amount=sponsor['level']['cost'],\n contact=Person.objects.get_or_create(\n", "issue": "Bulk import workflow encounters IntegrityError when saving an organization\nCurrently, we allow organizations with the domain that contains the `www` subdomain. For eg: Google can exist as `www.google.com` as well as `google.com`, leading to `IntegrityError` while saving the first while the second exists.\n\nShouldn't we enforce one URL pattern and trim/add `www` to the `domain` field when saving an organization?\n\nTestcase:\n\n``` py\nIn [5]: Organization.objects.create(fullname='Google', domain='google.com')\nOut[5]: <Organization: google.com>\n\nIn [6]: Organization.objects.create(fullname='Google', domain='www.google.com')\n---------------------------------------------------------------------------\nIntegrityError Traceback (most recent call last)\n```\n\n", "before_files": [{"content": "from functools import lru_cache\nfrom json import JSONDecodeError\nfrom urllib.parse import urljoin, urlparse\n\nimport requests\nfrom django.conf import settings\n\nfrom workshops.models import (\n Person,\n Role,\n Organization,\n Sponsorship,\n Task,\n)\nfrom workshops.util import create_username\n\n\nclass BaseAPIClient(requests.Session):\n \"\"\"\n An API client that abstracts away the work of dealing with URLs.\n Usage:\n > client = APIClient(event)\n > list(client) -> returns a list of all objects returned by the API.\n > client[23] -> returns the object with pk=23\n \"\"\"\n ROOT_ENDPOINT = 'api/'\n\n @lru_cache(maxsize=None)\n def __new__(cls, event):\n \"\"\"\n Returns an instance of APIClient.\n Throws NotImplementedError if an API does not exist at the root URL.\n \"\"\"\n try:\n r = requests.get(urljoin(event.url, cls.ROOT_ENDPOINT))\n r.raise_for_status()\n r.json()\n except (requests.exceptions.HTTPError, JSONDecodeError):\n raise NotImplementedError('Conference site does not support an API')\n return super().__new__(cls)\n\n def __init__(self, event):\n '''Populate API endpoint and set up basic authentication'''\n super().__init__()\n self.event = event\n self.endpoint = urljoin(event.url, self.ENDPOINT)\n self.auth = (\n settings.PYDATA_USERNAME_SECRET, settings.PYDATA_PASSWORD_SECRET)\n\n def __iter__(self):\n try:\n r = self.get(self.endpoint)\n r.raise_for_status()\n pydata_objs = r.json()\n except (requests.exceptions.HTTPError, JSONDecodeError) as e:\n raise IOError('Cannot fetch instances from API: {}'.format(str(e)))\n for obj in pydata_objs:\n yield self.parse(obj)\n\n def __contains__(self, pk):\n try:\n self.get(self.endpoint + str(pk)).raise_for_status()\n except 
requests.exceptions.HTTPError:\n return False\n else:\n return True\n\n def __getitem__(self, pk):\n if pk not in self:\n raise KeyError(\n '{} does not exist'.format(self.model._meta.verbose_name)\n )\n obj = self.get(self.endpoint + str(pk)).json()\n return self.parse(obj)\n\n\nclass PersonAPIClient(BaseAPIClient):\n ENDPOINT = 'api/speaker/'\n model = Person\n\n def parse(self, speaker):\n speaker['name'] = speaker['name'].strip()\n personal = speaker['name'].rsplit(' ', 1)[0]\n family = speaker['name'].rsplit(' ', 1)[-1]\n return Person(\n username=speaker['username'],\n personal=personal,\n family=family,\n email=speaker['email'],\n url=speaker['absolute_url'],\n )\n\n\nclass TaskAPIClient(BaseAPIClient):\n ENDPOINT = 'api/presentation/'\n model = Task\n\n def parse(self, presentation):\n return Task(\n event=self.event,\n person=Person.objects.get_or_create(\n email=presentation['speaker']['email'],\n defaults={\n 'username': create_username('', presentation['speaker']['username']),\n 'personal': presentation['speaker']['name'].rsplit(' ', 1)[0],\n 'family': presentation['speaker']['name'].rsplit(' ', 1)[-1],\n 'url': presentation['speaker']['absolute_url'],\n }\n )[0],\n role=Role.objects.get(name='presenter'),\n title=presentation['title'],\n url=presentation['absolute_url'],\n )\n\n\nclass SponsorshipAPIClient(BaseAPIClient):\n ENDPOINT = 'api/sponsor/'\n model = Sponsorship\n\n def parse(self, sponsor):\n return Sponsorship(\n organization=Organization.objects.get_or_create(\n domain=urlparse(sponsor['external_url']).netloc,\n defaults={\n 'fullname': sponsor['name'],\n 'notes': sponsor['annotation'],\n },\n )[0],\n event=self.event,\n amount=sponsor['level']['cost'],\n contact=Person.objects.get_or_create(\n email=sponsor['contact_email'],\n defaults={\n 'username': create_username('', sponsor['contact_name']),\n 'personal': sponsor['contact_name'].rsplit(' ', 1)[0],\n 'family': sponsor['contact_name'].rsplit(' ', 1)[-1],\n },\n )[0],\n )\n", "path": "pydata/api.py"}], "after_files": [{"content": "from functools import lru_cache\nfrom json import JSONDecodeError\nfrom urllib.parse import urljoin, urlparse\n\nimport requests\nfrom django.conf import settings\nfrom django.db.models import Q\n\nfrom workshops.models import (\n Person,\n Role,\n Organization,\n Sponsorship,\n Task,\n)\nfrom workshops.util import create_username\n\n\nclass BaseAPIClient(requests.Session):\n \"\"\"\n An API client that abstracts away the work of dealing with URLs.\n Usage:\n > client = APIClient(event)\n > list(client) -> returns a list of all objects returned by the API.\n > client[23] -> returns the object with pk=23\n \"\"\"\n ROOT_ENDPOINT = 'api/'\n\n @lru_cache(maxsize=None)\n def __new__(cls, event):\n \"\"\"\n Returns an instance of APIClient.\n Throws NotImplementedError if an API does not exist at the root URL.\n \"\"\"\n try:\n r = requests.get(urljoin(event.url, cls.ROOT_ENDPOINT))\n r.raise_for_status()\n r.json()\n except (requests.exceptions.HTTPError, JSONDecodeError):\n raise NotImplementedError('Conference site does not support an API')\n return super().__new__(cls)\n\n def __init__(self, event):\n '''Populate API endpoint and set up basic authentication'''\n super().__init__()\n self.event = event\n self.endpoint = urljoin(event.url, self.ENDPOINT)\n self.auth = (\n settings.PYDATA_USERNAME_SECRET, settings.PYDATA_PASSWORD_SECRET)\n\n def __iter__(self):\n try:\n r = self.get(self.endpoint)\n r.raise_for_status()\n pydata_objs = r.json()\n except (requests.exceptions.HTTPError, 
JSONDecodeError) as e:\n raise IOError('Cannot fetch instances from API: {}'.format(str(e)))\n for obj in pydata_objs:\n yield self.parse(obj)\n\n def __contains__(self, pk):\n try:\n self.get(self.endpoint + str(pk)).raise_for_status()\n except requests.exceptions.HTTPError:\n return False\n else:\n return True\n\n def __getitem__(self, pk):\n if pk not in self:\n raise KeyError(\n '{} does not exist'.format(self.model._meta.verbose_name)\n )\n obj = self.get(self.endpoint + str(pk)).json()\n return self.parse(obj)\n\n\nclass PersonAPIClient(BaseAPIClient):\n ENDPOINT = 'api/speaker/'\n model = Person\n\n def parse(self, speaker):\n speaker['name'] = speaker['name'].strip()\n personal = speaker['name'].rsplit(' ', 1)[0]\n family = speaker['name'].rsplit(' ', 1)[-1]\n return Person(\n username=speaker['username'],\n personal=personal,\n family=family,\n email=speaker['email'],\n url=speaker['absolute_url'],\n )\n\n\nclass TaskAPIClient(BaseAPIClient):\n ENDPOINT = 'api/presentation/'\n model = Task\n\n def parse(self, presentation):\n return Task(\n event=self.event,\n person=Person.objects.get_or_create(\n email=presentation['speaker']['email'],\n defaults={\n 'username': create_username('', presentation['speaker']['username']),\n 'personal': presentation['speaker']['name'].rsplit(' ', 1)[0],\n 'family': presentation['speaker']['name'].rsplit(' ', 1)[-1],\n 'url': presentation['speaker']['absolute_url'],\n }\n )[0],\n role=Role.objects.get(name='presenter'),\n title=presentation['title'],\n url=presentation['absolute_url'],\n )\n\n\nclass SponsorshipAPIClient(BaseAPIClient):\n ENDPOINT = 'api/sponsor/'\n model = Sponsorship\n\n def parse(self, sponsor):\n domain = urlparse(sponsor['external_url']).netloc\n organization = Organization.objects.filter(\n Q(fullname=sponsor['name']) | Q(domain=domain)\n ).first()\n if not organization:\n organization = Organization.objects.create(\n fullname=sponsor['name'],\n domain=domain,\n notes=sponsor['annotation'],\n )\n return Sponsorship(\n organization=organization,\n event=self.event,\n amount=sponsor['level']['cost'],\n contact=Person.objects.get_or_create(\n email=sponsor['contact_email'],\n defaults={\n 'username': create_username('', sponsor['contact_name']),\n 'personal': sponsor['contact_name'].rsplit(' ', 1)[0],\n 'family': sponsor['contact_name'].rsplit(' ', 1)[-1],\n },\n )[0],\n )\n", "path": "pydata/api.py"}]}
num_tokens: 1,665
num_tokens_diff: 263
problem_id: gh_patches_debug_24112
source: rasdani/github-patches
task_type: git_diff
in_source_id: pytorch__ignite-153
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Complete engines.rst sphinx package reference --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `ignite/engines/engine.py` Content: ``` 1 import logging 2 import time 3 from enum import Enum 4 5 from ignite._utils import _to_hours_mins_secs 6 7 8 class Events(Enum): 9 EPOCH_STARTED = "epoch_started" 10 EPOCH_COMPLETED = "epoch_completed" 11 STARTED = "started" 12 COMPLETED = "completed" 13 ITERATION_STARTED = "iteration_started" 14 ITERATION_COMPLETED = "iteration_completed" 15 EXCEPTION_RAISED = "exception_raised" 16 17 18 class State(object): 19 def __init__(self, **kwargs): 20 self.iteration = 0 21 self.output = None 22 self.batch = None 23 for k, v in kwargs.items(): 24 setattr(self, k, v) 25 26 27 class Engine(object): 28 """Runs a given process_function over each batch of a dataset, emitting events as it goes. 29 30 Args: 31 process_function (Callable): A function receiving a handle to the engine and the current batch 32 in each iteration, outputing data to be stored in the state 33 34 """ 35 def __init__(self, process_function): 36 self._event_handlers = {} 37 self._logger = logging.getLogger(__name__ + "." + self.__class__.__name__) 38 self._logger.addHandler(logging.NullHandler()) 39 self._process_function = process_function 40 self.should_terminate = False 41 self.state = None 42 43 if self._process_function is None: 44 raise ValueError("Engine must be given a processing function in order to run") 45 46 def add_event_handler(self, event_name, handler, *args, **kwargs): 47 """Add an event handler to be executed when the specified event is fired 48 49 Args: 50 event_name (Events): event from ignite.engines.Events to attach the handler to 51 handler (Callable): the callable event handler that should be invoked 52 *args: optional args to be passed to `handler` 53 **kwargs: optional keyword args to be passed to `handler` 54 55 """ 56 if event_name not in Events.__members__.values(): 57 self._logger.error("attempt to add event handler to an invalid event %s ", event_name) 58 raise ValueError("Event {} is not a valid event for this Engine".format(event_name)) 59 60 if event_name not in self._event_handlers: 61 self._event_handlers[event_name] = [] 62 63 self._event_handlers[event_name].append((handler, args, kwargs)) 64 self._logger.debug("added handler for event %s ", event_name) 65 66 def on(self, event_name, *args, **kwargs): 67 """Decorator shortcut for add_event_handler 68 69 Args: 70 event_name (Events): event to attach the handler to 71 *args: optional args to be passed to `handler` 72 **kwargs: optional keyword args to be passed to `handler` 73 74 """ 75 def decorator(f): 76 self.add_event_handler(event_name, f, *args, **kwargs) 77 return f 78 return decorator 79 80 def _fire_event(self, event_name, *event_args): 81 if event_name in self._event_handlers.keys(): 82 self._logger.debug("firing handlers for event %s ", event_name) 83 for func, args, kwargs in self._event_handlers[event_name]: 84 func(self, *(event_args + args), **kwargs) 85 86 def terminate(self): 87 """Sends terminate signal to the engine, so that it terminates after the current iteration 88 """ 89 self._logger.info("Terminate signaled. 
Engine will stop after current iteration is finished") 90 self.should_terminate = True 91 92 def _run_once_on_dataset(self): 93 try: 94 start_time = time.time() 95 for batch in self.state.dataloader: 96 self.state.batch = batch 97 self.state.iteration += 1 98 self._fire_event(Events.ITERATION_STARTED) 99 self.state.output = self._process_function(self, batch) 100 self._fire_event(Events.ITERATION_COMPLETED) 101 if self.should_terminate: 102 break 103 104 time_taken = time.time() - start_time 105 hours, mins, secs = _to_hours_mins_secs(time_taken) 106 return hours, mins, secs 107 except BaseException as e: 108 self._logger.error("Current run is terminating due to exception: %s", str(e)) 109 self._handle_exception(e) 110 111 def _handle_exception(self, e): 112 if Events.EXCEPTION_RAISED in self._event_handlers: 113 self._fire_event(Events.EXCEPTION_RAISED, e) 114 else: 115 raise e 116 117 def run(self, data, max_epochs=1): 118 """Runs the process_function over the passed data. 119 120 Args: 121 data (Iterable): Collection of batches allowing repeated iteration (e.g., list or DataLoader) 122 max_epochs (int, optional): max epochs to run for (default: 1) 123 124 Returns: 125 State: output state 126 """ 127 self.state = State(dataloader=data, epoch=0, max_epochs=max_epochs, metrics={}) 128 129 try: 130 self._logger.info("Training starting with max_epochs={}".format(max_epochs)) 131 start_time = time.time() 132 self._fire_event(Events.STARTED) 133 while self.state.epoch < max_epochs and not self.should_terminate: 134 self.state.epoch += 1 135 self._fire_event(Events.EPOCH_STARTED) 136 hours, mins, secs = self._run_once_on_dataset() 137 self._logger.info("Epoch[%s] Complete. Time taken: %02d:%02d:%02d", self.state.epoch, hours, mins, secs) 138 if self.should_terminate: 139 break 140 self._fire_event(Events.EPOCH_COMPLETED) 141 142 self._fire_event(Events.COMPLETED) 143 time_taken = time.time() - start_time 144 hours, mins, secs = _to_hours_mins_secs(time_taken) 145 self._logger.info("Training complete. Time taken %02d:%02d:%02d" % (hours, mins, secs)) 146 147 except BaseException as e: 148 self._logger.error("Training is terminating due to exception: %s", str(e)) 149 self._handle_exception(e) 150 151 return self.state 152 ``` Path: `docs/source/conf.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 # 3 # Configuration file for the Sphinx documentation builder. 4 # 5 # This file does only contain a selection of the most common options. For a 6 # full list see the documentation: 7 # http://www.sphinx-doc.org/en/stable/config 8 9 # -- Path setup -------------------------------------------------------------- 10 11 # If extensions (or modules to document with autodoc) are in another directory, 12 # add these directories to sys.path here. If the directory is relative to the 13 # documentation root, use os.path.abspath to make it absolute, like shown here. 14 # 15 # import os 16 # import sys 17 # sys.path.insert(0, os.path.abspath('.')) 18 import ignite 19 import sphinx_rtd_theme 20 21 # -- Project information ----------------------------------------------------- 22 23 project = 'ignite' 24 copyright = '2018, Torch Contributors' 25 author = 'Torch Contributors' 26 27 # The short X.Y version 28 version = 'master (' + ignite.__version__ + ' )' 29 # The full version, including alpha/beta/rc tags 30 release = 'master' 31 32 33 # -- General configuration --------------------------------------------------- 34 35 # If your documentation needs a minimal Sphinx version, state it here. 
36 # 37 # needs_sphinx = '1.0' 38 39 # Add any Sphinx extension module names here, as strings. They can be 40 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 41 # ones. 42 extensions = [ 43 'sphinx.ext.autodoc', 44 'sphinx.ext.doctest', 45 'sphinx.ext.intersphinx', 46 'sphinx.ext.todo', 47 'sphinx.ext.mathjax', 48 'sphinx.ext.viewcode', 49 ] 50 51 # Add any paths that contain templates here, relative to this directory. 52 templates_path = ['_templates'] 53 54 # The suffix(es) of source filenames. 55 # You can specify multiple suffix as a list of string: 56 # 57 # source_suffix = ['.rst', '.md'] 58 source_suffix = '.rst' 59 60 # The master toctree document. 61 master_doc = 'index' 62 63 # The language for content autogenerated by Sphinx. Refer to documentation 64 # for a list of supported languages. 65 # 66 # This is also used if you do content translation via gettext catalogs. 67 # Usually you set "language" from the command line for these cases. 68 language = None 69 70 # List of patterns, relative to source directory, that match files and 71 # directories to ignore when looking for source files. 72 # This pattern also affects html_static_path and html_extra_path . 73 exclude_patterns = [] 74 75 # The name of the Pygments (syntax highlighting) style to use. 76 pygments_style = 'sphinx' 77 78 79 # -- Options for HTML output ------------------------------------------------- 80 81 # The theme to use for HTML and HTML Help pages. See the documentation for 82 # a list of builtin themes. 83 # 84 html_theme = 'sphinx_rtd_theme' 85 html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] 86 87 html_theme_options = { 88 'collapse_navigation': False, 89 'display_version': True, 90 'logo_only': True, 91 } 92 93 html_logo = '_static/img/pytorch-logo-dark.svg' 94 95 # Theme options are theme-specific and customize the look and feel of a theme 96 # further. For a list of options available for each theme, see the 97 # documentation. 98 # 99 # html_theme_options = {} 100 101 # Add any paths that contain custom static files (such as style sheets) here, 102 # relative to this directory. They are copied after the builtin static files, 103 # so a file named "default.css" will overwrite the builtin "default.css". 104 html_static_path = ['_static'] 105 106 html_context = { 107 'css_files': [ 108 'https://fonts.googleapis.com/css?family=Lato', 109 '_static/css/pytorch_theme.css' 110 ], 111 } 112 113 114 # -- Options for HTMLHelp output --------------------------------------------- 115 116 # Output file base name for HTML help builder. 117 htmlhelp_basename = 'ignitedoc' 118 119 120 # -- Options for LaTeX output ------------------------------------------------ 121 122 latex_elements = { 123 # The paper size ('letterpaper' or 'a4paper'). 124 # 125 # 'papersize': 'letterpaper', 126 127 # The font size ('10pt', '11pt' or '12pt'). 128 # 129 # 'pointsize': '10pt', 130 131 # Additional stuff for the LaTeX preamble. 132 # 133 # 'preamble': '', 134 135 # Latex figure (float) alignment 136 # 137 # 'figure_align': 'htbp', 138 } 139 140 # Grouping the document tree into LaTeX files. List of tuples 141 # (source start file, target name, title, 142 # author, documentclass [howto, manual, or own class]). 143 latex_documents = [ 144 (master_doc, 'ignite.tex', 'ignite Documentation', 145 'Torch Contributors', 'manual'), 146 ] 147 148 149 # -- Options for manual page output ------------------------------------------ 150 151 # One entry per manual page. 
List of tuples 152 # (source start file, name, description, authors, manual section). 153 man_pages = [ 154 (master_doc, 'ignite', 'ignite Documentation', 155 [author], 1) 156 ] 157 158 159 # -- Options for Texinfo output ---------------------------------------------- 160 161 # Grouping the document tree into Texinfo files. List of tuples 162 # (source start file, target name, title, author, 163 # dir menu entry, description, category) 164 texinfo_documents = [ 165 (master_doc, 'ignite', 'ignite Documentation', 166 author, 'ignite', 'One line description of project.', 167 'Miscellaneous'), 168 ] 169 170 171 # -- Extension configuration ------------------------------------------------- 172 173 # -- Options for intersphinx extension --------------------------------------- 174 175 # Example configuration for intersphinx: refer to the Python standard library. 176 intersphinx_mapping = {'https://docs.python.org/': None} 177 178 # -- Options for todo extension ---------------------------------------------- 179 180 # If true, `todo` and `todoList` produce output, else they produce nothing. 181 todo_include_todos = True 182 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/docs/source/conf.py b/docs/source/conf.py --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -40,12 +40,14 @@ # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ - 'sphinx.ext.autodoc', + 'sphinx.ext.autosummary', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', + 'sphinx.ext.coverage', 'sphinx.ext.mathjax', - 'sphinx.ext.viewcode', + 'sphinx.ext.napoleon', + 'sphinx.ext.viewcode' ] # Add any paths that contain templates here, relative to this directory. diff --git a/ignite/engines/engine.py b/ignite/engines/engine.py --- a/ignite/engines/engine.py +++ b/ignite/engines/engine.py @@ -6,6 +6,7 @@ class Events(Enum): + """Events that are fired by the :class:`ignite.engines.Engine` during execution""" EPOCH_STARTED = "epoch_started" EPOCH_COMPLETED = "epoch_completed" STARTED = "started" @@ -16,6 +17,7 @@ class State(object): + """An object that is used to pass internal and user-defined state between event handlers""" def __init__(self, **kwargs): self.iteration = 0 self.output = None
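Editorial note (not part of the record above): since the patch mainly adds docstrings for `Events` and `State` and switches the docs build to autosummary/napoleon, a minimal usage sketch of the API being documented may help readers of `engines.rst`. It is based only on the `ignite/engines/engine.py` shown in this record:

```python
# Minimal sketch of the Engine/Events API, per ignite/engines/engine.py above.
from ignite.engines.engine import Engine, Events

# process_function receives (engine, batch); its return value becomes state.output
trainer = Engine(lambda engine, batch: batch * 2)

@trainer.on(Events.EPOCH_COMPLETED)
def log_epoch(engine):
    print('epoch', engine.state.epoch, 'last output', engine.state.output)

state = trainer.run(data=[1, 2, 3], max_epochs=2)  # returns the final State
```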
{"golden_diff": "diff --git a/docs/source/conf.py b/docs/source/conf.py\n--- a/docs/source/conf.py\n+++ b/docs/source/conf.py\n@@ -40,12 +40,14 @@\n # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n # ones.\n extensions = [\n- 'sphinx.ext.autodoc',\n+ 'sphinx.ext.autosummary',\n 'sphinx.ext.doctest',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.todo',\n+ 'sphinx.ext.coverage',\n 'sphinx.ext.mathjax',\n- 'sphinx.ext.viewcode',\n+ 'sphinx.ext.napoleon',\n+ 'sphinx.ext.viewcode'\n ]\n \n # Add any paths that contain templates here, relative to this directory.\ndiff --git a/ignite/engines/engine.py b/ignite/engines/engine.py\n--- a/ignite/engines/engine.py\n+++ b/ignite/engines/engine.py\n@@ -6,6 +6,7 @@\n \n \n class Events(Enum):\n+ \"\"\"Events that are fired by the :class:`ignite.engines.Engine` during execution\"\"\"\n EPOCH_STARTED = \"epoch_started\"\n EPOCH_COMPLETED = \"epoch_completed\"\n STARTED = \"started\"\n@@ -16,6 +17,7 @@\n \n \n class State(object):\n+ \"\"\"An object that is used to pass internal and user-defined state between event handlers\"\"\"\n def __init__(self, **kwargs):\n self.iteration = 0\n self.output = None\n", "issue": "Complete engines.rst sphinx package reference\n\n", "before_files": [{"content": "import logging\nimport time\nfrom enum import Enum\n\nfrom ignite._utils import _to_hours_mins_secs\n\n\nclass Events(Enum):\n EPOCH_STARTED = \"epoch_started\"\n EPOCH_COMPLETED = \"epoch_completed\"\n STARTED = \"started\"\n COMPLETED = \"completed\"\n ITERATION_STARTED = \"iteration_started\"\n ITERATION_COMPLETED = \"iteration_completed\"\n EXCEPTION_RAISED = \"exception_raised\"\n\n\nclass State(object):\n def __init__(self, **kwargs):\n self.iteration = 0\n self.output = None\n self.batch = None\n for k, v in kwargs.items():\n setattr(self, k, v)\n\n\nclass Engine(object):\n \"\"\"Runs a given process_function over each batch of a dataset, emitting events as it goes.\n\n Args:\n process_function (Callable): A function receiving a handle to the engine and the current batch\n in each iteration, outputing data to be stored in the state\n\n \"\"\"\n def __init__(self, process_function):\n self._event_handlers = {}\n self._logger = logging.getLogger(__name__ + \".\" + self.__class__.__name__)\n self._logger.addHandler(logging.NullHandler())\n self._process_function = process_function\n self.should_terminate = False\n self.state = None\n\n if self._process_function is None:\n raise ValueError(\"Engine must be given a processing function in order to run\")\n\n def add_event_handler(self, event_name, handler, *args, **kwargs):\n \"\"\"Add an event handler to be executed when the specified event is fired\n\n Args:\n event_name (Events): event from ignite.engines.Events to attach the handler to\n handler (Callable): the callable event handler that should be invoked\n *args: optional args to be passed to `handler`\n **kwargs: optional keyword args to be passed to `handler`\n\n \"\"\"\n if event_name not in Events.__members__.values():\n self._logger.error(\"attempt to add event handler to an invalid event %s \", event_name)\n raise ValueError(\"Event {} is not a valid event for this Engine\".format(event_name))\n\n if event_name not in self._event_handlers:\n self._event_handlers[event_name] = []\n\n self._event_handlers[event_name].append((handler, args, kwargs))\n self._logger.debug(\"added handler for event %s \", event_name)\n\n def on(self, event_name, *args, **kwargs):\n \"\"\"Decorator shortcut for add_event_handler\n\n Args:\n event_name 
(Events): event to attach the handler to\n *args: optional args to be passed to `handler`\n **kwargs: optional keyword args to be passed to `handler`\n\n \"\"\"\n def decorator(f):\n self.add_event_handler(event_name, f, *args, **kwargs)\n return f\n return decorator\n\n def _fire_event(self, event_name, *event_args):\n if event_name in self._event_handlers.keys():\n self._logger.debug(\"firing handlers for event %s \", event_name)\n for func, args, kwargs in self._event_handlers[event_name]:\n func(self, *(event_args + args), **kwargs)\n\n def terminate(self):\n \"\"\"Sends terminate signal to the engine, so that it terminates after the current iteration\n \"\"\"\n self._logger.info(\"Terminate signaled. Engine will stop after current iteration is finished\")\n self.should_terminate = True\n\n def _run_once_on_dataset(self):\n try:\n start_time = time.time()\n for batch in self.state.dataloader:\n self.state.batch = batch\n self.state.iteration += 1\n self._fire_event(Events.ITERATION_STARTED)\n self.state.output = self._process_function(self, batch)\n self._fire_event(Events.ITERATION_COMPLETED)\n if self.should_terminate:\n break\n\n time_taken = time.time() - start_time\n hours, mins, secs = _to_hours_mins_secs(time_taken)\n return hours, mins, secs\n except BaseException as e:\n self._logger.error(\"Current run is terminating due to exception: %s\", str(e))\n self._handle_exception(e)\n\n def _handle_exception(self, e):\n if Events.EXCEPTION_RAISED in self._event_handlers:\n self._fire_event(Events.EXCEPTION_RAISED, e)\n else:\n raise e\n\n def run(self, data, max_epochs=1):\n \"\"\"Runs the process_function over the passed data.\n\n Args:\n data (Iterable): Collection of batches allowing repeated iteration (e.g., list or DataLoader)\n max_epochs (int, optional): max epochs to run for (default: 1)\n\n Returns:\n State: output state\n \"\"\"\n self.state = State(dataloader=data, epoch=0, max_epochs=max_epochs, metrics={})\n\n try:\n self._logger.info(\"Training starting with max_epochs={}\".format(max_epochs))\n start_time = time.time()\n self._fire_event(Events.STARTED)\n while self.state.epoch < max_epochs and not self.should_terminate:\n self.state.epoch += 1\n self._fire_event(Events.EPOCH_STARTED)\n hours, mins, secs = self._run_once_on_dataset()\n self._logger.info(\"Epoch[%s] Complete. Time taken: %02d:%02d:%02d\", self.state.epoch, hours, mins, secs)\n if self.should_terminate:\n break\n self._fire_event(Events.EPOCH_COMPLETED)\n\n self._fire_event(Events.COMPLETED)\n time_taken = time.time() - start_time\n hours, mins, secs = _to_hours_mins_secs(time_taken)\n self._logger.info(\"Training complete. Time taken %02d:%02d:%02d\" % (hours, mins, secs))\n\n except BaseException as e:\n self._logger.error(\"Training is terminating due to exception: %s\", str(e))\n self._handle_exception(e)\n\n return self.state\n", "path": "ignite/engines/engine.py"}, {"content": "# -*- coding: utf-8 -*-\n#\n# Configuration file for the Sphinx documentation builder.\n#\n# This file does only contain a selection of the most common options. For a\n# full list see the documentation:\n# http://www.sphinx-doc.org/en/stable/config\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. 
If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n# import os\n# import sys\n# sys.path.insert(0, os.path.abspath('.'))\nimport ignite\nimport sphinx_rtd_theme\n\n# -- Project information -----------------------------------------------------\n\nproject = 'ignite'\ncopyright = '2018, Torch Contributors'\nauthor = 'Torch Contributors'\n\n# The short X.Y version\nversion = 'master (' + ignite.__version__ + ' )'\n# The full version, including alpha/beta/rc tags\nrelease = 'master'\n\n\n# -- General configuration ---------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.doctest',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.todo',\n 'sphinx.ext.mathjax',\n 'sphinx.ext.viewcode',\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = '.rst'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path .\nexclude_patterns = []\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'sphinx_rtd_theme'\nhtml_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\nhtml_theme_options = {\n 'collapse_navigation': False,\n 'display_version': True,\n 'logo_only': True,\n}\n\nhtml_logo = '_static/img/pytorch-logo-dark.svg'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\n# html_theme_options = {}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\nhtml_context = {\n 'css_files': [\n 'https://fonts.googleapis.com/css?family=Lato',\n '_static/css/pytorch_theme.css'\n ],\n}\n\n\n# -- Options for HTMLHelp output ---------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'ignitedoc'\n\n\n# -- Options for LaTeX output ------------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, 'ignite.tex', 'ignite Documentation',\n 'Torch Contributors', 'manual'),\n]\n\n\n# -- Options for manual page output ------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (master_doc, 'ignite', 'ignite Documentation',\n [author], 1)\n]\n\n\n# -- Options for Texinfo output ----------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, 'ignite', 'ignite Documentation',\n author, 'ignite', 'One line description of project.',\n 'Miscellaneous'),\n]\n\n\n# -- Extension configuration -------------------------------------------------\n\n# -- Options for intersphinx extension ---------------------------------------\n\n# Example configuration for intersphinx: refer to the Python standard library.\nintersphinx_mapping = {'https://docs.python.org/': None}\n\n# -- Options for todo extension ----------------------------------------------\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = True\n", "path": "docs/source/conf.py"}], "after_files": [{"content": "import logging\nimport time\nfrom enum import Enum\n\nfrom ignite._utils import _to_hours_mins_secs\n\n\nclass Events(Enum):\n \"\"\"Events that are fired by the :class:`ignite.engines.Engine` during execution\"\"\"\n EPOCH_STARTED = \"epoch_started\"\n EPOCH_COMPLETED = \"epoch_completed\"\n STARTED = \"started\"\n COMPLETED = \"completed\"\n ITERATION_STARTED = \"iteration_started\"\n ITERATION_COMPLETED = \"iteration_completed\"\n EXCEPTION_RAISED = \"exception_raised\"\n\n\nclass State(object):\n \"\"\"An object that is used to pass internal and user-defined state between event handlers\"\"\"\n def __init__(self, **kwargs):\n self.iteration = 0\n self.output = None\n self.batch = None\n for k, v in kwargs.items():\n setattr(self, k, v)\n\n\nclass Engine(object):\n \"\"\"Runs a given process_function over each batch of a dataset, emitting events as it goes.\n\n Args:\n process_function (Callable): A function receiving a handle to the engine and the current batch\n in each iteration, outputing data to be stored in the state\n\n \"\"\"\n def __init__(self, process_function):\n self._event_handlers = {}\n self._logger = logging.getLogger(__name__ + \".\" + 
self.__class__.__name__)\n self._logger.addHandler(logging.NullHandler())\n self._process_function = process_function\n self.should_terminate = False\n self.state = None\n\n if self._process_function is None:\n raise ValueError(\"Engine must be given a processing function in order to run\")\n\n def add_event_handler(self, event_name, handler, *args, **kwargs):\n \"\"\"Add an event handler to be executed when the specified event is fired\n\n Args:\n event_name (Events): event from ignite.engines.Events to attach the handler to\n handler (Callable): the callable event handler that should be invoked\n *args: optional args to be passed to `handler`\n **kwargs: optional keyword args to be passed to `handler`\n\n \"\"\"\n if event_name not in Events.__members__.values():\n self._logger.error(\"attempt to add event handler to an invalid event %s \", event_name)\n raise ValueError(\"Event {} is not a valid event for this Engine\".format(event_name))\n\n if event_name not in self._event_handlers:\n self._event_handlers[event_name] = []\n\n self._event_handlers[event_name].append((handler, args, kwargs))\n self._logger.debug(\"added handler for event %s \", event_name)\n\n def on(self, event_name, *args, **kwargs):\n \"\"\"Decorator shortcut for add_event_handler\n\n Args:\n event_name (Events): event to attach the handler to\n *args: optional args to be passed to `handler`\n **kwargs: optional keyword args to be passed to `handler`\n\n \"\"\"\n def decorator(f):\n self.add_event_handler(event_name, f, *args, **kwargs)\n return f\n return decorator\n\n def _fire_event(self, event_name, *event_args):\n if event_name in self._event_handlers.keys():\n self._logger.debug(\"firing handlers for event %s \", event_name)\n for func, args, kwargs in self._event_handlers[event_name]:\n func(self, *(event_args + args), **kwargs)\n\n def terminate(self):\n \"\"\"Sends terminate signal to the engine, so that it terminates after the current iteration\n \"\"\"\n self._logger.info(\"Terminate signaled. 
Engine will stop after current iteration is finished\")\n self.should_terminate = True\n\n def _run_once_on_dataset(self):\n try:\n start_time = time.time()\n for batch in self.state.dataloader:\n self.state.batch = batch\n self.state.iteration += 1\n self._fire_event(Events.ITERATION_STARTED)\n self.state.output = self._process_function(self, batch)\n self._fire_event(Events.ITERATION_COMPLETED)\n if self.should_terminate:\n break\n\n time_taken = time.time() - start_time\n hours, mins, secs = _to_hours_mins_secs(time_taken)\n return hours, mins, secs\n except BaseException as e:\n self._logger.error(\"Current run is terminating due to exception: %s\", str(e))\n self._handle_exception(e)\n\n def _handle_exception(self, e):\n if Events.EXCEPTION_RAISED in self._event_handlers:\n self._fire_event(Events.EXCEPTION_RAISED, e)\n else:\n raise e\n\n def run(self, data, max_epochs=1):\n \"\"\"Runs the process_function over the passed data.\n\n Args:\n data (Iterable): Collection of batches allowing repeated iteration (e.g., list or DataLoader)\n max_epochs (int, optional): max epochs to run for (default: 1)\n\n Returns:\n State: output state\n \"\"\"\n self.state = State(dataloader=data, epoch=0, max_epochs=max_epochs, metrics={})\n\n try:\n self._logger.info(\"Training starting with max_epochs={}\".format(max_epochs))\n start_time = time.time()\n self._fire_event(Events.STARTED)\n while self.state.epoch < max_epochs and not self.should_terminate:\n self.state.epoch += 1\n self._fire_event(Events.EPOCH_STARTED)\n hours, mins, secs = self._run_once_on_dataset()\n self._logger.info(\"Epoch[%s] Complete. Time taken: %02d:%02d:%02d\", self.state.epoch, hours, mins, secs)\n if self.should_terminate:\n break\n self._fire_event(Events.EPOCH_COMPLETED)\n\n self._fire_event(Events.COMPLETED)\n time_taken = time.time() - start_time\n hours, mins, secs = _to_hours_mins_secs(time_taken)\n self._logger.info(\"Training complete. Time taken %02d:%02d:%02d\" % (hours, mins, secs))\n\n except BaseException as e:\n self._logger.error(\"Training is terminating due to exception: %s\", str(e))\n self._handle_exception(e)\n\n return self.state\n", "path": "ignite/engines/engine.py"}, {"content": "# -*- coding: utf-8 -*-\n#\n# Configuration file for the Sphinx documentation builder.\n#\n# This file does only contain a selection of the most common options. For a\n# full list see the documentation:\n# http://www.sphinx-doc.org/en/stable/config\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n# import os\n# import sys\n# sys.path.insert(0, os.path.abspath('.'))\nimport ignite\nimport sphinx_rtd_theme\n\n# -- Project information -----------------------------------------------------\n\nproject = 'ignite'\ncopyright = '2018, Torch Contributors'\nauthor = 'Torch Contributors'\n\n# The short X.Y version\nversion = 'master (' + ignite.__version__ + ' )'\n# The full version, including alpha/beta/rc tags\nrelease = 'master'\n\n\n# -- General configuration ---------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autosummary',\n 'sphinx.ext.doctest',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.todo',\n 'sphinx.ext.coverage',\n 'sphinx.ext.mathjax',\n 'sphinx.ext.napoleon',\n 'sphinx.ext.viewcode'\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = '.rst'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path .\nexclude_patterns = []\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'sphinx_rtd_theme'\nhtml_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\nhtml_theme_options = {\n 'collapse_navigation': False,\n 'display_version': True,\n 'logo_only': True,\n}\n\nhtml_logo = '_static/img/pytorch-logo-dark.svg'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\n# html_theme_options = {}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\nhtml_context = {\n 'css_files': [\n 'https://fonts.googleapis.com/css?family=Lato',\n '_static/css/pytorch_theme.css'\n ],\n}\n\n\n# -- Options for HTMLHelp output ---------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'ignitedoc'\n\n\n# -- Options for LaTeX output ------------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, 'ignite.tex', 'ignite Documentation',\n 'Torch Contributors', 'manual'),\n]\n\n\n# -- Options for manual page output ------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (master_doc, 'ignite', 'ignite Documentation',\n [author], 1)\n]\n\n\n# -- Options for Texinfo output ----------------------------------------------\n\n# Grouping the document tree into Texinfo files. 
List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, 'ignite', 'ignite Documentation',\n author, 'ignite', 'One line description of project.',\n 'Miscellaneous'),\n]\n\n\n# -- Extension configuration -------------------------------------------------\n\n# -- Options for intersphinx extension ---------------------------------------\n\n# Example configuration for intersphinx: refer to the Python standard library.\nintersphinx_mapping = {'https://docs.python.org/': None}\n\n# -- Options for todo extension ----------------------------------------------\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = True\n", "path": "docs/source/conf.py"}]}
3,598
331
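The Engine shown in the record above is driven entirely through its event API (`add_event_handler` / the `on` decorator, plus `run`). As an illustrative sketch only, the snippet below wires a toy process function and one `EPOCH_COMPLETED` handler to such an engine; the import path mirrors the `ignite/engines/engine.py` layout given in the record (later ignite releases moved this module to `ignite.engine`), and the toy update function and data are assumptions, not part of the record.

```python
# Minimal sketch, not part of the record: drive the Engine shown above with a
# toy process function and a single epoch-completed handler.
from ignite.engines.engine import Engine, Events  # module path as given in the record (old layout)


def update(engine, batch):
    # The process function receives (engine, batch); its return value becomes
    # engine.state.output for that iteration.
    return sum(batch)


trainer = Engine(update)


@trainer.on(Events.EPOCH_COMPLETED)
def log_epoch(engine):
    print("epoch", engine.state.epoch, "last output", engine.state.output)


# run() accepts any iterable of batches and loops over it max_epochs times.
state = trainer.run([[1, 2], [3, 4], [5, 6]], max_epochs=2)
print(state.iteration)  # 6: three batches per epoch, two epochs
```

Running it for two epochs over three batches leaves `state.iteration` at 6, which is exactly the bookkeeping the `State` object in the record is responsible for.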
gh_patches_debug_23049
rasdani/github-patches
git_diff
StackStorm__st2-5775
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Add query type to linux.dig action ## SUMMARY I would like the ability to query TXT records and noticed there is no way to specify a query type to the dig action. ### STACKSTORM VERSION `st2 3.6.0, on Python 3.6.8` ## Steps to reproduce the problem I attempted a few ways to add "TXT" to the query by adding to queryopts or try appending to the string hostname. Upon looking at the code I realized nothing like that would work. ## Expected Results Get a list returned of TXT records ## Some sample code to add it ``` class DigAction(Action): def run(self, rand, count, nameserver, hostname, queryopts, querytype): # Add querytype parameter opt_list = [] output = [] cmd_args = ["dig"] if nameserver: nameserver = "@" + nameserver cmd_args.append(nameserver) if isinstance(queryopts, str) and "," in queryopts: opt_list = queryopts.split(",") else: opt_list.append(queryopts) cmd_args.extend(["+" + option for option in opt_list]) cmd_args.append(hostname) cmd_args.append(querytype) # append query type (Default is set to "A" in dig.yaml) try: raw_result = subprocess.Popen( cmd_args, stderr=subprocess.PIPE, stdout=subprocess.PIPE ).communicate()[0] if sys.version_info >= (3,): # This function might call getpreferred encoding unless we pass # do_setlocale=False. encoding = locale.getpreferredencoding(do_setlocale=False) result_list_str = raw_result.decode(encoding) else: result_list_str = str(raw_result) if querytype.lower() == "txt": # improve the output formatting result of TXT records result_list_str = result_list_str.replace('"', '') # strip quotes so we don't see \" wrapped around output result_list = list(filter(None, result_list_str.split("\n"))) ``` I only spent a few minutes on this code to test making it work for me. It could be improved on to make sure works for other types as well. I added inline comments to show the only lines I added --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `contrib/linux/actions/dig.py` Content: ``` 1 #! /usr/bin/python 2 3 # Copyright 2020 The StackStorm Authors. 4 # Copyright 2019 Extreme Networks, Inc. 5 # 6 # Licensed under the Apache License, Version 2.0 (the "License"); 7 # you may not use this file except in compliance with the License. 8 # You may obtain a copy of the License at 9 # 10 # http://www.apache.org/licenses/LICENSE-2.0 11 # 12 # Unless required by applicable law or agreed to in writing, software 13 # distributed under the License is distributed on an "AS IS" BASIS, 14 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 # See the License for the specific language governing permissions and 16 # limitations under the License. 
17 18 import errno 19 import locale 20 import subprocess 21 import random 22 import sys 23 24 from st2common.runners.base_action import Action 25 26 27 class DigAction(Action): 28 def run(self, rand, count, nameserver, hostname, queryopts): 29 opt_list = [] 30 output = [] 31 32 cmd_args = ["dig"] 33 if nameserver: 34 nameserver = "@" + nameserver 35 cmd_args.append(nameserver) 36 37 if isinstance(queryopts, str) and "," in queryopts: 38 opt_list = queryopts.split(",") 39 else: 40 opt_list.append(queryopts) 41 42 cmd_args.extend(["+" + option for option in opt_list]) 43 44 cmd_args.append(hostname) 45 46 try: 47 raw_result = subprocess.Popen( 48 cmd_args, stderr=subprocess.PIPE, stdout=subprocess.PIPE 49 ).communicate()[0] 50 51 if sys.version_info >= (3,): 52 # This function might call getpreferred encoding unless we pass 53 # do_setlocale=False. 54 encoding = locale.getpreferredencoding(do_setlocale=False) 55 result_list_str = raw_result.decode(encoding) 56 else: 57 result_list_str = str(raw_result) 58 59 result_list = list(filter(None, result_list_str.split("\n"))) 60 61 # NOTE: Python3 supports the FileNotFoundError, the errono.ENOENT is for py2 compat 62 # for Python3: 63 # except FileNotFoundError as e: 64 except OSError as e: 65 if e.errno == errno.ENOENT: 66 return ( 67 False, 68 "Can't find dig installed in the path (usually /usr/bin/dig). If " 69 "dig isn't installed, you can install it with 'sudo yum install " 70 "bind-utils' or 'sudo apt install dnsutils'", 71 ) 72 else: 73 raise e 74 75 if int(count) > len(result_list) or count <= 0: 76 count = len(result_list) 77 78 output = result_list[0:count] 79 if rand is True: 80 random.shuffle(output) 81 return output 82 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/contrib/linux/actions/dig.py b/contrib/linux/actions/dig.py
--- a/contrib/linux/actions/dig.py
+++ b/contrib/linux/actions/dig.py
@@ -25,7 +25,7 @@
 
 
 class DigAction(Action):
-    def run(self, rand, count, nameserver, hostname, queryopts):
+    def run(self, rand, count, nameserver, hostname, queryopts, querytype):
         opt_list = []
         output = []
 
@@ -42,6 +42,7 @@
         cmd_args.extend(["+" + option for option in opt_list])
 
         cmd_args.append(hostname)
+        cmd_args.append(querytype)
 
         try:
             raw_result = subprocess.Popen(
@@ -56,6 +57,10 @@
         else:
             result_list_str = str(raw_result)
 
+        # Better format the output when the type is TXT
+        if querytype.lower() == "txt":
+            result_list_str = result_list_str.replace('"', "")
+
         result_list = list(filter(None, result_list_str.split("\n")))
 
         # NOTE: Python3 supports the FileNotFoundError, the errono.ENOENT is for py2 compat
{"golden_diff": "diff --git a/contrib/linux/actions/dig.py b/contrib/linux/actions/dig.py\n--- a/contrib/linux/actions/dig.py\n+++ b/contrib/linux/actions/dig.py\n@@ -25,7 +25,7 @@\n \n \n class DigAction(Action):\n- def run(self, rand, count, nameserver, hostname, queryopts):\n+ def run(self, rand, count, nameserver, hostname, queryopts, querytype):\n opt_list = []\n output = []\n \n@@ -42,6 +42,7 @@\n cmd_args.extend([\"+\" + option for option in opt_list])\n \n cmd_args.append(hostname)\n+ cmd_args.append(querytype)\n \n try:\n raw_result = subprocess.Popen(\n@@ -56,6 +57,10 @@\n else:\n result_list_str = str(raw_result)\n \n+ # Better format the output when the type is TXT\n+ if querytype.lower() == \"txt\":\n+ result_list_str = result_list_str.replace('\"', \"\")\n+\n result_list = list(filter(None, result_list_str.split(\"\\n\")))\n \n # NOTE: Python3 supports the FileNotFoundError, the errono.ENOENT is for py2 compat\n", "issue": "Add query type to linux.dig action\n## SUMMARY\r\n\r\nI would like the ability to query TXT records and noticed there is no way to specify a query type to the dig action. \r\n\r\n### STACKSTORM VERSION\r\n\r\n`st2 3.6.0, on Python 3.6.8`\r\n\r\n## Steps to reproduce the problem\r\n\r\nI attempted a few ways to add \"TXT\" to the query by adding to queryopts or try appending to the string hostname. Upon looking at the code I realized nothing like that would work.\r\n\r\n## Expected Results\r\n\r\nGet a list returned of TXT records\r\n\r\n## Some sample code to add it\r\n\r\n```\r\nclass DigAction(Action):\r\n def run(self, rand, count, nameserver, hostname, queryopts, querytype): # Add querytype parameter\r\n opt_list = []\r\n output = []\r\n\r\n cmd_args = [\"dig\"]\r\n if nameserver:\r\n nameserver = \"@\" + nameserver\r\n cmd_args.append(nameserver)\r\n\r\n if isinstance(queryopts, str) and \",\" in queryopts:\r\n opt_list = queryopts.split(\",\")\r\n else:\r\n opt_list.append(queryopts)\r\n\r\n cmd_args.extend([\"+\" + option for option in opt_list])\r\n\r\n cmd_args.append(hostname)\r\n cmd_args.append(querytype) # append query type (Default is set to \"A\" in dig.yaml)\r\n\r\n try:\r\n raw_result = subprocess.Popen(\r\n cmd_args, stderr=subprocess.PIPE, stdout=subprocess.PIPE\r\n ).communicate()[0]\r\n\r\n if sys.version_info >= (3,):\r\n # This function might call getpreferred encoding unless we pass\r\n # do_setlocale=False.\r\n encoding = locale.getpreferredencoding(do_setlocale=False)\r\n result_list_str = raw_result.decode(encoding)\r\n else:\r\n result_list_str = str(raw_result)\r\n\r\n if querytype.lower() == \"txt\": # improve the output formatting result of TXT records\r\n result_list_str = result_list_str.replace('\"', '') # strip quotes so we don't see \\\" wrapped around output\r\n result_list = list(filter(None, result_list_str.split(\"\\n\")))\r\n```\r\n\r\nI only spent a few minutes on this code to test making it work for me. It could be improved on to make sure works for other types as well. I added inline comments to show the only lines I added\n", "before_files": [{"content": "#! 
/usr/bin/python\n\n# Copyright 2020 The StackStorm Authors.\n# Copyright 2019 Extreme Networks, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport errno\nimport locale\nimport subprocess\nimport random\nimport sys\n\nfrom st2common.runners.base_action import Action\n\n\nclass DigAction(Action):\n def run(self, rand, count, nameserver, hostname, queryopts):\n opt_list = []\n output = []\n\n cmd_args = [\"dig\"]\n if nameserver:\n nameserver = \"@\" + nameserver\n cmd_args.append(nameserver)\n\n if isinstance(queryopts, str) and \",\" in queryopts:\n opt_list = queryopts.split(\",\")\n else:\n opt_list.append(queryopts)\n\n cmd_args.extend([\"+\" + option for option in opt_list])\n\n cmd_args.append(hostname)\n\n try:\n raw_result = subprocess.Popen(\n cmd_args, stderr=subprocess.PIPE, stdout=subprocess.PIPE\n ).communicate()[0]\n\n if sys.version_info >= (3,):\n # This function might call getpreferred encoding unless we pass\n # do_setlocale=False.\n encoding = locale.getpreferredencoding(do_setlocale=False)\n result_list_str = raw_result.decode(encoding)\n else:\n result_list_str = str(raw_result)\n\n result_list = list(filter(None, result_list_str.split(\"\\n\")))\n\n # NOTE: Python3 supports the FileNotFoundError, the errono.ENOENT is for py2 compat\n # for Python3:\n # except FileNotFoundError as e:\n except OSError as e:\n if e.errno == errno.ENOENT:\n return (\n False,\n \"Can't find dig installed in the path (usually /usr/bin/dig). If \"\n \"dig isn't installed, you can install it with 'sudo yum install \"\n \"bind-utils' or 'sudo apt install dnsutils'\",\n )\n else:\n raise e\n\n if int(count) > len(result_list) or count <= 0:\n count = len(result_list)\n\n output = result_list[0:count]\n if rand is True:\n random.shuffle(output)\n return output\n", "path": "contrib/linux/actions/dig.py"}], "after_files": [{"content": "#! 
/usr/bin/python\n\n# Copyright 2020 The StackStorm Authors.\n# Copyright 2019 Extreme Networks, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport errno\nimport locale\nimport subprocess\nimport random\nimport sys\n\nfrom st2common.runners.base_action import Action\n\n\nclass DigAction(Action):\n def run(self, rand, count, nameserver, hostname, queryopts, querytype):\n opt_list = []\n output = []\n\n cmd_args = [\"dig\"]\n if nameserver:\n nameserver = \"@\" + nameserver\n cmd_args.append(nameserver)\n\n if isinstance(queryopts, str) and \",\" in queryopts:\n opt_list = queryopts.split(\",\")\n else:\n opt_list.append(queryopts)\n\n cmd_args.extend([\"+\" + option for option in opt_list])\n\n cmd_args.append(hostname)\n cmd_args.append(querytype)\n\n try:\n raw_result = subprocess.Popen(\n cmd_args, stderr=subprocess.PIPE, stdout=subprocess.PIPE\n ).communicate()[0]\n\n if sys.version_info >= (3,):\n # This function might call getpreferred encoding unless we pass\n # do_setlocale=False.\n encoding = locale.getpreferredencoding(do_setlocale=False)\n result_list_str = raw_result.decode(encoding)\n else:\n result_list_str = str(raw_result)\n\n # Better format the output when the type is TXT\n if querytype.lower() == \"txt\":\n result_list_str = result_list_str.replace('\"', \"\")\n\n result_list = list(filter(None, result_list_str.split(\"\\n\")))\n\n # NOTE: Python3 supports the FileNotFoundError, the errono.ENOENT is for py2 compat\n # for Python3:\n # except FileNotFoundError as e:\n except OSError as e:\n if e.errno == errno.ENOENT:\n return (\n False,\n \"Can't find dig installed in the path (usually /usr/bin/dig). If \"\n \"dig isn't installed, you can install it with 'sudo yum install \"\n \"bind-utils' or 'sudo apt install dnsutils'\",\n )\n else:\n raise e\n\n if int(count) > len(result_list) or count <= 0:\n count = len(result_list)\n\n output = result_list[0:count]\n if rand is True:\n random.shuffle(output)\n return output\n", "path": "contrib/linux/actions/dig.py"}]}
1,468
261
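The golden diff in this record simply appends the new `querytype` argument to the `dig` command line and strips quotes from TXT answers. The standalone sketch below reproduces that command construction outside of StackStorm so it can be tried directly; the `example.com` hostname, the `+short` default option, and the `shutil.which` guard are assumptions added for the demo and are not part of the action itself.

```python
# Standalone sketch of the command line the patched DigAction builds.
import shutil
import subprocess


def dig_query(hostname, querytype="A", nameserver=None, queryopts="short"):
    cmd_args = ["dig"]
    if nameserver:
        cmd_args.append("@" + nameserver)
    # queryopts may be a comma-separated string, exactly as in the action.
    opts = queryopts.split(",") if "," in queryopts else [queryopts]
    cmd_args.extend("+" + opt for opt in opts)
    cmd_args.append(hostname)
    cmd_args.append(querytype)  # the new parameter from the golden diff
    raw = subprocess.check_output(cmd_args).decode()
    if querytype.lower() == "txt":
        raw = raw.replace('"', "")  # same quote-stripping as the patch
    return [line for line in raw.split("\n") if line]


if shutil.which("dig"):
    print(dig_query("example.com", querytype="TXT"))
```

On a host with `dig` installed this prints the TXT records as a plain list, which is the behaviour the issue asks the action to expose.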
gh_patches_debug_35549
rasdani/github-patches
git_diff
pytorch__ignite-882
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Add helper decorators to use event handler or method with specified rank ## 🚀 Feature Idea is to simplify the following code: ```python import torch.distributed as dist if dist.get_rank() == 0: @trainer.on(...) def some_handler(_): ... tb_logger = TensorboardLogger(log_dir="experiments/tb_logs") ``` into ```python from ignite.utils import one_rank_only @trainer.on(...) @one_rank_only # equivalent @one_rank_only(rank=0) def some_handler(_): ... # implicitly work only on rank 0 tb_logger = TensorboardLogger(log_dir="experiments/tb_logs") ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `examples/contrib/mnist/mnist_with_neptune_logger.py` Content: ``` 1 """ 2 MNIST example with training and validation monitoring using Neptune. 3 4 Requirements: 5 Neptune: `pip install neptune-client` 6 7 Usage: 8 9 Run the example: 10 ```bash 11 python mnist_with_neptune_logger.py 12 ``` 13 14 Go to https://neptune.ai and explore your experiment. 15 16 Note: 17 You can see an example experiment here: 18 https://ui.neptune.ai/o/shared/org/pytorch-ignite-integration/e/PYTOR-26/charts 19 """ 20 import sys 21 from argparse import ArgumentParser 22 import logging 23 24 import torch 25 from torch.utils.data import DataLoader 26 from torch import nn 27 import torch.nn.functional as F 28 from torch.optim import SGD 29 from torchvision.datasets import MNIST 30 from torchvision.transforms import Compose, ToTensor, Normalize 31 32 from ignite.engine import Events, create_supervised_trainer, create_supervised_evaluator 33 from ignite.metrics import Accuracy, Loss 34 from ignite.handlers import Checkpoint 35 36 from ignite.contrib.handlers.neptune_logger import * 37 38 LOG_INTERVAL = 10 39 40 41 class Net(nn.Module): 42 def __init__(self): 43 super(Net, self).__init__() 44 self.conv1 = nn.Conv2d(1, 10, kernel_size=5) 45 self.conv2 = nn.Conv2d(10, 20, kernel_size=5) 46 self.conv2_drop = nn.Dropout2d() 47 self.fc1 = nn.Linear(320, 50) 48 self.fc2 = nn.Linear(50, 10) 49 50 def forward(self, x): 51 x = F.relu(F.max_pool2d(self.conv1(x), 2)) 52 x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2)) 53 x = x.view(-1, 320) 54 x = F.relu(self.fc1(x)) 55 x = F.dropout(x, training=self.training) 56 x = self.fc2(x) 57 return F.log_softmax(x, dim=-1) 58 59 60 def get_data_loaders(train_batch_size, val_batch_size): 61 data_transform = Compose([ToTensor(), Normalize((0.1307,), (0.3081,))]) 62 63 train_loader = DataLoader( 64 MNIST(download=True, root=".", transform=data_transform, train=True), batch_size=train_batch_size, shuffle=True 65 ) 66 67 val_loader = DataLoader( 68 MNIST(download=False, root=".", transform=data_transform, train=False), batch_size=val_batch_size, shuffle=False 69 ) 70 return train_loader, val_loader 71 72 73 def run(train_batch_size, val_batch_size, epochs, lr, momentum): 74 train_loader, val_loader = get_data_loaders(train_batch_size, val_batch_size) 75 model = Net() 76 device = "cpu" 77 78 if torch.cuda.is_available(): 79 device = "cuda" 80 81 optimizer = SGD(model.parameters(), lr=lr, momentum=momentum) 82 criterion = nn.CrossEntropyLoss() 83 trainer = create_supervised_trainer(model, optimizer, criterion, device=device) 84 85 if sys.version_info > (3,): 86 from ignite.contrib.metrics.gpu_info import GpuInfo 87 88 try: 89 GpuInfo().attach(trainer) 90 except RuntimeError: 91 print( 92 "INFO: By 
default, in this example it is possible to log GPU information (used memory, utilization). " 93 "As there is no pynvml python package installed, GPU information won't be logged. Otherwise, please " 94 "install it : `pip install pynvml`" 95 ) 96 97 metrics = {"accuracy": Accuracy(), "loss": Loss(criterion)} 98 99 train_evaluator = create_supervised_evaluator(model, metrics=metrics, device=device) 100 validation_evaluator = create_supervised_evaluator(model, metrics=metrics, device=device) 101 102 @trainer.on(Events.EPOCH_COMPLETED) 103 def compute_metrics(engine): 104 train_evaluator.run(train_loader) 105 validation_evaluator.run(val_loader) 106 107 npt_logger = NeptuneLogger( 108 api_token="ANONYMOUS", 109 project_name="shared/pytorch-ignite-integration", 110 name="ignite-mnist-example", 111 params={ 112 "train_batch_size": train_batch_size, 113 "val_batch_size": val_batch_size, 114 "epochs": epochs, 115 "lr": lr, 116 "momentum": momentum, 117 }, 118 ) 119 120 npt_logger.attach( 121 trainer, 122 log_handler=OutputHandler( 123 tag="training", output_transform=lambda loss: {"batchloss": loss}, metric_names="all" 124 ), 125 event_name=Events.ITERATION_COMPLETED(every=100), 126 ) 127 128 npt_logger.attach( 129 train_evaluator, 130 log_handler=OutputHandler(tag="training", metric_names=["loss", "accuracy"], another_engine=trainer), 131 event_name=Events.EPOCH_COMPLETED, 132 ) 133 134 npt_logger.attach( 135 validation_evaluator, 136 log_handler=OutputHandler(tag="validation", metric_names=["loss", "accuracy"], another_engine=trainer), 137 event_name=Events.EPOCH_COMPLETED, 138 ) 139 140 npt_logger.attach( 141 trainer, log_handler=OptimizerParamsHandler(optimizer), event_name=Events.ITERATION_COMPLETED(every=100) 142 ) 143 144 npt_logger.attach( 145 trainer, log_handler=WeightsScalarHandler(model), event_name=Events.ITERATION_COMPLETED(every=100) 146 ) 147 148 npt_logger.attach(trainer, log_handler=GradsScalarHandler(model), event_name=Events.ITERATION_COMPLETED(every=100)) 149 150 def score_function(engine): 151 return engine.state.metrics['accuracy'] 152 153 to_save = {'model': model} 154 handler = Checkpoint(to_save, NeptuneSaver(npt_logger), n_saved=2, 155 filename_prefix='best', score_function=score_function, 156 score_name="validation_accuracy", 157 global_step_transform=global_step_from_engine(trainer)) 158 validation_evaluator.add_event_handler(Events.COMPLETED, handler) 159 160 # kick everything off 161 trainer.run(train_loader, max_epochs=epochs) 162 npt_logger.close() 163 164 165 if __name__ == "__main__": 166 parser = ArgumentParser() 167 parser.add_argument("--batch_size", type=int, default=64, help="input batch size for training (default: 64)") 168 parser.add_argument( 169 "--val_batch_size", type=int, default=1000, help="input batch size for validation (default: 1000)" 170 ) 171 parser.add_argument("--epochs", type=int, default=10, help="number of epochs to train (default: 10)") 172 parser.add_argument("--lr", type=float, default=0.01, help="learning rate (default: 0.01)") 173 parser.add_argument("--momentum", type=float, default=0.5, help="SGD momentum (default: 0.5)") 174 175 args = parser.parse_args() 176 177 # Setup engine logger 178 logger = logging.getLogger("ignite.engine.engine.Engine") 179 handler = logging.StreamHandler() 180 formatter = logging.Formatter("%(asctime)s %(name)-12s %(levelname)-8s %(message)s") 181 handler.setFormatter(formatter) 182 logger.addHandler(handler) 183 logger.setLevel(logging.INFO) 184 185 run(args.batch_size, args.val_batch_size, args.epochs, 
args.lr, args.momentum) 186 ``` Path: `ignite/utils.py` Content: ``` 1 import collections.abc as collections 2 import logging 3 from typing import Union, Optional, Callable, Any, Type, Tuple 4 5 import torch 6 7 __all__ = ["convert_tensor", "apply_to_tensor", "apply_to_type", "to_onehot", "setup_logger"] 8 9 10 def convert_tensor( 11 input_: Union[torch.Tensor, collections.Sequence, collections.Mapping, str, bytes], 12 device: Optional[Union[str, torch.device]] = None, 13 non_blocking: bool = False, 14 ) -> Union[torch.Tensor, collections.Sequence, collections.Mapping, str, bytes]: 15 """Move tensors to relevant device.""" 16 17 def _func(tensor: torch.Tensor) -> torch.Tensor: 18 return tensor.to(device=device, non_blocking=non_blocking) if device is not None else tensor 19 20 return apply_to_tensor(input_, _func) 21 22 23 def apply_to_tensor( 24 input_: Union[torch.Tensor, collections.Sequence, collections.Mapping, str, bytes], func: Callable 25 ) -> Union[torch.Tensor, collections.Sequence, collections.Mapping, str, bytes]: 26 """Apply a function on a tensor or mapping, or sequence of tensors. 27 """ 28 return apply_to_type(input_, torch.Tensor, func) 29 30 31 def apply_to_type( 32 input_: Union[Any, collections.Sequence, collections.Mapping, str, bytes], 33 input_type: Union[Type, Tuple[Type[Any], Any]], 34 func: Callable, 35 ) -> Union[Any, collections.Sequence, collections.Mapping, str, bytes]: 36 """Apply a function on a object of `input_type` or mapping, or sequence of objects of `input_type`. 37 """ 38 if isinstance(input_, input_type): 39 return func(input_) 40 elif isinstance(input_, (str, bytes)): 41 return input_ 42 elif isinstance(input_, collections.Mapping): 43 return type(input_)({k: apply_to_type(sample, input_type, func) for k, sample in input_.items()}) 44 elif isinstance(input_, tuple) and hasattr(input_, "_fields"): # namedtuple 45 return type(input_)(*(apply_to_type(sample, input_type, func) for sample in input_)) 46 elif isinstance(input_, collections.Sequence): 47 return type(input_)([apply_to_type(sample, input_type, func) for sample in input_]) 48 else: 49 raise TypeError(("input must contain {}, dicts or lists; found {}".format(input_type, type(input_)))) 50 51 52 def to_onehot(indices: torch.Tensor, num_classes: int) -> torch.Tensor: 53 """Convert a tensor of indices of any shape `(N, ...)` to a 54 tensor of one-hot indicators of shape `(N, num_classes, ...) and of type uint8. Output's device is equal to the 55 input's device`. 56 """ 57 onehot = torch.zeros(indices.shape[0], num_classes, *indices.shape[1:], dtype=torch.uint8, device=indices.device) 58 return onehot.scatter_(1, indices.unsqueeze(1), 1) 59 60 61 def setup_logger( 62 name: str, 63 level: int = logging.INFO, 64 format: str = "%(asctime)s %(name)s %(levelname)s: %(message)s", 65 filepath: Optional[str] = None, 66 distributed_rank: int = 0, 67 ) -> logging.Logger: 68 """Setups logger: name, level, format etc. 69 70 Args: 71 name (str): new name for the logger. 72 level (int): logging level, e.g. CRITICAL, ERROR, WARNING, INFO, DEBUG 73 format (str): logging format. By default, `%(asctime)s %(name)s %(levelname)s: %(message)s` 74 filepath (str, optional): Optional logging file path. If not None, logs are written to the file. 75 distributed_rank (int, optional): Optional, rank in distributed configuration to avoid logger setup for workers. 76 77 Returns: 78 logging.Logger 79 80 For example, to improve logs readability when training with a trainer and evaluator: 81 82 .. 
code-block:: python 83 84 from ignite.utils import setup_logger 85 86 trainer = ... 87 evaluator = ... 88 89 trainer.logger = setup_logger("trainer") 90 evaluator.logger = setup_logger("evaluator") 91 92 trainer.run(data, max_epochs=10) 93 94 # Logs will look like 95 # 2020-01-21 12:46:07,356 trainer INFO: Engine run starting with max_epochs=5. 96 # 2020-01-21 12:46:07,358 trainer INFO: Epoch[1] Complete. Time taken: 00:5:23 97 # 2020-01-21 12:46:07,358 evaluator INFO: Engine run starting with max_epochs=1. 98 # 2020-01-21 12:46:07,358 evaluator INFO: Epoch[1] Complete. Time taken: 00:01:02 99 # ... 100 101 """ 102 logger = logging.getLogger(name) 103 104 if distributed_rank > 0: 105 return logger 106 107 logger.setLevel(level) 108 109 # Remove previous handlers 110 if logger.hasHandlers(): 111 for h in list(logger.handlers): 112 logger.removeHandler(h) 113 114 formatter = logging.Formatter(format) 115 116 ch = logging.StreamHandler() 117 ch.setLevel(level) 118 ch.setFormatter(formatter) 119 logger.addHandler(ch) 120 121 if filepath is not None: 122 fh = logging.FileHandler(filepath) 123 fh.setLevel(level) 124 fh.setFormatter(formatter) 125 logger.addHandler(fh) 126 127 return logger 128 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/examples/contrib/mnist/mnist_with_neptune_logger.py b/examples/contrib/mnist/mnist_with_neptune_logger.py
--- a/examples/contrib/mnist/mnist_with_neptune_logger.py
+++ b/examples/contrib/mnist/mnist_with_neptune_logger.py
@@ -148,13 +148,18 @@
     npt_logger.attach(trainer, log_handler=GradsScalarHandler(model), event_name=Events.ITERATION_COMPLETED(every=100))
 
     def score_function(engine):
-        return engine.state.metrics['accuracy']
-
-    to_save = {'model': model}
-    handler = Checkpoint(to_save, NeptuneSaver(npt_logger), n_saved=2,
-                         filename_prefix='best', score_function=score_function,
-                         score_name="validation_accuracy",
-                         global_step_transform=global_step_from_engine(trainer))
+        return engine.state.metrics["accuracy"]
+
+    to_save = {"model": model}
+    handler = Checkpoint(
+        to_save,
+        NeptuneSaver(npt_logger),
+        n_saved=2,
+        filename_prefix="best",
+        score_function=score_function,
+        score_name="validation_accuracy",
+        global_step_transform=global_step_from_engine(trainer),
+    )
     validation_evaluator.add_event_handler(Events.COMPLETED, handler)
 
     # kick everything off
diff --git a/ignite/utils.py b/ignite/utils.py
--- a/ignite/utils.py
+++ b/ignite/utils.py
@@ -1,10 +1,12 @@
 import collections.abc as collections
 import logging
+from functools import wraps
 from typing import Union, Optional, Callable, Any, Type, Tuple
 
 import torch
+import torch.distributed as dist
 
-__all__ = ["convert_tensor", "apply_to_tensor", "apply_to_type", "to_onehot", "setup_logger"]
+__all__ = ["convert_tensor", "apply_to_tensor", "apply_to_type", "to_onehot", "setup_logger", "one_rank_only"]
 
 
 def convert_tensor(
@@ -125,3 +127,37 @@
     logger.addHandler(fh)
 
     return logger
+
+
+def one_rank_only(rank: int = 0, barrier: bool = False):
+    """Decorator to filter handlers wrt a rank number
+
+    Args:
+        rank (int): rank number of the handler (default: 0).
+        barrier (bool): synchronisation with a barrier (default: False).
+
+    .. code-block:: python
+        engine = ...
+
+        @engine.on(...)
+        @one_rank_only()  # means @one_rank_only(rank=0)
+        def some_handler(_):
+            ...
+
+        @engine.on(...)
+        @one_rank_only(rank=1)
+        def some_handler(_):
+            ...
+    """
+    def _one_rank_only(func):
+        @wraps(func)
+        def wrapper(*args, **kwargs):
+            ret = None
+            if dist.get_rank() == rank:
+                ret = func(*args, **kwargs)
+            if barrier:
+                dist.barrier()
+            return ret
+        return wrapper
+
+    return _one_rank_only
{"golden_diff": "diff --git a/examples/contrib/mnist/mnist_with_neptune_logger.py b/examples/contrib/mnist/mnist_with_neptune_logger.py\n--- a/examples/contrib/mnist/mnist_with_neptune_logger.py\n+++ b/examples/contrib/mnist/mnist_with_neptune_logger.py\n@@ -148,13 +148,18 @@\n npt_logger.attach(trainer, log_handler=GradsScalarHandler(model), event_name=Events.ITERATION_COMPLETED(every=100))\n \n def score_function(engine):\n- return engine.state.metrics['accuracy']\n-\n- to_save = {'model': model}\n- handler = Checkpoint(to_save, NeptuneSaver(npt_logger), n_saved=2,\n- filename_prefix='best', score_function=score_function,\n- score_name=\"validation_accuracy\",\n- global_step_transform=global_step_from_engine(trainer))\n+ return engine.state.metrics[\"accuracy\"]\n+\n+ to_save = {\"model\": model}\n+ handler = Checkpoint(\n+ to_save,\n+ NeptuneSaver(npt_logger),\n+ n_saved=2,\n+ filename_prefix=\"best\",\n+ score_function=score_function,\n+ score_name=\"validation_accuracy\",\n+ global_step_transform=global_step_from_engine(trainer),\n+ )\n validation_evaluator.add_event_handler(Events.COMPLETED, handler)\n \n # kick everything off\ndiff --git a/ignite/utils.py b/ignite/utils.py\n--- a/ignite/utils.py\n+++ b/ignite/utils.py\n@@ -1,10 +1,12 @@\n import collections.abc as collections\n import logging\n+from functools import wraps\n from typing import Union, Optional, Callable, Any, Type, Tuple\n \n import torch\n+import torch.distributed as dist\n \n-__all__ = [\"convert_tensor\", \"apply_to_tensor\", \"apply_to_type\", \"to_onehot\", \"setup_logger\"]\n+__all__ = [\"convert_tensor\", \"apply_to_tensor\", \"apply_to_type\", \"to_onehot\", \"setup_logger\", \"one_rank_only\"]\n \n \n def convert_tensor(\n@@ -125,3 +127,37 @@\n logger.addHandler(fh)\n \n return logger\n+\n+\n+def one_rank_only(rank: int = 0, barrier: bool = False):\n+ \"\"\"Decorator to filter handlers wrt a rank number\n+\n+ Args:\n+ rank (int): rank number of the handler (default: 0).\n+ barrier (bool): synchronisation with a barrier (default: False).\n+\n+ .. 
code-block:: python\n+ engine = ...\n+\n+ @engine.on(...)\n+ @one_rank_only() # means @one_rank_only(rank=0)\n+ def some_handler(_):\n+ ...\n+\n+ @engine.on(...)\n+ @one_rank_only(rank=1)\n+ def some_handler(_):\n+ ...\n+ \"\"\"\n+ def _one_rank_only(func):\n+ @wraps(func)\n+ def wrapper(*args, **kwargs):\n+ ret = None\n+ if dist.get_rank() == rank:\n+ ret = func(*args, **kwargs)\n+ if barrier:\n+ dist.barrier()\n+ return ret\n+ return wrapper\n+\n+ return _one_rank_only\n", "issue": "Add helper decorators to use event handler or method with specified rank \n## \ud83d\ude80 Feature\r\n\r\nIdea is to simplify the following code:\r\n```python\r\nimport torch.distributed as dist\r\n\r\nif dist.get_rank() == 0:\r\n \r\n @trainer.on(...)\r\n def some_handler(_):\r\n ...\r\n\r\n tb_logger = TensorboardLogger(log_dir=\"experiments/tb_logs\") \r\n```\r\ninto \r\n\r\n```python\r\nfrom ignite.utils import one_rank_only\r\n\r\[email protected](...)\r\n@one_rank_only # equivalent @one_rank_only(rank=0)\r\ndef some_handler(_):\r\n ...\r\n\r\n# implicitly work only on rank 0\r\ntb_logger = TensorboardLogger(log_dir=\"experiments/tb_logs\") \r\n```\r\n\n", "before_files": [{"content": "\"\"\"\n MNIST example with training and validation monitoring using Neptune.\n\n Requirements:\n Neptune: `pip install neptune-client`\n\n Usage:\n\n Run the example:\n ```bash\n python mnist_with_neptune_logger.py\n ```\n\n Go to https://neptune.ai and explore your experiment.\n\nNote:\n You can see an example experiment here:\n https://ui.neptune.ai/o/shared/org/pytorch-ignite-integration/e/PYTOR-26/charts\n\"\"\"\nimport sys\nfrom argparse import ArgumentParser\nimport logging\n\nimport torch\nfrom torch.utils.data import DataLoader\nfrom torch import nn\nimport torch.nn.functional as F\nfrom torch.optim import SGD\nfrom torchvision.datasets import MNIST\nfrom torchvision.transforms import Compose, ToTensor, Normalize\n\nfrom ignite.engine import Events, create_supervised_trainer, create_supervised_evaluator\nfrom ignite.metrics import Accuracy, Loss\nfrom ignite.handlers import Checkpoint\n\nfrom ignite.contrib.handlers.neptune_logger import *\n\nLOG_INTERVAL = 10\n\n\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.conv1 = nn.Conv2d(1, 10, kernel_size=5)\n self.conv2 = nn.Conv2d(10, 20, kernel_size=5)\n self.conv2_drop = nn.Dropout2d()\n self.fc1 = nn.Linear(320, 50)\n self.fc2 = nn.Linear(50, 10)\n\n def forward(self, x):\n x = F.relu(F.max_pool2d(self.conv1(x), 2))\n x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))\n x = x.view(-1, 320)\n x = F.relu(self.fc1(x))\n x = F.dropout(x, training=self.training)\n x = self.fc2(x)\n return F.log_softmax(x, dim=-1)\n\n\ndef get_data_loaders(train_batch_size, val_batch_size):\n data_transform = Compose([ToTensor(), Normalize((0.1307,), (0.3081,))])\n\n train_loader = DataLoader(\n MNIST(download=True, root=\".\", transform=data_transform, train=True), batch_size=train_batch_size, shuffle=True\n )\n\n val_loader = DataLoader(\n MNIST(download=False, root=\".\", transform=data_transform, train=False), batch_size=val_batch_size, shuffle=False\n )\n return train_loader, val_loader\n\n\ndef run(train_batch_size, val_batch_size, epochs, lr, momentum):\n train_loader, val_loader = get_data_loaders(train_batch_size, val_batch_size)\n model = Net()\n device = \"cpu\"\n\n if torch.cuda.is_available():\n device = \"cuda\"\n\n optimizer = SGD(model.parameters(), lr=lr, momentum=momentum)\n criterion = nn.CrossEntropyLoss()\n trainer = 
create_supervised_trainer(model, optimizer, criterion, device=device)\n\n if sys.version_info > (3,):\n from ignite.contrib.metrics.gpu_info import GpuInfo\n\n try:\n GpuInfo().attach(trainer)\n except RuntimeError:\n print(\n \"INFO: By default, in this example it is possible to log GPU information (used memory, utilization). \"\n \"As there is no pynvml python package installed, GPU information won't be logged. Otherwise, please \"\n \"install it : `pip install pynvml`\"\n )\n\n metrics = {\"accuracy\": Accuracy(), \"loss\": Loss(criterion)}\n\n train_evaluator = create_supervised_evaluator(model, metrics=metrics, device=device)\n validation_evaluator = create_supervised_evaluator(model, metrics=metrics, device=device)\n\n @trainer.on(Events.EPOCH_COMPLETED)\n def compute_metrics(engine):\n train_evaluator.run(train_loader)\n validation_evaluator.run(val_loader)\n\n npt_logger = NeptuneLogger(\n api_token=\"ANONYMOUS\",\n project_name=\"shared/pytorch-ignite-integration\",\n name=\"ignite-mnist-example\",\n params={\n \"train_batch_size\": train_batch_size,\n \"val_batch_size\": val_batch_size,\n \"epochs\": epochs,\n \"lr\": lr,\n \"momentum\": momentum,\n },\n )\n\n npt_logger.attach(\n trainer,\n log_handler=OutputHandler(\n tag=\"training\", output_transform=lambda loss: {\"batchloss\": loss}, metric_names=\"all\"\n ),\n event_name=Events.ITERATION_COMPLETED(every=100),\n )\n\n npt_logger.attach(\n train_evaluator,\n log_handler=OutputHandler(tag=\"training\", metric_names=[\"loss\", \"accuracy\"], another_engine=trainer),\n event_name=Events.EPOCH_COMPLETED,\n )\n\n npt_logger.attach(\n validation_evaluator,\n log_handler=OutputHandler(tag=\"validation\", metric_names=[\"loss\", \"accuracy\"], another_engine=trainer),\n event_name=Events.EPOCH_COMPLETED,\n )\n\n npt_logger.attach(\n trainer, log_handler=OptimizerParamsHandler(optimizer), event_name=Events.ITERATION_COMPLETED(every=100)\n )\n\n npt_logger.attach(\n trainer, log_handler=WeightsScalarHandler(model), event_name=Events.ITERATION_COMPLETED(every=100)\n )\n\n npt_logger.attach(trainer, log_handler=GradsScalarHandler(model), event_name=Events.ITERATION_COMPLETED(every=100))\n\n def score_function(engine):\n return engine.state.metrics['accuracy']\n\n to_save = {'model': model}\n handler = Checkpoint(to_save, NeptuneSaver(npt_logger), n_saved=2,\n filename_prefix='best', score_function=score_function,\n score_name=\"validation_accuracy\",\n global_step_transform=global_step_from_engine(trainer))\n validation_evaluator.add_event_handler(Events.COMPLETED, handler)\n\n # kick everything off\n trainer.run(train_loader, max_epochs=epochs)\n npt_logger.close()\n\n\nif __name__ == \"__main__\":\n parser = ArgumentParser()\n parser.add_argument(\"--batch_size\", type=int, default=64, help=\"input batch size for training (default: 64)\")\n parser.add_argument(\n \"--val_batch_size\", type=int, default=1000, help=\"input batch size for validation (default: 1000)\"\n )\n parser.add_argument(\"--epochs\", type=int, default=10, help=\"number of epochs to train (default: 10)\")\n parser.add_argument(\"--lr\", type=float, default=0.01, help=\"learning rate (default: 0.01)\")\n parser.add_argument(\"--momentum\", type=float, default=0.5, help=\"SGD momentum (default: 0.5)\")\n\n args = parser.parse_args()\n\n # Setup engine logger\n logger = logging.getLogger(\"ignite.engine.engine.Engine\")\n handler = logging.StreamHandler()\n formatter = logging.Formatter(\"%(asctime)s %(name)-12s %(levelname)-8s %(message)s\")\n 
handler.setFormatter(formatter)\n logger.addHandler(handler)\n logger.setLevel(logging.INFO)\n\n run(args.batch_size, args.val_batch_size, args.epochs, args.lr, args.momentum)\n", "path": "examples/contrib/mnist/mnist_with_neptune_logger.py"}, {"content": "import collections.abc as collections\nimport logging\nfrom typing import Union, Optional, Callable, Any, Type, Tuple\n\nimport torch\n\n__all__ = [\"convert_tensor\", \"apply_to_tensor\", \"apply_to_type\", \"to_onehot\", \"setup_logger\"]\n\n\ndef convert_tensor(\n input_: Union[torch.Tensor, collections.Sequence, collections.Mapping, str, bytes],\n device: Optional[Union[str, torch.device]] = None,\n non_blocking: bool = False,\n) -> Union[torch.Tensor, collections.Sequence, collections.Mapping, str, bytes]:\n \"\"\"Move tensors to relevant device.\"\"\"\n\n def _func(tensor: torch.Tensor) -> torch.Tensor:\n return tensor.to(device=device, non_blocking=non_blocking) if device is not None else tensor\n\n return apply_to_tensor(input_, _func)\n\n\ndef apply_to_tensor(\n input_: Union[torch.Tensor, collections.Sequence, collections.Mapping, str, bytes], func: Callable\n) -> Union[torch.Tensor, collections.Sequence, collections.Mapping, str, bytes]:\n \"\"\"Apply a function on a tensor or mapping, or sequence of tensors.\n \"\"\"\n return apply_to_type(input_, torch.Tensor, func)\n\n\ndef apply_to_type(\n input_: Union[Any, collections.Sequence, collections.Mapping, str, bytes],\n input_type: Union[Type, Tuple[Type[Any], Any]],\n func: Callable,\n) -> Union[Any, collections.Sequence, collections.Mapping, str, bytes]:\n \"\"\"Apply a function on a object of `input_type` or mapping, or sequence of objects of `input_type`.\n \"\"\"\n if isinstance(input_, input_type):\n return func(input_)\n elif isinstance(input_, (str, bytes)):\n return input_\n elif isinstance(input_, collections.Mapping):\n return type(input_)({k: apply_to_type(sample, input_type, func) for k, sample in input_.items()})\n elif isinstance(input_, tuple) and hasattr(input_, \"_fields\"): # namedtuple\n return type(input_)(*(apply_to_type(sample, input_type, func) for sample in input_))\n elif isinstance(input_, collections.Sequence):\n return type(input_)([apply_to_type(sample, input_type, func) for sample in input_])\n else:\n raise TypeError((\"input must contain {}, dicts or lists; found {}\".format(input_type, type(input_))))\n\n\ndef to_onehot(indices: torch.Tensor, num_classes: int) -> torch.Tensor:\n \"\"\"Convert a tensor of indices of any shape `(N, ...)` to a\n tensor of one-hot indicators of shape `(N, num_classes, ...) and of type uint8. Output's device is equal to the\n input's device`.\n \"\"\"\n onehot = torch.zeros(indices.shape[0], num_classes, *indices.shape[1:], dtype=torch.uint8, device=indices.device)\n return onehot.scatter_(1, indices.unsqueeze(1), 1)\n\n\ndef setup_logger(\n name: str,\n level: int = logging.INFO,\n format: str = \"%(asctime)s %(name)s %(levelname)s: %(message)s\",\n filepath: Optional[str] = None,\n distributed_rank: int = 0,\n) -> logging.Logger:\n \"\"\"Setups logger: name, level, format etc.\n\n Args:\n name (str): new name for the logger.\n level (int): logging level, e.g. CRITICAL, ERROR, WARNING, INFO, DEBUG\n format (str): logging format. By default, `%(asctime)s %(name)s %(levelname)s: %(message)s`\n filepath (str, optional): Optional logging file path. 
If not None, logs are written to the file.\n distributed_rank (int, optional): Optional, rank in distributed configuration to avoid logger setup for workers.\n\n Returns:\n logging.Logger\n\n For example, to improve logs readability when training with a trainer and evaluator:\n\n .. code-block:: python\n\n from ignite.utils import setup_logger\n\n trainer = ...\n evaluator = ...\n\n trainer.logger = setup_logger(\"trainer\")\n evaluator.logger = setup_logger(\"evaluator\")\n\n trainer.run(data, max_epochs=10)\n\n # Logs will look like\n # 2020-01-21 12:46:07,356 trainer INFO: Engine run starting with max_epochs=5.\n # 2020-01-21 12:46:07,358 trainer INFO: Epoch[1] Complete. Time taken: 00:5:23\n # 2020-01-21 12:46:07,358 evaluator INFO: Engine run starting with max_epochs=1.\n # 2020-01-21 12:46:07,358 evaluator INFO: Epoch[1] Complete. Time taken: 00:01:02\n # ...\n\n \"\"\"\n logger = logging.getLogger(name)\n\n if distributed_rank > 0:\n return logger\n\n logger.setLevel(level)\n\n # Remove previous handlers\n if logger.hasHandlers():\n for h in list(logger.handlers):\n logger.removeHandler(h)\n\n formatter = logging.Formatter(format)\n\n ch = logging.StreamHandler()\n ch.setLevel(level)\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n\n if filepath is not None:\n fh = logging.FileHandler(filepath)\n fh.setLevel(level)\n fh.setFormatter(formatter)\n logger.addHandler(fh)\n\n return logger\n", "path": "ignite/utils.py"}], "after_files": [{"content": "\"\"\"\n MNIST example with training and validation monitoring using Neptune.\n\n Requirements:\n Neptune: `pip install neptune-client`\n\n Usage:\n\n Run the example:\n ```bash\n python mnist_with_neptune_logger.py\n ```\n\n Go to https://neptune.ai and explore your experiment.\n\nNote:\n You can see an example experiment here:\n https://ui.neptune.ai/o/shared/org/pytorch-ignite-integration/e/PYTOR-26/charts\n\"\"\"\nimport sys\nfrom argparse import ArgumentParser\nimport logging\n\nimport torch\nfrom torch.utils.data import DataLoader\nfrom torch import nn\nimport torch.nn.functional as F\nfrom torch.optim import SGD\nfrom torchvision.datasets import MNIST\nfrom torchvision.transforms import Compose, ToTensor, Normalize\n\nfrom ignite.engine import Events, create_supervised_trainer, create_supervised_evaluator\nfrom ignite.metrics import Accuracy, Loss\nfrom ignite.handlers import Checkpoint\n\nfrom ignite.contrib.handlers.neptune_logger import *\n\nLOG_INTERVAL = 10\n\n\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.conv1 = nn.Conv2d(1, 10, kernel_size=5)\n self.conv2 = nn.Conv2d(10, 20, kernel_size=5)\n self.conv2_drop = nn.Dropout2d()\n self.fc1 = nn.Linear(320, 50)\n self.fc2 = nn.Linear(50, 10)\n\n def forward(self, x):\n x = F.relu(F.max_pool2d(self.conv1(x), 2))\n x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))\n x = x.view(-1, 320)\n x = F.relu(self.fc1(x))\n x = F.dropout(x, training=self.training)\n x = self.fc2(x)\n return F.log_softmax(x, dim=-1)\n\n\ndef get_data_loaders(train_batch_size, val_batch_size):\n data_transform = Compose([ToTensor(), Normalize((0.1307,), (0.3081,))])\n\n train_loader = DataLoader(\n MNIST(download=True, root=\".\", transform=data_transform, train=True), batch_size=train_batch_size, shuffle=True\n )\n\n val_loader = DataLoader(\n MNIST(download=False, root=\".\", transform=data_transform, train=False), batch_size=val_batch_size, shuffle=False\n )\n return train_loader, val_loader\n\n\ndef run(train_batch_size, val_batch_size, epochs, lr, 
momentum):\n train_loader, val_loader = get_data_loaders(train_batch_size, val_batch_size)\n model = Net()\n device = \"cpu\"\n\n if torch.cuda.is_available():\n device = \"cuda\"\n\n optimizer = SGD(model.parameters(), lr=lr, momentum=momentum)\n criterion = nn.CrossEntropyLoss()\n trainer = create_supervised_trainer(model, optimizer, criterion, device=device)\n\n if sys.version_info > (3,):\n from ignite.contrib.metrics.gpu_info import GpuInfo\n\n try:\n GpuInfo().attach(trainer)\n except RuntimeError:\n print(\n \"INFO: By default, in this example it is possible to log GPU information (used memory, utilization). \"\n \"As there is no pynvml python package installed, GPU information won't be logged. Otherwise, please \"\n \"install it : `pip install pynvml`\"\n )\n\n metrics = {\"accuracy\": Accuracy(), \"loss\": Loss(criterion)}\n\n train_evaluator = create_supervised_evaluator(model, metrics=metrics, device=device)\n validation_evaluator = create_supervised_evaluator(model, metrics=metrics, device=device)\n\n @trainer.on(Events.EPOCH_COMPLETED)\n def compute_metrics(engine):\n train_evaluator.run(train_loader)\n validation_evaluator.run(val_loader)\n\n npt_logger = NeptuneLogger(\n api_token=\"ANONYMOUS\",\n project_name=\"shared/pytorch-ignite-integration\",\n name=\"ignite-mnist-example\",\n params={\n \"train_batch_size\": train_batch_size,\n \"val_batch_size\": val_batch_size,\n \"epochs\": epochs,\n \"lr\": lr,\n \"momentum\": momentum,\n },\n )\n\n npt_logger.attach(\n trainer,\n log_handler=OutputHandler(\n tag=\"training\", output_transform=lambda loss: {\"batchloss\": loss}, metric_names=\"all\"\n ),\n event_name=Events.ITERATION_COMPLETED(every=100),\n )\n\n npt_logger.attach(\n train_evaluator,\n log_handler=OutputHandler(tag=\"training\", metric_names=[\"loss\", \"accuracy\"], another_engine=trainer),\n event_name=Events.EPOCH_COMPLETED,\n )\n\n npt_logger.attach(\n validation_evaluator,\n log_handler=OutputHandler(tag=\"validation\", metric_names=[\"loss\", \"accuracy\"], another_engine=trainer),\n event_name=Events.EPOCH_COMPLETED,\n )\n\n npt_logger.attach(\n trainer, log_handler=OptimizerParamsHandler(optimizer), event_name=Events.ITERATION_COMPLETED(every=100)\n )\n\n npt_logger.attach(\n trainer, log_handler=WeightsScalarHandler(model), event_name=Events.ITERATION_COMPLETED(every=100)\n )\n\n npt_logger.attach(trainer, log_handler=GradsScalarHandler(model), event_name=Events.ITERATION_COMPLETED(every=100))\n\n def score_function(engine):\n return engine.state.metrics[\"accuracy\"]\n\n to_save = {\"model\": model}\n handler = Checkpoint(\n to_save,\n NeptuneSaver(npt_logger),\n n_saved=2,\n filename_prefix=\"best\",\n score_function=score_function,\n score_name=\"validation_accuracy\",\n global_step_transform=global_step_from_engine(trainer),\n )\n validation_evaluator.add_event_handler(Events.COMPLETED, handler)\n\n # kick everything off\n trainer.run(train_loader, max_epochs=epochs)\n npt_logger.close()\n\n\nif __name__ == \"__main__\":\n parser = ArgumentParser()\n parser.add_argument(\"--batch_size\", type=int, default=64, help=\"input batch size for training (default: 64)\")\n parser.add_argument(\n \"--val_batch_size\", type=int, default=1000, help=\"input batch size for validation (default: 1000)\"\n )\n parser.add_argument(\"--epochs\", type=int, default=10, help=\"number of epochs to train (default: 10)\")\n parser.add_argument(\"--lr\", type=float, default=0.01, help=\"learning rate (default: 0.01)\")\n parser.add_argument(\"--momentum\", type=float, 
default=0.5, help=\"SGD momentum (default: 0.5)\")\n\n args = parser.parse_args()\n\n # Setup engine logger\n logger = logging.getLogger(\"ignite.engine.engine.Engine\")\n handler = logging.StreamHandler()\n formatter = logging.Formatter(\"%(asctime)s %(name)-12s %(levelname)-8s %(message)s\")\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n logger.setLevel(logging.INFO)\n\n run(args.batch_size, args.val_batch_size, args.epochs, args.lr, args.momentum)\n", "path": "examples/contrib/mnist/mnist_with_neptune_logger.py"}, {"content": "import collections.abc as collections\nimport logging\nfrom functools import wraps\nfrom typing import Union, Optional, Callable, Any, Type, Tuple\n\nimport torch\nimport torch.distributed as dist\n\n__all__ = [\"convert_tensor\", \"apply_to_tensor\", \"apply_to_type\", \"to_onehot\", \"setup_logger\", \"one_rank_only\"]\n\n\ndef convert_tensor(\n input_: Union[torch.Tensor, collections.Sequence, collections.Mapping, str, bytes],\n device: Optional[Union[str, torch.device]] = None,\n non_blocking: bool = False,\n) -> Union[torch.Tensor, collections.Sequence, collections.Mapping, str, bytes]:\n \"\"\"Move tensors to relevant device.\"\"\"\n\n def _func(tensor: torch.Tensor) -> torch.Tensor:\n return tensor.to(device=device, non_blocking=non_blocking) if device is not None else tensor\n\n return apply_to_tensor(input_, _func)\n\n\ndef apply_to_tensor(\n input_: Union[torch.Tensor, collections.Sequence, collections.Mapping, str, bytes], func: Callable\n) -> Union[torch.Tensor, collections.Sequence, collections.Mapping, str, bytes]:\n \"\"\"Apply a function on a tensor or mapping, or sequence of tensors.\n \"\"\"\n return apply_to_type(input_, torch.Tensor, func)\n\n\ndef apply_to_type(\n input_: Union[Any, collections.Sequence, collections.Mapping, str, bytes],\n input_type: Union[Type, Tuple[Type[Any], Any]],\n func: Callable,\n) -> Union[Any, collections.Sequence, collections.Mapping, str, bytes]:\n \"\"\"Apply a function on a object of `input_type` or mapping, or sequence of objects of `input_type`.\n \"\"\"\n if isinstance(input_, input_type):\n return func(input_)\n elif isinstance(input_, (str, bytes)):\n return input_\n elif isinstance(input_, collections.Mapping):\n return type(input_)({k: apply_to_type(sample, input_type, func) for k, sample in input_.items()})\n elif isinstance(input_, tuple) and hasattr(input_, \"_fields\"): # namedtuple\n return type(input_)(*(apply_to_type(sample, input_type, func) for sample in input_))\n elif isinstance(input_, collections.Sequence):\n return type(input_)([apply_to_type(sample, input_type, func) for sample in input_])\n else:\n raise TypeError((\"input must contain {}, dicts or lists; found {}\".format(input_type, type(input_))))\n\n\ndef to_onehot(indices: torch.Tensor, num_classes: int) -> torch.Tensor:\n \"\"\"Convert a tensor of indices of any shape `(N, ...)` to a\n tensor of one-hot indicators of shape `(N, num_classes, ...) and of type uint8. 
Output's device is equal to the\n input's device`.\n \"\"\"\n onehot = torch.zeros(indices.shape[0], num_classes, *indices.shape[1:], dtype=torch.uint8, device=indices.device)\n return onehot.scatter_(1, indices.unsqueeze(1), 1)\n\n\ndef setup_logger(\n name: str,\n level: int = logging.INFO,\n format: str = \"%(asctime)s %(name)s %(levelname)s: %(message)s\",\n filepath: Optional[str] = None,\n distributed_rank: int = 0,\n) -> logging.Logger:\n \"\"\"Setups logger: name, level, format etc.\n\n Args:\n name (str): new name for the logger.\n level (int): logging level, e.g. CRITICAL, ERROR, WARNING, INFO, DEBUG\n format (str): logging format. By default, `%(asctime)s %(name)s %(levelname)s: %(message)s`\n filepath (str, optional): Optional logging file path. If not None, logs are written to the file.\n distributed_rank (int, optional): Optional, rank in distributed configuration to avoid logger setup for workers.\n\n Returns:\n logging.Logger\n\n For example, to improve logs readability when training with a trainer and evaluator:\n\n .. code-block:: python\n\n from ignite.utils import setup_logger\n\n trainer = ...\n evaluator = ...\n\n trainer.logger = setup_logger(\"trainer\")\n evaluator.logger = setup_logger(\"evaluator\")\n\n trainer.run(data, max_epochs=10)\n\n # Logs will look like\n # 2020-01-21 12:46:07,356 trainer INFO: Engine run starting with max_epochs=5.\n # 2020-01-21 12:46:07,358 trainer INFO: Epoch[1] Complete. Time taken: 00:5:23\n # 2020-01-21 12:46:07,358 evaluator INFO: Engine run starting with max_epochs=1.\n # 2020-01-21 12:46:07,358 evaluator INFO: Epoch[1] Complete. Time taken: 00:01:02\n # ...\n\n \"\"\"\n logger = logging.getLogger(name)\n\n if distributed_rank > 0:\n return logger\n\n logger.setLevel(level)\n\n # Remove previous handlers\n if logger.hasHandlers():\n for h in list(logger.handlers):\n logger.removeHandler(h)\n\n formatter = logging.Formatter(format)\n\n ch = logging.StreamHandler()\n ch.setLevel(level)\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n\n if filepath is not None:\n fh = logging.FileHandler(filepath)\n fh.setLevel(level)\n fh.setFormatter(formatter)\n logger.addHandler(fh)\n\n return logger\n\n\ndef one_rank_only(rank: int = 0, barrier: bool = False):\n \"\"\"Decorator to filter handlers wrt a rank number\n\n Args:\n rank (int): rank number of the handler (default: 0).\n barrier (bool): synchronisation with a barrier (default: False).\n\n .. code-block:: python\n engine = ...\n\n @engine.on(...)\n @one_rank_only() # means @one_rank_only(rank=0)\n def some_handler(_):\n ...\n\n @engine.on(...)\n @one_rank_only(rank=1)\n def some_handler(_):\n ...\n \"\"\"\n def _one_rank_only(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n ret = None\n if dist.get_rank() == rank:\n ret = func(*args, **kwargs)\n if barrier:\n dist.barrier()\n return ret\n return wrapper\n\n return _one_rank_only\n", "path": "ignite/utils.py"}]}
3,911
703
gh_patches_debug_12944
rasdani/github-patches
git_diff
Nitrate__Nitrate-438
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Drop Django 1.11 AC: - Remove from `tox.ini` - Remove from `.travis.yml` - Update Django verison range in `setup.py` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 3 from setuptools import setup, find_packages 4 5 6 with open('VERSION.txt', 'r') as f: 7 pkg_version = f.read().strip() 8 9 10 def get_long_description(): 11 with open('README.rst', 'r') as f: 12 return f.read() 13 14 15 install_requires = [ 16 'beautifulsoup4 >= 4.1.1', 17 'django >= 1.11,<3.0', 18 'django-contrib-comments == 1.8.0', 19 'django-tinymce == 2.7.0', 20 'django-uuslug == 1.1.8', 21 'html2text', 22 'odfpy >= 0.9.6', 23 'python-bugzilla', 24 'xmltodict', 25 'kobo == 0.9.0' 26 ] 27 28 extras_require = { 29 'mysql': ['mysqlclient >= 1.2.3'], 30 'pgsql': ['psycopg2 == 2.7.5'], 31 32 # Required for tcms.auth.backends.KerberosBackend 33 'krbauth': [ 34 'kerberos == 1.2.5' 35 ], 36 37 # Packages for building documentation 38 'docs': [ 39 'Sphinx >= 1.1.2', 40 'sphinx_rtd_theme', 41 ], 42 43 # Necessary packages for running tests 44 'tests': [ 45 'beautifulsoup4', 46 'coverage', 47 'factory_boy', 48 'flake8', 49 'mock', 50 'pytest < 4.2.0', 51 'pytest-cov', 52 'pytest-django', 53 ], 54 55 # Contain tools that assists the development 56 'devtools': [ 57 'django-debug-toolbar == 1.7', 58 'tox', 59 'django-extensions', 60 'pygraphviz', 61 'future-breakpoint', 62 ], 63 64 # Required packages required to run async tasks 65 'async': [ 66 'celery == 4.2.0', 67 ], 68 69 'multiauth': [ 70 'social-auth-app-django == 3.1.0', 71 ] 72 } 73 74 setup( 75 name='Nitrate', 76 version=pkg_version, 77 description='Test Case Management System', 78 long_description=get_long_description(), 79 author='Nitrate Team', 80 maintainer='Chenxiong Qi', 81 maintainer_email='[email protected]', 82 url='https://github.com/Nitrate/Nitrate/', 83 license='GPLv2+', 84 keywords='test case', 85 install_requires=install_requires, 86 extras_require=extras_require, 87 python_requires='>=3.6', 88 package_dir={'': 'src'}, 89 packages=find_packages('src', exclude=['test*']), 90 include_package_data=True, 91 zip_safe=False, 92 classifiers=[ 93 'Framework :: Django', 94 'Framework :: Django :: 1.11', 95 'Framework :: Django :: 2.0', 96 'Framework :: Django :: 2.1', 97 'Intended Audience :: Developers', 98 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)', 99 'Programming Language :: Python :: 3', 100 'Programming Language :: Python :: 3.6', 101 'Programming Language :: Python :: 3.7', 102 'Programming Language :: Python :: 3 :: Only', 103 'Topic :: Software Development :: Quality Assurance', 104 'Topic :: Software Development :: Testing', 105 ], 106 project_urls={ 107 'Issue Tracker': 'https://github.com/Nitrate/Nitrate/issues', 108 'Source Code': 'https://github.com/Nitrate/Nitrate', 109 'Documentation': 'https://nitrate.readthedocs.io/', 110 }, 111 ) 112 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -14,7 +14,7 @@ install_requires = [ 'beautifulsoup4 >= 4.1.1', - 'django >= 1.11,<3.0', + 'django >= 2.0,<3.0', 'django-contrib-comments == 1.8.0', 'django-tinymce == 2.7.0', 'django-uuslug == 1.1.8', @@ -91,7 +91,6 @@ zip_safe=False, classifiers=[ 'Framework :: Django', - 'Framework :: Django :: 1.11', 'Framework :: Django :: 2.0', 'Framework :: Django :: 2.1', 'Intended Audience :: Developers',
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -14,7 +14,7 @@\n \n install_requires = [\n 'beautifulsoup4 >= 4.1.1',\n- 'django >= 1.11,<3.0',\n+ 'django >= 2.0,<3.0',\n 'django-contrib-comments == 1.8.0',\n 'django-tinymce == 2.7.0',\n 'django-uuslug == 1.1.8',\n@@ -91,7 +91,6 @@\n zip_safe=False,\n classifiers=[\n 'Framework :: Django',\n- 'Framework :: Django :: 1.11',\n 'Framework :: Django :: 2.0',\n 'Framework :: Django :: 2.1',\n 'Intended Audience :: Developers',\n", "issue": "Drop Django 1.11\nAC:\r\n\r\n- Remove from `tox.ini`\r\n- Remove from `.travis.yml`\r\n- Update Django verison range in `setup.py`\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom setuptools import setup, find_packages\n\n\nwith open('VERSION.txt', 'r') as f:\n pkg_version = f.read().strip()\n\n\ndef get_long_description():\n with open('README.rst', 'r') as f:\n return f.read()\n\n\ninstall_requires = [\n 'beautifulsoup4 >= 4.1.1',\n 'django >= 1.11,<3.0',\n 'django-contrib-comments == 1.8.0',\n 'django-tinymce == 2.7.0',\n 'django-uuslug == 1.1.8',\n 'html2text',\n 'odfpy >= 0.9.6',\n 'python-bugzilla',\n 'xmltodict',\n 'kobo == 0.9.0'\n]\n\nextras_require = {\n 'mysql': ['mysqlclient >= 1.2.3'],\n 'pgsql': ['psycopg2 == 2.7.5'],\n\n # Required for tcms.auth.backends.KerberosBackend\n 'krbauth': [\n 'kerberos == 1.2.5'\n ],\n\n # Packages for building documentation\n 'docs': [\n 'Sphinx >= 1.1.2',\n 'sphinx_rtd_theme',\n ],\n\n # Necessary packages for running tests\n 'tests': [\n 'beautifulsoup4',\n 'coverage',\n 'factory_boy',\n 'flake8',\n 'mock',\n 'pytest < 4.2.0',\n 'pytest-cov',\n 'pytest-django',\n ],\n\n # Contain tools that assists the development\n 'devtools': [\n 'django-debug-toolbar == 1.7',\n 'tox',\n 'django-extensions',\n 'pygraphviz',\n 'future-breakpoint',\n ],\n\n # Required packages required to run async tasks\n 'async': [\n 'celery == 4.2.0',\n ],\n\n 'multiauth': [\n 'social-auth-app-django == 3.1.0',\n ]\n}\n\nsetup(\n name='Nitrate',\n version=pkg_version,\n description='Test Case Management System',\n long_description=get_long_description(),\n author='Nitrate Team',\n maintainer='Chenxiong Qi',\n maintainer_email='[email protected]',\n url='https://github.com/Nitrate/Nitrate/',\n license='GPLv2+',\n keywords='test case',\n install_requires=install_requires,\n extras_require=extras_require,\n python_requires='>=3.6',\n package_dir={'': 'src'},\n packages=find_packages('src', exclude=['test*']),\n include_package_data=True,\n zip_safe=False,\n classifiers=[\n 'Framework :: Django',\n 'Framework :: Django :: 1.11',\n 'Framework :: Django :: 2.0',\n 'Framework :: Django :: 2.1',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3 :: Only',\n 'Topic :: Software Development :: Quality Assurance',\n 'Topic :: Software Development :: Testing',\n ],\n project_urls={\n 'Issue Tracker': 'https://github.com/Nitrate/Nitrate/issues',\n 'Source Code': 'https://github.com/Nitrate/Nitrate',\n 'Documentation': 'https://nitrate.readthedocs.io/',\n },\n)\n", "path": "setup.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom setuptools import setup, find_packages\n\n\nwith open('VERSION.txt', 'r') as f:\n pkg_version = f.read().strip()\n\n\ndef get_long_description():\n with open('README.rst', 'r') as f:\n 
return f.read()\n\n\ninstall_requires = [\n 'beautifulsoup4 >= 4.1.1',\n 'django >= 2.0,<3.0',\n 'django-contrib-comments == 1.8.0',\n 'django-tinymce == 2.7.0',\n 'django-uuslug == 1.1.8',\n 'html2text',\n 'odfpy >= 0.9.6',\n 'python-bugzilla',\n 'xmltodict',\n 'kobo == 0.9.0'\n]\n\nextras_require = {\n 'mysql': ['mysqlclient >= 1.2.3'],\n 'pgsql': ['psycopg2 == 2.7.5'],\n\n # Required for tcms.auth.backends.KerberosBackend\n 'krbauth': [\n 'kerberos == 1.2.5'\n ],\n\n # Packages for building documentation\n 'docs': [\n 'Sphinx >= 1.1.2',\n 'sphinx_rtd_theme',\n ],\n\n # Necessary packages for running tests\n 'tests': [\n 'beautifulsoup4',\n 'coverage',\n 'factory_boy',\n 'flake8',\n 'mock',\n 'pytest < 4.2.0',\n 'pytest-cov',\n 'pytest-django',\n ],\n\n # Contain tools that assists the development\n 'devtools': [\n 'django-debug-toolbar == 1.7',\n 'tox',\n 'django-extensions',\n 'pygraphviz',\n 'future-breakpoint',\n ],\n\n # Required packages required to run async tasks\n 'async': [\n 'celery == 4.2.0',\n ],\n\n 'multiauth': [\n 'social-auth-app-django == 3.1.0',\n ]\n}\n\nsetup(\n name='Nitrate',\n version=pkg_version,\n description='Test Case Management System',\n long_description=get_long_description(),\n author='Nitrate Team',\n maintainer='Chenxiong Qi',\n maintainer_email='[email protected]',\n url='https://github.com/Nitrate/Nitrate/',\n license='GPLv2+',\n keywords='test case',\n install_requires=install_requires,\n extras_require=extras_require,\n python_requires='>=3.6',\n package_dir={'': 'src'},\n packages=find_packages('src', exclude=['test*']),\n include_package_data=True,\n zip_safe=False,\n classifiers=[\n 'Framework :: Django',\n 'Framework :: Django :: 2.0',\n 'Framework :: Django :: 2.1',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3 :: Only',\n 'Topic :: Software Development :: Quality Assurance',\n 'Topic :: Software Development :: Testing',\n ],\n project_urls={\n 'Issue Tracker': 'https://github.com/Nitrate/Nitrate/issues',\n 'Source Code': 'https://github.com/Nitrate/Nitrate',\n 'Documentation': 'https://nitrate.readthedocs.io/',\n },\n)\n", "path": "setup.py"}]}
1,321
189
gh_patches_debug_5982
rasdani/github-patches
git_diff
mdn__kuma-6250
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Macro search results are mangled for non-en-US locales See for example https://wiki.developer.mozilla.org/en-US/search?locale=*&kumascript_macros=WebExtAllExamples&topic=none This lists all pages that call WebExtAllExamples, across all locales. One entry looks like: <img width="893" alt="Screen Shot 2019-11-21 at 4 30 25 PM" src="https://user-images.githubusercontent.com/432915/69387936-3e5d4780-0c7c-11ea-9347-5916d638d12d.png"> This is the German translation of the https://wiki.developer.mozilla.org/en-US/docs/Mozilla/Add-ons/WebExtensions/Examples page. But the first link, "**Beispiele für Erweiterungen**", has the en-US locale in the URL, like this: https://wiki.developer.mozilla.org/en-US/docs/Mozilla/Add-ons/WebExtensions/Beispiele - note the translated slug but the en-US locale. If I click it, I get "Create a new page", because that page doesn't exist. After the short description, the entry is supposed to have "`${url} Score: 82.20941 translated from ${original}`, where `url` is the localized page, and `original` is the en-US version. But these are wrong too: * `url`: https://wiki.developer.mozilla.org/en-US/docs/Mozilla/Add-ons/WebExtensions/Beispiele - nonexistent page with en-US locale but de slug * `original`: https://developer.mozilla.org/de/docs/Mozilla/Add-ons/WebExtensions/Beispiele - the proper value for `url` I've seen some cases where the "`${url} Score: 82.20941 translated from ${original}` bit doesn't appear, and then there is no usable link to the actual page, and I have to guess what the locale is, to be able to fix the link. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `kuma/search/fields.py` Content: ``` 1 from django.conf import settings 2 from rest_framework import serializers 3 4 from kuma.core.urlresolvers import reverse 5 6 7 class SearchQueryField(serializers.ReadOnlyField): 8 """ 9 Field that returns the search query of the current request. 10 """ 11 def __init__(self, *args, **kwargs): 12 kwargs['source'] = '*' 13 super(SearchQueryField, self).__init__(*args, **kwargs) 14 15 def to_representation(self, value): 16 request = self.context.get('request') 17 if request is None: 18 return '' 19 else: 20 return request.query_params.get('q', None) 21 22 23 class SiteURLField(serializers.ReadOnlyField): 24 """ 25 A serializer field for creating URL for the given objects with the 26 given ``args``/``kwargs`` and a required ``locale`` attribute. 27 """ 28 def __init__(self, url_name, args=None, kwargs=None): 29 self.url_name = url_name 30 self.args = args or [] 31 self.kwargs = kwargs or [] 32 super(SiteURLField, self).__init__(source='*') 33 34 def to_representation(self, value): 35 if not value: 36 return None 37 args = [getattr(value, arg) for arg in self.args] 38 kwargs = {arg: getattr(value, arg) for arg in self.kwargs} 39 locale = getattr(value, 'locale', settings.LANGUAGE_CODE) 40 path = reverse(self.url_name, locale=locale, args=args, kwargs=kwargs) 41 return '%s%s' % (settings.SITE_URL, path) 42 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/kuma/search/fields.py b/kuma/search/fields.py --- a/kuma/search/fields.py +++ b/kuma/search/fields.py @@ -37,5 +37,4 @@ args = [getattr(value, arg) for arg in self.args] kwargs = {arg: getattr(value, arg) for arg in self.kwargs} locale = getattr(value, 'locale', settings.LANGUAGE_CODE) - path = reverse(self.url_name, locale=locale, args=args, kwargs=kwargs) - return '%s%s' % (settings.SITE_URL, path) + return reverse(self.url_name, locale=locale, args=args, kwargs=kwargs)
{"golden_diff": "diff --git a/kuma/search/fields.py b/kuma/search/fields.py\n--- a/kuma/search/fields.py\n+++ b/kuma/search/fields.py\n@@ -37,5 +37,4 @@\n args = [getattr(value, arg) for arg in self.args]\n kwargs = {arg: getattr(value, arg) for arg in self.kwargs}\n locale = getattr(value, 'locale', settings.LANGUAGE_CODE)\n- path = reverse(self.url_name, locale=locale, args=args, kwargs=kwargs)\n- return '%s%s' % (settings.SITE_URL, path)\n+ return reverse(self.url_name, locale=locale, args=args, kwargs=kwargs)\n", "issue": "Macro search results are mangled for non-en-US locales\nSee for example https://wiki.developer.mozilla.org/en-US/search?locale=*&kumascript_macros=WebExtAllExamples&topic=none\r\n\r\nThis lists all pages that call WebExtAllExamples, across all locales. One entry looks like:\r\n\r\n<img width=\"893\" alt=\"Screen Shot 2019-11-21 at 4 30 25 PM\" src=\"https://user-images.githubusercontent.com/432915/69387936-3e5d4780-0c7c-11ea-9347-5916d638d12d.png\">\r\n\r\nThis is the German translation of the https://wiki.developer.mozilla.org/en-US/docs/Mozilla/Add-ons/WebExtensions/Examples page.\r\n\r\nBut the first link, \"**Beispiele f\u00fcr Erweiterungen**\", has the en-US locale in the URL, like this: https://wiki.developer.mozilla.org/en-US/docs/Mozilla/Add-ons/WebExtensions/Beispiele - note the translated slug but the en-US locale. If I click it, I get \"Create a new page\", because that page doesn't exist.\r\n\r\nAfter the short description, the entry is supposed to have \"`${url} Score: 82.20941 translated from ${original}`, where `url` is the localized page, and `original` is the en-US version. But these are wrong too:\r\n\r\n* `url`: https://wiki.developer.mozilla.org/en-US/docs/Mozilla/Add-ons/WebExtensions/Beispiele - nonexistent page with en-US locale but de slug\r\n* `original`: https://developer.mozilla.org/de/docs/Mozilla/Add-ons/WebExtensions/Beispiele - the proper value for `url`\r\n\r\n I've seen some cases where the \"`${url} Score: 82.20941 translated from ${original}` bit doesn't appear, and then there is no usable link to the actual page, and I have to guess what the locale is, to be able to fix the link.\r\n\r\n\n", "before_files": [{"content": "from django.conf import settings\nfrom rest_framework import serializers\n\nfrom kuma.core.urlresolvers import reverse\n\n\nclass SearchQueryField(serializers.ReadOnlyField):\n \"\"\"\n Field that returns the search query of the current request.\n \"\"\"\n def __init__(self, *args, **kwargs):\n kwargs['source'] = '*'\n super(SearchQueryField, self).__init__(*args, **kwargs)\n\n def to_representation(self, value):\n request = self.context.get('request')\n if request is None:\n return ''\n else:\n return request.query_params.get('q', None)\n\n\nclass SiteURLField(serializers.ReadOnlyField):\n \"\"\"\n A serializer field for creating URL for the given objects with the\n given ``args``/``kwargs`` and a required ``locale`` attribute.\n \"\"\"\n def __init__(self, url_name, args=None, kwargs=None):\n self.url_name = url_name\n self.args = args or []\n self.kwargs = kwargs or []\n super(SiteURLField, self).__init__(source='*')\n\n def to_representation(self, value):\n if not value:\n return None\n args = [getattr(value, arg) for arg in self.args]\n kwargs = {arg: getattr(value, arg) for arg in self.kwargs}\n locale = getattr(value, 'locale', settings.LANGUAGE_CODE)\n path = reverse(self.url_name, locale=locale, args=args, kwargs=kwargs)\n return '%s%s' % (settings.SITE_URL, path)\n", "path": "kuma/search/fields.py"}], 
"after_files": [{"content": "from django.conf import settings\nfrom rest_framework import serializers\n\nfrom kuma.core.urlresolvers import reverse\n\n\nclass SearchQueryField(serializers.ReadOnlyField):\n \"\"\"\n Field that returns the search query of the current request.\n \"\"\"\n def __init__(self, *args, **kwargs):\n kwargs['source'] = '*'\n super(SearchQueryField, self).__init__(*args, **kwargs)\n\n def to_representation(self, value):\n request = self.context.get('request')\n if request is None:\n return ''\n else:\n return request.query_params.get('q', None)\n\n\nclass SiteURLField(serializers.ReadOnlyField):\n \"\"\"\n A serializer field for creating URL for the given objects with the\n given ``args``/``kwargs`` and a required ``locale`` attribute.\n \"\"\"\n def __init__(self, url_name, args=None, kwargs=None):\n self.url_name = url_name\n self.args = args or []\n self.kwargs = kwargs or []\n super(SiteURLField, self).__init__(source='*')\n\n def to_representation(self, value):\n if not value:\n return None\n args = [getattr(value, arg) for arg in self.args]\n kwargs = {arg: getattr(value, arg) for arg in self.kwargs}\n locale = getattr(value, 'locale', settings.LANGUAGE_CODE)\n return reverse(self.url_name, locale=locale, args=args, kwargs=kwargs)\n", "path": "kuma/search/fields.py"}]}
1,106
149
gh_patches_debug_18047
rasdani/github-patches
git_diff
easybuilders__easybuild-easyblocks-2591
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- GTK+ check in OpenCV easyblock https://github.com/easybuilders/easybuild-easyconfigs/pull/13900 and https://github.com/easybuilders/easybuild-easyconfigs/pull/13893 mean that newer `GTK+` are now either `GTK2` and `GTK3`. The OpenCV block checks (https://github.com/easybuilders/easybuild-easyblocks/blob/develop/easybuild/easyblocks/o/opencv.py#L152) for `GTK+` and sets `-DWITH_GTK=OFF` if this is not found. The check will need updating. I do not know if OpenCV can build with both GTK2 and GTK3 at the same time. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `easybuild/easyblocks/o/opencv.py` Content: ``` 1 ## 2 # Copyright 2018-2021 Ghent University 3 # 4 # This file is part of EasyBuild, 5 # originally created by the HPC team of Ghent University (http://ugent.be/hpc/en), 6 # with support of Ghent University (http://ugent.be/hpc), 7 # the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be), 8 # Flemish Research Foundation (FWO) (http://www.fwo.be/en) 9 # and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en). 10 # 11 # https://github.com/easybuilders/easybuild 12 # 13 # EasyBuild is free software: you can redistribute it and/or modify 14 # it under the terms of the GNU General Public License as published by 15 # the Free Software Foundation v2. 16 # 17 # EasyBuild is distributed in the hope that it will be useful, 18 # but WITHOUT ANY WARRANTY; without even the implied warranty of 19 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 20 # GNU General Public License for more details. 21 # 22 # You should have received a copy of the GNU General Public License 23 # along with EasyBuild. If not, see <http://www.gnu.org/licenses/>. 
24 ## 25 """ 26 EasyBuild support for building and installing OpenCV, implemented as an easyblock 27 28 @author: Kenneth Hoste (Ghent University) 29 """ 30 import glob 31 import os 32 from distutils.version import LooseVersion 33 34 from easybuild.easyblocks.generic.cmakemake import CMakeMake 35 from easybuild.easyblocks.generic.pythonpackage import det_pylibdir 36 from easybuild.framework.easyconfig import CUSTOM 37 from easybuild.tools.build_log import EasyBuildError 38 from easybuild.tools.config import build_option 39 from easybuild.tools.filetools import compute_checksum, copy 40 from easybuild.tools.modules import get_software_libdir, get_software_root, get_software_version 41 from easybuild.tools.systemtools import X86_64, get_cpu_architecture, get_cpu_features, get_shared_lib_ext 42 from easybuild.tools.toolchain.compiler import OPTARCH_GENERIC 43 44 45 class EB_OpenCV(CMakeMake): 46 """Support for building/installing OpenCV.""" 47 48 @staticmethod 49 def extra_options(): 50 """Custom easyconfig parameters specific to OpenCV.""" 51 extra_vars = CMakeMake.extra_options() 52 extra_vars.update({ 53 'cpu_dispatch': ['NONE', "Value to pass to -DCPU_DISPATCH configuration option", CUSTOM], 54 }) 55 extra_vars['separate_build_dir'][0] = True 56 return extra_vars 57 58 def __init__(self, *args, **kwargs): 59 """Initialisation of custom class variables for OpenCV.""" 60 super(EB_OpenCV, self).__init__(*args, **kwargs) 61 62 # can't be set before prepare_step is run 63 self.pylibdir = None 64 65 def prepare_step(self, *args, **kwargs): 66 """Prepare environment for installing OpenCV.""" 67 super(EB_OpenCV, self).prepare_step(*args, **kwargs) 68 69 self.pylibdir = det_pylibdir() 70 71 if get_cpu_architecture() == X86_64: 72 # IPP are Intel's Integrated Performance Primitives - so only make sense on X86_64 73 ippicv_tgz = glob.glob(os.path.join(self.builddir, 'ippicv*.tgz')) 74 if ippicv_tgz: 75 if len(ippicv_tgz) == 1: 76 # copy ippicv tarball in the right place 77 # expected location is 3rdparty/ippicv/downloads/linux-<md5sum>/ 78 ippicv_tgz = ippicv_tgz[0] 79 ippicv_tgz_md5 = compute_checksum(ippicv_tgz, checksum_type='md5') 80 target_subdir = os.path.join('3rdparty', 'ippicv', 'downloads', 'linux-%s' % ippicv_tgz_md5) 81 copy([ippicv_tgz], os.path.join(self.cfg['start_dir'], target_subdir)) 82 83 self.cfg.update('configopts', '-DWITH_IPP=ON') 84 85 # for recent OpenCV 3.x versions (and newer), we must also specify the download location 86 # to prevent that the ippicv tarball is re-downloaded 87 if LooseVersion(self.version) >= LooseVersion('3.4.4'): 88 self.cfg.update('configopts', '-DOPENCV_DOWNLOAD_PATH=%s' % self.builddir) 89 else: 90 raise EasyBuildError("Found multiple ippicv*.tgz source tarballs in %s: %s", 91 self.builddir, ippicv_tgz) 92 93 def configure_step(self): 94 """Custom configuration procedure for OpenCV.""" 95 96 # enable Python support if unspecified and Python is a dependency 97 if 'BUILD_PYTHON_SUPPORT' not in self.cfg['configopts']: 98 if get_software_root('Python'): 99 self.cfg.update('configopts', "-DBUILD_PYTHON_SUPPORT=ON -DBUILD_NEW_PYTHON_SUPPORT=ON") 100 101 # recent OpenCV 3.x versions (and newer) use an alternative configure option to specify the location 102 # where the OpenCV Python bindings should be installed 103 py_pkgs_path = os.path.join(self.installdir, self.pylibdir) 104 if LooseVersion(self.version) >= LooseVersion('3.4.4'): 105 self.cfg.update('configopts', '-DOPENCV_PYTHON_INSTALL_PATH=%s' % py_pkgs_path) 106 else: 107 
self.cfg.update('configopts', '-DPYTHON_PACKAGES_PATH=%s' % py_pkgs_path) 108 else: 109 self.cfg.update('configopts', "-DBUILD_PYTHON_SUPPORT=OFF -DBUILD_NEW_PYTHON_SUPPORT=OFF") 110 111 # enable CUDA support if CUDA is a dependency 112 if 'WITH_CUDA' not in self.cfg['configopts']: 113 if get_software_root('CUDA'): 114 self.cfg.update('configopts', '-DWITH_CUDA=ON') 115 else: 116 self.cfg.update('configopts', '-DWITH_CUDA=OFF') 117 118 # disable bundled protobuf if it is a dependency 119 if 'BUILD_PROTOBUF' not in self.cfg['configopts']: 120 if get_software_root('protobuf'): 121 self.cfg.update('configopts', '-DBUILD_PROTOBUF=OFF') 122 else: 123 self.cfg.update('configopts', '-DBUILD_PROTOBUF=ON') 124 125 # configure for dependency libraries 126 for dep in ['JasPer', 'libjpeg-turbo', 'libpng', 'LibTIFF', 'libwebp', 'OpenEXR', 'zlib']: 127 if dep in ['libpng', 'LibTIFF', 'libwebp']: 128 # strip off 'lib' 129 opt_name = dep[3:].upper() 130 elif dep == 'libjpeg-turbo': 131 opt_name = 'JPEG' 132 else: 133 opt_name = dep.upper() 134 135 shlib_ext = get_shared_lib_ext() 136 if dep == 'zlib': 137 lib_file = 'libz.%s' % shlib_ext 138 else: 139 lib_file = 'lib%s.%s' % (opt_name.lower(), shlib_ext) 140 141 dep_root = get_software_root(dep) 142 if dep_root: 143 if dep == 'OpenEXR': 144 self.cfg.update('configopts', '-D%s_ROOT=%s' % (opt_name, dep_root)) 145 else: 146 inc_path = os.path.join(dep_root, 'include') 147 self.cfg.update('configopts', '-D%s_INCLUDE_DIR=%s' % (opt_name, inc_path)) 148 libdir = get_software_libdir(dep, only_one=True) 149 lib_path = os.path.join(dep_root, libdir, lib_file) 150 self.cfg.update('configopts', '-D%s_LIBRARY=%s' % (opt_name, lib_path)) 151 152 # GTK+3 is used by default, use GTK+2 or none explicitely to avoid picking up a system GTK 153 if get_software_root('GTK+'): 154 if LooseVersion(get_software_version('GTK+')) < LooseVersion('3.0'): 155 self.cfg.update('configopts', '-DWITH_GTK_2_X=ON') 156 else: 157 self.cfg.update('configopts', '-DWITH_GTK=OFF') 158 159 # configure optimisation for CPU architecture 160 # see https://github.com/opencv/opencv/wiki/CPU-optimizations-build-options 161 if self.toolchain.options.get('optarch') and 'CPU_BASELINE' not in self.cfg['configopts']: 162 optarch = build_option('optarch') 163 if optarch is None: 164 # optimize for host arch (let OpenCV detect it) 165 self.cfg.update('configopts', '-DCPU_BASELINE=DETECT') 166 elif optarch == OPTARCH_GENERIC: 167 # optimize for generic x86 architecture (lowest supported by OpenCV is SSE3) 168 self.cfg.update('configopts', '-DCPU_BASELINE=SSE3') 169 else: 170 raise EasyBuildError("Don't know how to configure OpenCV in accordance with --optarch='%s'", optarch) 171 172 if self.cfg['cpu_dispatch']: 173 # using 'NONE' as value is equivalent with disabling the build of fat binaries (which is done by default) 174 self.cfg.update('configopts', '-DCPU_DISPATCH=%s' % self.cfg['cpu_dispatch']) 175 176 # make sure that host CPU supports FP16 (unless -DCPU_BASELINE_DISABLE is already specified) 177 # Intel Sandy Bridge does not support FP16! 178 if 'CPU_BASELINE_DISABLE' not in self.cfg['configopts']: 179 avail_cpu_features = get_cpu_features() 180 if 'f16c' not in avail_cpu_features: 181 self.cfg.update('configopts', '-DCPU_BASELINE_DISABLE=FP16') 182 183 super(EB_OpenCV, self).configure_step() 184 185 def install_step(self): 186 """ 187 Custom installation procedure for OpenCV: also copy IPP library into lib subdirectory of installation directory. 
188 """ 189 super(EB_OpenCV, self).install_step() 190 191 if 'WITH_IPP=ON' in self.cfg['configopts']: 192 common_dir = os.path.join('3rdparty', 'ippicv', 'ippicv_lnx') 193 194 # for some recent OpenCV 3.x versions, libippicv.a is now in a subdirectory named 'icv' 195 if LooseVersion(self.version) >= LooseVersion('3.4.4'): 196 ipp_libs = glob.glob(os.path.join(common_dir, 'icv', 'lib', 'intel64', 'libippicv.*')) 197 else: 198 ipp_libs = glob.glob(os.path.join(common_dir, 'lib', 'intel64', 'libippicv.*')) 199 200 copy(ipp_libs, os.path.join(self.installdir, 'lib')) 201 202 def sanity_check_step(self): 203 """Custom sanity check for OpenCV.""" 204 opencv_bins = ['annotation', 'interactive-calibration', 'version', 'visualisation'] 205 if LooseVersion(self.version) < LooseVersion('4.0'): 206 opencv_bins.extend(['createsamples', 'traincascade']) 207 208 libfile = 'libopencv_core.%s' % get_shared_lib_ext() 209 custom_paths = { 210 'files': [os.path.join('bin', 'opencv_%s' % x) for x in opencv_bins] + [os.path.join('lib64', libfile)], 211 'dirs': ['include'], 212 } 213 if 'WITH_IPP=ON' in self.cfg['configopts']: 214 custom_paths['files'].append(os.path.join('lib', 'libippicv.a')) 215 216 custom_commands = [] 217 if get_software_root('Python'): 218 custom_commands.append("python -c 'import cv2'") 219 220 super(EB_OpenCV, self).sanity_check_step(custom_paths=custom_paths, custom_commands=custom_commands) 221 222 def make_module_extra(self): 223 """Custom extra module file entries for OpenCV.""" 224 txt = super(EB_OpenCV, self).make_module_extra() 225 226 txt += self.module_generator.prepend_paths('CLASSPATH', os.path.join('share', 'OpenCV', 'java')) 227 228 if os.path.exists(os.path.join(self.installdir, self.pylibdir)): 229 txt += self.module_generator.prepend_paths('PYTHONPATH', self.pylibdir) 230 231 return txt 232 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/easybuild/easyblocks/o/opencv.py b/easybuild/easyblocks/o/opencv.py --- a/easybuild/easyblocks/o/opencv.py +++ b/easybuild/easyblocks/o/opencv.py @@ -26,6 +26,7 @@ EasyBuild support for building and installing OpenCV, implemented as an easyblock @author: Kenneth Hoste (Ghent University) +@author: Simon Branford (University of Birmingham) """ import glob import os @@ -153,6 +154,10 @@ if get_software_root('GTK+'): if LooseVersion(get_software_version('GTK+')) < LooseVersion('3.0'): self.cfg.update('configopts', '-DWITH_GTK_2_X=ON') + elif get_software_root('GTK3'): + pass + elif get_software_root('GTK2'): + self.cfg.update('configopts', '-DWITH_GTK_2_X=ON') else: self.cfg.update('configopts', '-DWITH_GTK=OFF')
{"golden_diff": "diff --git a/easybuild/easyblocks/o/opencv.py b/easybuild/easyblocks/o/opencv.py\n--- a/easybuild/easyblocks/o/opencv.py\n+++ b/easybuild/easyblocks/o/opencv.py\n@@ -26,6 +26,7 @@\n EasyBuild support for building and installing OpenCV, implemented as an easyblock\n \n @author: Kenneth Hoste (Ghent University)\n+@author: Simon Branford (University of Birmingham)\n \"\"\"\n import glob\n import os\n@@ -153,6 +154,10 @@\n if get_software_root('GTK+'):\n if LooseVersion(get_software_version('GTK+')) < LooseVersion('3.0'):\n self.cfg.update('configopts', '-DWITH_GTK_2_X=ON')\n+ elif get_software_root('GTK3'):\n+ pass\n+ elif get_software_root('GTK2'):\n+ self.cfg.update('configopts', '-DWITH_GTK_2_X=ON')\n else:\n self.cfg.update('configopts', '-DWITH_GTK=OFF')\n", "issue": "GTK+ check in OpenCV easyblock\nhttps://github.com/easybuilders/easybuild-easyconfigs/pull/13900 and https://github.com/easybuilders/easybuild-easyconfigs/pull/13893 mean that newer `GTK+` are now either `GTK2` and `GTK3`. The OpenCV block checks (https://github.com/easybuilders/easybuild-easyblocks/blob/develop/easybuild/easyblocks/o/opencv.py#L152) for `GTK+` and sets `-DWITH_GTK=OFF` if this is not found. The check will need updating.\r\n\r\nI do not know if OpenCV can build with both GTK2 and GTK3 at the same time.\n", "before_files": [{"content": "##\n# Copyright 2018-2021 Ghent University\n#\n# This file is part of EasyBuild,\n# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),\n# with support of Ghent University (http://ugent.be/hpc),\n# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),\n# Flemish Research Foundation (FWO) (http://www.fwo.be/en)\n# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).\n#\n# https://github.com/easybuilders/easybuild\n#\n# EasyBuild is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation v2.\n#\n# EasyBuild is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with EasyBuild. 
If not, see <http://www.gnu.org/licenses/>.\n##\n\"\"\"\nEasyBuild support for building and installing OpenCV, implemented as an easyblock\n\n@author: Kenneth Hoste (Ghent University)\n\"\"\"\nimport glob\nimport os\nfrom distutils.version import LooseVersion\n\nfrom easybuild.easyblocks.generic.cmakemake import CMakeMake\nfrom easybuild.easyblocks.generic.pythonpackage import det_pylibdir\nfrom easybuild.framework.easyconfig import CUSTOM\nfrom easybuild.tools.build_log import EasyBuildError\nfrom easybuild.tools.config import build_option\nfrom easybuild.tools.filetools import compute_checksum, copy\nfrom easybuild.tools.modules import get_software_libdir, get_software_root, get_software_version\nfrom easybuild.tools.systemtools import X86_64, get_cpu_architecture, get_cpu_features, get_shared_lib_ext\nfrom easybuild.tools.toolchain.compiler import OPTARCH_GENERIC\n\n\nclass EB_OpenCV(CMakeMake):\n \"\"\"Support for building/installing OpenCV.\"\"\"\n\n @staticmethod\n def extra_options():\n \"\"\"Custom easyconfig parameters specific to OpenCV.\"\"\"\n extra_vars = CMakeMake.extra_options()\n extra_vars.update({\n 'cpu_dispatch': ['NONE', \"Value to pass to -DCPU_DISPATCH configuration option\", CUSTOM],\n })\n extra_vars['separate_build_dir'][0] = True\n return extra_vars\n\n def __init__(self, *args, **kwargs):\n \"\"\"Initialisation of custom class variables for OpenCV.\"\"\"\n super(EB_OpenCV, self).__init__(*args, **kwargs)\n\n # can't be set before prepare_step is run\n self.pylibdir = None\n\n def prepare_step(self, *args, **kwargs):\n \"\"\"Prepare environment for installing OpenCV.\"\"\"\n super(EB_OpenCV, self).prepare_step(*args, **kwargs)\n\n self.pylibdir = det_pylibdir()\n\n if get_cpu_architecture() == X86_64:\n # IPP are Intel's Integrated Performance Primitives - so only make sense on X86_64\n ippicv_tgz = glob.glob(os.path.join(self.builddir, 'ippicv*.tgz'))\n if ippicv_tgz:\n if len(ippicv_tgz) == 1:\n # copy ippicv tarball in the right place\n # expected location is 3rdparty/ippicv/downloads/linux-<md5sum>/\n ippicv_tgz = ippicv_tgz[0]\n ippicv_tgz_md5 = compute_checksum(ippicv_tgz, checksum_type='md5')\n target_subdir = os.path.join('3rdparty', 'ippicv', 'downloads', 'linux-%s' % ippicv_tgz_md5)\n copy([ippicv_tgz], os.path.join(self.cfg['start_dir'], target_subdir))\n\n self.cfg.update('configopts', '-DWITH_IPP=ON')\n\n # for recent OpenCV 3.x versions (and newer), we must also specify the download location\n # to prevent that the ippicv tarball is re-downloaded\n if LooseVersion(self.version) >= LooseVersion('3.4.4'):\n self.cfg.update('configopts', '-DOPENCV_DOWNLOAD_PATH=%s' % self.builddir)\n else:\n raise EasyBuildError(\"Found multiple ippicv*.tgz source tarballs in %s: %s\",\n self.builddir, ippicv_tgz)\n\n def configure_step(self):\n \"\"\"Custom configuration procedure for OpenCV.\"\"\"\n\n # enable Python support if unspecified and Python is a dependency\n if 'BUILD_PYTHON_SUPPORT' not in self.cfg['configopts']:\n if get_software_root('Python'):\n self.cfg.update('configopts', \"-DBUILD_PYTHON_SUPPORT=ON -DBUILD_NEW_PYTHON_SUPPORT=ON\")\n\n # recent OpenCV 3.x versions (and newer) use an alternative configure option to specify the location\n # where the OpenCV Python bindings should be installed\n py_pkgs_path = os.path.join(self.installdir, self.pylibdir)\n if LooseVersion(self.version) >= LooseVersion('3.4.4'):\n self.cfg.update('configopts', '-DOPENCV_PYTHON_INSTALL_PATH=%s' % py_pkgs_path)\n else:\n self.cfg.update('configopts', 
'-DPYTHON_PACKAGES_PATH=%s' % py_pkgs_path)\n else:\n self.cfg.update('configopts', \"-DBUILD_PYTHON_SUPPORT=OFF -DBUILD_NEW_PYTHON_SUPPORT=OFF\")\n\n # enable CUDA support if CUDA is a dependency\n if 'WITH_CUDA' not in self.cfg['configopts']:\n if get_software_root('CUDA'):\n self.cfg.update('configopts', '-DWITH_CUDA=ON')\n else:\n self.cfg.update('configopts', '-DWITH_CUDA=OFF')\n\n # disable bundled protobuf if it is a dependency\n if 'BUILD_PROTOBUF' not in self.cfg['configopts']:\n if get_software_root('protobuf'):\n self.cfg.update('configopts', '-DBUILD_PROTOBUF=OFF')\n else:\n self.cfg.update('configopts', '-DBUILD_PROTOBUF=ON')\n\n # configure for dependency libraries\n for dep in ['JasPer', 'libjpeg-turbo', 'libpng', 'LibTIFF', 'libwebp', 'OpenEXR', 'zlib']:\n if dep in ['libpng', 'LibTIFF', 'libwebp']:\n # strip off 'lib'\n opt_name = dep[3:].upper()\n elif dep == 'libjpeg-turbo':\n opt_name = 'JPEG'\n else:\n opt_name = dep.upper()\n\n shlib_ext = get_shared_lib_ext()\n if dep == 'zlib':\n lib_file = 'libz.%s' % shlib_ext\n else:\n lib_file = 'lib%s.%s' % (opt_name.lower(), shlib_ext)\n\n dep_root = get_software_root(dep)\n if dep_root:\n if dep == 'OpenEXR':\n self.cfg.update('configopts', '-D%s_ROOT=%s' % (opt_name, dep_root))\n else:\n inc_path = os.path.join(dep_root, 'include')\n self.cfg.update('configopts', '-D%s_INCLUDE_DIR=%s' % (opt_name, inc_path))\n libdir = get_software_libdir(dep, only_one=True)\n lib_path = os.path.join(dep_root, libdir, lib_file)\n self.cfg.update('configopts', '-D%s_LIBRARY=%s' % (opt_name, lib_path))\n\n # GTK+3 is used by default, use GTK+2 or none explicitely to avoid picking up a system GTK\n if get_software_root('GTK+'):\n if LooseVersion(get_software_version('GTK+')) < LooseVersion('3.0'):\n self.cfg.update('configopts', '-DWITH_GTK_2_X=ON')\n else:\n self.cfg.update('configopts', '-DWITH_GTK=OFF')\n\n # configure optimisation for CPU architecture\n # see https://github.com/opencv/opencv/wiki/CPU-optimizations-build-options\n if self.toolchain.options.get('optarch') and 'CPU_BASELINE' not in self.cfg['configopts']:\n optarch = build_option('optarch')\n if optarch is None:\n # optimize for host arch (let OpenCV detect it)\n self.cfg.update('configopts', '-DCPU_BASELINE=DETECT')\n elif optarch == OPTARCH_GENERIC:\n # optimize for generic x86 architecture (lowest supported by OpenCV is SSE3)\n self.cfg.update('configopts', '-DCPU_BASELINE=SSE3')\n else:\n raise EasyBuildError(\"Don't know how to configure OpenCV in accordance with --optarch='%s'\", optarch)\n\n if self.cfg['cpu_dispatch']:\n # using 'NONE' as value is equivalent with disabling the build of fat binaries (which is done by default)\n self.cfg.update('configopts', '-DCPU_DISPATCH=%s' % self.cfg['cpu_dispatch'])\n\n # make sure that host CPU supports FP16 (unless -DCPU_BASELINE_DISABLE is already specified)\n # Intel Sandy Bridge does not support FP16!\n if 'CPU_BASELINE_DISABLE' not in self.cfg['configopts']:\n avail_cpu_features = get_cpu_features()\n if 'f16c' not in avail_cpu_features:\n self.cfg.update('configopts', '-DCPU_BASELINE_DISABLE=FP16')\n\n super(EB_OpenCV, self).configure_step()\n\n def install_step(self):\n \"\"\"\n Custom installation procedure for OpenCV: also copy IPP library into lib subdirectory of installation directory.\n \"\"\"\n super(EB_OpenCV, self).install_step()\n\n if 'WITH_IPP=ON' in self.cfg['configopts']:\n common_dir = os.path.join('3rdparty', 'ippicv', 'ippicv_lnx')\n\n # for some recent OpenCV 3.x versions, libippicv.a is now in a 
subdirectory named 'icv'\n if LooseVersion(self.version) >= LooseVersion('3.4.4'):\n ipp_libs = glob.glob(os.path.join(common_dir, 'icv', 'lib', 'intel64', 'libippicv.*'))\n else:\n ipp_libs = glob.glob(os.path.join(common_dir, 'lib', 'intel64', 'libippicv.*'))\n\n copy(ipp_libs, os.path.join(self.installdir, 'lib'))\n\n def sanity_check_step(self):\n \"\"\"Custom sanity check for OpenCV.\"\"\"\n opencv_bins = ['annotation', 'interactive-calibration', 'version', 'visualisation']\n if LooseVersion(self.version) < LooseVersion('4.0'):\n opencv_bins.extend(['createsamples', 'traincascade'])\n\n libfile = 'libopencv_core.%s' % get_shared_lib_ext()\n custom_paths = {\n 'files': [os.path.join('bin', 'opencv_%s' % x) for x in opencv_bins] + [os.path.join('lib64', libfile)],\n 'dirs': ['include'],\n }\n if 'WITH_IPP=ON' in self.cfg['configopts']:\n custom_paths['files'].append(os.path.join('lib', 'libippicv.a'))\n\n custom_commands = []\n if get_software_root('Python'):\n custom_commands.append(\"python -c 'import cv2'\")\n\n super(EB_OpenCV, self).sanity_check_step(custom_paths=custom_paths, custom_commands=custom_commands)\n\n def make_module_extra(self):\n \"\"\"Custom extra module file entries for OpenCV.\"\"\"\n txt = super(EB_OpenCV, self).make_module_extra()\n\n txt += self.module_generator.prepend_paths('CLASSPATH', os.path.join('share', 'OpenCV', 'java'))\n\n if os.path.exists(os.path.join(self.installdir, self.pylibdir)):\n txt += self.module_generator.prepend_paths('PYTHONPATH', self.pylibdir)\n\n return txt\n", "path": "easybuild/easyblocks/o/opencv.py"}], "after_files": [{"content": "##\n# Copyright 2018-2021 Ghent University\n#\n# This file is part of EasyBuild,\n# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),\n# with support of Ghent University (http://ugent.be/hpc),\n# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),\n# Flemish Research Foundation (FWO) (http://www.fwo.be/en)\n# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).\n#\n# https://github.com/easybuilders/easybuild\n#\n# EasyBuild is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation v2.\n#\n# EasyBuild is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with EasyBuild. 
If not, see <http://www.gnu.org/licenses/>.\n##\n\"\"\"\nEasyBuild support for building and installing OpenCV, implemented as an easyblock\n\n@author: Kenneth Hoste (Ghent University)\n@author: Simon Branford (University of Birmingham)\n\"\"\"\nimport glob\nimport os\nfrom distutils.version import LooseVersion\n\nfrom easybuild.easyblocks.generic.cmakemake import CMakeMake\nfrom easybuild.easyblocks.generic.pythonpackage import det_pylibdir\nfrom easybuild.framework.easyconfig import CUSTOM\nfrom easybuild.tools.build_log import EasyBuildError\nfrom easybuild.tools.config import build_option\nfrom easybuild.tools.filetools import compute_checksum, copy\nfrom easybuild.tools.modules import get_software_libdir, get_software_root, get_software_version\nfrom easybuild.tools.systemtools import X86_64, get_cpu_architecture, get_cpu_features, get_shared_lib_ext\nfrom easybuild.tools.toolchain.compiler import OPTARCH_GENERIC\n\n\nclass EB_OpenCV(CMakeMake):\n \"\"\"Support for building/installing OpenCV.\"\"\"\n\n @staticmethod\n def extra_options():\n \"\"\"Custom easyconfig parameters specific to OpenCV.\"\"\"\n extra_vars = CMakeMake.extra_options()\n extra_vars.update({\n 'cpu_dispatch': ['NONE', \"Value to pass to -DCPU_DISPATCH configuration option\", CUSTOM],\n })\n extra_vars['separate_build_dir'][0] = True\n return extra_vars\n\n def __init__(self, *args, **kwargs):\n \"\"\"Initialisation of custom class variables for OpenCV.\"\"\"\n super(EB_OpenCV, self).__init__(*args, **kwargs)\n\n # can't be set before prepare_step is run\n self.pylibdir = None\n\n def prepare_step(self, *args, **kwargs):\n \"\"\"Prepare environment for installing OpenCV.\"\"\"\n super(EB_OpenCV, self).prepare_step(*args, **kwargs)\n\n self.pylibdir = det_pylibdir()\n\n if get_cpu_architecture() == X86_64:\n # IPP are Intel's Integrated Performance Primitives - so only make sense on X86_64\n ippicv_tgz = glob.glob(os.path.join(self.builddir, 'ippicv*.tgz'))\n if ippicv_tgz:\n if len(ippicv_tgz) == 1:\n # copy ippicv tarball in the right place\n # expected location is 3rdparty/ippicv/downloads/linux-<md5sum>/\n ippicv_tgz = ippicv_tgz[0]\n ippicv_tgz_md5 = compute_checksum(ippicv_tgz, checksum_type='md5')\n target_subdir = os.path.join('3rdparty', 'ippicv', 'downloads', 'linux-%s' % ippicv_tgz_md5)\n copy([ippicv_tgz], os.path.join(self.cfg['start_dir'], target_subdir))\n\n self.cfg.update('configopts', '-DWITH_IPP=ON')\n\n # for recent OpenCV 3.x versions (and newer), we must also specify the download location\n # to prevent that the ippicv tarball is re-downloaded\n if LooseVersion(self.version) >= LooseVersion('3.4.4'):\n self.cfg.update('configopts', '-DOPENCV_DOWNLOAD_PATH=%s' % self.builddir)\n else:\n raise EasyBuildError(\"Found multiple ippicv*.tgz source tarballs in %s: %s\",\n self.builddir, ippicv_tgz)\n\n def configure_step(self):\n \"\"\"Custom configuration procedure for OpenCV.\"\"\"\n\n # enable Python support if unspecified and Python is a dependency\n if 'BUILD_PYTHON_SUPPORT' not in self.cfg['configopts']:\n if get_software_root('Python'):\n self.cfg.update('configopts', \"-DBUILD_PYTHON_SUPPORT=ON -DBUILD_NEW_PYTHON_SUPPORT=ON\")\n\n # recent OpenCV 3.x versions (and newer) use an alternative configure option to specify the location\n # where the OpenCV Python bindings should be installed\n py_pkgs_path = os.path.join(self.installdir, self.pylibdir)\n if LooseVersion(self.version) >= LooseVersion('3.4.4'):\n self.cfg.update('configopts', '-DOPENCV_PYTHON_INSTALL_PATH=%s' % py_pkgs_path)\n else:\n 
self.cfg.update('configopts', '-DPYTHON_PACKAGES_PATH=%s' % py_pkgs_path)\n else:\n self.cfg.update('configopts', \"-DBUILD_PYTHON_SUPPORT=OFF -DBUILD_NEW_PYTHON_SUPPORT=OFF\")\n\n # enable CUDA support if CUDA is a dependency\n if 'WITH_CUDA' not in self.cfg['configopts']:\n if get_software_root('CUDA'):\n self.cfg.update('configopts', '-DWITH_CUDA=ON')\n else:\n self.cfg.update('configopts', '-DWITH_CUDA=OFF')\n\n # disable bundled protobuf if it is a dependency\n if 'BUILD_PROTOBUF' not in self.cfg['configopts']:\n if get_software_root('protobuf'):\n self.cfg.update('configopts', '-DBUILD_PROTOBUF=OFF')\n else:\n self.cfg.update('configopts', '-DBUILD_PROTOBUF=ON')\n\n # configure for dependency libraries\n for dep in ['JasPer', 'libjpeg-turbo', 'libpng', 'LibTIFF', 'libwebp', 'OpenEXR', 'zlib']:\n if dep in ['libpng', 'LibTIFF', 'libwebp']:\n # strip off 'lib'\n opt_name = dep[3:].upper()\n elif dep == 'libjpeg-turbo':\n opt_name = 'JPEG'\n else:\n opt_name = dep.upper()\n\n shlib_ext = get_shared_lib_ext()\n if dep == 'zlib':\n lib_file = 'libz.%s' % shlib_ext\n else:\n lib_file = 'lib%s.%s' % (opt_name.lower(), shlib_ext)\n\n dep_root = get_software_root(dep)\n if dep_root:\n if dep == 'OpenEXR':\n self.cfg.update('configopts', '-D%s_ROOT=%s' % (opt_name, dep_root))\n else:\n inc_path = os.path.join(dep_root, 'include')\n self.cfg.update('configopts', '-D%s_INCLUDE_DIR=%s' % (opt_name, inc_path))\n libdir = get_software_libdir(dep, only_one=True)\n lib_path = os.path.join(dep_root, libdir, lib_file)\n self.cfg.update('configopts', '-D%s_LIBRARY=%s' % (opt_name, lib_path))\n\n # GTK+3 is used by default, use GTK+2 or none explicitely to avoid picking up a system GTK\n if get_software_root('GTK+'):\n if LooseVersion(get_software_version('GTK+')) < LooseVersion('3.0'):\n self.cfg.update('configopts', '-DWITH_GTK_2_X=ON')\n elif get_software_root('GTK3'):\n pass\n elif get_software_root('GTK2'):\n self.cfg.update('configopts', '-DWITH_GTK_2_X=ON')\n else:\n self.cfg.update('configopts', '-DWITH_GTK=OFF')\n\n # configure optimisation for CPU architecture\n # see https://github.com/opencv/opencv/wiki/CPU-optimizations-build-options\n if self.toolchain.options.get('optarch') and 'CPU_BASELINE' not in self.cfg['configopts']:\n optarch = build_option('optarch')\n if optarch is None:\n # optimize for host arch (let OpenCV detect it)\n self.cfg.update('configopts', '-DCPU_BASELINE=DETECT')\n elif optarch == OPTARCH_GENERIC:\n # optimize for generic x86 architecture (lowest supported by OpenCV is SSE3)\n self.cfg.update('configopts', '-DCPU_BASELINE=SSE3')\n else:\n raise EasyBuildError(\"Don't know how to configure OpenCV in accordance with --optarch='%s'\", optarch)\n\n if self.cfg['cpu_dispatch']:\n # using 'NONE' as value is equivalent with disabling the build of fat binaries (which is done by default)\n self.cfg.update('configopts', '-DCPU_DISPATCH=%s' % self.cfg['cpu_dispatch'])\n\n # make sure that host CPU supports FP16 (unless -DCPU_BASELINE_DISABLE is already specified)\n # Intel Sandy Bridge does not support FP16!\n if 'CPU_BASELINE_DISABLE' not in self.cfg['configopts']:\n avail_cpu_features = get_cpu_features()\n if 'f16c' not in avail_cpu_features:\n self.cfg.update('configopts', '-DCPU_BASELINE_DISABLE=FP16')\n\n super(EB_OpenCV, self).configure_step()\n\n def install_step(self):\n \"\"\"\n Custom installation procedure for OpenCV: also copy IPP library into lib subdirectory of installation directory.\n \"\"\"\n super(EB_OpenCV, self).install_step()\n\n if 'WITH_IPP=ON' in 
self.cfg['configopts']:\n common_dir = os.path.join('3rdparty', 'ippicv', 'ippicv_lnx')\n\n # for some recent OpenCV 3.x versions, libippicv.a is now in a subdirectory named 'icv'\n if LooseVersion(self.version) >= LooseVersion('3.4.4'):\n ipp_libs = glob.glob(os.path.join(common_dir, 'icv', 'lib', 'intel64', 'libippicv.*'))\n else:\n ipp_libs = glob.glob(os.path.join(common_dir, 'lib', 'intel64', 'libippicv.*'))\n\n copy(ipp_libs, os.path.join(self.installdir, 'lib'))\n\n def sanity_check_step(self):\n \"\"\"Custom sanity check for OpenCV.\"\"\"\n opencv_bins = ['annotation', 'interactive-calibration', 'version', 'visualisation']\n if LooseVersion(self.version) < LooseVersion('4.0'):\n opencv_bins.extend(['createsamples', 'traincascade'])\n\n libfile = 'libopencv_core.%s' % get_shared_lib_ext()\n custom_paths = {\n 'files': [os.path.join('bin', 'opencv_%s' % x) for x in opencv_bins] + [os.path.join('lib64', libfile)],\n 'dirs': ['include'],\n }\n if 'WITH_IPP=ON' in self.cfg['configopts']:\n custom_paths['files'].append(os.path.join('lib', 'libippicv.a'))\n\n custom_commands = []\n if get_software_root('Python'):\n custom_commands.append(\"python -c 'import cv2'\")\n\n super(EB_OpenCV, self).sanity_check_step(custom_paths=custom_paths, custom_commands=custom_commands)\n\n def make_module_extra(self):\n \"\"\"Custom extra module file entries for OpenCV.\"\"\"\n txt = super(EB_OpenCV, self).make_module_extra()\n\n txt += self.module_generator.prepend_paths('CLASSPATH', os.path.join('share', 'OpenCV', 'java'))\n\n if os.path.exists(os.path.join(self.installdir, self.pylibdir)):\n txt += self.module_generator.prepend_paths('PYTHONPATH', self.pylibdir)\n\n return txt\n", "path": "easybuild/easyblocks/o/opencv.py"}]}
3,693
240
gh_patches_debug_44301
rasdani/github-patches
git_diff
modin-project__modin-5681
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Don't trigger axes computation when doing binary operations When we do a binary operation and return a Series object, we don't need to trigger axes computation in columnarize. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `modin/core/dataframe/algebra/binary.py` Content: ``` 1 # Licensed to Modin Development Team under one or more contributor license agreements. 2 # See the NOTICE file distributed with this work for additional information regarding 3 # copyright ownership. The Modin Development Team licenses this file to you under the 4 # Apache License, Version 2.0 (the "License"); you may not use this file except in 5 # compliance with the License. You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software distributed under 10 # the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF 11 # ANY KIND, either express or implied. See the License for the specific language 12 # governing permissions and limitations under the License. 13 14 """Module houses builder class for Binary operator.""" 15 16 import numpy as np 17 import pandas 18 19 from .operator import Operator 20 21 22 def coerce_int_to_float64(dtype: np.dtype) -> np.dtype: 23 """ 24 Coerce dtype to float64 if it is a variant of integer. 25 26 If dtype is integer, function returns float64 datatype. 27 If not, returns the datatype argument itself. 28 29 Parameters 30 ---------- 31 dtype : np.dtype 32 NumPy datatype. 33 34 Returns 35 ------- 36 dtype : np.dtype 37 Returns float64 for all int datatypes or returns the datatype itself 38 for other types. 39 40 Notes 41 ----- 42 Used to precompute datatype in case of division in pandas. 43 """ 44 if dtype in np.sctypes["int"] + np.sctypes["uint"]: 45 return np.dtype(np.float64) 46 else: 47 return dtype 48 49 50 def compute_dtypes_common_cast(first, second) -> np.dtype: 51 """ 52 Precompute data types for binary operations by finding common type between operands. 53 54 Parameters 55 ---------- 56 first : PandasQueryCompiler 57 First operand for which the binary operation would be performed later. 58 second : PandasQueryCompiler 59 Second operand for which the binary operation would be performed later. 60 61 Returns 62 ------- 63 dtypes 64 The pandas series with precomputed dtypes. 65 66 Notes 67 ----- 68 The dtypes of the operands are supposed to be known. 69 """ 70 dtypes_first = first._modin_frame._dtypes.to_dict() 71 dtypes_second = second._modin_frame._dtypes.to_dict() 72 columns_first = set(first.columns) 73 columns_second = set(second.columns) 74 common_columns = columns_first.intersection(columns_second) 75 mismatch_columns = columns_first.union(columns_second) - common_columns 76 # If at least one column doesn't match, the result of the non matching column would be nan. 
77 nan_dtype = np.dtype(type(np.nan)) 78 dtypes = pandas.Series( 79 [ 80 pandas.core.dtypes.cast.find_common_type( 81 [ 82 dtypes_first[x], 83 dtypes_second[x], 84 ] 85 ) 86 for x in common_columns 87 ], 88 index=common_columns, 89 ) 90 dtypes = pandas.concat( 91 [ 92 dtypes, 93 pandas.Series( 94 [nan_dtype] * (len(mismatch_columns)), 95 index=mismatch_columns, 96 ), 97 ] 98 ) 99 dtypes = dtypes.sort_index() 100 return dtypes 101 102 103 def compute_dtypes_boolean(first, second) -> np.dtype: 104 """ 105 Precompute data types for boolean operations. 106 107 Parameters 108 ---------- 109 first : PandasQueryCompiler 110 First operand for which the binary operation would be performed later. 111 second : PandasQueryCompiler 112 Second operand for which the binary operation would be performed later. 113 114 Returns 115 ------- 116 dtypes 117 The pandas series with precomputed dtypes. 118 119 Notes 120 ----- 121 Finds a union of columns and finds dtypes for all these columns. 122 """ 123 columns_first = set(first.columns) 124 columns_second = set(second.columns) 125 columns_union = columns_first.union(columns_second) 126 dtypes = pandas.Series([np.dtype(bool)] * len(columns_union), index=columns_union) 127 dtypes = dtypes.sort_index() 128 return dtypes 129 130 131 class Binary(Operator): 132 """Builder class for Binary operator.""" 133 134 @classmethod 135 def register( 136 cls, 137 func, 138 join_type="outer", 139 labels="replace", 140 infer_dtypes=None, 141 ): 142 """ 143 Build template binary operator. 144 145 Parameters 146 ---------- 147 func : callable(pandas.DataFrame, [pandas.DataFrame, list-like, scalar]) -> pandas.DataFrame 148 Binary function to execute. Have to be able to accept at least two arguments. 149 join_type : {'left', 'right', 'outer', 'inner', None}, default: 'outer' 150 Type of join that will be used if indices of operands are not aligned. 151 labels : {"keep", "replace", "drop"}, default: "replace" 152 Whether keep labels from left Modin DataFrame, replace them with labels 153 from joined DataFrame or drop altogether to make them be computed lazily later. 154 infer_dtypes : {"common_cast", "float", "bool", None}, default: None 155 How dtypes should be inferred. 156 * If "common_cast", casts to common dtype of operand columns. 157 * If "float", performs type casting by finding common dtype. 158 If the common dtype is any of the integer types, perform type casting to float. 159 Used in case of truediv. 160 * If "bool", dtypes would be a boolean series with same size as that of operands. 161 * If ``None``, do not infer new dtypes (they will be computed manually once accessed). 162 163 Returns 164 ------- 165 callable 166 Function that takes query compiler and executes binary operation. 167 """ 168 169 def caller( 170 query_compiler, other, broadcast=False, *args, dtypes=None, **kwargs 171 ): 172 """ 173 Apply binary `func` to passed operands. 174 175 Parameters 176 ---------- 177 query_compiler : QueryCompiler 178 Left operand of `func`. 179 other : QueryCompiler, list-like object or scalar 180 Right operand of `func`. 181 broadcast : bool, default: False 182 If `other` is a one-column query compiler, indicates whether it is a Series or not. 183 Frames and Series have to be processed differently, however we can't distinguish them 184 at the query compiler level, so this parameter is a hint that passed from a high level API. 185 *args : args, 186 Arguments that will be passed to `func`. 
187 dtypes : "copy" or None, default: None 188 Whether to keep old dtypes or infer new dtypes from data. 189 **kwargs : kwargs, 190 Arguments that will be passed to `func`. 191 192 Returns 193 ------- 194 QueryCompiler 195 Result of binary function. 196 """ 197 axis = kwargs.get("axis", 0) 198 if isinstance(other, type(query_compiler)): 199 if broadcast: 200 assert ( 201 len(other.columns) == 1 202 ), "Invalid broadcast argument for `broadcast_apply`, too many columns: {}".format( 203 len(other.columns) 204 ) 205 # Transpose on `axis=1` because we always represent an individual 206 # column or row as a single-column Modin DataFrame 207 if axis == 1: 208 other = other.transpose() 209 return query_compiler.__constructor__( 210 query_compiler._modin_frame.broadcast_apply( 211 axis, 212 lambda left, right: func( 213 left, right.squeeze(), *args, **kwargs 214 ), 215 other._modin_frame, 216 join_type=join_type, 217 labels=labels, 218 dtypes=dtypes, 219 ) 220 ) 221 else: 222 if ( 223 other._modin_frame._dtypes is not None 224 and query_compiler._modin_frame._dtypes is not None 225 ): 226 if infer_dtypes == "bool": 227 dtypes = compute_dtypes_boolean(query_compiler, other) 228 if infer_dtypes == "common_cast": 229 dtypes = compute_dtypes_common_cast(query_compiler, other) 230 elif infer_dtypes == "float": 231 dtypes = compute_dtypes_common_cast(query_compiler, other) 232 dtypes = dtypes.apply(coerce_int_to_float64) 233 return query_compiler.__constructor__( 234 query_compiler._modin_frame.n_ary_op( 235 lambda x, y: func(x, y, *args, **kwargs), 236 [other._modin_frame], 237 join_type=join_type, 238 dtypes=dtypes, 239 ) 240 ) 241 else: 242 # TODO: it's possible to chunk the `other` and broadcast them to partitions 243 # accordingly, in that way we will be able to use more efficient `._modin_frame.map()` 244 if isinstance(other, (dict, list, np.ndarray, pandas.Series)): 245 new_modin_frame = query_compiler._modin_frame.apply_full_axis( 246 axis, 247 lambda df: func(df, other, *args, **kwargs), 248 new_index=query_compiler.index, 249 new_columns=query_compiler.columns, 250 dtypes=dtypes, 251 ) 252 else: 253 new_modin_frame = query_compiler._modin_frame.map( 254 lambda df: func(df, other, *args, **kwargs), 255 dtypes=dtypes, 256 ) 257 return query_compiler.__constructor__(new_modin_frame) 258 259 return caller 260 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/modin/core/dataframe/algebra/binary.py b/modin/core/dataframe/algebra/binary.py --- a/modin/core/dataframe/algebra/binary.py +++ b/modin/core/dataframe/algebra/binary.py @@ -15,6 +15,7 @@ import numpy as np import pandas +from pandas.api.types import is_scalar from .operator import Operator @@ -195,6 +196,8 @@ Result of binary function. """ axis = kwargs.get("axis", 0) + shape_hint = None + self_columns = query_compiler._modin_frame._columns_cache if isinstance(other, type(query_compiler)): if broadcast: assert ( @@ -206,6 +209,17 @@ # column or row as a single-column Modin DataFrame if axis == 1: other = other.transpose() + + if ( + self_columns is not None + and other._modin_frame._columns_cache is not None + ): + if ( + len(self_columns) == 1 + and len(other.columns) == 1 + and self_columns.equals(other.columns) + ): + shape_hint = "column" return query_compiler.__constructor__( query_compiler._modin_frame.broadcast_apply( axis, @@ -216,7 +230,8 @@ join_type=join_type, labels=labels, dtypes=dtypes, - ) + ), + shape_hint=shape_hint, ) else: if ( @@ -230,13 +245,24 @@ elif infer_dtypes == "float": dtypes = compute_dtypes_common_cast(query_compiler, other) dtypes = dtypes.apply(coerce_int_to_float64) + if ( + self_columns is not None + and other._modin_frame._columns_cache is not None + ): + if ( + len(self_columns) == 1 + and len(other.columns) == 1 + and query_compiler.columns.equals(other.columns) + ): + shape_hint = "column" return query_compiler.__constructor__( query_compiler._modin_frame.n_ary_op( lambda x, y: func(x, y, *args, **kwargs), [other._modin_frame], join_type=join_type, dtypes=dtypes, - ) + ), + shape_hint=shape_hint, ) else: # TODO: it's possible to chunk the `other` and broadcast them to partitions @@ -250,10 +276,18 @@ dtypes=dtypes, ) else: + if ( + self_columns is not None + and len(self_columns) == 1 + and is_scalar(other) + ): + shape_hint = "column" new_modin_frame = query_compiler._modin_frame.map( lambda df: func(df, other, *args, **kwargs), dtypes=dtypes, ) - return query_compiler.__constructor__(new_modin_frame) + return query_compiler.__constructor__( + new_modin_frame, shape_hint=shape_hint + ) return caller
{"golden_diff": "diff --git a/modin/core/dataframe/algebra/binary.py b/modin/core/dataframe/algebra/binary.py\n--- a/modin/core/dataframe/algebra/binary.py\n+++ b/modin/core/dataframe/algebra/binary.py\n@@ -15,6 +15,7 @@\n \n import numpy as np\n import pandas\n+from pandas.api.types import is_scalar\n \n from .operator import Operator\n \n@@ -195,6 +196,8 @@\n Result of binary function.\n \"\"\"\n axis = kwargs.get(\"axis\", 0)\n+ shape_hint = None\n+ self_columns = query_compiler._modin_frame._columns_cache\n if isinstance(other, type(query_compiler)):\n if broadcast:\n assert (\n@@ -206,6 +209,17 @@\n # column or row as a single-column Modin DataFrame\n if axis == 1:\n other = other.transpose()\n+\n+ if (\n+ self_columns is not None\n+ and other._modin_frame._columns_cache is not None\n+ ):\n+ if (\n+ len(self_columns) == 1\n+ and len(other.columns) == 1\n+ and self_columns.equals(other.columns)\n+ ):\n+ shape_hint = \"column\"\n return query_compiler.__constructor__(\n query_compiler._modin_frame.broadcast_apply(\n axis,\n@@ -216,7 +230,8 @@\n join_type=join_type,\n labels=labels,\n dtypes=dtypes,\n- )\n+ ),\n+ shape_hint=shape_hint,\n )\n else:\n if (\n@@ -230,13 +245,24 @@\n elif infer_dtypes == \"float\":\n dtypes = compute_dtypes_common_cast(query_compiler, other)\n dtypes = dtypes.apply(coerce_int_to_float64)\n+ if (\n+ self_columns is not None\n+ and other._modin_frame._columns_cache is not None\n+ ):\n+ if (\n+ len(self_columns) == 1\n+ and len(other.columns) == 1\n+ and query_compiler.columns.equals(other.columns)\n+ ):\n+ shape_hint = \"column\"\n return query_compiler.__constructor__(\n query_compiler._modin_frame.n_ary_op(\n lambda x, y: func(x, y, *args, **kwargs),\n [other._modin_frame],\n join_type=join_type,\n dtypes=dtypes,\n- )\n+ ),\n+ shape_hint=shape_hint,\n )\n else:\n # TODO: it's possible to chunk the `other` and broadcast them to partitions\n@@ -250,10 +276,18 @@\n dtypes=dtypes,\n )\n else:\n+ if (\n+ self_columns is not None\n+ and len(self_columns) == 1\n+ and is_scalar(other)\n+ ):\n+ shape_hint = \"column\"\n new_modin_frame = query_compiler._modin_frame.map(\n lambda df: func(df, other, *args, **kwargs),\n dtypes=dtypes,\n )\n- return query_compiler.__constructor__(new_modin_frame)\n+ return query_compiler.__constructor__(\n+ new_modin_frame, shape_hint=shape_hint\n+ )\n \n return caller\n", "issue": "Don't trigger axes computation when doing binary operations\nWhen we do a binary operation and return a Series object, we don't need to trigger axes computation in columnarize.\r\n\n", "before_files": [{"content": "# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n# compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under\n# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. 
See the License for the specific language\n# governing permissions and limitations under the License.\n\n\"\"\"Module houses builder class for Binary operator.\"\"\"\n\nimport numpy as np\nimport pandas\n\nfrom .operator import Operator\n\n\ndef coerce_int_to_float64(dtype: np.dtype) -> np.dtype:\n \"\"\"\n Coerce dtype to float64 if it is a variant of integer.\n\n If dtype is integer, function returns float64 datatype.\n If not, returns the datatype argument itself.\n\n Parameters\n ----------\n dtype : np.dtype\n NumPy datatype.\n\n Returns\n -------\n dtype : np.dtype\n Returns float64 for all int datatypes or returns the datatype itself\n for other types.\n\n Notes\n -----\n Used to precompute datatype in case of division in pandas.\n \"\"\"\n if dtype in np.sctypes[\"int\"] + np.sctypes[\"uint\"]:\n return np.dtype(np.float64)\n else:\n return dtype\n\n\ndef compute_dtypes_common_cast(first, second) -> np.dtype:\n \"\"\"\n Precompute data types for binary operations by finding common type between operands.\n\n Parameters\n ----------\n first : PandasQueryCompiler\n First operand for which the binary operation would be performed later.\n second : PandasQueryCompiler\n Second operand for which the binary operation would be performed later.\n\n Returns\n -------\n dtypes\n The pandas series with precomputed dtypes.\n\n Notes\n -----\n The dtypes of the operands are supposed to be known.\n \"\"\"\n dtypes_first = first._modin_frame._dtypes.to_dict()\n dtypes_second = second._modin_frame._dtypes.to_dict()\n columns_first = set(first.columns)\n columns_second = set(second.columns)\n common_columns = columns_first.intersection(columns_second)\n mismatch_columns = columns_first.union(columns_second) - common_columns\n # If at least one column doesn't match, the result of the non matching column would be nan.\n nan_dtype = np.dtype(type(np.nan))\n dtypes = pandas.Series(\n [\n pandas.core.dtypes.cast.find_common_type(\n [\n dtypes_first[x],\n dtypes_second[x],\n ]\n )\n for x in common_columns\n ],\n index=common_columns,\n )\n dtypes = pandas.concat(\n [\n dtypes,\n pandas.Series(\n [nan_dtype] * (len(mismatch_columns)),\n index=mismatch_columns,\n ),\n ]\n )\n dtypes = dtypes.sort_index()\n return dtypes\n\n\ndef compute_dtypes_boolean(first, second) -> np.dtype:\n \"\"\"\n Precompute data types for boolean operations.\n\n Parameters\n ----------\n first : PandasQueryCompiler\n First operand for which the binary operation would be performed later.\n second : PandasQueryCompiler\n Second operand for which the binary operation would be performed later.\n\n Returns\n -------\n dtypes\n The pandas series with precomputed dtypes.\n\n Notes\n -----\n Finds a union of columns and finds dtypes for all these columns.\n \"\"\"\n columns_first = set(first.columns)\n columns_second = set(second.columns)\n columns_union = columns_first.union(columns_second)\n dtypes = pandas.Series([np.dtype(bool)] * len(columns_union), index=columns_union)\n dtypes = dtypes.sort_index()\n return dtypes\n\n\nclass Binary(Operator):\n \"\"\"Builder class for Binary operator.\"\"\"\n\n @classmethod\n def register(\n cls,\n func,\n join_type=\"outer\",\n labels=\"replace\",\n infer_dtypes=None,\n ):\n \"\"\"\n Build template binary operator.\n\n Parameters\n ----------\n func : callable(pandas.DataFrame, [pandas.DataFrame, list-like, scalar]) -> pandas.DataFrame\n Binary function to execute. 
Have to be able to accept at least two arguments.\n join_type : {'left', 'right', 'outer', 'inner', None}, default: 'outer'\n Type of join that will be used if indices of operands are not aligned.\n labels : {\"keep\", \"replace\", \"drop\"}, default: \"replace\"\n Whether keep labels from left Modin DataFrame, replace them with labels\n from joined DataFrame or drop altogether to make them be computed lazily later.\n infer_dtypes : {\"common_cast\", \"float\", \"bool\", None}, default: None\n How dtypes should be inferred.\n * If \"common_cast\", casts to common dtype of operand columns.\n * If \"float\", performs type casting by finding common dtype.\n If the common dtype is any of the integer types, perform type casting to float.\n Used in case of truediv.\n * If \"bool\", dtypes would be a boolean series with same size as that of operands.\n * If ``None``, do not infer new dtypes (they will be computed manually once accessed).\n\n Returns\n -------\n callable\n Function that takes query compiler and executes binary operation.\n \"\"\"\n\n def caller(\n query_compiler, other, broadcast=False, *args, dtypes=None, **kwargs\n ):\n \"\"\"\n Apply binary `func` to passed operands.\n\n Parameters\n ----------\n query_compiler : QueryCompiler\n Left operand of `func`.\n other : QueryCompiler, list-like object or scalar\n Right operand of `func`.\n broadcast : bool, default: False\n If `other` is a one-column query compiler, indicates whether it is a Series or not.\n Frames and Series have to be processed differently, however we can't distinguish them\n at the query compiler level, so this parameter is a hint that passed from a high level API.\n *args : args,\n Arguments that will be passed to `func`.\n dtypes : \"copy\" or None, default: None\n Whether to keep old dtypes or infer new dtypes from data.\n **kwargs : kwargs,\n Arguments that will be passed to `func`.\n\n Returns\n -------\n QueryCompiler\n Result of binary function.\n \"\"\"\n axis = kwargs.get(\"axis\", 0)\n if isinstance(other, type(query_compiler)):\n if broadcast:\n assert (\n len(other.columns) == 1\n ), \"Invalid broadcast argument for `broadcast_apply`, too many columns: {}\".format(\n len(other.columns)\n )\n # Transpose on `axis=1` because we always represent an individual\n # column or row as a single-column Modin DataFrame\n if axis == 1:\n other = other.transpose()\n return query_compiler.__constructor__(\n query_compiler._modin_frame.broadcast_apply(\n axis,\n lambda left, right: func(\n left, right.squeeze(), *args, **kwargs\n ),\n other._modin_frame,\n join_type=join_type,\n labels=labels,\n dtypes=dtypes,\n )\n )\n else:\n if (\n other._modin_frame._dtypes is not None\n and query_compiler._modin_frame._dtypes is not None\n ):\n if infer_dtypes == \"bool\":\n dtypes = compute_dtypes_boolean(query_compiler, other)\n if infer_dtypes == \"common_cast\":\n dtypes = compute_dtypes_common_cast(query_compiler, other)\n elif infer_dtypes == \"float\":\n dtypes = compute_dtypes_common_cast(query_compiler, other)\n dtypes = dtypes.apply(coerce_int_to_float64)\n return query_compiler.__constructor__(\n query_compiler._modin_frame.n_ary_op(\n lambda x, y: func(x, y, *args, **kwargs),\n [other._modin_frame],\n join_type=join_type,\n dtypes=dtypes,\n )\n )\n else:\n # TODO: it's possible to chunk the `other` and broadcast them to partitions\n # accordingly, in that way we will be able to use more efficient `._modin_frame.map()`\n if isinstance(other, (dict, list, np.ndarray, pandas.Series)):\n new_modin_frame = 
query_compiler._modin_frame.apply_full_axis(\n axis,\n lambda df: func(df, other, *args, **kwargs),\n new_index=query_compiler.index,\n new_columns=query_compiler.columns,\n dtypes=dtypes,\n )\n else:\n new_modin_frame = query_compiler._modin_frame.map(\n lambda df: func(df, other, *args, **kwargs),\n dtypes=dtypes,\n )\n return query_compiler.__constructor__(new_modin_frame)\n\n return caller\n", "path": "modin/core/dataframe/algebra/binary.py"}], "after_files": [{"content": "# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n# compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under\n# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific language\n# governing permissions and limitations under the License.\n\n\"\"\"Module houses builder class for Binary operator.\"\"\"\n\nimport numpy as np\nimport pandas\nfrom pandas.api.types import is_scalar\n\nfrom .operator import Operator\n\n\ndef coerce_int_to_float64(dtype: np.dtype) -> np.dtype:\n \"\"\"\n Coerce dtype to float64 if it is a variant of integer.\n\n If dtype is integer, function returns float64 datatype.\n If not, returns the datatype argument itself.\n\n Parameters\n ----------\n dtype : np.dtype\n NumPy datatype.\n\n Returns\n -------\n dtype : np.dtype\n Returns float64 for all int datatypes or returns the datatype itself\n for other types.\n\n Notes\n -----\n Used to precompute datatype in case of division in pandas.\n \"\"\"\n if dtype in np.sctypes[\"int\"] + np.sctypes[\"uint\"]:\n return np.dtype(np.float64)\n else:\n return dtype\n\n\ndef compute_dtypes_common_cast(first, second) -> np.dtype:\n \"\"\"\n Precompute data types for binary operations by finding common type between operands.\n\n Parameters\n ----------\n first : PandasQueryCompiler\n First operand for which the binary operation would be performed later.\n second : PandasQueryCompiler\n Second operand for which the binary operation would be performed later.\n\n Returns\n -------\n dtypes\n The pandas series with precomputed dtypes.\n\n Notes\n -----\n The dtypes of the operands are supposed to be known.\n \"\"\"\n dtypes_first = first._modin_frame._dtypes.to_dict()\n dtypes_second = second._modin_frame._dtypes.to_dict()\n columns_first = set(first.columns)\n columns_second = set(second.columns)\n common_columns = columns_first.intersection(columns_second)\n mismatch_columns = columns_first.union(columns_second) - common_columns\n # If at least one column doesn't match, the result of the non matching column would be nan.\n nan_dtype = np.dtype(type(np.nan))\n dtypes = pandas.Series(\n [\n pandas.core.dtypes.cast.find_common_type(\n [\n dtypes_first[x],\n dtypes_second[x],\n ]\n )\n for x in common_columns\n ],\n index=common_columns,\n )\n dtypes = pandas.concat(\n [\n dtypes,\n pandas.Series(\n [nan_dtype] * (len(mismatch_columns)),\n index=mismatch_columns,\n ),\n ]\n )\n dtypes = dtypes.sort_index()\n return dtypes\n\n\ndef compute_dtypes_boolean(first, second) -> np.dtype:\n \"\"\"\n Precompute data types for 
boolean operations.\n\n Parameters\n ----------\n first : PandasQueryCompiler\n First operand for which the binary operation would be performed later.\n second : PandasQueryCompiler\n Second operand for which the binary operation would be performed later.\n\n Returns\n -------\n dtypes\n The pandas series with precomputed dtypes.\n\n Notes\n -----\n Finds a union of columns and finds dtypes for all these columns.\n \"\"\"\n columns_first = set(first.columns)\n columns_second = set(second.columns)\n columns_union = columns_first.union(columns_second)\n dtypes = pandas.Series([np.dtype(bool)] * len(columns_union), index=columns_union)\n dtypes = dtypes.sort_index()\n return dtypes\n\n\nclass Binary(Operator):\n \"\"\"Builder class for Binary operator.\"\"\"\n\n @classmethod\n def register(\n cls,\n func,\n join_type=\"outer\",\n labels=\"replace\",\n infer_dtypes=None,\n ):\n \"\"\"\n Build template binary operator.\n\n Parameters\n ----------\n func : callable(pandas.DataFrame, [pandas.DataFrame, list-like, scalar]) -> pandas.DataFrame\n Binary function to execute. Have to be able to accept at least two arguments.\n join_type : {'left', 'right', 'outer', 'inner', None}, default: 'outer'\n Type of join that will be used if indices of operands are not aligned.\n labels : {\"keep\", \"replace\", \"drop\"}, default: \"replace\"\n Whether keep labels from left Modin DataFrame, replace them with labels\n from joined DataFrame or drop altogether to make them be computed lazily later.\n infer_dtypes : {\"common_cast\", \"float\", \"bool\", None}, default: None\n How dtypes should be inferred.\n * If \"common_cast\", casts to common dtype of operand columns.\n * If \"float\", performs type casting by finding common dtype.\n If the common dtype is any of the integer types, perform type casting to float.\n Used in case of truediv.\n * If \"bool\", dtypes would be a boolean series with same size as that of operands.\n * If ``None``, do not infer new dtypes (they will be computed manually once accessed).\n\n Returns\n -------\n callable\n Function that takes query compiler and executes binary operation.\n \"\"\"\n\n def caller(\n query_compiler, other, broadcast=False, *args, dtypes=None, **kwargs\n ):\n \"\"\"\n Apply binary `func` to passed operands.\n\n Parameters\n ----------\n query_compiler : QueryCompiler\n Left operand of `func`.\n other : QueryCompiler, list-like object or scalar\n Right operand of `func`.\n broadcast : bool, default: False\n If `other` is a one-column query compiler, indicates whether it is a Series or not.\n Frames and Series have to be processed differently, however we can't distinguish them\n at the query compiler level, so this parameter is a hint that passed from a high level API.\n *args : args,\n Arguments that will be passed to `func`.\n dtypes : \"copy\" or None, default: None\n Whether to keep old dtypes or infer new dtypes from data.\n **kwargs : kwargs,\n Arguments that will be passed to `func`.\n\n Returns\n -------\n QueryCompiler\n Result of binary function.\n \"\"\"\n axis = kwargs.get(\"axis\", 0)\n shape_hint = None\n self_columns = query_compiler._modin_frame._columns_cache\n if isinstance(other, type(query_compiler)):\n if broadcast:\n assert (\n len(other.columns) == 1\n ), \"Invalid broadcast argument for `broadcast_apply`, too many columns: {}\".format(\n len(other.columns)\n )\n # Transpose on `axis=1` because we always represent an individual\n # column or row as a single-column Modin DataFrame\n if axis == 1:\n other = other.transpose()\n\n if (\n 
self_columns is not None\n and other._modin_frame._columns_cache is not None\n ):\n if (\n len(self_columns) == 1\n and len(other.columns) == 1\n and self_columns.equals(other.columns)\n ):\n shape_hint = \"column\"\n return query_compiler.__constructor__(\n query_compiler._modin_frame.broadcast_apply(\n axis,\n lambda left, right: func(\n left, right.squeeze(), *args, **kwargs\n ),\n other._modin_frame,\n join_type=join_type,\n labels=labels,\n dtypes=dtypes,\n ),\n shape_hint=shape_hint,\n )\n else:\n if (\n other._modin_frame._dtypes is not None\n and query_compiler._modin_frame._dtypes is not None\n ):\n if infer_dtypes == \"bool\":\n dtypes = compute_dtypes_boolean(query_compiler, other)\n if infer_dtypes == \"common_cast\":\n dtypes = compute_dtypes_common_cast(query_compiler, other)\n elif infer_dtypes == \"float\":\n dtypes = compute_dtypes_common_cast(query_compiler, other)\n dtypes = dtypes.apply(coerce_int_to_float64)\n if (\n self_columns is not None\n and other._modin_frame._columns_cache is not None\n ):\n if (\n len(self_columns) == 1\n and len(other.columns) == 1\n and query_compiler.columns.equals(other.columns)\n ):\n shape_hint = \"column\"\n return query_compiler.__constructor__(\n query_compiler._modin_frame.n_ary_op(\n lambda x, y: func(x, y, *args, **kwargs),\n [other._modin_frame],\n join_type=join_type,\n dtypes=dtypes,\n ),\n shape_hint=shape_hint,\n )\n else:\n # TODO: it's possible to chunk the `other` and broadcast them to partitions\n # accordingly, in that way we will be able to use more efficient `._modin_frame.map()`\n if isinstance(other, (dict, list, np.ndarray, pandas.Series)):\n new_modin_frame = query_compiler._modin_frame.apply_full_axis(\n axis,\n lambda df: func(df, other, *args, **kwargs),\n new_index=query_compiler.index,\n new_columns=query_compiler.columns,\n dtypes=dtypes,\n )\n else:\n if (\n self_columns is not None\n and len(self_columns) == 1\n and is_scalar(other)\n ):\n shape_hint = \"column\"\n new_modin_frame = query_compiler._modin_frame.map(\n lambda df: func(df, other, *args, **kwargs),\n dtypes=dtypes,\n )\n return query_compiler.__constructor__(\n new_modin_frame, shape_hint=shape_hint\n )\n\n return caller\n", "path": "modin/core/dataframe/algebra/binary.py"}]}
2,937
725
gh_patches_debug_10682
rasdani/github-patches
git_diff
encode__starlette-1609
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Gzip Middleware content-length is incorrect The following exception is thrown when I use uvicorn to drive my starlette project. After control variates, I am sure this is caused by Gzip Middleware. ``` File "C:\Users\AberS\Documents\Github\index.py\.venv\lib\site-packages\h11\_writers.py", line 102, in send_eom raise LocalProtocolError("Too little data for declared Content-Length") h11._util.LocalProtocolError: Too little data for declared Content-Length ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `starlette/middleware/base.py` Content: ``` 1 import typing 2 3 import anyio 4 5 from starlette.requests import Request 6 from starlette.responses import Response, StreamingResponse 7 from starlette.types import ASGIApp, Receive, Scope, Send 8 9 RequestResponseEndpoint = typing.Callable[[Request], typing.Awaitable[Response]] 10 DispatchFunction = typing.Callable[ 11 [Request, RequestResponseEndpoint], typing.Awaitable[Response] 12 ] 13 14 15 class BaseHTTPMiddleware: 16 def __init__( 17 self, app: ASGIApp, dispatch: typing.Optional[DispatchFunction] = None 18 ) -> None: 19 self.app = app 20 self.dispatch_func = self.dispatch if dispatch is None else dispatch 21 22 async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: 23 if scope["type"] != "http": 24 await self.app(scope, receive, send) 25 return 26 27 async def call_next(request: Request) -> Response: 28 app_exc: typing.Optional[Exception] = None 29 send_stream, recv_stream = anyio.create_memory_object_stream() 30 31 async def coro() -> None: 32 nonlocal app_exc 33 34 async with send_stream: 35 try: 36 await self.app(scope, request.receive, send_stream.send) 37 except Exception as exc: 38 app_exc = exc 39 40 task_group.start_soon(coro) 41 42 try: 43 message = await recv_stream.receive() 44 except anyio.EndOfStream: 45 if app_exc is not None: 46 raise app_exc 47 raise RuntimeError("No response returned.") 48 49 assert message["type"] == "http.response.start" 50 51 async def body_stream() -> typing.AsyncGenerator[bytes, None]: 52 async with recv_stream: 53 async for message in recv_stream: 54 assert message["type"] == "http.response.body" 55 yield message.get("body", b"") 56 57 if app_exc is not None: 58 raise app_exc 59 60 response = StreamingResponse( 61 status_code=message["status"], content=body_stream() 62 ) 63 response.raw_headers = message["headers"] 64 return response 65 66 async with anyio.create_task_group() as task_group: 67 request = Request(scope, receive=receive) 68 response = await self.dispatch_func(request, call_next) 69 await response(scope, receive, send) 70 task_group.cancel_scope.cancel() 71 72 async def dispatch( 73 self, request: Request, call_next: RequestResponseEndpoint 74 ) -> Response: 75 raise NotImplementedError() # pragma: no cover 76 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/starlette/middleware/base.py b/starlette/middleware/base.py --- a/starlette/middleware/base.py +++ b/starlette/middleware/base.py @@ -52,7 +52,11 @@ async with recv_stream: async for message in recv_stream: assert message["type"] == "http.response.body" - yield message.get("body", b"") + body = message.get("body", b"") + if body: + yield body + if not message.get("more_body", False): + break if app_exc is not None: raise app_exc
{"golden_diff": "diff --git a/starlette/middleware/base.py b/starlette/middleware/base.py\n--- a/starlette/middleware/base.py\n+++ b/starlette/middleware/base.py\n@@ -52,7 +52,11 @@\n async with recv_stream:\n async for message in recv_stream:\n assert message[\"type\"] == \"http.response.body\"\n- yield message.get(\"body\", b\"\")\n+ body = message.get(\"body\", b\"\")\n+ if body:\n+ yield body\n+ if not message.get(\"more_body\", False):\n+ break\n \n if app_exc is not None:\n raise app_exc\n", "issue": "Gzip Middleware content-length is incorrect\nThe following exception is thrown when I use uvicorn to drive my starlette project. After control variates, I am sure this is caused by Gzip Middleware.\r\n\r\n```\r\n File \"C:\\Users\\AberS\\Documents\\Github\\index.py\\.venv\\lib\\site-packages\\h11\\_writers.py\", line 102, in send_eom\r\n raise LocalProtocolError(\"Too little data for declared Content-Length\") \r\nh11._util.LocalProtocolError: Too little data for declared Content-Length\r\n```\r\n\n", "before_files": [{"content": "import typing\n\nimport anyio\n\nfrom starlette.requests import Request\nfrom starlette.responses import Response, StreamingResponse\nfrom starlette.types import ASGIApp, Receive, Scope, Send\n\nRequestResponseEndpoint = typing.Callable[[Request], typing.Awaitable[Response]]\nDispatchFunction = typing.Callable[\n [Request, RequestResponseEndpoint], typing.Awaitable[Response]\n]\n\n\nclass BaseHTTPMiddleware:\n def __init__(\n self, app: ASGIApp, dispatch: typing.Optional[DispatchFunction] = None\n ) -> None:\n self.app = app\n self.dispatch_func = self.dispatch if dispatch is None else dispatch\n\n async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:\n if scope[\"type\"] != \"http\":\n await self.app(scope, receive, send)\n return\n\n async def call_next(request: Request) -> Response:\n app_exc: typing.Optional[Exception] = None\n send_stream, recv_stream = anyio.create_memory_object_stream()\n\n async def coro() -> None:\n nonlocal app_exc\n\n async with send_stream:\n try:\n await self.app(scope, request.receive, send_stream.send)\n except Exception as exc:\n app_exc = exc\n\n task_group.start_soon(coro)\n\n try:\n message = await recv_stream.receive()\n except anyio.EndOfStream:\n if app_exc is not None:\n raise app_exc\n raise RuntimeError(\"No response returned.\")\n\n assert message[\"type\"] == \"http.response.start\"\n\n async def body_stream() -> typing.AsyncGenerator[bytes, None]:\n async with recv_stream:\n async for message in recv_stream:\n assert message[\"type\"] == \"http.response.body\"\n yield message.get(\"body\", b\"\")\n\n if app_exc is not None:\n raise app_exc\n\n response = StreamingResponse(\n status_code=message[\"status\"], content=body_stream()\n )\n response.raw_headers = message[\"headers\"]\n return response\n\n async with anyio.create_task_group() as task_group:\n request = Request(scope, receive=receive)\n response = await self.dispatch_func(request, call_next)\n await response(scope, receive, send)\n task_group.cancel_scope.cancel()\n\n async def dispatch(\n self, request: Request, call_next: RequestResponseEndpoint\n ) -> Response:\n raise NotImplementedError() # pragma: no cover\n", "path": "starlette/middleware/base.py"}], "after_files": [{"content": "import typing\n\nimport anyio\n\nfrom starlette.requests import Request\nfrom starlette.responses import Response, StreamingResponse\nfrom starlette.types import ASGIApp, Receive, Scope, Send\n\nRequestResponseEndpoint = typing.Callable[[Request], 
typing.Awaitable[Response]]\nDispatchFunction = typing.Callable[\n [Request, RequestResponseEndpoint], typing.Awaitable[Response]\n]\n\n\nclass BaseHTTPMiddleware:\n def __init__(\n self, app: ASGIApp, dispatch: typing.Optional[DispatchFunction] = None\n ) -> None:\n self.app = app\n self.dispatch_func = self.dispatch if dispatch is None else dispatch\n\n async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:\n if scope[\"type\"] != \"http\":\n await self.app(scope, receive, send)\n return\n\n async def call_next(request: Request) -> Response:\n app_exc: typing.Optional[Exception] = None\n send_stream, recv_stream = anyio.create_memory_object_stream()\n\n async def coro() -> None:\n nonlocal app_exc\n\n async with send_stream:\n try:\n await self.app(scope, request.receive, send_stream.send)\n except Exception as exc:\n app_exc = exc\n\n task_group.start_soon(coro)\n\n try:\n message = await recv_stream.receive()\n except anyio.EndOfStream:\n if app_exc is not None:\n raise app_exc\n raise RuntimeError(\"No response returned.\")\n\n assert message[\"type\"] == \"http.response.start\"\n\n async def body_stream() -> typing.AsyncGenerator[bytes, None]:\n async with recv_stream:\n async for message in recv_stream:\n assert message[\"type\"] == \"http.response.body\"\n body = message.get(\"body\", b\"\")\n if body:\n yield body\n if not message.get(\"more_body\", False):\n break\n\n if app_exc is not None:\n raise app_exc\n\n response = StreamingResponse(\n status_code=message[\"status\"], content=body_stream()\n )\n response.raw_headers = message[\"headers\"]\n return response\n\n async with anyio.create_task_group() as task_group:\n request = Request(scope, receive=receive)\n response = await self.dispatch_func(request, call_next)\n await response(scope, receive, send)\n task_group.cancel_scope.cancel()\n\n async def dispatch(\n self, request: Request, call_next: RequestResponseEndpoint\n ) -> Response:\n raise NotImplementedError() # pragma: no cover\n", "path": "starlette/middleware/base.py"}]}
1,053
137
gh_patches_debug_20790
rasdani/github-patches
git_diff
rwth-i6__returnn-1464
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Compile native op: native signal handler When running `tools/compile_native_op.py` for example for `NativeLstm2` op, if the output file is specified it now looks like this: ``` /var/tmp/agerstenberger/returnn_native/native_signal_handler/3eb0034669/native_signal_handler.so /var/tmp/agerstenberger/returnn_tf_cache/ops/NativeLstm2/8c9954fa8e/NativeLstm2.so /var/tmp/agerstenberger/returnn_tf_cache/ops/GradOfNativeLstm2/d1a9d7605d/GradOfNativeLstm2.so ``` You would not expect to find native_signal_handler.so here. Also the `i6_core` job `CompileNativeOpJob` does not check names of the op but just copies the first entry and the second entry as gradient .so., which is now wrong. So now i'm asking, should we fix it here or do a more robust check in `i6_core`? A fix here is very simply just moving the line ```python NativeCodeCompiler.CollectedCompilers = [] ``` after the init function is called. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `tools/compile_native_op.py` Content: ``` 1 #!/usr/bin/env python3 2 3 """ 4 This explicitly compiles some of the native ops, and will tell you the so-filenames. 5 Normally all native ops (e.g. NativeLstm2 etc) are compiled on-the-fly within RETURNN. 6 When you export the computation graph (e.g. via ``compile_tf_graph.py``), 7 you explicitly must load these native ops. 8 """ 9 10 from __future__ import annotations 11 12 import os 13 import sys 14 import typing 15 16 import _setup_returnn_env # noqa 17 from returnn import __main__ as rnn 18 from returnn.log import log 19 import argparse 20 import returnn.util.basic as util 21 22 23 config = None # type: typing.Optional["returnn.config.Config"] 24 25 26 def init(config_filename, log_verbosity): 27 """ 28 :param str config_filename: filename to config-file 29 :param int log_verbosity: 30 """ 31 rnn.init_better_exchook() 32 rnn.init_thread_join_hack() 33 if config_filename: 34 print("Using config file %r." % config_filename) 35 assert os.path.exists(config_filename) 36 rnn.init_config(config_filename=config_filename, command_line_options=[]) 37 global config 38 config = rnn.config 39 config.set("log", None) 40 config.set("log_verbosity", log_verbosity) 41 config.set("use_tensorflow", True) 42 rnn.init_log() 43 print("Returnn compile-native-op starting up.", file=log.v1) 44 rnn.returnn_greeting() 45 rnn.init_backend_engine() 46 assert util.BackendEngine.is_tensorflow_selected(), "this is only for TensorFlow" 47 rnn.init_faulthandler() 48 if "network" in config.typed_dict: 49 print("Loading network") 50 from returnn.tf.network import TFNetwork 51 52 network = TFNetwork(name="", config=config, rnd_seed=1, train_flag=False, eval_flag=True, search_flag=False) 53 network.construct_from_dict(config.typed_dict["network"]) 54 55 56 def main(argv): 57 """ 58 Main entry. 59 """ 60 from returnn.tf.util.basic import CudaEnv, NativeCodeCompiler 61 62 CudaEnv.verbose_find_cuda = True 63 NativeCodeCompiler.CollectedCompilers = [] 64 65 argparser = argparse.ArgumentParser(description="Compile some op") 66 argparser.add_argument("--config", help="filename to config-file") 67 argparser.add_argument("--native_op", help="op name. e.g. 
'LstmGenericBase'") 68 argparser.add_argument( 69 "--blas_lib", default=None, help="specify which blas lib to use (path to .so or file name to search for)" 70 ) 71 argparser.add_argument( 72 "--search_for_numpy_blas", 73 dest="search_for_numpy_blas", 74 action="store_true", 75 help="search for blas inside numpys .libs folder", 76 ) 77 argparser.add_argument( 78 "--no_search_for_numpy_blas", 79 dest="search_for_numpy_blas", 80 action="store_false", 81 help="do not search for blas inside numpys .libs folder", 82 ) 83 argparser.add_argument("--verbosity", default=4, type=int, help="5 for all seqs (default: 4)") 84 argparser.add_argument("--output_file", help="if given, will write the list of libs to this file") 85 args = argparser.parse_args(argv[1:]) 86 init(config_filename=args.config, log_verbosity=args.verbosity) 87 88 import returnn.native_op as native_op 89 from returnn.tf.native_op import make_op, OpMaker 90 91 if args.native_op: 92 print("Loading native op %r" % args.native_op) 93 op_gen = getattr(native_op, args.native_op) 94 assert issubclass(op_gen, native_op.NativeOpGenBase) 95 make_op( 96 op_gen, 97 compiler_opts={"verbose": True}, 98 search_for_numpy_blas=args.search_for_numpy_blas, 99 blas_lib=args.blas_lib, 100 ) 101 102 libs = [] 103 if OpMaker.with_cuda and OpMaker.tf_blas_gemm_workaround: 104 print("CUDA BLAS lib:", OpMaker.cuda_blas_gemm_so_filename()) 105 libs.append(OpMaker.cuda_blas_gemm_so_filename()) 106 elif OpMaker.with_cuda is False: 107 print("No CUDA.") 108 109 for compiler in NativeCodeCompiler.CollectedCompilers: 110 assert isinstance(compiler, NativeCodeCompiler) 111 print(compiler) 112 # noinspection PyProtectedMember 113 libs.append(compiler._so_filename) 114 115 if libs: 116 print("libs:") 117 for fn in libs: 118 print(fn) 119 else: 120 print("no libs compiled. use --native_op or --config") 121 122 if args.output_file: 123 with open(args.output_file, "w") as f: 124 for fn in libs: 125 f.write(fn + "\n") 126 print("Wrote lib list to file:", args.output_file) 127 128 129 if __name__ == "__main__": 130 main(sys.argv) 131 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/tools/compile_native_op.py b/tools/compile_native_op.py --- a/tools/compile_native_op.py +++ b/tools/compile_native_op.py @@ -57,10 +57,10 @@ """ Main entry. """ - from returnn.tf.util.basic import CudaEnv, NativeCodeCompiler + from returnn.tf.util.basic import CudaEnv, OpCodeCompiler CudaEnv.verbose_find_cuda = True - NativeCodeCompiler.CollectedCompilers = [] + OpCodeCompiler.CollectedCompilers = [] argparser = argparse.ArgumentParser(description="Compile some op") argparser.add_argument("--config", help="filename to config-file") @@ -106,8 +106,8 @@ elif OpMaker.with_cuda is False: print("No CUDA.") - for compiler in NativeCodeCompiler.CollectedCompilers: - assert isinstance(compiler, NativeCodeCompiler) + for compiler in OpCodeCompiler.CollectedCompilers: + assert isinstance(compiler, OpCodeCompiler) print(compiler) # noinspection PyProtectedMember libs.append(compiler._so_filename)
{"golden_diff": "diff --git a/tools/compile_native_op.py b/tools/compile_native_op.py\n--- a/tools/compile_native_op.py\n+++ b/tools/compile_native_op.py\n@@ -57,10 +57,10 @@\n \"\"\"\n Main entry.\n \"\"\"\n- from returnn.tf.util.basic import CudaEnv, NativeCodeCompiler\n+ from returnn.tf.util.basic import CudaEnv, OpCodeCompiler\n \n CudaEnv.verbose_find_cuda = True\n- NativeCodeCompiler.CollectedCompilers = []\n+ OpCodeCompiler.CollectedCompilers = []\n \n argparser = argparse.ArgumentParser(description=\"Compile some op\")\n argparser.add_argument(\"--config\", help=\"filename to config-file\")\n@@ -106,8 +106,8 @@\n elif OpMaker.with_cuda is False:\n print(\"No CUDA.\")\n \n- for compiler in NativeCodeCompiler.CollectedCompilers:\n- assert isinstance(compiler, NativeCodeCompiler)\n+ for compiler in OpCodeCompiler.CollectedCompilers:\n+ assert isinstance(compiler, OpCodeCompiler)\n print(compiler)\n # noinspection PyProtectedMember\n libs.append(compiler._so_filename)\n", "issue": "Compile native op: native signal handler\nWhen running `tools/compile_native_op.py` for example for `NativeLstm2` op, if the output file is specified it now looks like this:\r\n```\r\n/var/tmp/agerstenberger/returnn_native/native_signal_handler/3eb0034669/native_signal_handler.so\r\n/var/tmp/agerstenberger/returnn_tf_cache/ops/NativeLstm2/8c9954fa8e/NativeLstm2.so\r\n/var/tmp/agerstenberger/returnn_tf_cache/ops/GradOfNativeLstm2/d1a9d7605d/GradOfNativeLstm2.so\r\n```\r\n\r\nYou would not expect to find native_signal_handler.so here. \r\nAlso the `i6_core` job `CompileNativeOpJob` does not check names of the op but just copies the first entry and the second entry as gradient .so., which is now wrong.\r\n\r\nSo now i'm asking, should we fix it here or do a more robust check in `i6_core`?\r\n\r\nA fix here is very simply just moving the line\r\n```python\r\nNativeCodeCompiler.CollectedCompilers = []\r\n```\r\nafter the init function is called.\n", "before_files": [{"content": "#!/usr/bin/env python3\n\n\"\"\"\nThis explicitly compiles some of the native ops, and will tell you the so-filenames.\nNormally all native ops (e.g. NativeLstm2 etc) are compiled on-the-fly within RETURNN.\nWhen you export the computation graph (e.g. 
via ``compile_tf_graph.py``),\nyou explicitly must load these native ops.\n\"\"\"\n\nfrom __future__ import annotations\n\nimport os\nimport sys\nimport typing\n\nimport _setup_returnn_env # noqa\nfrom returnn import __main__ as rnn\nfrom returnn.log import log\nimport argparse\nimport returnn.util.basic as util\n\n\nconfig = None # type: typing.Optional[\"returnn.config.Config\"]\n\n\ndef init(config_filename, log_verbosity):\n \"\"\"\n :param str config_filename: filename to config-file\n :param int log_verbosity:\n \"\"\"\n rnn.init_better_exchook()\n rnn.init_thread_join_hack()\n if config_filename:\n print(\"Using config file %r.\" % config_filename)\n assert os.path.exists(config_filename)\n rnn.init_config(config_filename=config_filename, command_line_options=[])\n global config\n config = rnn.config\n config.set(\"log\", None)\n config.set(\"log_verbosity\", log_verbosity)\n config.set(\"use_tensorflow\", True)\n rnn.init_log()\n print(\"Returnn compile-native-op starting up.\", file=log.v1)\n rnn.returnn_greeting()\n rnn.init_backend_engine()\n assert util.BackendEngine.is_tensorflow_selected(), \"this is only for TensorFlow\"\n rnn.init_faulthandler()\n if \"network\" in config.typed_dict:\n print(\"Loading network\")\n from returnn.tf.network import TFNetwork\n\n network = TFNetwork(name=\"\", config=config, rnd_seed=1, train_flag=False, eval_flag=True, search_flag=False)\n network.construct_from_dict(config.typed_dict[\"network\"])\n\n\ndef main(argv):\n \"\"\"\n Main entry.\n \"\"\"\n from returnn.tf.util.basic import CudaEnv, NativeCodeCompiler\n\n CudaEnv.verbose_find_cuda = True\n NativeCodeCompiler.CollectedCompilers = []\n\n argparser = argparse.ArgumentParser(description=\"Compile some op\")\n argparser.add_argument(\"--config\", help=\"filename to config-file\")\n argparser.add_argument(\"--native_op\", help=\"op name. e.g. 
'LstmGenericBase'\")\n argparser.add_argument(\n \"--blas_lib\", default=None, help=\"specify which blas lib to use (path to .so or file name to search for)\"\n )\n argparser.add_argument(\n \"--search_for_numpy_blas\",\n dest=\"search_for_numpy_blas\",\n action=\"store_true\",\n help=\"search for blas inside numpys .libs folder\",\n )\n argparser.add_argument(\n \"--no_search_for_numpy_blas\",\n dest=\"search_for_numpy_blas\",\n action=\"store_false\",\n help=\"do not search for blas inside numpys .libs folder\",\n )\n argparser.add_argument(\"--verbosity\", default=4, type=int, help=\"5 for all seqs (default: 4)\")\n argparser.add_argument(\"--output_file\", help=\"if given, will write the list of libs to this file\")\n args = argparser.parse_args(argv[1:])\n init(config_filename=args.config, log_verbosity=args.verbosity)\n\n import returnn.native_op as native_op\n from returnn.tf.native_op import make_op, OpMaker\n\n if args.native_op:\n print(\"Loading native op %r\" % args.native_op)\n op_gen = getattr(native_op, args.native_op)\n assert issubclass(op_gen, native_op.NativeOpGenBase)\n make_op(\n op_gen,\n compiler_opts={\"verbose\": True},\n search_for_numpy_blas=args.search_for_numpy_blas,\n blas_lib=args.blas_lib,\n )\n\n libs = []\n if OpMaker.with_cuda and OpMaker.tf_blas_gemm_workaround:\n print(\"CUDA BLAS lib:\", OpMaker.cuda_blas_gemm_so_filename())\n libs.append(OpMaker.cuda_blas_gemm_so_filename())\n elif OpMaker.with_cuda is False:\n print(\"No CUDA.\")\n\n for compiler in NativeCodeCompiler.CollectedCompilers:\n assert isinstance(compiler, NativeCodeCompiler)\n print(compiler)\n # noinspection PyProtectedMember\n libs.append(compiler._so_filename)\n\n if libs:\n print(\"libs:\")\n for fn in libs:\n print(fn)\n else:\n print(\"no libs compiled. use --native_op or --config\")\n\n if args.output_file:\n with open(args.output_file, \"w\") as f:\n for fn in libs:\n f.write(fn + \"\\n\")\n print(\"Wrote lib list to file:\", args.output_file)\n\n\nif __name__ == \"__main__\":\n main(sys.argv)\n", "path": "tools/compile_native_op.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\n\"\"\"\nThis explicitly compiles some of the native ops, and will tell you the so-filenames.\nNormally all native ops (e.g. NativeLstm2 etc) are compiled on-the-fly within RETURNN.\nWhen you export the computation graph (e.g. 
via ``compile_tf_graph.py``),\nyou explicitly must load these native ops.\n\"\"\"\n\nfrom __future__ import annotations\n\nimport os\nimport sys\nimport typing\n\nimport _setup_returnn_env # noqa\nfrom returnn import __main__ as rnn\nfrom returnn.log import log\nimport argparse\nimport returnn.util.basic as util\n\n\nconfig = None # type: typing.Optional[\"returnn.config.Config\"]\n\n\ndef init(config_filename, log_verbosity):\n \"\"\"\n :param str config_filename: filename to config-file\n :param int log_verbosity:\n \"\"\"\n rnn.init_better_exchook()\n rnn.init_thread_join_hack()\n if config_filename:\n print(\"Using config file %r.\" % config_filename)\n assert os.path.exists(config_filename)\n rnn.init_config(config_filename=config_filename, command_line_options=[])\n global config\n config = rnn.config\n config.set(\"log\", None)\n config.set(\"log_verbosity\", log_verbosity)\n config.set(\"use_tensorflow\", True)\n rnn.init_log()\n print(\"Returnn compile-native-op starting up.\", file=log.v1)\n rnn.returnn_greeting()\n rnn.init_backend_engine()\n assert util.BackendEngine.is_tensorflow_selected(), \"this is only for TensorFlow\"\n rnn.init_faulthandler()\n if \"network\" in config.typed_dict:\n print(\"Loading network\")\n from returnn.tf.network import TFNetwork\n\n network = TFNetwork(name=\"\", config=config, rnd_seed=1, train_flag=False, eval_flag=True, search_flag=False)\n network.construct_from_dict(config.typed_dict[\"network\"])\n\n\ndef main(argv):\n \"\"\"\n Main entry.\n \"\"\"\n from returnn.tf.util.basic import CudaEnv, OpCodeCompiler\n\n CudaEnv.verbose_find_cuda = True\n OpCodeCompiler.CollectedCompilers = []\n\n argparser = argparse.ArgumentParser(description=\"Compile some op\")\n argparser.add_argument(\"--config\", help=\"filename to config-file\")\n argparser.add_argument(\"--native_op\", help=\"op name. e.g. 
'LstmGenericBase'\")\n argparser.add_argument(\n \"--blas_lib\", default=None, help=\"specify which blas lib to use (path to .so or file name to search for)\"\n )\n argparser.add_argument(\n \"--search_for_numpy_blas\",\n dest=\"search_for_numpy_blas\",\n action=\"store_true\",\n help=\"search for blas inside numpys .libs folder\",\n )\n argparser.add_argument(\n \"--no_search_for_numpy_blas\",\n dest=\"search_for_numpy_blas\",\n action=\"store_false\",\n help=\"do not search for blas inside numpys .libs folder\",\n )\n argparser.add_argument(\"--verbosity\", default=4, type=int, help=\"5 for all seqs (default: 4)\")\n argparser.add_argument(\"--output_file\", help=\"if given, will write the list of libs to this file\")\n args = argparser.parse_args(argv[1:])\n init(config_filename=args.config, log_verbosity=args.verbosity)\n\n import returnn.native_op as native_op\n from returnn.tf.native_op import make_op, OpMaker\n\n if args.native_op:\n print(\"Loading native op %r\" % args.native_op)\n op_gen = getattr(native_op, args.native_op)\n assert issubclass(op_gen, native_op.NativeOpGenBase)\n make_op(\n op_gen,\n compiler_opts={\"verbose\": True},\n search_for_numpy_blas=args.search_for_numpy_blas,\n blas_lib=args.blas_lib,\n )\n\n libs = []\n if OpMaker.with_cuda and OpMaker.tf_blas_gemm_workaround:\n print(\"CUDA BLAS lib:\", OpMaker.cuda_blas_gemm_so_filename())\n libs.append(OpMaker.cuda_blas_gemm_so_filename())\n elif OpMaker.with_cuda is False:\n print(\"No CUDA.\")\n\n for compiler in OpCodeCompiler.CollectedCompilers:\n assert isinstance(compiler, OpCodeCompiler)\n print(compiler)\n # noinspection PyProtectedMember\n libs.append(compiler._so_filename)\n\n if libs:\n print(\"libs:\")\n for fn in libs:\n print(fn)\n else:\n print(\"no libs compiled. use --native_op or --config\")\n\n if args.output_file:\n with open(args.output_file, \"w\") as f:\n for fn in libs:\n f.write(fn + \"\\n\")\n print(\"Wrote lib list to file:\", args.output_file)\n\n\nif __name__ == \"__main__\":\n main(sys.argv)\n", "path": "tools/compile_native_op.py"}]}
1,851
248
gh_patches_debug_23570
rasdani/github-patches
git_diff
OCHA-DAP__hdx-ckan-452
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Implement GA tracking of downloads From Luis: _I've done some research about how to track the number of downloads in the website. We can track those events using Google Analytics as you suggested. There is a slight change of code that has to be implemented following Google Analytic's developer manual [here](https://developers.google.com/analytics/devguides/collection/analyticsjs/events). It is a bit more refined than copying and pasting code, although at a glance it doesn't seem to be extremely complicated._ --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `ckanext-metadata_fields/ckanext/metadata_fields/plugin.py` Content: ``` 1 ''' 2 Created on Apr 10, 2014 3 4 @author:alexandru-m-g 5 ''' 6 import logging 7 8 import ckan.plugins as plugins 9 import ckan.plugins.toolkit as tk 10 from routes.mapper import SubMapper 11 12 import ckanext.metadata_fields.custom_validator as vd 13 import ckanext.metadata_fields.update as update 14 15 def list_of_all_groups(): 16 groups = tk.get_action('group_list')(data_dict={'all_fields': True}) 17 return groups 18 19 20 class HdxMetadataFieldsPlugin(plugins.SingletonPlugin, tk.DefaultDatasetForm): 21 plugins.implements(plugins.IConfigurer, inherit=False) 22 plugins.implements(plugins.IRoutes, inherit=True) 23 plugins.implements(plugins.IDatasetForm, inherit=False) 24 plugins.implements(plugins.ITemplateHelpers) 25 plugins.implements(plugins.IActions) 26 27 def update_config(self, config): 28 tk.add_template_directory(config, 'templates') 29 30 def before_map(self, map): 31 with SubMapper(map, controller='ckanext.metadata_fields.dataset_controller:DatasetController') as m: 32 m.connect('add dataset', '/dataset/new', action='new') 33 m.connect('/dataset/{action}/{id}', 34 requirements=dict(action='|'.join([ 35 'new_metadata', 36 'new_resource', 37 ]))) 38 return map 39 40 def is_fallback(self): 41 return True 42 43 def package_types(self): 44 # default - no specific package type 45 return [] 46 47 def _modify_package_schema(self, schema): 48 49 schema.update({ 50 'package_creator': [tk.get_validator('not_empty'), 51 tk.get_converter('convert_to_extras')], 52 'groups_list': [vd.groups_not_empty], 53 'caveats' : [tk.get_validator('ignore_missing'), 54 tk.get_converter('convert_to_extras')], 55 'dataset_source' : [tk.get_validator('not_empty'), 56 tk.get_converter('convert_to_extras')], 57 'dataset_date' : [tk.get_validator('ignore_missing'), 58 tk.get_converter('convert_to_extras')], 59 'methodology' : [tk.get_validator('ignore_missing'), 60 tk.get_converter('convert_to_extras')], 61 }) 62 63 return schema 64 65 66 def create_package_schema(self): 67 schema = super(HdxMetadataFieldsPlugin, self).create_package_schema() 68 schema = self._modify_package_schema(schema) 69 return schema 70 71 def update_package_schema(self): 72 schema = super(HdxMetadataFieldsPlugin, self).update_package_schema() 73 schema = self._modify_package_schema(schema) 74 return schema 75 76 def show_package_schema(self): 77 schema = super(HdxMetadataFieldsPlugin, self).show_package_schema() 78 79 schema.update({ 80 'package_creator': [tk.get_converter('convert_from_extras'), 81 tk.get_validator('ignore_missing')], 82 'caveats' : [tk.get_converter('convert_from_extras'), 83 tk.get_validator('ignore_missing')], 84 'dataset_source' : [tk.get_converter('convert_from_extras'), 85 
tk.get_validator('ignore_missing')], 86 'dataset_date' : [tk.get_converter('convert_from_extras'), 87 tk.get_validator('ignore_missing')], 88 'methodology' : [tk.get_converter('convert_from_extras'), 89 tk.get_validator('ignore_missing')], 90 }) 91 return schema 92 93 94 def get_helpers(self): 95 return {'list_of_all_groups': list_of_all_groups} 96 97 def get_actions(self): 98 return {'package_update': update.package_update} 99 100 101 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/ckanext-metadata_fields/ckanext/metadata_fields/plugin.py b/ckanext-metadata_fields/ckanext/metadata_fields/plugin.py --- a/ckanext-metadata_fields/ckanext/metadata_fields/plugin.py +++ b/ckanext-metadata_fields/ckanext/metadata_fields/plugin.py @@ -47,6 +47,7 @@ def _modify_package_schema(self, schema): schema.update({ + 'notes': [tk.get_validator('not_empty')], #Notes == description. Makes description required 'package_creator': [tk.get_validator('not_empty'), tk.get_converter('convert_to_extras')], 'groups_list': [vd.groups_not_empty], @@ -75,8 +76,8 @@ def show_package_schema(self): schema = super(HdxMetadataFieldsPlugin, self).show_package_schema() - schema.update({ + 'notes': [tk.get_validator('not_empty')], #Notes == description. Makes description required 'package_creator': [tk.get_converter('convert_from_extras'), tk.get_validator('ignore_missing')], 'caveats' : [tk.get_converter('convert_from_extras'),
{"golden_diff": "diff --git a/ckanext-metadata_fields/ckanext/metadata_fields/plugin.py b/ckanext-metadata_fields/ckanext/metadata_fields/plugin.py\n--- a/ckanext-metadata_fields/ckanext/metadata_fields/plugin.py\n+++ b/ckanext-metadata_fields/ckanext/metadata_fields/plugin.py\n@@ -47,6 +47,7 @@\n def _modify_package_schema(self, schema):\n \n schema.update({\n+ 'notes': [tk.get_validator('not_empty')], #Notes == description. Makes description required\n 'package_creator': [tk.get_validator('not_empty'),\n tk.get_converter('convert_to_extras')],\n 'groups_list': [vd.groups_not_empty],\n@@ -75,8 +76,8 @@\n \n def show_package_schema(self):\n schema = super(HdxMetadataFieldsPlugin, self).show_package_schema()\n-\n schema.update({\n+ 'notes': [tk.get_validator('not_empty')], #Notes == description. Makes description required\n 'package_creator': [tk.get_converter('convert_from_extras'),\n tk.get_validator('ignore_missing')],\n 'caveats' : [tk.get_converter('convert_from_extras'),\n", "issue": "Implement GA tracking of downloads\nFrom Luis: \n\n_I've done some research about how to track the number of downloads in the website. We can track those events using Google Analytics as you suggested. There is a slight change of code that has to be implemented following Google Analytic's developer manual [here](https://developers.google.com/analytics/devguides/collection/analyticsjs/events). It is a bit more refined than copying and pasting code, although at a glance it doesn't seem to be extremely complicated._\n\n", "before_files": [{"content": "'''\nCreated on Apr 10, 2014\n\n@author:alexandru-m-g\n'''\nimport logging\n\nimport ckan.plugins as plugins\nimport ckan.plugins.toolkit as tk\nfrom routes.mapper import SubMapper\n\nimport ckanext.metadata_fields.custom_validator as vd\nimport ckanext.metadata_fields.update as update\n\ndef list_of_all_groups():\n groups = tk.get_action('group_list')(data_dict={'all_fields': True})\n return groups\n\n\nclass HdxMetadataFieldsPlugin(plugins.SingletonPlugin, tk.DefaultDatasetForm):\n plugins.implements(plugins.IConfigurer, inherit=False)\n plugins.implements(plugins.IRoutes, inherit=True)\n plugins.implements(plugins.IDatasetForm, inherit=False)\n plugins.implements(plugins.ITemplateHelpers)\n plugins.implements(plugins.IActions)\n\n def update_config(self, config):\n tk.add_template_directory(config, 'templates')\n\n def before_map(self, map):\n with SubMapper(map, controller='ckanext.metadata_fields.dataset_controller:DatasetController') as m:\n m.connect('add dataset', '/dataset/new', action='new')\n m.connect('/dataset/{action}/{id}',\n requirements=dict(action='|'.join([\n 'new_metadata',\n 'new_resource',\n ])))\n return map\n \n def is_fallback(self):\n return True\n\n def package_types(self):\n # default - no specific package type\n return []\n\n def _modify_package_schema(self, schema):\n \n schema.update({\n 'package_creator': [tk.get_validator('not_empty'),\n tk.get_converter('convert_to_extras')],\n 'groups_list': [vd.groups_not_empty],\n 'caveats' : [tk.get_validator('ignore_missing'),\n tk.get_converter('convert_to_extras')],\n 'dataset_source' : [tk.get_validator('not_empty'),\n tk.get_converter('convert_to_extras')],\n 'dataset_date' : [tk.get_validator('ignore_missing'),\n tk.get_converter('convert_to_extras')],\n 'methodology' : [tk.get_validator('ignore_missing'),\n tk.get_converter('convert_to_extras')],\n })\n\n return schema\n\n\n def create_package_schema(self):\n schema = super(HdxMetadataFieldsPlugin, self).create_package_schema()\n 
schema = self._modify_package_schema(schema)\n return schema\n\n def update_package_schema(self):\n schema = super(HdxMetadataFieldsPlugin, self).update_package_schema()\n schema = self._modify_package_schema(schema)\n return schema\n\n def show_package_schema(self):\n schema = super(HdxMetadataFieldsPlugin, self).show_package_schema()\n\n schema.update({\n 'package_creator': [tk.get_converter('convert_from_extras'),\n tk.get_validator('ignore_missing')],\n 'caveats' : [tk.get_converter('convert_from_extras'),\n tk.get_validator('ignore_missing')],\n 'dataset_source' : [tk.get_converter('convert_from_extras'),\n tk.get_validator('ignore_missing')],\n 'dataset_date' : [tk.get_converter('convert_from_extras'),\n tk.get_validator('ignore_missing')],\n 'methodology' : [tk.get_converter('convert_from_extras'),\n tk.get_validator('ignore_missing')],\n })\n return schema\n \n \n def get_helpers(self):\n return {'list_of_all_groups': list_of_all_groups}\n \n def get_actions(self):\n return {'package_update': update.package_update}\n\n\n", "path": "ckanext-metadata_fields/ckanext/metadata_fields/plugin.py"}], "after_files": [{"content": "'''\nCreated on Apr 10, 2014\n\n@author:alexandru-m-g\n'''\nimport logging\n\nimport ckan.plugins as plugins\nimport ckan.plugins.toolkit as tk\nfrom routes.mapper import SubMapper\n\nimport ckanext.metadata_fields.custom_validator as vd\nimport ckanext.metadata_fields.update as update\n\ndef list_of_all_groups():\n groups = tk.get_action('group_list')(data_dict={'all_fields': True})\n return groups\n\n\nclass HdxMetadataFieldsPlugin(plugins.SingletonPlugin, tk.DefaultDatasetForm):\n plugins.implements(plugins.IConfigurer, inherit=False)\n plugins.implements(plugins.IRoutes, inherit=True)\n plugins.implements(plugins.IDatasetForm, inherit=False)\n plugins.implements(plugins.ITemplateHelpers)\n plugins.implements(plugins.IActions)\n\n def update_config(self, config):\n tk.add_template_directory(config, 'templates')\n\n def before_map(self, map):\n with SubMapper(map, controller='ckanext.metadata_fields.dataset_controller:DatasetController') as m:\n m.connect('add dataset', '/dataset/new', action='new')\n m.connect('/dataset/{action}/{id}',\n requirements=dict(action='|'.join([\n 'new_metadata',\n 'new_resource',\n ])))\n return map\n \n def is_fallback(self):\n return True\n\n def package_types(self):\n # default - no specific package type\n return []\n\n def _modify_package_schema(self, schema):\n \n schema.update({\n 'notes': [tk.get_validator('not_empty')], #Notes == description. 
Makes description required\n 'package_creator': [tk.get_validator('not_empty'),\n tk.get_converter('convert_to_extras')],\n 'groups_list': [vd.groups_not_empty],\n 'caveats' : [tk.get_validator('ignore_missing'),\n tk.get_converter('convert_to_extras')],\n 'dataset_source' : [tk.get_validator('not_empty'),\n tk.get_converter('convert_to_extras')],\n 'dataset_date' : [tk.get_validator('ignore_missing'),\n tk.get_converter('convert_to_extras')],\n 'methodology' : [tk.get_validator('ignore_missing'),\n tk.get_converter('convert_to_extras')],\n })\n\n return schema\n\n\n def create_package_schema(self):\n schema = super(HdxMetadataFieldsPlugin, self).create_package_schema()\n schema = self._modify_package_schema(schema)\n return schema\n\n def update_package_schema(self):\n schema = super(HdxMetadataFieldsPlugin, self).update_package_schema()\n schema = self._modify_package_schema(schema)\n return schema\n\n def show_package_schema(self):\n schema = super(HdxMetadataFieldsPlugin, self).show_package_schema()\n schema.update({\n 'notes': [tk.get_validator('not_empty')], #Notes == description. Makes description required\n 'package_creator': [tk.get_converter('convert_from_extras'),\n tk.get_validator('ignore_missing')],\n 'caveats' : [tk.get_converter('convert_from_extras'),\n tk.get_validator('ignore_missing')],\n 'dataset_source' : [tk.get_converter('convert_from_extras'),\n tk.get_validator('ignore_missing')],\n 'dataset_date' : [tk.get_converter('convert_from_extras'),\n tk.get_validator('ignore_missing')],\n 'methodology' : [tk.get_converter('convert_from_extras'),\n tk.get_validator('ignore_missing')],\n })\n return schema\n \n \n def get_helpers(self):\n return {'list_of_all_groups': list_of_all_groups}\n \n def get_actions(self):\n return {'package_update': update.package_update}\n\n\n", "path": "ckanext-metadata_fields/ckanext/metadata_fields/plugin.py"}]}
1,304
260
gh_patches_debug_51517
rasdani/github-patches
git_diff
pypa__setuptools-689
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- AttributeError: 'module' object has no attribute 'chdir' in 25.0.1 The new `setuptools == 25.0.1` just failed on our CI with `AttributeError: 'module' object has no attribute 'chdir'`. The new expression [`here and os.path.chdir(here)`](https://github.com/pypa/setuptools/blob/21ab99e53f0c263a2210cf51525d6edcae1ae9a7/setup.py#L194) in `setup.py` was probably meant to use `os.chdir()`, since `os.path` has no `chdir()`. _(Note: Lots of buildout related noise in the traceback, but I didn't want to truncate it and risk omitting relevant info)_ ``` Getting distribution for 'setuptools'. Traceback (most recent call last): File "<string>", line 1, in <module> File "/var/lib/jenkins/zope/eggs/setuptools-18.2-py2.7.egg/setuptools/command/easy_install.py", line 2245, in main distclass=DistributionWithoutHelpCommands, **kw File "/usr/local/python/2.7.10/lib/python2.7/distutils/core.py", line 151, in setup dist.run_commands() File "/usr/local/python/2.7.10/lib/python2.7/distutils/dist.py", line 953, in run_commands self.run_command(cmd) File "/usr/local/python/2.7.10/lib/python2.7/distutils/dist.py", line 972, in run_command cmd_obj.run() File "/var/lib/jenkins/zope/eggs/setuptools-18.2-py2.7.egg/setuptools/command/easy_install.py", line 380, in run self.easy_install(spec, not self.no_deps) File "/var/lib/jenkins/zope/eggs/setuptools-18.2-py2.7.egg/setuptools/command/easy_install.py", line 610, in easy_install return self.install_item(None, spec, tmpdir, deps, True) File "/var/lib/jenkins/zope/eggs/setuptools-18.2-py2.7.egg/setuptools/command/easy_install.py", line 659, in install_item dists = self.install_eggs(spec, download, tmpdir) File "/var/lib/jenkins/zope/eggs/setuptools-18.2-py2.7.egg/setuptools/command/easy_install.py", line 842, in install_eggs return self.build_and_install(setup_script, setup_base) File "/var/lib/jenkins/zope/eggs/setuptools-18.2-py2.7.egg/setuptools/command/easy_install.py", line 1070, in build_and_install self.run_setup(setup_script, setup_base, args) File "/var/lib/jenkins/zope/eggs/setuptools-18.2-py2.7.egg/setuptools/command/easy_install.py", line 1056, in run_setup run_setup(setup_script, args) File "/var/lib/jenkins/zope/eggs/setuptools-18.2-py2.7.egg/setuptools/sandbox.py", line 240, in run_setup raise File "/usr/local/python/2.7.10/lib/python2.7/contextlib.py", line 35, in __exit__ self.gen.throw(type, value, traceback) File "/var/lib/jenkins/zope/eggs/setuptools-18.2-py2.7.egg/setuptools/sandbox.py", line 193, in setup_context yield File "/usr/local/python/2.7.10/lib/python2.7/contextlib.py", line 35, in __exit__ self.gen.throw(type, value, traceback) File "/var/lib/jenkins/zope/eggs/setuptools-18.2-py2.7.egg/setuptools/sandbox.py", line 164, in save_modules saved_exc.resume() File "/var/lib/jenkins/zope/eggs/setuptools-18.2-py2.7.egg/setuptools/sandbox.py", line 139, in resume compat.reraise(type, exc, self._tb) File "/var/lib/jenkins/zope/eggs/setuptools-18.2-py2.7.egg/setuptools/sandbox.py", line 152, in save_modules yield saved File "/var/lib/jenkins/zope/eggs/setuptools-18.2-py2.7.egg/setuptools/sandbox.py", line 193, in setup_context yield File "/var/lib/jenkins/zope/eggs/setuptools-18.2-py2.7.egg/setuptools/sandbox.py", line 237, in run_setup DirectorySandbox(setup_dir).run(runner) File "/var/lib/jenkins/zope/eggs/setuptools-18.2-py2.7.egg/setuptools/sandbox.py", line 267, in run return func() File 
"/var/lib/jenkins/zope/eggs/setuptools-18.2-py2.7.egg/setuptools/sandbox.py", line 236, in runner _execfile(setup_script, ns) File "/var/lib/jenkins/zope/eggs/setuptools-18.2-py2.7.egg/setuptools/sandbox.py", line 46, in _execfile exec(code, globals, locals) File "/tmp/easy_install-6d2nJI/setuptools-25.0.1/setup.py", line 194, in <module> AttributeError: 'module' object has no attribute 'chdir' ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 #!/usr/bin/env python 2 """ 3 Distutils setup file, used to install or test 'setuptools' 4 """ 5 6 import io 7 import os 8 import sys 9 import textwrap 10 11 import setuptools 12 13 14 here = os.path.dirname(__file__) 15 16 17 def require_metadata(): 18 "Prevent improper installs without necessary metadata. See #659" 19 if not os.path.exists('setuptools.egg-info'): 20 msg = "Cannot build setuptools without metadata. Run bootstrap.py" 21 raise RuntimeError(msg) 22 23 24 def read_commands(): 25 command_ns = {} 26 cmd_module_path = 'setuptools/command/__init__.py' 27 init_path = os.path.join(here, cmd_module_path) 28 with open(init_path) as init_file: 29 exec(init_file.read(), command_ns) 30 return command_ns['__all__'] 31 32 33 def _gen_console_scripts(): 34 yield "easy_install = setuptools.command.easy_install:main" 35 36 # Gentoo distributions manage the python-version-specific scripts 37 # themselves, so those platforms define an environment variable to 38 # suppress the creation of the version-specific scripts. 39 var_names = ( 40 'SETUPTOOLS_DISABLE_VERSIONED_EASY_INSTALL_SCRIPT', 41 'DISTRIBUTE_DISABLE_VERSIONED_EASY_INSTALL_SCRIPT', 42 ) 43 if any(os.environ.get(var) not in (None, "", "0") for var in var_names): 44 return 45 yield ("easy_install-{shortver} = setuptools.command.easy_install:main" 46 .format(shortver=sys.version[:3])) 47 48 49 readme_path = os.path.join(here, 'README.rst') 50 with io.open(readme_path, encoding='utf-8') as readme_file: 51 long_description = readme_file.read() 52 53 package_data = dict( 54 setuptools=['script (dev).tmpl', 'script.tmpl', 'site-patch.py'], 55 ) 56 57 force_windows_specific_files = ( 58 os.environ.get("SETUPTOOLS_INSTALL_WINDOWS_SPECIFIC_FILES") 59 not in (None, "", "0") 60 ) 61 62 include_windows_files = ( 63 sys.platform == 'win32' or 64 os.name == 'java' and os._name == 'nt' or 65 force_windows_specific_files 66 ) 67 68 if include_windows_files: 69 package_data.setdefault('setuptools', []).extend(['*.exe']) 70 package_data.setdefault('setuptools.command', []).extend(['*.xml']) 71 72 needs_pytest = set(['ptr', 'pytest', 'test']).intersection(sys.argv) 73 pytest_runner = ['pytest-runner'] if needs_pytest else [] 74 needs_wheel = set(['release', 'bdist_wheel']).intersection(sys.argv) 75 wheel = ['wheel'] if needs_wheel else [] 76 77 78 def pypi_link(pkg_filename): 79 """ 80 Given the filename, including md5 fragment, construct the 81 dependency link for PyPI. 
82 """ 83 root = 'https://pypi.python.org/packages/source' 84 name, sep, rest = pkg_filename.partition('-') 85 parts = root, name[0], name, pkg_filename 86 return '/'.join(parts) 87 88 89 setup_params = dict( 90 name="setuptools", 91 version="25.0.1", 92 description="Easily download, build, install, upgrade, and uninstall " 93 "Python packages", 94 author="Python Packaging Authority", 95 author_email="[email protected]", 96 long_description=long_description, 97 keywords="CPAN PyPI distutils eggs package management", 98 url="https://github.com/pypa/setuptools", 99 src_root=None, 100 packages=setuptools.find_packages(exclude=['*.tests']), 101 package_data=package_data, 102 103 py_modules=['easy_install'], 104 105 zip_safe=True, 106 107 entry_points={ 108 "distutils.commands": [ 109 "%(cmd)s = setuptools.command.%(cmd)s:%(cmd)s" % locals() 110 for cmd in read_commands() 111 ], 112 "distutils.setup_keywords": [ 113 "eager_resources = setuptools.dist:assert_string_list", 114 "namespace_packages = setuptools.dist:check_nsp", 115 "extras_require = setuptools.dist:check_extras", 116 "install_requires = setuptools.dist:check_requirements", 117 "tests_require = setuptools.dist:check_requirements", 118 "setup_requires = setuptools.dist:check_requirements", 119 "python_requires = setuptools.dist:check_specifier", 120 "entry_points = setuptools.dist:check_entry_points", 121 "test_suite = setuptools.dist:check_test_suite", 122 "zip_safe = setuptools.dist:assert_bool", 123 "package_data = setuptools.dist:check_package_data", 124 "exclude_package_data = setuptools.dist:check_package_data", 125 "include_package_data = setuptools.dist:assert_bool", 126 "packages = setuptools.dist:check_packages", 127 "dependency_links = setuptools.dist:assert_string_list", 128 "test_loader = setuptools.dist:check_importable", 129 "test_runner = setuptools.dist:check_importable", 130 "use_2to3 = setuptools.dist:assert_bool", 131 "convert_2to3_doctests = setuptools.dist:assert_string_list", 132 "use_2to3_fixers = setuptools.dist:assert_string_list", 133 "use_2to3_exclude_fixers = setuptools.dist:assert_string_list", 134 ], 135 "egg_info.writers": [ 136 "PKG-INFO = setuptools.command.egg_info:write_pkg_info", 137 "requires.txt = setuptools.command.egg_info:write_requirements", 138 "entry_points.txt = setuptools.command.egg_info:write_entries", 139 "eager_resources.txt = setuptools.command.egg_info:overwrite_arg", 140 "namespace_packages.txt = setuptools.command.egg_info:overwrite_arg", 141 "top_level.txt = setuptools.command.egg_info:write_toplevel_names", 142 "depends.txt = setuptools.command.egg_info:warn_depends_obsolete", 143 "dependency_links.txt = setuptools.command.egg_info:overwrite_arg", 144 ], 145 "console_scripts": list(_gen_console_scripts()), 146 147 "setuptools.installation": 148 ['eggsecutable = setuptools.command.easy_install:bootstrap'], 149 }, 150 151 152 classifiers=textwrap.dedent(""" 153 Development Status :: 5 - Production/Stable 154 Intended Audience :: Developers 155 License :: OSI Approved :: MIT License 156 Operating System :: OS Independent 157 Programming Language :: Python :: 2.6 158 Programming Language :: Python :: 2.7 159 Programming Language :: Python :: 3 160 Programming Language :: Python :: 3.3 161 Programming Language :: Python :: 3.4 162 Programming Language :: Python :: 3.5 163 Topic :: Software Development :: Libraries :: Python Modules 164 Topic :: System :: Archiving :: Packaging 165 Topic :: System :: Systems Administration 166 Topic :: Utilities 167 """).strip().splitlines(), 168 
extras_require={ 169 "ssl:sys_platform=='win32'": "wincertstore==0.2", 170 "certs": "certifi==2016.2.28", 171 }, 172 dependency_links=[ 173 pypi_link( 174 'certifi-2016.2.28.tar.gz#md5=5d672aa766e1f773c75cfeccd02d3650', 175 ), 176 pypi_link( 177 'wincertstore-0.2.zip#md5=ae728f2f007185648d0c7a8679b361e2', 178 ), 179 ], 180 scripts=[], 181 tests_require=[ 182 'setuptools[ssl]', 183 'pytest-flake8', 184 # workaround for pytest-flake8 #7 185 'flake8<3dev', 186 'pytest>=2.8', 187 ] + (['mock'] if sys.version_info[:2] < (3, 3) else []), 188 setup_requires=[ 189 ] + pytest_runner + wheel, 190 ) 191 192 if __name__ == '__main__': 193 # allow setup.py to run from another directory 194 here and os.path.chdir(here) 195 require_metadata() 196 dist = setuptools.setup(**setup_params) 197 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -191,6 +191,6 @@ if __name__ == '__main__': # allow setup.py to run from another directory - here and os.path.chdir(here) + here and os.chdir(here) require_metadata() dist = setuptools.setup(**setup_params)
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -191,6 +191,6 @@\n \n if __name__ == '__main__':\n # allow setup.py to run from another directory\n- here and os.path.chdir(here)\n+ here and os.chdir(here)\n require_metadata()\n dist = setuptools.setup(**setup_params)\n", "issue": "AttributeError: 'module' object has no attribute 'chdir' in 25.0.1\nThe new `setuptools == 25.0.1` just failed on our CI with `AttributeError: 'module' object has no attribute 'chdir'`.\n\nThe new expression [`here and os.path.chdir(here)`](https://github.com/pypa/setuptools/blob/21ab99e53f0c263a2210cf51525d6edcae1ae9a7/setup.py#L194) in `setup.py` was probably meant to use `os.chdir()`, since `os.path` has no `chdir()`.\n\n_(Note: Lots of buildout related noise in the traceback, but I didn't want to truncate it and risk omitting relevant info)_\n\n```\nGetting distribution for 'setuptools'.\nTraceback (most recent call last):\n File \"<string>\", line 1, in <module>\n File \"/var/lib/jenkins/zope/eggs/setuptools-18.2-py2.7.egg/setuptools/command/easy_install.py\", line 2245, in main\n distclass=DistributionWithoutHelpCommands, **kw\n File \"/usr/local/python/2.7.10/lib/python2.7/distutils/core.py\", line 151, in setup\n dist.run_commands()\n File \"/usr/local/python/2.7.10/lib/python2.7/distutils/dist.py\", line 953, in run_commands\n self.run_command(cmd)\n File \"/usr/local/python/2.7.10/lib/python2.7/distutils/dist.py\", line 972, in run_command\n cmd_obj.run()\n File \"/var/lib/jenkins/zope/eggs/setuptools-18.2-py2.7.egg/setuptools/command/easy_install.py\", line 380, in run\n self.easy_install(spec, not self.no_deps)\n File \"/var/lib/jenkins/zope/eggs/setuptools-18.2-py2.7.egg/setuptools/command/easy_install.py\", line 610, in easy_install\n return self.install_item(None, spec, tmpdir, deps, True)\n File \"/var/lib/jenkins/zope/eggs/setuptools-18.2-py2.7.egg/setuptools/command/easy_install.py\", line 659, in install_item\n dists = self.install_eggs(spec, download, tmpdir)\n File \"/var/lib/jenkins/zope/eggs/setuptools-18.2-py2.7.egg/setuptools/command/easy_install.py\", line 842, in install_eggs\n return self.build_and_install(setup_script, setup_base)\n File \"/var/lib/jenkins/zope/eggs/setuptools-18.2-py2.7.egg/setuptools/command/easy_install.py\", line 1070, in build_and_install\n self.run_setup(setup_script, setup_base, args)\n File \"/var/lib/jenkins/zope/eggs/setuptools-18.2-py2.7.egg/setuptools/command/easy_install.py\", line 1056, in run_setup\n run_setup(setup_script, args)\n File \"/var/lib/jenkins/zope/eggs/setuptools-18.2-py2.7.egg/setuptools/sandbox.py\", line 240, in run_setup\n raise\n File \"/usr/local/python/2.7.10/lib/python2.7/contextlib.py\", line 35, in __exit__\n self.gen.throw(type, value, traceback)\n File \"/var/lib/jenkins/zope/eggs/setuptools-18.2-py2.7.egg/setuptools/sandbox.py\", line 193, in setup_context\n yield\n File \"/usr/local/python/2.7.10/lib/python2.7/contextlib.py\", line 35, in __exit__\n self.gen.throw(type, value, traceback)\n File \"/var/lib/jenkins/zope/eggs/setuptools-18.2-py2.7.egg/setuptools/sandbox.py\", line 164, in save_modules\n saved_exc.resume()\n File \"/var/lib/jenkins/zope/eggs/setuptools-18.2-py2.7.egg/setuptools/sandbox.py\", line 139, in resume\n compat.reraise(type, exc, self._tb)\n File \"/var/lib/jenkins/zope/eggs/setuptools-18.2-py2.7.egg/setuptools/sandbox.py\", line 152, in save_modules\n yield saved\n File \"/var/lib/jenkins/zope/eggs/setuptools-18.2-py2.7.egg/setuptools/sandbox.py\", line 193, in 
setup_context\n yield\n File \"/var/lib/jenkins/zope/eggs/setuptools-18.2-py2.7.egg/setuptools/sandbox.py\", line 237, in run_setup\n DirectorySandbox(setup_dir).run(runner)\n File \"/var/lib/jenkins/zope/eggs/setuptools-18.2-py2.7.egg/setuptools/sandbox.py\", line 267, in run\n return func()\n File \"/var/lib/jenkins/zope/eggs/setuptools-18.2-py2.7.egg/setuptools/sandbox.py\", line 236, in runner\n _execfile(setup_script, ns)\n File \"/var/lib/jenkins/zope/eggs/setuptools-18.2-py2.7.egg/setuptools/sandbox.py\", line 46, in _execfile\n exec(code, globals, locals)\n File \"/tmp/easy_install-6d2nJI/setuptools-25.0.1/setup.py\", line 194, in <module>\n\nAttributeError: 'module' object has no attribute 'chdir'\n```\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\"\"\"\nDistutils setup file, used to install or test 'setuptools'\n\"\"\"\n\nimport io\nimport os\nimport sys\nimport textwrap\n\nimport setuptools\n\n\nhere = os.path.dirname(__file__)\n\n\ndef require_metadata():\n \"Prevent improper installs without necessary metadata. See #659\"\n if not os.path.exists('setuptools.egg-info'):\n msg = \"Cannot build setuptools without metadata. Run bootstrap.py\"\n raise RuntimeError(msg)\n\n\ndef read_commands():\n command_ns = {}\n cmd_module_path = 'setuptools/command/__init__.py'\n init_path = os.path.join(here, cmd_module_path)\n with open(init_path) as init_file:\n exec(init_file.read(), command_ns)\n return command_ns['__all__']\n\n\ndef _gen_console_scripts():\n yield \"easy_install = setuptools.command.easy_install:main\"\n\n # Gentoo distributions manage the python-version-specific scripts\n # themselves, so those platforms define an environment variable to\n # suppress the creation of the version-specific scripts.\n var_names = (\n 'SETUPTOOLS_DISABLE_VERSIONED_EASY_INSTALL_SCRIPT',\n 'DISTRIBUTE_DISABLE_VERSIONED_EASY_INSTALL_SCRIPT',\n )\n if any(os.environ.get(var) not in (None, \"\", \"0\") for var in var_names):\n return\n yield (\"easy_install-{shortver} = setuptools.command.easy_install:main\"\n .format(shortver=sys.version[:3]))\n\n\nreadme_path = os.path.join(here, 'README.rst')\nwith io.open(readme_path, encoding='utf-8') as readme_file:\n long_description = readme_file.read()\n\npackage_data = dict(\n setuptools=['script (dev).tmpl', 'script.tmpl', 'site-patch.py'],\n)\n\nforce_windows_specific_files = (\n os.environ.get(\"SETUPTOOLS_INSTALL_WINDOWS_SPECIFIC_FILES\")\n not in (None, \"\", \"0\")\n)\n\ninclude_windows_files = (\n sys.platform == 'win32' or\n os.name == 'java' and os._name == 'nt' or\n force_windows_specific_files\n)\n\nif include_windows_files:\n package_data.setdefault('setuptools', []).extend(['*.exe'])\n package_data.setdefault('setuptools.command', []).extend(['*.xml'])\n\nneeds_pytest = set(['ptr', 'pytest', 'test']).intersection(sys.argv)\npytest_runner = ['pytest-runner'] if needs_pytest else []\nneeds_wheel = set(['release', 'bdist_wheel']).intersection(sys.argv)\nwheel = ['wheel'] if needs_wheel else []\n\n\ndef pypi_link(pkg_filename):\n \"\"\"\n Given the filename, including md5 fragment, construct the\n dependency link for PyPI.\n \"\"\"\n root = 'https://pypi.python.org/packages/source'\n name, sep, rest = pkg_filename.partition('-')\n parts = root, name[0], name, pkg_filename\n return '/'.join(parts)\n\n\nsetup_params = dict(\n name=\"setuptools\",\n version=\"25.0.1\",\n description=\"Easily download, build, install, upgrade, and uninstall \"\n \"Python packages\",\n author=\"Python Packaging Authority\",\n author_email=\"[email 
protected]\",\n long_description=long_description,\n keywords=\"CPAN PyPI distutils eggs package management\",\n url=\"https://github.com/pypa/setuptools\",\n src_root=None,\n packages=setuptools.find_packages(exclude=['*.tests']),\n package_data=package_data,\n\n py_modules=['easy_install'],\n\n zip_safe=True,\n\n entry_points={\n \"distutils.commands\": [\n \"%(cmd)s = setuptools.command.%(cmd)s:%(cmd)s\" % locals()\n for cmd in read_commands()\n ],\n \"distutils.setup_keywords\": [\n \"eager_resources = setuptools.dist:assert_string_list\",\n \"namespace_packages = setuptools.dist:check_nsp\",\n \"extras_require = setuptools.dist:check_extras\",\n \"install_requires = setuptools.dist:check_requirements\",\n \"tests_require = setuptools.dist:check_requirements\",\n \"setup_requires = setuptools.dist:check_requirements\",\n \"python_requires = setuptools.dist:check_specifier\",\n \"entry_points = setuptools.dist:check_entry_points\",\n \"test_suite = setuptools.dist:check_test_suite\",\n \"zip_safe = setuptools.dist:assert_bool\",\n \"package_data = setuptools.dist:check_package_data\",\n \"exclude_package_data = setuptools.dist:check_package_data\",\n \"include_package_data = setuptools.dist:assert_bool\",\n \"packages = setuptools.dist:check_packages\",\n \"dependency_links = setuptools.dist:assert_string_list\",\n \"test_loader = setuptools.dist:check_importable\",\n \"test_runner = setuptools.dist:check_importable\",\n \"use_2to3 = setuptools.dist:assert_bool\",\n \"convert_2to3_doctests = setuptools.dist:assert_string_list\",\n \"use_2to3_fixers = setuptools.dist:assert_string_list\",\n \"use_2to3_exclude_fixers = setuptools.dist:assert_string_list\",\n ],\n \"egg_info.writers\": [\n \"PKG-INFO = setuptools.command.egg_info:write_pkg_info\",\n \"requires.txt = setuptools.command.egg_info:write_requirements\",\n \"entry_points.txt = setuptools.command.egg_info:write_entries\",\n \"eager_resources.txt = setuptools.command.egg_info:overwrite_arg\",\n \"namespace_packages.txt = setuptools.command.egg_info:overwrite_arg\",\n \"top_level.txt = setuptools.command.egg_info:write_toplevel_names\",\n \"depends.txt = setuptools.command.egg_info:warn_depends_obsolete\",\n \"dependency_links.txt = setuptools.command.egg_info:overwrite_arg\",\n ],\n \"console_scripts\": list(_gen_console_scripts()),\n\n \"setuptools.installation\":\n ['eggsecutable = setuptools.command.easy_install:bootstrap'],\n },\n\n\n classifiers=textwrap.dedent(\"\"\"\n Development Status :: 5 - Production/Stable\n Intended Audience :: Developers\n License :: OSI Approved :: MIT License\n Operating System :: OS Independent\n Programming Language :: Python :: 2.6\n Programming Language :: Python :: 2.7\n Programming Language :: Python :: 3\n Programming Language :: Python :: 3.3\n Programming Language :: Python :: 3.4\n Programming Language :: Python :: 3.5\n Topic :: Software Development :: Libraries :: Python Modules\n Topic :: System :: Archiving :: Packaging\n Topic :: System :: Systems Administration\n Topic :: Utilities\n \"\"\").strip().splitlines(),\n extras_require={\n \"ssl:sys_platform=='win32'\": \"wincertstore==0.2\",\n \"certs\": \"certifi==2016.2.28\",\n },\n dependency_links=[\n pypi_link(\n 'certifi-2016.2.28.tar.gz#md5=5d672aa766e1f773c75cfeccd02d3650',\n ),\n pypi_link(\n 'wincertstore-0.2.zip#md5=ae728f2f007185648d0c7a8679b361e2',\n ),\n ],\n scripts=[],\n tests_require=[\n 'setuptools[ssl]',\n 'pytest-flake8',\n # workaround for pytest-flake8 #7\n 'flake8<3dev',\n 'pytest>=2.8',\n ] + (['mock'] if 
sys.version_info[:2] < (3, 3) else []),\n setup_requires=[\n ] + pytest_runner + wheel,\n)\n\nif __name__ == '__main__':\n # allow setup.py to run from another directory\n here and os.path.chdir(here)\n require_metadata()\n dist = setuptools.setup(**setup_params)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\"\"\"\nDistutils setup file, used to install or test 'setuptools'\n\"\"\"\n\nimport io\nimport os\nimport sys\nimport textwrap\n\nimport setuptools\n\n\nhere = os.path.dirname(__file__)\n\n\ndef require_metadata():\n \"Prevent improper installs without necessary metadata. See #659\"\n if not os.path.exists('setuptools.egg-info'):\n msg = \"Cannot build setuptools without metadata. Run bootstrap.py\"\n raise RuntimeError(msg)\n\n\ndef read_commands():\n command_ns = {}\n cmd_module_path = 'setuptools/command/__init__.py'\n init_path = os.path.join(here, cmd_module_path)\n with open(init_path) as init_file:\n exec(init_file.read(), command_ns)\n return command_ns['__all__']\n\n\ndef _gen_console_scripts():\n yield \"easy_install = setuptools.command.easy_install:main\"\n\n # Gentoo distributions manage the python-version-specific scripts\n # themselves, so those platforms define an environment variable to\n # suppress the creation of the version-specific scripts.\n var_names = (\n 'SETUPTOOLS_DISABLE_VERSIONED_EASY_INSTALL_SCRIPT',\n 'DISTRIBUTE_DISABLE_VERSIONED_EASY_INSTALL_SCRIPT',\n )\n if any(os.environ.get(var) not in (None, \"\", \"0\") for var in var_names):\n return\n yield (\"easy_install-{shortver} = setuptools.command.easy_install:main\"\n .format(shortver=sys.version[:3]))\n\n\nreadme_path = os.path.join(here, 'README.rst')\nwith io.open(readme_path, encoding='utf-8') as readme_file:\n long_description = readme_file.read()\n\npackage_data = dict(\n setuptools=['script (dev).tmpl', 'script.tmpl', 'site-patch.py'],\n)\n\nforce_windows_specific_files = (\n os.environ.get(\"SETUPTOOLS_INSTALL_WINDOWS_SPECIFIC_FILES\")\n not in (None, \"\", \"0\")\n)\n\ninclude_windows_files = (\n sys.platform == 'win32' or\n os.name == 'java' and os._name == 'nt' or\n force_windows_specific_files\n)\n\nif include_windows_files:\n package_data.setdefault('setuptools', []).extend(['*.exe'])\n package_data.setdefault('setuptools.command', []).extend(['*.xml'])\n\nneeds_pytest = set(['ptr', 'pytest', 'test']).intersection(sys.argv)\npytest_runner = ['pytest-runner'] if needs_pytest else []\nneeds_wheel = set(['release', 'bdist_wheel']).intersection(sys.argv)\nwheel = ['wheel'] if needs_wheel else []\n\n\ndef pypi_link(pkg_filename):\n \"\"\"\n Given the filename, including md5 fragment, construct the\n dependency link for PyPI.\n \"\"\"\n root = 'https://pypi.python.org/packages/source'\n name, sep, rest = pkg_filename.partition('-')\n parts = root, name[0], name, pkg_filename\n return '/'.join(parts)\n\n\nsetup_params = dict(\n name=\"setuptools\",\n version=\"25.0.1\",\n description=\"Easily download, build, install, upgrade, and uninstall \"\n \"Python packages\",\n author=\"Python Packaging Authority\",\n author_email=\"[email protected]\",\n long_description=long_description,\n keywords=\"CPAN PyPI distutils eggs package management\",\n url=\"https://github.com/pypa/setuptools\",\n src_root=None,\n packages=setuptools.find_packages(exclude=['*.tests']),\n package_data=package_data,\n\n py_modules=['easy_install'],\n\n zip_safe=True,\n\n entry_points={\n \"distutils.commands\": [\n \"%(cmd)s = setuptools.command.%(cmd)s:%(cmd)s\" % locals()\n for cmd in 
read_commands()\n ],\n \"distutils.setup_keywords\": [\n \"eager_resources = setuptools.dist:assert_string_list\",\n \"namespace_packages = setuptools.dist:check_nsp\",\n \"extras_require = setuptools.dist:check_extras\",\n \"install_requires = setuptools.dist:check_requirements\",\n \"tests_require = setuptools.dist:check_requirements\",\n \"setup_requires = setuptools.dist:check_requirements\",\n \"python_requires = setuptools.dist:check_specifier\",\n \"entry_points = setuptools.dist:check_entry_points\",\n \"test_suite = setuptools.dist:check_test_suite\",\n \"zip_safe = setuptools.dist:assert_bool\",\n \"package_data = setuptools.dist:check_package_data\",\n \"exclude_package_data = setuptools.dist:check_package_data\",\n \"include_package_data = setuptools.dist:assert_bool\",\n \"packages = setuptools.dist:check_packages\",\n \"dependency_links = setuptools.dist:assert_string_list\",\n \"test_loader = setuptools.dist:check_importable\",\n \"test_runner = setuptools.dist:check_importable\",\n \"use_2to3 = setuptools.dist:assert_bool\",\n \"convert_2to3_doctests = setuptools.dist:assert_string_list\",\n \"use_2to3_fixers = setuptools.dist:assert_string_list\",\n \"use_2to3_exclude_fixers = setuptools.dist:assert_string_list\",\n ],\n \"egg_info.writers\": [\n \"PKG-INFO = setuptools.command.egg_info:write_pkg_info\",\n \"requires.txt = setuptools.command.egg_info:write_requirements\",\n \"entry_points.txt = setuptools.command.egg_info:write_entries\",\n \"eager_resources.txt = setuptools.command.egg_info:overwrite_arg\",\n \"namespace_packages.txt = setuptools.command.egg_info:overwrite_arg\",\n \"top_level.txt = setuptools.command.egg_info:write_toplevel_names\",\n \"depends.txt = setuptools.command.egg_info:warn_depends_obsolete\",\n \"dependency_links.txt = setuptools.command.egg_info:overwrite_arg\",\n ],\n \"console_scripts\": list(_gen_console_scripts()),\n\n \"setuptools.installation\":\n ['eggsecutable = setuptools.command.easy_install:bootstrap'],\n },\n\n\n classifiers=textwrap.dedent(\"\"\"\n Development Status :: 5 - Production/Stable\n Intended Audience :: Developers\n License :: OSI Approved :: MIT License\n Operating System :: OS Independent\n Programming Language :: Python :: 2.6\n Programming Language :: Python :: 2.7\n Programming Language :: Python :: 3\n Programming Language :: Python :: 3.3\n Programming Language :: Python :: 3.4\n Programming Language :: Python :: 3.5\n Topic :: Software Development :: Libraries :: Python Modules\n Topic :: System :: Archiving :: Packaging\n Topic :: System :: Systems Administration\n Topic :: Utilities\n \"\"\").strip().splitlines(),\n extras_require={\n \"ssl:sys_platform=='win32'\": \"wincertstore==0.2\",\n \"certs\": \"certifi==2016.2.28\",\n },\n dependency_links=[\n pypi_link(\n 'certifi-2016.2.28.tar.gz#md5=5d672aa766e1f773c75cfeccd02d3650',\n ),\n pypi_link(\n 'wincertstore-0.2.zip#md5=ae728f2f007185648d0c7a8679b361e2',\n ),\n ],\n scripts=[],\n tests_require=[\n 'setuptools[ssl]',\n 'pytest-flake8',\n # workaround for pytest-flake8 #7\n 'flake8<3dev',\n 'pytest>=2.8',\n ] + (['mock'] if sys.version_info[:2] < (3, 3) else []),\n setup_requires=[\n ] + pytest_runner + wheel,\n)\n\nif __name__ == '__main__':\n # allow setup.py to run from another directory\n here and os.chdir(here)\n require_metadata()\n dist = setuptools.setup(**setup_params)\n", "path": "setup.py"}]}
3,792
86
gh_patches_debug_40555
rasdani/github-patches
git_diff
bids-standard__pybids-759
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Lag transformation Thinking of implementing Lag, since it's such a common transformation in practice. ---- Current docs: > ### Lag(Input, Output=None): > [TODO] > > Arguments: > * Input(string, mandatory): The name of the variable to operate on. > * Output (string, optional): the optional list of column names to write out to. By default, computation is done in-place (i.e., input columnise overwritten). ---- It's a bit under-specified, so while fleshing it out (and implementing) we have opportunities to think about what would make it most useful. Questions: 1) Should we permit a lag != 1? If so, is definitially `Lag(Lag(x, 1), 1) = Lag(x, 2)`? 2) Should we permit negative lags? 3) Should we allow taking the difference in the same step? So we get `Lag(x, diff=True) = Lag(x, diff=False) - x`? (or `x - Lag(x, diff=False)`)? 4) Should we default to overwriting columns or generating new columns from the original name? BEP 012 [would suggest](https://github.com/bids-standard/bids-specification/pull/519/files#diff-093ab536060c8ee949aa452200ec2676377a2fcd1a8674f91c8e6a1cfd42281fR127) calling `Lag(x) = x_shift_back`, though `x_lag` might be clearer. Any other thoughts? cc @adelavega --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `bids/modeling/transformations/compute.py` Content: ``` 1 """ 2 Transformations that primarily involve numerical computation on variables. 3 """ 4 import math 5 import numpy as np 6 import pandas as pd 7 from bids.utils import listify 8 from .base import Transformation 9 from bids.modeling import hrf 10 from bids.variables import SparseRunVariable, DenseRunVariable 11 12 13 def _fractional_gcd(vals, res=0.001): 14 from functools import reduce 15 from math import gcd 16 return reduce(gcd, (int(np.round(val / res)) for val in vals)) * res 17 18 19 class Convolve(Transformation): 20 """Convolve the input variable with an HRF. 21 22 Parameters 23 ---------- 24 var : Variable 25 The variable to convolve. 26 model : str 27 The name of the HRF model to apply. Must be one of 'spm', 28 'glover', or 'fir'. 29 derivative : bool 30 Whether or not to include the temporal derivative. 31 dispersion : bool 32 Whether or not to include the dispersion derivative. 33 fir_delays : iterable 34 A list or iterable of delays to use if model is 35 'fir' (ignored otherwise). Spacing between delays must be fixed. 36 37 Notes 38 ----- 39 Uses the HRF convolution functions implemented in nistats. 
40 """ 41 42 _input_type = 'variable' 43 _return_type = 'variable' 44 45 def _transform(self, var, model='spm', derivative=False, dispersion=False, 46 fir_delays=None): 47 48 model = model.lower() 49 50 df = var.to_df(entities=False) 51 52 if isinstance(var, SparseRunVariable): 53 sampling_rate = self.collection.sampling_rate 54 dur = var.get_duration() 55 resample_frames = np.linspace( 56 0, dur, int(math.ceil(dur * sampling_rate)), endpoint=False) 57 safety = 2 # Double frequency to resolve events 58 else: 59 resample_frames = df['onset'].values 60 sampling_rate = var.sampling_rate 61 safety = 1 # Maximum signal resolution is already 0.5 * SR 62 63 vals = df[['onset', 'duration', 'amplitude']].values.T 64 65 if model in ['spm', 'glover']: 66 if derivative: 67 model += ' + derivative' 68 if dispersion: 69 model += ' + dispersion' 70 elif model != 'fir': 71 raise ValueError("Model must be one of 'spm', 'glover', or 'fir'.") 72 73 # Sampling at >100Hz will never be useful, but can be wildly expensive 74 max_freq, min_interval = 100, 0.01 75 # Sampling at <1Hz can degrade signals 76 min_freq, max_interval = 1, 1 77 78 # Given the sampling rate, determine an oversampling factor to ensure that 79 # events can be modeled with reasonable precision 80 unique_onsets = np.unique(df.onset) 81 unique_durations = np.unique(df.duration) 82 # Align existing data ticks with, event onsets and offsets, up to ms resolution 83 # Note that GCD ignores zeros, so 0 onsets and impulse responses (0 durations) do 84 # not harm this. 85 required_resolution = _fractional_gcd( 86 np.concatenate((unique_onsets, unique_durations)), 87 res=min_interval) 88 # Bound the effective sampling rate between min_freq and max_freq 89 effective_sr = max(min_freq, min(safety / required_resolution, max_freq)) 90 convolved = hrf.compute_regressor( 91 vals, model, resample_frames, fir_delays=fir_delays, min_onset=0, 92 oversampling=np.ceil(effective_sr / sampling_rate) 93 ) 94 95 return DenseRunVariable( 96 name=var.name, values=convolved[0], run_info=var.run_info, 97 source=var.source, sampling_rate=sampling_rate) 98 99 100 class Demean(Transformation): 101 102 def _transform(self, data): 103 return data - data.mean() 104 105 106 class Orthogonalize(Transformation): 107 108 _variables_used = ('variables', 'other') 109 _densify = ('variables', 'other') 110 _aligned_required = 'force_dense' 111 _aligned_variables = ('other') 112 113 def _transform(self, var, other): 114 115 other = listify(other) 116 117 # Set up X matrix and slice into it based on target variable indices 118 X = np.array([self._variables[c].values.values.squeeze() 119 for c in other]).T 120 X = X[var.index, :] 121 assert len(X) == len(var) 122 y = var.values 123 _aX = np.c_[np.ones(len(y)), X] 124 coefs, resids, rank, s = np.linalg.lstsq(_aX, y, rcond=None) 125 result = pd.DataFrame(y - X.dot(coefs[1:]), index=var.index) 126 return result 127 128 129 class Product(Transformation): 130 131 _loopable = False 132 _groupable = False 133 _aligned_required = True 134 _output_required = True 135 136 def _transform(self, data): 137 data = pd.concat(data, axis=1, sort=True) 138 return data.product(1) 139 140 141 class Scale(Transformation): 142 """Scale a variable. 143 144 Parameters 145 ---------- 146 data : :obj:`pandas.Series` or :obj:`pandas.DataFrame` 147 The variables to scale. 148 demean : bool 149 If True, demean each column. 150 rescale : bool 151 If True, divide variables by their standard deviation. 
152 replace_na : str 153 Whether/when to replace missing values with 0. If 154 None, no replacement is performed. If 'before', missing values are 155 replaced with 0's before scaling. If 'after', missing values are 156 replaced with 0 after scaling. 157 158 Notes 159 ----- 160 If a constant column is passed in, and replace_na is None or 'before', an 161 exception will be raised. 162 """ 163 164 def _transform(self, data, demean=True, rescale=True, replace_na=None): 165 if data.nunique() == 1 and replace_na in {None, 'before'}: 166 val = data.unique()[0] 167 raise ValueError("Cannot scale a column with constant value ({})! " 168 "If you want a constant column of 0's returned, " 169 "set replace_na to 'after'.".format(val)) 170 if replace_na == 'before': 171 data = data.fillna(0.) 172 if demean: 173 data -= data.mean() 174 if rescale: 175 data /= data.std() 176 if replace_na == 'after': 177 data = data.fillna(0.) 178 return data 179 180 181 class Sum(Transformation): 182 183 _loopable = False 184 _groupable = False 185 _aligned_required = True 186 _output_required = True 187 188 def _transform(self, data, weights=None): 189 data = pd.concat(data, axis=1, sort=True) 190 if weights is None: 191 weights = np.ones(data.shape[1]) 192 else: 193 weights = np.array(weights) 194 if len(weights.ravel()) != data.shape[1]: 195 raise ValueError("If weights are passed to sum(), the number " 196 "of elements must equal number of variables" 197 " being summed.") 198 return (data * weights).sum(axis=1) 199 200 201 class Threshold(Transformation): 202 """Threshold and/or binarize a variable. 203 204 Parameters 205 ---------- 206 data :obj:`pandas.Series` or :obj:`pandas.DataFrame` 207 The pandas structure to threshold. 208 threshold : float 209 The value to binarize around (values above will 210 be assigned 1, values below will be assigned 0). 211 binarize : bool 212 If True, binarizes all non-zero values (i.e., every 213 non-zero value will be set to 1). 214 above : bool 215 Specifies which values to retain with respect to the 216 cut-off. If True, all value above the threshold will be kept; if 217 False, all values below the threshold will be kept. Defaults to 218 True. 219 signed : bool 220 Specifies whether to treat the threshold as signed 221 (default) or unsigned. For example, when passing above=True and 222 threshold=3, if signed=True, all and only values above +3 would be 223 retained. If signed=False, all absolute values > 3 would be retained 224 (i.e.,values in the range -3 < X < 3 would be set to 0). 225 """ 226 227 _groupable = False 228 229 def _transform(self, data, threshold=0., binarize=False, above=True, 230 signed=True): 231 if not signed: 232 threshold = np.abs(threshold) 233 data = data.abs() 234 keep = data >= threshold if above else data <= threshold 235 data[~keep] = 0 236 if binarize: 237 data[keep] = 1 238 return data 239 240 241 class And_(Transformation): 242 """Logical AND on two or more variables. 243 244 Parameters 245 ---------- 246 dfs : list of :obj:`pandas.DataFrame` 247 variables to enter into the conjunction. 248 """ 249 250 _loopable = False 251 _groupable = False 252 _output_required = True 253 _aligned_required = True 254 255 def _transform(self, dfs): 256 df = pd.concat(dfs, axis=1, sort=True) 257 return df.all(axis=1).astype(int) 258 259 260 class Not(Transformation): 261 """Logical negation of a variable. 262 263 Parameters 264 ---------- 265 var : :obj:`pandas.Series` 266 Variable to negate. Must be convertible to bool. 
267 """ 268 269 _loopable = True 270 _groupable = False 271 272 def _transform(self, var): 273 return ~var.astype(bool) 274 275 276 class Or_(Transformation): 277 """Logical OR (inclusive) on two or more variables. 278 279 Parameters 280 ---------- 281 dfs : list of :obj:`pandas.DataFrame` 282 variables to enter into the disjunction. 283 """ 284 285 _loopable = False 286 _groupable = False 287 _output_required = True 288 _aligned_required = True 289 290 def _transform(self, dfs): 291 df = pd.concat(dfs, axis=1, sort=True) 292 return df.any(axis=1).astype(int) 293 ``` Path: `bids/modeling/transformations/__init__.py` Content: ``` 1 from .compute import (Sum, Product, Scale, Orthogonalize, Threshold, And_, Or_, 2 Not, Demean, Convolve) 3 from .munge import (Split, Rename, Assign, Copy, Factor, Filter, Select, 4 Delete, DropNA, Replace, ToDense, Group, Resample) 5 from .base import TransformerManager 6 7 And = And_ 8 Or = Or_ 9 10 __all__ = [ 11 'And', 12 'And_', 13 'Assign', 14 'Convolve', 15 'Copy', 16 'Demean', 17 'Delete', 18 'DropNA', 19 'Factor', 20 'Filter', 21 'Group', 22 'Not', 23 'Or', 24 'Or_', 25 'Orthogonalize', 26 'Product', 27 'Rename', 28 'Replace', 29 'Resample', 30 'Scale', 31 'Select', 32 'Split', 33 'Sum', 34 'Threshold', 35 'ToDense', 36 'TransformerManager' 37 ] 38 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/bids/modeling/transformations/__init__.py b/bids/modeling/transformations/__init__.py --- a/bids/modeling/transformations/__init__.py +++ b/bids/modeling/transformations/__init__.py @@ -1,5 +1,5 @@ from .compute import (Sum, Product, Scale, Orthogonalize, Threshold, And_, Or_, - Not, Demean, Convolve) + Not, Demean, Convolve, Lag) from .munge import (Split, Rename, Assign, Copy, Factor, Filter, Select, Delete, DropNA, Replace, ToDense, Group, Resample) from .base import TransformerManager @@ -19,6 +19,7 @@ 'Factor', 'Filter', 'Group', + 'Lag', 'Not', 'Or', 'Or_', diff --git a/bids/modeling/transformations/compute.py b/bids/modeling/transformations/compute.py --- a/bids/modeling/transformations/compute.py +++ b/bids/modeling/transformations/compute.py @@ -4,6 +4,7 @@ import math import numpy as np import pandas as pd +from scipy import ndimage as ndi from bids.utils import listify from .base import Transformation from bids.modeling import hrf @@ -290,3 +291,68 @@ def _transform(self, dfs): df = pd.concat(dfs, axis=1, sort=True) return df.any(axis=1).astype(int) + + +class Lag(Transformation): + """Lag variable + + Returns a variable that is lagged by a specified number of time points. + Spline interpolation of the requested ``order`` is used for non-integer + shifts. + Points outside the input are filled according to the given ``mode``. + Negative shifts move values toward the beginning of the sequence. + If ``difference`` is ``True``, the backward difference is calculated for + positive shifts, and the forward difference is calculated for negative + shifts. + + Additional ``mode``s may be defined if there is need for them. + The ``scipy.ndimage.shift`` method provides the reference implementation + for all current modes. "Constant" is equivalent to the shift parameter + "cval". + + + Parameters + ---------- + var : :obj:`numpy.ndarray` + variable to lag + shift : float, optional + number of places to shift the values (default: 1) + order : int, optional + order of spline interpolation, from 0-5 (default: 3) + mode : string + the `mode` parameter determines how the input array is extended + beyond its boundaries. Default is 'nearest'. + The following values are accepted: + + "nearest" (a a a a | a b c d | d d d d) + The input is extended by replicating the boundary values + "reflect" (d c b a | a b c d | d c b a) + The input is extended by reflecting the array over the edge + "constant" (k k k k | a b c d | k k k k) + The input is extended by filling all values beyond the edge + with the same constant value, defined by the ``constant`` parameter + constant : float, optional + value to fill past edges of input if ``mode`` is ``"constant"``. + (default: 0) + difference : boolean, optional + Calculate the backward (if shift is positive) or forward (if shift is + negative) difference. + For the forward difference dx of an array x, dx[i] = x[i+1] - x[i]. + For the backward difference dx of an array x, dx[i] = x[i] - x[i-1]. + (default: ``False``) + """ + + _input_type = 'numpy' + _return_type = 'numpy' + + def _transform(self, var, shift=1, order=3, mode="nearest", + constant=0.0, difference=False): + var = var.flatten() + shifted = ndi.shift(var, shift=shift, order=order, mode=mode, + cval=constant) + if not difference: + return shifted + elif shift >= 0: + return var - shifted + else: + return shifted - var
{"golden_diff": "diff --git a/bids/modeling/transformations/__init__.py b/bids/modeling/transformations/__init__.py\n--- a/bids/modeling/transformations/__init__.py\n+++ b/bids/modeling/transformations/__init__.py\n@@ -1,5 +1,5 @@\n from .compute import (Sum, Product, Scale, Orthogonalize, Threshold, And_, Or_,\n- Not, Demean, Convolve)\n+ Not, Demean, Convolve, Lag)\n from .munge import (Split, Rename, Assign, Copy, Factor, Filter, Select,\n Delete, DropNA, Replace, ToDense, Group, Resample)\n from .base import TransformerManager\n@@ -19,6 +19,7 @@\n 'Factor',\n 'Filter',\n 'Group',\n+ 'Lag',\n 'Not',\n 'Or',\n 'Or_',\ndiff --git a/bids/modeling/transformations/compute.py b/bids/modeling/transformations/compute.py\n--- a/bids/modeling/transformations/compute.py\n+++ b/bids/modeling/transformations/compute.py\n@@ -4,6 +4,7 @@\n import math\n import numpy as np\n import pandas as pd\n+from scipy import ndimage as ndi\n from bids.utils import listify\n from .base import Transformation\n from bids.modeling import hrf\n@@ -290,3 +291,68 @@\n def _transform(self, dfs):\n df = pd.concat(dfs, axis=1, sort=True)\n return df.any(axis=1).astype(int)\n+\n+\n+class Lag(Transformation):\n+ \"\"\"Lag variable\n+\n+ Returns a variable that is lagged by a specified number of time points.\n+ Spline interpolation of the requested ``order`` is used for non-integer\n+ shifts.\n+ Points outside the input are filled according to the given ``mode``.\n+ Negative shifts move values toward the beginning of the sequence.\n+ If ``difference`` is ``True``, the backward difference is calculated for\n+ positive shifts, and the forward difference is calculated for negative\n+ shifts.\n+\n+ Additional ``mode``s may be defined if there is need for them.\n+ The ``scipy.ndimage.shift`` method provides the reference implementation\n+ for all current modes. \"Constant\" is equivalent to the shift parameter\n+ \"cval\".\n+\n+\n+ Parameters\n+ ----------\n+ var : :obj:`numpy.ndarray`\n+ variable to lag\n+ shift : float, optional\n+ number of places to shift the values (default: 1)\n+ order : int, optional\n+ order of spline interpolation, from 0-5 (default: 3)\n+ mode : string\n+ the `mode` parameter determines how the input array is extended\n+ beyond its boundaries. 
Default is 'nearest'.\n+ The following values are accepted:\n+\n+ \"nearest\" (a a a a | a b c d | d d d d)\n+ The input is extended by replicating the boundary values\n+ \"reflect\" (d c b a | a b c d | d c b a)\n+ The input is extended by reflecting the array over the edge\n+ \"constant\" (k k k k | a b c d | k k k k)\n+ The input is extended by filling all values beyond the edge\n+ with the same constant value, defined by the ``constant`` parameter\n+ constant : float, optional\n+ value to fill past edges of input if ``mode`` is ``\"constant\"``.\n+ (default: 0)\n+ difference : boolean, optional\n+ Calculate the backward (if shift is positive) or forward (if shift is\n+ negative) difference.\n+ For the forward difference dx of an array x, dx[i] = x[i+1] - x[i].\n+ For the backward difference dx of an array x, dx[i] = x[i] - x[i-1].\n+ (default: ``False``)\n+ \"\"\"\n+\n+ _input_type = 'numpy'\n+ _return_type = 'numpy'\n+\n+ def _transform(self, var, shift=1, order=3, mode=\"nearest\",\n+ constant=0.0, difference=False):\n+ var = var.flatten()\n+ shifted = ndi.shift(var, shift=shift, order=order, mode=mode,\n+ cval=constant)\n+ if not difference:\n+ return shifted\n+ elif shift >= 0:\n+ return var - shifted\n+ else:\n+ return shifted - var\n", "issue": "Lag transformation\nThinking of implementing Lag, since it's such a common transformation in practice.\r\n\r\n----\r\n\r\nCurrent docs:\r\n\r\n> ### Lag(Input, Output=None):\r\n> [TODO]\r\n> \r\n> Arguments:\r\n> * Input(string, mandatory): The name of the variable to operate on.\r\n> * Output (string, optional): the optional list of column names to write out to. By default, computation is done in-place (i.e., input columnise overwritten).\r\n\r\n----\r\n\r\nIt's a bit under-specified, so while fleshing it out (and implementing) we have opportunities to think about what would make it most useful. Questions:\r\n\r\n1) Should we permit a lag != 1? If so, is definitially `Lag(Lag(x, 1), 1) = Lag(x, 2)`?\r\n2) Should we permit negative lags?\r\n3) Should we allow taking the difference in the same step? So we get `Lag(x, diff=True) = Lag(x, diff=False) - x`? (or `x - Lag(x, diff=False)`)?\r\n4) Should we default to overwriting columns or generating new columns from the original name? BEP 012 [would suggest](https://github.com/bids-standard/bids-specification/pull/519/files#diff-093ab536060c8ee949aa452200ec2676377a2fcd1a8674f91c8e6a1cfd42281fR127) calling `Lag(x) = x_shift_back`, though `x_lag` might be clearer.\r\n\r\nAny other thoughts?\r\n\r\ncc @adelavega \n", "before_files": [{"content": "\"\"\"\nTransformations that primarily involve numerical computation on variables.\n\"\"\"\nimport math\nimport numpy as np\nimport pandas as pd\nfrom bids.utils import listify\nfrom .base import Transformation\nfrom bids.modeling import hrf\nfrom bids.variables import SparseRunVariable, DenseRunVariable\n\n\ndef _fractional_gcd(vals, res=0.001):\n from functools import reduce\n from math import gcd\n return reduce(gcd, (int(np.round(val / res)) for val in vals)) * res\n\n\nclass Convolve(Transformation):\n \"\"\"Convolve the input variable with an HRF.\n\n Parameters\n ----------\n var : Variable\n The variable to convolve.\n model : str\n The name of the HRF model to apply. 
Must be one of 'spm',\n 'glover', or 'fir'.\n derivative : bool\n Whether or not to include the temporal derivative.\n dispersion : bool\n Whether or not to include the dispersion derivative.\n fir_delays : iterable\n A list or iterable of delays to use if model is\n 'fir' (ignored otherwise). Spacing between delays must be fixed.\n\n Notes\n -----\n Uses the HRF convolution functions implemented in nistats.\n \"\"\"\n\n _input_type = 'variable'\n _return_type = 'variable'\n\n def _transform(self, var, model='spm', derivative=False, dispersion=False,\n fir_delays=None):\n\n model = model.lower()\n\n df = var.to_df(entities=False)\n\n if isinstance(var, SparseRunVariable):\n sampling_rate = self.collection.sampling_rate\n dur = var.get_duration()\n resample_frames = np.linspace(\n 0, dur, int(math.ceil(dur * sampling_rate)), endpoint=False)\n safety = 2 # Double frequency to resolve events\n else:\n resample_frames = df['onset'].values\n sampling_rate = var.sampling_rate\n safety = 1 # Maximum signal resolution is already 0.5 * SR\n\n vals = df[['onset', 'duration', 'amplitude']].values.T\n\n if model in ['spm', 'glover']:\n if derivative:\n model += ' + derivative'\n if dispersion:\n model += ' + dispersion'\n elif model != 'fir':\n raise ValueError(\"Model must be one of 'spm', 'glover', or 'fir'.\")\n\n # Sampling at >100Hz will never be useful, but can be wildly expensive\n max_freq, min_interval = 100, 0.01\n # Sampling at <1Hz can degrade signals\n min_freq, max_interval = 1, 1\n\n # Given the sampling rate, determine an oversampling factor to ensure that\n # events can be modeled with reasonable precision\n unique_onsets = np.unique(df.onset)\n unique_durations = np.unique(df.duration)\n # Align existing data ticks with, event onsets and offsets, up to ms resolution\n # Note that GCD ignores zeros, so 0 onsets and impulse responses (0 durations) do\n # not harm this.\n required_resolution = _fractional_gcd(\n np.concatenate((unique_onsets, unique_durations)),\n res=min_interval)\n # Bound the effective sampling rate between min_freq and max_freq\n effective_sr = max(min_freq, min(safety / required_resolution, max_freq))\n convolved = hrf.compute_regressor(\n vals, model, resample_frames, fir_delays=fir_delays, min_onset=0,\n oversampling=np.ceil(effective_sr / sampling_rate)\n )\n\n return DenseRunVariable(\n name=var.name, values=convolved[0], run_info=var.run_info,\n source=var.source, sampling_rate=sampling_rate)\n\n\nclass Demean(Transformation):\n\n def _transform(self, data):\n return data - data.mean()\n\n\nclass Orthogonalize(Transformation):\n\n _variables_used = ('variables', 'other')\n _densify = ('variables', 'other')\n _aligned_required = 'force_dense'\n _aligned_variables = ('other')\n\n def _transform(self, var, other):\n\n other = listify(other)\n\n # Set up X matrix and slice into it based on target variable indices\n X = np.array([self._variables[c].values.values.squeeze()\n for c in other]).T\n X = X[var.index, :]\n assert len(X) == len(var)\n y = var.values\n _aX = np.c_[np.ones(len(y)), X]\n coefs, resids, rank, s = np.linalg.lstsq(_aX, y, rcond=None)\n result = pd.DataFrame(y - X.dot(coefs[1:]), index=var.index)\n return result\n\n\nclass Product(Transformation):\n\n _loopable = False\n _groupable = False\n _aligned_required = True\n _output_required = True\n\n def _transform(self, data):\n data = pd.concat(data, axis=1, sort=True)\n return data.product(1)\n\n\nclass Scale(Transformation):\n \"\"\"Scale a variable.\n\n Parameters\n ----------\n data : 
:obj:`pandas.Series` or :obj:`pandas.DataFrame`\n The variables to scale.\n demean : bool\n If True, demean each column.\n rescale : bool\n If True, divide variables by their standard deviation.\n replace_na : str\n Whether/when to replace missing values with 0. If\n None, no replacement is performed. If 'before', missing values are\n replaced with 0's before scaling. If 'after', missing values are\n replaced with 0 after scaling.\n\n Notes\n -----\n If a constant column is passed in, and replace_na is None or 'before', an\n exception will be raised.\n \"\"\"\n\n def _transform(self, data, demean=True, rescale=True, replace_na=None):\n if data.nunique() == 1 and replace_na in {None, 'before'}:\n val = data.unique()[0]\n raise ValueError(\"Cannot scale a column with constant value ({})! \"\n \"If you want a constant column of 0's returned, \"\n \"set replace_na to 'after'.\".format(val))\n if replace_na == 'before':\n data = data.fillna(0.)\n if demean:\n data -= data.mean()\n if rescale:\n data /= data.std()\n if replace_na == 'after':\n data = data.fillna(0.)\n return data\n\n\nclass Sum(Transformation):\n\n _loopable = False\n _groupable = False\n _aligned_required = True\n _output_required = True\n\n def _transform(self, data, weights=None):\n data = pd.concat(data, axis=1, sort=True)\n if weights is None:\n weights = np.ones(data.shape[1])\n else:\n weights = np.array(weights)\n if len(weights.ravel()) != data.shape[1]:\n raise ValueError(\"If weights are passed to sum(), the number \"\n \"of elements must equal number of variables\"\n \" being summed.\")\n return (data * weights).sum(axis=1)\n\n\nclass Threshold(Transformation):\n \"\"\"Threshold and/or binarize a variable.\n\n Parameters\n ----------\n data :obj:`pandas.Series` or :obj:`pandas.DataFrame`\n The pandas structure to threshold.\n threshold : float\n The value to binarize around (values above will\n be assigned 1, values below will be assigned 0).\n binarize : bool\n If True, binarizes all non-zero values (i.e., every\n non-zero value will be set to 1).\n above : bool\n Specifies which values to retain with respect to the\n cut-off. If True, all value above the threshold will be kept; if\n False, all values below the threshold will be kept. Defaults to\n True.\n signed : bool\n Specifies whether to treat the threshold as signed\n (default) or unsigned. For example, when passing above=True and\n threshold=3, if signed=True, all and only values above +3 would be\n retained. If signed=False, all absolute values > 3 would be retained\n (i.e.,values in the range -3 < X < 3 would be set to 0).\n \"\"\"\n\n _groupable = False\n\n def _transform(self, data, threshold=0., binarize=False, above=True,\n signed=True):\n if not signed:\n threshold = np.abs(threshold)\n data = data.abs()\n keep = data >= threshold if above else data <= threshold\n data[~keep] = 0\n if binarize:\n data[keep] = 1\n return data\n\n\nclass And_(Transformation):\n \"\"\"Logical AND on two or more variables.\n\n Parameters\n ----------\n dfs : list of :obj:`pandas.DataFrame`\n variables to enter into the conjunction.\n \"\"\"\n\n _loopable = False\n _groupable = False\n _output_required = True\n _aligned_required = True\n\n def _transform(self, dfs):\n df = pd.concat(dfs, axis=1, sort=True)\n return df.all(axis=1).astype(int)\n\n\nclass Not(Transformation):\n \"\"\"Logical negation of a variable.\n\n Parameters\n ----------\n var : :obj:`pandas.Series`\n Variable to negate. 
Must be convertible to bool.\n \"\"\"\n\n _loopable = True\n _groupable = False\n\n def _transform(self, var):\n return ~var.astype(bool)\n\n\nclass Or_(Transformation):\n \"\"\"Logical OR (inclusive) on two or more variables.\n\n Parameters\n ----------\n dfs : list of :obj:`pandas.DataFrame`\n variables to enter into the disjunction.\n \"\"\"\n\n _loopable = False\n _groupable = False\n _output_required = True\n _aligned_required = True\n\n def _transform(self, dfs):\n df = pd.concat(dfs, axis=1, sort=True)\n return df.any(axis=1).astype(int)\n", "path": "bids/modeling/transformations/compute.py"}, {"content": "from .compute import (Sum, Product, Scale, Orthogonalize, Threshold, And_, Or_,\n Not, Demean, Convolve)\nfrom .munge import (Split, Rename, Assign, Copy, Factor, Filter, Select,\n Delete, DropNA, Replace, ToDense, Group, Resample)\nfrom .base import TransformerManager\n\nAnd = And_\nOr = Or_\n\n__all__ = [\n 'And',\n 'And_',\n 'Assign',\n 'Convolve',\n 'Copy',\n 'Demean',\n 'Delete',\n 'DropNA',\n 'Factor',\n 'Filter',\n 'Group',\n 'Not',\n 'Or',\n 'Or_',\n 'Orthogonalize',\n 'Product',\n 'Rename',\n 'Replace',\n 'Resample',\n 'Scale',\n 'Select',\n 'Split',\n 'Sum',\n 'Threshold',\n 'ToDense',\n 'TransformerManager'\n]\n", "path": "bids/modeling/transformations/__init__.py"}], "after_files": [{"content": "\"\"\"\nTransformations that primarily involve numerical computation on variables.\n\"\"\"\nimport math\nimport numpy as np\nimport pandas as pd\nfrom scipy import ndimage as ndi\nfrom bids.utils import listify\nfrom .base import Transformation\nfrom bids.modeling import hrf\nfrom bids.variables import SparseRunVariable, DenseRunVariable\n\n\ndef _fractional_gcd(vals, res=0.001):\n from functools import reduce\n from math import gcd\n return reduce(gcd, (int(np.round(val / res)) for val in vals)) * res\n\n\nclass Convolve(Transformation):\n \"\"\"Convolve the input variable with an HRF.\n\n Parameters\n ----------\n var : Variable\n The variable to convolve.\n model : str\n The name of the HRF model to apply. Must be one of 'spm',\n 'glover', or 'fir'.\n derivative : bool\n Whether or not to include the temporal derivative.\n dispersion : bool\n Whether or not to include the dispersion derivative.\n fir_delays : iterable\n A list or iterable of delays to use if model is\n 'fir' (ignored otherwise). 
Spacing between delays must be fixed.\n\n Notes\n -----\n Uses the HRF convolution functions implemented in nistats.\n \"\"\"\n\n _input_type = 'variable'\n _return_type = 'variable'\n\n def _transform(self, var, model='spm', derivative=False, dispersion=False,\n fir_delays=None):\n\n model = model.lower()\n\n df = var.to_df(entities=False)\n\n if isinstance(var, SparseRunVariable):\n sampling_rate = self.collection.sampling_rate\n dur = var.get_duration()\n resample_frames = np.linspace(\n 0, dur, int(math.ceil(dur * sampling_rate)), endpoint=False)\n safety = 2 # Double frequency to resolve events\n else:\n resample_frames = df['onset'].values\n sampling_rate = var.sampling_rate\n safety = 1 # Maximum signal resolution is already 0.5 * SR\n\n vals = df[['onset', 'duration', 'amplitude']].values.T\n\n if model in ['spm', 'glover']:\n if derivative:\n model += ' + derivative'\n if dispersion:\n model += ' + dispersion'\n elif model != 'fir':\n raise ValueError(\"Model must be one of 'spm', 'glover', or 'fir'.\")\n\n # Sampling at >100Hz will never be useful, but can be wildly expensive\n max_freq, min_interval = 100, 0.01\n # Sampling at <1Hz can degrade signals\n min_freq, max_interval = 1, 1\n\n # Given the sampling rate, determine an oversampling factor to ensure that\n # events can be modeled with reasonable precision\n unique_onsets = np.unique(df.onset)\n unique_durations = np.unique(df.duration)\n # Align existing data ticks with, event onsets and offsets, up to ms resolution\n # Note that GCD ignores zeros, so 0 onsets and impulse responses (0 durations) do\n # not harm this.\n required_resolution = _fractional_gcd(\n np.concatenate((unique_onsets, unique_durations)),\n res=min_interval)\n # Bound the effective sampling rate between min_freq and max_freq\n effective_sr = max(min_freq, min(safety / required_resolution, max_freq))\n convolved = hrf.compute_regressor(\n vals, model, resample_frames, fir_delays=fir_delays, min_onset=0,\n oversampling=np.ceil(effective_sr / sampling_rate)\n )\n\n return DenseRunVariable(\n name=var.name, values=convolved[0], run_info=var.run_info,\n source=var.source, sampling_rate=sampling_rate)\n\n\nclass Demean(Transformation):\n\n def _transform(self, data):\n return data - data.mean()\n\n\nclass Orthogonalize(Transformation):\n\n _variables_used = ('variables', 'other')\n _densify = ('variables', 'other')\n _aligned_required = 'force_dense'\n _aligned_variables = ('other')\n\n def _transform(self, var, other):\n\n other = listify(other)\n\n # Set up X matrix and slice into it based on target variable indices\n X = np.array([self._variables[c].values.values.squeeze()\n for c in other]).T\n X = X[var.index, :]\n assert len(X) == len(var)\n y = var.values\n _aX = np.c_[np.ones(len(y)), X]\n coefs, resids, rank, s = np.linalg.lstsq(_aX, y, rcond=None)\n result = pd.DataFrame(y - X.dot(coefs[1:]), index=var.index)\n return result\n\n\nclass Product(Transformation):\n\n _loopable = False\n _groupable = False\n _aligned_required = True\n _output_required = True\n\n def _transform(self, data):\n data = pd.concat(data, axis=1, sort=True)\n return data.product(1)\n\n\nclass Scale(Transformation):\n \"\"\"Scale a variable.\n\n Parameters\n ----------\n data : :obj:`pandas.Series` or :obj:`pandas.DataFrame`\n The variables to scale.\n demean : bool\n If True, demean each column.\n rescale : bool\n If True, divide variables by their standard deviation.\n replace_na : str\n Whether/when to replace missing values with 0. If\n None, no replacement is performed. 
If 'before', missing values are\n replaced with 0's before scaling. If 'after', missing values are\n replaced with 0 after scaling.\n\n Notes\n -----\n If a constant column is passed in, and replace_na is None or 'before', an\n exception will be raised.\n \"\"\"\n\n def _transform(self, data, demean=True, rescale=True, replace_na=None):\n if data.nunique() == 1 and replace_na in {None, 'before'}:\n val = data.unique()[0]\n raise ValueError(\"Cannot scale a column with constant value ({})! \"\n \"If you want a constant column of 0's returned, \"\n \"set replace_na to 'after'.\".format(val))\n if replace_na == 'before':\n data = data.fillna(0.)\n if demean:\n data -= data.mean()\n if rescale:\n data /= data.std()\n if replace_na == 'after':\n data = data.fillna(0.)\n return data\n\n\nclass Sum(Transformation):\n\n _loopable = False\n _groupable = False\n _aligned_required = True\n _output_required = True\n\n def _transform(self, data, weights=None):\n data = pd.concat(data, axis=1, sort=True)\n if weights is None:\n weights = np.ones(data.shape[1])\n else:\n weights = np.array(weights)\n if len(weights.ravel()) != data.shape[1]:\n raise ValueError(\"If weights are passed to sum(), the number \"\n \"of elements must equal number of variables\"\n \" being summed.\")\n return (data * weights).sum(axis=1)\n\n\nclass Threshold(Transformation):\n \"\"\"Threshold and/or binarize a variable.\n\n Parameters\n ----------\n data :obj:`pandas.Series` or :obj:`pandas.DataFrame`\n The pandas structure to threshold.\n threshold : float\n The value to binarize around (values above will\n be assigned 1, values below will be assigned 0).\n binarize : bool\n If True, binarizes all non-zero values (i.e., every\n non-zero value will be set to 1).\n above : bool\n Specifies which values to retain with respect to the\n cut-off. If True, all value above the threshold will be kept; if\n False, all values below the threshold will be kept. Defaults to\n True.\n signed : bool\n Specifies whether to treat the threshold as signed\n (default) or unsigned. For example, when passing above=True and\n threshold=3, if signed=True, all and only values above +3 would be\n retained. If signed=False, all absolute values > 3 would be retained\n (i.e.,values in the range -3 < X < 3 would be set to 0).\n \"\"\"\n\n _groupable = False\n\n def _transform(self, data, threshold=0., binarize=False, above=True,\n signed=True):\n if not signed:\n threshold = np.abs(threshold)\n data = data.abs()\n keep = data >= threshold if above else data <= threshold\n data[~keep] = 0\n if binarize:\n data[keep] = 1\n return data\n\n\nclass And_(Transformation):\n \"\"\"Logical AND on two or more variables.\n\n Parameters\n ----------\n dfs : list of :obj:`pandas.DataFrame`\n variables to enter into the conjunction.\n \"\"\"\n\n _loopable = False\n _groupable = False\n _output_required = True\n _aligned_required = True\n\n def _transform(self, dfs):\n df = pd.concat(dfs, axis=1, sort=True)\n return df.all(axis=1).astype(int)\n\n\nclass Not(Transformation):\n \"\"\"Logical negation of a variable.\n\n Parameters\n ----------\n var : :obj:`pandas.Series`\n Variable to negate. 
Must be convertible to bool.\n \"\"\"\n\n _loopable = True\n _groupable = False\n\n def _transform(self, var):\n return ~var.astype(bool)\n\n\nclass Or_(Transformation):\n \"\"\"Logical OR (inclusive) on two or more variables.\n\n Parameters\n ----------\n dfs : list of :obj:`pandas.DataFrame`\n variables to enter into the disjunction.\n \"\"\"\n\n _loopable = False\n _groupable = False\n _output_required = True\n _aligned_required = True\n\n def _transform(self, dfs):\n df = pd.concat(dfs, axis=1, sort=True)\n return df.any(axis=1).astype(int)\n\n\nclass Lag(Transformation):\n \"\"\"Lag variable\n\n Returns a variable that is lagged by a specified number of time points.\n Spline interpolation of the requested ``order`` is used for non-integer\n shifts.\n Points outside the input are filled according to the given ``mode``.\n Negative shifts move values toward the beginning of the sequence.\n If ``difference`` is ``True``, the backward difference is calculated for\n positive shifts, and the forward difference is calculated for negative\n shifts.\n\n Additional ``mode``s may be defined if there is need for them.\n The ``scipy.ndimage.shift`` method provides the reference implementation\n for all current modes. \"Constant\" is equivalent to the shift parameter\n \"cval\".\n\n\n Parameters\n ----------\n var : :obj:`numpy.ndarray`\n variable to lag\n shift : float, optional\n number of places to shift the values (default: 1)\n order : int, optional\n order of spline interpolation, from 0-5 (default: 3)\n mode : string\n the `mode` parameter determines how the input array is extended\n beyond its boundaries. Default is 'nearest'.\n The following values are accepted:\n\n \"nearest\" (a a a a | a b c d | d d d d)\n The input is extended by replicating the boundary values\n \"reflect\" (d c b a | a b c d | d c b a)\n The input is extended by reflecting the array over the edge\n \"constant\" (k k k k | a b c d | k k k k)\n The input is extended by filling all values beyond the edge\n with the same constant value, defined by the ``constant`` parameter\n constant : float, optional\n value to fill past edges of input if ``mode`` is ``\"constant\"``.\n (default: 0)\n difference : boolean, optional\n Calculate the backward (if shift is positive) or forward (if shift is\n negative) difference.\n For the forward difference dx of an array x, dx[i] = x[i+1] - x[i].\n For the backward difference dx of an array x, dx[i] = x[i] - x[i-1].\n (default: ``False``)\n \"\"\"\n\n _input_type = 'numpy'\n _return_type = 'numpy'\n\n def _transform(self, var, shift=1, order=3, mode=\"nearest\",\n constant=0.0, difference=False):\n var = var.flatten()\n shifted = ndi.shift(var, shift=shift, order=order, mode=mode,\n cval=constant)\n if not difference:\n return shifted\n elif shift >= 0:\n return var - shifted\n else:\n return shifted - var\n", "path": "bids/modeling/transformations/compute.py"}, {"content": "from .compute import (Sum, Product, Scale, Orthogonalize, Threshold, And_, Or_,\n Not, Demean, Convolve, Lag)\nfrom .munge import (Split, Rename, Assign, Copy, Factor, Filter, Select,\n Delete, DropNA, Replace, ToDense, Group, Resample)\nfrom .base import TransformerManager\n\nAnd = And_\nOr = Or_\n\n__all__ = [\n 'And',\n 'And_',\n 'Assign',\n 'Convolve',\n 'Copy',\n 'Demean',\n 'Delete',\n 'DropNA',\n 'Factor',\n 'Filter',\n 'Group',\n 'Lag',\n 'Not',\n 'Or',\n 'Or_',\n 'Orthogonalize',\n 'Product',\n 'Rename',\n 'Replace',\n 'Resample',\n 'Scale',\n 'Select',\n 'Split',\n 'Sum',\n 'Threshold',\n 'ToDense',\n 
'TransformerManager'\n]\n", "path": "bids/modeling/transformations/__init__.py"}]}
num_tokens: 3,915
num_tokens_diff: 1,016
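The Lag transformation in the patch above boils down to a `scipy.ndimage.shift` call plus an optional backward/forward difference. The snippet below is a minimal standalone sketch of that behaviour using plain NumPy/SciPy; the `lag` helper and the sample array are illustrative only and not part of the pybids API.

```python
# Standalone sketch of the Lag semantics from the patch above (illustrative only).
import numpy as np
from scipy import ndimage as ndi

def lag(values, shift=1, order=3, mode="nearest", constant=0.0, difference=False):
    """Shift a 1-D series; optionally return the backward/forward difference."""
    values = np.asarray(values, dtype=float).flatten()
    shifted = ndi.shift(values, shift=shift, order=order, mode=mode, cval=constant)
    if not difference:
        return shifted
    # Backward difference for positive shifts, forward difference for negative ones.
    return values - shifted if shift >= 0 else shifted - values

x = np.array([0., 1., 2., 3., 4.])
print(lag(x, shift=1))                    # ~[0, 0, 1, 2, 3]; edge filled via 'nearest'
print(lag(x, shift=1, difference=True))   # ~[0, 1, 1, 1, 1]; backward difference
print(lag(x, shift=-1, difference=True))  # ~[1, 1, 1, 1, 0]; forward difference
```

The spline order only matters for non-integer shifts; for whole-sample shifts the result effectively reduces to a plain index shift with the chosen boundary fill.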
problem_id: gh_patches_debug_14000
source: rasdani/github-patches
task_type: git_diff
in_source_id: ivy-llc__ivy-22412
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- scan --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `ivy/functional/frontends/jax/lax/control_flow_operators.py` Content: ``` 1 # global 2 import ivy 3 from ivy.functional.frontends.jax.func_wrapper import to_ivy_arrays_and_back 4 5 6 @to_ivy_arrays_and_back 7 def cond(pred, true_fun, false_fun, *operands, operand=None, linear=None): 8 if operand is not None: 9 if operands: 10 raise ivy.utils.exceptions.IvyException( 11 "if `operand` is passed, positional `operands` should not be passed" 12 ) 13 operands = (operand,) 14 15 if pred: 16 return true_fun(*operands) 17 return false_fun(*operands) 18 19 20 @to_ivy_arrays_and_back 21 def map(f, xs): 22 return ivy.stack([f(x) for x in xs]) 23 24 25 @to_ivy_arrays_and_back 26 def switch(index, branches, *operands, operand=None): 27 if operand is not None: 28 if operands: 29 raise ivy.utils.exceptions.IvyException( 30 "if `operand` is passed, positional `operands` should not be passed" 31 ) 32 operands = (operand,) 33 34 index = max(index, 0) 35 index = min(len(branches) - 1, index) 36 return branches[index](*operands) 37 38 39 @to_ivy_arrays_and_back 40 def fori_loop(lower, upper, body_fun, init_val): 41 if not (callable(body_fun)): 42 raise ivy.exceptions.IvyException( 43 "jax.lax.fori_loop: Argument body_fun should be callable." 44 ) 45 val = init_val 46 for i in range(lower, upper): 47 val = body_fun(i, val) 48 return val 49 50 51 @to_ivy_arrays_and_back 52 def while_loop(cond_fun, body_fun, init_val): 53 if not (callable(body_fun) and callable(cond_fun)): 54 raise ivy.exceptions.IvyException( 55 "jax.lax.while_loop: Arguments body_fun and cond_fun should be callable." 56 ) 57 val = init_val 58 while cond_fun(val): 59 val = body_fun(val) 60 return val 61 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/ivy/functional/frontends/jax/lax/control_flow_operators.py b/ivy/functional/frontends/jax/lax/control_flow_operators.py --- a/ivy/functional/frontends/jax/lax/control_flow_operators.py +++ b/ivy/functional/frontends/jax/lax/control_flow_operators.py @@ -58,3 +58,29 @@ while cond_fun(val): val = body_fun(val) return val + + +@to_ivy_arrays_and_back +def scan(f, init, xs, length=None, reverse=False, unroll=1): + if not (callable(f)): + raise ivy.exceptions.IvyException( + "jax.lax.scan: Argument f should be callable." + ) + if xs is None and length is None: + raise ivy.exceptions.IvyException( + "jax.lax.scan: Either xs or length must be provided." + ) + + if length is not None and (not isinstance(length, int) or length < 0): + raise ivy.exceptions.IvyException( + "jax.lax.scan: length must be a non-negative integer." + ) + if xs is None: + xs = [None] * length + + carry = init + ys = [] + for x in xs: + carry, y = f(carry, x) + ys.append(y) + return carry, ivy.stack(ys)
{"golden_diff": "diff --git a/ivy/functional/frontends/jax/lax/control_flow_operators.py b/ivy/functional/frontends/jax/lax/control_flow_operators.py\n--- a/ivy/functional/frontends/jax/lax/control_flow_operators.py\n+++ b/ivy/functional/frontends/jax/lax/control_flow_operators.py\n@@ -58,3 +58,29 @@\n while cond_fun(val):\n val = body_fun(val)\n return val\n+\n+\n+@to_ivy_arrays_and_back\n+def scan(f, init, xs, length=None, reverse=False, unroll=1):\n+ if not (callable(f)):\n+ raise ivy.exceptions.IvyException(\n+ \"jax.lax.scan: Argument f should be callable.\"\n+ )\n+ if xs is None and length is None:\n+ raise ivy.exceptions.IvyException(\n+ \"jax.lax.scan: Either xs or length must be provided.\"\n+ )\n+\n+ if length is not None and (not isinstance(length, int) or length < 0):\n+ raise ivy.exceptions.IvyException(\n+ \"jax.lax.scan: length must be a non-negative integer.\"\n+ )\n+ if xs is None:\n+ xs = [None] * length\n+\n+ carry = init\n+ ys = []\n+ for x in xs:\n+ carry, y = f(carry, x)\n+ ys.append(y)\n+ return carry, ivy.stack(ys)\n", "issue": "scan\n\n", "before_files": [{"content": "# global\nimport ivy\nfrom ivy.functional.frontends.jax.func_wrapper import to_ivy_arrays_and_back\n\n\n@to_ivy_arrays_and_back\ndef cond(pred, true_fun, false_fun, *operands, operand=None, linear=None):\n if operand is not None:\n if operands:\n raise ivy.utils.exceptions.IvyException(\n \"if `operand` is passed, positional `operands` should not be passed\"\n )\n operands = (operand,)\n\n if pred:\n return true_fun(*operands)\n return false_fun(*operands)\n\n\n@to_ivy_arrays_and_back\ndef map(f, xs):\n return ivy.stack([f(x) for x in xs])\n\n\n@to_ivy_arrays_and_back\ndef switch(index, branches, *operands, operand=None):\n if operand is not None:\n if operands:\n raise ivy.utils.exceptions.IvyException(\n \"if `operand` is passed, positional `operands` should not be passed\"\n )\n operands = (operand,)\n\n index = max(index, 0)\n index = min(len(branches) - 1, index)\n return branches[index](*operands)\n\n\n@to_ivy_arrays_and_back\ndef fori_loop(lower, upper, body_fun, init_val):\n if not (callable(body_fun)):\n raise ivy.exceptions.IvyException(\n \"jax.lax.fori_loop: Argument body_fun should be callable.\"\n )\n val = init_val\n for i in range(lower, upper):\n val = body_fun(i, val)\n return val\n\n\n@to_ivy_arrays_and_back\ndef while_loop(cond_fun, body_fun, init_val):\n if not (callable(body_fun) and callable(cond_fun)):\n raise ivy.exceptions.IvyException(\n \"jax.lax.while_loop: Arguments body_fun and cond_fun should be callable.\"\n )\n val = init_val\n while cond_fun(val):\n val = body_fun(val)\n return val\n", "path": "ivy/functional/frontends/jax/lax/control_flow_operators.py"}], "after_files": [{"content": "# global\nimport ivy\nfrom ivy.functional.frontends.jax.func_wrapper import to_ivy_arrays_and_back\n\n\n@to_ivy_arrays_and_back\ndef cond(pred, true_fun, false_fun, *operands, operand=None, linear=None):\n if operand is not None:\n if operands:\n raise ivy.utils.exceptions.IvyException(\n \"if `operand` is passed, positional `operands` should not be passed\"\n )\n operands = (operand,)\n\n if pred:\n return true_fun(*operands)\n return false_fun(*operands)\n\n\n@to_ivy_arrays_and_back\ndef map(f, xs):\n return ivy.stack([f(x) for x in xs])\n\n\n@to_ivy_arrays_and_back\ndef switch(index, branches, *operands, operand=None):\n if operand is not None:\n if operands:\n raise ivy.utils.exceptions.IvyException(\n \"if `operand` is passed, positional `operands` should not be passed\"\n )\n 
operands = (operand,)\n\n index = max(index, 0)\n index = min(len(branches) - 1, index)\n return branches[index](*operands)\n\n\n@to_ivy_arrays_and_back\ndef fori_loop(lower, upper, body_fun, init_val):\n if not (callable(body_fun)):\n raise ivy.exceptions.IvyException(\n \"jax.lax.fori_loop: Argument body_fun should be callable.\"\n )\n val = init_val\n for i in range(lower, upper):\n val = body_fun(i, val)\n return val\n\n\n@to_ivy_arrays_and_back\ndef while_loop(cond_fun, body_fun, init_val):\n if not (callable(body_fun) and callable(cond_fun)):\n raise ivy.exceptions.IvyException(\n \"jax.lax.while_loop: Arguments body_fun and cond_fun should be callable.\"\n )\n val = init_val\n while cond_fun(val):\n val = body_fun(val)\n return val\n\n\n@to_ivy_arrays_and_back\ndef scan(f, init, xs, length=None, reverse=False, unroll=1):\n if not (callable(f)):\n raise ivy.exceptions.IvyException(\n \"jax.lax.scan: Argument f should be callable.\"\n )\n if xs is None and length is None:\n raise ivy.exceptions.IvyException(\n \"jax.lax.scan: Either xs or length must be provided.\"\n )\n\n if length is not None and (not isinstance(length, int) or length < 0):\n raise ivy.exceptions.IvyException(\n \"jax.lax.scan: length must be a non-negative integer.\"\n )\n if xs is None:\n xs = [None] * length\n\n carry = init\n ys = []\n for x in xs:\n carry, y = f(carry, x)\n ys.append(y)\n return carry, ivy.stack(ys)\n", "path": "ivy/functional/frontends/jax/lax/control_flow_operators.py"}]}
num_tokens: 809
num_tokens_diff: 324
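The `scan` frontend added in the patch above follows the usual `jax.lax.scan` contract: the callback maps `(carry, x)` to `(new_carry, y)`, and the final carry is returned together with the stacked `y` values. A plain-Python sketch of that contract (no ivy backend involved; the cumulative-sum example is illustrative):

```python
# Plain-Python sketch of the scan contract implemented above (illustrative only).
import numpy as np

def scan_sketch(f, init, xs):
    carry, ys = init, []
    for x in xs:                 # same loop structure as the frontend implementation
        carry, y = f(carry, x)
        ys.append(y)
    return carry, np.stack(ys)

# Running cumulative sum: the carry accumulates, y records each partial sum.
final, partials = scan_sketch(lambda c, x: (c + x, c + x), 0, np.arange(1, 6))
print(final)     # 15
print(partials)  # [ 1  3  6 10 15]
```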
problem_id: gh_patches_debug_11100
source: rasdani/github-patches
task_type: git_diff
in_source_id: PyGithub__PyGithub-2078
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Add an option to delete workflow runs This worked for me: ```python # get repo repo = g.get_repo("repo name") # list workflows for workflow in repo.get_workflows(): print("ID: '{}', Name: '{}', State: '{}'".format(workflow.id, workflow.name, workflow.state)) # get runs and delete workflow run: if workflow id match for run in repo.get_workflow_runs(): print("ID: '{}', Satus: '{}', Run Number: '{}', Workflow ID: '{}'".format(run.id, run.status, run.run_number, run.workflow_id)) if run.workflow_id == xyz: print("delete ID: '{}'".format(run.workflow_id)) repo._requester.requestJson("DELETE", run.url) ``` Please add this into a function: `repo._requester.requestJson("DELETE", run.url) ` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `github/WorkflowRun.py` Content: ``` 1 ############################ Copyrights and license ############################ 2 # # 3 # Copyright 2020 Steve Kowalik <[email protected]> # 4 # # 5 # This file is part of PyGithub. # 6 # http://pygithub.readthedocs.io/ # 7 # # 8 # PyGithub is free software: you can redistribute it and/or modify it under # 9 # the terms of the GNU Lesser General Public License as published by the Free # 10 # Software Foundation, either version 3 of the License, or (at your option) # 11 # any later version. # 12 # # 13 # PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY # 14 # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # 15 # FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more # 16 # details. # 17 # # 18 # You should have received a copy of the GNU Lesser General Public License # 19 # along with PyGithub. If not, see <http://www.gnu.org/licenses/>. # 20 # # 21 ################################################################################ 22 23 from collections import namedtuple 24 25 import github.GithubObject 26 import github.PullRequest 27 28 29 class WorkflowRun(github.GithubObject.CompletableGithubObject): 30 """ 31 This class represents Workflow Runs. 
The reference can be found here https://docs.github.com/en/rest/reference/actions#workflow-runs 32 """ 33 34 def __repr__(self): 35 return self.get__repr__({"id": self._id.value, "url": self._url.value}) 36 37 @property 38 def id(self): 39 """ 40 :type: int 41 """ 42 self._completeIfNotSet(self._id) 43 return self._id.value 44 45 @property 46 def head_branch(self): 47 """ 48 :type: string 49 """ 50 self._completeIfNotSet(self._head_branch) 51 return self._head_branch.value 52 53 @property 54 def head_sha(self): 55 """ 56 :type: string 57 """ 58 self._completeIfNotSet(self._head_sha) 59 return self._head_sha.value 60 61 @property 62 def run_number(self): 63 """ 64 :type: int 65 """ 66 self._completeIfNotSet(self._run_number) 67 return self._run_number.value 68 69 @property 70 def event(self): 71 """ 72 :type: string 73 """ 74 self._completeIfNotSet(self._event) 75 return self._event.value 76 77 @property 78 def status(self): 79 """ 80 :type: string 81 """ 82 self._completeIfNotSet(self._status) 83 return self._status.value 84 85 @property 86 def conclusion(self): 87 """ 88 :type: string 89 """ 90 self._completeIfNotSet(self._conclusion) 91 return self._conclusion.value 92 93 @property 94 def workflow_id(self): 95 """ 96 :type: int 97 """ 98 self._completeIfNotSet(self._workflow_id) 99 return self._workflow_id.value 100 101 @property 102 def url(self): 103 """ 104 :type: string 105 """ 106 self._completeIfNotSet(self._url) 107 return self._url.value 108 109 @property 110 def html_url(self): 111 """ 112 :type: string 113 """ 114 self._completeIfNotSet(self._html_url) 115 return self._html_url.value 116 117 @property 118 def pull_requests(self): 119 """ 120 :type: list of :class:`github.PullRequest.PullRequest` 121 """ 122 self._completeIfNotSet(self._pull_requests) 123 return self._pull_requests.value 124 125 @property 126 def created_at(self): 127 """ 128 :type: datetime.datetime 129 """ 130 self._completeIfNotSet(self._created_at) 131 return self._created_at.value 132 133 @property 134 def updated_at(self): 135 """ 136 :type: datetime.datetime 137 """ 138 self._completeIfNotSet(self._updated_at) 139 return self._updated_at.value 140 141 @property 142 def jobs_url(self): 143 """ 144 :type: string 145 """ 146 self._completeIfNotSet(self._jobs_url) 147 return self._jobs_url.value 148 149 @property 150 def logs_url(self): 151 """ 152 :type: string 153 """ 154 self._completeIfNotSet(self._logs_url) 155 return self._logs_url.value 156 157 @property 158 def check_suite_url(self): 159 """ 160 :type: string 161 """ 162 self._completeIfNotSet(self._check_suite_url) 163 return self._check_suite_url.value 164 165 @property 166 def artifacts_url(self): 167 """ 168 :type: string 169 """ 170 self._completeIfNotSet(self._artifacts_url) 171 return self._artifacts_url.value 172 173 @property 174 def cancel_url(self): 175 """ 176 :type: string 177 """ 178 self._completeIfNotSet(self._cancel_url) 179 return self._cancel_url.value 180 181 @property 182 def rerun_url(self): 183 """ 184 :type: string 185 """ 186 self._completeIfNotSet(self._rerun_url) 187 return self._rerun_url.value 188 189 @property 190 def workflow_url(self): 191 """ 192 :type: string 193 """ 194 self._completeIfNotSet(self._workflow_url) 195 return self._workflow_url.value 196 197 @property 198 def head_commit(self): 199 """ 200 :type: :class:`github.GitCommit.GitCommit` 201 """ 202 self._completeIfNotSet(self._head_commit) 203 return self._head_commit.value 204 205 @property 206 def repository(self): 207 """ 208 :type: 
:class:`github.Repository.Repository` 209 """ 210 self._completeIfNotSet(self._repository) 211 return self._repository.value 212 213 @property 214 def head_repository(self): 215 """ 216 :type: :class:`github.Repository.Repository` 217 """ 218 self._completeIfNotSet(self._head_repository) 219 return self._head_repository.value 220 221 def cancel(self): 222 """ 223 :calls: `POST /repos/{owner}/{repo}/actions/runs/{run_id}/cancel <https://docs.github.com/en/rest/reference/actions#workflow-runs>`_ 224 :rtype: bool 225 """ 226 status, _, _ = self._requester.requestJson("POST", self.cancel_url) 227 return status == 202 228 229 def rerun(self): 230 """ 231 :calls: `POST /repos/{owner}/{repo}/actions/runs/{run_id}/rerun <https://docs.github.com/en/rest/reference/actions#workflow-runs>`_ 232 :rtype: bool 233 """ 234 status, _, _ = self._requester.requestJson("POST", self.rerun_url) 235 return status == 201 236 237 def timing(self): 238 """ 239 :calls: `GET /repos/{owner}/{repo}/actions/runs/{run_id}/timing <https://docs.github.com/en/rest/reference/actions#workflow-runs>`_ 240 :rtype: namedtuple with billable and run_duration_ms members 241 """ 242 headers, data = self._requester.requestJsonAndCheck("GET", f"{self.url}/timing") 243 timingdata = namedtuple("TimingData", data.keys()) 244 return timingdata._make(data.values()) 245 246 def _initAttributes(self): 247 self._id = github.GithubObject.NotSet 248 self._head_branch = github.GithubObject.NotSet 249 self._head_sha = github.GithubObject.NotSet 250 self._run_number = github.GithubObject.NotSet 251 self._event = github.GithubObject.NotSet 252 self._status = github.GithubObject.NotSet 253 self._conclusion = github.GithubObject.NotSet 254 self._workflow_id = github.GithubObject.NotSet 255 self._url = github.GithubObject.NotSet 256 self._html_url = github.GithubObject.NotSet 257 self._pull_requests = github.GithubObject.NotSet 258 self._created_at = github.GithubObject.NotSet 259 self._updated_at = github.GithubObject.NotSet 260 self._jobs_url = github.GithubObject.NotSet 261 self._logs_url = github.GithubObject.NotSet 262 self._check_suite_url = github.GithubObject.NotSet 263 self._artifacts_url = github.GithubObject.NotSet 264 self._cancel_url = github.GithubObject.NotSet 265 self._rerun_url = github.GithubObject.NotSet 266 self._workflow_url = github.GithubObject.NotSet 267 self._head_commit = github.GithubObject.NotSet 268 self._repository = github.GithubObject.NotSet 269 self._head_repository = github.GithubObject.NotSet 270 271 def _useAttributes(self, attributes): 272 if "id" in attributes: # pragma no branch 273 self._id = self._makeIntAttribute(attributes["id"]) 274 if "head_branch" in attributes: # pragma no branch 275 self._head_branch = self._makeStringAttribute(attributes["head_branch"]) 276 if "head_sha" in attributes: # pragma no branch 277 self._head_sha = self._makeStringAttribute(attributes["head_sha"]) 278 if "run_number" in attributes: # pragma no branch 279 self._run_number = self._makeIntAttribute(attributes["run_number"]) 280 if "event" in attributes: # pragma no branch 281 self._event = self._makeStringAttribute(attributes["event"]) 282 if "status" in attributes: # pragma no branch 283 self._status = self._makeStringAttribute(attributes["status"]) 284 if "conclusion" in attributes: # pragma no branch 285 self._conclusion = self._makeStringAttribute(attributes["conclusion"]) 286 if "workflow_id" in attributes: # pragma no branch 287 self._workflow_id = self._makeIntAttribute(attributes["workflow_id"]) 288 if "url" in 
attributes: # pragma no branch 289 self._url = self._makeStringAttribute(attributes["url"]) 290 if "html_url" in attributes: # pragma no branch 291 self._html_url = self._makeStringAttribute(attributes["html_url"]) 292 if "pull_requests" in attributes: # pragma no branch 293 self._pull_requests = self._makeListOfClassesAttribute( 294 github.PullRequest.PullRequest, attributes["pull_requests"] 295 ) 296 if "created_at" in attributes: # pragma no branch 297 self._created_at = self._makeDatetimeAttribute(attributes["created_at"]) 298 if "updated_at" in attributes: # pragma no branch 299 self._updated_at = self._makeDatetimeAttribute(attributes["updated_at"]) 300 if "jobs_url" in attributes: # pragma no branch 301 self._jobs_url = self._makeStringAttribute(attributes["jobs_url"]) 302 if "logs_url" in attributes: # pragma no branch 303 self._logs_url = self._makeStringAttribute(attributes["logs_url"]) 304 if "check_suite_url" in attributes: # pragma no branch 305 self._check_suite_url = self._makeStringAttribute( 306 attributes["check_suite_url"] 307 ) 308 if "artifacts_url" in attributes: # pragma no branch 309 self._artifacts_url = self._makeStringAttribute(attributes["artifacts_url"]) 310 if "cancel_url" in attributes: # pragma no branch 311 self._cancel_url = self._makeStringAttribute(attributes["cancel_url"]) 312 if "rerun_url" in attributes: # pragma no branch 313 self._rerun_url = self._makeStringAttribute(attributes["rerun_url"]) 314 if "workflow_url" in attributes: # pragma no branch 315 self._workflow_url = self._makeStringAttribute(attributes["workflow_url"]) 316 if "head_commit" in attributes: # pragma no branch 317 self._head_commit = self._makeClassAttribute( 318 github.GitCommit.GitCommit, attributes["head_commit"] 319 ) 320 if "repository" in attributes: # pragma no branch 321 self._repository = self._makeClassAttribute( 322 github.Repository.Repository, attributes["repository"] 323 ) 324 if "head_repository" in attributes: # pragma no branch 325 self._head_repository = self._makeClassAttribute( 326 github.Repository.Repository, attributes["head_repository"] 327 ) 328 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/github/WorkflowRun.py b/github/WorkflowRun.py --- a/github/WorkflowRun.py +++ b/github/WorkflowRun.py @@ -243,6 +243,14 @@ timingdata = namedtuple("TimingData", data.keys()) return timingdata._make(data.values()) + def delete(self): + """ + :calls: `DELETE /repos/{owner}/{repo}/actions/runs/{run_id} <https://docs.github.com/en/rest/reference/actions#workflow-runs>`_ + :rtype: bool + """ + status, _, _ = self._requester.requestJson("DELETE", self.url) + return status == 204 + def _initAttributes(self): self._id = github.GithubObject.NotSet self._head_branch = github.GithubObject.NotSet
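With the `delete()` method from this patch, the workaround in the issue (calling the private `_requester` directly) becomes a single public call. A usage sketch follows; the token, repository name, and workflow id are placeholders, not real values.

```python
# Usage sketch for the new WorkflowRun.delete() (placeholders throughout).
from github import Github

g = Github("<access-token>")              # placeholder token
repo = g.get_repo("owner/repo")           # placeholder repository

for run in repo.get_workflow_runs():
    if run.workflow_id == 12345:          # illustrative workflow id
        ok = run.delete()                 # DELETE /repos/{owner}/{repo}/actions/runs/{run_id}
        print(f"run {run.id} deleted: {ok}")
```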
{"golden_diff": "diff --git a/github/WorkflowRun.py b/github/WorkflowRun.py\n--- a/github/WorkflowRun.py\n+++ b/github/WorkflowRun.py\n@@ -243,6 +243,14 @@\n timingdata = namedtuple(\"TimingData\", data.keys())\n return timingdata._make(data.values())\n \n+ def delete(self):\n+ \"\"\"\n+ :calls: `DELETE /repos/{owner}/{repo}/actions/runs/{run_id} <https://docs.github.com/en/rest/reference/actions#workflow-runs>`_\n+ :rtype: bool\n+ \"\"\"\n+ status, _, _ = self._requester.requestJson(\"DELETE\", self.url)\n+ return status == 204\n+\n def _initAttributes(self):\n self._id = github.GithubObject.NotSet\n self._head_branch = github.GithubObject.NotSet\n", "issue": "Add an option to delete workflow runs\nThis worked for me:\r\n\r\n```python\r\n# get repo\r\nrepo = g.get_repo(\"repo name\")\r\n\r\n# list workflows\r\nfor workflow in repo.get_workflows():\r\n print(\"ID: '{}', Name: '{}', State: '{}'\".format(workflow.id, workflow.name, workflow.state))\r\n\r\n# get runs and delete workflow run: if workflow id match\r\nfor run in repo.get_workflow_runs():\r\n print(\"ID: '{}', Satus: '{}', Run Number: '{}', Workflow ID: '{}'\".format(run.id, run.status, run.run_number, run.workflow_id))\r\n if run.workflow_id == xyz:\r\n print(\"delete ID: '{}'\".format(run.workflow_id))\r\n repo._requester.requestJson(\"DELETE\", run.url) \r\n```\r\n\r\nPlease add this into a function:\r\n`repo._requester.requestJson(\"DELETE\", run.url) `\r\n\n", "before_files": [{"content": "############################ Copyrights and license ############################\n# #\n# Copyright 2020 Steve Kowalik <[email protected]> #\n# #\n# This file is part of PyGithub. #\n# http://pygithub.readthedocs.io/ #\n# #\n# PyGithub is free software: you can redistribute it and/or modify it under #\n# the terms of the GNU Lesser General Public License as published by the Free #\n# Software Foundation, either version 3 of the License, or (at your option) #\n# any later version. #\n# #\n# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #\n# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #\n# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #\n# details. #\n# #\n# You should have received a copy of the GNU Lesser General Public License #\n# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #\n# #\n################################################################################\n\nfrom collections import namedtuple\n\nimport github.GithubObject\nimport github.PullRequest\n\n\nclass WorkflowRun(github.GithubObject.CompletableGithubObject):\n \"\"\"\n This class represents Workflow Runs. 
The reference can be found here https://docs.github.com/en/rest/reference/actions#workflow-runs\n \"\"\"\n\n def __repr__(self):\n return self.get__repr__({\"id\": self._id.value, \"url\": self._url.value})\n\n @property\n def id(self):\n \"\"\"\n :type: int\n \"\"\"\n self._completeIfNotSet(self._id)\n return self._id.value\n\n @property\n def head_branch(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._head_branch)\n return self._head_branch.value\n\n @property\n def head_sha(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._head_sha)\n return self._head_sha.value\n\n @property\n def run_number(self):\n \"\"\"\n :type: int\n \"\"\"\n self._completeIfNotSet(self._run_number)\n return self._run_number.value\n\n @property\n def event(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._event)\n return self._event.value\n\n @property\n def status(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._status)\n return self._status.value\n\n @property\n def conclusion(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._conclusion)\n return self._conclusion.value\n\n @property\n def workflow_id(self):\n \"\"\"\n :type: int\n \"\"\"\n self._completeIfNotSet(self._workflow_id)\n return self._workflow_id.value\n\n @property\n def url(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._url)\n return self._url.value\n\n @property\n def html_url(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._html_url)\n return self._html_url.value\n\n @property\n def pull_requests(self):\n \"\"\"\n :type: list of :class:`github.PullRequest.PullRequest`\n \"\"\"\n self._completeIfNotSet(self._pull_requests)\n return self._pull_requests.value\n\n @property\n def created_at(self):\n \"\"\"\n :type: datetime.datetime\n \"\"\"\n self._completeIfNotSet(self._created_at)\n return self._created_at.value\n\n @property\n def updated_at(self):\n \"\"\"\n :type: datetime.datetime\n \"\"\"\n self._completeIfNotSet(self._updated_at)\n return self._updated_at.value\n\n @property\n def jobs_url(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._jobs_url)\n return self._jobs_url.value\n\n @property\n def logs_url(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._logs_url)\n return self._logs_url.value\n\n @property\n def check_suite_url(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._check_suite_url)\n return self._check_suite_url.value\n\n @property\n def artifacts_url(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._artifacts_url)\n return self._artifacts_url.value\n\n @property\n def cancel_url(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._cancel_url)\n return self._cancel_url.value\n\n @property\n def rerun_url(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._rerun_url)\n return self._rerun_url.value\n\n @property\n def workflow_url(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._workflow_url)\n return self._workflow_url.value\n\n @property\n def head_commit(self):\n \"\"\"\n :type: :class:`github.GitCommit.GitCommit`\n \"\"\"\n self._completeIfNotSet(self._head_commit)\n return self._head_commit.value\n\n @property\n def repository(self):\n \"\"\"\n :type: :class:`github.Repository.Repository`\n \"\"\"\n self._completeIfNotSet(self._repository)\n return self._repository.value\n\n @property\n def head_repository(self):\n 
\"\"\"\n :type: :class:`github.Repository.Repository`\n \"\"\"\n self._completeIfNotSet(self._head_repository)\n return self._head_repository.value\n\n def cancel(self):\n \"\"\"\n :calls: `POST /repos/{owner}/{repo}/actions/runs/{run_id}/cancel <https://docs.github.com/en/rest/reference/actions#workflow-runs>`_\n :rtype: bool\n \"\"\"\n status, _, _ = self._requester.requestJson(\"POST\", self.cancel_url)\n return status == 202\n\n def rerun(self):\n \"\"\"\n :calls: `POST /repos/{owner}/{repo}/actions/runs/{run_id}/rerun <https://docs.github.com/en/rest/reference/actions#workflow-runs>`_\n :rtype: bool\n \"\"\"\n status, _, _ = self._requester.requestJson(\"POST\", self.rerun_url)\n return status == 201\n\n def timing(self):\n \"\"\"\n :calls: `GET /repos/{owner}/{repo}/actions/runs/{run_id}/timing <https://docs.github.com/en/rest/reference/actions#workflow-runs>`_\n :rtype: namedtuple with billable and run_duration_ms members\n \"\"\"\n headers, data = self._requester.requestJsonAndCheck(\"GET\", f\"{self.url}/timing\")\n timingdata = namedtuple(\"TimingData\", data.keys())\n return timingdata._make(data.values())\n\n def _initAttributes(self):\n self._id = github.GithubObject.NotSet\n self._head_branch = github.GithubObject.NotSet\n self._head_sha = github.GithubObject.NotSet\n self._run_number = github.GithubObject.NotSet\n self._event = github.GithubObject.NotSet\n self._status = github.GithubObject.NotSet\n self._conclusion = github.GithubObject.NotSet\n self._workflow_id = github.GithubObject.NotSet\n self._url = github.GithubObject.NotSet\n self._html_url = github.GithubObject.NotSet\n self._pull_requests = github.GithubObject.NotSet\n self._created_at = github.GithubObject.NotSet\n self._updated_at = github.GithubObject.NotSet\n self._jobs_url = github.GithubObject.NotSet\n self._logs_url = github.GithubObject.NotSet\n self._check_suite_url = github.GithubObject.NotSet\n self._artifacts_url = github.GithubObject.NotSet\n self._cancel_url = github.GithubObject.NotSet\n self._rerun_url = github.GithubObject.NotSet\n self._workflow_url = github.GithubObject.NotSet\n self._head_commit = github.GithubObject.NotSet\n self._repository = github.GithubObject.NotSet\n self._head_repository = github.GithubObject.NotSet\n\n def _useAttributes(self, attributes):\n if \"id\" in attributes: # pragma no branch\n self._id = self._makeIntAttribute(attributes[\"id\"])\n if \"head_branch\" in attributes: # pragma no branch\n self._head_branch = self._makeStringAttribute(attributes[\"head_branch\"])\n if \"head_sha\" in attributes: # pragma no branch\n self._head_sha = self._makeStringAttribute(attributes[\"head_sha\"])\n if \"run_number\" in attributes: # pragma no branch\n self._run_number = self._makeIntAttribute(attributes[\"run_number\"])\n if \"event\" in attributes: # pragma no branch\n self._event = self._makeStringAttribute(attributes[\"event\"])\n if \"status\" in attributes: # pragma no branch\n self._status = self._makeStringAttribute(attributes[\"status\"])\n if \"conclusion\" in attributes: # pragma no branch\n self._conclusion = self._makeStringAttribute(attributes[\"conclusion\"])\n if \"workflow_id\" in attributes: # pragma no branch\n self._workflow_id = self._makeIntAttribute(attributes[\"workflow_id\"])\n if \"url\" in attributes: # pragma no branch\n self._url = self._makeStringAttribute(attributes[\"url\"])\n if \"html_url\" in attributes: # pragma no branch\n self._html_url = self._makeStringAttribute(attributes[\"html_url\"])\n if \"pull_requests\" in attributes: # pragma no 
branch\n self._pull_requests = self._makeListOfClassesAttribute(\n github.PullRequest.PullRequest, attributes[\"pull_requests\"]\n )\n if \"created_at\" in attributes: # pragma no branch\n self._created_at = self._makeDatetimeAttribute(attributes[\"created_at\"])\n if \"updated_at\" in attributes: # pragma no branch\n self._updated_at = self._makeDatetimeAttribute(attributes[\"updated_at\"])\n if \"jobs_url\" in attributes: # pragma no branch\n self._jobs_url = self._makeStringAttribute(attributes[\"jobs_url\"])\n if \"logs_url\" in attributes: # pragma no branch\n self._logs_url = self._makeStringAttribute(attributes[\"logs_url\"])\n if \"check_suite_url\" in attributes: # pragma no branch\n self._check_suite_url = self._makeStringAttribute(\n attributes[\"check_suite_url\"]\n )\n if \"artifacts_url\" in attributes: # pragma no branch\n self._artifacts_url = self._makeStringAttribute(attributes[\"artifacts_url\"])\n if \"cancel_url\" in attributes: # pragma no branch\n self._cancel_url = self._makeStringAttribute(attributes[\"cancel_url\"])\n if \"rerun_url\" in attributes: # pragma no branch\n self._rerun_url = self._makeStringAttribute(attributes[\"rerun_url\"])\n if \"workflow_url\" in attributes: # pragma no branch\n self._workflow_url = self._makeStringAttribute(attributes[\"workflow_url\"])\n if \"head_commit\" in attributes: # pragma no branch\n self._head_commit = self._makeClassAttribute(\n github.GitCommit.GitCommit, attributes[\"head_commit\"]\n )\n if \"repository\" in attributes: # pragma no branch\n self._repository = self._makeClassAttribute(\n github.Repository.Repository, attributes[\"repository\"]\n )\n if \"head_repository\" in attributes: # pragma no branch\n self._head_repository = self._makeClassAttribute(\n github.Repository.Repository, attributes[\"head_repository\"]\n )\n", "path": "github/WorkflowRun.py"}], "after_files": [{"content": "############################ Copyrights and license ############################\n# #\n# Copyright 2020 Steve Kowalik <[email protected]> #\n# #\n# This file is part of PyGithub. #\n# http://pygithub.readthedocs.io/ #\n# #\n# PyGithub is free software: you can redistribute it and/or modify it under #\n# the terms of the GNU Lesser General Public License as published by the Free #\n# Software Foundation, either version 3 of the License, or (at your option) #\n# any later version. #\n# #\n# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #\n# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #\n# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #\n# details. #\n# #\n# You should have received a copy of the GNU Lesser General Public License #\n# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #\n# #\n################################################################################\n\nfrom collections import namedtuple\n\nimport github.GithubObject\nimport github.PullRequest\n\n\nclass WorkflowRun(github.GithubObject.CompletableGithubObject):\n \"\"\"\n This class represents Workflow Runs. 
The reference can be found here https://docs.github.com/en/rest/reference/actions#workflow-runs\n \"\"\"\n\n def __repr__(self):\n return self.get__repr__({\"id\": self._id.value, \"url\": self._url.value})\n\n @property\n def id(self):\n \"\"\"\n :type: int\n \"\"\"\n self._completeIfNotSet(self._id)\n return self._id.value\n\n @property\n def head_branch(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._head_branch)\n return self._head_branch.value\n\n @property\n def head_sha(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._head_sha)\n return self._head_sha.value\n\n @property\n def run_number(self):\n \"\"\"\n :type: int\n \"\"\"\n self._completeIfNotSet(self._run_number)\n return self._run_number.value\n\n @property\n def event(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._event)\n return self._event.value\n\n @property\n def status(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._status)\n return self._status.value\n\n @property\n def conclusion(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._conclusion)\n return self._conclusion.value\n\n @property\n def workflow_id(self):\n \"\"\"\n :type: int\n \"\"\"\n self._completeIfNotSet(self._workflow_id)\n return self._workflow_id.value\n\n @property\n def url(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._url)\n return self._url.value\n\n @property\n def html_url(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._html_url)\n return self._html_url.value\n\n @property\n def pull_requests(self):\n \"\"\"\n :type: list of :class:`github.PullRequest.PullRequest`\n \"\"\"\n self._completeIfNotSet(self._pull_requests)\n return self._pull_requests.value\n\n @property\n def created_at(self):\n \"\"\"\n :type: datetime.datetime\n \"\"\"\n self._completeIfNotSet(self._created_at)\n return self._created_at.value\n\n @property\n def updated_at(self):\n \"\"\"\n :type: datetime.datetime\n \"\"\"\n self._completeIfNotSet(self._updated_at)\n return self._updated_at.value\n\n @property\n def jobs_url(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._jobs_url)\n return self._jobs_url.value\n\n @property\n def logs_url(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._logs_url)\n return self._logs_url.value\n\n @property\n def check_suite_url(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._check_suite_url)\n return self._check_suite_url.value\n\n @property\n def artifacts_url(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._artifacts_url)\n return self._artifacts_url.value\n\n @property\n def cancel_url(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._cancel_url)\n return self._cancel_url.value\n\n @property\n def rerun_url(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._rerun_url)\n return self._rerun_url.value\n\n @property\n def workflow_url(self):\n \"\"\"\n :type: string\n \"\"\"\n self._completeIfNotSet(self._workflow_url)\n return self._workflow_url.value\n\n @property\n def head_commit(self):\n \"\"\"\n :type: :class:`github.GitCommit.GitCommit`\n \"\"\"\n self._completeIfNotSet(self._head_commit)\n return self._head_commit.value\n\n @property\n def repository(self):\n \"\"\"\n :type: :class:`github.Repository.Repository`\n \"\"\"\n self._completeIfNotSet(self._repository)\n return self._repository.value\n\n @property\n def head_repository(self):\n 
\"\"\"\n :type: :class:`github.Repository.Repository`\n \"\"\"\n self._completeIfNotSet(self._head_repository)\n return self._head_repository.value\n\n def cancel(self):\n \"\"\"\n :calls: `POST /repos/{owner}/{repo}/actions/runs/{run_id}/cancel <https://docs.github.com/en/rest/reference/actions#workflow-runs>`_\n :rtype: bool\n \"\"\"\n status, _, _ = self._requester.requestJson(\"POST\", self.cancel_url)\n return status == 202\n\n def rerun(self):\n \"\"\"\n :calls: `POST /repos/{owner}/{repo}/actions/runs/{run_id}/rerun <https://docs.github.com/en/rest/reference/actions#workflow-runs>`_\n :rtype: bool\n \"\"\"\n status, _, _ = self._requester.requestJson(\"POST\", self.rerun_url)\n return status == 201\n\n def timing(self):\n \"\"\"\n :calls: `GET /repos/{owner}/{repo}/actions/runs/{run_id}/timing <https://docs.github.com/en/rest/reference/actions#workflow-runs>`_\n :rtype: namedtuple with billable and run_duration_ms members\n \"\"\"\n headers, data = self._requester.requestJsonAndCheck(\"GET\", f\"{self.url}/timing\")\n timingdata = namedtuple(\"TimingData\", data.keys())\n return timingdata._make(data.values())\n\n def delete(self):\n \"\"\"\n :calls: `DELETE /repos/{owner}/{repo}/actions/runs/{run_id} <https://docs.github.com/en/rest/reference/actions#workflow-runs>`_\n :rtype: bool\n \"\"\"\n status, _, _ = self._requester.requestJson(\"DELETE\", self.url)\n return status == 204\n\n def _initAttributes(self):\n self._id = github.GithubObject.NotSet\n self._head_branch = github.GithubObject.NotSet\n self._head_sha = github.GithubObject.NotSet\n self._run_number = github.GithubObject.NotSet\n self._event = github.GithubObject.NotSet\n self._status = github.GithubObject.NotSet\n self._conclusion = github.GithubObject.NotSet\n self._workflow_id = github.GithubObject.NotSet\n self._url = github.GithubObject.NotSet\n self._html_url = github.GithubObject.NotSet\n self._pull_requests = github.GithubObject.NotSet\n self._created_at = github.GithubObject.NotSet\n self._updated_at = github.GithubObject.NotSet\n self._jobs_url = github.GithubObject.NotSet\n self._logs_url = github.GithubObject.NotSet\n self._check_suite_url = github.GithubObject.NotSet\n self._artifacts_url = github.GithubObject.NotSet\n self._cancel_url = github.GithubObject.NotSet\n self._rerun_url = github.GithubObject.NotSet\n self._workflow_url = github.GithubObject.NotSet\n self._head_commit = github.GithubObject.NotSet\n self._repository = github.GithubObject.NotSet\n self._head_repository = github.GithubObject.NotSet\n\n def _useAttributes(self, attributes):\n if \"id\" in attributes: # pragma no branch\n self._id = self._makeIntAttribute(attributes[\"id\"])\n if \"head_branch\" in attributes: # pragma no branch\n self._head_branch = self._makeStringAttribute(attributes[\"head_branch\"])\n if \"head_sha\" in attributes: # pragma no branch\n self._head_sha = self._makeStringAttribute(attributes[\"head_sha\"])\n if \"run_number\" in attributes: # pragma no branch\n self._run_number = self._makeIntAttribute(attributes[\"run_number\"])\n if \"event\" in attributes: # pragma no branch\n self._event = self._makeStringAttribute(attributes[\"event\"])\n if \"status\" in attributes: # pragma no branch\n self._status = self._makeStringAttribute(attributes[\"status\"])\n if \"conclusion\" in attributes: # pragma no branch\n self._conclusion = self._makeStringAttribute(attributes[\"conclusion\"])\n if \"workflow_id\" in attributes: # pragma no branch\n self._workflow_id = self._makeIntAttribute(attributes[\"workflow_id\"])\n if 
\"url\" in attributes: # pragma no branch\n self._url = self._makeStringAttribute(attributes[\"url\"])\n if \"html_url\" in attributes: # pragma no branch\n self._html_url = self._makeStringAttribute(attributes[\"html_url\"])\n if \"pull_requests\" in attributes: # pragma no branch\n self._pull_requests = self._makeListOfClassesAttribute(\n github.PullRequest.PullRequest, attributes[\"pull_requests\"]\n )\n if \"created_at\" in attributes: # pragma no branch\n self._created_at = self._makeDatetimeAttribute(attributes[\"created_at\"])\n if \"updated_at\" in attributes: # pragma no branch\n self._updated_at = self._makeDatetimeAttribute(attributes[\"updated_at\"])\n if \"jobs_url\" in attributes: # pragma no branch\n self._jobs_url = self._makeStringAttribute(attributes[\"jobs_url\"])\n if \"logs_url\" in attributes: # pragma no branch\n self._logs_url = self._makeStringAttribute(attributes[\"logs_url\"])\n if \"check_suite_url\" in attributes: # pragma no branch\n self._check_suite_url = self._makeStringAttribute(\n attributes[\"check_suite_url\"]\n )\n if \"artifacts_url\" in attributes: # pragma no branch\n self._artifacts_url = self._makeStringAttribute(attributes[\"artifacts_url\"])\n if \"cancel_url\" in attributes: # pragma no branch\n self._cancel_url = self._makeStringAttribute(attributes[\"cancel_url\"])\n if \"rerun_url\" in attributes: # pragma no branch\n self._rerun_url = self._makeStringAttribute(attributes[\"rerun_url\"])\n if \"workflow_url\" in attributes: # pragma no branch\n self._workflow_url = self._makeStringAttribute(attributes[\"workflow_url\"])\n if \"head_commit\" in attributes: # pragma no branch\n self._head_commit = self._makeClassAttribute(\n github.GitCommit.GitCommit, attributes[\"head_commit\"]\n )\n if \"repository\" in attributes: # pragma no branch\n self._repository = self._makeClassAttribute(\n github.Repository.Repository, attributes[\"repository\"]\n )\n if \"head_repository\" in attributes: # pragma no branch\n self._head_repository = self._makeClassAttribute(\n github.Repository.Repository, attributes[\"head_repository\"]\n )\n", "path": "github/WorkflowRun.py"}]}
3,945
187
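The row above captures a PyGithub change whose only difference between `before_files` and `after_files` is a new `WorkflowRun.delete()` helper (a DELETE request against the run URL, reported as successful on a 204 response). Below is a minimal usage sketch of the patched class; the token and repository name are placeholders, and `Repository.get_workflow_run` is assumed to be the accessor exposed by the same library version rather than something taken from this row.

```python
from github import Github

# Placeholder credentials and repository -- replace with real values before running.
g = Github("ghp_example_token")
repo = g.get_repo("octocat/hello-world")

# Fetch one workflow run by id (accessor assumed to exist in this PyGithub version).
run = repo.get_workflow_run(42)

print(run.status, run.conclusion)        # e.g. "completed" / "failure"
print(run.timing().run_duration_ms)      # timing() returns a namedtuple per the docstring above

if run.status == "completed":
    # New helper from the patch: DELETE /repos/{owner}/{repo}/actions/runs/{run_id},
    # considered successful when GitHub answers 204 No Content.
    print("deleted:", run.delete())
```

`run.cancel()` and `run.rerun()` work the same way and were already present before the patch; only `delete()` is new in this row.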
gh_patches_debug_57168
rasdani/github-patches
git_diff
vyperlang__vyper-2526
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- vyper.exceptions.TypeCheckFailure: pack_arguments did not return a value ### Version Information * vyper Version (output of `vyper --version`): 0.2.16 * OS: osx * Python Version (output of `python --version`): python3 ### I tried to compile my codes using "vyper file_name.vy" and this is the error I get Please include information like: *Error compiling: bounty.v ![Screenshot 2021-11-03 at 9 42 39 PM](https://user-images.githubusercontent.com/54287741/140189733-b037b084-b805-4f25-aa85-eee83b0b1727.png) y vyper.exceptions.TypeCheckFailure: pack_arguments did not return a value This is an unhandled internal compiler error. Please create an issue on Github to notify the developers. * vyper * the code that caused the failure (see [this link](https://help.github.com/articles/basic-writing-and-formatting-syntax/) for help with formatting code) * please try running your example with the --debug flag turned on ### How can it be fixed? Fill this in if you know how to fix it. ![Screenshot 2021-11-03 at 9 42 39 PM](https://user-images.githubusercontent.com/54287741/140189784-eb38f9fd-2994-4537-8a4a-8a6cf5f96d30.png) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `vyper/old_codegen/external_call.py` Content: ``` 1 import vyper.utils as util 2 from vyper import ast as vy_ast 3 from vyper.exceptions import StateAccessViolation, StructureException, TypeCheckFailure 4 from vyper.old_codegen.abi import abi_encode, abi_type_of 5 from vyper.old_codegen.lll_node import Encoding, LLLnode 6 from vyper.old_codegen.parser_utils import ( 7 calculate_type_for_external_return, 8 get_element_ptr, 9 getpos, 10 unwrap_location, 11 ) 12 from vyper.old_codegen.types import TupleType, canonicalize_type, get_type_for_exact_size 13 from vyper.old_codegen.types.check import check_assign 14 15 16 def _pack_arguments(contract_sig, args, context, pos): 17 # abi encoding just treats all args as a big tuple 18 args_tuple_t = TupleType([x.typ for x in args]) 19 args_as_tuple = LLLnode.from_list(["multi"] + [x for x in args], typ=args_tuple_t) 20 args_abi_t = abi_type_of(args_tuple_t) 21 22 # sanity typecheck - make sure the arguments can be assigned 23 dst_tuple_t = TupleType([arg.typ for arg in contract_sig.args][: len(args)]) 24 _tmp = LLLnode("fake node", location="memory", typ=dst_tuple_t) 25 check_assign(_tmp, args_as_tuple, pos) 26 27 if contract_sig.return_type is not None: 28 return_abi_t = abi_type_of(calculate_type_for_external_return(contract_sig.return_type)) 29 30 # we use the same buffer for args and returndata, 31 # so allocate enough space here for the returndata too. 32 buflen = max(args_abi_t.size_bound(), return_abi_t.size_bound()) 33 else: 34 buflen = args_abi_t.size_bound() 35 36 buflen += 32 # padding for the method id 37 38 buf_t = get_type_for_exact_size(buflen) 39 buf = context.new_internal_variable(buf_t) 40 41 args_ofst = buf + 28 42 args_len = args_abi_t.size_bound() + 4 43 44 abi_signature = contract_sig.name + canonicalize_type(dst_tuple_t) 45 46 # layout: 47 # 32 bytes | args 48 # 0x..00<method_id_4bytes> | args 49 # the reason for the left padding is just so the alignment is easier. 
50 # if we were only targeting constantinople, we could align 51 # to buf (and also keep code size small) by using 52 # (mstore buf (shl signature.method_id 224)) 53 mstore_method_id = [["mstore", buf, util.abi_method_id(abi_signature)]] 54 55 if len(args) == 0: 56 encode_args = ["pass"] 57 else: 58 encode_args = abi_encode(buf + 32, args_as_tuple, pos) 59 60 return buf, mstore_method_id + [encode_args], args_ofst, args_len 61 62 63 def _returndata_encoding(contract_sig): 64 if contract_sig.is_from_json: 65 return Encoding.JSON_ABI 66 return Encoding.ABI 67 68 69 def _unpack_returndata(buf, contract_sig, context, pos): 70 return_t = contract_sig.return_type 71 if return_t is None: 72 return ["pass"], 0, 0 73 74 return_t = calculate_type_for_external_return(return_t) 75 # if the abi signature has a different type than 76 # the vyper type, we need to wrap and unwrap the type 77 # so that the ABI decoding works correctly 78 should_unwrap_abi_tuple = return_t != contract_sig.return_type 79 80 abi_return_t = abi_type_of(return_t) 81 82 min_return_size = abi_return_t.min_size() 83 max_return_size = abi_return_t.size_bound() 84 assert 0 < min_return_size <= max_return_size 85 86 ret_ofst = buf 87 ret_len = max_return_size 88 89 # revert when returndatasize is not in bounds 90 ret = [] 91 # runtime: min_return_size <= returndatasize 92 # TODO move the -1 optimization to LLL optimizer 93 ret += [["assert", ["gt", "returndatasize", min_return_size - 1]]] 94 95 # add as the last LLLnode a pointer to the return data structure 96 97 # the return type has been wrapped by the calling contract; 98 # unwrap it so downstream code isn't confused. 99 # basically this expands to buf+32 if the return type has been wrapped 100 # in a tuple AND its ABI type is dynamic. 101 # in most cases, this simply will evaluate to ret. 102 # in the special case where the return type has been wrapped 103 # in a tuple AND its ABI type is dynamic, it expands to buf+32. 104 buf = LLLnode(buf, typ=return_t, encoding=_returndata_encoding(contract_sig), location="memory") 105 106 if should_unwrap_abi_tuple: 107 buf = get_element_ptr(buf, 0, pos=None, array_bounds_check=False) 108 109 ret += [buf] 110 111 return ret, ret_ofst, ret_len 112 113 114 def _external_call_helper( 115 contract_address, contract_sig, args_lll, context, pos=None, value=None, gas=None 116 ): 117 118 if value is None: 119 value = 0 120 if gas is None: 121 gas = "gas" 122 123 # sanity check 124 assert len(contract_sig.args) == len(args_lll) 125 126 if context.is_constant() and contract_sig.mutability not in ("view", "pure"): 127 # TODO is this already done in type checker? 128 raise StateAccessViolation( 129 f"May not call state modifying function '{contract_sig.name}' " 130 f"within {context.pp_constancy()}.", 131 pos, 132 ) 133 134 sub = ["seq"] 135 136 buf, arg_packer, args_ofst, args_len = _pack_arguments(contract_sig, args_lll, context, pos) 137 138 ret_unpacker, ret_ofst, ret_len = _unpack_returndata(buf, contract_sig, context, pos) 139 140 sub += arg_packer 141 142 if contract_sig.return_type is None: 143 # if we do not expect return data, check that a contract exists at the 144 # target address. we must perform this check BEFORE the call because 145 # the contract might selfdestruct. on the other hand we can omit this 146 # when we _do_ expect return data because we later check 147 # `returndatasize` (that check works even if the contract 148 # selfdestructs). 
149 sub.append(["assert", ["extcodesize", contract_address]]) 150 151 if context.is_constant() or contract_sig.mutability in ("view", "pure"): 152 call_op = ["staticcall", gas, contract_address, args_ofst, args_len, ret_ofst, ret_len] 153 else: 154 call_op = ["call", gas, contract_address, value, args_ofst, args_len, ret_ofst, ret_len] 155 156 sub.append(["assert", call_op]) 157 158 if contract_sig.return_type is not None: 159 sub += ret_unpacker 160 161 ret = LLLnode.from_list( 162 # set the encoding to ABI here, downstream code will decode and add clampers. 163 sub, 164 typ=contract_sig.return_type, 165 location="memory", 166 encoding=_returndata_encoding(contract_sig), 167 pos=pos, 168 ) 169 170 return ret 171 172 173 # TODO push me up to expr.py 174 def get_gas_and_value(stmt_expr, context): 175 from vyper.old_codegen.expr import Expr # TODO rethink this circular import 176 177 value, gas = None, None 178 for kw in stmt_expr.keywords: 179 if kw.arg == "gas": 180 gas = Expr.parse_value_expr(kw.value, context) 181 elif kw.arg == "value": 182 value = Expr.parse_value_expr(kw.value, context) 183 else: 184 raise TypeCheckFailure("Unexpected keyword argument") 185 return value, gas 186 187 188 def lll_for_external_call(stmt_expr, context): 189 from vyper.old_codegen.expr import Expr # TODO rethink this circular import 190 191 pos = getpos(stmt_expr) 192 value, gas = get_gas_and_value(stmt_expr, context) 193 args_lll = [Expr(x, context).lll_node for x in stmt_expr.args] 194 195 if isinstance(stmt_expr.func, vy_ast.Attribute) and isinstance( 196 stmt_expr.func.value, vy_ast.Call 197 ): 198 # e.g. `Foo(address).bar()` 199 200 # sanity check 201 assert len(stmt_expr.func.value.args) == 1 202 contract_name = stmt_expr.func.value.func.id 203 contract_address = Expr.parse_value_expr(stmt_expr.func.value.args[0], context) 204 205 elif ( 206 isinstance(stmt_expr.func.value, vy_ast.Attribute) 207 and stmt_expr.func.value.attr in context.globals 208 # TODO check for self? 209 and hasattr(context.globals[stmt_expr.func.value.attr].typ, "name") 210 ): 211 # e.g. `self.foo.bar()` 212 213 # sanity check 214 assert stmt_expr.func.value.value.id == "self", stmt_expr 215 216 contract_name = context.globals[stmt_expr.func.value.attr].typ.name 217 type_ = stmt_expr.func.value._metadata["type"] 218 var = context.globals[stmt_expr.func.value.attr] 219 contract_address = unwrap_location( 220 LLLnode.from_list( 221 type_.position.position, 222 typ=var.typ, 223 location="storage", 224 pos=pos, 225 annotation="self." + stmt_expr.func.value.attr, 226 ) 227 ) 228 else: 229 # TODO catch this during type checking 230 raise StructureException("Unsupported operator.", stmt_expr) 231 232 method_name = stmt_expr.func.attr 233 contract_sig = context.sigs[contract_name][method_name] 234 235 ret = _external_call_helper( 236 contract_address, 237 contract_sig, 238 args_lll, 239 context, 240 pos, 241 value=value, 242 gas=gas, 243 ) 244 ret.annotation = stmt_expr.get("node_source_code") 245 246 return ret 247 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/vyper/old_codegen/external_call.py b/vyper/old_codegen/external_call.py --- a/vyper/old_codegen/external_call.py +++ b/vyper/old_codegen/external_call.py @@ -121,7 +121,7 @@ gas = "gas" # sanity check - assert len(contract_sig.args) == len(args_lll) + assert len(contract_sig.base_args) <= len(args_lll) <= len(contract_sig.args) if context.is_constant() and contract_sig.mutability not in ("view", "pure"): # TODO is this already done in type checker?
{"golden_diff": "diff --git a/vyper/old_codegen/external_call.py b/vyper/old_codegen/external_call.py\n--- a/vyper/old_codegen/external_call.py\n+++ b/vyper/old_codegen/external_call.py\n@@ -121,7 +121,7 @@\n gas = \"gas\"\n \n # sanity check\n- assert len(contract_sig.args) == len(args_lll)\n+ assert len(contract_sig.base_args) <= len(args_lll) <= len(contract_sig.args)\n \n if context.is_constant() and contract_sig.mutability not in (\"view\", \"pure\"):\n # TODO is this already done in type checker?\n", "issue": "vyper.exceptions.TypeCheckFailure: pack_arguments did not return a value\n### Version Information\r\n\r\n* vyper Version (output of `vyper --version`): 0.2.16\r\n* OS: osx\r\n* Python Version (output of `python --version`): python3\r\n\r\n### I tried to compile my codes using \"vyper file_name.vy\" and this is the error I get\r\n\r\nPlease include information like:\r\n\r\n*Error compiling: bounty.v\r\n![Screenshot 2021-11-03 at 9 42 39 PM](https://user-images.githubusercontent.com/54287741/140189733-b037b084-b805-4f25-aa85-eee83b0b1727.png)\r\ny\r\nvyper.exceptions.TypeCheckFailure: pack_arguments did not return a value\r\n\r\nThis is an unhandled internal compiler error. Please create an issue on Github to notify the developers.\r\n* vyper\r\n* the code that caused the failure (see [this link](https://help.github.com/articles/basic-writing-and-formatting-syntax/) for help with formatting code)\r\n* please try running your example with the --debug flag turned on\r\n\r\n\r\n### How can it be fixed?\r\n\r\nFill this in if you know how to fix it.\r\n![Screenshot 2021-11-03 at 9 42 39 PM](https://user-images.githubusercontent.com/54287741/140189784-eb38f9fd-2994-4537-8a4a-8a6cf5f96d30.png)\r\n\r\n\n", "before_files": [{"content": "import vyper.utils as util\nfrom vyper import ast as vy_ast\nfrom vyper.exceptions import StateAccessViolation, StructureException, TypeCheckFailure\nfrom vyper.old_codegen.abi import abi_encode, abi_type_of\nfrom vyper.old_codegen.lll_node import Encoding, LLLnode\nfrom vyper.old_codegen.parser_utils import (\n calculate_type_for_external_return,\n get_element_ptr,\n getpos,\n unwrap_location,\n)\nfrom vyper.old_codegen.types import TupleType, canonicalize_type, get_type_for_exact_size\nfrom vyper.old_codegen.types.check import check_assign\n\n\ndef _pack_arguments(contract_sig, args, context, pos):\n # abi encoding just treats all args as a big tuple\n args_tuple_t = TupleType([x.typ for x in args])\n args_as_tuple = LLLnode.from_list([\"multi\"] + [x for x in args], typ=args_tuple_t)\n args_abi_t = abi_type_of(args_tuple_t)\n\n # sanity typecheck - make sure the arguments can be assigned\n dst_tuple_t = TupleType([arg.typ for arg in contract_sig.args][: len(args)])\n _tmp = LLLnode(\"fake node\", location=\"memory\", typ=dst_tuple_t)\n check_assign(_tmp, args_as_tuple, pos)\n\n if contract_sig.return_type is not None:\n return_abi_t = abi_type_of(calculate_type_for_external_return(contract_sig.return_type))\n\n # we use the same buffer for args and returndata,\n # so allocate enough space here for the returndata too.\n buflen = max(args_abi_t.size_bound(), return_abi_t.size_bound())\n else:\n buflen = args_abi_t.size_bound()\n\n buflen += 32 # padding for the method id\n\n buf_t = get_type_for_exact_size(buflen)\n buf = context.new_internal_variable(buf_t)\n\n args_ofst = buf + 28\n args_len = args_abi_t.size_bound() + 4\n\n abi_signature = contract_sig.name + canonicalize_type(dst_tuple_t)\n\n # layout:\n # 32 bytes | args\n # 0x..00<method_id_4bytes> 
| args\n # the reason for the left padding is just so the alignment is easier.\n # if we were only targeting constantinople, we could align\n # to buf (and also keep code size small) by using\n # (mstore buf (shl signature.method_id 224))\n mstore_method_id = [[\"mstore\", buf, util.abi_method_id(abi_signature)]]\n\n if len(args) == 0:\n encode_args = [\"pass\"]\n else:\n encode_args = abi_encode(buf + 32, args_as_tuple, pos)\n\n return buf, mstore_method_id + [encode_args], args_ofst, args_len\n\n\ndef _returndata_encoding(contract_sig):\n if contract_sig.is_from_json:\n return Encoding.JSON_ABI\n return Encoding.ABI\n\n\ndef _unpack_returndata(buf, contract_sig, context, pos):\n return_t = contract_sig.return_type\n if return_t is None:\n return [\"pass\"], 0, 0\n\n return_t = calculate_type_for_external_return(return_t)\n # if the abi signature has a different type than\n # the vyper type, we need to wrap and unwrap the type\n # so that the ABI decoding works correctly\n should_unwrap_abi_tuple = return_t != contract_sig.return_type\n\n abi_return_t = abi_type_of(return_t)\n\n min_return_size = abi_return_t.min_size()\n max_return_size = abi_return_t.size_bound()\n assert 0 < min_return_size <= max_return_size\n\n ret_ofst = buf\n ret_len = max_return_size\n\n # revert when returndatasize is not in bounds\n ret = []\n # runtime: min_return_size <= returndatasize\n # TODO move the -1 optimization to LLL optimizer\n ret += [[\"assert\", [\"gt\", \"returndatasize\", min_return_size - 1]]]\n\n # add as the last LLLnode a pointer to the return data structure\n\n # the return type has been wrapped by the calling contract;\n # unwrap it so downstream code isn't confused.\n # basically this expands to buf+32 if the return type has been wrapped\n # in a tuple AND its ABI type is dynamic.\n # in most cases, this simply will evaluate to ret.\n # in the special case where the return type has been wrapped\n # in a tuple AND its ABI type is dynamic, it expands to buf+32.\n buf = LLLnode(buf, typ=return_t, encoding=_returndata_encoding(contract_sig), location=\"memory\")\n\n if should_unwrap_abi_tuple:\n buf = get_element_ptr(buf, 0, pos=None, array_bounds_check=False)\n\n ret += [buf]\n\n return ret, ret_ofst, ret_len\n\n\ndef _external_call_helper(\n contract_address, contract_sig, args_lll, context, pos=None, value=None, gas=None\n):\n\n if value is None:\n value = 0\n if gas is None:\n gas = \"gas\"\n\n # sanity check\n assert len(contract_sig.args) == len(args_lll)\n\n if context.is_constant() and contract_sig.mutability not in (\"view\", \"pure\"):\n # TODO is this already done in type checker?\n raise StateAccessViolation(\n f\"May not call state modifying function '{contract_sig.name}' \"\n f\"within {context.pp_constancy()}.\",\n pos,\n )\n\n sub = [\"seq\"]\n\n buf, arg_packer, args_ofst, args_len = _pack_arguments(contract_sig, args_lll, context, pos)\n\n ret_unpacker, ret_ofst, ret_len = _unpack_returndata(buf, contract_sig, context, pos)\n\n sub += arg_packer\n\n if contract_sig.return_type is None:\n # if we do not expect return data, check that a contract exists at the\n # target address. we must perform this check BEFORE the call because\n # the contract might selfdestruct. 
on the other hand we can omit this\n # when we _do_ expect return data because we later check\n # `returndatasize` (that check works even if the contract\n # selfdestructs).\n sub.append([\"assert\", [\"extcodesize\", contract_address]])\n\n if context.is_constant() or contract_sig.mutability in (\"view\", \"pure\"):\n call_op = [\"staticcall\", gas, contract_address, args_ofst, args_len, ret_ofst, ret_len]\n else:\n call_op = [\"call\", gas, contract_address, value, args_ofst, args_len, ret_ofst, ret_len]\n\n sub.append([\"assert\", call_op])\n\n if contract_sig.return_type is not None:\n sub += ret_unpacker\n\n ret = LLLnode.from_list(\n # set the encoding to ABI here, downstream code will decode and add clampers.\n sub,\n typ=contract_sig.return_type,\n location=\"memory\",\n encoding=_returndata_encoding(contract_sig),\n pos=pos,\n )\n\n return ret\n\n\n# TODO push me up to expr.py\ndef get_gas_and_value(stmt_expr, context):\n from vyper.old_codegen.expr import Expr # TODO rethink this circular import\n\n value, gas = None, None\n for kw in stmt_expr.keywords:\n if kw.arg == \"gas\":\n gas = Expr.parse_value_expr(kw.value, context)\n elif kw.arg == \"value\":\n value = Expr.parse_value_expr(kw.value, context)\n else:\n raise TypeCheckFailure(\"Unexpected keyword argument\")\n return value, gas\n\n\ndef lll_for_external_call(stmt_expr, context):\n from vyper.old_codegen.expr import Expr # TODO rethink this circular import\n\n pos = getpos(stmt_expr)\n value, gas = get_gas_and_value(stmt_expr, context)\n args_lll = [Expr(x, context).lll_node for x in stmt_expr.args]\n\n if isinstance(stmt_expr.func, vy_ast.Attribute) and isinstance(\n stmt_expr.func.value, vy_ast.Call\n ):\n # e.g. `Foo(address).bar()`\n\n # sanity check\n assert len(stmt_expr.func.value.args) == 1\n contract_name = stmt_expr.func.value.func.id\n contract_address = Expr.parse_value_expr(stmt_expr.func.value.args[0], context)\n\n elif (\n isinstance(stmt_expr.func.value, vy_ast.Attribute)\n and stmt_expr.func.value.attr in context.globals\n # TODO check for self?\n and hasattr(context.globals[stmt_expr.func.value.attr].typ, \"name\")\n ):\n # e.g. 
`self.foo.bar()`\n\n # sanity check\n assert stmt_expr.func.value.value.id == \"self\", stmt_expr\n\n contract_name = context.globals[stmt_expr.func.value.attr].typ.name\n type_ = stmt_expr.func.value._metadata[\"type\"]\n var = context.globals[stmt_expr.func.value.attr]\n contract_address = unwrap_location(\n LLLnode.from_list(\n type_.position.position,\n typ=var.typ,\n location=\"storage\",\n pos=pos,\n annotation=\"self.\" + stmt_expr.func.value.attr,\n )\n )\n else:\n # TODO catch this during type checking\n raise StructureException(\"Unsupported operator.\", stmt_expr)\n\n method_name = stmt_expr.func.attr\n contract_sig = context.sigs[contract_name][method_name]\n\n ret = _external_call_helper(\n contract_address,\n contract_sig,\n args_lll,\n context,\n pos,\n value=value,\n gas=gas,\n )\n ret.annotation = stmt_expr.get(\"node_source_code\")\n\n return ret\n", "path": "vyper/old_codegen/external_call.py"}], "after_files": [{"content": "import vyper.utils as util\nfrom vyper import ast as vy_ast\nfrom vyper.exceptions import StateAccessViolation, StructureException, TypeCheckFailure\nfrom vyper.old_codegen.abi import abi_encode, abi_type_of\nfrom vyper.old_codegen.lll_node import Encoding, LLLnode\nfrom vyper.old_codegen.parser_utils import (\n calculate_type_for_external_return,\n get_element_ptr,\n getpos,\n unwrap_location,\n)\nfrom vyper.old_codegen.types import TupleType, canonicalize_type, get_type_for_exact_size\nfrom vyper.old_codegen.types.check import check_assign\n\n\ndef _pack_arguments(contract_sig, args, context, pos):\n # abi encoding just treats all args as a big tuple\n args_tuple_t = TupleType([x.typ for x in args])\n args_as_tuple = LLLnode.from_list([\"multi\"] + [x for x in args], typ=args_tuple_t)\n args_abi_t = abi_type_of(args_tuple_t)\n\n # sanity typecheck - make sure the arguments can be assigned\n dst_tuple_t = TupleType([arg.typ for arg in contract_sig.args][: len(args)])\n _tmp = LLLnode(\"fake node\", location=\"memory\", typ=dst_tuple_t)\n check_assign(_tmp, args_as_tuple, pos)\n\n if contract_sig.return_type is not None:\n return_abi_t = abi_type_of(calculate_type_for_external_return(contract_sig.return_type))\n\n # we use the same buffer for args and returndata,\n # so allocate enough space here for the returndata too.\n buflen = max(args_abi_t.size_bound(), return_abi_t.size_bound())\n else:\n buflen = args_abi_t.size_bound()\n\n buflen += 32 # padding for the method id\n\n buf_t = get_type_for_exact_size(buflen)\n buf = context.new_internal_variable(buf_t)\n\n args_ofst = buf + 28\n args_len = args_abi_t.size_bound() + 4\n\n abi_signature = contract_sig.name + canonicalize_type(dst_tuple_t)\n\n # layout:\n # 32 bytes | args\n # 0x..00<method_id_4bytes> | args\n # the reason for the left padding is just so the alignment is easier.\n # if we were only targeting constantinople, we could align\n # to buf (and also keep code size small) by using\n # (mstore buf (shl signature.method_id 224))\n mstore_method_id = [[\"mstore\", buf, util.abi_method_id(abi_signature)]]\n\n if len(args) == 0:\n encode_args = [\"pass\"]\n else:\n encode_args = abi_encode(buf + 32, args_as_tuple, pos)\n\n return buf, mstore_method_id + [encode_args], args_ofst, args_len\n\n\ndef _returndata_encoding(contract_sig):\n if contract_sig.is_from_json:\n return Encoding.JSON_ABI\n return Encoding.ABI\n\n\ndef _unpack_returndata(buf, contract_sig, context, pos):\n return_t = contract_sig.return_type\n if return_t is None:\n return [\"pass\"], 0, 0\n\n return_t = 
calculate_type_for_external_return(return_t)\n # if the abi signature has a different type than\n # the vyper type, we need to wrap and unwrap the type\n # so that the ABI decoding works correctly\n should_unwrap_abi_tuple = return_t != contract_sig.return_type\n\n abi_return_t = abi_type_of(return_t)\n\n min_return_size = abi_return_t.min_size()\n max_return_size = abi_return_t.size_bound()\n assert 0 < min_return_size <= max_return_size\n\n ret_ofst = buf\n ret_len = max_return_size\n\n # revert when returndatasize is not in bounds\n ret = []\n # runtime: min_return_size <= returndatasize\n # TODO move the -1 optimization to LLL optimizer\n ret += [[\"assert\", [\"gt\", \"returndatasize\", min_return_size - 1]]]\n\n # add as the last LLLnode a pointer to the return data structure\n\n # the return type has been wrapped by the calling contract;\n # unwrap it so downstream code isn't confused.\n # basically this expands to buf+32 if the return type has been wrapped\n # in a tuple AND its ABI type is dynamic.\n # in most cases, this simply will evaluate to ret.\n # in the special case where the return type has been wrapped\n # in a tuple AND its ABI type is dynamic, it expands to buf+32.\n buf = LLLnode(buf, typ=return_t, encoding=_returndata_encoding(contract_sig), location=\"memory\")\n\n if should_unwrap_abi_tuple:\n buf = get_element_ptr(buf, 0, pos=None, array_bounds_check=False)\n\n ret += [buf]\n\n return ret, ret_ofst, ret_len\n\n\ndef _external_call_helper(\n contract_address, contract_sig, args_lll, context, pos=None, value=None, gas=None\n):\n\n if value is None:\n value = 0\n if gas is None:\n gas = \"gas\"\n\n # sanity check\n assert len(contract_sig.base_args) <= len(args_lll) <= len(contract_sig.args)\n\n if context.is_constant() and contract_sig.mutability not in (\"view\", \"pure\"):\n # TODO is this already done in type checker?\n raise StateAccessViolation(\n f\"May not call state modifying function '{contract_sig.name}' \"\n f\"within {context.pp_constancy()}.\",\n pos,\n )\n\n sub = [\"seq\"]\n\n buf, arg_packer, args_ofst, args_len = _pack_arguments(contract_sig, args_lll, context, pos)\n\n ret_unpacker, ret_ofst, ret_len = _unpack_returndata(buf, contract_sig, context, pos)\n\n sub += arg_packer\n\n if contract_sig.return_type is None:\n # if we do not expect return data, check that a contract exists at the\n # target address. we must perform this check BEFORE the call because\n # the contract might selfdestruct. 
on the other hand we can omit this\n # when we _do_ expect return data because we later check\n # `returndatasize` (that check works even if the contract\n # selfdestructs).\n sub.append([\"assert\", [\"extcodesize\", contract_address]])\n\n if context.is_constant() or contract_sig.mutability in (\"view\", \"pure\"):\n call_op = [\"staticcall\", gas, contract_address, args_ofst, args_len, ret_ofst, ret_len]\n else:\n call_op = [\"call\", gas, contract_address, value, args_ofst, args_len, ret_ofst, ret_len]\n\n sub.append([\"assert\", call_op])\n\n if contract_sig.return_type is not None:\n sub += ret_unpacker\n\n ret = LLLnode.from_list(\n # set the encoding to ABI here, downstream code will decode and add clampers.\n sub,\n typ=contract_sig.return_type,\n location=\"memory\",\n encoding=_returndata_encoding(contract_sig),\n pos=pos,\n )\n\n return ret\n\n\n# TODO push me up to expr.py\ndef get_gas_and_value(stmt_expr, context):\n from vyper.old_codegen.expr import Expr # TODO rethink this circular import\n\n value, gas = None, None\n for kw in stmt_expr.keywords:\n if kw.arg == \"gas\":\n gas = Expr.parse_value_expr(kw.value, context)\n elif kw.arg == \"value\":\n value = Expr.parse_value_expr(kw.value, context)\n else:\n raise TypeCheckFailure(\"Unexpected keyword argument\")\n return value, gas\n\n\ndef lll_for_external_call(stmt_expr, context):\n from vyper.old_codegen.expr import Expr # TODO rethink this circular import\n\n pos = getpos(stmt_expr)\n value, gas = get_gas_and_value(stmt_expr, context)\n args_lll = [Expr(x, context).lll_node for x in stmt_expr.args]\n\n if isinstance(stmt_expr.func, vy_ast.Attribute) and isinstance(\n stmt_expr.func.value, vy_ast.Call\n ):\n # e.g. `Foo(address).bar()`\n\n # sanity check\n assert len(stmt_expr.func.value.args) == 1\n contract_name = stmt_expr.func.value.func.id\n contract_address = Expr.parse_value_expr(stmt_expr.func.value.args[0], context)\n\n elif (\n isinstance(stmt_expr.func.value, vy_ast.Attribute)\n and stmt_expr.func.value.attr in context.globals\n # TODO check for self?\n and hasattr(context.globals[stmt_expr.func.value.attr].typ, \"name\")\n ):\n # e.g. `self.foo.bar()`\n\n # sanity check\n assert stmt_expr.func.value.value.id == \"self\", stmt_expr\n\n contract_name = context.globals[stmt_expr.func.value.attr].typ.name\n type_ = stmt_expr.func.value._metadata[\"type\"]\n var = context.globals[stmt_expr.func.value.attr]\n contract_address = unwrap_location(\n LLLnode.from_list(\n type_.position.position,\n typ=var.typ,\n location=\"storage\",\n pos=pos,\n annotation=\"self.\" + stmt_expr.func.value.attr,\n )\n )\n else:\n # TODO catch this during type checking\n raise StructureException(\"Unsupported operator.\", stmt_expr)\n\n method_name = stmt_expr.func.attr\n contract_sig = context.sigs[contract_name][method_name]\n\n ret = _external_call_helper(\n contract_address,\n contract_sig,\n args_lll,\n context,\n pos,\n value=value,\n gas=gas,\n )\n ret.annotation = stmt_expr.get(\"node_source_code\")\n\n return ret\n", "path": "vyper/old_codegen/external_call.py"}]}
3,425
141
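The vyper row above boils down to one relaxed assertion in `_external_call_helper`: the old code insisted that an external call pass exactly `len(contract_sig.args)` arguments, while the patch accepts anything between the required count (`base_args`) and the full count. The sketch below is plain Python with a mocked signature object (not vyper's real signature class) and only illustrates why the strict check rejects calls that rely on default arguments, which then surfaces as the `pack_arguments did not return a value` internal error.

```python
from types import SimpleNamespace

# Mocked stand-in for the signature of an external function with one defaulted parameter.
contract_sig = SimpleNamespace(
    base_args=["to", "amount"],           # required parameters
    args=["to", "amount", "data"],        # full parameter list; "data" has a default
)

def arg_count_ok(args_lll):
    old_ok = len(contract_sig.args) == len(args_lll)                                  # pre-patch check
    new_ok = len(contract_sig.base_args) <= len(args_lll) <= len(contract_sig.args)   # post-patch check
    return old_ok, new_ok

print(arg_count_ok(["0xabc", "100"]))            # (False, True): old assert fires, new bound passes
print(arg_count_ok(["0xabc", "100", "0xdead"]))  # (True, True): fully specified call passes either way
```

Whether the reporter's contract actually used default arguments is not visible from the issue (the failing code is only in screenshots), so the trigger described here is inferred from the shape of the fix.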
gh_patches_debug_57090
rasdani/github-patches
git_diff
sbi-dev__sbi-398
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- SNPE with NSF fails when sampling with MCMC This occurs in a very particular setting: `SNPE` inference with `NSF` density estimator and `sample_with_mcmc=True` (no matter which type of MCMC. - it works with `sample_with_mcmc=False`, - and it works with `SNLE`! I tried to chase it down, but no success so far. You can reproduce it locally by running ``` pytest -s tests/linearGaussian_snpe_test.py::test_c2st_snpe_external_data_on_linearGaussian ``` and setting https://github.com/mackelab/sbi/blob/6b5ed7be1d7522546b06c39aec1f206a354cc2ef/tests/linearGaussian_snpe_test.py#L286 to `True`. This is the error trace: ```python > samples = posterior.sample((num_samples,)) tests/linearGaussian_snpe_test.py:289: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ sbi/inference/posteriors/direct_posterior.py:336: in sample samples = self._sample_posterior_mcmc( sbi/inference/posteriors/base_posterior.py:333: in _sample_posterior_mcmc samples = self._slice_np_mcmc( sbi/inference/posteriors/base_posterior.py:397: in _slice_np_mcmc posterior_sampler.gen(int(warmup_steps)) sbi/mcmc/slice_numpy.py:93: in gen self._tune_bracket_width(rng) sbi/mcmc/slice_numpy.py:145: in _tune_bracket_width x[i], wi = self._sample_from_conditional(i, x[i], rng) sbi/mcmc/slice_numpy.py:173: in _sample_from_conditional while Li(lx) >= logu and cxi - lx < self.max_width: sbi/mcmc/slice_numpy.py:162: in <lambda> Li = lambda t: self.lp_f(np.concatenate([self.x[:i], [t], self.x[i + 1 :]])) sbi/inference/posteriors/direct_posterior.py:477: in np_potential target_log_prob = self.posterior_nn.log_prob( .sbi_env/lib/python3.8/site-packages/nflows/distributions/base.py:40: in log_prob return self._log_prob(inputs, context) .sbi_env/lib/python3.8/site-packages/nflows/flows/base.py:39: in _log_prob noise, logabsdet = self._transform(inputs, context=embedded_context) .sbi_env/lib/python3.8/site-packages/torch/nn/modules/module.py:722: in _call_impl result = self.forward(*input, **kwargs) .sbi_env/lib/python3.8/site-packages/nflows/transforms/base.py:56: in forward return self._cascade(inputs, funcs, context) .sbi_env/lib/python3.8/site-packages/nflows/transforms/base.py:50: in _cascade outputs, logabsdet = func(outputs, context) .sbi_env/lib/python3.8/site-packages/torch/nn/modules/module.py:722: in _call_impl result = self.forward(*input, **kwargs) .sbi_env/lib/python3.8/site-packages/nflows/transforms/base.py:56: in forward return self._cascade(inputs, funcs, context) .sbi_env/lib/python3.8/site-packages/nflows/transforms/base.py:50: in _cascade outputs, logabsdet = func(outputs, context) .sbi_env/lib/python3.8/site-packages/torch/nn/modules/module.py:722: in _call_impl result = self.forward(*input, **kwargs) .sbi_env/lib/python3.8/site-packages/nflows/transforms/base.py:56: in forward return self._cascade(inputs, funcs, context) .sbi_env/lib/python3.8/site-packages/nflows/transforms/base.py:50: in _cascade outputs, logabsdet = func(outputs, context) .sbi_env/lib/python3.8/site-packages/torch/nn/modules/module.py:722: in _call_impl result = self.forward(*input, **kwargs) .sbi_env/lib/python3.8/site-packages/nflows/transforms/coupling.py:84: in forward transform_split, logabsdet = self._coupling_transform_forward( .sbi_env/lib/python3.8/site-packages/nflows/transforms/coupling.py:194: in 
_coupling_transform_forward return self._coupling_transform(inputs, transform_params, inverse=False) .sbi_env/lib/python3.8/site-packages/nflows/transforms/coupling.py:211: in _coupling_transform outputs, logabsdet = self._piecewise_cdf(inputs, transform_params, inverse) .sbi_env/lib/python3.8/site-packages/nflows/transforms/coupling.py:492: in _piecewise_cdf return spline_fn( .sbi_env/lib/python3.8/site-packages/nflows/transforms/splines/rational_quadratic.py:45: in unconstrained_rational_quadratic_spline ) = rational_quadratic_spline( _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ inputs = tensor([]), unnormalized_widths = tensor([], size=(0, 10)), unnormalized_heights = tensor([], size=(0, 10)), unnormalized_derivatives = tensor([], size=(0, 11)) inverse = False, left = -3.0, right = 3.0, bottom = -3.0, top = 3.0, min_bin_width = 0.001, min_bin_height = 0.001, min_derivative = 0.001 def rational_quadratic_spline( inputs, unnormalized_widths, unnormalized_heights, unnormalized_derivatives, inverse=False, left=0.0, right=1.0, bottom=0.0, top=1.0, min_bin_width=DEFAULT_MIN_BIN_WIDTH, min_bin_height=DEFAULT_MIN_BIN_HEIGHT, min_derivative=DEFAULT_MIN_DERIVATIVE, ): > if torch.min(inputs) < left or torch.max(inputs) > right: E RuntimeError: operation does not have an identity. .sbi_env/lib/python3.8/site-packages/nflows/transforms/splines/rational_quadratic.py:77: RuntimeError ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 # 4 # This file is part of sbi, a toolkit for simulation-based inference. sbi is licensed 5 # under the Affero General Public License v3, see <https://www.gnu.org/licenses/>. 6 # 7 # Note: To use the 'upload' functionality of this file, you must: 8 # $ pipenv install twine --dev 9 10 import io 11 import os 12 import sys 13 from shutil import rmtree 14 15 from setuptools import find_packages, setup, Command 16 17 # Package meta-data. 18 NAME = "sbi" 19 DESCRIPTION = "Simulation-based inference." 20 KEYWORDS = "bayesian parameter inference system_identification simulator PyTorch" 21 URL = "https://github.com/mackelab/sbi" 22 EMAIL = "[email protected]" 23 AUTHOR = "Álvaro Tejero-Cantero, Jakob H. Macke, Jan-Matthis Lückmann, Conor M. Durkan, Michael Deistler, Jan Bölts" 24 REQUIRES_PYTHON = ">=3.6.0" 25 26 REQUIRED = [ 27 "joblib", 28 "matplotlib", 29 "numpy", 30 "pillow", 31 "pyknos>=0.12", 32 "pyro-ppl>=1.3.1", 33 "scipy", 34 "tensorboard", 35 "torch>=1.5.1", 36 "tqdm", 37 ] 38 39 EXTRAS = { 40 "dev": [ 41 "autoflake", 42 "black", 43 "deepdiff", 44 "flake8", 45 "isort", 46 "jupyter", 47 "mkdocs", 48 "mkdocs-material", 49 "markdown-include", 50 "mkdocs-redirects", 51 "mkdocstrings", 52 "nbconvert", 53 "pep517", 54 "pytest", 55 "pyyaml", 56 "scikit-learn", 57 "torchtestcase", 58 "twine", 59 ], 60 } 61 62 here = os.path.abspath(os.path.dirname(__file__)) 63 64 # Import the README and use it as the long-description. 65 try: 66 with io.open(os.path.join(here, "README.md"), encoding="utf-8") as f: 67 long_description = "\n" + f.read() 68 except FileNotFoundError: 69 long_description = DESCRIPTION 70 71 # Load the package's __version__.py module as a dictionary. 
72 about = {} 73 project_slug = NAME.lower().replace("-", "_").replace(" ", "_") 74 with open(os.path.join(here, project_slug, "__version__.py")) as f: 75 exec(f.read(), about) 76 77 78 class UploadCommand(Command): 79 """Support setup.py upload.""" 80 81 description = "Build and publish the package." 82 user_options = [] 83 84 @staticmethod 85 def status(s): 86 """Prints things in bold.""" 87 print("\033[1m{0}\033[0m".format(s)) 88 89 def initialize_options(self): 90 pass 91 92 def finalize_options(self): 93 pass 94 95 def run(self): 96 try: 97 self.status("Removing previous builds…") 98 rmtree(os.path.join(here, "dist")) 99 except OSError: 100 pass 101 102 self.status("Building Source and Wheel (universal) distribution…") 103 os.system("{0} setup.py sdist bdist_wheel --universal".format(sys.executable)) 104 105 self.status("Uploading the package to PyPI via Twine…") 106 os.system("twine upload dist/*") 107 108 self.status("Pushing git tags…") 109 os.system("git tag v{0}".format(about["__version__"])) 110 os.system("git push --tags") 111 112 sys.exit() 113 114 115 setup( 116 name=NAME, 117 version=about["__version__"], 118 description=DESCRIPTION, 119 keywords=KEYWORDS, 120 long_description=long_description, 121 long_description_content_type="text/markdown", 122 author=AUTHOR, 123 author_email=EMAIL, 124 python_requires=REQUIRES_PYTHON, 125 url=URL, 126 packages=find_packages(exclude=["tests", "*.tests", "*.tests.*", "tests.*"]), 127 install_requires=REQUIRED, 128 extras_require=EXTRAS, 129 include_package_data=True, 130 license="AGPLv3", 131 classifiers=[ 132 # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers 133 "Development Status :: 3 - Alpha", 134 "License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)", 135 "Intended Audience :: Developers", 136 "Intended Audience :: Education", 137 "Intended Audience :: Science/Research", 138 "Topic :: Adaptive Technologies", 139 "Topic :: Scientific/Engineering", 140 "Topic :: Scientific/Engineering :: Artificial Intelligence", 141 "Topic :: Scientific/Engineering :: Mathematics", 142 "Programming Language :: Python", 143 "Programming Language :: Python :: 3", 144 "Programming Language :: Python :: 3.6", 145 "Programming Language :: Python :: 3.7", 146 "Programming Language :: Python :: 3.8", 147 ], 148 # $ setup.py publish support. 149 cmdclass=dict(upload=UploadCommand), 150 ) 151 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -28,7 +28,7 @@ "matplotlib", "numpy", "pillow", - "pyknos>=0.12", + "pyknos>=0.14", "pyro-ppl>=1.3.1", "scipy", "tensorboard",
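For the sbi row, the patch itself is only a dependency bump (`pyknos>=0.12` to `pyknos>=0.14` in `setup.py`); the traceback's proximate failure is `torch.min` being applied to an empty tensor inside nflows' rational-quadratic spline while MCMC evaluates the NSF posterior. The snippet below reproduces just that empty-tensor reduction as an illustration — it is not the sbi call path, and the exact error wording differs across torch releases (the reporter's install produced "operation does not have an identity").

```python
import torch

# Minimal illustration of the proximate error in the traceback above: reducing an
# empty tensor has no identity element, so torch raises a RuntimeError. In the sbi
# traceback the empty tensor shows up as the transform half of a coupling layer.
empty = torch.tensor([])
try:
    torch.min(empty)
except RuntimeError as err:
    print("empty-tensor reduction failed:", err)
```

Why the newer pyknos pin keeps an empty slice from reaching the spline is not spelled out in the row; the version bump in the golden diff is the authoritative fix.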
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -28,7 +28,7 @@\n \"matplotlib\",\n \"numpy\",\n \"pillow\",\n- \"pyknos>=0.12\",\n+ \"pyknos>=0.14\",\n \"pyro-ppl>=1.3.1\",\n \"scipy\",\n \"tensorboard\",\n", "issue": "SNPE with NSF fails when sampling with MCMC\nThis occurs in a very particular setting: `SNPE` inference with `NSF` density estimator and `sample_with_mcmc=True` (no matter which type of MCMC. \r\n\r\n- it works with `sample_with_mcmc=False`, \r\n- and it works with `SNLE`! \r\n\r\nI tried to chase it down, but no success so far. You can reproduce it locally by running\r\n\r\n```\r\npytest -s tests/linearGaussian_snpe_test.py::test_c2st_snpe_external_data_on_linearGaussian\r\n```\r\n\r\nand setting \r\nhttps://github.com/mackelab/sbi/blob/6b5ed7be1d7522546b06c39aec1f206a354cc2ef/tests/linearGaussian_snpe_test.py#L286\r\n\r\nto `True`. \r\n\r\nThis is the error trace:\r\n```python\r\n\r\n> samples = posterior.sample((num_samples,))\r\n\r\ntests/linearGaussian_snpe_test.py:289:\r\n_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _\r\nsbi/inference/posteriors/direct_posterior.py:336: in sample\r\n samples = self._sample_posterior_mcmc(\r\nsbi/inference/posteriors/base_posterior.py:333: in _sample_posterior_mcmc\r\n samples = self._slice_np_mcmc(\r\nsbi/inference/posteriors/base_posterior.py:397: in _slice_np_mcmc\r\n posterior_sampler.gen(int(warmup_steps))\r\nsbi/mcmc/slice_numpy.py:93: in gen\r\n self._tune_bracket_width(rng)\r\nsbi/mcmc/slice_numpy.py:145: in _tune_bracket_width\r\n x[i], wi = self._sample_from_conditional(i, x[i], rng)\r\nsbi/mcmc/slice_numpy.py:173: in _sample_from_conditional\r\n while Li(lx) >= logu and cxi - lx < self.max_width:\r\nsbi/mcmc/slice_numpy.py:162: in <lambda>\r\n Li = lambda t: self.lp_f(np.concatenate([self.x[:i], [t], self.x[i + 1 :]]))\r\nsbi/inference/posteriors/direct_posterior.py:477: in np_potential\r\n target_log_prob = self.posterior_nn.log_prob(\r\n.sbi_env/lib/python3.8/site-packages/nflows/distributions/base.py:40: in log_prob\r\n return self._log_prob(inputs, context)\r\n.sbi_env/lib/python3.8/site-packages/nflows/flows/base.py:39: in _log_prob\r\n noise, logabsdet = self._transform(inputs, context=embedded_context)\r\n.sbi_env/lib/python3.8/site-packages/torch/nn/modules/module.py:722: in _call_impl\r\n result = self.forward(*input, **kwargs)\r\n.sbi_env/lib/python3.8/site-packages/nflows/transforms/base.py:56: in forward\r\n return self._cascade(inputs, funcs, context)\r\n.sbi_env/lib/python3.8/site-packages/nflows/transforms/base.py:50: in _cascade\r\n outputs, logabsdet = func(outputs, context)\r\n.sbi_env/lib/python3.8/site-packages/torch/nn/modules/module.py:722: in _call_impl\r\n result = self.forward(*input, **kwargs)\r\n.sbi_env/lib/python3.8/site-packages/nflows/transforms/base.py:56: in forward\r\n return self._cascade(inputs, funcs, context)\r\n.sbi_env/lib/python3.8/site-packages/nflows/transforms/base.py:50: in _cascade\r\n outputs, logabsdet = func(outputs, context)\r\n.sbi_env/lib/python3.8/site-packages/torch/nn/modules/module.py:722: in _call_impl\r\n result = self.forward(*input, **kwargs)\r\n.sbi_env/lib/python3.8/site-packages/nflows/transforms/base.py:56: in forward\r\n return self._cascade(inputs, funcs, context)\r\n.sbi_env/lib/python3.8/site-packages/nflows/transforms/base.py:50: in _cascade\r\n outputs, logabsdet = func(outputs, 
context)\r\n.sbi_env/lib/python3.8/site-packages/torch/nn/modules/module.py:722: in _call_impl\r\n result = self.forward(*input, **kwargs)\r\n.sbi_env/lib/python3.8/site-packages/nflows/transforms/coupling.py:84: in forward\r\n transform_split, logabsdet = self._coupling_transform_forward(\r\n.sbi_env/lib/python3.8/site-packages/nflows/transforms/coupling.py:194: in _coupling_transform_forward\r\n return self._coupling_transform(inputs, transform_params, inverse=False)\r\n.sbi_env/lib/python3.8/site-packages/nflows/transforms/coupling.py:211: in _coupling_transform\r\n outputs, logabsdet = self._piecewise_cdf(inputs, transform_params, inverse)\r\n.sbi_env/lib/python3.8/site-packages/nflows/transforms/coupling.py:492: in _piecewise_cdf\r\n return spline_fn(\r\n.sbi_env/lib/python3.8/site-packages/nflows/transforms/splines/rational_quadratic.py:45: in unconstrained_rational_quadratic_spline\r\n ) = rational_quadratic_spline(\r\n_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _\r\n\r\ninputs = tensor([]), unnormalized_widths = tensor([], size=(0, 10)), unnormalized_heights = tensor([], size=(0, 10)), unnormalized_derivatives = tensor([], size=(0, 11))\r\ninverse = False, left = -3.0, right = 3.0, bottom = -3.0, top = 3.0, min_bin_width = 0.001, min_bin_height = 0.001, min_derivative = 0.001\r\n\r\n def rational_quadratic_spline(\r\n inputs,\r\n unnormalized_widths,\r\n unnormalized_heights,\r\n unnormalized_derivatives,\r\n inverse=False,\r\n left=0.0,\r\n right=1.0,\r\n bottom=0.0,\r\n top=1.0,\r\n min_bin_width=DEFAULT_MIN_BIN_WIDTH,\r\n min_bin_height=DEFAULT_MIN_BIN_HEIGHT,\r\n min_derivative=DEFAULT_MIN_DERIVATIVE,\r\n ):\r\n> if torch.min(inputs) < left or torch.max(inputs) > right:\r\nE RuntimeError: operation does not have an identity.\r\n\r\n.sbi_env/lib/python3.8/site-packages/nflows/transforms/splines/rational_quadratic.py:77: RuntimeError\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# This file is part of sbi, a toolkit for simulation-based inference. sbi is licensed\n# under the Affero General Public License v3, see <https://www.gnu.org/licenses/>.\n#\n# Note: To use the 'upload' functionality of this file, you must:\n# $ pipenv install twine --dev\n\nimport io\nimport os\nimport sys\nfrom shutil import rmtree\n\nfrom setuptools import find_packages, setup, Command\n\n# Package meta-data.\nNAME = \"sbi\"\nDESCRIPTION = \"Simulation-based inference.\"\nKEYWORDS = \"bayesian parameter inference system_identification simulator PyTorch\"\nURL = \"https://github.com/mackelab/sbi\"\nEMAIL = \"[email protected]\"\nAUTHOR = \"\u00c1lvaro Tejero-Cantero, Jakob H. Macke, Jan-Matthis L\u00fcckmann, Conor M. 
Durkan, Michael Deistler, Jan B\u00f6lts\"\nREQUIRES_PYTHON = \">=3.6.0\"\n\nREQUIRED = [\n \"joblib\",\n \"matplotlib\",\n \"numpy\",\n \"pillow\",\n \"pyknos>=0.12\",\n \"pyro-ppl>=1.3.1\",\n \"scipy\",\n \"tensorboard\",\n \"torch>=1.5.1\",\n \"tqdm\",\n]\n\nEXTRAS = {\n \"dev\": [\n \"autoflake\",\n \"black\",\n \"deepdiff\",\n \"flake8\",\n \"isort\",\n \"jupyter\",\n \"mkdocs\",\n \"mkdocs-material\",\n \"markdown-include\",\n \"mkdocs-redirects\",\n \"mkdocstrings\",\n \"nbconvert\",\n \"pep517\",\n \"pytest\",\n \"pyyaml\",\n \"scikit-learn\",\n \"torchtestcase\",\n \"twine\",\n ],\n}\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n# Import the README and use it as the long-description.\ntry:\n with io.open(os.path.join(here, \"README.md\"), encoding=\"utf-8\") as f:\n long_description = \"\\n\" + f.read()\nexcept FileNotFoundError:\n long_description = DESCRIPTION\n\n# Load the package's __version__.py module as a dictionary.\nabout = {}\nproject_slug = NAME.lower().replace(\"-\", \"_\").replace(\" \", \"_\")\nwith open(os.path.join(here, project_slug, \"__version__.py\")) as f:\n exec(f.read(), about)\n\n\nclass UploadCommand(Command):\n \"\"\"Support setup.py upload.\"\"\"\n\n description = \"Build and publish the package.\"\n user_options = []\n\n @staticmethod\n def status(s):\n \"\"\"Prints things in bold.\"\"\"\n print(\"\\033[1m{0}\\033[0m\".format(s))\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n try:\n self.status(\"Removing previous builds\u2026\")\n rmtree(os.path.join(here, \"dist\"))\n except OSError:\n pass\n\n self.status(\"Building Source and Wheel (universal) distribution\u2026\")\n os.system(\"{0} setup.py sdist bdist_wheel --universal\".format(sys.executable))\n\n self.status(\"Uploading the package to PyPI via Twine\u2026\")\n os.system(\"twine upload dist/*\")\n\n self.status(\"Pushing git tags\u2026\")\n os.system(\"git tag v{0}\".format(about[\"__version__\"]))\n os.system(\"git push --tags\")\n\n sys.exit()\n\n\nsetup(\n name=NAME,\n version=about[\"__version__\"],\n description=DESCRIPTION,\n keywords=KEYWORDS,\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n author=AUTHOR,\n author_email=EMAIL,\n python_requires=REQUIRES_PYTHON,\n url=URL,\n packages=find_packages(exclude=[\"tests\", \"*.tests\", \"*.tests.*\", \"tests.*\"]),\n install_requires=REQUIRED,\n extras_require=EXTRAS,\n include_package_data=True,\n license=\"AGPLv3\",\n classifiers=[\n # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers\n \"Development Status :: 3 - Alpha\",\n \"License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Education\",\n \"Intended Audience :: Science/Research\",\n \"Topic :: Adaptive Technologies\",\n \"Topic :: Scientific/Engineering\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Topic :: Scientific/Engineering :: Mathematics\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n ],\n # $ setup.py publish support.\n cmdclass=dict(upload=UploadCommand),\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# This file is part of sbi, a toolkit for simulation-based inference. 
sbi is licensed\n# under the Affero General Public License v3, see <https://www.gnu.org/licenses/>.\n#\n# Note: To use the 'upload' functionality of this file, you must:\n# $ pipenv install twine --dev\n\nimport io\nimport os\nimport sys\nfrom shutil import rmtree\n\nfrom setuptools import find_packages, setup, Command\n\n# Package meta-data.\nNAME = \"sbi\"\nDESCRIPTION = \"Simulation-based inference.\"\nKEYWORDS = \"bayesian parameter inference system_identification simulator PyTorch\"\nURL = \"https://github.com/mackelab/sbi\"\nEMAIL = \"[email protected]\"\nAUTHOR = \"\u00c1lvaro Tejero-Cantero, Jakob H. Macke, Jan-Matthis L\u00fcckmann, Conor M. Durkan, Michael Deistler, Jan B\u00f6lts\"\nREQUIRES_PYTHON = \">=3.6.0\"\n\nREQUIRED = [\n \"joblib\",\n \"matplotlib\",\n \"numpy\",\n \"pillow\",\n \"pyknos>=0.14\",\n \"pyro-ppl>=1.3.1\",\n \"scipy\",\n \"tensorboard\",\n \"torch>=1.5.1\",\n \"tqdm\",\n]\n\nEXTRAS = {\n \"dev\": [\n \"autoflake\",\n \"black\",\n \"deepdiff\",\n \"flake8\",\n \"isort\",\n \"jupyter\",\n \"mkdocs\",\n \"mkdocs-material\",\n \"markdown-include\",\n \"mkdocs-redirects\",\n \"mkdocstrings\",\n \"nbconvert\",\n \"pep517\",\n \"pytest\",\n \"pyyaml\",\n \"scikit-learn\",\n \"torchtestcase\",\n \"twine\",\n ],\n}\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n# Import the README and use it as the long-description.\ntry:\n with io.open(os.path.join(here, \"README.md\"), encoding=\"utf-8\") as f:\n long_description = \"\\n\" + f.read()\nexcept FileNotFoundError:\n long_description = DESCRIPTION\n\n# Load the package's __version__.py module as a dictionary.\nabout = {}\nproject_slug = NAME.lower().replace(\"-\", \"_\").replace(\" \", \"_\")\nwith open(os.path.join(here, project_slug, \"__version__.py\")) as f:\n exec(f.read(), about)\n\n\nclass UploadCommand(Command):\n \"\"\"Support setup.py upload.\"\"\"\n\n description = \"Build and publish the package.\"\n user_options = []\n\n @staticmethod\n def status(s):\n \"\"\"Prints things in bold.\"\"\"\n print(\"\\033[1m{0}\\033[0m\".format(s))\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n try:\n self.status(\"Removing previous builds\u2026\")\n rmtree(os.path.join(here, \"dist\"))\n except OSError:\n pass\n\n self.status(\"Building Source and Wheel (universal) distribution\u2026\")\n os.system(\"{0} setup.py sdist bdist_wheel --universal\".format(sys.executable))\n\n self.status(\"Uploading the package to PyPI via Twine\u2026\")\n os.system(\"twine upload dist/*\")\n\n self.status(\"Pushing git tags\u2026\")\n os.system(\"git tag v{0}\".format(about[\"__version__\"]))\n os.system(\"git push --tags\")\n\n sys.exit()\n\n\nsetup(\n name=NAME,\n version=about[\"__version__\"],\n description=DESCRIPTION,\n keywords=KEYWORDS,\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n author=AUTHOR,\n author_email=EMAIL,\n python_requires=REQUIRES_PYTHON,\n url=URL,\n packages=find_packages(exclude=[\"tests\", \"*.tests\", \"*.tests.*\", \"tests.*\"]),\n install_requires=REQUIRED,\n extras_require=EXTRAS,\n include_package_data=True,\n license=\"AGPLv3\",\n classifiers=[\n # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers\n \"Development Status :: 3 - Alpha\",\n \"License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Education\",\n \"Intended Audience :: Science/Research\",\n \"Topic :: Adaptive Technologies\",\n \"Topic 
:: Scientific/Engineering\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Topic :: Scientific/Engineering :: Mathematics\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n ],\n # $ setup.py publish support.\n cmdclass=dict(upload=UploadCommand),\n)\n", "path": "setup.py"}]}
3300
92
gh_patches_debug_27218
rasdani/github-patches
git_diff
fedora-infra__bodhi-2906
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Drop bodhi.server.services.zz_redirects This module exists to redirect legacy Bodhi 1 URLs to the Bodhi 2 counterparts, but I don't think we need it anymore. Bodhi 2 is not backwards compatible with Bodhi 1, and Bodhi 4 will also be further incompatible. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `bodhi/server/services/zz_redirects.py` Content: ``` 1 # Copyright © 2015-2017 Red Hat, Inc. 2 # 3 # This file is part of Bodhi. 4 # 5 # This program is free software; you can redistribute it and/or 6 # modify it under the terms of the GNU General Public License 7 # as published by the Free Software Foundation; either version 2 8 # of the License, or (at your option) any later version. 9 # 10 # This program is distributed in the hope that it will be useful, 11 # but WITHOUT ANY WARRANTY; without even the implied warranty of 12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 # GNU General Public License for more details. 14 # 15 # You should have received a copy of the GNU General Public License 16 # along with this program; if not, write to the Free Software 17 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 18 """ 19 Handle general redirect stuff. 20 21 This module name gets a 'zz_' tacked on the front so that it comes last. 22 We need to catch /updates/{id}/request and /updates/{id}/edit first and those 23 get defined in the other service modules. 24 """ 25 26 from cornice import Service 27 from pyramid.httpexceptions import HTTPFound 28 29 import bodhi.server.security 30 31 32 zz_bodhi1_update_redirect = Service( 33 name='bodhi1_update_redirect', path='/updates/{id}/{title}', 34 description='Redirect to old updates/ALIAS/TITLE urls', 35 cors_origins=bodhi.server.security.cors_origins_rw) 36 37 38 @zz_bodhi1_update_redirect.get() 39 def zz_get_bodhi1_update_redirect(request): 40 """ 41 Redirect users from the Bodhi 1 update URL to the new path. 42 43 Args: 44 request (pyramid.request): The current web request. 45 Returns: 46 pyramid.httpexceptions.HTTPFound: A redirect to the same update in Bodhi's current URL 47 heirarchy. 48 """ 49 return HTTPFound("/updates/{0}".format(request.matchdict['id'])) 50 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/bodhi/server/services/zz_redirects.py b/bodhi/server/services/zz_redirects.py deleted file mode 100644 --- a/bodhi/server/services/zz_redirects.py +++ /dev/null @@ -1,49 +0,0 @@ -# Copyright © 2015-2017 Red Hat, Inc. -# -# This file is part of Bodhi. -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License -# as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. -""" -Handle general redirect stuff. - -This module name gets a 'zz_' tacked on the front so that it comes last. -We need to catch /updates/{id}/request and /updates/{id}/edit first and those -get defined in the other service modules. -""" - -from cornice import Service -from pyramid.httpexceptions import HTTPFound - -import bodhi.server.security - - -zz_bodhi1_update_redirect = Service( - name='bodhi1_update_redirect', path='/updates/{id}/{title}', - description='Redirect to old updates/ALIAS/TITLE urls', - cors_origins=bodhi.server.security.cors_origins_rw) - - -@zz_bodhi1_update_redirect.get() -def zz_get_bodhi1_update_redirect(request): - """ - Redirect users from the Bodhi 1 update URL to the new path. - - Args: - request (pyramid.request): The current web request. - Returns: - pyramid.httpexceptions.HTTPFound: A redirect to the same update in Bodhi's current URL - heirarchy. - """ - return HTTPFound("/updates/{0}".format(request.matchdict['id']))
{"golden_diff": "diff --git a/bodhi/server/services/zz_redirects.py b/bodhi/server/services/zz_redirects.py\ndeleted file mode 100644\n--- a/bodhi/server/services/zz_redirects.py\n+++ /dev/null\n@@ -1,49 +0,0 @@\n-# Copyright \u00a9 2015-2017 Red Hat, Inc.\n-#\n-# This file is part of Bodhi.\n-#\n-# This program is free software; you can redistribute it and/or\n-# modify it under the terms of the GNU General Public License\n-# as published by the Free Software Foundation; either version 2\n-# of the License, or (at your option) any later version.\n-#\n-# This program is distributed in the hope that it will be useful,\n-# but WITHOUT ANY WARRANTY; without even the implied warranty of\n-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n-# GNU General Public License for more details.\n-#\n-# You should have received a copy of the GNU General Public License\n-# along with this program; if not, write to the Free Software\n-# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n-\"\"\"\n-Handle general redirect stuff.\n-\n-This module name gets a 'zz_' tacked on the front so that it comes last.\n-We need to catch /updates/{id}/request and /updates/{id}/edit first and those\n-get defined in the other service modules.\n-\"\"\"\n-\n-from cornice import Service\n-from pyramid.httpexceptions import HTTPFound\n-\n-import bodhi.server.security\n-\n-\n-zz_bodhi1_update_redirect = Service(\n- name='bodhi1_update_redirect', path='/updates/{id}/{title}',\n- description='Redirect to old updates/ALIAS/TITLE urls',\n- cors_origins=bodhi.server.security.cors_origins_rw)\n-\n-\n-@zz_bodhi1_update_redirect.get()\n-def zz_get_bodhi1_update_redirect(request):\n- \"\"\"\n- Redirect users from the Bodhi 1 update URL to the new path.\n-\n- Args:\n- request (pyramid.request): The current web request.\n- Returns:\n- pyramid.httpexceptions.HTTPFound: A redirect to the same update in Bodhi's current URL\n- heirarchy.\n- \"\"\"\n- return HTTPFound(\"/updates/{0}\".format(request.matchdict['id']))\n", "issue": "Drop bodhi.server.services.zz_redirects\nThis module exists to redirect legacy Bodhi 1 URLs to the Bodhi 2 counterparts, but I don't think we need it anymore. Bodhi 2 is not backwards compatible with Bodhi 1, and Bodhi 4 will also be further incompatible.\n", "before_files": [{"content": "# Copyright \u00a9 2015-2017 Red Hat, Inc.\n#\n# This file is part of Bodhi.\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n\"\"\"\nHandle general redirect stuff.\n\nThis module name gets a 'zz_' tacked on the front so that it comes last.\nWe need to catch /updates/{id}/request and /updates/{id}/edit first and those\nget defined in the other service modules.\n\"\"\"\n\nfrom cornice import Service\nfrom pyramid.httpexceptions import HTTPFound\n\nimport bodhi.server.security\n\n\nzz_bodhi1_update_redirect = Service(\n name='bodhi1_update_redirect', path='/updates/{id}/{title}',\n description='Redirect to old updates/ALIAS/TITLE urls',\n cors_origins=bodhi.server.security.cors_origins_rw)\n\n\n@zz_bodhi1_update_redirect.get()\ndef zz_get_bodhi1_update_redirect(request):\n \"\"\"\n Redirect users from the Bodhi 1 update URL to the new path.\n\n Args:\n request (pyramid.request): The current web request.\n Returns:\n pyramid.httpexceptions.HTTPFound: A redirect to the same update in Bodhi's current URL\n heirarchy.\n \"\"\"\n return HTTPFound(\"/updates/{0}\".format(request.matchdict['id']))\n", "path": "bodhi/server/services/zz_redirects.py"}], "after_files": [{"content": null, "path": "bodhi/server/services/zz_redirects.py"}]}
854
538
gh_patches_debug_6351
rasdani/github-patches
git_diff
Kinto__kinto-657
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [cliquet] Add timeout option for Redis client original: https://github.com/mozilla-services/cliquet/issues/582 all in the title. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `kinto/core/storage/redis.py` Content: ``` 1 from __future__ import absolute_import, unicode_literals 2 from functools import wraps 3 4 import redis 5 from six.moves.urllib import parse as urlparse 6 7 from kinto.core import utils, logger 8 from kinto.core.storage import ( 9 exceptions, DEFAULT_ID_FIELD, 10 DEFAULT_MODIFIED_FIELD, DEFAULT_DELETED_FIELD) 11 from kinto.core.storage.memory import MemoryBasedStorage 12 13 14 def wrap_redis_error(func): 15 @wraps(func) 16 def wrapped(*args, **kwargs): 17 try: 18 return func(*args, **kwargs) 19 except redis.RedisError as e: 20 logger.exception(e) 21 raise exceptions.BackendError(original=e) 22 return wrapped 23 24 25 def create_from_config(config, prefix=''): 26 """Redis client instantiation from settings. 27 """ 28 settings = config.get_settings() 29 uri = settings[prefix + 'url'] 30 uri = urlparse.urlparse(uri) 31 pool_size = int(settings[prefix + 'pool_size']) 32 kwargs = { 33 "max_connections": pool_size, 34 "host": uri.hostname or 'localhost', 35 "port": uri.port or 6379, 36 "password": uri.password or None, 37 "db": int(uri.path[1:]) if uri.path else 0 38 } 39 connection_pool = redis.BlockingConnectionPool(**kwargs) 40 return redis.StrictRedis(connection_pool=connection_pool) 41 42 43 class Storage(MemoryBasedStorage): 44 """Storage backend implementation using Redis. 45 46 .. warning:: 47 48 Useful for very low server load, but won't scale since records sorting 49 and filtering are performed in memory. 50 51 Enable in configuration:: 52 53 kinto.storage_backend = kinto.core.storage.redis 54 55 *(Optional)* Instance location URI can be customized:: 56 57 kinto.storage_url = redis://localhost:6379/0 58 59 A threaded connection pool is enabled by default:: 60 61 kinto.storage_pool_size = 50 62 """ 63 64 def __init__(self, client, *args, **kwargs): 65 super(Storage, self).__init__(*args, **kwargs) 66 self._client = client 67 68 @property 69 def settings(self): 70 return dict(self._client.connection_pool.connection_kwargs) 71 72 def _encode(self, record): 73 return utils.json.dumps(record) 74 75 def _decode(self, record): 76 return utils.json.loads(record.decode('utf-8')) 77 78 @wrap_redis_error 79 def flush(self, auth=None): 80 self._client.flushdb() 81 82 @wrap_redis_error 83 def collection_timestamp(self, collection_id, parent_id, auth=None): 84 timestamp = self._client.get( 85 '{0}.{1}.timestamp'.format(collection_id, parent_id)) 86 if timestamp: 87 return int(timestamp) 88 return self._bump_timestamp(collection_id, parent_id) 89 90 @wrap_redis_error 91 def _bump_timestamp(self, collection_id, parent_id, record=None, 92 modified_field=None, last_modified=None): 93 94 key = '{0}.{1}.timestamp'.format(collection_id, parent_id) 95 while 1: 96 with self._client.pipeline() as pipe: 97 try: 98 pipe.watch(key) 99 previous = pipe.get(key) 100 pipe.multi() 101 # XXX factorize code from memory and redis backends. 102 is_specified = (record is not None and 103 modified_field in record or 104 last_modified is not None) 105 if is_specified: 106 # If there is a timestamp in the new record, 107 # try to use it. 
108 if last_modified is not None: 109 current = last_modified 110 else: 111 current = record[modified_field] 112 else: 113 current = utils.msec_time() 114 115 if previous and int(previous) >= current: 116 collection_timestamp = int(previous) + 1 117 else: 118 collection_timestamp = current 119 120 # Return the newly generated timestamp as the current one 121 # only if nothing else was specified. 122 if not is_specified: 123 current = collection_timestamp 124 125 pipe.set(key, collection_timestamp) 126 pipe.execute() 127 return current 128 except redis.WatchError: # pragma: no cover 129 # Our timestamp has been modified by someone else, let's 130 # retry. 131 # XXX: untested. 132 continue 133 134 @wrap_redis_error 135 def create(self, collection_id, parent_id, record, id_generator=None, 136 unique_fields=None, id_field=DEFAULT_ID_FIELD, 137 modified_field=DEFAULT_MODIFIED_FIELD, 138 auth=None): 139 self.check_unicity(collection_id, parent_id, record, 140 unique_fields=unique_fields, id_field=id_field, 141 for_creation=True) 142 143 record = record.copy() 144 id_generator = id_generator or self.id_generator 145 _id = record.setdefault(id_field, id_generator()) 146 self.set_record_timestamp(collection_id, parent_id, record, 147 modified_field=modified_field) 148 149 record_key = '{0}.{1}.{2}.records'.format(collection_id, 150 parent_id, 151 _id) 152 with self._client.pipeline() as multi: 153 multi.set( 154 record_key, 155 self._encode(record) 156 ) 157 multi.sadd( 158 '{0}.{1}.records'.format(collection_id, parent_id), 159 _id 160 ) 161 multi.srem( 162 '{0}.{1}.deleted'.format(collection_id, parent_id), 163 _id 164 ) 165 multi.execute() 166 167 return record 168 169 @wrap_redis_error 170 def get(self, collection_id, parent_id, object_id, 171 id_field=DEFAULT_ID_FIELD, 172 modified_field=DEFAULT_MODIFIED_FIELD, 173 auth=None): 174 record_key = '{0}.{1}.{2}.records'.format(collection_id, 175 parent_id, 176 object_id) 177 encoded_item = self._client.get(record_key) 178 if encoded_item is None: 179 raise exceptions.RecordNotFoundError(object_id) 180 181 return self._decode(encoded_item) 182 183 @wrap_redis_error 184 def update(self, collection_id, parent_id, object_id, record, 185 unique_fields=None, id_field=DEFAULT_ID_FIELD, 186 modified_field=DEFAULT_MODIFIED_FIELD, 187 auth=None): 188 record = record.copy() 189 record[id_field] = object_id 190 self.check_unicity(collection_id, parent_id, record, 191 unique_fields=unique_fields, id_field=id_field) 192 193 self.set_record_timestamp(collection_id, parent_id, record, 194 modified_field=modified_field) 195 196 record_key = '{0}.{1}.{2}.records'.format(collection_id, 197 parent_id, 198 object_id) 199 with self._client.pipeline() as multi: 200 multi.set( 201 record_key, 202 self._encode(record) 203 ) 204 multi.sadd( 205 '{0}.{1}.records'.format(collection_id, parent_id), 206 object_id 207 ) 208 multi.execute() 209 210 return record 211 212 @wrap_redis_error 213 def delete(self, collection_id, parent_id, object_id, 214 id_field=DEFAULT_ID_FIELD, with_deleted=True, 215 modified_field=DEFAULT_MODIFIED_FIELD, 216 deleted_field=DEFAULT_DELETED_FIELD, 217 auth=None, last_modified=None): 218 record_key = '{0}.{1}.{2}.records'.format(collection_id, 219 parent_id, 220 object_id) 221 with self._client.pipeline() as multi: 222 multi.get(record_key) 223 multi.delete(record_key) 224 multi.srem( 225 '{0}.{1}.records'.format(collection_id, parent_id), 226 object_id 227 ) 228 responses = multi.execute() 229 230 encoded_item = responses[0] 231 if encoded_item is 
None: 232 raise exceptions.RecordNotFoundError(object_id) 233 234 existing = self._decode(encoded_item) 235 236 # Need to delete the last_modified field. 237 del existing[modified_field] 238 239 self.set_record_timestamp(collection_id, parent_id, existing, 240 modified_field=modified_field, 241 last_modified=last_modified) 242 existing = self.strip_deleted_record(collection_id, parent_id, 243 existing) 244 245 if with_deleted: 246 deleted_record_key = '{0}.{1}.{2}.deleted'.format(collection_id, 247 parent_id, 248 object_id) 249 with self._client.pipeline() as multi: 250 multi.set( 251 deleted_record_key, 252 self._encode(existing) 253 ) 254 multi.sadd( 255 '{0}.{1}.deleted'.format(collection_id, parent_id), 256 object_id 257 ) 258 multi.execute() 259 260 return existing 261 262 @wrap_redis_error 263 def purge_deleted(self, collection_id, parent_id, before=None, 264 id_field=DEFAULT_ID_FIELD, 265 modified_field=DEFAULT_MODIFIED_FIELD, 266 auth=None): 267 deleted_ids = '{0}.{1}.deleted'.format(collection_id, parent_id) 268 ids = self._client.smembers(deleted_ids) 269 270 keys = ['{0}.{1}.{2}.deleted'.format(collection_id, parent_id, 271 _id.decode('utf-8')) 272 for _id in ids] 273 274 if len(keys) == 0: 275 deleted = [] 276 else: 277 encoded_results = self._client.mget(keys) 278 deleted = [self._decode(r) for r in encoded_results if r] 279 if before is not None: 280 to_remove = [d['id'] for d in deleted 281 if d[modified_field] < before] 282 else: 283 to_remove = [d['id'] for d in deleted] 284 285 if len(to_remove) > 0: 286 with self._client.pipeline() as pipe: 287 pipe.delete(*['{0}.{1}.{2}.deleted'.format( 288 collection_id, parent_id, _id) for _id in to_remove]) 289 pipe.srem(deleted_ids, *to_remove) 290 pipe.execute() 291 number_deleted = len(to_remove) 292 return number_deleted 293 294 @wrap_redis_error 295 def get_all(self, collection_id, parent_id, filters=None, sorting=None, 296 pagination_rules=None, limit=None, include_deleted=False, 297 id_field=DEFAULT_ID_FIELD, 298 modified_field=DEFAULT_MODIFIED_FIELD, 299 deleted_field=DEFAULT_DELETED_FIELD, 300 auth=None): 301 records_ids_key = '{0}.{1}.records'.format(collection_id, parent_id) 302 ids = self._client.smembers(records_ids_key) 303 304 keys = ('{0}.{1}.{2}.records'.format(collection_id, parent_id, 305 _id.decode('utf-8')) 306 for _id in ids) 307 308 if len(ids) == 0: 309 records = [] 310 else: 311 encoded_results = self._client.mget(keys) 312 records = [self._decode(r) for r in encoded_results if r] 313 314 deleted = [] 315 if include_deleted: 316 deleted_ids = '{0}.{1}.deleted'.format(collection_id, parent_id) 317 ids = self._client.smembers(deleted_ids) 318 319 keys = ['{0}.{1}.{2}.deleted'.format(collection_id, parent_id, 320 _id.decode('utf-8')) 321 for _id in ids] 322 323 if len(keys) == 0: 324 deleted = [] 325 else: 326 encoded_results = self._client.mget(keys) 327 deleted = [self._decode(r) for r in encoded_results if r] 328 329 records, count = self.extract_record_set(collection_id, 330 records + deleted, 331 filters, sorting, 332 id_field, deleted_field, 333 pagination_rules, limit) 334 335 return records, count 336 337 338 def load_from_config(config): 339 client = create_from_config(config, prefix='storage_') 340 return Storage(client) 341 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/kinto/core/storage/redis.py b/kinto/core/storage/redis.py --- a/kinto/core/storage/redis.py +++ b/kinto/core/storage/redis.py @@ -36,6 +36,10 @@ "password": uri.password or None, "db": int(uri.path[1:]) if uri.path else 0 } + block_timeout = settings.get(prefix + 'pool_timeout') + if block_timeout is not None: + kwargs["timeout"] = float(block_timeout) + connection_pool = redis.BlockingConnectionPool(**kwargs) return redis.StrictRedis(connection_pool=connection_pool)
{"golden_diff": "diff --git a/kinto/core/storage/redis.py b/kinto/core/storage/redis.py\n--- a/kinto/core/storage/redis.py\n+++ b/kinto/core/storage/redis.py\n@@ -36,6 +36,10 @@\n \"password\": uri.password or None,\n \"db\": int(uri.path[1:]) if uri.path else 0\n }\n+ block_timeout = settings.get(prefix + 'pool_timeout')\n+ if block_timeout is not None:\n+ kwargs[\"timeout\"] = float(block_timeout)\n+\n connection_pool = redis.BlockingConnectionPool(**kwargs)\n return redis.StrictRedis(connection_pool=connection_pool)\n", "issue": "[cliquet] Add timeout option for Redis client\noriginal: https://github.com/mozilla-services/cliquet/issues/582\n\nall in the title.\n\n", "before_files": [{"content": "from __future__ import absolute_import, unicode_literals\nfrom functools import wraps\n\nimport redis\nfrom six.moves.urllib import parse as urlparse\n\nfrom kinto.core import utils, logger\nfrom kinto.core.storage import (\n exceptions, DEFAULT_ID_FIELD,\n DEFAULT_MODIFIED_FIELD, DEFAULT_DELETED_FIELD)\nfrom kinto.core.storage.memory import MemoryBasedStorage\n\n\ndef wrap_redis_error(func):\n @wraps(func)\n def wrapped(*args, **kwargs):\n try:\n return func(*args, **kwargs)\n except redis.RedisError as e:\n logger.exception(e)\n raise exceptions.BackendError(original=e)\n return wrapped\n\n\ndef create_from_config(config, prefix=''):\n \"\"\"Redis client instantiation from settings.\n \"\"\"\n settings = config.get_settings()\n uri = settings[prefix + 'url']\n uri = urlparse.urlparse(uri)\n pool_size = int(settings[prefix + 'pool_size'])\n kwargs = {\n \"max_connections\": pool_size,\n \"host\": uri.hostname or 'localhost',\n \"port\": uri.port or 6379,\n \"password\": uri.password or None,\n \"db\": int(uri.path[1:]) if uri.path else 0\n }\n connection_pool = redis.BlockingConnectionPool(**kwargs)\n return redis.StrictRedis(connection_pool=connection_pool)\n\n\nclass Storage(MemoryBasedStorage):\n \"\"\"Storage backend implementation using Redis.\n\n .. 
warning::\n\n Useful for very low server load, but won't scale since records sorting\n and filtering are performed in memory.\n\n Enable in configuration::\n\n kinto.storage_backend = kinto.core.storage.redis\n\n *(Optional)* Instance location URI can be customized::\n\n kinto.storage_url = redis://localhost:6379/0\n\n A threaded connection pool is enabled by default::\n\n kinto.storage_pool_size = 50\n \"\"\"\n\n def __init__(self, client, *args, **kwargs):\n super(Storage, self).__init__(*args, **kwargs)\n self._client = client\n\n @property\n def settings(self):\n return dict(self._client.connection_pool.connection_kwargs)\n\n def _encode(self, record):\n return utils.json.dumps(record)\n\n def _decode(self, record):\n return utils.json.loads(record.decode('utf-8'))\n\n @wrap_redis_error\n def flush(self, auth=None):\n self._client.flushdb()\n\n @wrap_redis_error\n def collection_timestamp(self, collection_id, parent_id, auth=None):\n timestamp = self._client.get(\n '{0}.{1}.timestamp'.format(collection_id, parent_id))\n if timestamp:\n return int(timestamp)\n return self._bump_timestamp(collection_id, parent_id)\n\n @wrap_redis_error\n def _bump_timestamp(self, collection_id, parent_id, record=None,\n modified_field=None, last_modified=None):\n\n key = '{0}.{1}.timestamp'.format(collection_id, parent_id)\n while 1:\n with self._client.pipeline() as pipe:\n try:\n pipe.watch(key)\n previous = pipe.get(key)\n pipe.multi()\n # XXX factorize code from memory and redis backends.\n is_specified = (record is not None and\n modified_field in record or\n last_modified is not None)\n if is_specified:\n # If there is a timestamp in the new record,\n # try to use it.\n if last_modified is not None:\n current = last_modified\n else:\n current = record[modified_field]\n else:\n current = utils.msec_time()\n\n if previous and int(previous) >= current:\n collection_timestamp = int(previous) + 1\n else:\n collection_timestamp = current\n\n # Return the newly generated timestamp as the current one\n # only if nothing else was specified.\n if not is_specified:\n current = collection_timestamp\n\n pipe.set(key, collection_timestamp)\n pipe.execute()\n return current\n except redis.WatchError: # pragma: no cover\n # Our timestamp has been modified by someone else, let's\n # retry.\n # XXX: untested.\n continue\n\n @wrap_redis_error\n def create(self, collection_id, parent_id, record, id_generator=None,\n unique_fields=None, id_field=DEFAULT_ID_FIELD,\n modified_field=DEFAULT_MODIFIED_FIELD,\n auth=None):\n self.check_unicity(collection_id, parent_id, record,\n unique_fields=unique_fields, id_field=id_field,\n for_creation=True)\n\n record = record.copy()\n id_generator = id_generator or self.id_generator\n _id = record.setdefault(id_field, id_generator())\n self.set_record_timestamp(collection_id, parent_id, record,\n modified_field=modified_field)\n\n record_key = '{0}.{1}.{2}.records'.format(collection_id,\n parent_id,\n _id)\n with self._client.pipeline() as multi:\n multi.set(\n record_key,\n self._encode(record)\n )\n multi.sadd(\n '{0}.{1}.records'.format(collection_id, parent_id),\n _id\n )\n multi.srem(\n '{0}.{1}.deleted'.format(collection_id, parent_id),\n _id\n )\n multi.execute()\n\n return record\n\n @wrap_redis_error\n def get(self, collection_id, parent_id, object_id,\n id_field=DEFAULT_ID_FIELD,\n modified_field=DEFAULT_MODIFIED_FIELD,\n auth=None):\n record_key = '{0}.{1}.{2}.records'.format(collection_id,\n parent_id,\n object_id)\n encoded_item = self._client.get(record_key)\n if 
encoded_item is None:\n raise exceptions.RecordNotFoundError(object_id)\n\n return self._decode(encoded_item)\n\n @wrap_redis_error\n def update(self, collection_id, parent_id, object_id, record,\n unique_fields=None, id_field=DEFAULT_ID_FIELD,\n modified_field=DEFAULT_MODIFIED_FIELD,\n auth=None):\n record = record.copy()\n record[id_field] = object_id\n self.check_unicity(collection_id, parent_id, record,\n unique_fields=unique_fields, id_field=id_field)\n\n self.set_record_timestamp(collection_id, parent_id, record,\n modified_field=modified_field)\n\n record_key = '{0}.{1}.{2}.records'.format(collection_id,\n parent_id,\n object_id)\n with self._client.pipeline() as multi:\n multi.set(\n record_key,\n self._encode(record)\n )\n multi.sadd(\n '{0}.{1}.records'.format(collection_id, parent_id),\n object_id\n )\n multi.execute()\n\n return record\n\n @wrap_redis_error\n def delete(self, collection_id, parent_id, object_id,\n id_field=DEFAULT_ID_FIELD, with_deleted=True,\n modified_field=DEFAULT_MODIFIED_FIELD,\n deleted_field=DEFAULT_DELETED_FIELD,\n auth=None, last_modified=None):\n record_key = '{0}.{1}.{2}.records'.format(collection_id,\n parent_id,\n object_id)\n with self._client.pipeline() as multi:\n multi.get(record_key)\n multi.delete(record_key)\n multi.srem(\n '{0}.{1}.records'.format(collection_id, parent_id),\n object_id\n )\n responses = multi.execute()\n\n encoded_item = responses[0]\n if encoded_item is None:\n raise exceptions.RecordNotFoundError(object_id)\n\n existing = self._decode(encoded_item)\n\n # Need to delete the last_modified field.\n del existing[modified_field]\n\n self.set_record_timestamp(collection_id, parent_id, existing,\n modified_field=modified_field,\n last_modified=last_modified)\n existing = self.strip_deleted_record(collection_id, parent_id,\n existing)\n\n if with_deleted:\n deleted_record_key = '{0}.{1}.{2}.deleted'.format(collection_id,\n parent_id,\n object_id)\n with self._client.pipeline() as multi:\n multi.set(\n deleted_record_key,\n self._encode(existing)\n )\n multi.sadd(\n '{0}.{1}.deleted'.format(collection_id, parent_id),\n object_id\n )\n multi.execute()\n\n return existing\n\n @wrap_redis_error\n def purge_deleted(self, collection_id, parent_id, before=None,\n id_field=DEFAULT_ID_FIELD,\n modified_field=DEFAULT_MODIFIED_FIELD,\n auth=None):\n deleted_ids = '{0}.{1}.deleted'.format(collection_id, parent_id)\n ids = self._client.smembers(deleted_ids)\n\n keys = ['{0}.{1}.{2}.deleted'.format(collection_id, parent_id,\n _id.decode('utf-8'))\n for _id in ids]\n\n if len(keys) == 0:\n deleted = []\n else:\n encoded_results = self._client.mget(keys)\n deleted = [self._decode(r) for r in encoded_results if r]\n if before is not None:\n to_remove = [d['id'] for d in deleted\n if d[modified_field] < before]\n else:\n to_remove = [d['id'] for d in deleted]\n\n if len(to_remove) > 0:\n with self._client.pipeline() as pipe:\n pipe.delete(*['{0}.{1}.{2}.deleted'.format(\n collection_id, parent_id, _id) for _id in to_remove])\n pipe.srem(deleted_ids, *to_remove)\n pipe.execute()\n number_deleted = len(to_remove)\n return number_deleted\n\n @wrap_redis_error\n def get_all(self, collection_id, parent_id, filters=None, sorting=None,\n pagination_rules=None, limit=None, include_deleted=False,\n id_field=DEFAULT_ID_FIELD,\n modified_field=DEFAULT_MODIFIED_FIELD,\n deleted_field=DEFAULT_DELETED_FIELD,\n auth=None):\n records_ids_key = '{0}.{1}.records'.format(collection_id, parent_id)\n ids = self._client.smembers(records_ids_key)\n\n keys = 
('{0}.{1}.{2}.records'.format(collection_id, parent_id,\n _id.decode('utf-8'))\n for _id in ids)\n\n if len(ids) == 0:\n records = []\n else:\n encoded_results = self._client.mget(keys)\n records = [self._decode(r) for r in encoded_results if r]\n\n deleted = []\n if include_deleted:\n deleted_ids = '{0}.{1}.deleted'.format(collection_id, parent_id)\n ids = self._client.smembers(deleted_ids)\n\n keys = ['{0}.{1}.{2}.deleted'.format(collection_id, parent_id,\n _id.decode('utf-8'))\n for _id in ids]\n\n if len(keys) == 0:\n deleted = []\n else:\n encoded_results = self._client.mget(keys)\n deleted = [self._decode(r) for r in encoded_results if r]\n\n records, count = self.extract_record_set(collection_id,\n records + deleted,\n filters, sorting,\n id_field, deleted_field,\n pagination_rules, limit)\n\n return records, count\n\n\ndef load_from_config(config):\n client = create_from_config(config, prefix='storage_')\n return Storage(client)\n", "path": "kinto/core/storage/redis.py"}], "after_files": [{"content": "from __future__ import absolute_import, unicode_literals\nfrom functools import wraps\n\nimport redis\nfrom six.moves.urllib import parse as urlparse\n\nfrom kinto.core import utils, logger\nfrom kinto.core.storage import (\n exceptions, DEFAULT_ID_FIELD,\n DEFAULT_MODIFIED_FIELD, DEFAULT_DELETED_FIELD)\nfrom kinto.core.storage.memory import MemoryBasedStorage\n\n\ndef wrap_redis_error(func):\n @wraps(func)\n def wrapped(*args, **kwargs):\n try:\n return func(*args, **kwargs)\n except redis.RedisError as e:\n logger.exception(e)\n raise exceptions.BackendError(original=e)\n return wrapped\n\n\ndef create_from_config(config, prefix=''):\n \"\"\"Redis client instantiation from settings.\n \"\"\"\n settings = config.get_settings()\n uri = settings[prefix + 'url']\n uri = urlparse.urlparse(uri)\n pool_size = int(settings[prefix + 'pool_size'])\n kwargs = {\n \"max_connections\": pool_size,\n \"host\": uri.hostname or 'localhost',\n \"port\": uri.port or 6379,\n \"password\": uri.password or None,\n \"db\": int(uri.path[1:]) if uri.path else 0\n }\n block_timeout = settings.get(prefix + 'pool_timeout')\n if block_timeout is not None:\n kwargs[\"timeout\"] = float(block_timeout)\n\n connection_pool = redis.BlockingConnectionPool(**kwargs)\n return redis.StrictRedis(connection_pool=connection_pool)\n\n\nclass Storage(MemoryBasedStorage):\n \"\"\"Storage backend implementation using Redis.\n\n .. 
warning::\n\n Useful for very low server load, but won't scale since records sorting\n and filtering are performed in memory.\n\n Enable in configuration::\n\n kinto.storage_backend = kinto.core.storage.redis\n\n *(Optional)* Instance location URI can be customized::\n\n kinto.storage_url = redis://localhost:6379/0\n\n A threaded connection pool is enabled by default::\n\n kinto.storage_pool_size = 50\n \"\"\"\n\n def __init__(self, client, *args, **kwargs):\n super(Storage, self).__init__(*args, **kwargs)\n self._client = client\n\n @property\n def settings(self):\n return dict(self._client.connection_pool.connection_kwargs)\n\n def _encode(self, record):\n return utils.json.dumps(record)\n\n def _decode(self, record):\n return utils.json.loads(record.decode('utf-8'))\n\n @wrap_redis_error\n def flush(self, auth=None):\n self._client.flushdb()\n\n @wrap_redis_error\n def collection_timestamp(self, collection_id, parent_id, auth=None):\n timestamp = self._client.get(\n '{0}.{1}.timestamp'.format(collection_id, parent_id))\n if timestamp:\n return int(timestamp)\n return self._bump_timestamp(collection_id, parent_id)\n\n @wrap_redis_error\n def _bump_timestamp(self, collection_id, parent_id, record=None,\n modified_field=None, last_modified=None):\n\n key = '{0}.{1}.timestamp'.format(collection_id, parent_id)\n while 1:\n with self._client.pipeline() as pipe:\n try:\n pipe.watch(key)\n previous = pipe.get(key)\n pipe.multi()\n # XXX factorize code from memory and redis backends.\n is_specified = (record is not None and\n modified_field in record or\n last_modified is not None)\n if is_specified:\n # If there is a timestamp in the new record,\n # try to use it.\n if last_modified is not None:\n current = last_modified\n else:\n current = record[modified_field]\n else:\n current = utils.msec_time()\n\n if previous and int(previous) >= current:\n collection_timestamp = int(previous) + 1\n else:\n collection_timestamp = current\n\n # Return the newly generated timestamp as the current one\n # only if nothing else was specified.\n if not is_specified:\n current = collection_timestamp\n\n pipe.set(key, collection_timestamp)\n pipe.execute()\n return current\n except redis.WatchError: # pragma: no cover\n # Our timestamp has been modified by someone else, let's\n # retry.\n # XXX: untested.\n continue\n\n @wrap_redis_error\n def create(self, collection_id, parent_id, record, id_generator=None,\n unique_fields=None, id_field=DEFAULT_ID_FIELD,\n modified_field=DEFAULT_MODIFIED_FIELD,\n auth=None):\n self.check_unicity(collection_id, parent_id, record,\n unique_fields=unique_fields, id_field=id_field,\n for_creation=True)\n\n record = record.copy()\n id_generator = id_generator or self.id_generator\n _id = record.setdefault(id_field, id_generator())\n self.set_record_timestamp(collection_id, parent_id, record,\n modified_field=modified_field)\n\n record_key = '{0}.{1}.{2}.records'.format(collection_id,\n parent_id,\n _id)\n with self._client.pipeline() as multi:\n multi.set(\n record_key,\n self._encode(record)\n )\n multi.sadd(\n '{0}.{1}.records'.format(collection_id, parent_id),\n _id\n )\n multi.srem(\n '{0}.{1}.deleted'.format(collection_id, parent_id),\n _id\n )\n multi.execute()\n\n return record\n\n @wrap_redis_error\n def get(self, collection_id, parent_id, object_id,\n id_field=DEFAULT_ID_FIELD,\n modified_field=DEFAULT_MODIFIED_FIELD,\n auth=None):\n record_key = '{0}.{1}.{2}.records'.format(collection_id,\n parent_id,\n object_id)\n encoded_item = self._client.get(record_key)\n if 
encoded_item is None:\n raise exceptions.RecordNotFoundError(object_id)\n\n return self._decode(encoded_item)\n\n @wrap_redis_error\n def update(self, collection_id, parent_id, object_id, record,\n unique_fields=None, id_field=DEFAULT_ID_FIELD,\n modified_field=DEFAULT_MODIFIED_FIELD,\n auth=None):\n record = record.copy()\n record[id_field] = object_id\n self.check_unicity(collection_id, parent_id, record,\n unique_fields=unique_fields, id_field=id_field)\n\n self.set_record_timestamp(collection_id, parent_id, record,\n modified_field=modified_field)\n\n record_key = '{0}.{1}.{2}.records'.format(collection_id,\n parent_id,\n object_id)\n with self._client.pipeline() as multi:\n multi.set(\n record_key,\n self._encode(record)\n )\n multi.sadd(\n '{0}.{1}.records'.format(collection_id, parent_id),\n object_id\n )\n multi.execute()\n\n return record\n\n @wrap_redis_error\n def delete(self, collection_id, parent_id, object_id,\n id_field=DEFAULT_ID_FIELD, with_deleted=True,\n modified_field=DEFAULT_MODIFIED_FIELD,\n deleted_field=DEFAULT_DELETED_FIELD,\n auth=None, last_modified=None):\n record_key = '{0}.{1}.{2}.records'.format(collection_id,\n parent_id,\n object_id)\n with self._client.pipeline() as multi:\n multi.get(record_key)\n multi.delete(record_key)\n multi.srem(\n '{0}.{1}.records'.format(collection_id, parent_id),\n object_id\n )\n responses = multi.execute()\n\n encoded_item = responses[0]\n if encoded_item is None:\n raise exceptions.RecordNotFoundError(object_id)\n\n existing = self._decode(encoded_item)\n\n # Need to delete the last_modified field.\n del existing[modified_field]\n\n self.set_record_timestamp(collection_id, parent_id, existing,\n modified_field=modified_field,\n last_modified=last_modified)\n existing = self.strip_deleted_record(collection_id, parent_id,\n existing)\n\n if with_deleted:\n deleted_record_key = '{0}.{1}.{2}.deleted'.format(collection_id,\n parent_id,\n object_id)\n with self._client.pipeline() as multi:\n multi.set(\n deleted_record_key,\n self._encode(existing)\n )\n multi.sadd(\n '{0}.{1}.deleted'.format(collection_id, parent_id),\n object_id\n )\n multi.execute()\n\n return existing\n\n @wrap_redis_error\n def purge_deleted(self, collection_id, parent_id, before=None,\n id_field=DEFAULT_ID_FIELD,\n modified_field=DEFAULT_MODIFIED_FIELD,\n auth=None):\n deleted_ids = '{0}.{1}.deleted'.format(collection_id, parent_id)\n ids = self._client.smembers(deleted_ids)\n\n keys = ['{0}.{1}.{2}.deleted'.format(collection_id, parent_id,\n _id.decode('utf-8'))\n for _id in ids]\n\n if len(keys) == 0:\n deleted = []\n else:\n encoded_results = self._client.mget(keys)\n deleted = [self._decode(r) for r in encoded_results if r]\n if before is not None:\n to_remove = [d['id'] for d in deleted\n if d[modified_field] < before]\n else:\n to_remove = [d['id'] for d in deleted]\n\n if len(to_remove) > 0:\n with self._client.pipeline() as pipe:\n pipe.delete(*['{0}.{1}.{2}.deleted'.format(\n collection_id, parent_id, _id) for _id in to_remove])\n pipe.srem(deleted_ids, *to_remove)\n pipe.execute()\n number_deleted = len(to_remove)\n return number_deleted\n\n @wrap_redis_error\n def get_all(self, collection_id, parent_id, filters=None, sorting=None,\n pagination_rules=None, limit=None, include_deleted=False,\n id_field=DEFAULT_ID_FIELD,\n modified_field=DEFAULT_MODIFIED_FIELD,\n deleted_field=DEFAULT_DELETED_FIELD,\n auth=None):\n records_ids_key = '{0}.{1}.records'.format(collection_id, parent_id)\n ids = self._client.smembers(records_ids_key)\n\n keys = 
('{0}.{1}.{2}.records'.format(collection_id, parent_id,\n _id.decode('utf-8'))\n for _id in ids)\n\n if len(ids) == 0:\n records = []\n else:\n encoded_results = self._client.mget(keys)\n records = [self._decode(r) for r in encoded_results if r]\n\n deleted = []\n if include_deleted:\n deleted_ids = '{0}.{1}.deleted'.format(collection_id, parent_id)\n ids = self._client.smembers(deleted_ids)\n\n keys = ['{0}.{1}.{2}.deleted'.format(collection_id, parent_id,\n _id.decode('utf-8'))\n for _id in ids]\n\n if len(keys) == 0:\n deleted = []\n else:\n encoded_results = self._client.mget(keys)\n deleted = [self._decode(r) for r in encoded_results if r]\n\n records, count = self.extract_record_set(collection_id,\n records + deleted,\n filters, sorting,\n id_field, deleted_field,\n pagination_rules, limit)\n\n return records, count\n\n\ndef load_from_config(config):\n client = create_from_config(config, prefix='storage_')\n return Storage(client)\n", "path": "kinto/core/storage/redis.py"}]}
3624
138
gh_patches_debug_53402
rasdani/github-patches
git_diff
dask__distributed-7785
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Add Prometheus counter for `SystemMonitor.last_time` to improve GIL contention metric Currently, the loose coupling between the system monitor's update interval and the Prometheus scraping interval can cause artifacts like a relative GIL contention > 1 (https://github.com/dask/distributed/pull/7651#issuecomment-1490571845). By exposing the system monitor's update timestamp as a Counter, we would have a synchronized timestamp available in Prometheus to serve as the basis for rate calculations. This should make such artifacts impossible. cc @ntabris, @gjoseph92, @milesgranger: Thoughts? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `distributed/http/scheduler/prometheus/core.py` Content: ``` 1 from __future__ import annotations 2 3 from collections.abc import Iterator 4 from time import time 5 6 import prometheus_client 7 import toolz 8 from prometheus_client.core import CounterMetricFamily, GaugeMetricFamily 9 10 from distributed.http.prometheus import PrometheusCollector 11 from distributed.http.scheduler.prometheus.semaphore import SemaphoreMetricCollector 12 from distributed.http.scheduler.prometheus.stealing import WorkStealingMetricCollector 13 from distributed.http.utils import RequestHandler 14 from distributed.scheduler import ALL_TASK_STATES, Scheduler 15 16 17 class SchedulerMetricCollector(PrometheusCollector): 18 server: Scheduler 19 20 def __init__(self, server: Scheduler): 21 super().__init__(server) 22 self.subsystem = "scheduler" 23 24 def collect(self) -> Iterator[GaugeMetricFamily | CounterMetricFamily]: 25 yield GaugeMetricFamily( 26 self.build_name("clients"), 27 "Number of clients connected", 28 value=len([k for k in self.server.clients if k != "fire-and-forget"]), 29 ) 30 31 yield GaugeMetricFamily( 32 self.build_name("desired_workers"), 33 "Number of workers scheduler needs for task graph", 34 value=self.server.adaptive_target(), 35 ) 36 37 worker_states = GaugeMetricFamily( 38 self.build_name("workers"), 39 "Number of workers known by scheduler", 40 labels=["state"], 41 ) 42 worker_states.add_metric(["idle"], len(self.server.idle)) 43 worker_states.add_metric( 44 ["partially_saturated"], 45 len(self.server.running) 46 - len(self.server.idle) 47 - len(self.server.saturated), 48 ) 49 worker_states.add_metric(["saturated"], len(self.server.saturated)) 50 worker_states.add_metric( 51 ["paused_or_retiring"], len(self.server.workers) - len(self.server.running) 52 ) 53 yield worker_states 54 55 if self.server.monitor.monitor_gil_contention: 56 yield CounterMetricFamily( 57 self.build_name("gil_contention"), 58 "GIL contention metric", 59 value=self.server.monitor._cumulative_gil_contention, 60 ) 61 62 tasks = GaugeMetricFamily( 63 self.build_name("tasks"), 64 "Number of tasks known by scheduler", 65 labels=["state"], 66 ) 67 68 task_counter = toolz.merge_with( 69 sum, (tp.states for tp in self.server.task_prefixes.values()) 70 ) 71 72 suspicious_tasks = CounterMetricFamily( 73 self.build_name("tasks_suspicious"), 74 "Total number of times a task has been marked suspicious", 75 labels=["task_prefix_name"], 76 ) 77 78 for tp in self.server.task_prefixes.values(): 79 suspicious_tasks.add_metric([tp.name], tp.suspicious) 80 yield suspicious_tasks 81 82 yield CounterMetricFamily( 83 self.build_name("tasks_forgotten"), 84 ( 85 "Total number of processed tasks no longer in 
memory and already " 86 "removed from the scheduler job queue\n" 87 "Note: Task groups on the scheduler which have all tasks " 88 "in the forgotten state are not included." 89 ), 90 value=task_counter.get("forgotten", 0.0), 91 ) 92 93 for state in ALL_TASK_STATES: 94 if state != "forgotten": 95 tasks.add_metric([state], task_counter.get(state, 0.0)) 96 yield tasks 97 98 time_spent_compute_tasks = CounterMetricFamily( 99 self.build_name("tasks_compute"), 100 "Total amount of compute time spent in each prefix", 101 labels=["task_prefix_name"], 102 unit="seconds", 103 ) 104 105 for tp in self.server.task_prefixes.values(): 106 time_spent_compute_tasks.add_metric([tp.name], tp.all_durations["compute"]) 107 yield time_spent_compute_tasks 108 109 time_spent_transfer_tasks = CounterMetricFamily( 110 self.build_name("tasks_transfer"), 111 "Total amount of transfer time spent in each prefix", 112 labels=["task_prefix_name"], 113 unit="seconds", 114 ) 115 116 for tp in self.server.task_prefixes.values(): 117 time_spent_transfer_tasks.add_metric( 118 [tp.name], tp.all_durations["transfer"] 119 ) 120 yield time_spent_transfer_tasks 121 122 nbytes_tasks = GaugeMetricFamily( 123 self.build_name("tasks_output"), 124 "Current number of bytes in memory (without duplicates) for each prefix", 125 labels=["task_prefix_name"], 126 unit="bytes", 127 ) 128 for tp in self.server.task_prefixes.values(): 129 nbytes_tasks.add_metric([tp.name], tp.nbytes_total) 130 yield nbytes_tasks 131 132 prefix_state_counts = CounterMetricFamily( 133 self.build_name("prefix_state_totals"), 134 "Accumulated count of task prefix in each state", 135 labels=["task_prefix_name", "state"], 136 ) 137 138 for tp in self.server.task_prefixes.values(): 139 for state, count in tp.state_counts.items(): 140 prefix_state_counts.add_metric([tp.name, state], count) 141 yield prefix_state_counts 142 143 now = time() 144 max_tick_duration = max( 145 self.server.digests_max["tick_duration"], 146 now - self.server._last_tick, 147 ) 148 yield GaugeMetricFamily( 149 self.build_name("tick_duration_maximum_seconds"), 150 "Maximum tick duration observed since Prometheus last scraped metrics", 151 value=max_tick_duration, 152 ) 153 154 yield CounterMetricFamily( 155 self.build_name("tick_count_total"), 156 "Total number of ticks observed since the server started", 157 value=self.server._tick_counter, 158 ) 159 160 self.server.digests_max.clear() 161 162 163 COLLECTORS = [ 164 SchedulerMetricCollector, 165 SemaphoreMetricCollector, 166 WorkStealingMetricCollector, 167 ] 168 169 170 class PrometheusHandler(RequestHandler): 171 _collectors = None 172 173 def __init__(self, *args, dask_server=None, **kwargs): 174 super().__init__(*args, dask_server=dask_server, **kwargs) 175 176 if PrometheusHandler._collectors: 177 # Especially during testing, multiple schedulers are started 178 # sequentially in the same python process 179 for _collector in PrometheusHandler._collectors: 180 _collector.server = self.server 181 return 182 183 PrometheusHandler._collectors = tuple( 184 collector(self.server) for collector in COLLECTORS 185 ) 186 # Register collectors 187 for instantiated_collector in PrometheusHandler._collectors: 188 prometheus_client.REGISTRY.register(instantiated_collector) 189 190 def get(self): 191 self.write(prometheus_client.generate_latest()) 192 self.set_header("Content-Type", "text/plain; version=0.0.4") 193 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format 
fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/distributed/http/scheduler/prometheus/core.py b/distributed/http/scheduler/prometheus/core.py --- a/distributed/http/scheduler/prometheus/core.py +++ b/distributed/http/scheduler/prometheus/core.py @@ -59,6 +59,12 @@ value=self.server.monitor._cumulative_gil_contention, ) + yield CounterMetricFamily( + self.build_name("last_time"), + "SystemMonitor last time", + value=self.server.monitor.last_time, + ) + tasks = GaugeMetricFamily( self.build_name("tasks"), "Number of tasks known by scheduler",
{"golden_diff": "diff --git a/distributed/http/scheduler/prometheus/core.py b/distributed/http/scheduler/prometheus/core.py\n--- a/distributed/http/scheduler/prometheus/core.py\n+++ b/distributed/http/scheduler/prometheus/core.py\n@@ -59,6 +59,12 @@\n value=self.server.monitor._cumulative_gil_contention,\n )\n \n+ yield CounterMetricFamily(\n+ self.build_name(\"last_time\"),\n+ \"SystemMonitor last time\",\n+ value=self.server.monitor.last_time,\n+ )\n+\n tasks = GaugeMetricFamily(\n self.build_name(\"tasks\"),\n \"Number of tasks known by scheduler\",\n", "issue": "Add Prometheus counter for `SystemMonitor.last_time` to improve GIL contention metric\nCurrently, the loose coupling between the system monitor's update interval and the Prometheus scraping interval can cause artifacts like a relative GIL contention > 1 (https://github.com/dask/distributed/pull/7651#issuecomment-1490571845). By exposing the system monitor's update timestamp as a Counter, we would have a synchronized timestamp available in Prometheus to serve as the basis for rate calculations. This should make such artifacts impossible.\r\n\r\ncc @ntabris, @gjoseph92, @milesgranger: Thoughts?\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom collections.abc import Iterator\nfrom time import time\n\nimport prometheus_client\nimport toolz\nfrom prometheus_client.core import CounterMetricFamily, GaugeMetricFamily\n\nfrom distributed.http.prometheus import PrometheusCollector\nfrom distributed.http.scheduler.prometheus.semaphore import SemaphoreMetricCollector\nfrom distributed.http.scheduler.prometheus.stealing import WorkStealingMetricCollector\nfrom distributed.http.utils import RequestHandler\nfrom distributed.scheduler import ALL_TASK_STATES, Scheduler\n\n\nclass SchedulerMetricCollector(PrometheusCollector):\n server: Scheduler\n\n def __init__(self, server: Scheduler):\n super().__init__(server)\n self.subsystem = \"scheduler\"\n\n def collect(self) -> Iterator[GaugeMetricFamily | CounterMetricFamily]:\n yield GaugeMetricFamily(\n self.build_name(\"clients\"),\n \"Number of clients connected\",\n value=len([k for k in self.server.clients if k != \"fire-and-forget\"]),\n )\n\n yield GaugeMetricFamily(\n self.build_name(\"desired_workers\"),\n \"Number of workers scheduler needs for task graph\",\n value=self.server.adaptive_target(),\n )\n\n worker_states = GaugeMetricFamily(\n self.build_name(\"workers\"),\n \"Number of workers known by scheduler\",\n labels=[\"state\"],\n )\n worker_states.add_metric([\"idle\"], len(self.server.idle))\n worker_states.add_metric(\n [\"partially_saturated\"],\n len(self.server.running)\n - len(self.server.idle)\n - len(self.server.saturated),\n )\n worker_states.add_metric([\"saturated\"], len(self.server.saturated))\n worker_states.add_metric(\n [\"paused_or_retiring\"], len(self.server.workers) - len(self.server.running)\n )\n yield worker_states\n\n if self.server.monitor.monitor_gil_contention:\n yield CounterMetricFamily(\n self.build_name(\"gil_contention\"),\n \"GIL contention metric\",\n value=self.server.monitor._cumulative_gil_contention,\n )\n\n tasks = GaugeMetricFamily(\n self.build_name(\"tasks\"),\n \"Number of tasks known by scheduler\",\n labels=[\"state\"],\n )\n\n task_counter = toolz.merge_with(\n sum, (tp.states for tp in self.server.task_prefixes.values())\n )\n\n suspicious_tasks = CounterMetricFamily(\n self.build_name(\"tasks_suspicious\"),\n \"Total number of times a task has been marked suspicious\",\n labels=[\"task_prefix_name\"],\n 
)\n\n for tp in self.server.task_prefixes.values():\n suspicious_tasks.add_metric([tp.name], tp.suspicious)\n yield suspicious_tasks\n\n yield CounterMetricFamily(\n self.build_name(\"tasks_forgotten\"),\n (\n \"Total number of processed tasks no longer in memory and already \"\n \"removed from the scheduler job queue\\n\"\n \"Note: Task groups on the scheduler which have all tasks \"\n \"in the forgotten state are not included.\"\n ),\n value=task_counter.get(\"forgotten\", 0.0),\n )\n\n for state in ALL_TASK_STATES:\n if state != \"forgotten\":\n tasks.add_metric([state], task_counter.get(state, 0.0))\n yield tasks\n\n time_spent_compute_tasks = CounterMetricFamily(\n self.build_name(\"tasks_compute\"),\n \"Total amount of compute time spent in each prefix\",\n labels=[\"task_prefix_name\"],\n unit=\"seconds\",\n )\n\n for tp in self.server.task_prefixes.values():\n time_spent_compute_tasks.add_metric([tp.name], tp.all_durations[\"compute\"])\n yield time_spent_compute_tasks\n\n time_spent_transfer_tasks = CounterMetricFamily(\n self.build_name(\"tasks_transfer\"),\n \"Total amount of transfer time spent in each prefix\",\n labels=[\"task_prefix_name\"],\n unit=\"seconds\",\n )\n\n for tp in self.server.task_prefixes.values():\n time_spent_transfer_tasks.add_metric(\n [tp.name], tp.all_durations[\"transfer\"]\n )\n yield time_spent_transfer_tasks\n\n nbytes_tasks = GaugeMetricFamily(\n self.build_name(\"tasks_output\"),\n \"Current number of bytes in memory (without duplicates) for each prefix\",\n labels=[\"task_prefix_name\"],\n unit=\"bytes\",\n )\n for tp in self.server.task_prefixes.values():\n nbytes_tasks.add_metric([tp.name], tp.nbytes_total)\n yield nbytes_tasks\n\n prefix_state_counts = CounterMetricFamily(\n self.build_name(\"prefix_state_totals\"),\n \"Accumulated count of task prefix in each state\",\n labels=[\"task_prefix_name\", \"state\"],\n )\n\n for tp in self.server.task_prefixes.values():\n for state, count in tp.state_counts.items():\n prefix_state_counts.add_metric([tp.name, state], count)\n yield prefix_state_counts\n\n now = time()\n max_tick_duration = max(\n self.server.digests_max[\"tick_duration\"],\n now - self.server._last_tick,\n )\n yield GaugeMetricFamily(\n self.build_name(\"tick_duration_maximum_seconds\"),\n \"Maximum tick duration observed since Prometheus last scraped metrics\",\n value=max_tick_duration,\n )\n\n yield CounterMetricFamily(\n self.build_name(\"tick_count_total\"),\n \"Total number of ticks observed since the server started\",\n value=self.server._tick_counter,\n )\n\n self.server.digests_max.clear()\n\n\nCOLLECTORS = [\n SchedulerMetricCollector,\n SemaphoreMetricCollector,\n WorkStealingMetricCollector,\n]\n\n\nclass PrometheusHandler(RequestHandler):\n _collectors = None\n\n def __init__(self, *args, dask_server=None, **kwargs):\n super().__init__(*args, dask_server=dask_server, **kwargs)\n\n if PrometheusHandler._collectors:\n # Especially during testing, multiple schedulers are started\n # sequentially in the same python process\n for _collector in PrometheusHandler._collectors:\n _collector.server = self.server\n return\n\n PrometheusHandler._collectors = tuple(\n collector(self.server) for collector in COLLECTORS\n )\n # Register collectors\n for instantiated_collector in PrometheusHandler._collectors:\n prometheus_client.REGISTRY.register(instantiated_collector)\n\n def get(self):\n self.write(prometheus_client.generate_latest())\n self.set_header(\"Content-Type\", \"text/plain; version=0.0.4\")\n", "path": 
"distributed/http/scheduler/prometheus/core.py"}], "after_files": [{"content": "from __future__ import annotations\n\nfrom collections.abc import Iterator\nfrom time import time\n\nimport prometheus_client\nimport toolz\nfrom prometheus_client.core import CounterMetricFamily, GaugeMetricFamily\n\nfrom distributed.http.prometheus import PrometheusCollector\nfrom distributed.http.scheduler.prometheus.semaphore import SemaphoreMetricCollector\nfrom distributed.http.scheduler.prometheus.stealing import WorkStealingMetricCollector\nfrom distributed.http.utils import RequestHandler\nfrom distributed.scheduler import ALL_TASK_STATES, Scheduler\n\n\nclass SchedulerMetricCollector(PrometheusCollector):\n server: Scheduler\n\n def __init__(self, server: Scheduler):\n super().__init__(server)\n self.subsystem = \"scheduler\"\n\n def collect(self) -> Iterator[GaugeMetricFamily | CounterMetricFamily]:\n yield GaugeMetricFamily(\n self.build_name(\"clients\"),\n \"Number of clients connected\",\n value=len([k for k in self.server.clients if k != \"fire-and-forget\"]),\n )\n\n yield GaugeMetricFamily(\n self.build_name(\"desired_workers\"),\n \"Number of workers scheduler needs for task graph\",\n value=self.server.adaptive_target(),\n )\n\n worker_states = GaugeMetricFamily(\n self.build_name(\"workers\"),\n \"Number of workers known by scheduler\",\n labels=[\"state\"],\n )\n worker_states.add_metric([\"idle\"], len(self.server.idle))\n worker_states.add_metric(\n [\"partially_saturated\"],\n len(self.server.running)\n - len(self.server.idle)\n - len(self.server.saturated),\n )\n worker_states.add_metric([\"saturated\"], len(self.server.saturated))\n worker_states.add_metric(\n [\"paused_or_retiring\"], len(self.server.workers) - len(self.server.running)\n )\n yield worker_states\n\n if self.server.monitor.monitor_gil_contention:\n yield CounterMetricFamily(\n self.build_name(\"gil_contention\"),\n \"GIL contention metric\",\n value=self.server.monitor._cumulative_gil_contention,\n )\n\n yield CounterMetricFamily(\n self.build_name(\"last_time\"),\n \"SystemMonitor last time\",\n value=self.server.monitor.last_time,\n )\n\n tasks = GaugeMetricFamily(\n self.build_name(\"tasks\"),\n \"Number of tasks known by scheduler\",\n labels=[\"state\"],\n )\n\n task_counter = toolz.merge_with(\n sum, (tp.states for tp in self.server.task_prefixes.values())\n )\n\n suspicious_tasks = CounterMetricFamily(\n self.build_name(\"tasks_suspicious\"),\n \"Total number of times a task has been marked suspicious\",\n labels=[\"task_prefix_name\"],\n )\n\n for tp in self.server.task_prefixes.values():\n suspicious_tasks.add_metric([tp.name], tp.suspicious)\n yield suspicious_tasks\n\n yield CounterMetricFamily(\n self.build_name(\"tasks_forgotten\"),\n (\n \"Total number of processed tasks no longer in memory and already \"\n \"removed from the scheduler job queue\\n\"\n \"Note: Task groups on the scheduler which have all tasks \"\n \"in the forgotten state are not included.\"\n ),\n value=task_counter.get(\"forgotten\", 0.0),\n )\n\n for state in ALL_TASK_STATES:\n if state != \"forgotten\":\n tasks.add_metric([state], task_counter.get(state, 0.0))\n yield tasks\n\n time_spent_compute_tasks = CounterMetricFamily(\n self.build_name(\"tasks_compute\"),\n \"Total amount of compute time spent in each prefix\",\n labels=[\"task_prefix_name\"],\n unit=\"seconds\",\n )\n\n for tp in self.server.task_prefixes.values():\n time_spent_compute_tasks.add_metric([tp.name], tp.all_durations[\"compute\"])\n yield 
time_spent_compute_tasks\n\n time_spent_transfer_tasks = CounterMetricFamily(\n self.build_name(\"tasks_transfer\"),\n \"Total amount of transfer time spent in each prefix\",\n labels=[\"task_prefix_name\"],\n unit=\"seconds\",\n )\n\n for tp in self.server.task_prefixes.values():\n time_spent_transfer_tasks.add_metric(\n [tp.name], tp.all_durations[\"transfer\"]\n )\n yield time_spent_transfer_tasks\n\n nbytes_tasks = GaugeMetricFamily(\n self.build_name(\"tasks_output\"),\n \"Current number of bytes in memory (without duplicates) for each prefix\",\n labels=[\"task_prefix_name\"],\n unit=\"bytes\",\n )\n for tp in self.server.task_prefixes.values():\n nbytes_tasks.add_metric([tp.name], tp.nbytes_total)\n yield nbytes_tasks\n\n prefix_state_counts = CounterMetricFamily(\n self.build_name(\"prefix_state_totals\"),\n \"Accumulated count of task prefix in each state\",\n labels=[\"task_prefix_name\", \"state\"],\n )\n\n for tp in self.server.task_prefixes.values():\n for state, count in tp.state_counts.items():\n prefix_state_counts.add_metric([tp.name, state], count)\n yield prefix_state_counts\n\n now = time()\n max_tick_duration = max(\n self.server.digests_max[\"tick_duration\"],\n now - self.server._last_tick,\n )\n yield GaugeMetricFamily(\n self.build_name(\"tick_duration_maximum_seconds\"),\n \"Maximum tick duration observed since Prometheus last scraped metrics\",\n value=max_tick_duration,\n )\n\n yield CounterMetricFamily(\n self.build_name(\"tick_count_total\"),\n \"Total number of ticks observed since the server started\",\n value=self.server._tick_counter,\n )\n\n self.server.digests_max.clear()\n\n\nCOLLECTORS = [\n SchedulerMetricCollector,\n SemaphoreMetricCollector,\n WorkStealingMetricCollector,\n]\n\n\nclass PrometheusHandler(RequestHandler):\n _collectors = None\n\n def __init__(self, *args, dask_server=None, **kwargs):\n super().__init__(*args, dask_server=dask_server, **kwargs)\n\n if PrometheusHandler._collectors:\n # Especially during testing, multiple schedulers are started\n # sequentially in the same python process\n for _collector in PrometheusHandler._collectors:\n _collector.server = self.server\n return\n\n PrometheusHandler._collectors = tuple(\n collector(self.server) for collector in COLLECTORS\n )\n # Register collectors\n for instantiated_collector in PrometheusHandler._collectors:\n prometheus_client.REGISTRY.register(instantiated_collector)\n\n def get(self):\n self.write(prometheus_client.generate_latest())\n self.set_header(\"Content-Type\", \"text/plain; version=0.0.4\")\n", "path": "distributed/http/scheduler/prometheus/core.py"}]}
2,214
137
gh_patches_debug_27331
rasdani/github-patches
git_diff
netbox-community__netbox-5447
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Showing config context with multiple tags assigned fails with MultipleObjectsReturned ### Environment * Python version: 3.8.6 * NetBox version: 2.9.10 ### Steps to Reproduce 1. create a virtual machine 2. add two tags (which result in adding data to config context) 3. Open Config context of that VM <!-- What did you expect to happen? --> ### Expected Behavior See config context <!-- What happened instead? --> ### Observed Behavior See an error ``` <class 'virtualization.models.VirtualMachine.MultipleObjectsReturned'> get() returned more than one VirtualMachine -- it returned 2! ``` ``` netbox_1 | Internal Server Error: /virtualization/virtual-machines/70/config-context/ netbox_1 | Traceback (most recent call last): netbox_1 | File "/usr/local/lib/python3.8/site-packages/django/core/handlers/exception.py", line 47, in inner netbox_1 | response = get_response(request) netbox_1 | File "/usr/local/lib/python3.8/site-packages/django/core/handlers/base.py", line 179, in _get_response netbox_1 | response = wrapped_callback(request, *callback_args, **callback_kwargs) netbox_1 | File "/usr/local/lib/python3.8/site-packages/django/views/generic/base.py", line 73, in view netbox_1 | return self.dispatch(request, *args, **kwargs) netbox_1 | File "/opt/netbox/netbox/utilities/views.py", line 124, in dispatch netbox_1 | return super().dispatch(request, *args, **kwargs) netbox_1 | File "/usr/local/lib/python3.8/site-packages/django/views/generic/base.py", line 101, in dispatch netbox_1 | return handler(request, *args, **kwargs) netbox_1 | File "/opt/netbox/netbox/extras/views.py", line 146, in get netbox_1 | obj = get_object_or_404(self.queryset, pk=pk) netbox_1 | File "/usr/local/lib/python3.8/site-packages/django/shortcuts.py", line 76, in get_object_or_404 netbox_1 | return queryset.get(*args, **kwargs) netbox_1 | File "/usr/local/lib/python3.8/site-packages/cacheops/query.py", line 353, in get netbox_1 | return qs._no_monkey.get(qs, *args, **kwargs) netbox_1 | File "/usr/local/lib/python3.8/site-packages/django/db/models/query.py", line 433, in get netbox_1 | raise self.model.MultipleObjectsReturned( netbox_1 | virtualization.models.VirtualMachine.MultipleObjectsReturned: get() returned more than one VirtualMachine -- it returned 2! netbox_1 | 192.168.80.7 - - [29/Nov/2020:18:45:03 +0000] "GET /virtualization/virtual-machines/70/config-context/ HTTP/1.0" 500 1855 "-" "<cut>" ``` Note: I wrote this already in https://github.com/netbox-community/netbox/issues/5314#issuecomment-724722310 and [a change](https://github.com/netbox-community/netbox/commit/0d27abc6fc22a8d40183a59eceef5dda57e99eae) got introduced for 2.9 to fix it but in 2.10 it is still present. I got asked to create a new issue. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `netbox/extras/querysets.py` Content: ``` 1 from collections import OrderedDict 2 3 from django.db.models import OuterRef, Subquery, Q 4 5 from utilities.query_functions import EmptyGroupByJSONBAgg, OrderableJSONBAgg 6 from utilities.querysets import RestrictedQuerySet 7 8 9 class CustomFieldQueryset: 10 """ 11 Annotate custom fields on objects within a QuerySet. 
12 """ 13 def __init__(self, queryset, custom_fields): 14 self.queryset = queryset 15 self.model = queryset.model 16 self.custom_fields = custom_fields 17 18 def __iter__(self): 19 for obj in self.queryset: 20 values_dict = {cfv.field_id: cfv.value for cfv in obj.custom_field_values.all()} 21 obj.custom_fields = OrderedDict([(field, values_dict.get(field.pk)) for field in self.custom_fields]) 22 yield obj 23 24 25 class ConfigContextQuerySet(RestrictedQuerySet): 26 27 def get_for_object(self, obj, aggregate_data=False): 28 """ 29 Return all applicable ConfigContexts for a given object. Only active ConfigContexts will be included. 30 31 Args: 32 aggregate_data: If True, use the JSONBAgg aggregate function to return only the list of JSON data objects 33 """ 34 35 # `device_role` for Device; `role` for VirtualMachine 36 role = getattr(obj, 'device_role', None) or obj.role 37 38 # Virtualization cluster for VirtualMachine 39 cluster = getattr(obj, 'cluster', None) 40 cluster_group = getattr(cluster, 'group', None) 41 42 # Get the group of the assigned tenant, if any 43 tenant_group = obj.tenant.group if obj.tenant else None 44 45 # Match against the directly assigned region as well as any parent regions. 46 region = getattr(obj.site, 'region', None) 47 if region: 48 regions = region.get_ancestors(include_self=True) 49 else: 50 regions = [] 51 52 queryset = self.filter( 53 Q(regions__in=regions) | Q(regions=None), 54 Q(sites=obj.site) | Q(sites=None), 55 Q(roles=role) | Q(roles=None), 56 Q(platforms=obj.platform) | Q(platforms=None), 57 Q(cluster_groups=cluster_group) | Q(cluster_groups=None), 58 Q(clusters=cluster) | Q(clusters=None), 59 Q(tenant_groups=tenant_group) | Q(tenant_groups=None), 60 Q(tenants=obj.tenant) | Q(tenants=None), 61 Q(tags__slug__in=obj.tags.slugs()) | Q(tags=None), 62 is_active=True, 63 ).order_by('weight', 'name').distinct() 64 65 if aggregate_data: 66 return queryset.aggregate( 67 config_context_data=OrderableJSONBAgg('data', ordering=['weight', 'name']) 68 )['config_context_data'] 69 70 return queryset 71 72 73 class ConfigContextModelQuerySet(RestrictedQuerySet): 74 """ 75 QuerySet manager used by models which support ConfigContext (device and virtual machine). 76 77 Includes a method which appends an annotation of aggregated config context JSON data objects. This is 78 implemented as a subquery which performs all the joins necessary to filter relevant config context objects. 79 This offers a substantial performance gain over ConfigContextQuerySet.get_for_object() when dealing with 80 multiple objects. 81 82 This allows the annotation to be entirely optional. 
83 """ 84 85 def annotate_config_context_data(self): 86 """ 87 Attach the subquery annotation to the base queryset 88 """ 89 from extras.models import ConfigContext 90 return self.annotate( 91 config_context_data=Subquery( 92 ConfigContext.objects.filter( 93 self._get_config_context_filters() 94 ).annotate( 95 _data=EmptyGroupByJSONBAgg('data', ordering=['weight', 'name']) 96 ).values("_data") 97 ) 98 ).distinct() 99 100 def _get_config_context_filters(self): 101 # Construct the set of Q objects for the specific object types 102 base_query = Q( 103 Q(platforms=OuterRef('platform')) | Q(platforms=None), 104 Q(tenant_groups=OuterRef('tenant__group')) | Q(tenant_groups=None), 105 Q(tenants=OuterRef('tenant')) | Q(tenants=None), 106 Q(tags=OuterRef('tags')) | Q(tags=None), 107 is_active=True, 108 ) 109 110 if self.model._meta.model_name == 'device': 111 base_query.add((Q(roles=OuterRef('device_role')) | Q(roles=None)), Q.AND) 112 base_query.add((Q(sites=OuterRef('site')) | Q(sites=None)), Q.AND) 113 region_field = 'site__region' 114 115 elif self.model._meta.model_name == 'virtualmachine': 116 base_query.add((Q(roles=OuterRef('role')) | Q(roles=None)), Q.AND) 117 base_query.add((Q(cluster_groups=OuterRef('cluster__group')) | Q(cluster_groups=None)), Q.AND) 118 base_query.add((Q(clusters=OuterRef('cluster')) | Q(clusters=None)), Q.AND) 119 base_query.add((Q(sites=OuterRef('cluster__site')) | Q(sites=None)), Q.AND) 120 region_field = 'cluster__site__region' 121 122 base_query.add( 123 (Q( 124 regions__tree_id=OuterRef(f'{region_field}__tree_id'), 125 regions__level__lte=OuterRef(f'{region_field}__level'), 126 regions__lft__lte=OuterRef(f'{region_field}__lft'), 127 regions__rght__gte=OuterRef(f'{region_field}__rght'), 128 ) | Q(regions=None)), 129 Q.AND 130 ) 131 132 return base_query 133 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/netbox/extras/querysets.py b/netbox/extras/querysets.py --- a/netbox/extras/querysets.py +++ b/netbox/extras/querysets.py @@ -2,6 +2,7 @@ from django.db.models import OuterRef, Subquery, Q +from extras.models.tags import TaggedItem from utilities.query_functions import EmptyGroupByJSONBAgg, OrderableJSONBAgg from utilities.querysets import RestrictedQuerySet @@ -99,11 +100,25 @@ def _get_config_context_filters(self): # Construct the set of Q objects for the specific object types + tag_query_filters = { + "object_id": OuterRef(OuterRef('pk')), + "content_type__app_label": self.model._meta.app_label, + "content_type__model": self.model._meta.model_name + } base_query = Q( Q(platforms=OuterRef('platform')) | Q(platforms=None), Q(tenant_groups=OuterRef('tenant__group')) | Q(tenant_groups=None), Q(tenants=OuterRef('tenant')) | Q(tenants=None), - Q(tags=OuterRef('tags')) | Q(tags=None), + Q( + tags__pk__in=Subquery( + TaggedItem.objects.filter( + **tag_query_filters + ).values_list( + 'tag_id', + flat=True + ) + ) + ) | Q(tags=None), is_active=True, )
{"golden_diff": "diff --git a/netbox/extras/querysets.py b/netbox/extras/querysets.py\n--- a/netbox/extras/querysets.py\n+++ b/netbox/extras/querysets.py\n@@ -2,6 +2,7 @@\n \n from django.db.models import OuterRef, Subquery, Q\n \n+from extras.models.tags import TaggedItem\n from utilities.query_functions import EmptyGroupByJSONBAgg, OrderableJSONBAgg\n from utilities.querysets import RestrictedQuerySet\n \n@@ -99,11 +100,25 @@\n \n def _get_config_context_filters(self):\n # Construct the set of Q objects for the specific object types\n+ tag_query_filters = {\n+ \"object_id\": OuterRef(OuterRef('pk')),\n+ \"content_type__app_label\": self.model._meta.app_label,\n+ \"content_type__model\": self.model._meta.model_name\n+ }\n base_query = Q(\n Q(platforms=OuterRef('platform')) | Q(platforms=None),\n Q(tenant_groups=OuterRef('tenant__group')) | Q(tenant_groups=None),\n Q(tenants=OuterRef('tenant')) | Q(tenants=None),\n- Q(tags=OuterRef('tags')) | Q(tags=None),\n+ Q(\n+ tags__pk__in=Subquery(\n+ TaggedItem.objects.filter(\n+ **tag_query_filters\n+ ).values_list(\n+ 'tag_id',\n+ flat=True\n+ )\n+ )\n+ ) | Q(tags=None),\n is_active=True,\n )\n", "issue": "Showing config context with multiple tags assigned fails with MultipleObjectsReturned\n### Environment\r\n* Python version: 3.8.6\r\n* NetBox version: 2.9.10\r\n\r\n### Steps to Reproduce\r\n1. create a virtual machine\r\n2. add two tags (which result in adding data to config context)\r\n3. Open Config context of that VM\r\n\r\n<!-- What did you expect to happen? -->\r\n### Expected Behavior\r\nSee config context\r\n\r\n<!-- What happened instead? -->\r\n### Observed Behavior\r\nSee an error\r\n```\r\n<class 'virtualization.models.VirtualMachine.MultipleObjectsReturned'>\r\n\r\nget() returned more than one VirtualMachine -- it returned 2!\r\n```\r\n```\r\nnetbox_1 | Internal Server Error: /virtualization/virtual-machines/70/config-context/\r\nnetbox_1 | Traceback (most recent call last):\r\nnetbox_1 | File \"/usr/local/lib/python3.8/site-packages/django/core/handlers/exception.py\", line 47, in inner\r\nnetbox_1 | response = get_response(request)\r\nnetbox_1 | File \"/usr/local/lib/python3.8/site-packages/django/core/handlers/base.py\", line 179, in _get_response\r\nnetbox_1 | response = wrapped_callback(request, *callback_args, **callback_kwargs)\r\nnetbox_1 | File \"/usr/local/lib/python3.8/site-packages/django/views/generic/base.py\", line 73, in view\r\nnetbox_1 | return self.dispatch(request, *args, **kwargs)\r\nnetbox_1 | File \"/opt/netbox/netbox/utilities/views.py\", line 124, in dispatch\r\nnetbox_1 | return super().dispatch(request, *args, **kwargs)\r\nnetbox_1 | File \"/usr/local/lib/python3.8/site-packages/django/views/generic/base.py\", line 101, in dispatch\r\nnetbox_1 | return handler(request, *args, **kwargs)\r\nnetbox_1 | File \"/opt/netbox/netbox/extras/views.py\", line 146, in get\r\nnetbox_1 | obj = get_object_or_404(self.queryset, pk=pk)\r\nnetbox_1 | File \"/usr/local/lib/python3.8/site-packages/django/shortcuts.py\", line 76, in get_object_or_404\r\nnetbox_1 | return queryset.get(*args, **kwargs)\r\nnetbox_1 | File \"/usr/local/lib/python3.8/site-packages/cacheops/query.py\", line 353, in get\r\nnetbox_1 | return qs._no_monkey.get(qs, *args, **kwargs)\r\nnetbox_1 | File \"/usr/local/lib/python3.8/site-packages/django/db/models/query.py\", line 433, in get\r\nnetbox_1 | raise self.model.MultipleObjectsReturned(\r\nnetbox_1 | virtualization.models.VirtualMachine.MultipleObjectsReturned: get() returned more than one 
VirtualMachine -- it returned 2!\r\nnetbox_1 | 192.168.80.7 - - [29/Nov/2020:18:45:03 +0000] \"GET /virtualization/virtual-machines/70/config-context/ HTTP/1.0\" 500 1855 \"-\" \"<cut>\"\r\n```\r\n\r\nNote: I wrote this already in https://github.com/netbox-community/netbox/issues/5314#issuecomment-724722310 and [a change](https://github.com/netbox-community/netbox/commit/0d27abc6fc22a8d40183a59eceef5dda57e99eae) got introduced for 2.9 to fix it but in 2.10 it is still present.\r\nI got asked to create a new issue.\n", "before_files": [{"content": "from collections import OrderedDict\n\nfrom django.db.models import OuterRef, Subquery, Q\n\nfrom utilities.query_functions import EmptyGroupByJSONBAgg, OrderableJSONBAgg\nfrom utilities.querysets import RestrictedQuerySet\n\n\nclass CustomFieldQueryset:\n \"\"\"\n Annotate custom fields on objects within a QuerySet.\n \"\"\"\n def __init__(self, queryset, custom_fields):\n self.queryset = queryset\n self.model = queryset.model\n self.custom_fields = custom_fields\n\n def __iter__(self):\n for obj in self.queryset:\n values_dict = {cfv.field_id: cfv.value for cfv in obj.custom_field_values.all()}\n obj.custom_fields = OrderedDict([(field, values_dict.get(field.pk)) for field in self.custom_fields])\n yield obj\n\n\nclass ConfigContextQuerySet(RestrictedQuerySet):\n\n def get_for_object(self, obj, aggregate_data=False):\n \"\"\"\n Return all applicable ConfigContexts for a given object. Only active ConfigContexts will be included.\n\n Args:\n aggregate_data: If True, use the JSONBAgg aggregate function to return only the list of JSON data objects\n \"\"\"\n\n # `device_role` for Device; `role` for VirtualMachine\n role = getattr(obj, 'device_role', None) or obj.role\n\n # Virtualization cluster for VirtualMachine\n cluster = getattr(obj, 'cluster', None)\n cluster_group = getattr(cluster, 'group', None)\n\n # Get the group of the assigned tenant, if any\n tenant_group = obj.tenant.group if obj.tenant else None\n\n # Match against the directly assigned region as well as any parent regions.\n region = getattr(obj.site, 'region', None)\n if region:\n regions = region.get_ancestors(include_self=True)\n else:\n regions = []\n\n queryset = self.filter(\n Q(regions__in=regions) | Q(regions=None),\n Q(sites=obj.site) | Q(sites=None),\n Q(roles=role) | Q(roles=None),\n Q(platforms=obj.platform) | Q(platforms=None),\n Q(cluster_groups=cluster_group) | Q(cluster_groups=None),\n Q(clusters=cluster) | Q(clusters=None),\n Q(tenant_groups=tenant_group) | Q(tenant_groups=None),\n Q(tenants=obj.tenant) | Q(tenants=None),\n Q(tags__slug__in=obj.tags.slugs()) | Q(tags=None),\n is_active=True,\n ).order_by('weight', 'name').distinct()\n\n if aggregate_data:\n return queryset.aggregate(\n config_context_data=OrderableJSONBAgg('data', ordering=['weight', 'name'])\n )['config_context_data']\n\n return queryset\n\n\nclass ConfigContextModelQuerySet(RestrictedQuerySet):\n \"\"\"\n QuerySet manager used by models which support ConfigContext (device and virtual machine).\n\n Includes a method which appends an annotation of aggregated config context JSON data objects. 
This is\n implemented as a subquery which performs all the joins necessary to filter relevant config context objects.\n This offers a substantial performance gain over ConfigContextQuerySet.get_for_object() when dealing with\n multiple objects.\n\n This allows the annotation to be entirely optional.\n \"\"\"\n\n def annotate_config_context_data(self):\n \"\"\"\n Attach the subquery annotation to the base queryset\n \"\"\"\n from extras.models import ConfigContext\n return self.annotate(\n config_context_data=Subquery(\n ConfigContext.objects.filter(\n self._get_config_context_filters()\n ).annotate(\n _data=EmptyGroupByJSONBAgg('data', ordering=['weight', 'name'])\n ).values(\"_data\")\n )\n ).distinct()\n\n def _get_config_context_filters(self):\n # Construct the set of Q objects for the specific object types\n base_query = Q(\n Q(platforms=OuterRef('platform')) | Q(platforms=None),\n Q(tenant_groups=OuterRef('tenant__group')) | Q(tenant_groups=None),\n Q(tenants=OuterRef('tenant')) | Q(tenants=None),\n Q(tags=OuterRef('tags')) | Q(tags=None),\n is_active=True,\n )\n\n if self.model._meta.model_name == 'device':\n base_query.add((Q(roles=OuterRef('device_role')) | Q(roles=None)), Q.AND)\n base_query.add((Q(sites=OuterRef('site')) | Q(sites=None)), Q.AND)\n region_field = 'site__region'\n\n elif self.model._meta.model_name == 'virtualmachine':\n base_query.add((Q(roles=OuterRef('role')) | Q(roles=None)), Q.AND)\n base_query.add((Q(cluster_groups=OuterRef('cluster__group')) | Q(cluster_groups=None)), Q.AND)\n base_query.add((Q(clusters=OuterRef('cluster')) | Q(clusters=None)), Q.AND)\n base_query.add((Q(sites=OuterRef('cluster__site')) | Q(sites=None)), Q.AND)\n region_field = 'cluster__site__region'\n\n base_query.add(\n (Q(\n regions__tree_id=OuterRef(f'{region_field}__tree_id'),\n regions__level__lte=OuterRef(f'{region_field}__level'),\n regions__lft__lte=OuterRef(f'{region_field}__lft'),\n regions__rght__gte=OuterRef(f'{region_field}__rght'),\n ) | Q(regions=None)),\n Q.AND\n )\n\n return base_query\n", "path": "netbox/extras/querysets.py"}], "after_files": [{"content": "from collections import OrderedDict\n\nfrom django.db.models import OuterRef, Subquery, Q\n\nfrom extras.models.tags import TaggedItem\nfrom utilities.query_functions import EmptyGroupByJSONBAgg, OrderableJSONBAgg\nfrom utilities.querysets import RestrictedQuerySet\n\n\nclass CustomFieldQueryset:\n \"\"\"\n Annotate custom fields on objects within a QuerySet.\n \"\"\"\n def __init__(self, queryset, custom_fields):\n self.queryset = queryset\n self.model = queryset.model\n self.custom_fields = custom_fields\n\n def __iter__(self):\n for obj in self.queryset:\n values_dict = {cfv.field_id: cfv.value for cfv in obj.custom_field_values.all()}\n obj.custom_fields = OrderedDict([(field, values_dict.get(field.pk)) for field in self.custom_fields])\n yield obj\n\n\nclass ConfigContextQuerySet(RestrictedQuerySet):\n\n def get_for_object(self, obj, aggregate_data=False):\n \"\"\"\n Return all applicable ConfigContexts for a given object. 
Only active ConfigContexts will be included.\n\n Args:\n aggregate_data: If True, use the JSONBAgg aggregate function to return only the list of JSON data objects\n \"\"\"\n\n # `device_role` for Device; `role` for VirtualMachine\n role = getattr(obj, 'device_role', None) or obj.role\n\n # Virtualization cluster for VirtualMachine\n cluster = getattr(obj, 'cluster', None)\n cluster_group = getattr(cluster, 'group', None)\n\n # Get the group of the assigned tenant, if any\n tenant_group = obj.tenant.group if obj.tenant else None\n\n # Match against the directly assigned region as well as any parent regions.\n region = getattr(obj.site, 'region', None)\n if region:\n regions = region.get_ancestors(include_self=True)\n else:\n regions = []\n\n queryset = self.filter(\n Q(regions__in=regions) | Q(regions=None),\n Q(sites=obj.site) | Q(sites=None),\n Q(roles=role) | Q(roles=None),\n Q(platforms=obj.platform) | Q(platforms=None),\n Q(cluster_groups=cluster_group) | Q(cluster_groups=None),\n Q(clusters=cluster) | Q(clusters=None),\n Q(tenant_groups=tenant_group) | Q(tenant_groups=None),\n Q(tenants=obj.tenant) | Q(tenants=None),\n Q(tags__slug__in=obj.tags.slugs()) | Q(tags=None),\n is_active=True,\n ).order_by('weight', 'name').distinct()\n\n if aggregate_data:\n return queryset.aggregate(\n config_context_data=OrderableJSONBAgg('data', ordering=['weight', 'name'])\n )['config_context_data']\n\n return queryset\n\n\nclass ConfigContextModelQuerySet(RestrictedQuerySet):\n \"\"\"\n QuerySet manager used by models which support ConfigContext (device and virtual machine).\n\n Includes a method which appends an annotation of aggregated config context JSON data objects. This is\n implemented as a subquery which performs all the joins necessary to filter relevant config context objects.\n This offers a substantial performance gain over ConfigContextQuerySet.get_for_object() when dealing with\n multiple objects.\n\n This allows the annotation to be entirely optional.\n \"\"\"\n\n def annotate_config_context_data(self):\n \"\"\"\n Attach the subquery annotation to the base queryset\n \"\"\"\n from extras.models import ConfigContext\n return self.annotate(\n config_context_data=Subquery(\n ConfigContext.objects.filter(\n self._get_config_context_filters()\n ).annotate(\n _data=EmptyGroupByJSONBAgg('data', ordering=['weight', 'name'])\n ).values(\"_data\")\n )\n ).distinct()\n\n def _get_config_context_filters(self):\n # Construct the set of Q objects for the specific object types\n tag_query_filters = {\n \"object_id\": OuterRef(OuterRef('pk')),\n \"content_type__app_label\": self.model._meta.app_label,\n \"content_type__model\": self.model._meta.model_name\n }\n base_query = Q(\n Q(platforms=OuterRef('platform')) | Q(platforms=None),\n Q(tenant_groups=OuterRef('tenant__group')) | Q(tenant_groups=None),\n Q(tenants=OuterRef('tenant')) | Q(tenants=None),\n Q(\n tags__pk__in=Subquery(\n TaggedItem.objects.filter(\n **tag_query_filters\n ).values_list(\n 'tag_id',\n flat=True\n )\n )\n ) | Q(tags=None),\n is_active=True,\n )\n\n if self.model._meta.model_name == 'device':\n base_query.add((Q(roles=OuterRef('device_role')) | Q(roles=None)), Q.AND)\n base_query.add((Q(sites=OuterRef('site')) | Q(sites=None)), Q.AND)\n region_field = 'site__region'\n\n elif self.model._meta.model_name == 'virtualmachine':\n base_query.add((Q(roles=OuterRef('role')) | Q(roles=None)), Q.AND)\n base_query.add((Q(cluster_groups=OuterRef('cluster__group')) | Q(cluster_groups=None)), Q.AND)\n 
base_query.add((Q(clusters=OuterRef('cluster')) | Q(clusters=None)), Q.AND)\n base_query.add((Q(sites=OuterRef('cluster__site')) | Q(sites=None)), Q.AND)\n region_field = 'cluster__site__region'\n\n base_query.add(\n (Q(\n regions__tree_id=OuterRef(f'{region_field}__tree_id'),\n regions__level__lte=OuterRef(f'{region_field}__level'),\n regions__lft__lte=OuterRef(f'{region_field}__lft'),\n regions__rght__gte=OuterRef(f'{region_field}__rght'),\n ) | Q(regions=None)),\n Q.AND\n )\n\n return base_query\n", "path": "netbox/extras/querysets.py"}]}
2,631
338
gh_patches_debug_34392
rasdani/github-patches
git_diff
pytorch__pytorch-3404
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Bug when “numpy.str_” occur in dataloader using default_collate If, for some reason, my `sample` contains data of type `numpy.str_` instead of `str`, e.g., `type(sample['filepath']) == type(numpy.str_)` then when it passed to `DataLoader`, a `KeyError` would occur. I checked and figure that maybe I found a bug. Consider function `default_collate` below: ```python def default_collate(batch): "Puts each data field into a tensor with outer dimension batch size" if torch.is_tensor(batch[0]): out = None if _use_shared_memory: # If we're in a background process, concatenate directly into a # shared memory tensor to avoid an extra copy numel = sum([x.numel() for x in batch]) storage = batch[0].storage()._new_shared(numel) out = batch[0].new(storage) return torch.stack(batch, 0, out=out) elif type(batch[0]).__module__ == 'numpy': elem = batch[0] if type(elem).__name__ == 'ndarray': return torch.stack([torch.from_numpy(b) for b in batch], 0) if elem.shape == (): # scalars py_type = float if elem.dtype.name.startswith('float') else int return numpy_type_map[elem.dtype.name](list(map(py_type, batch))) elif isinstance(batch[0], int): return torch.LongTensor(batch) elif isinstance(batch[0], float): return torch.DoubleTensor(batch) elif isinstance(batch[0], string_classes): return batch elif isinstance(batch[0], collections.Mapping): return {key: default_collate([d[key] for d in batch]) for key in batch[0]} elif isinstance(batch[0], collections.Sequence): transposed = zip(*batch) return [default_collate(samples) for samples in transposed] raise TypeError(("batch must contain tensors, numbers, dicts or lists; found {}" .format(type(batch[0])))) ``` When the data is a `numpy.str_`, it would pass `elif type(batch[0]).__module__ == 'numpy':` and then treated like either a 'ndarray' or a 'float' scale. What I should do is set my `sample['filepath'] = str(sample['filepath'])`, but it would be nice if the function could at least giving more information about the error. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
--- BEGIN FILES --- Path: `torch/utils/data/dataloader.py` Content: ``` 1 import torch 2 import torch.multiprocessing as multiprocessing 3 from .sampler import SequentialSampler, RandomSampler, BatchSampler 4 import collections 5 import sys 6 import traceback 7 import threading 8 from torch._six import string_classes 9 10 11 if sys.version_info[0] == 2: 12 import Queue as queue 13 else: 14 import queue 15 16 17 _use_shared_memory = False 18 """Whether to use shared memory in default_collate""" 19 20 21 class ExceptionWrapper(object): 22 "Wraps an exception plus traceback to communicate across threads" 23 24 def __init__(self, exc_info): 25 self.exc_type = exc_info[0] 26 self.exc_msg = "".join(traceback.format_exception(*exc_info)) 27 28 29 def _worker_loop(dataset, index_queue, data_queue, collate_fn): 30 global _use_shared_memory 31 _use_shared_memory = True 32 33 torch.set_num_threads(1) 34 while True: 35 r = index_queue.get() 36 if r is None: 37 data_queue.put(None) 38 break 39 idx, batch_indices = r 40 try: 41 samples = collate_fn([dataset[i] for i in batch_indices]) 42 except Exception: 43 data_queue.put((idx, ExceptionWrapper(sys.exc_info()))) 44 else: 45 data_queue.put((idx, samples)) 46 47 48 def _pin_memory_loop(in_queue, out_queue, done_event): 49 while True: 50 try: 51 r = in_queue.get() 52 except Exception: 53 if done_event.is_set(): 54 return 55 raise 56 if r is None: 57 break 58 if isinstance(r[1], ExceptionWrapper): 59 out_queue.put(r) 60 continue 61 idx, batch = r 62 try: 63 batch = pin_memory_batch(batch) 64 except Exception: 65 out_queue.put((idx, ExceptionWrapper(sys.exc_info()))) 66 else: 67 out_queue.put((idx, batch)) 68 69 70 numpy_type_map = { 71 'float64': torch.DoubleTensor, 72 'float32': torch.FloatTensor, 73 'float16': torch.HalfTensor, 74 'int64': torch.LongTensor, 75 'int32': torch.IntTensor, 76 'int16': torch.ShortTensor, 77 'int8': torch.CharTensor, 78 'uint8': torch.ByteTensor, 79 } 80 81 82 def default_collate(batch): 83 "Puts each data field into a tensor with outer dimension batch size" 84 if torch.is_tensor(batch[0]): 85 out = None 86 if _use_shared_memory: 87 # If we're in a background process, concatenate directly into a 88 # shared memory tensor to avoid an extra copy 89 numel = sum([x.numel() for x in batch]) 90 storage = batch[0].storage()._new_shared(numel) 91 out = batch[0].new(storage) 92 return torch.stack(batch, 0, out=out) 93 elif type(batch[0]).__module__ == 'numpy': 94 elem = batch[0] 95 if type(elem).__name__ == 'ndarray': 96 return torch.stack([torch.from_numpy(b) for b in batch], 0) 97 if elem.shape == (): # scalars 98 py_type = float if elem.dtype.name.startswith('float') else int 99 return numpy_type_map[elem.dtype.name](list(map(py_type, batch))) 100 elif isinstance(batch[0], int): 101 return torch.LongTensor(batch) 102 elif isinstance(batch[0], float): 103 return torch.DoubleTensor(batch) 104 elif isinstance(batch[0], string_classes): 105 return batch 106 elif isinstance(batch[0], collections.Mapping): 107 return {key: default_collate([d[key] for d in batch]) for key in batch[0]} 108 elif isinstance(batch[0], collections.Sequence): 109 transposed = zip(*batch) 110 return [default_collate(samples) for samples in transposed] 111 112 raise TypeError(("batch must contain tensors, numbers, dicts or lists; found {}" 113 .format(type(batch[0])))) 114 115 116 def pin_memory_batch(batch): 117 if torch.is_tensor(batch): 118 return batch.pin_memory() 119 elif isinstance(batch, string_classes): 120 return batch 121 elif isinstance(batch, 
collections.Mapping): 122 return {k: pin_memory_batch(sample) for k, sample in batch.items()} 123 elif isinstance(batch, collections.Sequence): 124 return [pin_memory_batch(sample) for sample in batch] 125 else: 126 return batch 127 128 129 class DataLoaderIter(object): 130 "Iterates once over the DataLoader's dataset, as specified by the sampler" 131 132 def __init__(self, loader): 133 self.dataset = loader.dataset 134 self.collate_fn = loader.collate_fn 135 self.batch_sampler = loader.batch_sampler 136 self.num_workers = loader.num_workers 137 self.pin_memory = loader.pin_memory 138 self.done_event = threading.Event() 139 140 self.sample_iter = iter(self.batch_sampler) 141 142 if self.num_workers > 0: 143 self.index_queue = multiprocessing.SimpleQueue() 144 self.data_queue = multiprocessing.SimpleQueue() 145 self.batches_outstanding = 0 146 self.shutdown = False 147 self.send_idx = 0 148 self.rcvd_idx = 0 149 self.reorder_dict = {} 150 151 self.workers = [ 152 multiprocessing.Process( 153 target=_worker_loop, 154 args=(self.dataset, self.index_queue, self.data_queue, self.collate_fn)) 155 for _ in range(self.num_workers)] 156 157 for w in self.workers: 158 w.daemon = True # ensure that the worker exits on process exit 159 w.start() 160 161 if self.pin_memory: 162 in_data = self.data_queue 163 self.data_queue = queue.Queue() 164 self.pin_thread = threading.Thread( 165 target=_pin_memory_loop, 166 args=(in_data, self.data_queue, self.done_event)) 167 self.pin_thread.daemon = True 168 self.pin_thread.start() 169 170 # prime the prefetch loop 171 for _ in range(2 * self.num_workers): 172 self._put_indices() 173 174 def __len__(self): 175 return len(self.batch_sampler) 176 177 def __next__(self): 178 if self.num_workers == 0: # same-process loading 179 indices = next(self.sample_iter) # may raise StopIteration 180 batch = self.collate_fn([self.dataset[i] for i in indices]) 181 if self.pin_memory: 182 batch = pin_memory_batch(batch) 183 return batch 184 185 # check if the next sample has already been generated 186 if self.rcvd_idx in self.reorder_dict: 187 batch = self.reorder_dict.pop(self.rcvd_idx) 188 return self._process_next_batch(batch) 189 190 if self.batches_outstanding == 0: 191 self._shutdown_workers() 192 raise StopIteration 193 194 while True: 195 assert (not self.shutdown and self.batches_outstanding > 0) 196 idx, batch = self.data_queue.get() 197 self.batches_outstanding -= 1 198 if idx != self.rcvd_idx: 199 # store out-of-order samples 200 self.reorder_dict[idx] = batch 201 continue 202 return self._process_next_batch(batch) 203 204 next = __next__ # Python 2 compatibility 205 206 def __iter__(self): 207 return self 208 209 def _put_indices(self): 210 assert self.batches_outstanding < 2 * self.num_workers 211 indices = next(self.sample_iter, None) 212 if indices is None: 213 return 214 self.index_queue.put((self.send_idx, indices)) 215 self.batches_outstanding += 1 216 self.send_idx += 1 217 218 def _process_next_batch(self, batch): 219 self.rcvd_idx += 1 220 self._put_indices() 221 if isinstance(batch, ExceptionWrapper): 222 raise batch.exc_type(batch.exc_msg) 223 return batch 224 225 def __getstate__(self): 226 # TODO: add limited pickling support for sharing an iterator 227 # across multiple threads for HOGWILD. 
228 # Probably the best way to do this is by moving the sample pushing 229 # to a separate thread and then just sharing the data queue 230 # but signalling the end is tricky without a non-blocking API 231 raise NotImplementedError("DataLoaderIterator cannot be pickled") 232 233 def _shutdown_workers(self): 234 if not self.shutdown: 235 self.shutdown = True 236 self.done_event.set() 237 for _ in self.workers: 238 self.index_queue.put(None) 239 240 def __del__(self): 241 if self.num_workers > 0: 242 self._shutdown_workers() 243 244 245 class DataLoader(object): 246 """ 247 Data loader. Combines a dataset and a sampler, and provides 248 single- or multi-process iterators over the dataset. 249 250 Arguments: 251 dataset (Dataset): dataset from which to load the data. 252 batch_size (int, optional): how many samples per batch to load 253 (default: 1). 254 shuffle (bool, optional): set to ``True`` to have the data reshuffled 255 at every epoch (default: False). 256 sampler (Sampler, optional): defines the strategy to draw samples from 257 the dataset. If specified, ``shuffle`` must be False. 258 batch_sampler (Sampler, optional): like sampler, but returns a batch of 259 indices at a time. Mutually exclusive with batch_size, shuffle, 260 sampler, and drop_last. 261 num_workers (int, optional): how many subprocesses to use for data 262 loading. 0 means that the data will be loaded in the main process 263 (default: 0) 264 collate_fn (callable, optional): merges a list of samples to form a mini-batch. 265 pin_memory (bool, optional): If ``True``, the data loader will copy tensors 266 into CUDA pinned memory before returning them. 267 drop_last (bool, optional): set to ``True`` to drop the last incomplete batch, 268 if the dataset size is not divisible by the batch size. If False and 269 the size of dataset is not divisible by the batch size, then the last batch 270 will be smaller. (default: False) 271 """ 272 273 def __init__(self, dataset, batch_size=1, shuffle=False, sampler=None, batch_sampler=None, 274 num_workers=0, collate_fn=default_collate, pin_memory=False, drop_last=False): 275 self.dataset = dataset 276 self.batch_size = batch_size 277 self.num_workers = num_workers 278 self.collate_fn = collate_fn 279 self.pin_memory = pin_memory 280 self.drop_last = drop_last 281 282 if batch_sampler is not None: 283 if batch_size > 1 or shuffle or sampler is not None or drop_last: 284 raise ValueError('batch_sampler is mutually exclusive with ' 285 'batch_size, shuffle, sampler, and drop_last') 286 287 if sampler is not None and shuffle: 288 raise ValueError('sampler is mutually exclusive with shuffle') 289 290 if batch_sampler is None: 291 if sampler is None: 292 if shuffle: 293 sampler = RandomSampler(dataset) 294 else: 295 sampler = SequentialSampler(dataset) 296 batch_sampler = BatchSampler(sampler, batch_size, drop_last) 297 298 self.sampler = sampler 299 self.batch_sampler = batch_sampler 300 301 def __iter__(self): 302 return DataLoaderIter(self) 303 304 def __len__(self): 305 return len(self.batch_sampler) 306 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/torch/utils/data/dataloader.py b/torch/utils/data/dataloader.py --- a/torch/utils/data/dataloader.py +++ b/torch/utils/data/dataloader.py @@ -2,6 +2,7 @@ import torch.multiprocessing as multiprocessing from .sampler import SequentialSampler, RandomSampler, BatchSampler import collections +import re import sys import traceback import threading @@ -81,6 +82,9 @@ def default_collate(batch): "Puts each data field into a tensor with outer dimension batch size" + + error_msg = "batch must contain tensors, numbers, dicts or lists; found {}" + elem_type = type(batch[0]) if torch.is_tensor(batch[0]): out = None if _use_shared_memory: @@ -90,9 +94,14 @@ storage = batch[0].storage()._new_shared(numel) out = batch[0].new(storage) return torch.stack(batch, 0, out=out) - elif type(batch[0]).__module__ == 'numpy': + elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \ + and elem_type.__name__ != 'string_': elem = batch[0] - if type(elem).__name__ == 'ndarray': + if elem_type.__name__ == 'ndarray': + # array of string classes and object + if re.search('[SaUO]', elem.dtype.str) is not None: + raise TypeError(error_msg.format(elem.dtype)) + return torch.stack([torch.from_numpy(b) for b in batch], 0) if elem.shape == (): # scalars py_type = float if elem.dtype.name.startswith('float') else int @@ -109,8 +118,7 @@ transposed = zip(*batch) return [default_collate(samples) for samples in transposed] - raise TypeError(("batch must contain tensors, numbers, dicts or lists; found {}" - .format(type(batch[0])))) + raise TypeError((error_msg.format(type(batch[0])))) def pin_memory_batch(batch):
{"golden_diff": "diff --git a/torch/utils/data/dataloader.py b/torch/utils/data/dataloader.py\n--- a/torch/utils/data/dataloader.py\n+++ b/torch/utils/data/dataloader.py\n@@ -2,6 +2,7 @@\n import torch.multiprocessing as multiprocessing\n from .sampler import SequentialSampler, RandomSampler, BatchSampler\n import collections\n+import re\n import sys\n import traceback\n import threading\n@@ -81,6 +82,9 @@\n \n def default_collate(batch):\n \"Puts each data field into a tensor with outer dimension batch size\"\n+\n+ error_msg = \"batch must contain tensors, numbers, dicts or lists; found {}\"\n+ elem_type = type(batch[0])\n if torch.is_tensor(batch[0]):\n out = None\n if _use_shared_memory:\n@@ -90,9 +94,14 @@\n storage = batch[0].storage()._new_shared(numel)\n out = batch[0].new(storage)\n return torch.stack(batch, 0, out=out)\n- elif type(batch[0]).__module__ == 'numpy':\n+ elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \\\n+ and elem_type.__name__ != 'string_':\n elem = batch[0]\n- if type(elem).__name__ == 'ndarray':\n+ if elem_type.__name__ == 'ndarray':\n+ # array of string classes and object\n+ if re.search('[SaUO]', elem.dtype.str) is not None:\n+ raise TypeError(error_msg.format(elem.dtype))\n+\n return torch.stack([torch.from_numpy(b) for b in batch], 0)\n if elem.shape == (): # scalars\n py_type = float if elem.dtype.name.startswith('float') else int\n@@ -109,8 +118,7 @@\n transposed = zip(*batch)\n return [default_collate(samples) for samples in transposed]\n \n- raise TypeError((\"batch must contain tensors, numbers, dicts or lists; found {}\"\n- .format(type(batch[0]))))\n+ raise TypeError((error_msg.format(type(batch[0]))))\n \n \n def pin_memory_batch(batch):\n", "issue": "Bug when \u201cnumpy.str_\u201d occur in dataloader using default_collate\nIf, for some reason, my `sample` contains data of type `numpy.str_` instead of `str`, e.g., `type(sample['filepath']) == type(numpy.str_)` then when it passed to `DataLoader`, a `KeyError` would occur. I checked and figure that maybe I found a bug. 
Consider function `default_collate` below:\r\n```python\r\ndef default_collate(batch):\r\n \"Puts each data field into a tensor with outer dimension batch size\"\r\n if torch.is_tensor(batch[0]):\r\n out = None\r\n if _use_shared_memory:\r\n # If we're in a background process, concatenate directly into a\r\n # shared memory tensor to avoid an extra copy\r\n numel = sum([x.numel() for x in batch])\r\n storage = batch[0].storage()._new_shared(numel)\r\n out = batch[0].new(storage)\r\n return torch.stack(batch, 0, out=out)\r\n elif type(batch[0]).__module__ == 'numpy':\r\n elem = batch[0]\r\n if type(elem).__name__ == 'ndarray':\r\n return torch.stack([torch.from_numpy(b) for b in batch], 0)\r\n if elem.shape == (): # scalars\r\n py_type = float if elem.dtype.name.startswith('float') else int\r\n return numpy_type_map[elem.dtype.name](list(map(py_type, batch)))\r\n elif isinstance(batch[0], int):\r\n return torch.LongTensor(batch)\r\n elif isinstance(batch[0], float):\r\n return torch.DoubleTensor(batch)\r\n elif isinstance(batch[0], string_classes):\r\n return batch\r\n elif isinstance(batch[0], collections.Mapping):\r\n return {key: default_collate([d[key] for d in batch]) for key in batch[0]}\r\n elif isinstance(batch[0], collections.Sequence):\r\n transposed = zip(*batch)\r\n return [default_collate(samples) for samples in transposed]\r\n\r\n raise TypeError((\"batch must contain tensors, numbers, dicts or lists; found {}\"\r\n .format(type(batch[0]))))\r\n```\r\nWhen the data is a `numpy.str_`, it would pass `elif type(batch[0]).__module__ == 'numpy':` and then treated like either a 'ndarray' or a 'float' scale. What I should do is set my `sample['filepath'] = str(sample['filepath'])`, but it would be nice if the function could at least giving more information about the error.\r\n\n", "before_files": [{"content": "import torch\nimport torch.multiprocessing as multiprocessing\nfrom .sampler import SequentialSampler, RandomSampler, BatchSampler\nimport collections\nimport sys\nimport traceback\nimport threading\nfrom torch._six import string_classes\n\n\nif sys.version_info[0] == 2:\n import Queue as queue\nelse:\n import queue\n\n\n_use_shared_memory = False\n\"\"\"Whether to use shared memory in default_collate\"\"\"\n\n\nclass ExceptionWrapper(object):\n \"Wraps an exception plus traceback to communicate across threads\"\n\n def __init__(self, exc_info):\n self.exc_type = exc_info[0]\n self.exc_msg = \"\".join(traceback.format_exception(*exc_info))\n\n\ndef _worker_loop(dataset, index_queue, data_queue, collate_fn):\n global _use_shared_memory\n _use_shared_memory = True\n\n torch.set_num_threads(1)\n while True:\n r = index_queue.get()\n if r is None:\n data_queue.put(None)\n break\n idx, batch_indices = r\n try:\n samples = collate_fn([dataset[i] for i in batch_indices])\n except Exception:\n data_queue.put((idx, ExceptionWrapper(sys.exc_info())))\n else:\n data_queue.put((idx, samples))\n\n\ndef _pin_memory_loop(in_queue, out_queue, done_event):\n while True:\n try:\n r = in_queue.get()\n except Exception:\n if done_event.is_set():\n return\n raise\n if r is None:\n break\n if isinstance(r[1], ExceptionWrapper):\n out_queue.put(r)\n continue\n idx, batch = r\n try:\n batch = pin_memory_batch(batch)\n except Exception:\n out_queue.put((idx, ExceptionWrapper(sys.exc_info())))\n else:\n out_queue.put((idx, batch))\n\n\nnumpy_type_map = {\n 'float64': torch.DoubleTensor,\n 'float32': torch.FloatTensor,\n 'float16': torch.HalfTensor,\n 'int64': torch.LongTensor,\n 'int32': 
torch.IntTensor,\n 'int16': torch.ShortTensor,\n 'int8': torch.CharTensor,\n 'uint8': torch.ByteTensor,\n}\n\n\ndef default_collate(batch):\n \"Puts each data field into a tensor with outer dimension batch size\"\n if torch.is_tensor(batch[0]):\n out = None\n if _use_shared_memory:\n # If we're in a background process, concatenate directly into a\n # shared memory tensor to avoid an extra copy\n numel = sum([x.numel() for x in batch])\n storage = batch[0].storage()._new_shared(numel)\n out = batch[0].new(storage)\n return torch.stack(batch, 0, out=out)\n elif type(batch[0]).__module__ == 'numpy':\n elem = batch[0]\n if type(elem).__name__ == 'ndarray':\n return torch.stack([torch.from_numpy(b) for b in batch], 0)\n if elem.shape == (): # scalars\n py_type = float if elem.dtype.name.startswith('float') else int\n return numpy_type_map[elem.dtype.name](list(map(py_type, batch)))\n elif isinstance(batch[0], int):\n return torch.LongTensor(batch)\n elif isinstance(batch[0], float):\n return torch.DoubleTensor(batch)\n elif isinstance(batch[0], string_classes):\n return batch\n elif isinstance(batch[0], collections.Mapping):\n return {key: default_collate([d[key] for d in batch]) for key in batch[0]}\n elif isinstance(batch[0], collections.Sequence):\n transposed = zip(*batch)\n return [default_collate(samples) for samples in transposed]\n\n raise TypeError((\"batch must contain tensors, numbers, dicts or lists; found {}\"\n .format(type(batch[0]))))\n\n\ndef pin_memory_batch(batch):\n if torch.is_tensor(batch):\n return batch.pin_memory()\n elif isinstance(batch, string_classes):\n return batch\n elif isinstance(batch, collections.Mapping):\n return {k: pin_memory_batch(sample) for k, sample in batch.items()}\n elif isinstance(batch, collections.Sequence):\n return [pin_memory_batch(sample) for sample in batch]\n else:\n return batch\n\n\nclass DataLoaderIter(object):\n \"Iterates once over the DataLoader's dataset, as specified by the sampler\"\n\n def __init__(self, loader):\n self.dataset = loader.dataset\n self.collate_fn = loader.collate_fn\n self.batch_sampler = loader.batch_sampler\n self.num_workers = loader.num_workers\n self.pin_memory = loader.pin_memory\n self.done_event = threading.Event()\n\n self.sample_iter = iter(self.batch_sampler)\n\n if self.num_workers > 0:\n self.index_queue = multiprocessing.SimpleQueue()\n self.data_queue = multiprocessing.SimpleQueue()\n self.batches_outstanding = 0\n self.shutdown = False\n self.send_idx = 0\n self.rcvd_idx = 0\n self.reorder_dict = {}\n\n self.workers = [\n multiprocessing.Process(\n target=_worker_loop,\n args=(self.dataset, self.index_queue, self.data_queue, self.collate_fn))\n for _ in range(self.num_workers)]\n\n for w in self.workers:\n w.daemon = True # ensure that the worker exits on process exit\n w.start()\n\n if self.pin_memory:\n in_data = self.data_queue\n self.data_queue = queue.Queue()\n self.pin_thread = threading.Thread(\n target=_pin_memory_loop,\n args=(in_data, self.data_queue, self.done_event))\n self.pin_thread.daemon = True\n self.pin_thread.start()\n\n # prime the prefetch loop\n for _ in range(2 * self.num_workers):\n self._put_indices()\n\n def __len__(self):\n return len(self.batch_sampler)\n\n def __next__(self):\n if self.num_workers == 0: # same-process loading\n indices = next(self.sample_iter) # may raise StopIteration\n batch = self.collate_fn([self.dataset[i] for i in indices])\n if self.pin_memory:\n batch = pin_memory_batch(batch)\n return batch\n\n # check if the next sample has already been 
generated\n if self.rcvd_idx in self.reorder_dict:\n batch = self.reorder_dict.pop(self.rcvd_idx)\n return self._process_next_batch(batch)\n\n if self.batches_outstanding == 0:\n self._shutdown_workers()\n raise StopIteration\n\n while True:\n assert (not self.shutdown and self.batches_outstanding > 0)\n idx, batch = self.data_queue.get()\n self.batches_outstanding -= 1\n if idx != self.rcvd_idx:\n # store out-of-order samples\n self.reorder_dict[idx] = batch\n continue\n return self._process_next_batch(batch)\n\n next = __next__ # Python 2 compatibility\n\n def __iter__(self):\n return self\n\n def _put_indices(self):\n assert self.batches_outstanding < 2 * self.num_workers\n indices = next(self.sample_iter, None)\n if indices is None:\n return\n self.index_queue.put((self.send_idx, indices))\n self.batches_outstanding += 1\n self.send_idx += 1\n\n def _process_next_batch(self, batch):\n self.rcvd_idx += 1\n self._put_indices()\n if isinstance(batch, ExceptionWrapper):\n raise batch.exc_type(batch.exc_msg)\n return batch\n\n def __getstate__(self):\n # TODO: add limited pickling support for sharing an iterator\n # across multiple threads for HOGWILD.\n # Probably the best way to do this is by moving the sample pushing\n # to a separate thread and then just sharing the data queue\n # but signalling the end is tricky without a non-blocking API\n raise NotImplementedError(\"DataLoaderIterator cannot be pickled\")\n\n def _shutdown_workers(self):\n if not self.shutdown:\n self.shutdown = True\n self.done_event.set()\n for _ in self.workers:\n self.index_queue.put(None)\n\n def __del__(self):\n if self.num_workers > 0:\n self._shutdown_workers()\n\n\nclass DataLoader(object):\n \"\"\"\n Data loader. Combines a dataset and a sampler, and provides\n single- or multi-process iterators over the dataset.\n\n Arguments:\n dataset (Dataset): dataset from which to load the data.\n batch_size (int, optional): how many samples per batch to load\n (default: 1).\n shuffle (bool, optional): set to ``True`` to have the data reshuffled\n at every epoch (default: False).\n sampler (Sampler, optional): defines the strategy to draw samples from\n the dataset. If specified, ``shuffle`` must be False.\n batch_sampler (Sampler, optional): like sampler, but returns a batch of\n indices at a time. Mutually exclusive with batch_size, shuffle,\n sampler, and drop_last.\n num_workers (int, optional): how many subprocesses to use for data\n loading. 0 means that the data will be loaded in the main process\n (default: 0)\n collate_fn (callable, optional): merges a list of samples to form a mini-batch.\n pin_memory (bool, optional): If ``True``, the data loader will copy tensors\n into CUDA pinned memory before returning them.\n drop_last (bool, optional): set to ``True`` to drop the last incomplete batch,\n if the dataset size is not divisible by the batch size. If False and\n the size of dataset is not divisible by the batch size, then the last batch\n will be smaller. 
(default: False)\n \"\"\"\n\n def __init__(self, dataset, batch_size=1, shuffle=False, sampler=None, batch_sampler=None,\n num_workers=0, collate_fn=default_collate, pin_memory=False, drop_last=False):\n self.dataset = dataset\n self.batch_size = batch_size\n self.num_workers = num_workers\n self.collate_fn = collate_fn\n self.pin_memory = pin_memory\n self.drop_last = drop_last\n\n if batch_sampler is not None:\n if batch_size > 1 or shuffle or sampler is not None or drop_last:\n raise ValueError('batch_sampler is mutually exclusive with '\n 'batch_size, shuffle, sampler, and drop_last')\n\n if sampler is not None and shuffle:\n raise ValueError('sampler is mutually exclusive with shuffle')\n\n if batch_sampler is None:\n if sampler is None:\n if shuffle:\n sampler = RandomSampler(dataset)\n else:\n sampler = SequentialSampler(dataset)\n batch_sampler = BatchSampler(sampler, batch_size, drop_last)\n\n self.sampler = sampler\n self.batch_sampler = batch_sampler\n\n def __iter__(self):\n return DataLoaderIter(self)\n\n def __len__(self):\n return len(self.batch_sampler)\n", "path": "torch/utils/data/dataloader.py"}], "after_files": [{"content": "import torch\nimport torch.multiprocessing as multiprocessing\nfrom .sampler import SequentialSampler, RandomSampler, BatchSampler\nimport collections\nimport re\nimport sys\nimport traceback\nimport threading\nfrom torch._six import string_classes\n\n\nif sys.version_info[0] == 2:\n import Queue as queue\nelse:\n import queue\n\n\n_use_shared_memory = False\n\"\"\"Whether to use shared memory in default_collate\"\"\"\n\n\nclass ExceptionWrapper(object):\n \"Wraps an exception plus traceback to communicate across threads\"\n\n def __init__(self, exc_info):\n self.exc_type = exc_info[0]\n self.exc_msg = \"\".join(traceback.format_exception(*exc_info))\n\n\ndef _worker_loop(dataset, index_queue, data_queue, collate_fn):\n global _use_shared_memory\n _use_shared_memory = True\n\n torch.set_num_threads(1)\n while True:\n r = index_queue.get()\n if r is None:\n data_queue.put(None)\n break\n idx, batch_indices = r\n try:\n samples = collate_fn([dataset[i] for i in batch_indices])\n except Exception:\n data_queue.put((idx, ExceptionWrapper(sys.exc_info())))\n else:\n data_queue.put((idx, samples))\n\n\ndef _pin_memory_loop(in_queue, out_queue, done_event):\n while True:\n try:\n r = in_queue.get()\n except Exception:\n if done_event.is_set():\n return\n raise\n if r is None:\n break\n if isinstance(r[1], ExceptionWrapper):\n out_queue.put(r)\n continue\n idx, batch = r\n try:\n batch = pin_memory_batch(batch)\n except Exception:\n out_queue.put((idx, ExceptionWrapper(sys.exc_info())))\n else:\n out_queue.put((idx, batch))\n\n\nnumpy_type_map = {\n 'float64': torch.DoubleTensor,\n 'float32': torch.FloatTensor,\n 'float16': torch.HalfTensor,\n 'int64': torch.LongTensor,\n 'int32': torch.IntTensor,\n 'int16': torch.ShortTensor,\n 'int8': torch.CharTensor,\n 'uint8': torch.ByteTensor,\n}\n\n\ndef default_collate(batch):\n \"Puts each data field into a tensor with outer dimension batch size\"\n\n error_msg = \"batch must contain tensors, numbers, dicts or lists; found {}\"\n elem_type = type(batch[0])\n if torch.is_tensor(batch[0]):\n out = None\n if _use_shared_memory:\n # If we're in a background process, concatenate directly into a\n # shared memory tensor to avoid an extra copy\n numel = sum([x.numel() for x in batch])\n storage = batch[0].storage()._new_shared(numel)\n out = batch[0].new(storage)\n return torch.stack(batch, 0, out=out)\n elif 
elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \\\n and elem_type.__name__ != 'string_':\n elem = batch[0]\n if elem_type.__name__ == 'ndarray':\n # array of string classes and object\n if re.search('[SaUO]', elem.dtype.str) is not None:\n raise TypeError(error_msg.format(elem.dtype))\n\n return torch.stack([torch.from_numpy(b) for b in batch], 0)\n if elem.shape == (): # scalars\n py_type = float if elem.dtype.name.startswith('float') else int\n return numpy_type_map[elem.dtype.name](list(map(py_type, batch)))\n elif isinstance(batch[0], int):\n return torch.LongTensor(batch)\n elif isinstance(batch[0], float):\n return torch.DoubleTensor(batch)\n elif isinstance(batch[0], string_classes):\n return batch\n elif isinstance(batch[0], collections.Mapping):\n return {key: default_collate([d[key] for d in batch]) for key in batch[0]}\n elif isinstance(batch[0], collections.Sequence):\n transposed = zip(*batch)\n return [default_collate(samples) for samples in transposed]\n\n raise TypeError((error_msg.format(type(batch[0]))))\n\n\ndef pin_memory_batch(batch):\n if torch.is_tensor(batch):\n return batch.pin_memory()\n elif isinstance(batch, string_classes):\n return batch\n elif isinstance(batch, collections.Mapping):\n return {k: pin_memory_batch(sample) for k, sample in batch.items()}\n elif isinstance(batch, collections.Sequence):\n return [pin_memory_batch(sample) for sample in batch]\n else:\n return batch\n\n\nclass DataLoaderIter(object):\n \"Iterates once over the DataLoader's dataset, as specified by the sampler\"\n\n def __init__(self, loader):\n self.dataset = loader.dataset\n self.collate_fn = loader.collate_fn\n self.batch_sampler = loader.batch_sampler\n self.num_workers = loader.num_workers\n self.pin_memory = loader.pin_memory\n self.done_event = threading.Event()\n\n self.sample_iter = iter(self.batch_sampler)\n\n if self.num_workers > 0:\n self.index_queue = multiprocessing.SimpleQueue()\n self.data_queue = multiprocessing.SimpleQueue()\n self.batches_outstanding = 0\n self.shutdown = False\n self.send_idx = 0\n self.rcvd_idx = 0\n self.reorder_dict = {}\n\n self.workers = [\n multiprocessing.Process(\n target=_worker_loop,\n args=(self.dataset, self.index_queue, self.data_queue, self.collate_fn))\n for _ in range(self.num_workers)]\n\n for w in self.workers:\n w.daemon = True # ensure that the worker exits on process exit\n w.start()\n\n if self.pin_memory:\n in_data = self.data_queue\n self.data_queue = queue.Queue()\n self.pin_thread = threading.Thread(\n target=_pin_memory_loop,\n args=(in_data, self.data_queue, self.done_event))\n self.pin_thread.daemon = True\n self.pin_thread.start()\n\n # prime the prefetch loop\n for _ in range(2 * self.num_workers):\n self._put_indices()\n\n def __len__(self):\n return len(self.batch_sampler)\n\n def __next__(self):\n if self.num_workers == 0: # same-process loading\n indices = next(self.sample_iter) # may raise StopIteration\n batch = self.collate_fn([self.dataset[i] for i in indices])\n if self.pin_memory:\n batch = pin_memory_batch(batch)\n return batch\n\n # check if the next sample has already been generated\n if self.rcvd_idx in self.reorder_dict:\n batch = self.reorder_dict.pop(self.rcvd_idx)\n return self._process_next_batch(batch)\n\n if self.batches_outstanding == 0:\n self._shutdown_workers()\n raise StopIteration\n\n while True:\n assert (not self.shutdown and self.batches_outstanding > 0)\n idx, batch = self.data_queue.get()\n self.batches_outstanding -= 1\n if idx != self.rcvd_idx:\n # store 
out-of-order samples\n self.reorder_dict[idx] = batch\n continue\n return self._process_next_batch(batch)\n\n next = __next__ # Python 2 compatibility\n\n def __iter__(self):\n return self\n\n def _put_indices(self):\n assert self.batches_outstanding < 2 * self.num_workers\n indices = next(self.sample_iter, None)\n if indices is None:\n return\n self.index_queue.put((self.send_idx, indices))\n self.batches_outstanding += 1\n self.send_idx += 1\n\n def _process_next_batch(self, batch):\n self.rcvd_idx += 1\n self._put_indices()\n if isinstance(batch, ExceptionWrapper):\n raise batch.exc_type(batch.exc_msg)\n return batch\n\n def __getstate__(self):\n # TODO: add limited pickling support for sharing an iterator\n # across multiple threads for HOGWILD.\n # Probably the best way to do this is by moving the sample pushing\n # to a separate thread and then just sharing the data queue\n # but signalling the end is tricky without a non-blocking API\n raise NotImplementedError(\"DataLoaderIterator cannot be pickled\")\n\n def _shutdown_workers(self):\n if not self.shutdown:\n self.shutdown = True\n self.done_event.set()\n for _ in self.workers:\n self.index_queue.put(None)\n\n def __del__(self):\n if self.num_workers > 0:\n self._shutdown_workers()\n\n\nclass DataLoader(object):\n \"\"\"\n Data loader. Combines a dataset and a sampler, and provides\n single- or multi-process iterators over the dataset.\n\n Arguments:\n dataset (Dataset): dataset from which to load the data.\n batch_size (int, optional): how many samples per batch to load\n (default: 1).\n shuffle (bool, optional): set to ``True`` to have the data reshuffled\n at every epoch (default: False).\n sampler (Sampler, optional): defines the strategy to draw samples from\n the dataset. If specified, ``shuffle`` must be False.\n batch_sampler (Sampler, optional): like sampler, but returns a batch of\n indices at a time. Mutually exclusive with batch_size, shuffle,\n sampler, and drop_last.\n num_workers (int, optional): how many subprocesses to use for data\n loading. 0 means that the data will be loaded in the main process\n (default: 0)\n collate_fn (callable, optional): merges a list of samples to form a mini-batch.\n pin_memory (bool, optional): If ``True``, the data loader will copy tensors\n into CUDA pinned memory before returning them.\n drop_last (bool, optional): set to ``True`` to drop the last incomplete batch,\n if the dataset size is not divisible by the batch size. If False and\n the size of dataset is not divisible by the batch size, then the last batch\n will be smaller. 
(default: False)\n \"\"\"\n\n def __init__(self, dataset, batch_size=1, shuffle=False, sampler=None, batch_sampler=None,\n num_workers=0, collate_fn=default_collate, pin_memory=False, drop_last=False):\n self.dataset = dataset\n self.batch_size = batch_size\n self.num_workers = num_workers\n self.collate_fn = collate_fn\n self.pin_memory = pin_memory\n self.drop_last = drop_last\n\n if batch_sampler is not None:\n if batch_size > 1 or shuffle or sampler is not None or drop_last:\n raise ValueError('batch_sampler is mutually exclusive with '\n 'batch_size, shuffle, sampler, and drop_last')\n\n if sampler is not None and shuffle:\n raise ValueError('sampler is mutually exclusive with shuffle')\n\n if batch_sampler is None:\n if sampler is None:\n if shuffle:\n sampler = RandomSampler(dataset)\n else:\n sampler = SequentialSampler(dataset)\n batch_sampler = BatchSampler(sampler, batch_size, drop_last)\n\n self.sampler = sampler\n self.batch_sampler = batch_sampler\n\n def __iter__(self):\n return DataLoaderIter(self)\n\n def __len__(self):\n return len(self.batch_sampler)\n", "path": "torch/utils/data/dataloader.py"}]}
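The least self-explanatory part of the patched `default_collate` in the record above is the `re.search('[SaUO]', elem.dtype.str)` guard added to the numpy branch. The snippet below is only a standalone illustration of what that regex matches (the sample arrays are invented; this is not torch code): bytes (`S`/`a`), unicode (`U`) and object (`O`) dtypes are flagged because `torch.from_numpy` cannot convert them, so the patch raises a clear `TypeError` up front instead of failing deeper in the stack.

```python
import re
import numpy as np

# numpy encodes the element kind in dtype.str; 'S', 'a', 'U' and 'O' mean
# bytes, bytes (legacy alias), unicode and Python-object elements.
samples = [
    np.array([b"ab", b"cd"]),                  # dtype.str '|S2'
    np.array(["ab", "cd"]),                    # dtype.str '<U2'
    np.array([{"x": 1}, None], dtype=object),  # dtype.str '|O'
    np.array([1.0, 2.0]),                      # dtype.str '<f8'
]
for arr in samples:
    rejected = re.search('[SaUO]', arr.dtype.str) is not None
    print(arr.dtype.str, "rejected" if rejected else "collatable")
```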
3,987
479
gh_patches_debug_30030
rasdani/github-patches
git_diff
OCA__server-tools-316
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [8.0][dead_mans_switch_client] Module crashes runbots I'm seeing more and more runbots with :x: because of this module. [This seems the offending line](https://github.com/OCA/server-tools/blob/8.0/dead_mans_switch_client/models/dead_mans_switch_client.py#L54). Any clue on how to fix it? Example runbot: https://runbot.odoo-community.org/runbot/build/3137787 CC @hbrunn. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `dead_mans_switch_client/__openerp__.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 # © 2015 Therp BV <http://therp.nl> 3 # License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html). 4 { 5 "name": "Dead man's switch (client)", 6 "version": "8.0.1.0.0", 7 "author": "Therp BV,Odoo Community Association (OCA)", 8 "license": "AGPL-3", 9 "category": "Monitoring", 10 "summary": "Be notified when customers' odoo instances go down", 11 "depends": [ 12 'base', 13 ], 14 "data": [ 15 "data/ir_actions.xml", 16 "data/ir_cron.xml", 17 ], 18 } 19 ``` Path: `dead_mans_switch_client/models/dead_mans_switch_client.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 # © 2015 Therp BV <http://therp.nl> 3 # License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html). 4 import json 5 import logging 6 import os 7 try: 8 import psutil 9 except ImportError: 10 psutil = None 11 import urllib2 12 from openerp import api, models 13 14 15 class DeadMansSwitchClient(models.AbstractModel): 16 _name = 'dead.mans.switch.client' 17 _register = True 18 19 @api.model 20 def _get_data(self): 21 ram = 0 22 cpu = 0 23 if psutil: 24 process = psutil.Process(os.getpid()) 25 # psutil changed its api through versions 26 if process.parent: 27 if hasattr(process.parent, '__call__'): 28 process = process.parent() 29 else: 30 process = process.parent 31 if hasattr(process, 'memory_percent'): 32 ram = process.memory_percent() 33 if hasattr(process, 'cpu_percent'): 34 cpu = process.cpu_percent() 35 user_count = 0 36 if 'im_chat.presence' in self.env.registry: 37 user_count = len(self.env['im_chat.presence'].search([ 38 ('status', '!=', 'offline'), 39 ])) 40 return { 41 'database_uuid': self.env['ir.config_parameter'].get_param( 42 'database.uuid'), 43 'cpu': cpu, 44 'ram': ram, 45 'user_count': user_count, 46 } 47 48 @api.model 49 def alive(self): 50 url = self.env['ir.config_parameter'].get_param( 51 'dead_mans_switch_client.url') 52 logger = logging.getLogger(__name__) 53 if not url: 54 logger.error('No server configured!') 55 return 56 data = self._get_data() 57 logger.debug('sending %s', data) 58 urllib2.urlopen( 59 urllib2.Request( 60 url, 61 json.dumps({ 62 'jsonrpc': '2.0', 63 'method': 'call', 64 'params': data, 65 }), 66 { 67 'Content-Type': 'application/json', 68 })) 69 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/dead_mans_switch_client/__openerp__.py b/dead_mans_switch_client/__openerp__.py --- a/dead_mans_switch_client/__openerp__.py +++ b/dead_mans_switch_client/__openerp__.py @@ -3,7 +3,7 @@ # License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html). { "name": "Dead man's switch (client)", - "version": "8.0.1.0.0", + "version": "8.0.1.0.1", "author": "Therp BV,Odoo Community Association (OCA)", "license": "AGPL-3", "category": "Monitoring", @@ -15,4 +15,7 @@ "data/ir_actions.xml", "data/ir_cron.xml", ], + "demo": [ + "demo/dead_mans_switch_client_demo.yml", + ], } diff --git a/dead_mans_switch_client/models/dead_mans_switch_client.py b/dead_mans_switch_client/models/dead_mans_switch_client.py --- a/dead_mans_switch_client/models/dead_mans_switch_client.py +++ b/dead_mans_switch_client/models/dead_mans_switch_client.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- # © 2015 Therp BV <http://therp.nl> +# © 2015 Grupo ESOC Ingeniería de Servicios, S.L.U. - Jairo Llopis # License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html). import json import logging @@ -66,3 +67,19 @@ { 'Content-Type': 'application/json', })) + + @api.model + def _install_default_url(self): + """Set up a default URL.""" + conf = self.env["ir.config_parameter"] + name = "dead_mans_switch_client.url" + param = conf.get_param(name) + + if not param: + url = "{}/dead_mans_switch/alive".format( + conf.get_param( + "report.url", + conf.get_param( + "web.base.url", + "http://localhost"))) + conf.set_param(name, url)
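The core of the fix above is the nested `get_param` fallback inside `_install_default_url`: when no dead man's switch URL is configured, one is derived from `report.url`, then `web.base.url`, then a localhost default, so the cron's `alive()` call no longer reaches the `logger.error('No server configured!')` branch the issue points to. A plain-Python stand-in for that fallback chain (the dict replaces `ir.config_parameter` and the stored base URL is invented; this is not Odoo code):

```python
# Illustrative stand-in for ir.config_parameter; not Odoo code.
params = {"web.base.url": "http://demo.example.com:8069"}

def get_param(name, default=None):
    return params.get(name, default)

def install_default_url():
    if not get_param("dead_mans_switch_client.url"):
        base = get_param("report.url", get_param("web.base.url", "http://localhost"))
        params["dead_mans_switch_client.url"] = "{}/dead_mans_switch/alive".format(base)

install_default_url()
print(params["dead_mans_switch_client.url"])
# -> http://demo.example.com:8069/dead_mans_switch/alive
```

Because the helper only writes the parameter when it is missing, re-running it on an instance where an administrator has already set a URL leaves that value untouched.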
{"golden_diff": "diff --git a/dead_mans_switch_client/__openerp__.py b/dead_mans_switch_client/__openerp__.py\n--- a/dead_mans_switch_client/__openerp__.py\n+++ b/dead_mans_switch_client/__openerp__.py\n@@ -3,7 +3,7 @@\n # License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).\n {\n \"name\": \"Dead man's switch (client)\",\n- \"version\": \"8.0.1.0.0\",\n+ \"version\": \"8.0.1.0.1\",\n \"author\": \"Therp BV,Odoo Community Association (OCA)\",\n \"license\": \"AGPL-3\",\n \"category\": \"Monitoring\",\n@@ -15,4 +15,7 @@\n \"data/ir_actions.xml\",\n \"data/ir_cron.xml\",\n ],\n+ \"demo\": [\n+ \"demo/dead_mans_switch_client_demo.yml\",\n+ ],\n }\ndiff --git a/dead_mans_switch_client/models/dead_mans_switch_client.py b/dead_mans_switch_client/models/dead_mans_switch_client.py\n--- a/dead_mans_switch_client/models/dead_mans_switch_client.py\n+++ b/dead_mans_switch_client/models/dead_mans_switch_client.py\n@@ -1,5 +1,6 @@\n # -*- coding: utf-8 -*-\n # \u00a9 2015 Therp BV <http://therp.nl>\n+# \u00a9 2015 Grupo ESOC Ingenier\u00eda de Servicios, S.L.U. - Jairo Llopis\n # License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).\n import json\n import logging\n@@ -66,3 +67,19 @@\n {\n 'Content-Type': 'application/json',\n }))\n+\n+ @api.model\n+ def _install_default_url(self):\n+ \"\"\"Set up a default URL.\"\"\"\n+ conf = self.env[\"ir.config_parameter\"]\n+ name = \"dead_mans_switch_client.url\"\n+ param = conf.get_param(name)\n+\n+ if not param:\n+ url = \"{}/dead_mans_switch/alive\".format(\n+ conf.get_param(\n+ \"report.url\",\n+ conf.get_param(\n+ \"web.base.url\",\n+ \"http://localhost\")))\n+ conf.set_param(name, url)\n", "issue": "[8.0][dead_mans_switch_client] Module crashes runbots\nI'm seeing more and more runbots with :x: because of this module. [This seems the offending line](https://github.com/OCA/server-tools/blob/8.0/dead_mans_switch_client/models/dead_mans_switch_client.py#L54). 
Any clue on how to fix it?\n\nExample runbot: https://runbot.odoo-community.org/runbot/build/3137787\n\nCC @hbrunn.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# \u00a9 2015 Therp BV <http://therp.nl>\n# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).\n{\n \"name\": \"Dead man's switch (client)\",\n \"version\": \"8.0.1.0.0\",\n \"author\": \"Therp BV,Odoo Community Association (OCA)\",\n \"license\": \"AGPL-3\",\n \"category\": \"Monitoring\",\n \"summary\": \"Be notified when customers' odoo instances go down\",\n \"depends\": [\n 'base',\n ],\n \"data\": [\n \"data/ir_actions.xml\",\n \"data/ir_cron.xml\",\n ],\n}\n", "path": "dead_mans_switch_client/__openerp__.py"}, {"content": "# -*- coding: utf-8 -*-\n# \u00a9 2015 Therp BV <http://therp.nl>\n# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).\nimport json\nimport logging\nimport os\ntry:\n import psutil\nexcept ImportError:\n psutil = None\nimport urllib2\nfrom openerp import api, models\n\n\nclass DeadMansSwitchClient(models.AbstractModel):\n _name = 'dead.mans.switch.client'\n _register = True\n\n @api.model\n def _get_data(self):\n ram = 0\n cpu = 0\n if psutil:\n process = psutil.Process(os.getpid())\n # psutil changed its api through versions\n if process.parent:\n if hasattr(process.parent, '__call__'):\n process = process.parent()\n else:\n process = process.parent\n if hasattr(process, 'memory_percent'):\n ram = process.memory_percent()\n if hasattr(process, 'cpu_percent'):\n cpu = process.cpu_percent()\n user_count = 0\n if 'im_chat.presence' in self.env.registry:\n user_count = len(self.env['im_chat.presence'].search([\n ('status', '!=', 'offline'),\n ]))\n return {\n 'database_uuid': self.env['ir.config_parameter'].get_param(\n 'database.uuid'),\n 'cpu': cpu,\n 'ram': ram,\n 'user_count': user_count,\n }\n\n @api.model\n def alive(self):\n url = self.env['ir.config_parameter'].get_param(\n 'dead_mans_switch_client.url')\n logger = logging.getLogger(__name__)\n if not url:\n logger.error('No server configured!')\n return\n data = self._get_data()\n logger.debug('sending %s', data)\n urllib2.urlopen(\n urllib2.Request(\n url,\n json.dumps({\n 'jsonrpc': '2.0',\n 'method': 'call',\n 'params': data,\n }),\n {\n 'Content-Type': 'application/json',\n }))\n", "path": "dead_mans_switch_client/models/dead_mans_switch_client.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# \u00a9 2015 Therp BV <http://therp.nl>\n# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).\n{\n \"name\": \"Dead man's switch (client)\",\n \"version\": \"8.0.1.0.1\",\n \"author\": \"Therp BV,Odoo Community Association (OCA)\",\n \"license\": \"AGPL-3\",\n \"category\": \"Monitoring\",\n \"summary\": \"Be notified when customers' odoo instances go down\",\n \"depends\": [\n 'base',\n ],\n \"data\": [\n \"data/ir_actions.xml\",\n \"data/ir_cron.xml\",\n ],\n \"demo\": [\n \"demo/dead_mans_switch_client_demo.yml\",\n ],\n}\n", "path": "dead_mans_switch_client/__openerp__.py"}, {"content": "# -*- coding: utf-8 -*-\n# \u00a9 2015 Therp BV <http://therp.nl>\n# \u00a9 2015 Grupo ESOC Ingenier\u00eda de Servicios, S.L.U. 
- Jairo Llopis\n# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).\nimport json\nimport logging\nimport os\ntry:\n import psutil\nexcept ImportError:\n psutil = None\nimport urllib2\nfrom openerp import api, models\n\n\nclass DeadMansSwitchClient(models.AbstractModel):\n _name = 'dead.mans.switch.client'\n _register = True\n\n @api.model\n def _get_data(self):\n ram = 0\n cpu = 0\n if psutil:\n process = psutil.Process(os.getpid())\n # psutil changed its api through versions\n if process.parent:\n if hasattr(process.parent, '__call__'):\n process = process.parent()\n else:\n process = process.parent\n if hasattr(process, 'memory_percent'):\n ram = process.memory_percent()\n if hasattr(process, 'cpu_percent'):\n cpu = process.cpu_percent()\n user_count = 0\n if 'im_chat.presence' in self.env.registry:\n user_count = len(self.env['im_chat.presence'].search([\n ('status', '!=', 'offline'),\n ]))\n return {\n 'database_uuid': self.env['ir.config_parameter'].get_param(\n 'database.uuid'),\n 'cpu': cpu,\n 'ram': ram,\n 'user_count': user_count,\n }\n\n @api.model\n def alive(self):\n url = self.env['ir.config_parameter'].get_param(\n 'dead_mans_switch_client.url')\n logger = logging.getLogger(__name__)\n if not url:\n logger.error('No server configured!')\n return\n data = self._get_data()\n logger.debug('sending %s', data)\n urllib2.urlopen(\n urllib2.Request(\n url,\n json.dumps({\n 'jsonrpc': '2.0',\n 'method': 'call',\n 'params': data,\n }),\n {\n 'Content-Type': 'application/json',\n }))\n\n @api.model\n def _install_default_url(self):\n \"\"\"Set up a default URL.\"\"\"\n conf = self.env[\"ir.config_parameter\"]\n name = \"dead_mans_switch_client.url\"\n param = conf.get_param(name)\n\n if not param:\n url = \"{}/dead_mans_switch/alive\".format(\n conf.get_param(\n \"report.url\",\n conf.get_param(\n \"web.base.url\",\n \"http://localhost\")))\n conf.set_param(name, url)\n", "path": "dead_mans_switch_client/models/dead_mans_switch_client.py"}]}
1,164
528
gh_patches_debug_39733
rasdani/github-patches
git_diff
getsentry__sentry-python-674
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- aiohttp integration ability to use contextvars in logger Hi! I was looking to add a custom [logging filter](https://docs.python.org/3/library/logging.html#logging.Filter) to my aiohttp server during exception that are catched [here](https://github.com/aio-libs/aiohttp/blob/16a49c143fc0abab75163fb78738fff3d3e17f49/aiohttp/web_protocol.py#L387). It's useful and easy to do, but, it occurs that, in my custom logging filter, I can't find [contextvars](https://docs.python.org/3/library/contextvars.html) that I have created in my request handler task. Two things caused this problem: - [aiohttp handled requests in a sub-Task, and catched exceptions in the parent Task](https://github.com/aio-libs/aiohttp/blob/6a5ab96bd9cb404b4abfd5160fe8f34a29d941e5/aiohttp/web_protocol.py#L415-L416). → This was fixed in https://github.com/aio-libs/aiohttp/commit/9997cae (because users asked to be able to access `contextvars` -- like us). It was even [backported to aiohttp version 3.7](https://github.com/aio-libs/aiohttp/commit/29eccad84e8200b5c90856c8732da0fdbbcef904). - [Sentry-aiohttp integration handles requests in a sub-Task too](https://github.com/getsentry/sentry-python/blob/cd646579d04e2fad6a8994304314ac52fec2f83c/sentry_sdk/integrations/aiohttp.py#L113). Python documentation on Tasks [here](https://docs.python.org/3/library/asyncio-task.html#asyncio.Task). One important thing is that they reset *contextvars*. To summarize: ``` aiohttp logging exception + ^ | | | asyncio.create_task(handle_request()) | contextvars didn't go up again | | (it's fixed now) | | v | Sentry | + | | | | asyncio.create_task(handle_request()) | contextvars don't go up | | v | I set contextvars +---------------------+ Exception ``` As long as the issue is not fixed in Sentry, I still can't use `contextvars` to log custom data using the standard Python `logging` library. The only solution is to disable Sentry, then logging works OK with contextvars. Any idea how to fix this in Sentry-aiohttp code? I'd be happy to open a PR, but I'm not familiar enough with Sentry code, or Python in general, thus I need some help at least. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
--- BEGIN FILES --- Path: `sentry_sdk/integrations/aiohttp.py` Content: ``` 1 import sys 2 import weakref 3 4 from sentry_sdk._compat import reraise 5 from sentry_sdk.hub import Hub 6 from sentry_sdk.integrations import Integration, DidNotEnable 7 from sentry_sdk.integrations.logging import ignore_logger 8 from sentry_sdk.integrations._wsgi_common import ( 9 _filter_headers, 10 request_body_within_bounds, 11 ) 12 from sentry_sdk.tracing import Span 13 from sentry_sdk.utils import ( 14 capture_internal_exceptions, 15 event_from_exception, 16 transaction_from_function, 17 HAS_REAL_CONTEXTVARS, 18 AnnotatedValue, 19 ) 20 21 try: 22 import asyncio 23 24 from aiohttp import __version__ as AIOHTTP_VERSION 25 from aiohttp.web import Application, HTTPException, UrlDispatcher 26 except ImportError: 27 raise DidNotEnable("AIOHTTP not installed") 28 29 from sentry_sdk._types import MYPY 30 31 if MYPY: 32 from aiohttp.web_request import Request 33 from aiohttp.abc import AbstractMatchInfo 34 from typing import Any 35 from typing import Dict 36 from typing import Optional 37 from typing import Tuple 38 from typing import Callable 39 from typing import Union 40 41 from sentry_sdk.utils import ExcInfo 42 from sentry_sdk._types import EventProcessor 43 44 45 class AioHttpIntegration(Integration): 46 identifier = "aiohttp" 47 48 @staticmethod 49 def setup_once(): 50 # type: () -> None 51 52 try: 53 version = tuple(map(int, AIOHTTP_VERSION.split("."))) 54 except (TypeError, ValueError): 55 raise DidNotEnable("AIOHTTP version unparseable: {}".format(version)) 56 57 if version < (3, 4): 58 raise DidNotEnable("AIOHTTP 3.4 or newer required.") 59 60 if not HAS_REAL_CONTEXTVARS: 61 # We better have contextvars or we're going to leak state between 62 # requests. 63 raise RuntimeError( 64 "The aiohttp integration for Sentry requires Python 3.7+ " 65 " or aiocontextvars package" 66 ) 67 68 ignore_logger("aiohttp.server") 69 70 old_handle = Application._handle 71 72 async def sentry_app_handle(self, request, *args, **kwargs): 73 # type: (Any, Request, *Any, **Any) -> Any 74 async def inner(): 75 # type: () -> Any 76 hub = Hub.current 77 if hub.get_integration(AioHttpIntegration) is None: 78 return await old_handle(self, request, *args, **kwargs) 79 80 weak_request = weakref.ref(request) 81 82 with Hub(Hub.current) as hub: 83 with hub.configure_scope() as scope: 84 scope.clear_breadcrumbs() 85 scope.add_event_processor(_make_request_processor(weak_request)) 86 87 span = Span.continue_from_headers(request.headers) 88 span.op = "http.server" 89 # If this transaction name makes it to the UI, AIOHTTP's 90 # URL resolver did not find a route or died trying. 91 span.transaction = "generic AIOHTTP request" 92 93 with hub.start_span(span): 94 try: 95 response = await old_handle(self, request) 96 except HTTPException as e: 97 span.set_http_status(e.status_code) 98 raise 99 except asyncio.CancelledError: 100 span.set_status("cancelled") 101 raise 102 except Exception: 103 # This will probably map to a 500 but seems like we 104 # have no way to tell. Do not set span status. 105 reraise(*_capture_exception(hub)) 106 107 span.set_http_status(response.status) 108 return response 109 110 # Explicitly wrap in task such that current contextvar context is 111 # copied. Just doing `return await inner()` will leak scope data 112 # between requests. 
113 return await asyncio.get_event_loop().create_task(inner()) 114 115 Application._handle = sentry_app_handle 116 117 old_urldispatcher_resolve = UrlDispatcher.resolve 118 119 async def sentry_urldispatcher_resolve(self, request): 120 # type: (UrlDispatcher, Request) -> AbstractMatchInfo 121 rv = await old_urldispatcher_resolve(self, request) 122 123 name = None 124 125 try: 126 name = transaction_from_function(rv.handler) 127 except Exception: 128 pass 129 130 if name is not None: 131 with Hub.current.configure_scope() as scope: 132 scope.transaction = name 133 134 return rv 135 136 UrlDispatcher.resolve = sentry_urldispatcher_resolve 137 138 139 def _make_request_processor(weak_request): 140 # type: (Callable[[], Request]) -> EventProcessor 141 def aiohttp_processor( 142 event, # type: Dict[str, Any] 143 hint, # type: Dict[str, Tuple[type, BaseException, Any]] 144 ): 145 # type: (...) -> Dict[str, Any] 146 request = weak_request() 147 if request is None: 148 return event 149 150 with capture_internal_exceptions(): 151 request_info = event.setdefault("request", {}) 152 153 request_info["url"] = "%s://%s%s" % ( 154 request.scheme, 155 request.host, 156 request.path, 157 ) 158 159 request_info["query_string"] = request.query_string 160 request_info["method"] = request.method 161 request_info["env"] = {"REMOTE_ADDR": request.remote} 162 163 hub = Hub.current 164 request_info["headers"] = _filter_headers(dict(request.headers)) 165 166 # Just attach raw data here if it is within bounds, if available. 167 # Unfortunately there's no way to get structured data from aiohttp 168 # without awaiting on some coroutine. 169 request_info["data"] = get_aiohttp_request_data(hub, request) 170 171 return event 172 173 return aiohttp_processor 174 175 176 def _capture_exception(hub): 177 # type: (Hub) -> ExcInfo 178 exc_info = sys.exc_info() 179 event, hint = event_from_exception( 180 exc_info, 181 client_options=hub.client.options, # type: ignore 182 mechanism={"type": "aiohttp", "handled": False}, 183 ) 184 hub.capture_event(event, hint=hint) 185 return exc_info 186 187 188 BODY_NOT_READ_MESSAGE = "[Can't show request body due to implementation details.]" 189 190 191 def get_aiohttp_request_data(hub, request): 192 # type: (Hub, Request) -> Union[Optional[str], AnnotatedValue] 193 bytes_body = request._read_bytes 194 195 if bytes_body is not None: 196 # we have body to show 197 if not request_body_within_bounds(hub.client, len(bytes_body)): 198 199 return AnnotatedValue( 200 "", 201 {"rem": [["!config", "x", 0, len(bytes_body)]], "len": len(bytes_body)}, 202 ) 203 encoding = request.charset or "utf-8" 204 return bytes_body.decode(encoding, "replace") 205 206 if request.can_read_body: 207 # body exists but we can't show it 208 return BODY_NOT_READ_MESSAGE 209 210 # request has no body 211 return None 212 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/sentry_sdk/integrations/aiohttp.py b/sentry_sdk/integrations/aiohttp.py --- a/sentry_sdk/integrations/aiohttp.py +++ b/sentry_sdk/integrations/aiohttp.py @@ -71,46 +71,41 @@ async def sentry_app_handle(self, request, *args, **kwargs): # type: (Any, Request, *Any, **Any) -> Any - async def inner(): - # type: () -> Any - hub = Hub.current - if hub.get_integration(AioHttpIntegration) is None: - return await old_handle(self, request, *args, **kwargs) - - weak_request = weakref.ref(request) - - with Hub(Hub.current) as hub: - with hub.configure_scope() as scope: - scope.clear_breadcrumbs() - scope.add_event_processor(_make_request_processor(weak_request)) - - span = Span.continue_from_headers(request.headers) - span.op = "http.server" - # If this transaction name makes it to the UI, AIOHTTP's - # URL resolver did not find a route or died trying. - span.transaction = "generic AIOHTTP request" - - with hub.start_span(span): - try: - response = await old_handle(self, request) - except HTTPException as e: - span.set_http_status(e.status_code) - raise - except asyncio.CancelledError: - span.set_status("cancelled") - raise - except Exception: - # This will probably map to a 500 but seems like we - # have no way to tell. Do not set span status. - reraise(*_capture_exception(hub)) - - span.set_http_status(response.status) - return response - - # Explicitly wrap in task such that current contextvar context is - # copied. Just doing `return await inner()` will leak scope data - # between requests. - return await asyncio.get_event_loop().create_task(inner()) + hub = Hub.current + if hub.get_integration(AioHttpIntegration) is None: + return await old_handle(self, request, *args, **kwargs) + + weak_request = weakref.ref(request) + + with Hub(Hub.current) as hub: + # Scope data will not leak between requests because aiohttp + # create a task to wrap each request. + with hub.configure_scope() as scope: + scope.clear_breadcrumbs() + scope.add_event_processor(_make_request_processor(weak_request)) + + span = Span.continue_from_headers(request.headers) + span.op = "http.server" + # If this transaction name makes it to the UI, AIOHTTP's + # URL resolver did not find a route or died trying. + span.transaction = "generic AIOHTTP request" + + with hub.start_span(span): + try: + response = await old_handle(self, request) + except HTTPException as e: + span.set_http_status(e.status_code) + raise + except asyncio.CancelledError: + span.set_status("cancelled") + raise + except Exception: + # This will probably map to a 500 but seems like we + # have no way to tell. Do not set span status. + reraise(*_capture_exception(hub)) + + span.set_http_status(response.status) + return response Application._handle = sentry_app_handle
{"golden_diff": "diff --git a/sentry_sdk/integrations/aiohttp.py b/sentry_sdk/integrations/aiohttp.py\n--- a/sentry_sdk/integrations/aiohttp.py\n+++ b/sentry_sdk/integrations/aiohttp.py\n@@ -71,46 +71,41 @@\n \n async def sentry_app_handle(self, request, *args, **kwargs):\n # type: (Any, Request, *Any, **Any) -> Any\n- async def inner():\n- # type: () -> Any\n- hub = Hub.current\n- if hub.get_integration(AioHttpIntegration) is None:\n- return await old_handle(self, request, *args, **kwargs)\n-\n- weak_request = weakref.ref(request)\n-\n- with Hub(Hub.current) as hub:\n- with hub.configure_scope() as scope:\n- scope.clear_breadcrumbs()\n- scope.add_event_processor(_make_request_processor(weak_request))\n-\n- span = Span.continue_from_headers(request.headers)\n- span.op = \"http.server\"\n- # If this transaction name makes it to the UI, AIOHTTP's\n- # URL resolver did not find a route or died trying.\n- span.transaction = \"generic AIOHTTP request\"\n-\n- with hub.start_span(span):\n- try:\n- response = await old_handle(self, request)\n- except HTTPException as e:\n- span.set_http_status(e.status_code)\n- raise\n- except asyncio.CancelledError:\n- span.set_status(\"cancelled\")\n- raise\n- except Exception:\n- # This will probably map to a 500 but seems like we\n- # have no way to tell. Do not set span status.\n- reraise(*_capture_exception(hub))\n-\n- span.set_http_status(response.status)\n- return response\n-\n- # Explicitly wrap in task such that current contextvar context is\n- # copied. Just doing `return await inner()` will leak scope data\n- # between requests.\n- return await asyncio.get_event_loop().create_task(inner())\n+ hub = Hub.current\n+ if hub.get_integration(AioHttpIntegration) is None:\n+ return await old_handle(self, request, *args, **kwargs)\n+\n+ weak_request = weakref.ref(request)\n+\n+ with Hub(Hub.current) as hub:\n+ # Scope data will not leak between requests because aiohttp\n+ # create a task to wrap each request.\n+ with hub.configure_scope() as scope:\n+ scope.clear_breadcrumbs()\n+ scope.add_event_processor(_make_request_processor(weak_request))\n+\n+ span = Span.continue_from_headers(request.headers)\n+ span.op = \"http.server\"\n+ # If this transaction name makes it to the UI, AIOHTTP's\n+ # URL resolver did not find a route or died trying.\n+ span.transaction = \"generic AIOHTTP request\"\n+\n+ with hub.start_span(span):\n+ try:\n+ response = await old_handle(self, request)\n+ except HTTPException as e:\n+ span.set_http_status(e.status_code)\n+ raise\n+ except asyncio.CancelledError:\n+ span.set_status(\"cancelled\")\n+ raise\n+ except Exception:\n+ # This will probably map to a 500 but seems like we\n+ # have no way to tell. 
Do not set span status.\n+ reraise(*_capture_exception(hub))\n+\n+ span.set_http_status(response.status)\n+ return response\n \n Application._handle = sentry_app_handle\n", "issue": "aiohttp integration ability to use contextvars in logger \nHi!\r\n\r\nI was looking to add a custom [logging filter](https://docs.python.org/3/library/logging.html#logging.Filter) to my aiohttp server during exception that are catched [here](https://github.com/aio-libs/aiohttp/blob/16a49c143fc0abab75163fb78738fff3d3e17f49/aiohttp/web_protocol.py#L387).\r\n\r\nIt's useful and easy to do, but, it occurs that, in my custom logging filter, I can't find [contextvars](https://docs.python.org/3/library/contextvars.html) that I have created in my request handler task.\r\n\r\nTwo things caused this problem:\r\n- [aiohttp handled requests in a sub-Task, and catched exceptions in the parent Task](https://github.com/aio-libs/aiohttp/blob/6a5ab96bd9cb404b4abfd5160fe8f34a29d941e5/aiohttp/web_protocol.py#L415-L416).\r\n \u2192 This was fixed in https://github.com/aio-libs/aiohttp/commit/9997cae (because users asked to be able to access `contextvars` -- like us). It was even [backported to aiohttp version 3.7](https://github.com/aio-libs/aiohttp/commit/29eccad84e8200b5c90856c8732da0fdbbcef904).\r\n- [Sentry-aiohttp integration handles requests in a sub-Task too](https://github.com/getsentry/sentry-python/blob/cd646579d04e2fad6a8994304314ac52fec2f83c/sentry_sdk/integrations/aiohttp.py#L113).\r\n\r\nPython documentation on Tasks [here](https://docs.python.org/3/library/asyncio-task.html#asyncio.Task). One important thing is that they reset *contextvars*.\r\n\r\nTo summarize:\r\n```\r\n aiohttp logging exception\r\n + ^\r\n | |\r\n | asyncio.create_task(handle_request()) | contextvars didn't go up again\r\n | | (it's fixed now)\r\n | |\r\n v |\r\n Sentry |\r\n + |\r\n | |\r\n | asyncio.create_task(handle_request()) | contextvars don't go up\r\n | |\r\n v |\r\nI set contextvars +---------------------+\r\n\r\n Exception\r\n```\r\n\r\nAs long as the issue is not fixed in Sentry, I still can't use `contextvars` to log custom data using the standard Python `logging` library.\r\nThe only solution is to disable Sentry, then logging works OK with contextvars.\r\n\r\nAny idea how to fix this in Sentry-aiohttp code?\r\nI'd be happy to open a PR, but I'm not familiar enough with Sentry code, or Python in general, thus I need some help at least.\n", "before_files": [{"content": "import sys\nimport weakref\n\nfrom sentry_sdk._compat import reraise\nfrom sentry_sdk.hub import Hub\nfrom sentry_sdk.integrations import Integration, DidNotEnable\nfrom sentry_sdk.integrations.logging import ignore_logger\nfrom sentry_sdk.integrations._wsgi_common import (\n _filter_headers,\n request_body_within_bounds,\n)\nfrom sentry_sdk.tracing import Span\nfrom sentry_sdk.utils import (\n capture_internal_exceptions,\n event_from_exception,\n transaction_from_function,\n HAS_REAL_CONTEXTVARS,\n AnnotatedValue,\n)\n\ntry:\n import asyncio\n\n from aiohttp import __version__ as AIOHTTP_VERSION\n from aiohttp.web import Application, HTTPException, UrlDispatcher\nexcept ImportError:\n raise DidNotEnable(\"AIOHTTP not installed\")\n\nfrom sentry_sdk._types import MYPY\n\nif MYPY:\n from aiohttp.web_request import Request\n from aiohttp.abc import AbstractMatchInfo\n from typing import Any\n from typing import Dict\n from typing import Optional\n from typing import Tuple\n from typing import Callable\n from typing import Union\n\n from sentry_sdk.utils import 
ExcInfo\n from sentry_sdk._types import EventProcessor\n\n\nclass AioHttpIntegration(Integration):\n identifier = \"aiohttp\"\n\n @staticmethod\n def setup_once():\n # type: () -> None\n\n try:\n version = tuple(map(int, AIOHTTP_VERSION.split(\".\")))\n except (TypeError, ValueError):\n raise DidNotEnable(\"AIOHTTP version unparseable: {}\".format(version))\n\n if version < (3, 4):\n raise DidNotEnable(\"AIOHTTP 3.4 or newer required.\")\n\n if not HAS_REAL_CONTEXTVARS:\n # We better have contextvars or we're going to leak state between\n # requests.\n raise RuntimeError(\n \"The aiohttp integration for Sentry requires Python 3.7+ \"\n \" or aiocontextvars package\"\n )\n\n ignore_logger(\"aiohttp.server\")\n\n old_handle = Application._handle\n\n async def sentry_app_handle(self, request, *args, **kwargs):\n # type: (Any, Request, *Any, **Any) -> Any\n async def inner():\n # type: () -> Any\n hub = Hub.current\n if hub.get_integration(AioHttpIntegration) is None:\n return await old_handle(self, request, *args, **kwargs)\n\n weak_request = weakref.ref(request)\n\n with Hub(Hub.current) as hub:\n with hub.configure_scope() as scope:\n scope.clear_breadcrumbs()\n scope.add_event_processor(_make_request_processor(weak_request))\n\n span = Span.continue_from_headers(request.headers)\n span.op = \"http.server\"\n # If this transaction name makes it to the UI, AIOHTTP's\n # URL resolver did not find a route or died trying.\n span.transaction = \"generic AIOHTTP request\"\n\n with hub.start_span(span):\n try:\n response = await old_handle(self, request)\n except HTTPException as e:\n span.set_http_status(e.status_code)\n raise\n except asyncio.CancelledError:\n span.set_status(\"cancelled\")\n raise\n except Exception:\n # This will probably map to a 500 but seems like we\n # have no way to tell. Do not set span status.\n reraise(*_capture_exception(hub))\n\n span.set_http_status(response.status)\n return response\n\n # Explicitly wrap in task such that current contextvar context is\n # copied. Just doing `return await inner()` will leak scope data\n # between requests.\n return await asyncio.get_event_loop().create_task(inner())\n\n Application._handle = sentry_app_handle\n\n old_urldispatcher_resolve = UrlDispatcher.resolve\n\n async def sentry_urldispatcher_resolve(self, request):\n # type: (UrlDispatcher, Request) -> AbstractMatchInfo\n rv = await old_urldispatcher_resolve(self, request)\n\n name = None\n\n try:\n name = transaction_from_function(rv.handler)\n except Exception:\n pass\n\n if name is not None:\n with Hub.current.configure_scope() as scope:\n scope.transaction = name\n\n return rv\n\n UrlDispatcher.resolve = sentry_urldispatcher_resolve\n\n\ndef _make_request_processor(weak_request):\n # type: (Callable[[], Request]) -> EventProcessor\n def aiohttp_processor(\n event, # type: Dict[str, Any]\n hint, # type: Dict[str, Tuple[type, BaseException, Any]]\n ):\n # type: (...) 
-> Dict[str, Any]\n request = weak_request()\n if request is None:\n return event\n\n with capture_internal_exceptions():\n request_info = event.setdefault(\"request\", {})\n\n request_info[\"url\"] = \"%s://%s%s\" % (\n request.scheme,\n request.host,\n request.path,\n )\n\n request_info[\"query_string\"] = request.query_string\n request_info[\"method\"] = request.method\n request_info[\"env\"] = {\"REMOTE_ADDR\": request.remote}\n\n hub = Hub.current\n request_info[\"headers\"] = _filter_headers(dict(request.headers))\n\n # Just attach raw data here if it is within bounds, if available.\n # Unfortunately there's no way to get structured data from aiohttp\n # without awaiting on some coroutine.\n request_info[\"data\"] = get_aiohttp_request_data(hub, request)\n\n return event\n\n return aiohttp_processor\n\n\ndef _capture_exception(hub):\n # type: (Hub) -> ExcInfo\n exc_info = sys.exc_info()\n event, hint = event_from_exception(\n exc_info,\n client_options=hub.client.options, # type: ignore\n mechanism={\"type\": \"aiohttp\", \"handled\": False},\n )\n hub.capture_event(event, hint=hint)\n return exc_info\n\n\nBODY_NOT_READ_MESSAGE = \"[Can't show request body due to implementation details.]\"\n\n\ndef get_aiohttp_request_data(hub, request):\n # type: (Hub, Request) -> Union[Optional[str], AnnotatedValue]\n bytes_body = request._read_bytes\n\n if bytes_body is not None:\n # we have body to show\n if not request_body_within_bounds(hub.client, len(bytes_body)):\n\n return AnnotatedValue(\n \"\",\n {\"rem\": [[\"!config\", \"x\", 0, len(bytes_body)]], \"len\": len(bytes_body)},\n )\n encoding = request.charset or \"utf-8\"\n return bytes_body.decode(encoding, \"replace\")\n\n if request.can_read_body:\n # body exists but we can't show it\n return BODY_NOT_READ_MESSAGE\n\n # request has no body\n return None\n", "path": "sentry_sdk/integrations/aiohttp.py"}], "after_files": [{"content": "import sys\nimport weakref\n\nfrom sentry_sdk._compat import reraise\nfrom sentry_sdk.hub import Hub\nfrom sentry_sdk.integrations import Integration, DidNotEnable\nfrom sentry_sdk.integrations.logging import ignore_logger\nfrom sentry_sdk.integrations._wsgi_common import (\n _filter_headers,\n request_body_within_bounds,\n)\nfrom sentry_sdk.tracing import Span\nfrom sentry_sdk.utils import (\n capture_internal_exceptions,\n event_from_exception,\n transaction_from_function,\n HAS_REAL_CONTEXTVARS,\n AnnotatedValue,\n)\n\ntry:\n import asyncio\n\n from aiohttp import __version__ as AIOHTTP_VERSION\n from aiohttp.web import Application, HTTPException, UrlDispatcher\nexcept ImportError:\n raise DidNotEnable(\"AIOHTTP not installed\")\n\nfrom sentry_sdk._types import MYPY\n\nif MYPY:\n from aiohttp.web_request import Request\n from aiohttp.abc import AbstractMatchInfo\n from typing import Any\n from typing import Dict\n from typing import Optional\n from typing import Tuple\n from typing import Callable\n from typing import Union\n\n from sentry_sdk.utils import ExcInfo\n from sentry_sdk._types import EventProcessor\n\n\nclass AioHttpIntegration(Integration):\n identifier = \"aiohttp\"\n\n @staticmethod\n def setup_once():\n # type: () -> None\n\n try:\n version = tuple(map(int, AIOHTTP_VERSION.split(\".\")))\n except (TypeError, ValueError):\n raise DidNotEnable(\"AIOHTTP version unparseable: {}\".format(version))\n\n if version < (3, 4):\n raise DidNotEnable(\"AIOHTTP 3.4 or newer required.\")\n\n if not HAS_REAL_CONTEXTVARS:\n # We better have contextvars or we're going to leak state between\n # requests.\n 
raise RuntimeError(\n \"The aiohttp integration for Sentry requires Python 3.7+ \"\n \" or aiocontextvars package\"\n )\n\n ignore_logger(\"aiohttp.server\")\n\n old_handle = Application._handle\n\n async def sentry_app_handle(self, request, *args, **kwargs):\n # type: (Any, Request, *Any, **Any) -> Any\n hub = Hub.current\n if hub.get_integration(AioHttpIntegration) is None:\n return await old_handle(self, request, *args, **kwargs)\n\n weak_request = weakref.ref(request)\n\n with Hub(Hub.current) as hub:\n # Scope data will not leak between requests because aiohttp\n # create a task to wrap each request.\n with hub.configure_scope() as scope:\n scope.clear_breadcrumbs()\n scope.add_event_processor(_make_request_processor(weak_request))\n\n span = Span.continue_from_headers(request.headers)\n span.op = \"http.server\"\n # If this transaction name makes it to the UI, AIOHTTP's\n # URL resolver did not find a route or died trying.\n span.transaction = \"generic AIOHTTP request\"\n\n with hub.start_span(span):\n try:\n response = await old_handle(self, request)\n except HTTPException as e:\n span.set_http_status(e.status_code)\n raise\n except asyncio.CancelledError:\n span.set_status(\"cancelled\")\n raise\n except Exception:\n # This will probably map to a 500 but seems like we\n # have no way to tell. Do not set span status.\n reraise(*_capture_exception(hub))\n\n span.set_http_status(response.status)\n return response\n\n Application._handle = sentry_app_handle\n\n old_urldispatcher_resolve = UrlDispatcher.resolve\n\n async def sentry_urldispatcher_resolve(self, request):\n # type: (UrlDispatcher, Request) -> AbstractMatchInfo\n rv = await old_urldispatcher_resolve(self, request)\n\n name = None\n\n try:\n name = transaction_from_function(rv.handler)\n except Exception:\n pass\n\n if name is not None:\n with Hub.current.configure_scope() as scope:\n scope.transaction = name\n\n return rv\n\n UrlDispatcher.resolve = sentry_urldispatcher_resolve\n\n\ndef _make_request_processor(weak_request):\n # type: (Callable[[], Request]) -> EventProcessor\n def aiohttp_processor(\n event, # type: Dict[str, Any]\n hint, # type: Dict[str, Tuple[type, BaseException, Any]]\n ):\n # type: (...) 
-> Dict[str, Any]\n request = weak_request()\n if request is None:\n return event\n\n with capture_internal_exceptions():\n request_info = event.setdefault(\"request\", {})\n\n request_info[\"url\"] = \"%s://%s%s\" % (\n request.scheme,\n request.host,\n request.path,\n )\n\n request_info[\"query_string\"] = request.query_string\n request_info[\"method\"] = request.method\n request_info[\"env\"] = {\"REMOTE_ADDR\": request.remote}\n\n hub = Hub.current\n request_info[\"headers\"] = _filter_headers(dict(request.headers))\n\n # Just attach raw data here if it is within bounds, if available.\n # Unfortunately there's no way to get structured data from aiohttp\n # without awaiting on some coroutine.\n request_info[\"data\"] = get_aiohttp_request_data(hub, request)\n\n return event\n\n return aiohttp_processor\n\n\ndef _capture_exception(hub):\n # type: (Hub) -> ExcInfo\n exc_info = sys.exc_info()\n event, hint = event_from_exception(\n exc_info,\n client_options=hub.client.options, # type: ignore\n mechanism={\"type\": \"aiohttp\", \"handled\": False},\n )\n hub.capture_event(event, hint=hint)\n return exc_info\n\n\nBODY_NOT_READ_MESSAGE = \"[Can't show request body due to implementation details.]\"\n\n\ndef get_aiohttp_request_data(hub, request):\n # type: (Hub, Request) -> Union[Optional[str], AnnotatedValue]\n bytes_body = request._read_bytes\n\n if bytes_body is not None:\n # we have body to show\n if not request_body_within_bounds(hub.client, len(bytes_body)):\n\n return AnnotatedValue(\n \"\",\n {\"rem\": [[\"!config\", \"x\", 0, len(bytes_body)]], \"len\": len(bytes_body)},\n )\n encoding = request.charset or \"utf-8\"\n return bytes_body.decode(encoding, \"replace\")\n\n if request.can_read_body:\n # body exists but we can't show it\n return BODY_NOT_READ_MESSAGE\n\n # request has no body\n return None\n", "path": "sentry_sdk/integrations/aiohttp.py"}]}
2,967
781
gh_patches_debug_27865
rasdani/github-patches
git_diff
pulp__pulpcore-2344
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Reduce memory usage of the pipeline Author: @bmbouter (bmbouter) Redmine Issue: 9635, https://pulp.plan.io/issues/9635 --- ## Motivation It would be nice if users could specify a desired maximum amount of RAM to be used during sync. For example, a user can say I only want 1500 MB of RAM to be used max. ## What is already in place The stages pipeline restricts memory usage by only allowing 1000 declarative content objects between each stage (so for 8-9 stages that's 8000-9000 declarative content objects. This happens [here](https://github.com/pulp/pulpcore/blob/main/pulpcore/plugin/stages/api.py#L217). Interestingly the docstring says this defaults to 100, but it seems to actually be 1000! Also the stages perform batching, so they will only taking in a limited number of items (the batch size). That happens [with minsize](https://github.com/pulp/pulpcore/blob/main/pulpcore/plugin/stages/api.py#L84). ## Why this isn't enough These are count-based mechnisms and don't correspond to actual MB or GB of memory used. Some content units vary a lot in how much memory each DeclarativeContent objects take up. Another lesser problem is that it doesn't help plugin writers restrict their usage of memory in FirstStage. ## Idea Add a new param called `max_mb` to base Remote, which defaults to None. If specified, the user will be specifying the desired maximum MB used by process syncing. Have the queues between the stages, and the bather implementation, both check the total memory the current process is using and asyncio.sleep() polling until it goes down. This should keep the maximum amount used by all objects roughly to that number. ## Details Introduce a new `MBSizeQueue` which is a wrapper around `asyncio.Queue` used today. It will have the same `put()` call, only wait if the amount of memory in use is greater than the remote is configured for. Then introduce the same memory checking feature in the batcher. I'm not completely sure this second part is needed though. We have to be very careful not to deadlock with this feature. For example, we have to account for the base case where even a single item is larger than the memory desired. Repos in pulp_rpm have had a single unit use more than 1.2G if I remember right, so if someone was syncing with 800 MB and we weren't careful to allow that unit to still flow through the pipeline we'd deadlock..... --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pulpcore/plugin/stages/api.py` Content: ``` 1 import asyncio 2 import logging 3 4 from gettext import gettext as _ 5 6 from django.conf import settings 7 8 from .profiler import ProfilingQueue 9 10 11 log = logging.getLogger(__name__) 12 13 14 class Stage: 15 """ 16 The base class for all Stages API stages. 17 18 To make a stage, inherit from this class and implement :meth:`run` on the subclass. 19 """ 20 21 def __init__(self): 22 self._in_q = None 23 self._out_q = None 24 25 def _connect(self, in_q, out_q): 26 """ 27 Connect to queues within a pipeline. 28 29 Args: 30 in_q (asyncio.Queue): The stage input queue. 31 out_q (asyncio.Queue): The stage output queue. 32 """ 33 self._in_q = in_q 34 self._out_q = out_q 35 36 async def __call__(self): 37 """ 38 This coroutine makes the stage callable. 39 40 It calls :meth:`run` and signals the next stage that its work is finished. 
41 """ 42 log.debug(_("%(name)s - begin."), {"name": self}) 43 await self.run() 44 await self._out_q.put(None) 45 log.debug(_("%(name)s - put end-marker."), {"name": self}) 46 47 async def run(self): 48 """ 49 The coroutine that is run as part of this stage. 50 51 Returns: 52 The coroutine that runs this stage. 53 54 """ 55 raise NotImplementedError(_("A plugin writer must implement this method")) 56 57 async def items(self): 58 """ 59 Asynchronous iterator yielding items of :class:`DeclarativeContent` from `self._in_q`. 60 61 The iterator will get instances of :class:`DeclarativeContent` one by one as they get 62 available. 63 64 Yields: 65 An instance of :class:`DeclarativeContent` 66 67 Examples: 68 Used in stages to get d_content instances one by one from `self._in_q`:: 69 70 class MyStage(Stage): 71 async def run(self): 72 async for d_content in self.items(): 73 # process declarative content 74 await self.put(d_content) 75 76 """ 77 while True: 78 content = await self._in_q.get() 79 if content is None: 80 break 81 log.debug("%(name)s - next: %(content)s.", {"name": self, "content": content}) 82 yield content 83 84 async def batches(self, minsize=500): 85 """ 86 Asynchronous iterator yielding batches of :class:`DeclarativeContent` from `self._in_q`. 87 88 The iterator will try to get as many instances of 89 :class:`DeclarativeContent` as possible without blocking, but 90 at least `minsize` instances. 91 92 Args: 93 minsize (int): The minimum batch size to yield (unless it is the final batch) 94 95 Yields: 96 A list of :class:`DeclarativeContent` instances 97 98 Examples: 99 Used in stages to get large chunks of d_content instances from `self._in_q`:: 100 101 class MyStage(Stage): 102 async def run(self): 103 async for batch in self.batches(): 104 for d_content in batch: 105 # process declarative content 106 await self.put(d_content) 107 108 """ 109 batch = [] 110 shutdown = False 111 no_block = False 112 thaw_queue_event = asyncio.Event() 113 114 def add_to_batch(content): 115 nonlocal batch 116 nonlocal shutdown 117 nonlocal no_block 118 nonlocal thaw_queue_event 119 120 if content is None: 121 shutdown = True 122 log.debug(_("%(name)s - shutdown."), {"name": self}) 123 else: 124 if not content.does_batch: 125 no_block = True 126 content._thaw_queue_event = thaw_queue_event 127 batch.append(content) 128 129 get_listener = asyncio.ensure_future(self._in_q.get()) 130 thaw_event_listener = asyncio.ensure_future(thaw_queue_event.wait()) 131 while not shutdown: 132 done, pending = await asyncio.wait( 133 [thaw_event_listener, get_listener], return_when=asyncio.FIRST_COMPLETED 134 ) 135 if thaw_event_listener in done: 136 thaw_event_listener = asyncio.ensure_future(thaw_queue_event.wait()) 137 no_block = True 138 if get_listener in done: 139 content = await get_listener 140 add_to_batch(content) 141 get_listener = asyncio.ensure_future(self._in_q.get()) 142 while not shutdown: 143 try: 144 content = self._in_q.get_nowait() 145 except asyncio.QueueEmpty: 146 break 147 else: 148 add_to_batch(content) 149 150 if batch and (len(batch) >= minsize or shutdown or no_block): 151 log.debug( 152 _("%(name)s - next batch[%(length)d]."), {"name": self, "length": len(batch)} 153 ) 154 for content in batch: 155 content._thaw_queue_event = None 156 thaw_queue_event.clear() 157 yield batch 158 batch = [] 159 no_block = False 160 thaw_event_listener.cancel() 161 get_listener.cancel() 162 163 async def put(self, item): 164 """ 165 Coroutine to pass items to the next stage. 
166 167 Args: 168 item: A handled instance of :class:`pulpcore.plugin.stages.DeclarativeContent` 169 170 Raises: 171 ValueError: When `item` is None. 172 """ 173 if item is None: 174 raise ValueError(_("(None) not permitted.")) 175 await self._out_q.put(item) 176 log.debug("{name} - put: {content}".format(name=self, content=item)) 177 178 def __str__(self): 179 return "[{id}] {name}".format(id=id(self), name=self.__class__.__name__) 180 181 182 async def create_pipeline(stages, maxsize=1000): 183 """ 184 A coroutine that builds a Stages API linear pipeline from the list `stages` and runs it. 185 186 Each stage is an instance of a class derived from :class:`pulpcore.plugin.stages.Stage` that 187 implements the :meth:`run` coroutine. This coroutine reads asyncromously either from the 188 `items()` iterator or the `batches()` iterator and outputs the items with `put()`. Here is an 189 example of the simplest stage that only passes data:: 190 191 class MyStage(Stage): 192 async def run(self): 193 async for d_content in self.items(): # Fetch items from the previous stage 194 await self.put(d_content) # Hand them over to the next stage 195 196 Args: 197 stages (list of coroutines): A list of Stages API compatible coroutines. 198 maxsize (int): The maximum amount of items a queue between two stages should hold. Optional 199 and defaults to 100. 200 201 Returns: 202 A single coroutine that can be used to run, wait, or cancel the entire pipeline with. 203 Raises: 204 ValueError: When a stage instance is specified more than once. 205 """ 206 futures = [] 207 history = set() 208 in_q = None 209 for i, stage in enumerate(stages): 210 if stage in history: 211 raise ValueError(_("Each stage instance must be unique.")) 212 history.add(stage) 213 if i < len(stages) - 1: 214 if settings.PROFILE_STAGES_API: 215 out_q = ProfilingQueue.make_and_record_queue(stages[i + 1], i + 1, maxsize) 216 else: 217 out_q = asyncio.Queue(maxsize=maxsize) 218 else: 219 out_q = None 220 stage._connect(in_q, out_q) 221 futures.append(asyncio.ensure_future(stage())) 222 in_q = out_q 223 224 try: 225 await asyncio.gather(*futures) 226 except Exception: 227 # One of the stages raised an exception, cancel all stages... 228 pending = [] 229 for task in futures: 230 if not task.done(): 231 task.cancel() 232 pending.append(task) 233 # ...and run until all Exceptions show up 234 if pending: 235 await asyncio.wait(pending, timeout=60) 236 raise 237 238 239 class EndStage(Stage): 240 """ 241 A Stages API stage that drains incoming items and does nothing with the items. This is 242 required at the end of all pipelines. 243 244 Without this stage, the `maxsize` of the last stage's `_out_q` could fill up and block the 245 entire pipeline. 246 """ 247 248 async def __call__(self): 249 """ 250 This method drains items from the last queue and drops them. 251 252 Importantly it does not try to put items into the nonexistent next queue. 253 """ 254 # We overwrite __call__ here to avoid trying to put None in `self._out_q`. 255 async for _ in self.items(): # noqa 256 pass 257 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pulpcore/plugin/stages/api.py b/pulpcore/plugin/stages/api.py --- a/pulpcore/plugin/stages/api.py +++ b/pulpcore/plugin/stages/api.py @@ -179,12 +179,12 @@ return "[{id}] {name}".format(id=id(self), name=self.__class__.__name__) -async def create_pipeline(stages, maxsize=1000): +async def create_pipeline(stages, maxsize=1): """ A coroutine that builds a Stages API linear pipeline from the list `stages` and runs it. Each stage is an instance of a class derived from :class:`pulpcore.plugin.stages.Stage` that - implements the :meth:`run` coroutine. This coroutine reads asyncromously either from the + implements the :meth:`run` coroutine. This coroutine reads asynchronously either from the `items()` iterator or the `batches()` iterator and outputs the items with `put()`. Here is an example of the simplest stage that only passes data:: @@ -196,7 +196,7 @@ Args: stages (list of coroutines): A list of Stages API compatible coroutines. maxsize (int): The maximum amount of items a queue between two stages should hold. Optional - and defaults to 100. + and defaults to 1. Returns: A single coroutine that can be used to run, wait, or cancel the entire pipeline with.
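The golden diff above only lowers the default inter-stage buffering and fixes the docstring wording; it does not implement the `max_mb` idea from the issue's "## Details" section. Purely for illustration, a memory-throttling queue along the lines the reporter describes might look like the sketch below. The `MBSizeQueue` name comes from the issue itself, but the `psutil`-based RSS check, the `max_mb` and `poll_interval` parameters, and the `qsize()`-based deadlock guard are assumptions made for this sketch; none of this exists in pulpcore today.

```python
import asyncio

import psutil  # assumed extra dependency, used only to read the current process RSS


class MBSizeQueue(asyncio.Queue):
    """Sketch of the queue proposed in the issue (not existing pulpcore code).

    Behaves like the plain asyncio.Queue used between stages today, except that
    put() also waits while the syncing process uses more memory than the
    configured ``max_mb`` allows.
    """

    def __init__(self, maxsize=0, *, max_mb=None, poll_interval=0.5):
        super().__init__(maxsize=maxsize)
        self._max_bytes = max_mb * 1024 * 1024 if max_mb else None
        self._poll_interval = poll_interval
        self._process = psutil.Process()

    def _over_budget(self):
        # True when a budget is configured and the process RSS exceeds it.
        return (
            self._max_bytes is not None
            and self._process.memory_info().rss > self._max_bytes
        )

    async def put(self, item):
        # Deadlock guard for the case the issue calls out: a single item can be
        # larger than the whole budget. Only throttle while this queue already
        # holds items, so at least one item can always make progress.
        while self._over_budget() and self.qsize() > 0:
            await asyncio.sleep(self._poll_interval)
        await super().put(item)
```

With something like this, the queue construction inside `create_pipeline()` could become `out_q = MBSizeQueue(maxsize=maxsize, max_mb=max_mb)`, with `max_mb` plumbed through from the new field the issue proposes on the base Remote; that plumbing is exactly the part of the work the sketch leaves out.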
{"golden_diff": "diff --git a/pulpcore/plugin/stages/api.py b/pulpcore/plugin/stages/api.py\n--- a/pulpcore/plugin/stages/api.py\n+++ b/pulpcore/plugin/stages/api.py\n@@ -179,12 +179,12 @@\n return \"[{id}] {name}\".format(id=id(self), name=self.__class__.__name__)\n \n \n-async def create_pipeline(stages, maxsize=1000):\n+async def create_pipeline(stages, maxsize=1):\n \"\"\"\n A coroutine that builds a Stages API linear pipeline from the list `stages` and runs it.\n \n Each stage is an instance of a class derived from :class:`pulpcore.plugin.stages.Stage` that\n- implements the :meth:`run` coroutine. This coroutine reads asyncromously either from the\n+ implements the :meth:`run` coroutine. This coroutine reads asynchronously either from the\n `items()` iterator or the `batches()` iterator and outputs the items with `put()`. Here is an\n example of the simplest stage that only passes data::\n \n@@ -196,7 +196,7 @@\n Args:\n stages (list of coroutines): A list of Stages API compatible coroutines.\n maxsize (int): The maximum amount of items a queue between two stages should hold. Optional\n- and defaults to 100.\n+ and defaults to 1.\n \n Returns:\n A single coroutine that can be used to run, wait, or cancel the entire pipeline with.\n", "issue": "Reduce memory usage of the pipeline\nAuthor: @bmbouter (bmbouter)\n\n\nRedmine Issue: 9635, https://pulp.plan.io/issues/9635\n\n---\n\n## Motivation\r\n\r\nIt would be nice if users could specify a desired maximum amount of RAM to be used during sync. For example, a user can say I only want 1500 MB of RAM to be used max.\r\n\r\n## What is already in place\r\n\r\nThe stages pipeline restricts memory usage by only allowing 1000 declarative content objects between each stage (so for 8-9 stages that's 8000-9000 declarative content objects. This happens [here](https://github.com/pulp/pulpcore/blob/main/pulpcore/plugin/stages/api.py#L217).\r\n\r\nInterestingly the docstring says this defaults to 100, but it seems to actually be 1000!\r\n\r\nAlso the stages perform batching, so they will only taking in a limited number of items (the batch size). That happens [with minsize](https://github.com/pulp/pulpcore/blob/main/pulpcore/plugin/stages/api.py#L84).\r\n\r\n## Why this isn't enough\r\n\r\nThese are count-based mechnisms and don't correspond to actual MB or GB of memory used. Some content units vary a lot in how much memory each DeclarativeContent objects take up.\r\n\r\nAnother lesser problem is that it doesn't help plugin writers restrict their usage of memory in FirstStage.\r\n\r\n## Idea\r\n\r\nAdd a new param called `max_mb` to base Remote, which defaults to None. If specified, the user will be specifying the desired maximum MB used by process syncing.\r\n\r\nHave the queues between the stages, and the bather implementation, both check the total memory the current process is using and asyncio.sleep() polling until it goes down. This should keep the maximum amount used by all objects roughly to that number.\r\n\r\n## Details\r\n\r\nIntroduce a new `MBSizeQueue` which is a wrapper around `asyncio.Queue` used today. It will have the same `put()` call, only wait if the amount of memory in use is greater than the remote is configured for.\r\n\r\nThen introduce the same memory checking feature in the batcher. I'm not completely sure this second part is needed though.\r\n\r\nWe have to be very careful not to deadlock with this feature. For example, we have to account for the base case where even a single item is larger than the memory desired. 
Repos in pulp_rpm have had a single unit use more than 1.2G if I remember right, so if someone was syncing with 800 MB and we weren't careful to allow that unit to still flow through the pipeline we'd deadlock.....\n\n\n\n", "before_files": [{"content": "import asyncio\nimport logging\n\nfrom gettext import gettext as _\n\nfrom django.conf import settings\n\nfrom .profiler import ProfilingQueue\n\n\nlog = logging.getLogger(__name__)\n\n\nclass Stage:\n \"\"\"\n The base class for all Stages API stages.\n\n To make a stage, inherit from this class and implement :meth:`run` on the subclass.\n \"\"\"\n\n def __init__(self):\n self._in_q = None\n self._out_q = None\n\n def _connect(self, in_q, out_q):\n \"\"\"\n Connect to queues within a pipeline.\n\n Args:\n in_q (asyncio.Queue): The stage input queue.\n out_q (asyncio.Queue): The stage output queue.\n \"\"\"\n self._in_q = in_q\n self._out_q = out_q\n\n async def __call__(self):\n \"\"\"\n This coroutine makes the stage callable.\n\n It calls :meth:`run` and signals the next stage that its work is finished.\n \"\"\"\n log.debug(_(\"%(name)s - begin.\"), {\"name\": self})\n await self.run()\n await self._out_q.put(None)\n log.debug(_(\"%(name)s - put end-marker.\"), {\"name\": self})\n\n async def run(self):\n \"\"\"\n The coroutine that is run as part of this stage.\n\n Returns:\n The coroutine that runs this stage.\n\n \"\"\"\n raise NotImplementedError(_(\"A plugin writer must implement this method\"))\n\n async def items(self):\n \"\"\"\n Asynchronous iterator yielding items of :class:`DeclarativeContent` from `self._in_q`.\n\n The iterator will get instances of :class:`DeclarativeContent` one by one as they get\n available.\n\n Yields:\n An instance of :class:`DeclarativeContent`\n\n Examples:\n Used in stages to get d_content instances one by one from `self._in_q`::\n\n class MyStage(Stage):\n async def run(self):\n async for d_content in self.items():\n # process declarative content\n await self.put(d_content)\n\n \"\"\"\n while True:\n content = await self._in_q.get()\n if content is None:\n break\n log.debug(\"%(name)s - next: %(content)s.\", {\"name\": self, \"content\": content})\n yield content\n\n async def batches(self, minsize=500):\n \"\"\"\n Asynchronous iterator yielding batches of :class:`DeclarativeContent` from `self._in_q`.\n\n The iterator will try to get as many instances of\n :class:`DeclarativeContent` as possible without blocking, but\n at least `minsize` instances.\n\n Args:\n minsize (int): The minimum batch size to yield (unless it is the final batch)\n\n Yields:\n A list of :class:`DeclarativeContent` instances\n\n Examples:\n Used in stages to get large chunks of d_content instances from `self._in_q`::\n\n class MyStage(Stage):\n async def run(self):\n async for batch in self.batches():\n for d_content in batch:\n # process declarative content\n await self.put(d_content)\n\n \"\"\"\n batch = []\n shutdown = False\n no_block = False\n thaw_queue_event = asyncio.Event()\n\n def add_to_batch(content):\n nonlocal batch\n nonlocal shutdown\n nonlocal no_block\n nonlocal thaw_queue_event\n\n if content is None:\n shutdown = True\n log.debug(_(\"%(name)s - shutdown.\"), {\"name\": self})\n else:\n if not content.does_batch:\n no_block = True\n content._thaw_queue_event = thaw_queue_event\n batch.append(content)\n\n get_listener = asyncio.ensure_future(self._in_q.get())\n thaw_event_listener = asyncio.ensure_future(thaw_queue_event.wait())\n while not shutdown:\n done, pending = await asyncio.wait(\n 
[thaw_event_listener, get_listener], return_when=asyncio.FIRST_COMPLETED\n )\n if thaw_event_listener in done:\n thaw_event_listener = asyncio.ensure_future(thaw_queue_event.wait())\n no_block = True\n if get_listener in done:\n content = await get_listener\n add_to_batch(content)\n get_listener = asyncio.ensure_future(self._in_q.get())\n while not shutdown:\n try:\n content = self._in_q.get_nowait()\n except asyncio.QueueEmpty:\n break\n else:\n add_to_batch(content)\n\n if batch and (len(batch) >= minsize or shutdown or no_block):\n log.debug(\n _(\"%(name)s - next batch[%(length)d].\"), {\"name\": self, \"length\": len(batch)}\n )\n for content in batch:\n content._thaw_queue_event = None\n thaw_queue_event.clear()\n yield batch\n batch = []\n no_block = False\n thaw_event_listener.cancel()\n get_listener.cancel()\n\n async def put(self, item):\n \"\"\"\n Coroutine to pass items to the next stage.\n\n Args:\n item: A handled instance of :class:`pulpcore.plugin.stages.DeclarativeContent`\n\n Raises:\n ValueError: When `item` is None.\n \"\"\"\n if item is None:\n raise ValueError(_(\"(None) not permitted.\"))\n await self._out_q.put(item)\n log.debug(\"{name} - put: {content}\".format(name=self, content=item))\n\n def __str__(self):\n return \"[{id}] {name}\".format(id=id(self), name=self.__class__.__name__)\n\n\nasync def create_pipeline(stages, maxsize=1000):\n \"\"\"\n A coroutine that builds a Stages API linear pipeline from the list `stages` and runs it.\n\n Each stage is an instance of a class derived from :class:`pulpcore.plugin.stages.Stage` that\n implements the :meth:`run` coroutine. This coroutine reads asyncromously either from the\n `items()` iterator or the `batches()` iterator and outputs the items with `put()`. Here is an\n example of the simplest stage that only passes data::\n\n class MyStage(Stage):\n async def run(self):\n async for d_content in self.items(): # Fetch items from the previous stage\n await self.put(d_content) # Hand them over to the next stage\n\n Args:\n stages (list of coroutines): A list of Stages API compatible coroutines.\n maxsize (int): The maximum amount of items a queue between two stages should hold. Optional\n and defaults to 100.\n\n Returns:\n A single coroutine that can be used to run, wait, or cancel the entire pipeline with.\n Raises:\n ValueError: When a stage instance is specified more than once.\n \"\"\"\n futures = []\n history = set()\n in_q = None\n for i, stage in enumerate(stages):\n if stage in history:\n raise ValueError(_(\"Each stage instance must be unique.\"))\n history.add(stage)\n if i < len(stages) - 1:\n if settings.PROFILE_STAGES_API:\n out_q = ProfilingQueue.make_and_record_queue(stages[i + 1], i + 1, maxsize)\n else:\n out_q = asyncio.Queue(maxsize=maxsize)\n else:\n out_q = None\n stage._connect(in_q, out_q)\n futures.append(asyncio.ensure_future(stage()))\n in_q = out_q\n\n try:\n await asyncio.gather(*futures)\n except Exception:\n # One of the stages raised an exception, cancel all stages...\n pending = []\n for task in futures:\n if not task.done():\n task.cancel()\n pending.append(task)\n # ...and run until all Exceptions show up\n if pending:\n await asyncio.wait(pending, timeout=60)\n raise\n\n\nclass EndStage(Stage):\n \"\"\"\n A Stages API stage that drains incoming items and does nothing with the items. 
This is\n required at the end of all pipelines.\n\n Without this stage, the `maxsize` of the last stage's `_out_q` could fill up and block the\n entire pipeline.\n \"\"\"\n\n async def __call__(self):\n \"\"\"\n This method drains items from the last queue and drops them.\n\n Importantly it does not try to put items into the nonexistent next queue.\n \"\"\"\n # We overwrite __call__ here to avoid trying to put None in `self._out_q`.\n async for _ in self.items(): # noqa\n pass\n", "path": "pulpcore/plugin/stages/api.py"}], "after_files": [{"content": "import asyncio\nimport logging\n\nfrom gettext import gettext as _\n\nfrom django.conf import settings\n\nfrom .profiler import ProfilingQueue\n\n\nlog = logging.getLogger(__name__)\n\n\nclass Stage:\n \"\"\"\n The base class for all Stages API stages.\n\n To make a stage, inherit from this class and implement :meth:`run` on the subclass.\n \"\"\"\n\n def __init__(self):\n self._in_q = None\n self._out_q = None\n\n def _connect(self, in_q, out_q):\n \"\"\"\n Connect to queues within a pipeline.\n\n Args:\n in_q (asyncio.Queue): The stage input queue.\n out_q (asyncio.Queue): The stage output queue.\n \"\"\"\n self._in_q = in_q\n self._out_q = out_q\n\n async def __call__(self):\n \"\"\"\n This coroutine makes the stage callable.\n\n It calls :meth:`run` and signals the next stage that its work is finished.\n \"\"\"\n log.debug(_(\"%(name)s - begin.\"), {\"name\": self})\n await self.run()\n await self._out_q.put(None)\n log.debug(_(\"%(name)s - put end-marker.\"), {\"name\": self})\n\n async def run(self):\n \"\"\"\n The coroutine that is run as part of this stage.\n\n Returns:\n The coroutine that runs this stage.\n\n \"\"\"\n raise NotImplementedError(_(\"A plugin writer must implement this method\"))\n\n async def items(self):\n \"\"\"\n Asynchronous iterator yielding items of :class:`DeclarativeContent` from `self._in_q`.\n\n The iterator will get instances of :class:`DeclarativeContent` one by one as they get\n available.\n\n Yields:\n An instance of :class:`DeclarativeContent`\n\n Examples:\n Used in stages to get d_content instances one by one from `self._in_q`::\n\n class MyStage(Stage):\n async def run(self):\n async for d_content in self.items():\n # process declarative content\n await self.put(d_content)\n\n \"\"\"\n while True:\n content = await self._in_q.get()\n if content is None:\n break\n log.debug(\"%(name)s - next: %(content)s.\", {\"name\": self, \"content\": content})\n yield content\n\n async def batches(self, minsize=500):\n \"\"\"\n Asynchronous iterator yielding batches of :class:`DeclarativeContent` from `self._in_q`.\n\n The iterator will try to get as many instances of\n :class:`DeclarativeContent` as possible without blocking, but\n at least `minsize` instances.\n\n Args:\n minsize (int): The minimum batch size to yield (unless it is the final batch)\n\n Yields:\n A list of :class:`DeclarativeContent` instances\n\n Examples:\n Used in stages to get large chunks of d_content instances from `self._in_q`::\n\n class MyStage(Stage):\n async def run(self):\n async for batch in self.batches():\n for d_content in batch:\n # process declarative content\n await self.put(d_content)\n\n \"\"\"\n batch = []\n shutdown = False\n no_block = False\n thaw_queue_event = asyncio.Event()\n\n def add_to_batch(content):\n nonlocal batch\n nonlocal shutdown\n nonlocal no_block\n nonlocal thaw_queue_event\n\n if content is None:\n shutdown = True\n log.debug(_(\"%(name)s - shutdown.\"), {\"name\": self})\n else:\n if not 
content.does_batch:\n no_block = True\n content._thaw_queue_event = thaw_queue_event\n batch.append(content)\n\n get_listener = asyncio.ensure_future(self._in_q.get())\n thaw_event_listener = asyncio.ensure_future(thaw_queue_event.wait())\n while not shutdown:\n done, pending = await asyncio.wait(\n [thaw_event_listener, get_listener], return_when=asyncio.FIRST_COMPLETED\n )\n if thaw_event_listener in done:\n thaw_event_listener = asyncio.ensure_future(thaw_queue_event.wait())\n no_block = True\n if get_listener in done:\n content = await get_listener\n add_to_batch(content)\n get_listener = asyncio.ensure_future(self._in_q.get())\n while not shutdown:\n try:\n content = self._in_q.get_nowait()\n except asyncio.QueueEmpty:\n break\n else:\n add_to_batch(content)\n\n if batch and (len(batch) >= minsize or shutdown or no_block):\n log.debug(\n _(\"%(name)s - next batch[%(length)d].\"), {\"name\": self, \"length\": len(batch)}\n )\n for content in batch:\n content._thaw_queue_event = None\n thaw_queue_event.clear()\n yield batch\n batch = []\n no_block = False\n thaw_event_listener.cancel()\n get_listener.cancel()\n\n async def put(self, item):\n \"\"\"\n Coroutine to pass items to the next stage.\n\n Args:\n item: A handled instance of :class:`pulpcore.plugin.stages.DeclarativeContent`\n\n Raises:\n ValueError: When `item` is None.\n \"\"\"\n if item is None:\n raise ValueError(_(\"(None) not permitted.\"))\n await self._out_q.put(item)\n log.debug(\"{name} - put: {content}\".format(name=self, content=item))\n\n def __str__(self):\n return \"[{id}] {name}\".format(id=id(self), name=self.__class__.__name__)\n\n\nasync def create_pipeline(stages, maxsize=1):\n \"\"\"\n A coroutine that builds a Stages API linear pipeline from the list `stages` and runs it.\n\n Each stage is an instance of a class derived from :class:`pulpcore.plugin.stages.Stage` that\n implements the :meth:`run` coroutine. This coroutine reads asynchronously either from the\n `items()` iterator or the `batches()` iterator and outputs the items with `put()`. Here is an\n example of the simplest stage that only passes data::\n\n class MyStage(Stage):\n async def run(self):\n async for d_content in self.items(): # Fetch items from the previous stage\n await self.put(d_content) # Hand them over to the next stage\n\n Args:\n stages (list of coroutines): A list of Stages API compatible coroutines.\n maxsize (int): The maximum amount of items a queue between two stages should hold. 
Optional\n and defaults to 1.\n\n Returns:\n A single coroutine that can be used to run, wait, or cancel the entire pipeline with.\n Raises:\n ValueError: When a stage instance is specified more than once.\n \"\"\"\n futures = []\n history = set()\n in_q = None\n for i, stage in enumerate(stages):\n if stage in history:\n raise ValueError(_(\"Each stage instance must be unique.\"))\n history.add(stage)\n if i < len(stages) - 1:\n if settings.PROFILE_STAGES_API:\n out_q = ProfilingQueue.make_and_record_queue(stages[i + 1], i + 1, maxsize)\n else:\n out_q = asyncio.Queue(maxsize=maxsize)\n else:\n out_q = None\n stage._connect(in_q, out_q)\n futures.append(asyncio.ensure_future(stage()))\n in_q = out_q\n\n try:\n await asyncio.gather(*futures)\n except Exception:\n # One of the stages raised an exception, cancel all stages...\n pending = []\n for task in futures:\n if not task.done():\n task.cancel()\n pending.append(task)\n # ...and run until all Exceptions show up\n if pending:\n await asyncio.wait(pending, timeout=60)\n raise\n\n\nclass EndStage(Stage):\n \"\"\"\n A Stages API stage that drains incoming items and does nothing with the items. This is\n required at the end of all pipelines.\n\n Without this stage, the `maxsize` of the last stage's `_out_q` could fill up and block the\n entire pipeline.\n \"\"\"\n\n async def __call__(self):\n \"\"\"\n This method drains items from the last queue and drops them.\n\n Importantly it does not try to put items into the nonexistent next queue.\n \"\"\"\n # We overwrite __call__ here to avoid trying to put None in `self._out_q`.\n async for _ in self.items(): # noqa\n pass\n", "path": "pulpcore/plugin/stages/api.py"}]}
3,348
335
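The issue also notes that count-based limits do nothing for a plugin's first stage, and suggests adding the same memory check to the batcher. Both could share a small helper along the lines of the sketch below; as above, the helper name, the `psutil` usage, and the `remote.max_mb` field are assumptions for illustration, not existing pulpcore API.

```python
import asyncio

import psutil  # assumed dependency, as in the MBSizeQueue sketch above


async def wait_for_memory_headroom(max_mb, poll_interval=0.5):
    """Sleep until the current process RSS drops below max_mb (a sketch only).

    Callers must already have handed at least one item to the pipeline before
    waiting here, otherwise a single oversized unit could stall the sync.
    """
    if max_mb is None:
        return
    limit = max_mb * 1024 * 1024
    process = psutil.Process()
    while process.memory_info().rss > limit:
        await asyncio.sleep(poll_interval)


# Hypothetical use in a plugin's first stage, assuming the Remote grows the
# proposed max_mb field:
#
#     async def run(self):
#         async for record in some_metadata_iterator:
#             await wait_for_memory_headroom(self.remote.max_mb)
#             await self.put(DeclarativeContent(content=...))
```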